diff --git a/include/zephyr/fs/zms.h b/include/zephyr/fs/zms.h new file mode 100644 index 000000000000000..0057ad0b97ec4c4 --- /dev/null +++ b/include/zephyr/fs/zms.h @@ -0,0 +1,199 @@ +/* ZMS: Zephyr Memory Storage + * + * Copyright (c) 2024 BayLibre SAS + * + * SPDX-License-Identifier: Apache-2.0 + */ +#ifndef ZEPHYR_INCLUDE_FS_ZMS_H_ +#define ZEPHYR_INCLUDE_FS_ZMS_H_ + +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Zephyr Memory Storage (ZMS) + * @defgroup zms Zephyr Memory Storage (ZMS) + * @ingroup file_system_storage + * @{ + * @} + */ + +/** + * @brief Zephyr Memory Storage Data Structures + * @defgroup zms_data_structures Zephyr Memory Storage Data Structures + * @ingroup zms + * @{ + */ + +/** + * @brief Zephyr Memory Storage File system structure + */ +struct zms_fs { + /** File system offset in flash **/ + off_t offset; + /** Allocation table entry write address. + * Addresses are stored as uint64_t: + * - high 4 bytes correspond to the sector + * - low 4 bytes are the offset in the sector + */ + uint64_t ate_wra; + /** Data write address */ + uint64_t data_wra; + /** File system is split into sectors, each sector must be multiple of flash pages */ + uint32_t sector_size; + /** Number of sectors in the file system */ + uint32_t sector_count; + /** Current cycle counter of the sector */ + uint8_t sector_cycle; + /** Flag indicating if the file system is initialized */ + bool ready; + /** Mutex */ + struct k_mutex zms_lock; + /** Flash device runtime structure */ + const struct device *flash_device; + /** Flash memory parameters structure */ + const struct flash_parameters *flash_parameters; +#if CONFIG_ZMS_LOOKUP_CACHE + uint64_t lookup_cache[CONFIG_ZMS_LOOKUP_CACHE_SIZE]; +#endif +}; + +/** + * @} + */ + +/** + * @brief Zephyr Memory Storage APIs + * @defgroup zms_high_level_api Zephyr Memory Storage APIs + * @ingroup zms + * @{ + */ + +/** + * @brief Mount a ZMS file system onto the device specified 
in @p fs. + * + * @param fs Pointer to file system + * @retval 0 Success + * @retval -ERRNO errno code if error + */ +int zms_mount(struct zms_fs *fs); + +/** + * @brief Clear the ZMS file system from device. + * + * @param fs Pointer to file system + * @retval 0 Success + * @retval -ERRNO errno code if error + */ +int zms_clear(struct zms_fs *fs); + +/** + * @brief Write an entry to the file system. + * + * @note When @p len parameter is equal to @p 0 then entry is effectively removed (it is + * equivalent to calling zms_delete). Any calls to zms_read for entries with data of length + * @p 0 will return an error.@n It is not possible to distinguish between deleted entry and entry + * with data of length 0. + * + * @param fs Pointer to file system + * @param id Id of the entry to be written + * @param data Pointer to the data to be written + * @param len Number of bytes to be written + * + * @return Number of bytes written. On success, it will be equal to the number of bytes requested + * to be written. When a rewrite of the same data already stored is attempted, nothing is written + * to flash, thus 0 is returned. On error, returns negative value of errno.h defined error codes. + */ +ssize_t zms_write(struct zms_fs *fs, uint32_t id, const void *data, size_t len); + +/** + * @brief Delete an entry from the file system. + * + * @param fs Pointer to file system + * @param id Id of the entry to be deleted + * @retval 0 Success + * @retval -ERRNO errno code if error + */ +int zms_delete(struct zms_fs *fs, uint32_t id); + +/** + * @brief Read an entry from the file system. + * + * @param fs Pointer to file system + * @param id Id of the entry to be read + * @param data Pointer to data buffer + * @param len Number of bytes to be read + * + * @return Number of bytes read. On success, it will be equal to the number of bytes requested + * to be read. 
When the return value is larger than the number of bytes requested to read this + * indicates not all bytes were read, and more data is available. On error, returns negative + * value of errno.h defined error codes. + */ +ssize_t zms_read(struct zms_fs *fs, uint32_t id, void *data, size_t len); + +/** + * @brief Read a history entry from the file system. + * + * @param fs Pointer to file system + * @param id Id of the entry to be read + * @param data Pointer to data buffer + * @param len Number of bytes to be read + * @param cnt History counter: 0: latest entry, 1: one before latest ... + * + * @return Number of bytes read. On success, it will be equal to the number of bytes requested + * to be read. When the return value is larger than the number of bytes requested to read this + * indicates not all bytes were read, and more data is available. On error, returns negative + * value of errno.h defined error codes. + */ +ssize_t zms_read_hist(struct zms_fs *fs, uint32_t id, void *data, size_t len, uint32_t cnt); + +/** + * @brief Calculate the available free space in the file system. + * + * @param fs Pointer to file system + * + * @return Number of bytes free. On success, it will be equal to the number of bytes that can + * still be written to the file system. Calculating the free space is a time consuming operation, + * especially on SPI flash. On error, returns negative value of errno.h defined error codes. + */ +ssize_t zms_calc_free_space(struct zms_fs *fs); + +/** + * @brief Tell how much contiguous free space remains in the currently active ZMS sector. + * + * @param fs Pointer to the file system. + * + * @return Number of free bytes. + */ +size_t zms_sector_max_data_size(struct zms_fs *fs); + +/** + * @brief Close the currently active sector and switch to the next one. + * + * @note The garbage collector is called on the new sector. + * + * @warning This routine is made available for specific use cases. 
+ * It breaks the aim of the ZMS to avoid any unnecessary flash erases. + * Using this routine extensively can result in premature failure of the flash device. + * + * @param fs Pointer to the file system. + * + * @return 0 on success. On error, returns negative value of errno.h defined error codes. + */ +int zms_sector_use_next(struct zms_fs *fs); + +/** + * @} + */ + +#ifdef __cplusplus +} +#endif + +#endif /* ZEPHYR_INCLUDE_FS_ZMS_H_ */ diff --git a/subsys/fs/CMakeLists.txt b/subsys/fs/CMakeLists.txt index fb774711543a7ba..825b8356381bf97 100644 --- a/subsys/fs/CMakeLists.txt +++ b/subsys/fs/CMakeLists.txt @@ -25,6 +25,7 @@ endif() add_subdirectory_ifdef(CONFIG_FCB ./fcb) add_subdirectory_ifdef(CONFIG_NVS ./nvs) +add_subdirectory_ifdef(CONFIG_ZMS ./zms) if(CONFIG_FUSE_FS_ACCESS) zephyr_library_named(FS_FUSE) diff --git a/subsys/fs/Kconfig b/subsys/fs/Kconfig index 0a20aea7ee8a48b..7029f84f5fdadd5 100644 --- a/subsys/fs/Kconfig +++ b/subsys/fs/Kconfig @@ -99,5 +99,6 @@ endif # FILE_SYSTEM rsource "fcb/Kconfig" rsource "nvs/Kconfig" +rsource "zms/Kconfig" endmenu diff --git a/subsys/fs/zms/CMakeLists.txt b/subsys/fs/zms/CMakeLists.txt new file mode 100644 index 000000000000000..b6db8a3f57fa9a0 --- /dev/null +++ b/subsys/fs/zms/CMakeLists.txt @@ -0,0 +1,3 @@ +#SPDX-License-Identifier: Apache-2.0 + +zephyr_sources(zms.c) diff --git a/subsys/fs/zms/Kconfig b/subsys/fs/zms/Kconfig new file mode 100644 index 000000000000000..49a3d715d6d1b78 --- /dev/null +++ b/subsys/fs/zms/Kconfig @@ -0,0 +1,40 @@ +#Zephyr Memory Storage ZMS + +#Copyright(c) 2024 Riadh Ghaddab < rghaddab @baylibre.fr> + +#SPDX-License-Identifier: Apache-2.0 + +config ZMS + bool "Zephyr Memory Storage" + select CRC + help + Enable support of Zephyr Memory Storage. + +if ZMS + +config ZMS_LOOKUP_CACHE + bool "ZMS lookup cache" + help + Enable ZMS cache, used to reduce the ZMS data lookup time. 
+ Each cache entry holds an address of the most recent allocation + table entry (ATE) for all ZMS IDs that fall into that cache position. + +config ZMS_LOOKUP_CACHE_SIZE + int "ZMS Storage lookup cache size" + default 128 + range 1 65536 + depends on ZMS_LOOKUP_CACHE + help + Number of entries in ZMS lookup cache. + It is recommended that it be a power of 2. + +config ZMS_DATA_CRC + bool "ZMS DATA CRC" + help + Enables DATA CRC + +module = ZMS +module-str = zms +source "subsys/logging/Kconfig.template.log_config" + +endif # ZMS diff --git a/subsys/fs/zms/zms.c b/subsys/fs/zms/zms.c new file mode 100644 index 000000000000000..be89b7f8c5c327b --- /dev/null +++ b/subsys/fs/zms/zms.c @@ -0,0 +1,1602 @@ +/* ZMS: Zephyr Memory Storage + * + * Copyright (c) 2024 BayLibre SAS + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include +#include +#include +#include +#include +#include +#include "zms_priv.h" + +#include +LOG_MODULE_REGISTER(fs_zms, CONFIG_ZMS_LOG_LEVEL); + +static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate); +static int zms_ate_valid(struct zms_fs *fs, const struct zms_ate *entry); +static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt); +static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate, + struct zms_ate *close_ate); +static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_ate *entry, + uint8_t cycle_cnt); + +#ifdef CONFIG_ZMS_LOOKUP_CACHE + +static inline size_t zms_lookup_cache_pos(uint32_t id) +{ + uint32_t hash; + + /* 32-bit integer hash function found by https://github.com/skeeto/hash-prospector. 
*/ + hash = id; + hash ^= hash >> 16; + hash *= 0x7feb352dU; + hash ^= hash >> 15; + hash *= 0x846ca68bU; + hash ^= hash >> 16; + + return hash % CONFIG_ZMS_LOOKUP_CACHE_SIZE; +} + +static int zms_lookup_cache_rebuild(struct zms_fs *fs) +{ + int rc, previous_sector_num = -1; + uint64_t addr, ate_addr; + uint64_t *cache_entry; + uint8_t current_cycle; + struct zms_ate ate; + + memset(fs->lookup_cache, 0xff, sizeof(fs->lookup_cache)); + addr = fs->ate_wra; + + while (true) { + /* Make a copy of 'addr' as it will be advanced by zms_prev_ate() */ + ate_addr = addr; + rc = zms_prev_ate(fs, &addr, &ate); + + if (rc) { + return rc; + } + + cache_entry = &fs->lookup_cache[zms_lookup_cache_pos(ate.id)]; + + if (ate.id != ZMS_HEAD_ID && *cache_entry == ZMS_LOOKUP_CACHE_NO_ADDR) { + /* read the ate cycle only when we change the sector + * or if it is the first read + */ + if ((SECTOR_NUM(ate_addr) != previous_sector_num) || + (previous_sector_num == -1)) { + rc = zms_get_sector_cycle(fs, ate_addr, ¤t_cycle); + if (rc) { + return rc; + } + } + if (zms_ate_valid_different_sector(fs, &ate, current_cycle)) { + *cache_entry = ate_addr; + } + previous_sector_num = SECTOR_NUM(ate_addr); + } + + if (addr == fs->ate_wra) { + break; + } + } + + return 0; +} + +static void zms_lookup_cache_invalidate(struct zms_fs *fs, uint32_t sector) +{ + uint64_t *cache_entry = fs->lookup_cache; + uint64_t *const cache_end = &fs->lookup_cache[CONFIG_ZMS_LOOKUP_CACHE_SIZE]; + + for (; cache_entry < cache_end; ++cache_entry) { + if ((*cache_entry >> ADDR_SECT_SHIFT) == sector) { + *cache_entry = ZMS_LOOKUP_CACHE_NO_ADDR; + } + } +} + +#endif /* CONFIG_ZMS_LOOKUP_CACHE */ + +/* zms_al_size returns size aligned to fs->write_block_size */ +static inline size_t zms_al_size(struct zms_fs *fs, size_t len) +{ + size_t write_block_size = fs->flash_parameters->write_block_size; + + if (write_block_size <= 1U) { + return len; + } + return (len + (write_block_size - 1U)) & ~(write_block_size - 1U); +} + +/* 
Aligned memory write */ +static int zms_flash_al_wrt(struct zms_fs *fs, uint64_t addr, const void *data, size_t len) +{ + const uint8_t *data8 = (const uint8_t *)data; + int rc = 0; + off_t offset; + size_t blen; + uint8_t buf[ZMS_BLOCK_SIZE]; + bool ebw_required = + flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT; + + if (!len) { + /* Nothing to write, avoid changing the flash protection */ + return 0; + } + + offset = fs->offset; + offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT); + offset += addr & ADDR_OFFS_MASK; + + blen = len & ~(fs->flash_parameters->write_block_size - 1U); + if (blen > 0) { + rc = flash_write(fs->flash_device, offset, data8, blen); + if (rc) { + /* flash write error */ + goto end; + } + len -= blen; + offset += blen; + data8 += blen; + } + if (len) { + memcpy(buf, data8, len); + if (ebw_required) { + (void)memset(buf + len, fs->flash_parameters->erase_value, + fs->flash_parameters->write_block_size - len); + } + + rc = flash_write(fs->flash_device, offset, buf, + fs->flash_parameters->write_block_size); + } + +end: + return rc; +} + +/* basic flash read from zms address */ +static int zms_flash_rd(struct zms_fs *fs, uint64_t addr, void *data, size_t len) +{ + int rc; + off_t offset; + + offset = fs->offset; + offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT); + offset += addr & ADDR_OFFS_MASK; + + rc = flash_read(fs->flash_device, offset, data, len); + return rc; +} + +/* allocation entry write */ +static int zms_flash_ate_wrt(struct zms_fs *fs, const struct zms_ate *entry) +{ + int rc; + + rc = zms_flash_al_wrt(fs, fs->ate_wra, entry, sizeof(struct zms_ate)); +#ifdef CONFIG_ZMS_LOOKUP_CACHE + /* 0xFFFFFFFF is a special-purpose identifier. 
Exclude it from the cache */ + if (entry->id != ZMS_HEAD_ID) { + fs->lookup_cache[zms_lookup_cache_pos(entry->id)] = fs->ate_wra; + } +#endif + fs->ate_wra -= zms_al_size(fs, sizeof(struct zms_ate)); + + return rc; +} + +/* data write */ +static int zms_flash_data_wrt(struct zms_fs *fs, const void *data, size_t len) +{ + int rc; + + rc = zms_flash_al_wrt(fs, fs->data_wra, data, len); + fs->data_wra += zms_al_size(fs, len); + + return rc; +} + +/* flash ate read */ +static int zms_flash_ate_rd(struct zms_fs *fs, uint64_t addr, struct zms_ate *entry) +{ + return zms_flash_rd(fs, addr, entry, sizeof(struct zms_ate)); +} + +/* zms_flash_block_cmp compares the data in flash at addr to data + * in blocks of size ZMS_BLOCK_SIZE aligned to fs->write_block_size + * returns 0 if equal, 1 if not equal, errcode if error + */ +static int zms_flash_block_cmp(struct zms_fs *fs, uint64_t addr, const void *data, size_t len) +{ + const uint8_t *data8 = (const uint8_t *)data; + int rc; + size_t bytes_to_cmp, block_size; + uint8_t buf[ZMS_BLOCK_SIZE]; + + block_size = ZMS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U); + + while (len) { + bytes_to_cmp = MIN(block_size, len); + rc = zms_flash_rd(fs, addr, buf, bytes_to_cmp); + if (rc) { + return rc; + } + rc = memcmp(data8, buf, bytes_to_cmp); + if (rc) { + return 1; + } + len -= bytes_to_cmp; + addr += bytes_to_cmp; + data8 += bytes_to_cmp; + } + return 0; +} + +/* zms_flash_cmp_const compares the data in flash at addr to a constant + * value. 
returns 0 if all data in flash is equal to value, 1 if not equal, + * errcode if error + */ +static int zms_flash_cmp_const(struct zms_fs *fs, uint64_t addr, uint8_t value, size_t len) +{ + int rc; + size_t bytes_to_cmp, block_size; + uint8_t cmp[ZMS_BLOCK_SIZE]; + + block_size = ZMS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U); + + (void)memset(cmp, value, block_size); + while (len) { + bytes_to_cmp = MIN(block_size, len); + rc = zms_flash_block_cmp(fs, addr, cmp, bytes_to_cmp); + if (rc) { + return rc; + } + len -= bytes_to_cmp; + addr += bytes_to_cmp; + } + return 0; +} + +/* flash block move: move a block at addr to the current data write location + * and updates the data write location. + */ +static int zms_flash_block_move(struct zms_fs *fs, uint64_t addr, size_t len) +{ + int rc; + size_t bytes_to_copy, block_size; + uint8_t buf[ZMS_BLOCK_SIZE]; + + block_size = ZMS_BLOCK_SIZE & ~(fs->flash_parameters->write_block_size - 1U); + + while (len) { + bytes_to_copy = MIN(block_size, len); + rc = zms_flash_rd(fs, addr, buf, bytes_to_copy); + if (rc) { + return rc; + } + rc = zms_flash_data_wrt(fs, buf, bytes_to_copy); + if (rc) { + return rc; + } + len -= bytes_to_copy; + addr += bytes_to_copy; + } + return 0; +} + +/* erase a sector and verify erase was OK. + * return 0 if OK, errorcode on error. 
+ */ +static int zms_flash_erase_sector(struct zms_fs *fs, uint64_t addr) +{ + int rc; + off_t offset; + + addr &= ADDR_SECT_MASK; + + offset = fs->offset; + offset += fs->sector_size * (addr >> ADDR_SECT_SHIFT); + + LOG_DBG("Erasing flash at %lx, len %d", (long)offset, fs->sector_size); + +#ifdef CONFIG_ZMS_LOOKUP_CACHE + zms_lookup_cache_invalidate(fs, addr >> ADDR_SECT_SHIFT); +#endif + rc = flash_flatten(fs->flash_device, offset, fs->sector_size); + + if (rc) { + return rc; + } + + if (zms_flash_cmp_const(fs, addr, fs->flash_parameters->erase_value, fs->sector_size)) { + rc = -ENXIO; + } + + return rc; +} + +/* crc update on allocation entry */ +static void zms_ate_crc8_update(struct zms_ate *entry) +{ + uint8_t crc8; + + crc8 = crc8_ccitt(0xff, entry, offsetof(struct zms_ate, crc8)); + entry->crc8 = crc8; +} + +/* crc check on allocation entry + * returns 0 if OK, 1 on crc fail + */ +static int zms_ate_crc8_check(const struct zms_ate *entry) +{ + uint8_t crc8; + + crc8 = crc8_ccitt(0xff, entry, offsetof(struct zms_ate, crc8)); + if (crc8 == entry->crc8) { + return 0; + } + return 1; +} + +/* zms_ate_valid validates an ate: + * return 1 if crc8 and offset valid, + * 0 otherwise + */ +static int zms_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) +{ + size_t ate_size; + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + if ((zms_ate_crc8_check(entry)) || (entry->offset >= (fs->sector_size - ate_size)) || + (fs->sector_cycle != entry->cycle_cnt)) { + return 0; + } + + return 1; +} + +/* zms_ate_valid_different_sector validates an ate that is in a different + * sector than the active one. 
It takes as argument the cycle_cnt of the + * sector where the ATE to be validated is stored + * return 1 if crc8 , offset and cycle_cnt are valid, + * 0 otherwise + */ +static int zms_ate_valid_different_sector(struct zms_fs *fs, const struct zms_ate *entry, + uint8_t cycle_cnt) +{ + size_t ate_size; + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + if ((zms_ate_crc8_check(entry)) || (entry->offset >= (fs->sector_size - ate_size)) || + (cycle_cnt != entry->cycle_cnt)) { + return 0; + } + + return 1; +} + +static inline int zms_get_cycle_on_sector_change(struct zms_fs *fs, uint64_t addr, + int previous_sector_num, uint8_t *cycle_cnt) +{ + int rc; + + /* read the ate cycle only when we change the sector + * or if it is the first read + */ + if ((SECTOR_NUM(addr) != previous_sector_num) || (previous_sector_num == -1)) { + rc = zms_get_sector_cycle(fs, addr, cycle_cnt); + if (rc) { + return rc; + } + } + + return 0; +} + +/* zms_close_ate_valid validates an sector close ate: a valid sector close ate: + * - valid ate + * - len = 0 and id = ZMS_HEAD_ID + * - offset points to location at ate multiple from sector size + * return 1 if valid, 0 otherwise + */ +static int zms_close_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) +{ + size_t ate_size; + + if ((!zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt)) || (entry->len != 0U) || + (entry->id != ZMS_HEAD_ID)) { + return 0; + } + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + if ((fs->sector_size - entry->offset) % ate_size) { + return 0; + } + + return 1; +} + +/* zms_empty_ate_valid validates an sector empty ate: a valid sector empty ate: + * - valid ate + * - len = 0xffff and id = 0xffffffff + * return 1 if valid, 0 otherwise + */ +static int zms_empty_ate_valid(struct zms_fs *fs, const struct zms_ate *entry) +{ + if ((!zms_ate_valid_different_sector(fs, entry, entry->cycle_cnt)) || + (entry->len != 0xffff) || (entry->id != ZMS_HEAD_ID)) { + return 0; + } + + return 1; +} + +/* 
store an entry in flash */ +static int zms_flash_write_entry(struct zms_fs *fs, uint32_t id, const void *data, size_t len) +{ + int rc; + struct zms_ate entry; + + entry.id = id; + entry.offset = (uint32_t)(fs->data_wra & ADDR_OFFS_MASK); + entry.len = (uint16_t)len; + entry.cycle_cnt = fs->sector_cycle; + + /* only compute CRC if len is greater than 4 bytes */ + if (IS_ENABLED(CONFIG_ZMS_DATA_CRC) && (len > 4)) { + entry.data_crc = crc32_ieee(data, len); + } else if ((len > 0) && (len <= 4)) { + /* Copy data into entry for small data ( < 4B) */ + uint8_t *data8 = (uint8_t *)data; + + memcpy(&entry.data, data8, len); + } + + zms_ate_crc8_update(&entry); + + if (len > 4) { + rc = zms_flash_data_wrt(fs, data, len); + if (rc) { + return rc; + } + } + + rc = zms_flash_ate_wrt(fs, &entry); + if (rc) { + return rc; + } + + return 0; +} + +/* end of flash routines */ + +/* Search for the last valid ATE written in a sector + */ +static int zms_recover_last_ate(struct zms_fs *fs, uint64_t *addr) +{ + uint64_t data_end_addr, ate_end_addr; + struct zms_ate end_ate; + size_t ate_size; + int rc; + + LOG_DBG("Recovering last ate from sector %llu", SECTOR_NUM(*addr)); + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + /* skip close and empty ATE */ + *addr -= 2 * ate_size; + + ate_end_addr = *addr; + data_end_addr = *addr & ADDR_SECT_MASK; + while (ate_end_addr > data_end_addr) { + rc = zms_flash_ate_rd(fs, ate_end_addr, &end_ate); + if (rc) { + return rc; + } + if (zms_ate_valid(fs, &end_ate)) { + /* found a valid ate, update data_end_addr and *addr */ + data_end_addr &= ADDR_SECT_MASK; + if (end_ate.len > 4) { + data_end_addr += end_ate.offset + end_ate.len; + } + *addr = ate_end_addr; + } + ate_end_addr -= ate_size; + } + + return 0; +} + +/* walking through allocation entry list, from newest to oldest entries + * read ate from addr, modify addr to the previous ate + */ +static int zms_prev_ate(struct zms_fs *fs, uint64_t *addr, struct zms_ate *ate) +{ + int rc; + 
struct zms_ate empty_ate, close_ate; + size_t ate_size; + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + rc = zms_flash_ate_rd(fs, *addr, ate); + if (rc) { + return rc; + } + + *addr += ate_size; + if (((*addr) & ADDR_OFFS_MASK) != (fs->sector_size - 2 * ate_size)) { + return 0; + } + + /* last ate in sector, do jump to previous sector */ + if (((*addr) >> ADDR_SECT_SHIFT) == 0U) { + *addr += ((uint64_t)(fs->sector_count - 1) << ADDR_SECT_SHIFT); + } else { + *addr -= (1ULL << ADDR_SECT_SHIFT); + } + + /* read the first and second ate */ + rc = zms_get_sector_header(fs, *addr + ate_size, &empty_ate, &close_ate); + if (rc) { + return rc; + } + + if (!zms_empty_ate_valid(fs, &empty_ate) || !zms_close_ate_valid(fs, &close_ate) || + (empty_ate.cycle_cnt != close_ate.cycle_cnt)) { + /* at the end of filesystem */ + *addr = fs->ate_wra; + return 0; + } + + /* Update the address if the close ate is valid. + */ + if (zms_close_ate_valid(fs, &close_ate) && zms_empty_ate_valid(fs, &empty_ate) && + (close_ate.cycle_cnt == empty_ate.cycle_cnt)) { + (*addr) &= ADDR_SECT_MASK; + (*addr) += close_ate.offset; + return 0; + } + + /* The close_ate was invalid, `lets find out the last valid ate + * and point the address to this found ate. + * + * remark: if there was absolutely no valid data in the sector *addr + * is kept at sector_end - 2*ate_size, the next read will contain + * invalid data and continue with a sector jump + */ + return zms_recover_last_ate(fs, addr); +} + +static void zms_sector_advance(struct zms_fs *fs, uint64_t *addr) +{ + *addr += (1ULL << ADDR_SECT_SHIFT); + if ((*addr >> ADDR_SECT_SHIFT) == fs->sector_count) { + *addr -= ((uint64_t)fs->sector_count << ADDR_SECT_SHIFT); + } +} + +/* allocation entry close (this closes the current sector) by writing offset + * of last ate to the sector end. 
+ */ +static int zms_sector_close(struct zms_fs *fs) +{ + int rc; + struct zms_ate close_ate, garbage_ate; + size_t ate_size; + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + close_ate.id = ZMS_HEAD_ID; + close_ate.len = 0U; + close_ate.offset = (uint32_t)((fs->ate_wra + ate_size) & ADDR_OFFS_MASK); + close_ate.data = 0xffffffff; + + /* When we close the sector, we must write all non used ATE with + * a non valid (Junk) ATE. + * This is needed to avoid some corner cases where some ATEs are + * not overwritten and become valid when the cycle counter wrap again + * to the same cycle counter of the old ATE. + * Example : + * - An ATE.cycl_cnt == 0 is written as last ATE of the sector + - This ATE was never overwritten in the next 255 cycles because of + large data size + - Next 256th cycle the leading cycle_cnt is 0, this ATE becomes + valid even if it is not the case. + */ + close_ate.cycle_cnt = fs->sector_cycle; + memset(&garbage_ate, fs->flash_parameters->erase_value, sizeof(garbage_ate)); + while (fs->ate_wra >= (fs->data_wra + ate_size)) { + rc = zms_flash_ate_wrt(fs, &garbage_ate); + if (rc) { + return rc; + } + } + + fs->ate_wra &= ADDR_SECT_MASK; + fs->ate_wra += (fs->sector_size - ate_size); + + zms_ate_crc8_update(&close_ate); + + (void)zms_flash_ate_wrt(fs, &close_ate); + + /* skip the empty ATE */ + fs->ate_wra -= ate_size; + + zms_sector_advance(fs, &fs->ate_wra); + + rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); + if (rc) { + return rc; + } + + fs->data_wra = fs->ate_wra & ADDR_SECT_MASK; + + return 0; +} + +static int zms_add_gc_done_ate(struct zms_fs *fs) +{ + struct zms_ate gc_done_ate; + + LOG_DBG("Adding gc done ate at %llx", fs->ate_wra & ADDR_OFFS_MASK); + gc_done_ate.id = ZMS_HEAD_ID; + gc_done_ate.len = 0U; + gc_done_ate.offset = (uint32_t)(fs->data_wra & ADDR_OFFS_MASK); + gc_done_ate.data = 0xffffffff; + gc_done_ate.cycle_cnt = fs->sector_cycle; + + zms_ate_crc8_update(&gc_done_ate); + + return 
zms_flash_ate_wrt(fs, &gc_done_ate); +} + +static int zms_add_empty_ate(struct zms_fs *fs, uint64_t addr) +{ + struct zms_ate empty_ate; + size_t ate_size; + uint8_t cycle_cnt; + int rc = 0; + + addr &= ADDR_SECT_MASK; + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + LOG_DBG("Adding empty ate at %llx", (uint64_t)(addr + fs->sector_size - 2 * ate_size)); + empty_ate.id = ZMS_HEAD_ID; + empty_ate.len = 0xffff; + empty_ate.offset = 0U; + empty_ate.data = 0xffffffff; + rc = zms_get_sector_cycle(fs, addr, &cycle_cnt); + if (rc) { + if (rc == -ENOENT) { + /* sector never used */ + cycle_cnt = 0; + } else { + /* bad flash read */ + return rc; + } + } + /* increase cycle counter */ + empty_ate.cycle_cnt = (cycle_cnt + 1) % BIT(8); + zms_ate_crc8_update(&empty_ate); + fs->ate_wra = addr + fs->sector_size - (2 * ate_size); + + return zms_flash_ate_wrt(fs, &empty_ate); +} + +static int zms_get_sector_cycle(struct zms_fs *fs, uint64_t addr, uint8_t *cycle_cnt) +{ + int rc; + size_t ate_size; + struct zms_ate empty_ate; + uint64_t empty_addr; + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + empty_addr = addr & ADDR_SECT_MASK; + empty_addr += fs->sector_size - 2 * ate_size; + + /* read the cycle counter of the current sector */ + rc = zms_flash_ate_rd(fs, empty_addr, &empty_ate); + if (rc < 0) { + /* flash error */ + return rc; + } + + if (zms_empty_ate_valid(fs, &empty_ate)) { + *cycle_cnt = empty_ate.cycle_cnt; + } else { + /* there is no empty ATE in this sector */ + return -ENOENT; + } + + return 0; +} + +static int zms_get_sector_header(struct zms_fs *fs, uint64_t addr, struct zms_ate *empty_ate, + struct zms_ate *close_ate) +{ + int rc; + size_t ate_size; + uint64_t close_addr; + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + close_addr = addr & ADDR_SECT_MASK; + close_addr += fs->sector_size - ate_size; + + LOG_DBG("Getting sector header ATEs at %llx", close_addr); + /* read the first ate in the sector to get the close ATE */ + rc = 
zms_flash_ate_rd(fs, close_addr, close_ate); + if (rc) { + return rc; + } + + /* read the second ate in the sector to get the empty ATE */ + rc = zms_flash_ate_rd(fs, close_addr - ate_size, empty_ate); + if (rc) { + return rc; + } + + return 0; +} + +/* garbage collection: the address ate_wra has been updated to the new sector + * that has just been started. The data to gc is in the sector after this new + * sector. + */ +static int zms_gc(struct zms_fs *fs) +{ + int rc, previous_sector_num = -1; + struct zms_ate close_ate, gc_ate, wlk_ate, empty_ate; + uint64_t sec_addr, gc_addr, gc_prev_addr, wlk_addr, wlk_prev_addr, data_addr, stop_addr, + previous_ate_wra; + uint8_t current_cycle, previous_cycle = 0; + size_t ate_size; + bool ebw_required = + flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT; + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + rc = zms_get_sector_cycle(fs, fs->ate_wra, &fs->sector_cycle); + if (rc < 0) { + /* flash error */ + return rc; + } + previous_cycle = fs->sector_cycle; + + sec_addr = (fs->ate_wra & ADDR_SECT_MASK); + zms_sector_advance(fs, &sec_addr); + gc_addr = sec_addr + fs->sector_size - ate_size; + /* skip header ATEs */ + stop_addr = gc_addr - 2 * ate_size; + + /* if the sector is not closed don't do gc */ + rc = zms_get_sector_header(fs, gc_addr, &empty_ate, &close_ate); + if (rc < 0) { + return rc; + } + if (!zms_empty_ate_valid(fs, &empty_ate) || !zms_close_ate_valid(fs, &close_ate) || + (close_ate.cycle_cnt != empty_ate.cycle_cnt)) { + goto gc_done; + } + + /* update sector_cycle */ + fs->sector_cycle = empty_ate.cycle_cnt; + + /* At this step empty & close ATEs are valid. 
+ * let's start the GC + */ + gc_addr &= ADDR_SECT_MASK; + gc_addr += close_ate.offset; + + do { + gc_prev_addr = gc_addr; + rc = zms_prev_ate(fs, &gc_addr, &gc_ate); + if (rc) { + return rc; + } + + if (!zms_ate_valid(fs, &gc_ate)) { + continue; + } + +#ifdef CONFIG_ZMS_LOOKUP_CACHE + wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(gc_ate.id)]; + + if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) { + wlk_addr = fs->ate_wra; + } +#else + wlk_addr = fs->ate_wra; +#endif + /* initialize the previous_sector_num to an impossible value + * to read the sector cycle in the first loop + */ + previous_sector_num = -1; + + do { + wlk_prev_addr = wlk_addr; + rc = zms_prev_ate(fs, &wlk_addr, &wlk_ate); + if (rc) { + return rc; + } + /* if ate with same id is reached we might need to copy. + * only consider valid wlk_ate's. Something wrong might + * have been written that has the same ate but is + * invalid, don't consider these as a match. + */ + if (wlk_ate.id == gc_ate.id) { + /* read the ate cycle only when we change the sector + * or if it is the first read + */ + rc = zms_get_cycle_on_sector_change( + fs, wlk_prev_addr, previous_sector_num, ¤t_cycle); + if (rc) { + return rc; + } + if (zms_ate_valid_different_sector(fs, &wlk_ate, current_cycle)) { + break; + } + previous_sector_num = SECTOR_NUM(wlk_prev_addr); + } + } while (wlk_addr != fs->ate_wra); + + /* if walk has reached the same address as gc_addr copy is + * needed unless it is a deleted item. 
+ */ + if ((wlk_prev_addr == gc_prev_addr) && gc_ate.len) { + /* copy needed */ + LOG_DBG("Moving %d, len %d", gc_ate.id, gc_ate.len); + + if (gc_ate.len > 4) { + /* Copy Data only when len > 4 + * Otherwise, Data is already inside ATE + */ + data_addr = (gc_prev_addr & ADDR_SECT_MASK); + data_addr += gc_ate.offset; + gc_ate.offset = (uint16_t)(fs->data_wra & ADDR_OFFS_MASK); + + rc = zms_flash_block_move(fs, data_addr, gc_ate.len); + if (rc) { + return rc; + } + } + + gc_ate.cycle_cnt = previous_cycle; + zms_ate_crc8_update(&gc_ate); + rc = zms_flash_ate_wrt(fs, &gc_ate); + if (rc) { + return rc; + } + } + } while (gc_prev_addr != stop_addr); + +gc_done: + + /* restore the previous sector_cycle */ + fs->sector_cycle = previous_cycle; + + /* Make it possible to detect that gc has finished by writing a + * gc done ate to the sector. In the field we might have zms systems + * that do not have sufficient space to add this ate, so for these + * situations avoid adding the gc done ate. + */ + + if (fs->ate_wra >= (fs->data_wra + ate_size)) { + rc = zms_add_gc_done_ate(fs); + if (rc) { + return rc; + } + } + + /* Erase the GC'ed sector when needed */ + if (ebw_required) { + rc = zms_flash_erase_sector(fs, sec_addr); + if (rc) { + return rc; + } + } + /* Adding empty ate to the gc'ed sector increments fs->ate_wra + * Restore the ate_wra of the current sector after this + */ + previous_ate_wra = fs->ate_wra; +#ifdef CONFIG_ZMS_LOOKUP_CACHE + zms_lookup_cache_invalidate(fs, sec_addr >> ADDR_SECT_SHIFT); +#endif + rc = zms_add_empty_ate(fs, sec_addr); + fs->ate_wra = previous_ate_wra; + + return rc; +} + +int zms_clear(struct zms_fs *fs) +{ + int rc; + uint64_t addr; + bool ebw_required = + flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT; + + if (!fs->ready) { + LOG_ERR("zms not initialized"); + return -EACCES; + } + + for (uint16_t i = 0; i < fs->sector_count; i++) { + addr = (uint64_t)i << ADDR_SECT_SHIFT; + if (ebw_required) { + rc = 
/* Recover the file-system state at mount time.
 *
 * Scans all sectors to locate the current open (write) sector, recovers the
 * ATE and data write addresses, and — if a garbage collection was interrupted
 * by a power loss — either finishes cleaning up after it or restarts it.
 *
 * Returns 0 on success, -EDEADLK when every sector is closed (the partition
 * does not hold a valid ZMS image), -ESPIPE on an inconsistent sector layout,
 * or a negative errno from the flash layer.
 */
static int zms_init(struct zms_fs *fs)
{
	int rc;
	struct zms_ate last_ate, first_ate, close_ate, empty_ate;
	size_t ate_size;
	/* Initialize addr to 0 for the case fs->sector_count == 0. This
	 * should never happen as this is verified in zms_mount() but both
	 * Coverity and GCC believe the contrary.
	 */
	uint64_t addr = 0U;
	uint32_t i, closed_sectors = 0;
	bool ebw_required =
		flash_params_get_erase_cap(fs->flash_parameters) & FLASH_ERASE_C_EXPLICIT;

	k_mutex_lock(&fs->zms_lock, K_FOREVER);

	ate_size = zms_al_size(fs, sizeof(struct zms_ate));

	/* step through the sectors to find a open sector following
	 * a closed sector, this is where zms can write.
	 */
	for (i = 0; i < fs->sector_count; i++) {
		/* addr points at the last (empty) ATE slot of sector i */
		addr = ((uint64_t)i << ADDR_SECT_SHIFT) + (uint32_t)(fs->sector_size - ate_size);

		/* read the first and second ate to find out if sector is
		 * open/close
		 */
		rc = zms_get_sector_header(fs, addr, &empty_ate, &close_ate);
		if (rc) {
			goto end;
		}

		/* update cycle count */
		fs->sector_cycle = empty_ate.cycle_cnt;

		if (zms_close_ate_valid(fs, &close_ate) && zms_empty_ate_valid(fs, &empty_ate) &&
		    (close_ate.cycle_cnt == empty_ate.cycle_cnt)) {
			/* closed sector: check whether the NEXT sector is the
			 * open one (closed sector immediately followed by a
			 * non-closed sector marks the write position).
			 */
			closed_sectors++;
			zms_sector_advance(fs, &addr);
			/* addr is pointing to the close ATE */
			rc = zms_get_sector_header(fs, addr, &empty_ate, &close_ate);
			if (rc) {
				goto end;
			}
			/* update cycle count */
			fs->sector_cycle = empty_ate.cycle_cnt;
			if (!zms_empty_ate_valid(fs, &empty_ate) ||
			    !zms_close_ate_valid(fs, &close_ate) ||
			    (close_ate.cycle_cnt != empty_ate.cycle_cnt)) {
				/* open sector found */
				break;
			}
		}
	}
	/* all sectors are closed, this is not a zms fs */
	if (closed_sectors == fs->sector_count) {
		rc = -EDEADLK;
		goto end;
	}

	if (i == fs->sector_count) {
		/* none of the sectors were closed, which means that the first
		 * sector is the one in use, except if there are only 2 sectors.
		 * Let's check if the last sector has valid ATEs otherwise set
		 * the open sector to the first one.
		 */
		rc = zms_flash_ate_rd(fs, addr - 2 * ate_size, &first_ate);
		if (rc) {
			goto end;
		}
		if (!zms_ate_valid(fs, &first_ate)) {
			zms_sector_advance(fs, &addr);
		}
		rc = zms_get_sector_header(fs, addr, &empty_ate, &close_ate);
		if (rc) {
			goto end;
		}
		if (!zms_empty_ate_valid(fs, &empty_ate)) {
			/* Sector never used: initialize it with an empty ATE.
			 * NOTE(review): return codes of the erase and
			 * empty-ATE writes are ignored here, unlike every
			 * other call site in this function — presumably
			 * best-effort, but worth confirming upstream.
			 */
			if (ebw_required) {
				zms_flash_erase_sector(fs, addr);
			}
			zms_add_empty_ate(fs, addr);
		}
		rc = zms_get_sector_cycle(fs, addr, &fs->sector_cycle);
		if (rc) {
			goto end;
		}
	}

	/* addr contains address of closing ate in the most recent sector,
	 * search for the last valid ate using the recover_last_ate routine
	 */
	rc = zms_recover_last_ate(fs, &addr);
	if (rc) {
		goto end;
	}

	/* addr contains address of the last valid ate in the most recent sector
	 */
	fs->ate_wra = addr;
	fs->data_wra = addr & ADDR_SECT_MASK;

	/* fs->ate_wra should point to the next available entry. let's find it
	 * and set data_wra as well
	 */
	while (fs->ate_wra >= fs->data_wra) {
		rc = zms_flash_ate_rd(fs, fs->ate_wra, &last_ate);
		if (rc) {
			goto end;
		}
		if (!zms_ate_valid(fs, &last_ate)) {
			/* found empty location */
			break;
		}

		/* ATE is valid: complete write of ate was performed */
		fs->data_wra = addr & ADDR_SECT_MASK;
		/* Align the data write address to the current
		 * write block size so that it is possible to write to
		 * the sector even if the block size has changed after
		 * a software upgrade (unless the physical ATE size
		 * will change).
		 */
		fs->data_wra += zms_al_size(fs, last_ate.offset + last_ate.len);

		/* ate on the last position within the sector is
		 * reserved for deletion an entry
		 */
		if (fs->ate_wra == fs->data_wra && last_ate.len) {
			/* not a delete ate */
			rc = -ESPIPE;
			goto end;
		}

		fs->ate_wra -= ate_size;
	}

	/* The sector after the write sector is either empty with a valid empty ATE (regular case)
	 * or it has never been used or it is a closed sector (GC didn't finish)
	 * If it is a closed sector we must look for a valid GC done ATE in the current write
	 * sector, if it is missing, we need to restart gc because it has been interrupted.
	 * If no valid empty ATE is found then it has never been used. Just erase it by adding
	 * a valid empty ATE.
	 * When gc needs to be restarted, first erase the sector by adding an empty
	 * ATE otherwise the data might not fit into the sector.
	 */
	addr = fs->ate_wra & ADDR_SECT_MASK;
	addr += fs->sector_size - ate_size;
	zms_sector_advance(fs, &addr);
	/* read the first and second ate to verify that the sector is empty */
	rc = zms_get_sector_header(fs, addr, &empty_ate, &close_ate);
	if (rc) {
		goto end;
	}

	if (zms_close_ate_valid(fs, &close_ate) && zms_empty_ate_valid(fs, &empty_ate) &&
	    (close_ate.cycle_cnt == empty_ate.cycle_cnt)) {
		/* The sector after fs->ate_wra is closed.
		 * Look for a marker (gc_done_ate) that indicates that gc was finished.
		 */
		bool gc_done_marker = false;
		struct zms_ate gc_done_ate;

		fs->sector_cycle = empty_ate.cycle_cnt;
		addr = fs->ate_wra + ate_size;
		/* scan forward through the remaining ATE slots of the write
		 * sector (excluding the two header ATEs at the top)
		 */
		while ((addr & ADDR_OFFS_MASK) < (fs->sector_size - 2 * ate_size)) {
			rc = zms_flash_ate_rd(fs, addr, &gc_done_ate);
			if (rc) {
				goto end;
			}
			/* a gc-done marker is a valid ATE with id 0xffff and
			 * zero length
			 */
			if (zms_ate_valid(fs, &gc_done_ate) && (gc_done_ate.id == 0xffff) &&
			    (gc_done_ate.len == 0U)) {
				gc_done_marker = true;
				break;
			}
			addr += ate_size;
		}

		if (gc_done_marker) {
			/* erase the next sector */
			LOG_INF("GC Done marker found");
			addr = fs->ate_wra & ADDR_SECT_MASK;
			zms_sector_advance(fs, &addr);
			if (ebw_required) {
				rc = zms_flash_erase_sector(fs, addr);
				if (rc < 0) {
					goto end;
				}
			}
			rc = zms_add_empty_ate(fs, addr);
			goto end;
		}
		LOG_INF("No GC Done marker found: restarting gc");
		if (ebw_required) {
			rc = zms_flash_erase_sector(fs, fs->ate_wra);
			if (rc) {
				goto end;
			}
		}
		rc = zms_add_empty_ate(fs, fs->ate_wra);
		if (rc) {
			goto end;
		}
		/* rewind the write pointers to the start of the freshly
		 * prepared sector (top 3 ATE slots are reserved) before
		 * re-running gc
		 */
		fs->ate_wra &= ADDR_SECT_MASK;
		fs->ate_wra += (fs->sector_size - 3 * ate_size);
		fs->data_wra = (fs->ate_wra & ADDR_SECT_MASK);
#ifdef CONFIG_ZMS_LOOKUP_CACHE
		/**
		 * At this point, the lookup cache wasn't built but the gc function need to use it.
		 * So, temporarily, we set the lookup cache to the end of the fs.
		 * The cache will be rebuilt afterwards
		 **/
		for (i = 0; i < CONFIG_ZMS_LOOKUP_CACHE_SIZE; i++) {
			fs->lookup_cache[i] = fs->ate_wra;
		}
#endif
		rc = zms_gc(fs);
		goto end;
	}

end:
#ifdef CONFIG_ZMS_LOOKUP_CACHE
	if (!rc) {
		rc = zms_lookup_cache_rebuild(fs);
	}
#endif
	/* If the sector is empty add a gc done ate to avoid having insufficient
	 * space when doing gc.
	 */
	if ((!rc) && ((fs->ate_wra & ADDR_OFFS_MASK) == (fs->sector_size - 3 * ate_size))) {

		rc = zms_add_gc_done_ate(fs);
	}
	k_mutex_unlock(&fs->zms_lock);
	return rc;
}
+ */ + if ((!rc) && ((fs->ate_wra & ADDR_OFFS_MASK) == (fs->sector_size - 3 * ate_size))) { + + rc = zms_add_gc_done_ate(fs); + } + k_mutex_unlock(&fs->zms_lock); + return rc; +} + +int zms_mount(struct zms_fs *fs) +{ + + int rc; + struct flash_pages_info info; + size_t write_block_size; + + k_mutex_init(&fs->zms_lock); + + fs->flash_parameters = flash_get_parameters(fs->flash_device); + if (fs->flash_parameters == NULL) { + LOG_ERR("Could not obtain flash parameters"); + return -EINVAL; + } + + write_block_size = flash_get_write_block_size(fs->flash_device); + + /* check that the write block size is supported */ + if (write_block_size > ZMS_BLOCK_SIZE || write_block_size == 0) { + LOG_ERR("Unsupported write block size"); + return -EINVAL; + } + + /* check that sector size is a multiple of pagesize */ + rc = flash_get_page_info_by_offs(fs->flash_device, fs->offset, &info); + if (rc) { + LOG_ERR("Unable to get page info"); + return -EINVAL; + } + if (!fs->sector_size || fs->sector_size % info.size) { + LOG_ERR("Invalid sector size"); + return -EINVAL; + } + + /* check the number of sectors, it should be at least 2 */ + if (fs->sector_count < 2) { + LOG_ERR("Configuration error - sector count"); + return -EINVAL; + } + + rc = zms_init(fs); + + if (rc) { + return rc; + } + + /* zms is ready for use */ + fs->ready = true; + + LOG_INF("%d Sectors of %d bytes", fs->sector_count, fs->sector_size); + LOG_INF("alloc wra: %llu, %llx", SECTOR_NUM(fs->ate_wra), + (fs->ate_wra & ADDR_OFFS_MASK)); + LOG_INF("data wra: %llu, %llx", SECTOR_NUM(fs->data_wra), + (fs->data_wra & ADDR_OFFS_MASK)); + + return 0; +} + +ssize_t zms_write(struct zms_fs *fs, uint32_t id, const void *data, size_t len) +{ + int rc, gc_count, previous_sector_num = -1; + size_t ate_size, data_size; + struct zms_ate wlk_ate; + uint64_t wlk_addr, rd_addr; + uint8_t current_cycle; + uint16_t required_space = 0U; /* no space, appropriate for delete ate */ + bool prev_found = false; + + if (!fs->ready) { + 
LOG_ERR("zms not initialized"); + return -EACCES; + } + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + data_size = zms_al_size(fs, len); + + /* The maximum data size is sector size - 5 ate + * where: 1 ate for data, 1 ate for sector close, 1 ate for empty, + * 1 ate for gc done, and 1 ate to always allow a delete. + */ + if ((len > (fs->sector_size - 5 * ate_size)) || ((len > 0) && (data == NULL))) { + return -EINVAL; + } + + /* find latest entry with same id */ +#ifdef CONFIG_ZMS_LOOKUP_CACHE + wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)]; + + if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) { + goto no_cached_entry; + } +#else + wlk_addr = fs->ate_wra; +#endif + rd_addr = wlk_addr; + + while (1) { + rd_addr = wlk_addr; + rc = zms_prev_ate(fs, &wlk_addr, &wlk_ate); + if (rc) { + return rc; + } + if (wlk_ate.id == id) { + /* read the ate cycle only when we change the sector + * or if it is the first read + */ + rc = zms_get_cycle_on_sector_change(fs, rd_addr, previous_sector_num, + ¤t_cycle); + if (rc) { + return rc; + } + if (zms_ate_valid_different_sector(fs, &wlk_ate, current_cycle)) { + prev_found = true; + break; + } + previous_sector_num = SECTOR_NUM(rd_addr); + } + + if (wlk_addr == fs->ate_wra) { + break; + } + } + +#ifdef CONFIG_ZMS_LOOKUP_CACHE +no_cached_entry: +#endif + if (prev_found) { + /* previous entry found */ + rd_addr &= ADDR_SECT_MASK; + rd_addr += wlk_ate.offset; + + if (len == 0) { + /* do not try to compare with empty data */ + if (wlk_ate.len == 0U) { + /* skip delete entry as it is already the + * last one + */ + return 0; + } + } else if (len == wlk_ate.len) { + /* do not try to compare if lengths are not equal */ + /* compare the data and if equal return 0 */ + if (len <= 4) { + rc = memcmp(&wlk_ate.data, data, len); + if (!rc) { + return 0; + } + } else { + rc = zms_flash_block_cmp(fs, rd_addr, data, len); + if (rc <= 0) { + return rc; + } + } + } + } else { + /* skip delete entry for non-existing entry */ + if (len == 0) { + 
return 0; + } + } + + /* calculate required space if the entry contains data */ + if (data_size) { + /* Leave space for delete ate */ + required_space = data_size + ate_size; + } + + k_mutex_lock(&fs->zms_lock, K_FOREVER); + + gc_count = 0; + while (1) { + if (gc_count == fs->sector_count) { + /* gc'ed all sectors, no extra space will be created + * by extra gc. + */ + rc = -ENOSPC; + goto end; + } + + if (fs->ate_wra >= (fs->data_wra + required_space)) { + + rc = zms_flash_write_entry(fs, id, data, len); + if (rc) { + goto end; + } + break; + } + rc = zms_sector_close(fs); + if (rc) { + goto end; + } + + rc = zms_gc(fs); + if (rc) { + goto end; + } + gc_count++; + } + rc = len; +end: + k_mutex_unlock(&fs->zms_lock); + return rc; +} + +int zms_delete(struct zms_fs *fs, uint32_t id) +{ + return zms_write(fs, id, NULL, 0); +} + +ssize_t zms_read_hist(struct zms_fs *fs, uint32_t id, void *data, size_t len, uint32_t cnt) +{ + int rc, previous_sector_num = -1; + uint64_t wlk_addr, rd_addr; + uint8_t current_cycle; + uint32_t cnt_his; + struct zms_ate wlk_ate; + size_t ate_size; +#ifdef CONFIG_ZMS_DATA_CRC + uint32_t computed_data_crc; +#endif + + if (!fs->ready) { + LOG_ERR("zms not initialized"); + return -EACCES; + } + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + if (len > (fs->sector_size - 2 * ate_size)) { + return -EINVAL; + } + + cnt_his = 0U; + +#ifdef CONFIG_ZMS_LOOKUP_CACHE + wlk_addr = fs->lookup_cache[zms_lookup_cache_pos(id)]; + + if (wlk_addr == ZMS_LOOKUP_CACHE_NO_ADDR) { + rc = -ENOENT; + goto err; + } +#else + wlk_addr = fs->ate_wra; +#endif + rd_addr = wlk_addr; + + while (cnt_his <= cnt) { + rd_addr = wlk_addr; + rc = zms_prev_ate(fs, &wlk_addr, &wlk_ate); + if (rc) { + goto err; + } + if (wlk_ate.id == id) { + /* read the ate cycle only when we change the sector + * or if it is the first read + */ + rc = zms_get_cycle_on_sector_change(fs, rd_addr, previous_sector_num, + ¤t_cycle); + if (rc) { + return rc; + } + if 
(zms_ate_valid_different_sector(fs, &wlk_ate, current_cycle)) { + cnt_his++; + } + previous_sector_num = SECTOR_NUM(rd_addr); + } + if (wlk_addr == fs->ate_wra) { + break; + } + } + + if (((wlk_addr == fs->ate_wra) && (wlk_ate.id != id)) || (wlk_ate.len == 0U) || + (cnt_his < cnt)) { + return -ENOENT; + } + + if (len <= 4) { + /* data is stored in the ATE */ + memcpy(data, &wlk_ate.data, MIN(len, wlk_ate.len)); + } else { + rd_addr &= ADDR_SECT_MASK; + rd_addr += wlk_ate.offset; + rc = zms_flash_rd(fs, rd_addr, data, MIN(len, wlk_ate.len)); + if (rc) { + goto err; + } +#ifdef CONFIG_ZMS_DATA_CRC + computed_data_crc = crc32_ieee(data, wlk_ate.len); + if (computed_data_crc != wlk_ate.data_crc) { + LOG_ERR("Invalid data CRC: ATE_CRC=0x%08X, computed_data_crc=0x%08X", + wlk_ate.data_crc, computed_data_crc); + return -EIO; + } +#endif + } + + return MIN(len, wlk_ate.len); + +err: + return rc; +} + +ssize_t zms_read(struct zms_fs *fs, uint32_t id, void *data, size_t len) +{ + int rc; + + rc = zms_read_hist(fs, id, data, len, 0); + return rc; +} + +ssize_t zms_calc_free_space(struct zms_fs *fs) +{ + + int rc, previous_sector_num = -1; + struct zms_ate step_ate, wlk_ate; + uint64_t step_addr, wlk_addr; + uint8_t current_cycle; + size_t ate_size, free_space; + + if (!fs->ready) { + LOG_ERR("zms not initialized"); + return -EACCES; + } + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + free_space = 0; + for (uint16_t i = 1; i < fs->sector_count; i++) { + free_space += (fs->sector_size - ate_size); + } + + step_addr = fs->ate_wra; + + while (1) { + rc = zms_prev_ate(fs, &step_addr, &step_ate); + if (rc) { + return rc; + } + + wlk_addr = fs->ate_wra; + + while (1) { + rc = zms_prev_ate(fs, &wlk_addr, &wlk_ate); + if (rc) { + return rc; + } + if ((wlk_ate.id == step_ate.id) || (wlk_addr == fs->ate_wra)) { + break; + } + } + + if ((wlk_addr == step_addr) && step_ate.len) { + /* read the ate cycle only when we change the sector + * or if it is the first read + */ + rc = 
zms_get_cycle_on_sector_change(fs, step_addr, previous_sector_num, + ¤t_cycle); + if (rc) { + return rc; + } + if (zms_ate_valid_different_sector(fs, &wlk_ate, current_cycle)) { + /* count needed */ + free_space -= zms_al_size(fs, step_ate.len); + free_space -= ate_size; + } + previous_sector_num = SECTOR_NUM(step_addr); + } + + if (step_addr == fs->ate_wra) { + break; + } + } + return free_space; +} + +size_t zms_sector_max_data_size(struct zms_fs *fs) +{ + size_t ate_size; + + if (!fs->ready) { + LOG_ERR("ZMS not initialized"); + return -EACCES; + } + + ate_size = zms_al_size(fs, sizeof(struct zms_ate)); + + return fs->ate_wra - fs->data_wra - ate_size; +} + +int zms_sector_use_next(struct zms_fs *fs) +{ + int ret; + + if (!fs->ready) { + LOG_ERR("ZMS not initialized"); + return -EACCES; + } + + k_mutex_lock(&fs->zms_lock, K_FOREVER); + + ret = zms_sector_close(fs); + if (ret != 0) { + goto end; + } + + ret = zms_gc(fs); + +end: + k_mutex_unlock(&fs->zms_lock); + return ret; +} diff --git a/subsys/fs/zms/zms_priv.h b/subsys/fs/zms/zms_priv.h new file mode 100644 index 000000000000000..f9f4b44e6cb15bd --- /dev/null +++ b/subsys/fs/zms/zms_priv.h @@ -0,0 +1,50 @@ +/* ZMS: Zephyr Memory Storage + * + * Copyright (c) 2024 BayLibre SAS + * + * SPDX-License-Identifier: Apache-2.0 + */ +#ifndef __ZMS_PRIV_H_ +#define __ZMS_PRIV_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +/* + * MASKS AND SHIFT FOR ADDRESSES + * an address in zms is an uint64_t where: + * high 4 bytes represent the sector number + * low 4 bytes represent the offset in a sector + */ +#define ADDR_SECT_MASK GENMASK64(63, 32) +#define ADDR_SECT_SHIFT 32 +#define ADDR_OFFS_MASK GENMASK64(31, 0) +#define SECTOR_NUM(x) FIELD_GET(ADDR_SECT_MASK, x) + +#define ZMS_BLOCK_SIZE 32 + +#define ZMS_LOOKUP_CACHE_NO_ADDR GENMASK64(63, 0) +#define ZMS_HEAD_ID GENMASK(31, 0) + +/* Allocation Table Entry */ +struct zms_ate { + uint32_t id; /* data id */ + uint32_t offset; /* data offset within sector */ + uint16_t 
len; /* data len within sector */ + union { + uint32_t data_crc; /* crc for data */ + uint32_t data; /* used to store small size data */ + }; + uint8_t cycle_cnt; /* cycle counter for non erasable devices */ + uint8_t crc8; /* crc8 check of the entry */ +} __packed; + +BUILD_ASSERT(offsetof(struct zms_ate, crc8) == sizeof(struct zms_ate) - sizeof(uint8_t), + "crc8 must be the last member"); + +#ifdef __cplusplus +} +#endif + +#endif /* __ZMS_PRIV_H_ */