#include <stdlib.h>
#include <string.h>

#include <bitops.h>
#include <flash.h>
#include <ftl.h>
#include <macros.h>

#include "gc.h"
#include "map.h"

/* Given the group number, this function checks whether a page group is erased
 * by checking whether each of the pages that compose the page group is erased.
 */
#ifdef is_group_erased
#undef is_group_erased
#define is_group_erased __real_is_group_erased
#endif

int is_group_erased(struct ftl_map *map, uint32_t group)
{
	uint8_t data[32];
	struct flash_dev *dev = map->dev;
	uint32_t addr = group << (map->log2_pages_per_group + map->log2_page_size);
	size_t i, nbytes, len = 1 << (map->log2_pages_per_group + map->log2_page_size);

	while (len) {
		nbytes = min(sizeof(data), len);

		if (flash_read(dev, addr, data, nbytes) == 0)
			return 0;

		/* Only the first nbytes bytes of the buffer are valid: checking
		 * the full buffer would inspect stale data on a short chunk.
		 */
		for (i = 0; i < nbytes; ++i) {
			if (data[i] != 0xff)
				return 0;
		}

		addr += nbytes;
		len -= nbytes;
	}

	return 1;
}

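/* Worked example (illustrative parameters): with 1 KiB pages
 * (log2_page_size = 10) and four pages per group (log2_pages_per_group = 2),
 * a call with group = 3 scans the byte range 0x3000-0x3fff in 32-byte chunks
 * and returns 1 only if every byte reads back as 0xff.
 */
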
#ifdef is_group_erased
#undef is_group_erased
#define is_group_erased __wrap_is_group_erased
#endif

int __wrap_is_group_erased(struct ftl_map *map, uint32_t group);

/* Given the current user page, this function computes the page number of the
 * next user page by incrementing the page number. However, if incrementing the
 * page number results in the page number of a page containing page
 * descriptors, the page number is incremented again to have it point to the
 * first user page of the next page group. Finally, if incrementing the page
 * number results in a page number that is larger than the total number of
 * pages on the device, the page number of the very first user page is
 * returned instead.
 */
uint32_t next_upage(struct ftl_map *map, uint32_t p)
{
	size_t log2_pages_per_block = map->log2_pages_per_group +
		map->log2_groups_per_block;

	++p;

	/* The last page of every page group stores the page descriptors. */
	if (is_aligned(p + 1, map->log2_pages_per_group))
		++p;

	if (p >= (map->nblocks << log2_pages_per_block))
		p = 0;

	return p;
}

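/* Worked example (illustrative): with four pages per group
 * (log2_pages_per_group = 2), page 3 is the descriptor page of group 0, so
 * next_upage(map, 2) skips it and returns 4, the first user page of group 1.
 * When the increment runs past the last page of the device, the function
 * wraps around and returns user page 0.
 */
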
/* Reads the header of the given page group. */
int read_page_group(struct ftl_map *map,
	struct ftl_page_group *group, uint32_t group_no)
{
	uint32_t page, addr;

	page = ((group_no + 1) << map->log2_pages_per_group) - 1;
	addr = page << map->log2_page_size;

	if (flash_read(map->dev, addr, group, sizeof *group) == 0)
		return -1;

	if (memcmp(group->magic, "FTL", 3) != 0)
		return -1;

	return 0;
}

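/* Worked example (illustrative): with four pages per group and 1 KiB pages,
 * the header of group 3 lives at the start of the group's last page,
 * page ((3 + 1) << 2) - 1 = 15, i.e. at byte address 15 << 10 = 0x3c00.
 */
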
/* Given the page number of a user page, reads the page descriptor associated
 * with the user page by locating the footer of the page group and, more
 * specifically, the page descriptor within that footer.
 */
#ifdef read_page_desc
#undef read_page_desc
#define read_page_desc __real_read_page_desc
#endif

int read_page_desc(struct ftl_map *map,
	struct ftl_page_desc *page_desc, uint32_t upage)
{
	uint32_t addr, offset;

	addr = align(upage, map->log2_pages_per_group) +
		(1 << map->log2_pages_per_group) - 1;

	/* The last page of a group holds descriptors, not user data. */
	if (addr == upage)
		return -1;

	addr <<= map->log2_page_size;
	offset = sizeof(struct ftl_page_group) +
		BIT_MASK(upage, map->log2_pages_per_group) * sizeof *page_desc;

	if (flash_read(map->dev, addr + offset, page_desc, sizeof *page_desc) == 0)
		return -1;

	if (memcmp(page_desc->magic, "page", sizeof page_desc->magic) != 0)
		return -1;

	return 0;
}

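/* Worked example (illustrative, assuming BIT_MASK(x, n) yields the low n bits
 * of x): with four pages per group and 1 KiB pages, user page 5 belongs to
 * group 1, whose descriptor page is page 7. Its descriptor is the second one
 * in the footer (BIT_MASK(5, 2) = 1), so it is read from byte address
 * (7 << 10) + sizeof(struct ftl_page_group) + 1 * sizeof(struct ftl_page_desc).
 */
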
#ifdef read_page_desc
#undef read_page_desc
#define read_page_desc __wrap_read_page_desc
#endif

/* Writes the page descriptor to the footer of the current page group and
 * increments the head to point to the next free user page.
 */
#ifdef write_page_desc
#undef write_page_desc
#define write_page_desc __real_write_page_desc
#endif

int write_page_desc(struct ftl_map *map,
	struct ftl_page_desc *page_desc)
{
	struct ftl_page_group group;
	uint32_t upage, addr, offset, head;

	upage = map->head;
	offset = sizeof(struct ftl_page_group) +
		BIT_MASK(upage, map->log2_pages_per_group) * sizeof *page_desc;
	upage = align(upage, map->log2_pages_per_group) +
		(1 << map->log2_pages_per_group) - 1;
	addr = upage << map->log2_page_size;

	/* Write the page group header if this is the first descriptor to be
	 * written to this footer.
	 */
	if (flash_is_erased(map->dev, upage, 1)) {
		memcpy(&group.magic, "FTL", sizeof group.magic);
		group.epoch = map->epoch;
		group.tail = map->tail;

		if (flash_write(map->dev, addr, &group, sizeof group) == 0)
			return -1;
	}

	memcpy(page_desc->magic, "page", sizeof page_desc->magic);

	if (flash_write(map->dev, addr + offset, page_desc, sizeof *page_desc) == 0)
		return -1;

	map->root = map->head;

	head = map->head;
	map->head = next_upage(map, map->head);

	/* The head wrapped around to the start of the device: a new epoch
	 * begins.
	 */
	if (map->head < head)
		++map->epoch;

	return 0;
}

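/* Layout of a page group footer as written above (illustrative): the last
 * page of the group starts with the page group header, immediately followed
 * by one page descriptor per user page, in page order:
 *
 *	+----------------+--------+--------+-----+
 *	| ftl_page_group | desc 0 | desc 1 | ... |
 *	+----------------+--------+--------+-----+
 */
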
#ifdef write_page_desc
#undef write_page_desc
#define write_page_desc __wrap_write_page_desc
#endif

/* Prepares the head for writing, writes the user page to the currently
 * available user page, and finally writes the page descriptor to the footer of
 * the page group, whereupon the head is incremented to point to the next
 * available user page.
 */
int write_upage(struct ftl_map *map, const void *page,
	struct ftl_page_desc *page_desc)
{
	if (prepare_head(map) < 0)
		return -1;

	if (page && flash_write(map->dev, map->head << map->log2_page_size, page,
		1 << map->log2_page_size) == 0)
		return -1;

	return write_page_desc(map, page_desc);
}

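/* Illustrative use (field names taken from their use elsewhere in this file;
 * error handling elided): look up the subtrees for a virtual address, then
 * write the new page and its descriptor in one go.
 *
 *	struct ftl_page_desc page_desc;
 *
 *	trace_path(map, &page_desc, NULL, va);
 *	page_desc.nused_pages = map->nused_pages + 1;
 *	write_upage(map, buf, &page_desc);
 */
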
/* Determines the number of user pages to store in a page group by determining
 * how many page descriptors the last page of the page group can contain at
 * most. Because the page group consists of $2^n$ pages, $2^n - 1$ of those
 * pages will end up becoming user pages. Once the number of pages in a page
 * group has been determined, the number of page groups within an erase block
 * can also be determined, as a single page group may not cover a whole erase
 * block.
 */
int find_block_div(struct ftl_map *map)
{
	size_t log2_pages_per_block = map->log2_block_size - map->log2_page_size;
	size_t nbytes_avail = (1 << map->log2_page_size) -
		sizeof(struct ftl_page_group);
	size_t nbytes = sizeof(struct ftl_page_desc);

	map->log2_pages_per_group = 1;

	/* Doubling the pages per group roughly doubles the descriptor space:
	 * 2^n pages need 2^n - 1 descriptors.
	 */
	while (map->log2_pages_per_group < log2_pages_per_block) {
		nbytes = 2 * nbytes + sizeof(struct ftl_page_desc);

		if (nbytes > nbytes_avail)
			break;

		++map->log2_pages_per_group;
	}

	map->log2_groups_per_block = log2_pages_per_block - map->log2_pages_per_group;

	return 0;
}

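/* Worked example (illustrative sizes): with 1 KiB pages, 64 KiB erase blocks
 * (log2_pages_per_block = 6), a 12-byte group header and 32-byte page
 * descriptors, nbytes grows as 96, 224, 480, 992, 2016, ... and 992 is the
 * last value that fits in the 1024 - 12 = 1012 available bytes. The loop
 * therefore settles on log2_pages_per_group = 5 (31 user pages, 992 bytes of
 * descriptors) and log2_groups_per_block = 1.
 */
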
/* Given a block number, this function attempts to find the first block that is
 * in use. A block is considered to be in use when its first page group is in
 * use, as a block can only be erased as a whole. Therefore, if the first page
 * group is not in use, neither are the other page groups in the block.
 */
#ifdef find_block
#undef find_block
#define find_block __real_find_block
#endif

int find_block(struct ftl_map *map, struct ftl_page_group *group,
	uint32_t *where, uint32_t block)
{
	uint32_t page;
	unsigned attempt;

	for (attempt = 0; block < map->nblocks && attempt < FTL_MAX_ATTEMPTS;
		++attempt, ++block) {
		/* Byte address of the header in the last page of the block's
		 * first page group.
		 */
		page = block << map->log2_block_size;
		page |= ((UINT32_C(1) << map->log2_pages_per_group) - 1) << map->log2_page_size;

		if (flash_read(map->dev, page, group, sizeof *group) == 0)
			continue;

		if (memcmp(group->magic, "FTL", sizeof group->magic) != 0)
			continue;

		*where = block;

		return 0;
	}

	return -1;
}

#ifdef find_block
#undef find_block
#define find_block __wrap_find_block

int find_block(struct ftl_map *map, struct ftl_page_group *group,
	uint32_t *where, uint32_t block);
#endif

/* Given the block number of the first block that is in use, attempts to use
 * binary search to find the last block that is in use.
 */
uint32_t find_last_block(struct ftl_map *map, uint32_t first)
{
	struct ftl_page_group group;
	uint32_t mid, low = first, high = map->nblocks - 1;
	uint32_t found, next;

	while (low < high) {
		mid = (low + high) / 2;

		/* Stale or missing block: the last block in use lies below
		 * mid.
		 */
		if (find_block(map, &group, &found, mid) < 0 ||
			group.epoch != map->epoch) {
			high = mid - 1;
			continue;
		}

		if (find_block(map, &group, &next, found + 1) < 0 ||
			group.epoch != map->epoch)
			return found;

		low = next;
	}

	return low;
}

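/* Illustrative run: with blocks 0-6 written in the current epoch and blocks
 * 7-9 holding stale data from an earlier epoch, the search narrows in on
 * block 6: probing block 7 yields a mismatched epoch, while block 6 probes
 * cleanly and its successor does not.
 */
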
/* Attempts to find the last page group that is in use within a block by
 * performing a binary search on the page groups.
 */
uint32_t find_last_group(struct ftl_map *map, uint32_t block)
{
	uint32_t ngroups = UINT32_C(1) << map->log2_groups_per_block;
	uint32_t mid, low = 0, high = ngroups - 1;

	low += block << map->log2_groups_per_block;
	high += block << map->log2_groups_per_block;

	while (low < high) {
		mid = (low + high) / 2;

		/* An erased group lies past the end of the log: search the
		 * lower half.
		 */
		if (is_group_erased(map, mid)) {
			high = mid - 1;
			continue;
		}

		/* mid is in use and mid + 1 is not: mid is the last group. */
		if (is_group_erased(map, mid + 1))
			return mid;

		low = mid + 1;
	}

	return low;
}

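/* Illustrative run: with eight groups per block of which the first five are
 * in use, the search probes group 3 (in use), group 4 (in use), then group 5
 * (erased), and returns group 4 as the last page group in use.
 */
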
/* Finds the root, i.e. the most recently written user page, within the given
 * page group by walking the page descriptors until the first page without a
 * valid descriptor is encountered.
 */
int find_root(struct ftl_map *map, uint32_t group)
{
	struct ftl_page_desc page_desc;
	uint32_t upage;
	int ret = -1;

	upage = group << map->log2_pages_per_group;

	while (read_page_desc(map, &page_desc, upage) == 0) {
		map->root = upage;
		ret = 0;
		upage = next_upage(map, upage);
	}

	return ret;
}

/* Attempts to find the first free page within the current page group by
 * looking for the first page that is considered to be erased. If no such page
 * can be found within the page group, the first user page of the next page
 * group should be used, as that page group should not be in use.
 */
int find_head(struct ftl_map *map)
{
	size_t log2_pages_per_block = map->log2_pages_per_group +
		map->log2_groups_per_block;

	map->head = map->root;

	do {
		map->head = next_upage(map, map->head);

		/* Reached the start of the next erase block. */
		if (is_aligned(map->head, log2_pages_per_block))
			return 0;
	} while (!flash_is_erased(map->dev, map->head, 1));

	return 0;
}

/* Restores the map to its initial state, as if the device held no data. */
static void reset_map(struct ftl_map *map)
{
	map->log2_erase_size = map->dev->log2_block_size;
	map->log2_page_size = ilog2(1 * KIB);
	map->log2_block_size = ilog2(64 * KIB);

	find_block_div(map);

	map->nblocks = flash_get_size(map->dev) >> map->log2_block_size;

	memset(&map->outstanding, 0, sizeof map->outstanding);

	map->offset = 0;
	map->head = 0;
	map->tail = 0;
	map->root = UINT32_MAX;
	map->nused_pages = 0;
	map->epoch = 0;
}

int ftl_init_map(struct ftl_map *map, struct flash_dev *dev)
{
	map->dev = dev;

	reset_map(map);

	return 0;
}

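/* Typical bring-up (illustrative; error handling elided): initialize the map
 * against a flash device and try to resume any state left on it.
 *
 *	struct ftl_map map;
 *
 *	ftl_init_map(&map, dev);
 *
 *	if (ftl_resume_map(&map) < 0) {
 *		... no valid FTL state could be resumed ...
 *	}
 */
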
/* Resumes the map by finding the first block that is in use, the last block
 * that is in use, and the last page group that is in use, and by setting the
 * head to the first free user page.
 */
int ftl_resume_map(struct ftl_map *map)
{
	struct ftl_page_group group;
	struct ftl_page_desc page_desc;
	uint32_t first, last, group_no;

	if (!map)
		return -1;

	if (find_block(map, &group, &first, 0) < 0) {
		reset_map(map);

		return -1;
	}

	map->epoch = group.epoch;
	last = find_last_block(map, first);
	group_no = find_last_group(map, last);

	if (find_root(map, group_no) < 0)
		return -1;

	if (find_head(map) < 0)
		return -1;

	if (read_page_group(map, &group, map->root >> map->log2_pages_per_group) < 0)
		return -1;

	if (read_page_desc(map, &page_desc, map->root) < 0)
		return -1;

	map->tail = group.tail;
	map->nused_pages = page_desc.nused_pages;

	return 0;
}

/* Traces a path for a given virtual target address by comparing each of the
 * bits in the target address with the virtual address of our root. In case of
 * a mismatch, we continue our traversal with the subtree at the current depth
 * until we either find that there is no further subtree to traverse, or we
 * have found the actual user page.
 */
#ifdef trace_path
#undef trace_path
#define trace_path __real_trace_path
#endif

int trace_path(struct ftl_map *map, struct ftl_page_desc *new_page_desc,
	uint32_t *page, uint32_t va)
{
	struct ftl_page_desc page_desc;
	uint8_t depth = 0;
	uint32_t upage = map->root;

	if (new_page_desc)
		new_page_desc->va = va;

	if (upage == UINT32_MAX)
		goto err_not_found;

	if (read_page_desc(map, &page_desc, upage) < 0)
		return -1;

	for (; depth < 32; ++depth) {
		if (page_desc.va == UINT32_MAX)
			goto err_not_found;

		/* The bits match at this depth: no need to branch. Note the
		 * unsigned constant; a plain 1 << 31 would overflow int.
		 */
		if (!((va ^ page_desc.va) & (UINT32_C(1) << (31 - depth)))) {
			if (new_page_desc)
				new_page_desc->subtrees[depth] = page_desc.subtrees[depth];

			continue;
		}

		if (new_page_desc)
			new_page_desc->subtrees[depth] = upage;

		if ((upage = page_desc.subtrees[depth]) == UINT32_MAX) {
			++depth;
			goto err_not_found;
		}

		if (read_page_desc(map, &page_desc, upage) < 0)
			return -1;
	}

	if (page)
		*page = upage;

	return 0;

err_not_found:
	if (new_page_desc) {
		for (; depth < 32; ++depth) {
			new_page_desc->subtrees[depth] = UINT32_MAX;
		}
	}

	return -ERR_NOT_FOUND;
}

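/* Worked example (illustrative): looking up va = 0x80000000 when the root
 * descriptor holds va = 0x00000000. The addresses first differ at bit 31,
 * i.e. at depth 0, so the traversal leaves the root and continues with the
 * page stored in subtrees[0]; had the bits matched at every depth, the root
 * itself would have been the page we were after.
 */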
|