@@ -12,16 +12,16 @@
 /* Given a page number, this function checks whether the page is fully erased
  * by checking if all bits are set to ones.
  */
-static int is_page_erased(struct ftl_journal *j, uint32_t page)
+static int is_page_erased(struct ftl_map *map, uint32_t page)
 {
 	uint8_t data[64];
-	size_t i, nbytes, len = j->log2_page_size;
-	uint32_t addr = page << j->log2_page_size;
+	size_t i, nbytes, len = map->log2_page_size;
+	uint32_t addr = page << map->log2_page_size;
 
 	while (len) {
 		nbytes = min(sizeof data, len);
 
-		if (flash_read(j->dev, addr, data, nbytes) < 0)
+		if (flash_read(map->dev, addr, data, nbytes) < 0)
 			return 0;
 
 		for (i = 0; i < nbytes; ++i) {
@@ -39,14 +39,14 @@ static int is_page_erased(struct ftl_journal *j, uint32_t page)
 /* Given the group number, this function checks if a page group is erased by
  * checking if the pages that compose the page group are erased.
  */
-static int is_group_erased(struct ftl_journal *j, uint32_t group)
+static int is_group_erased(struct ftl_map *map, uint32_t group)
 {
-	uint32_t npages = UINT32_C(1) << j->log2_pages_per_group;
-	uint32_t page = group << j->log2_pages_per_group;
+	uint32_t npages = UINT32_C(1) << map->log2_pages_per_group;
+	uint32_t page = group << map->log2_pages_per_group;
 	uint32_t i;
 
 	for (i = 0; i < npages; ++i) {
-		if (!is_page_erased(j, page + i))
+		if (!is_page_erased(map, page + i))
 			return 0;
 	}
 
@@ -62,17 +62,17 @@ static int is_group_erased(struct ftl_journal *j, uint32_t group)
  * possible pages on the devices, the page number of the very first user page
  * is returned instead.
  */
-static uint32_t next_upage(struct ftl_journal *j, uint32_t p)
+static uint32_t next_upage(struct ftl_map *map, uint32_t p)
 {
-	size_t log2_pages_per_block = j->log2_pages_per_group +
-		j->log2_groups_per_block;
+	size_t log2_pages_per_block = map->log2_pages_per_group +
+		map->log2_groups_per_block;
 
 	++p;
 
-	if (is_aligned(p + 1, j->log2_pages_per_group))
+	if (is_aligned(p + 1, map->log2_pages_per_group))
 		++p;
 
-	if (p >= (j->nblocks << log2_pages_per_block))
+	if (p >= (map->nblocks << log2_pages_per_block))
 		p = 0;
 
 	return p;
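For intuition, here is a standalone trace of the skip logic in next_upage() — a sketch outside the patch proper, assuming 4 pages per page group (log2_pages_per_group = 2), one group per block, and a 2-block device. The last page of every group holds the footer, so pages 3 and 7 are skipped and the walk wraps to the very first user page:

	#include <stdint.h>
	#include <stdio.h>

	/* Same contract as the FTL helper: is v a multiple of 2^log2? */
	static int is_aligned(uint32_t v, unsigned log2)
	{
		return (v & ((UINT32_C(1) << log2) - 1)) == 0;
	}

	int main(void)
	{
		uint32_t p = 0, nblocks = 2;
		unsigned log2_pages_per_group = 2, log2_pages_per_block = 2;
		int i;

		for (i = 0; i < 8; ++i) {
			++p;
			if (is_aligned(p + 1, log2_pages_per_group))
				++p;	/* skip the footer page of this group */
			if (p >= (nblocks << log2_pages_per_block))
				p = 0;	/* wrap to the very first user page */
			printf("%u ", (unsigned)p);
		}
		printf("\n");	/* prints: 1 2 4 5 6 0 1 2 */
		return 0;
	}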
@@ -80,71 +80,71 @@ static uint32_t next_upage(struct ftl_journal *j, uint32_t p)
 
 /* Reads the header of the given page group.
  */
-int read_page_group(struct ftl_journal *j,
+int read_page_group(struct ftl_map *map,
 	struct ftl_page_group *group, uint32_t group_no)
 {
 	uint32_t page, addr;
 
-	page = ((group_no + 1) << j->log2_pages_per_group) - 1;
-	addr = page << j->log2_page_size;
+	page = ((group_no + 1) << map->log2_pages_per_group) - 1;
+	addr = page << map->log2_page_size;
 
-	return flash_read(j->dev, addr, group, sizeof *group);
+	return flash_read(map->dev, addr, group, sizeof *group);
 }
 
 /* Given the page number of a user page, reads the page descriptor associated
  * with the user page by locating the footer and more specifically the page
  * descriptor within the page group.
  */
-int read_page_desc(struct ftl_journal *j,
+int read_page_desc(struct ftl_map *map,
 	struct ftl_page_desc *page_desc, uint32_t upage)
 {
 	uint32_t group_no, page, addr, offset;
 
-	group_no = upage >> j->log2_pages_per_group;
-	page = ((group_no + 1) << j->log2_pages_per_group) - 1;
-	addr = page << j->log2_page_size;
+	group_no = upage >> map->log2_pages_per_group;
+	page = ((group_no + 1) << map->log2_pages_per_group) - 1;
+	addr = page << map->log2_page_size;
 	offset = sizeof(struct ftl_page_group) +
-		(upage & ((1 << j->log2_pages_per_group) - 1)) * sizeof *page_desc;
+		(upage & ((1 << map->log2_pages_per_group) - 1)) * sizeof *page_desc;
 
-	return flash_read(j->dev, addr + offset, page_desc, sizeof *page_desc);
+	return flash_read(map->dev, addr + offset, page_desc, sizeof *page_desc);
 }
 
 /* Writes the page descriptor to the footer of the current page group and
  * increments the head to point to the next free user page.
  */
-static int write_page_desc(struct ftl_journal *j,
+static int write_page_desc(struct ftl_map *map,
 	const struct ftl_page_desc *page_desc)
 {
 	struct ftl_page_group group;
 	uint32_t group_no, page, addr, offset, head;
 
-	group_no = j->head >> j->log2_pages_per_group;
-	page = ((group_no + 1) << j->log2_pages_per_group) - 1;
-	addr = page << j->log2_page_size;
+	group_no = map->head >> map->log2_pages_per_group;
+	page = ((group_no + 1) << map->log2_pages_per_group) - 1;
+	addr = page << map->log2_page_size;
 
 	/* Write the page group header. */
-	if (is_page_erased(j, page)) {
+	if (is_page_erased(map, page)) {
 		memcpy(&group.magic, "FTL", sizeof group.magic);
-		group.epoch = j->epoch;
-		group.tail = j->tail;
+		group.epoch = map->epoch;
+		group.tail = map->tail;
 
-		if (flash_write(j->dev, addr, &group, sizeof group) < 0)
+		if (flash_write(map->dev, addr, &group, sizeof group) < 0)
 			return -1;
 	}
 
-	offset = sizeof group + (j->head & ((1 << j->log2_pages_per_group) - 1)) *
+	offset = sizeof group + (map->head & ((1 << map->log2_pages_per_group) - 1)) *
 		sizeof *page_desc;
 
-	if (flash_write(j->dev, addr + offset, page_desc, sizeof *page_desc) < 0)
+	if (flash_write(map->dev, addr + offset, page_desc, sizeof *page_desc) < 0)
 		return -1;
 
-	j->root = j->head;
+	map->root = map->head;
 
-	head = j->head;
-	j->head = next_upage(j, j->head);
+	head = map->head;
+	map->head = next_upage(map, map->head);
 
-	if (j->head < head)
-		++j->epoch;
+	if (map->head < head)
+		++map->epoch;
 
 	return 0;
 }
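read_page_desc() and write_page_desc() must agree on where a descriptor lives inside the group footer. Worked by hand — a sketch outside the patch proper, with an assumed 8-byte struct ftl_page_group and 16-byte struct ftl_page_desc:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		unsigned log2_page_size = 12;		/* 4 KiB pages */
		unsigned log2_pages_per_group = 2;	/* 4 pages per group */
		uint32_t upage = 5;			/* second page of group 1 */
		uint32_t group_no, page, addr, offset;

		group_no = upage >> log2_pages_per_group;		/* 1 */
		page = ((group_no + 1) << log2_pages_per_group) - 1;	/* footer page 7 */
		addr = page << log2_page_size;				/* 7 * 4096 = 28672 */
		offset = 8 + (upage & ((1 << log2_pages_per_group) - 1)) * 16;

		/* The descriptor of user page 5 sits 8 + 1 * 16 = 24 bytes
		 * into the footer page. */
		printf("addr=%u offset=%u\n", (unsigned)addr, (unsigned)offset);
		return 0;
	}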
@@ -154,17 +154,17 @@ static int write_page_desc(struct ftl_journal *j,
  * group, whereupon the head is incremented to point to the next available user
  * page.
  */
-int write_upage(struct ftl_journal *j, const uint8_t *page,
+int write_upage(struct ftl_map *map, const uint8_t *page,
 	const struct ftl_page_desc *page_desc)
 {
-	if (prepare_head(j) < 0)
+	if (prepare_head(map) < 0)
 		return -1;
 
-	if (page && flash_write(j->dev, j->head << j->log2_page_size, page,
-		j->log2_page_size) < 0)
+	if (page && flash_write(map->dev, map->head << map->log2_page_size, page,
+		map->log2_page_size) < 0)
 		return -1;
 
-	return write_page_desc(j, page_desc);
+	return write_page_desc(map, page_desc);
 }
 
 /* Determines the amount of user pages to store in a page group by determining
@@ -175,25 +175,25 @@ int write_upage(struct ftl_journal *j, const uint8_t *page,
  * can also be determined, as a single page group may not cover a whole erase
  * block.
  */
-static int find_block_div(struct ftl_journal *j)
+static int find_block_div(struct ftl_map *map)
 {
-	size_t log2_pages_per_block = j->log2_block_size - j->log2_page_size;
-	size_t nbytes_avail = (1 << j->log2_page_size) -
+	size_t log2_pages_per_block = map->log2_block_size - map->log2_page_size;
+	size_t nbytes_avail = (1 << map->log2_page_size) -
 		sizeof(struct ftl_page_group);
 	size_t nbytes = sizeof(struct ftl_page_desc);
 
-	j->log2_pages_per_group = 1;
+	map->log2_pages_per_group = 1;
 
-	while (j->log2_pages_per_group < log2_pages_per_block) {
+	while (map->log2_pages_per_group < log2_pages_per_block) {
 		nbytes = 2 * nbytes + sizeof(struct ftl_page_desc);
 
 		if (nbytes > nbytes_avail)
 			break;
 
-		++j->log2_pages_per_group;
+		++map->log2_pages_per_group;
 	}
 
-	j->log2_groups_per_block = log2_pages_per_block - j->log2_pages_per_group;
+	map->log2_groups_per_block = log2_pages_per_block - map->log2_pages_per_group;
 
 	return 0;
 }
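With the defaults installed by reset_map() below (4 KiB pages, 64 KiB blocks), the sizing loop in find_block_div() runs like this — a sketch outside the patch proper, assuming an 8-byte group header and 16-byte descriptors:

	#include <stdio.h>

	int main(void)
	{
		unsigned log2_pages_per_block = 16 - 12;	/* 16 pages per block */
		unsigned nbytes_avail = 4096 - 8;	/* footer page minus header */
		unsigned nbytes = 16;			/* one descriptor */
		unsigned log2_pages_per_group = 1;

		while (log2_pages_per_group < log2_pages_per_block) {
			nbytes = 2 * nbytes + 16;	/* doubling the group grows the footer */
			if (nbytes > nbytes_avail)
				break;
			++log2_pages_per_group;
		}

		/* nbytes grows 16 -> 48 -> 112 -> 240; everything fits, so groups
		 * span 2^4 = 16 pages and log2_groups_per_block = 4 - 4 = 0. */
		printf("%u pages per group\n", 1u << log2_pages_per_group);
		return 0;
	}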
@@ -203,18 +203,18 @@ static int find_block_div(struct ftl_journal *j)
  * use, as a block can only be erased as a whole. Therefore, if the first page
  * group is not in use, neither will the other page groups in a block.
  */
-static int find_block(struct ftl_journal *j, struct ftl_page_group *group,
+static int find_block(struct ftl_map *map, struct ftl_page_group *group,
 	uint32_t *where, uint32_t block)
 {
 	uint32_t page;
 	unsigned attempt;
 
-	for (attempt = 0; block < j->nblocks && attempt < FTL_MAX_ATTEMPTS;
+	for (attempt = 0; block < map->nblocks && attempt < FTL_MAX_ATTEMPTS;
 		++attempt, ++block) {
-		page = block << j->log2_block_size;
-		page |= ((UINT32_C(1) << j->log2_pages_per_group) - 1) << j->log2_page_size;
+		page = block << map->log2_block_size;
+		page |= ((UINT32_C(1) << map->log2_pages_per_group) - 1) << map->log2_page_size;
 
-		if (flash_read(j->dev, page, group, sizeof *group) < 0)
+		if (flash_read(map->dev, page, group, sizeof *group) < 0)
 			continue;
 
 		if (memcmp(group->magic, "FTL", sizeof group->magic) != 0)
@@ -231,17 +231,17 @@ static int find_block(struct ftl_journal *j, struct ftl_page_group *group,
 /* Given the block number of the first block, attempts to use binary search to
  * find the last block that is in use.
  */
-static uint32_t find_last_block(struct ftl_journal *j, uint32_t first)
+static uint32_t find_last_block(struct ftl_map *map, uint32_t first)
 {
 	struct ftl_page_group group;
-	uint32_t mid, low = first, high = j->nblocks - 1;
+	uint32_t mid, low = first, high = map->nblocks - 1;
 	uint32_t found, next;
 
 	while (low <= high) {
 		mid = (low + high) / 2;
 
-		if (find_block(j, &group, &found, mid) < 0 ||
-			group.epoch != j->epoch) {
+		if (find_block(map, &group, &found, mid) < 0 ||
+			group.epoch != map->epoch) {
 			if (!mid)
 				return first;
 
@@ -250,9 +250,9 @@ static uint32_t find_last_block(struct ftl_journal *j, uint32_t first)
 			continue;
 		}
 
-		if (((found + 1) > j->nblocks) ||
-			find_block(j, &group, &next, found + 1) < 0 ||
-			group.epoch != j->epoch)
+		if (((found + 1) > map->nblocks) ||
+			find_block(map, &group, &next, found + 1) < 0 ||
+			group.epoch != map->epoch)
 			return found;
 
 		low = next;
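find_last_block() leans on the fact that blocks are filled in order: starting from `first`, the blocks carrying the current epoch form one contiguous run, and everything past its end is stale or erased. "Block is in use this epoch" is therefore a monotone predicate, which is what makes the binary search valid. The same pattern in isolation, with a toy predicate standing in for the flash reads (a sketch outside the patch proper):

	#include <stdint.h>
	#include <stdio.h>

	/* Toy stand-in for "find_block() succeeds and group.epoch matches". */
	static int in_use(uint32_t block)
	{
		return block <= 5;	/* pretend blocks 0..5 are in use */
	}

	int main(void)
	{
		uint32_t low = 0, high = 9, mid, last = 0;

		while (low <= high) {
			mid = (low + high) / 2;
			if (in_use(mid)) {
				last = mid;	/* candidate; search further right */
				low = mid + 1;
			} else {
				if (!mid)
					break;	/* mirrors the !mid guard above */
				high = mid - 1;
			}
		}
		printf("last block in use: %u\n", (unsigned)last);	/* 5 */
		return 0;
	}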
@@ -264,41 +264,41 @@ static uint32_t find_last_block(struct ftl_journal *j, uint32_t first)
 /* Attempts to find the last page group that is in use within a block by
  * performing a binary search on the page groups.
  */
-static uint32_t find_last_group(struct ftl_journal *j, uint32_t block)
+static uint32_t find_last_group(struct ftl_map *map, uint32_t block)
 {
-	uint32_t ngroups = UINT32_C(1) << j->log2_groups_per_block;
+	uint32_t ngroups = UINT32_C(1) << map->log2_groups_per_block;
 	uint32_t mid, low = 0, high = ngroups - 1;
 
 	while (low <= high) {
 		mid = (low + high) / 2;
 
-		if (is_group_erased(j, mid)) {
+		if (is_group_erased(map, mid)) {
 			high = mid - 1;
 			continue;
 		}
 
 		if (((mid + 1) >= ngroups) ||
-			is_group_erased(j, mid + 1))
-			return (block << j->log2_groups_per_block) + mid;
+			is_group_erased(map, mid + 1))
+			return (block << map->log2_groups_per_block) + mid;
 
 		low = mid + 1;
 	}
 
-	return block << j->log2_groups_per_block;
+	return block << map->log2_groups_per_block;
 }
 
-static int find_root(struct ftl_journal *j, uint32_t group)
+static int find_root(struct ftl_map *map, uint32_t group)
 {
 	struct ftl_page_desc page_desc;
 	uint32_t upage;
 
-	upage = group << j->log2_pages_per_group;
+	upage = group << map->log2_pages_per_group;
 
 	do {
-		j->root = upage;
-		upage = next_upage(j, upage);
+		map->root = upage;
+		upage = next_upage(map, upage);
 
-		if (read_page_desc(j, &page_desc, upage) < 0)
+		if (read_page_desc(map, &page_desc, upage) < 0)
 			return -1;
 	} while (page_desc.va != UINT32_MAX ||
 		page_desc.nused_pages == 0);
@@ -311,86 +311,86 @@ static int find_root(struct ftl_journal *j, uint32_t group)
  * within the page group, the first user page of the next page group should be
  * used as that page group should not be in use.
  */
-static int find_head(struct ftl_journal *j)
+static int find_head(struct ftl_map *map)
 {
-	size_t log2_pages_per_block = j->log2_pages_per_group +
-		j->log2_groups_per_block;
+	size_t log2_pages_per_block = map->log2_pages_per_group +
+		map->log2_groups_per_block;
 
-	j->head = j->root;
+	map->head = map->root;
 
 	do {
-		j->head = next_upage(j, j->head);
+		map->head = next_upage(map, map->head);
 
-		if (is_aligned(j->head, log2_pages_per_block))
+		if (is_aligned(map->head, log2_pages_per_block))
 			return 0;
-	} while (!is_page_erased(j, j->head));
+	} while (!is_page_erased(map, map->head));
 
 	return 0;
 }
 
-static void reset_journal(struct ftl_journal *j)
+static void reset_map(struct ftl_map *map)
 {
-	j->log2_erase_size = ilog2(4 * KIB);
-	j->log2_page_size = ilog2(4 * KIB);
-	j->log2_block_size = ilog2(64 * KIB);
+	map->log2_erase_size = ilog2(4 * KIB);
+	map->log2_page_size = ilog2(4 * KIB);
+	map->log2_block_size = ilog2(64 * KIB);
 
-	find_block_div(j);
+	find_block_div(map);
 
-	j->nblocks = flash_get_size(j->dev) >> j->log2_block_size;
+	map->nblocks = flash_get_size(map->dev) >> map->log2_block_size;
 
-	j->head = 0;
-	j->tail = 0;
-	j->root = UINT32_MAX;
-	j->nused_pages = 0;
-	j->epoch = 0;
+	map->head = 0;
+	map->tail = 0;
+	map->root = UINT32_MAX;
+	map->nused_pages = 0;
+	map->epoch = 0;
 }
 
-int ftl_init_journal(struct ftl_journal *j, struct flash_dev *dev)
+int ftl_init_map(struct ftl_map *map, struct flash_dev *dev)
 {
-	j->dev = dev;
+	map->dev = dev;
 
-	reset_journal(j);
+	reset_map(map);
 
 	return 0;
 }
 
-/* Resumes the journal by finding the first block that is in use, the last
+/* Resumes the map by finding the first block that is in use, the last
  * block that is in use, the last page group that is in use, and setting the
  * head to the first free user page.
  */
-int ftl_resume_journal(struct ftl_journal *j)
+int ftl_resume_map(struct ftl_map *map)
 {
 	struct ftl_page_group group;
 	struct ftl_page_desc page_desc;
 	uint32_t first, last, group_no;
 
-	if (!j)
+	if (!map)
 		return -1;
 
-	if (find_block(j, &group, &first, 0) < 0) {
-		reset_journal(j);
+	if (find_block(map, &group, &first, 0) < 0) {
+		reset_map(map);
 
 		return -1;
 	}
 
-	j->epoch = group.epoch;
-	last = find_last_block(j, first);
-	group_no = find_last_group(j, last);
+	map->epoch = group.epoch;
+	last = find_last_block(map, first);
+	group_no = find_last_group(map, last);
 
-	if (find_root(j, group_no) < 0)
+	if (find_root(map, group_no) < 0)
 		return -1;
 
-	if (find_head(j) < 0)
+	if (find_head(map) < 0)
 		return -1;
 
-	if (read_page_group(j, &group, j->root >> j->log2_pages_per_group) < 0)
+	if (read_page_group(map, &group, map->root >> map->log2_pages_per_group) < 0)
 		return -1;
 
-	if (read_page_desc(j, &page_desc, j->root) < 0)
+	if (read_page_desc(map, &page_desc, map->root) < 0)
 		return -1;
 
-	j->tail = group.tail;
-	j->nused_pages = page_desc.nused_pages;
+	map->tail = group.tail;
+	map->nused_pages = page_desc.nused_pages;
 
 	return 0;
 }
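From the caller's side, bring-up after this rename reads as follows — a sketch outside the patch proper, where `dev` is a struct flash_dev handle obtained from the flash driver:

	struct ftl_map map;

	if (ftl_init_map(&map, dev) < 0)
		return -1;

	if (ftl_resume_map(&map) < 0) {
		/* No valid first block: ftl_resume_map() has already called
		 * reset_map(), so the device can be treated as empty. Other
		 * failures (flash read errors) also return -1. */
	}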
@@ -401,12 +401,12 @@ int ftl_resume_journal(struct ftl_journal *j)
  * depth until we have either found that there is no further subtree to
  * traverse or until we have found the actual user page.
  */
-int trace_path(struct ftl_journal *j, struct ftl_page_desc *new_page_desc,
+int trace_path(struct ftl_map *map, struct ftl_page_desc *new_page_desc,
 	uint32_t *page, uint32_t va)
 {
 	struct ftl_page_desc page_desc;
 	uint8_t depth = 0;
-	uint32_t upage = j->root;
+	uint32_t upage = map->root;
 
 	if (new_page_desc)
 		new_page_desc->va = va;
@@ -414,7 +414,7 @@ int trace_path(struct ftl_journal *j, struct ftl_page_desc *new_page_desc,
 	if (upage == UINT32_MAX)
 		goto err_not_found;
 
-	if (read_page_desc(j, &page_desc, upage) < 0)
+	if (read_page_desc(map, &page_desc, upage) < 0)
 		return -1;
 
 	for (; depth < 32; ++depth) {
@@ -436,7 +436,7 @@ int trace_path(struct ftl_journal *j, struct ftl_page_desc *new_page_desc,
 			goto err_not_found;
 		}
 
-		if (read_page_desc(j, &page_desc, upage) < 0)
+		if (read_page_desc(map, &page_desc, upage) < 0)
 			return -1;
 	}
 