drivers: block: add block device cache

Add a block device cache to speed up repeated reads of block devices by
various filesystems.

This small amount of cache can dramatically speed up filesystem
operations by skipping repeated reads of common areas of a block
device (typically directory structures).

This has been shown to provide some benefit for FAT filesystem operations of
loading a kernel and RAM disk, but more dramatic benefits on ext4
filesystems when the kernel and/or RAM disk are spread across
multiple extent header structures as described in commit fc0fc50.

The cache is implemented through a minimal list (block_cache) maintained
in most-recently-used order and count of the current number of entries
(cache_count). It uses a maximum block count setting to prevent copies
of large block reads and an upper bound on the number of cached areas.

The maximum number of entries in the cache defaults to 32 and the maximum
number of blocks per cache entry has a default of 2, which has shown to
produce the best results on testing of ext4 and FAT filesystems.

The 'blkcache' command (enabled through CONFIG_CMD_BLOCK_CACHE) allows
changing these values and can be used to tune for a particular filesystem
layout.

Signed-off-by: Eric Nelson <eric@nelint.com>
master
Eric Nelson 8 years ago committed by Tom Rini
parent e721e98125
commit e40cf34a29
  1. 11
      cmd/Kconfig
  2. 1
      cmd/Makefile
  3. 89
      cmd/blkcache.c
  4. 2
      disk/part.c
  5. 9
      drivers/block/Kconfig
  6. 1
      drivers/block/Makefile
  7. 13
      drivers/block/blk-uclass.c
  8. 173
      drivers/block/blkcache.c
  9. 105
      include/blk.h

@ -497,6 +497,17 @@ config SYS_AMBAPP_PRINT_ON_STARTUP
help
Show AMBA Plug-n-Play information on startup.
config CMD_BLOCK_CACHE
bool "blkcache - control and stats for block cache"
depends on BLOCK_CACHE
default y if BLOCK_CACHE
help
Enable the blkcache command, which can be used to control the
operation of the cache functions.
This is most useful when fine-tuning the operation of the cache
during development, but also allows the cache to be disabled when
it might hurt performance (e.g. when using the ums command).
config CMD_TIME
bool "time"
help

@ -20,6 +20,7 @@ obj-$(CONFIG_SOURCE) += source.o
obj-$(CONFIG_CMD_SOURCE) += source.o
obj-$(CONFIG_CMD_BDI) += bdinfo.o
obj-$(CONFIG_CMD_BEDBUG) += bedbug.o
obj-$(CONFIG_CMD_BLOCK_CACHE) += blkcache.o
obj-$(CONFIG_CMD_BMP) += bmp.o
obj-$(CONFIG_CMD_BOOTEFI) += bootefi.o
obj-$(CONFIG_CMD_BOOTMENU) += bootmenu.o

@ -0,0 +1,89 @@
/*
* Copyright (C) Nelson Integration, LLC 2016
* Author: Eric Nelson<eric@nelint.com>
*
* SPDX-License-Identifier: GPL-2.0+
*
*/
#include <config.h>
#include <common.h>
#include <malloc.h>
#include <part.h>
/*
 * 'blkcache show' handler: print the current block-cache statistics.
 * blkcache_stats() also resets the hit/miss counters as a side effect.
 */
static int blkc_show(cmd_tbl_t *cmdtp, int flag,
		     int argc, char * const argv[])
{
	struct block_cache_stats s;

	blkcache_stats(&s);

	printf(" hits: %u\n"
	       " misses: %u\n"
	       " entries: %u\n"
	       " max blocks/entry: %u\n"
	       " max cache entries: %u\n",
	       s.hits, s.misses, s.entries,
	       s.max_blocks_per_entry, s.max_entries);

	return 0;
}
/*
 * 'blkcache configure <blocks> <entries>' handler: set the maximum
 * blocks per cache entry and the maximum number of entries.
 */
static int blkc_configure(cmd_tbl_t *cmdtp, int flag,
			  int argc, char * const argv[])
{
	unsigned nblocks;
	unsigned nentries;

	if (argc != 3)
		return CMD_RET_USAGE;

	nblocks = simple_strtoul(argv[1], 0, 0);
	nentries = simple_strtoul(argv[2], 0, 0);
	blkcache_configure(nblocks, nentries);

	printf("changed to max of %u entries of %u blocks each\n",
	       nentries, nblocks);

	return 0;
}
/* sub-command dispatch table for the 'blkcache' command */
static cmd_tbl_t cmd_blkc_sub[] = {
	U_BOOT_CMD_MKENT(show, 0, 0, blkc_show, "", ""),
	U_BOOT_CMD_MKENT(configure, 3, 0, blkc_configure, "", ""),
};
/*
 * Fix up the sub-command table pointers exactly once after relocation.
 * Only called on platforms that define CONFIG_NEEDS_MANUAL_RELOC.
 *
 * Fix: removed a stray ';' after the if-block's closing brace.
 */
static __maybe_unused void blkc_reloc(void)
{
	static int relocated;

	if (!relocated) {
		fixup_cmdtable(cmd_blkc_sub, ARRAY_SIZE(cmd_blkc_sub));
		relocated = 1;
	}
}
/*
 * Top-level 'blkcache' command handler: strip the command name and
 * dispatch to the matching entry in cmd_blkc_sub.
 *
 * Fix: removed the unreachable 'return 0;' that followed an if/else
 * in which both branches already return.
 */
static int do_blkcache(cmd_tbl_t *cmdtp, int flag,
		       int argc, char * const argv[])
{
	cmd_tbl_t *c;

#ifdef CONFIG_NEEDS_MANUAL_RELOC
	blkc_reloc();
#endif
	if (argc < 2)
		return CMD_RET_USAGE;

	/* Strip off leading argument */
	argc--;
	argv++;

	c = find_cmd_tbl(argv[0], &cmd_blkc_sub[0], ARRAY_SIZE(cmd_blkc_sub));
	if (!c)
		return CMD_RET_USAGE;

	return c->cmd(cmdtp, flag, argc, argv);
}
/* register the 'blkcache' command (max 4 args, not repeatable) */
U_BOOT_CMD(
	blkcache, 4, 0, do_blkcache,
	"block cache diagnostics and control",
	"show - show and reset statistics\n"
	"blkcache configure blocks entries\n"
);

@ -268,6 +268,8 @@ void part_init(struct blk_desc *dev_desc)
const int n_ents = ll_entry_count(struct part_driver, part_driver);
struct part_driver *entry;
blkcache_invalidate(dev_desc->if_type, dev_desc->devnum);
dev_desc->part_type = PART_TYPE_UNKNOWN;
for (entry = drv; entry != drv + n_ents; entry++) {
int ret;

@ -18,3 +18,12 @@ config DISK
types can use this, such as AHCI/SATA. It does not provide any standard
operations at present. The block device interface has not been converted
to driver model.
config BLOCK_CACHE
bool "Use block device cache"
default n
help
This option enables a disk-block cache for all block devices.
This is most useful when accessing filesystems under U-Boot since
it will prevent repeated reads from directory structures and other
filesystem data structures.

@ -24,3 +24,4 @@ obj-$(CONFIG_IDE_SIL680) += sil680.o
obj-$(CONFIG_SANDBOX) += sandbox.o
obj-$(CONFIG_SCSI_SYM53C8XX) += sym53c8xx.o
obj-$(CONFIG_SYSTEMACE) += systemace.o
obj-$(CONFIG_BLOCK_CACHE) += blkcache.o

@ -80,11 +80,20 @@ unsigned long blk_dread(struct blk_desc *block_dev, lbaint_t start,
{
struct udevice *dev = block_dev->bdev;
const struct blk_ops *ops = blk_get_ops(dev);
ulong blks_read;
if (!ops->read)
return -ENOSYS;
return ops->read(dev, start, blkcnt, buffer);
if (blkcache_read(block_dev->if_type, block_dev->devnum,
start, blkcnt, block_dev->blksz, buffer))
return blkcnt;
blks_read = ops->read(dev, start, blkcnt, buffer);
if (blks_read == blkcnt)
blkcache_fill(block_dev->if_type, block_dev->devnum,
start, blkcnt, block_dev->blksz, buffer);
return blks_read;
}
unsigned long blk_dwrite(struct blk_desc *block_dev, lbaint_t start,
@ -96,6 +105,7 @@ unsigned long blk_dwrite(struct blk_desc *block_dev, lbaint_t start,
if (!ops->write)
return -ENOSYS;
blkcache_invalidate(block_dev->if_type, block_dev->devnum);
return ops->write(dev, start, blkcnt, buffer);
}
@ -108,6 +118,7 @@ unsigned long blk_derase(struct blk_desc *block_dev, lbaint_t start,
if (!ops->erase)
return -ENOSYS;
blkcache_invalidate(block_dev->if_type, block_dev->devnum);
return ops->erase(dev, start, blkcnt);
}

@ -0,0 +1,173 @@
/*
* Copyright (C) Nelson Integration, LLC 2016
* Author: Eric Nelson<eric@nelint.com>
*
* SPDX-License-Identifier: GPL-2.0+
*
*/
#include <config.h>
#include <common.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>
/*
 * One cached run of consecutive blocks read from a single block device.
 * Entries live on the global 'block_cache' list, kept in MRU order
 * (list head is the most recently used entry).
 */
struct block_cache_node {
	struct list_head lh;	/* linkage on block_cache */
	int iftype;		/* IF_TYPE_x of the owning device */
	int devnum;		/* device index within iftype */
	lbaint_t start;		/* first block number held in 'cache' */
	lbaint_t blkcnt;	/* number of blocks held in 'cache' */
	unsigned long blksz;	/* block size in bytes */
	char *cache;		/* blkcnt * blksz bytes of block data */
};
/* global cache list, maintained in most-recently-used order */
static LIST_HEAD(block_cache);

/*
 * Statistics and tunables. The defaults (2 blocks/entry, 32 entries)
 * were chosen from testing against ext4 and FAT filesystems per the
 * commit message.
 */
static struct block_cache_stats _stats = {
	.max_blocks_per_entry = 2,
	.max_entries = 32
};
/*
 * cache_find() - look up a cache entry that fully contains a block range
 *
 * An entry matches when device identity and block size agree and the
 * entry's [start, start + blkcnt) span covers the requested range.
 * A hit is promoted to the head of the list to maintain MRU ordering.
 *
 * Fixes: return NULL (not 0) for the pointer result; use the standard
 * list_move() helper instead of a manual list_del()/list_add() pair.
 *
 * @return matching node, or NULL if the range is not cached
 */
static struct block_cache_node *cache_find(int iftype, int devnum,
					   lbaint_t start, lbaint_t blkcnt,
					   unsigned long blksz)
{
	struct block_cache_node *node;

	list_for_each_entry(node, &block_cache, lh)
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum) &&
		    (node->blksz == blksz) &&
		    (node->start <= start) &&
		    (node->start + node->blkcnt >= start + blkcnt)) {
			/* maintain MRU ordering */
			if (block_cache.next != &node->lh)
				list_move(&node->lh, &block_cache);
			return node;
		}
	return NULL;
}
/*
 * blkcache_read() - attempt to satisfy a block read from the cache
 *
 * On a hit the requested range is copied out of the (possibly larger)
 * cached entry into 'buffer'.
 *
 * @return 1 if the data was served from the cache, 0 on a miss
 */
int blkcache_read(int iftype, int devnum,
		  lbaint_t start, lbaint_t blkcnt,
		  unsigned long blksz, void *buffer)
{
	struct block_cache_node *node;

	node = cache_find(iftype, devnum, start, blkcnt, blksz);
	if (!node) {
		debug("miss: start " LBAF ", count " LBAFU "\n",
		      start, blkcnt);
		++_stats.misses;
		return 0;
	}

	/* entry may begin before 'start'; offset into its buffer */
	memcpy(buffer, node->cache + (start - node->start) * blksz,
	       blksz * blkcnt);
	debug("hit: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);
	++_stats.hits;
	return 1;
}
/*
 * blkcache_fill() - make data read from a block device available
 * to the block cache
 *
 * Reads larger than max_blocks_per_entry are not cached. When the
 * cache is full, the least recently used entry (list tail) is evicted
 * and its node (and, if big enough, its data buffer) is reused.
 *
 * Fixes: use list_entry() to recover the node from the list tail
 * instead of a raw cast that silently depends on 'lh' being the
 * struct's first member; use NULL instead of 0 for pointers.
 */
void blkcache_fill(int iftype, int devnum,
		   lbaint_t start, lbaint_t blkcnt,
		   unsigned long blksz, void const *buffer)
{
	lbaint_t bytes;
	struct block_cache_node *node;

	/* don't cache big stuff */
	if (blkcnt > _stats.max_blocks_per_entry)
		return;

	if (_stats.max_entries == 0)
		return;

	bytes = blksz * blkcnt;
	if (_stats.max_entries <= _stats.entries) {
		/* pop LRU (tail of the MRU-ordered list) */
		node = list_entry(block_cache.prev,
				  struct block_cache_node, lh);
		list_del(&node->lh);
		_stats.entries--;
		debug("drop: start " LBAF ", count " LBAFU "\n",
		      node->start, node->blkcnt);
		/* reuse the old data buffer only if it is large enough */
		if (node->blkcnt * node->blksz < bytes) {
			free(node->cache);
			node->cache = NULL;
		}
	} else {
		node = malloc(sizeof(*node));
		if (!node)
			return;
		node->cache = NULL;
	}

	if (!node->cache) {
		node->cache = malloc(bytes);
		if (!node->cache) {
			free(node);
			return;
		}
	}

	debug("fill: start " LBAF ", count " LBAFU "\n",
	      start, blkcnt);

	node->iftype = iftype;
	node->devnum = devnum;
	node->start = start;
	node->blkcnt = blkcnt;
	node->blksz = blksz;
	memcpy(node->cache, buffer, bytes);
	list_add(&node->lh, &block_cache);
	_stats.entries++;
}
/*
 * blkcache_invalidate() - discard all cached entries for one device,
 * called on writes, erases, and (re)initialization.
 *
 * Fix: use list_entry() instead of casting the raw list_head pointer,
 * which silently depends on 'lh' being the struct's first member.
 */
void blkcache_invalidate(int iftype, int devnum)
{
	struct list_head *entry, *n;
	struct block_cache_node *node;

	list_for_each_safe(entry, n, &block_cache) {
		node = list_entry(entry, struct block_cache_node, lh);
		if ((node->iftype == iftype) &&
		    (node->devnum == devnum)) {
			list_del(entry);
			free(node->cache);
			free(node);
			--_stats.entries;
		}
	}
}
void blkcache_configure(unsigned blocks, unsigned entries)
{
struct block_cache_node *node;
if ((blocks != _stats.max_blocks_per_entry) ||
(entries != _stats.max_entries)) {
/* invalidate cache */
while (!list_empty(&block_cache)) {
node = (struct block_cache_node *)block_cache.next;
list_del(&node->lh);
free(node->cache);
free(node);
}
_stats.entries = 0;
}
_stats.max_blocks_per_entry = blocks;
_stats.max_entries = entries;
_stats.hits = 0;
_stats.misses = 0;
}
void blkcache_stats(struct block_cache_stats *stats)
{
memcpy(stats, &_stats, sizeof(*stats));
_stats.hits = 0;
_stats.misses = 0;
}

@ -83,6 +83,97 @@ struct blk_desc {
#define PAD_TO_BLOCKSIZE(size, blk_desc) \
(PAD_SIZE(size, blk_desc->blksz))
#ifdef CONFIG_BLOCK_CACHE
/**
* blkcache_read() - attempt to read a set of blocks from cache
*
* @param iftype - IF_TYPE_x for type of device
* @param dev - device index of particular type
* @param start - starting block number
* @param blkcnt - number of blocks to read
* @param blksz - size in bytes of each block
* @param buf - buffer to contain cached data
*
* @return - '1' if block returned from cache, '0' otherwise.
*/
int blkcache_read
(int iftype, int dev,
lbaint_t start, lbaint_t blkcnt,
unsigned long blksz, void *buffer);
/**
* blkcache_fill() - make data read from a block device available
* to the block cache
*
* @param iftype - IF_TYPE_x for type of device
* @param dev - device index of particular type
* @param start - starting block number
* @param blkcnt - number of blocks available
* @param blksz - size in bytes of each block
* @param buf - buffer containing data to cache
*
*/
void blkcache_fill
(int iftype, int dev,
lbaint_t start, lbaint_t blkcnt,
unsigned long blksz, void const *buffer);
/**
* blkcache_invalidate() - discard the cache for a set of blocks
* because of a write or device (re)initialization.
*
* @param iftype - IF_TYPE_x for type of device
* @param dev - device index of particular type
*/
void blkcache_invalidate
(int iftype, int dev);
/**
* blkcache_configure() - configure block cache
*
* @param blocks - maximum blocks per entry
* @param entries - maximum entries in cache
*/
void blkcache_configure(unsigned blocks, unsigned entries);
/*
 * statistics of the block cache
 */
struct block_cache_stats {
	unsigned hits;		/* reads satisfied from the cache */
	unsigned misses;	/* reads that had to go to the device */
	unsigned entries;	/* current entry count */
	unsigned max_blocks_per_entry;	/* larger reads are not cached */
	unsigned max_entries;	/* upper bound on cached areas */
};
/**
 * blkcache_stats() - return statistics and reset the hit/miss counters
 *
 * @param stats - statistics are copied here
 */
void blkcache_stats(struct block_cache_stats *stats);
#else
/* cache disabled: always report a miss so callers read the device */
static inline int blkcache_read
	(int iftype, int dev,
	 lbaint_t start, lbaint_t blkcnt,
	 unsigned long blksz, void *buffer)
{
	return 0;
}
/* cache disabled: filling and invalidating are no-ops */
static inline void blkcache_fill
	(int iftype, int dev,
	 lbaint_t start, lbaint_t blkcnt,
	 unsigned long blksz, void const *buffer) {}

static inline void blkcache_invalidate
	(int iftype, int dev) {}
#endif
#ifdef CONFIG_BLK
struct udevice;
@ -224,23 +315,35 @@ int blk_unbind_all(int if_type);
static inline ulong blk_dread(struct blk_desc *block_dev, lbaint_t start,
lbaint_t blkcnt, void *buffer)
{
ulong blks_read;
if (blkcache_read(block_dev->if_type, block_dev->devnum,
start, blkcnt, block_dev->blksz, buffer))
return blkcnt;
/*
* We could check if block_read is NULL and return -ENOSYS. But this
* bloats the code slightly (cause some board to fail to build), and
* it would be an error to try an operation that does not exist.
*/
return block_dev->block_read(block_dev, start, blkcnt, buffer);
blks_read = block_dev->block_read(block_dev, start, blkcnt, buffer);
if (blks_read == blkcnt)
blkcache_fill(block_dev->if_type, block_dev->devnum,
start, blkcnt, block_dev->blksz, buffer);
return blks_read;
}
static inline ulong blk_dwrite(struct blk_desc *block_dev, lbaint_t start,
			       lbaint_t blkcnt, const void *buffer)
{
	/* a write makes any cached copy of this device's blocks stale */
	blkcache_invalidate(block_dev->if_type, block_dev->devnum);
	return block_dev->block_write(block_dev, start, blkcnt, buffer);
}
static inline ulong blk_derase(struct blk_desc *block_dev, lbaint_t start,
			       lbaint_t blkcnt)
{
	/* an erase makes any cached copy of this device's blocks stale */
	blkcache_invalidate(block_dev->if_type, block_dev->devnum);
	return block_dev->block_erase(block_dev, start, blkcnt);
}
#endif /* !CONFIG_BLK */

Loading…
Cancel
Save