git: 3684bb89d520 - stable/13 - MFC: Improve readahead in loader
Date: Wed, 29 Dec 2021 23:06:30 UTC
The branch stable/13 has been updated by cperciva:

URL: https://cgit.FreeBSD.org/src/commit/?id=3684bb89d5208575b341833225a798d5b5c465de

commit 3684bb89d5208575b341833225a798d5b5c465de
Author:     Colin Percival <cperciva@FreeBSD.org>
AuthorDate: 2021-09-19 22:24:00 +0000
Commit:     Colin Percival <cperciva@FreeBSD.org>
CommitDate: 2021-12-29 22:53:19 +0000

    MFC: Improve readahead in loader

    style: Fix leading whitespace in bcache.c
    loader: Refactor readahead adjustment in bcache
    loader bcache: Track unconsumed readahead
    loader bcache: Allow readahead up to 256 kB I/Os

    Sponsored by:   https://www.patreon.com/cperciva

    (cherry picked from commit 5a01dea7e8c9640605e5731a9fc7f600c07ace61)
    (cherry picked from commit b841148bbbdc967c871e8742a6f0b7b17b2d1d91)
    (cherry picked from commit 04b9b7c507c52daf7b2999329a50825db098151f)
    (cherry picked from commit 248682a589159619aa5f2019e415a423e849e327)
---
 stand/common/bcache.c | 40 +++++++++++++++++++++++++++++++++-------
 1 file changed, 33 insertions(+), 7 deletions(-)

diff --git a/stand/common/bcache.c b/stand/common/bcache.c
index 526f9fe3fa5c..99c78b0346b7 100644
--- a/stand/common/bcache.c
+++ b/stand/common/bcache.c
@@ -67,6 +67,7 @@ struct bcache {
     size_t	bcache_nblks;
     size_t	ra;
     daddr_t	bcache_nextblkno;
+    size_t	ralen;
 };
 
 static u_int bcache_total_nblks;	/* set by bcache_init */
@@ -85,8 +86,9 @@ static u_int bcache_rablks;
 #define	BHASH(bc, blkno)	((blkno) & ((bc)->bcache_nblks - 1))
 #define	BCACHE_LOOKUP(bc, blkno)	\
 	((bc)->bcache_ctl[BHASH((bc), (blkno))].bc_blkno != (blkno))
-#define	BCACHE_READAHEAD	256
+#define	BCACHE_READAHEAD	512
 #define	BCACHE_MINREADAHEAD	32
+#define	BCACHE_MAXIOWRA	512
 
 static void	bcache_invalidate(struct bcache *bc, daddr_t blkno);
 static void	bcache_insert(struct bcache *bc, daddr_t blkno);
@@ -238,20 +240,39 @@ read_strategy(void *devdata, int rw, daddr_t blk, size_t size,
 	if (BCACHE_LOOKUP(bc, (daddr_t)(blk + i))) {
 	    bcache_misses += (nblk - i);
 	    complete = 0;
-	    if (nblk - i > BCACHE_MINREADAHEAD && bc->ra > BCACHE_MINREADAHEAD)
-		bc->ra >>= 1;	/* reduce read ahead */
 	    break;
 	} else {
 	    bcache_hits++;
 	}
     }
 
-    if (complete) {	/* whole set was in cache, return it */
+    /*
+     * Adjust read-ahead size if appropriate.  Subject to the requirement
+     * that bc->ra must stay in between MINREADAHEAD and READAHEAD, we
+     * increase it when we notice that readahead was useful and decrease
+     * it when we notice that readahead was not useful.
+     */
+    if (complete || (i == bc->ralen && bc->ralen > 0)) {
 	if (bc->ra < BCACHE_READAHEAD)
-	    bc->ra <<= 1;	/* increase read ahead */
+	    bc->ra <<= 1;	/* increase read ahead */
+    } else {
+	if (nblk - i > BCACHE_MINREADAHEAD && bc->ralen > 0 &&
+	  bc->ra > BCACHE_MINREADAHEAD)
+	    bc->ra >>= 1;	/* reduce read ahead */
+    }
+
+    /* Adjust our "unconsumed readahead" value. */
+    if (blk == bc->bcache_nextblkno) {
+	if (nblk > bc->ralen)
+	    bc->ralen = 0;
+	else
+	    bc->ralen -= nblk;
+    }
+
+    if (complete) {	/* whole set was in cache, return it */
 	bcopy(bc->bcache_data + (bcache_blksize * BHASH(bc, blk)), buf, size);
 	goto done;
-   }
+    }
 
     /*
      * Fill in any misses. From check we have i pointing to first missing
@@ -304,7 +325,12 @@ read_strategy(void *devdata, int rw, daddr_t blk, size_t size,
     if (ra != 0 && ra != bc->bcache_nblks) { /* do we have RA space? */
 	ra = MIN(bc->ra, ra - 1);
 	ra = rounddown(ra, 16);	/* multiple of 16 blocks */
+	if (ra + p_size > BCACHE_MAXIOWRA)
+	    ra = BCACHE_MAXIOWRA - p_size;
+	bc->ralen = ra;
 	p_size += ra;
+    } else {
+	bc->ralen = 0;
     }
 
     /* invalidate bcache */
@@ -351,7 +377,7 @@ read_strategy(void *devdata, int rw, daddr_t blk, size_t size,
 	result = 0;
     }
 
- done:
+done:
     if (result == 0) {
 	if (rsize != NULL)
 	    *rsize = size;
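
[Editor's note] For readers following along outside the tree, below is a minimal standalone sketch (not part of the commit) of the grow/shrink readahead policy the diff implements: the window doubles while readahead keeps paying off, halves when it does not, stays clamped between a minimum and maximum, and an "unconsumed readahead" counter is drained as blocks are consumed. The names ra_state and ra_adjust are hypothetical; the constants only mirror BCACHE_MINREADAHEAD and BCACHE_READAHEAD from stand/common/bcache.c.

/*
 * Sketch only: a self-contained model of the adaptive readahead policy,
 * not the loader code itself.
 */
#include <stddef.h>
#include <stdio.h>

#define RA_MIN  32   /* mirrors BCACHE_MINREADAHEAD (blocks) */
#define RA_MAX  512  /* mirrors BCACHE_READAHEAD after this change (blocks) */

struct ra_state {
    size_t ra;     /* current readahead window, in blocks */
    size_t ralen;  /* readahead blocks not yet consumed */
};

/*
 * Update the window after a read of nblk blocks.  "useful" stands in for
 * the commit's test that the previous readahead was actually hit.
 */
static void
ra_adjust(struct ra_state *rs, size_t nblk, int useful)
{
    if (useful) {
        if (rs->ra < RA_MAX)
            rs->ra <<= 1;          /* grow the window */
    } else if (rs->ra > RA_MIN) {
        rs->ra >>= 1;              /* shrink the window */
    }

    /* Consume readahead, never letting the counter go negative. */
    if (nblk > rs->ralen)
        rs->ralen = 0;
    else
        rs->ralen -= nblk;
}

int
main(void)
{
    struct ra_state rs = { RA_MIN, 0 };
    int i;

    for (i = 0; i < 4; i++) {      /* sequential reads that hit readahead */
        ra_adjust(&rs, 16, 1);
        printf("after hit:  ra = %zu blocks\n", rs.ra);
    }
    ra_adjust(&rs, 16, 0);         /* a miss backs the window off */
    printf("after miss: ra = %zu blocks\n", rs.ra);
    return (0);
}

This simplifies the real code in one respect: in bcache.c the decision to grow or shrink also depends on how much of the previous readahead was consumed (the bc->ralen checks in the diff), which is what lets the loader avoid penalizing readahead that simply has not been read yet.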