git: 09c8cb717d21 - main - uma: Add KASAN state transitions
Mark Johnston
markj at FreeBSD.org
Tue Apr 13 21:42:36 UTC 2021
The branch main has been updated by markj:
URL: https://cgit.FreeBSD.org/src/commit/?id=09c8cb717d214d03e51b3e4f8e9997b9f4e1624d
commit 09c8cb717d214d03e51b3e4f8e9997b9f4e1624d
Author: Mark Johnston <markj at FreeBSD.org>
AuthorDate: 2021-04-13 21:39:50 +0000
Commit: Mark Johnston <markj at FreeBSD.org>
CommitDate: 2021-04-13 21:42:21 +0000
uma: Add KASAN state transitions
- Add a UMA_ZONE_NOKASAN flag to indicate that items from a particular
zone should not be sanitized. This is applied implicitly for NOFREE
and cache zones.
- Add KASAN callbacks which get invoked:
1) when a slab is imported into a keg
2) when an item is allocated from a zone
3) when an item is freed to a zone
4) when a slab is freed back to the VM
In state transitions 1 and 3, memory is poisoned so that accesses will
trigger a panic. In state transitions 2 and 4, memory is marked
valid.
- Disable trashing if KASAN is enabled. It just adds extra CPU overhead
to catch problems that are detected by KASAN.
MFC after: 2 weeks
Sponsored by: The FreeBSD Foundation
Differential Revision: https://reviews.freebsd.org/D29456
---
sys/vm/uma.h | 7 ++-
sys/vm/uma_core.c | 158 +++++++++++++++++++++++++++++++++++++++++++++++-------
2 files changed, 144 insertions(+), 21 deletions(-)
diff --git a/sys/vm/uma.h b/sys/vm/uma.h
index f1bf7cea6e53..361c64900845 100644
--- a/sys/vm/uma.h
+++ b/sys/vm/uma.h
@@ -276,6 +276,11 @@ uma_zone_t uma_zcache_create(const char *name, int size, uma_ctor ctor,
*
* See sys/smr.h for more details.
*/
+#define UMA_ZONE_NOKASAN 0x80000 /*
+ * Disable KASAN verification. This is
+ * implied by NOFREE. Cache zones are
+ * not verified by default.
+ */
/* In use by UMA_ZFLAGs: 0xffe00000 */
/*
@@ -286,7 +291,7 @@ uma_zone_t uma_zcache_create(const char *name, int size, uma_ctor ctor,
#define UMA_ZONE_INHERIT \
(UMA_ZONE_NOTOUCH | UMA_ZONE_MALLOC | UMA_ZONE_NOFREE | \
UMA_ZONE_VM | UMA_ZONE_NOTPAGE | UMA_ZONE_PCPU | \
- UMA_ZONE_FIRSTTOUCH | UMA_ZONE_ROUNDROBIN)
+ UMA_ZONE_FIRSTTOUCH | UMA_ZONE_ROUNDROBIN | UMA_ZONE_NOKASAN)
/* Definitions for align */
#define UMA_ALIGN_PTR (sizeof(void *) - 1) /* Alignment fit for ptr */
diff --git a/sys/vm/uma_core.c b/sys/vm/uma_core.c
index b1762500c147..d73eef70e249 100644
--- a/sys/vm/uma_core.c
+++ b/sys/vm/uma_core.c
@@ -58,6 +58,7 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
+#include <sys/asan.h>
#include <sys/bitset.h>
#include <sys/domainset.h>
#include <sys/eventhandler.h>
@@ -318,6 +319,7 @@ static void bucket_zone_drain(void);
static uma_bucket_t zone_alloc_bucket(uma_zone_t, void *, int, int);
static void *slab_alloc_item(uma_keg_t keg, uma_slab_t slab);
static void slab_free_item(uma_zone_t zone, uma_slab_t slab, void *item);
+static size_t slab_sizeof(int nitems);
static uma_keg_t uma_kcreate(uma_zone_t zone, size_t size, uma_init uminit,
uma_fini fini, int align, uint32_t flags);
static int zone_import(void *, void **, int, int, int);
@@ -531,6 +533,94 @@ bucket_zone_drain(void)
uma_zone_reclaim(ubz->ubz_zone, UMA_RECLAIM_DRAIN);
}
+#ifdef KASAN
+/*
+ * Mark an item handed out by a zone as valid (accessible) in the KASAN
+ * shadow map.  For per-CPU zones, every CPU's copy of the item is marked.
+ * NOTE(review): assumes kasan_mark(addr, size, redzsize, code) semantics,
+ * i.e. [item, item + sz) becomes valid and the tail up to the
+ * KASAN_SHADOW_SCALE roundup stays a redzone — confirm against sys/asan.h.
+ */
+static void
+kasan_mark_item_valid(uma_zone_t zone, void *item)
+{
+ void *pcpu_item;
+ size_t sz, rsz;
+ int i;
+
+ /* NOKASAN zones (implied for NOFREE and cache zones) are not tracked. */
+ if ((zone->uz_flags & UMA_ZONE_NOKASAN) != 0)
+ return;
+
+ sz = zone->uz_size;
+ rsz = roundup2(sz, KASAN_SHADOW_SCALE);
+ if ((zone->uz_flags & UMA_ZONE_PCPU) == 0) {
+ kasan_mark(item, sz, rsz, 0);
+ } else {
+ /* Per-CPU zone: translate the base address, mark each CPU's copy. */
+ pcpu_item = zpcpu_base_to_offset(item);
+ for (i = 0; i <= mp_maxid; i++)
+ kasan_mark(zpcpu_get_cpu(pcpu_item, i), sz, rsz, 0);
+ }
+}
+
+/*
+ * Poison a freed item in the KASAN shadow map so that any subsequent
+ * access triggers a use-after-free report.  Mirror image of
+ * kasan_mark_item_valid(); for per-CPU zones every CPU's copy of the
+ * item is poisoned.
+ */
+static void
+kasan_mark_item_invalid(uma_zone_t zone, void *item)
+{
+ void *pcpu_item;
+ size_t sz;
+ int i;
+
+ if ((zone->uz_flags & UMA_ZONE_NOKASAN) != 0)
+ return;
+
+ sz = roundup2(zone->uz_size, KASAN_SHADOW_SCALE);
+ if ((zone->uz_flags & UMA_ZONE_PCPU) == 0) {
+ kasan_mark(item, 0, sz, KASAN_UMA_FREED);
+ } else {
+ pcpu_item = zpcpu_base_to_offset(item);
+ /*
+ * Tag with KASAN_UMA_FREED here as well: the non-per-CPU
+ * branch above does so, and passing 0 would mislabel
+ * per-CPU use-after-free accesses in KASAN reports.
+ */
+ for (i = 0; i <= mp_maxid; i++)
+ kasan_mark(zpcpu_get_cpu(pcpu_item, i), 0, sz,
+ KASAN_UMA_FREED);
+ }
+}
+
+/*
+ * Mark an entire slab's backing pages as accessible.  Invoked (see the
+ * keg_free_slab() hunk below in this commit) before item fini routines
+ * run and the slab's memory is returned to the VM.
+ */
+static void
+kasan_mark_slab_valid(uma_keg_t keg, void *mem)
+{
+ size_t sz;
+
+ if ((keg->uk_flags & UMA_ZONE_NOKASAN) == 0) {
+ /* A slab spans uk_ppera pages. */
+ sz = keg->uk_ppera * PAGE_SIZE;
+ kasan_mark(mem, sz, sz, 0);
+ }
+}
+
+/*
+ * Poison a freshly imported slab so its items are inaccessible until
+ * allocated.  For kegs without OFFPAGE headers, only the first uk_pgoff
+ * bytes are poisoned: the slab header embedded at mem + uk_pgoff must
+ * remain accessible for UMA's own bookkeeping.
+ */
+static void
+kasan_mark_slab_invalid(uma_keg_t keg, void *mem)
+{
+ size_t sz;
+
+ if ((keg->uk_flags & UMA_ZONE_NOKASAN) == 0) {
+ if ((keg->uk_flags & UMA_ZFLAG_OFFPAGE) != 0)
+ sz = keg->uk_ppera * PAGE_SIZE;
+ else
+ sz = keg->uk_pgoff;
+ kasan_mark(mem, 0, sz, KASAN_UMA_FREED);
+ }
+}
+#else /* !KASAN */
+/* Empty stubs so call sites need no #ifdef KASAN conditionals. */
+static void
+kasan_mark_item_valid(uma_zone_t zone __unused, void *item __unused)
+{
+}
+
+static void
+kasan_mark_item_invalid(uma_zone_t zone __unused, void *item __unused)
+{
+}
+
+static void
+kasan_mark_slab_valid(uma_keg_t keg __unused, void *mem __unused)
+{
+}
+
+static void
+kasan_mark_slab_invalid(uma_keg_t keg __unused, void *mem __unused)
+{
+}
+#endif /* KASAN */
+
/*
* Acquire the domain lock and record contention.
*/
@@ -1156,8 +1246,11 @@ bucket_drain(uma_zone_t zone, uma_bucket_t bucket)
zone->uz_size, NULL, SKIP_NONE);
}
if (zone->uz_fini)
- for (i = 0; i < bucket->ub_cnt; i++)
+ for (i = 0; i < bucket->ub_cnt; i++) {
+ kasan_mark_item_valid(zone, bucket->ub_bucket[i]);
zone->uz_fini(bucket->ub_bucket[i], zone->uz_size);
+ kasan_mark_item_invalid(zone, bucket->ub_bucket[i]);
+ }
zone->uz_release(zone->uz_arg, bucket->ub_bucket, bucket->ub_cnt);
if (zone->uz_max_items > 0)
zone_free_limit(zone, bucket->ub_cnt);
@@ -1362,6 +1455,7 @@ static void
keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
{
uint8_t *mem;
+ size_t size;
int i;
uint8_t flags;
@@ -1369,10 +1463,11 @@ keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
keg->uk_name, keg, slab, PAGE_SIZE * keg->uk_ppera);
mem = slab_data(slab, keg);
- flags = slab->us_flags;
- i = start;
+ size = PAGE_SIZE * keg->uk_ppera;
+
+ kasan_mark_slab_valid(keg, mem);
if (keg->uk_fini != NULL) {
- for (i--; i > -1; i--)
+ for (i = start - 1; i > -1; i--)
#ifdef INVARIANTS
/*
* trash_fini implies that dtor was trash_dtor. trash_fini
@@ -1387,11 +1482,13 @@ keg_free_slab(uma_keg_t keg, uma_slab_t slab, int start)
#endif
keg->uk_fini(slab_item(slab, keg, i), keg->uk_size);
}
- if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
+ flags = slab->us_flags;
+ if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
zone_free_item(slabzone(keg->uk_ipers), slab_tohashslab(slab),
NULL, SKIP_NONE);
- keg->uk_freef(mem, PAGE_SIZE * keg->uk_ppera, flags);
- uma_total_dec(PAGE_SIZE * keg->uk_ppera);
+ }
+ keg->uk_freef(mem, size, flags);
+ uma_total_dec(size);
}
static void
@@ -1535,7 +1632,6 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
int aflags)
{
uma_domain_t dom;
- uma_alloc allocf;
uma_slab_t slab;
unsigned long size;
uint8_t *mem;
@@ -1545,7 +1641,6 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
KASSERT(domain >= 0 && domain < vm_ndomains,
("keg_alloc_slab: domain %d out of range", domain));
- allocf = keg->uk_allocf;
slab = NULL;
mem = NULL;
if (keg->uk_flags & UMA_ZFLAG_OFFPAGE) {
@@ -1574,7 +1669,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
/* zone is passed for legacy reasons. */
size = keg->uk_ppera * PAGE_SIZE;
- mem = allocf(zone, size, domain, &sflags, aflags);
+ mem = keg->uk_allocf(zone, size, domain, &sflags, aflags);
if (mem == NULL) {
if (keg->uk_flags & UMA_ZFLAG_OFFPAGE)
zone_free_item(slabzone(keg->uk_ipers),
@@ -1589,7 +1684,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
/* Point the slab into the allocated memory */
if (!(keg->uk_flags & UMA_ZFLAG_OFFPAGE))
- slab = (uma_slab_t )(mem + keg->uk_pgoff);
+ slab = (uma_slab_t)(mem + keg->uk_pgoff);
else
slab_tohashslab(slab)->uhs_data = mem;
@@ -1617,6 +1712,7 @@ keg_alloc_slab(uma_keg_t keg, uma_zone_t zone, int domain, int flags,
goto fail;
}
}
+ kasan_mark_slab_invalid(keg, mem);
KEG_LOCK(keg, domain);
CTR3(KTR_UMA, "keg_alloc_slab: allocated slab %p for %s(%p)",
@@ -2604,9 +2700,12 @@ zone_ctor(void *mem, int size, void *udata, int flags)
STAILQ_INIT(&zdom->uzd_buckets);
}
-#ifdef INVARIANTS
+#if defined(INVARIANTS) && !defined(KASAN)
if (arg->uminit == trash_init && arg->fini == trash_fini)
zone->uz_flags |= UMA_ZFLAG_TRASH | UMA_ZFLAG_CTORDTOR;
+#elif defined(KASAN)
+ if ((arg->flags & (UMA_ZONE_NOFREE | UMA_ZFLAG_CACHE)) != 0)
+ arg->flags |= UMA_ZONE_NOKASAN;
#endif
/*
@@ -3029,7 +3128,7 @@ uma_zcreate(const char *name, size_t size, uma_ctor ctor, uma_dtor dtor,
args.dtor = dtor;
args.uminit = uminit;
args.fini = fini;
-#ifdef INVARIANTS
+#if defined(INVARIANTS) && !defined(KASAN)
/*
* Inject procedures which check for memory use after free if we are
* allowed to scramble the memory while it is not allocated. This
@@ -3186,12 +3285,17 @@ item_ctor(uma_zone_t zone, int uz_flags, int size, void *udata, int flags,
{
#ifdef INVARIANTS
bool skipdbg;
+#endif
+
+ kasan_mark_item_valid(zone, item);
+#ifdef INVARIANTS
skipdbg = uma_dbg_zskip(zone, item);
- if (!skipdbg && (zone->uz_flags & UMA_ZFLAG_TRASH) != 0 &&
+ if (!skipdbg && (uz_flags & UMA_ZFLAG_TRASH) != 0 &&
zone->uz_ctor != trash_ctor)
trash_ctor(item, size, udata, flags);
#endif
+
/* Check flags before loading ctor pointer. */
if (__predict_false((uz_flags & UMA_ZFLAG_CTORDTOR) != 0) &&
__predict_false(zone->uz_ctor != NULL) &&
@@ -3234,6 +3338,7 @@ item_dtor(uma_zone_t zone, void *item, int size, void *udata,
trash_dtor(item, size, udata);
#endif
}
+ kasan_mark_item_invalid(zone, item);
}
#ifdef NUMA
@@ -3955,7 +4060,7 @@ static uma_bucket_t
zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
{
uma_bucket_t bucket;
- int maxbucket, cnt;
+ int error, maxbucket, cnt;
CTR3(KTR_UMA, "zone_alloc_bucket zone %s(%p) domain %d", zone->uz_name,
zone, domain);
@@ -3990,10 +4095,15 @@ zone_alloc_bucket(uma_zone_t zone, void *udata, int domain, int flags)
if (bucket->ub_cnt != 0 && zone->uz_init != NULL) {
int i;
- for (i = 0; i < bucket->ub_cnt; i++)
- if (zone->uz_init(bucket->ub_bucket[i], zone->uz_size,
- flags) != 0)
+ for (i = 0; i < bucket->ub_cnt; i++) {
+ kasan_mark_item_valid(zone, bucket->ub_bucket[i]);
+ error = zone->uz_init(bucket->ub_bucket[i],
+ zone->uz_size, flags);
+ kasan_mark_item_invalid(zone, bucket->ub_bucket[i]);
+ if (error != 0)
break;
+ }
+
/*
* If we couldn't initialize the whole bucket, put the
* rest back onto the freelist.
@@ -4060,7 +4170,12 @@ zone_alloc_item(uma_zone_t zone, void *udata, int domain, int flags)
* to be both zone-init'd as well as zone-ctor'd.
*/
if (zone->uz_init != NULL) {
- if (zone->uz_init(item, zone->uz_size, flags) != 0) {
+ int error;
+
+ kasan_mark_item_valid(zone, item);
+ error = zone->uz_init(item, zone->uz_size, flags);
+ kasan_mark_item_invalid(zone, item);
+ if (error != 0) {
zone_free_item(zone, item, udata, SKIP_FINI | SKIP_CNT);
goto fail_cnt;
}
@@ -4542,8 +4657,11 @@ zone_free_item(uma_zone_t zone, void *item, void *udata, enum zfreeskip skip)
item_dtor(zone, item, zone->uz_size, udata, skip);
- if (skip < SKIP_FINI && zone->uz_fini)
+ if (skip < SKIP_FINI && zone->uz_fini) {
+ kasan_mark_item_valid(zone, item);
zone->uz_fini(item, zone->uz_size);
+ kasan_mark_item_invalid(zone, item);
+ }
zone->uz_release(zone->uz_arg, &item, 1);
More information about the dev-commits-src-main
mailing list