git: 3d3678627c31 - main - bhyve nvme: Check return value of mapped memory

From: Chuck Tuffli <chuck@FreeBSD.org>
Date: Sun, 14 Aug 2022 15:04:06 UTC
The branch main has been updated by chuck:

URL: https://cgit.FreeBSD.org/src/commit/?id=3d3678627c3112c94d174a8c51d8c058d02befb3

commit 3d3678627c3112c94d174a8c51d8c058d02befb3
Author:     Chuck Tuffli <chuck@FreeBSD.org>
AuthorDate: 2022-08-14 14:45:21 +0000
Commit:     Chuck Tuffli <chuck@FreeBSD.org>
CommitDate: 2022-08-14 14:45:21 +0000

    bhyve nvme: Check return value of mapped memory
    
    Fuzzing of bhyve using hyfuzz discovered a way to cause a segmentation
    fault in the NVMe emulation. If a guest specifies, in either the PRP1
    or PRP2 field of a command, a physical address that cannot be mapped
    from guest to host, the function paddr_guest2host() returns a NULL
    pointer. The NVMe emulation did not check for this error case, which
    allowed the segmentation fault to occur.
    
    The fix is to check for a NULL return value and indicate an error back
    to the guest (Data Transfer Error). While in the area, slightly
    refactor the write/read blockif function to use a common error exit
    path; a sketch of both patterns follows the diffstat below.
    
    PR:             256321
    Reported by:    Cheolwoo Myung <cwmyung@snu.ac.kr>
    Reviewed by:    imp, jhb
    Differential Revision:  https://reviews.freebsd.org/D35452
---
 usr.sbin/bhyve/pci_nvme.c | 21 +++++++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)
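
For illustration, a minimal self-contained sketch of the failure mode and
the check the commit adds. translate() and append_iov() are hypothetical
stand-ins for paddr_guest2host() and pci_nvme_append_iov_req(), not the
bhyve API:

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/uio.h>

    /*
     * Stand-in for paddr_guest2host(): returns NULL when the guest
     * physical address cannot be mapped into the host address space.
     */
    static void *
    translate(uint64_t gpaddr, size_t size)
    {
        (void)gpaddr;
        (void)size;
        return (NULL);    /* e.g. the guest passed a bogus PRP entry */
    }

    /*
     * The pattern the commit adds: fail the request instead of letting
     * a NULL iov_base reach blockif and fault. In bhyve, the caller
     * turns this -1 into a Data Transfer Error status for the guest.
     */
    static int
    append_iov(struct iovec *iov, uint64_t gpaddr, size_t size)
    {
        iov->iov_base = translate(gpaddr, size);
        if (iov->iov_base == NULL)
            return (-1);
        iov->iov_len = size;
        return (0);
    }

    int
    main(void)
    {
        struct iovec iov;

        if (append_iov(&iov, 0xdeadbeef000ULL, 4096) != 0)
            printf("unmappable GPA rejected, not dereferenced\n");
        return (0);
    }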

diff --git a/usr.sbin/bhyve/pci_nvme.c b/usr.sbin/bhyve/pci_nvme.c
index 5f54e4107c10..e10c8bd437e2 100644
--- a/usr.sbin/bhyve/pci_nvme.c
+++ b/usr.sbin/bhyve/pci_nvme.c
@@ -2196,6 +2196,8 @@ pci_nvme_append_iov_req(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req,
 		req->io_req.br_iov[iovidx].iov_base =
 		    paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
 				     req->prev_gpaddr, size);
+		if (req->io_req.br_iov[iovidx].iov_base == NULL)
+			return (-1);
 
 		req->prev_size += size;
 		req->io_req.br_resid += size;
@@ -2212,6 +2214,8 @@ pci_nvme_append_iov_req(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req,
 		req->io_req.br_iov[iovidx].iov_base =
 		    paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
 				     gpaddr, size);
+		if (req->io_req.br_iov[iovidx].iov_base == NULL)
+			return (-1);
 
 		req->io_req.br_iov[iovidx].iov_len = size;
 
@@ -2402,8 +2406,7 @@ nvme_write_read_blockif(struct pci_nvme_softc *sc,
 	size = MIN(PAGE_SIZE - (prp1 % PAGE_SIZE), bytes);
 	if (pci_nvme_append_iov_req(sc, req, prp1,
 	    size, is_write, offset)) {
-		pci_nvme_status_genc(&status,
-		    NVME_SC_DATA_TRANSFER_ERROR);
+		err = -1;
 		goto out;
 	}
 
@@ -2416,8 +2419,7 @@ nvme_write_read_blockif(struct pci_nvme_softc *sc,
 		size = bytes;
 		if (pci_nvme_append_iov_req(sc, req, prp2,
 		    size, is_write, offset)) {
-			pci_nvme_status_genc(&status,
-			    NVME_SC_DATA_TRANSFER_ERROR);
+			err = -1;
 			goto out;
 		}
 	} else {
@@ -2433,6 +2435,10 @@ nvme_write_read_blockif(struct pci_nvme_softc *sc,
 
 				prp_list = paddr_guest2host(vmctx, prp,
 				    PAGE_SIZE - (prp % PAGE_SIZE));
+				if (prp_list == NULL) {
+					err = -1;
+					goto out;
+				}
 				last = prp_list + (NVME_PRP2_ITEMS - 1);
 			}
 
@@ -2440,8 +2446,7 @@ nvme_write_read_blockif(struct pci_nvme_softc *sc,
 
 			if (pci_nvme_append_iov_req(sc, req, *prp_list,
 			    size, is_write, offset)) {
-				pci_nvme_status_genc(&status,
-				    NVME_SC_DATA_TRANSFER_ERROR);
+				err = -1;
 				goto out;
 			}
 
@@ -2456,10 +2461,10 @@ nvme_write_read_blockif(struct pci_nvme_softc *sc,
 		err = blockif_write(nvstore->ctx, &req->io_req);
 	else
 		err = blockif_read(nvstore->ctx, &req->io_req);
-
+out:
 	if (err)
 		pci_nvme_status_genc(&status, NVME_SC_DATA_TRANSFER_ERROR);
-out:
+
 	return (status);
 }
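
The last hunk moves the "out:" label above the status assignment so that
mapping failures and blockif errors funnel through a single exit path. A
condensed, self-contained sketch of that common-exit idiom, with
illustrative names and status values only (not the NVMe codes or the
bhyve sources):

    #include <stdio.h>

    #define SC_SUCCESS          0x00
    #define SC_DATA_XFER_ERROR  0x04    /* illustrative, not NVME_SC_* */

    static int step(int fail) { return (fail ? -1 : 0); }

    static int
    do_io(int fail_map, int fail_io)
    {
        int err;
        int status = SC_SUCCESS;

        /* Any failure just sets err and jumps to the shared exit. */
        if ((err = step(fail_map)) != 0)
            goto out;
        err = step(fail_io);
    out:
        if (err)
            status = SC_DATA_XFER_ERROR;
        return (status);
    }

    int
    main(void)
    {
        /* Success path returns 0; a mapping failure returns 0x4. */
        printf("ok=%#x mapfail=%#x\n", do_io(0, 0), do_io(1, 0));
        return (0);
    }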