svn commit: r367525 - in stable/12: share/man/man4 sys/amd64/conf sys/conf sys/dev/qat sys/modules sys/modules/qat
Mark Johnston
markj at FreeBSD.org
Mon Nov 9 15:36:13 UTC 2020
Author: markj
Date: Mon Nov 9 15:36:11 2020
New Revision: 367525
URL: https://svnweb.freebsd.org/changeset/base/367525
Log:
MFC r367386:
Add qat(4), a driver for the cryptographic acceleration functions of Intel QuickAssist devices
Added:
stable/12/share/man/man4/qat.4
- copied unchanged from r367386, head/share/man/man4/qat.4
stable/12/sys/dev/qat/
- copied from r367386, head/sys/dev/qat/
stable/12/sys/modules/qat/
- copied from r367386, head/sys/modules/qat/
Modified:
stable/12/share/man/man4/Makefile
stable/12/sys/amd64/conf/NOTES
stable/12/sys/conf/files.amd64
stable/12/sys/dev/qat/qat.c
stable/12/sys/dev/qat/qat_hw15.c
stable/12/sys/dev/qat/qat_hw15var.h
stable/12/sys/dev/qat/qat_hw17.c
stable/12/sys/dev/qat/qat_hw17var.h
stable/12/sys/dev/qat/qatvar.h
stable/12/sys/modules/Makefile
Directory Properties:
stable/12/ (props changed)
Modified: stable/12/share/man/man4/Makefile
==============================================================================
--- stable/12/share/man/man4/Makefile Mon Nov 9 14:15:06 2020 (r367524)
+++ stable/12/share/man/man4/Makefile Mon Nov 9 15:36:11 2020 (r367525)
@@ -442,6 +442,7 @@ MAN= aac.4 \
pty.4 \
puc.4 \
pwmc.4 \
+ ${_qat.4} \
${_qlxge.4} \
${_qlxgb.4} \
${_qlxgbe.4} \
@@ -845,6 +846,7 @@ _nvd.4= nvd.4
_nvme.4= nvme.4
_nvram.4= nvram.4
_padlock.4= padlock.4
+_qat.4= qat.4
_rr232x.4= rr232x.4
_speaker.4= speaker.4
_spkr.4= spkr.4
Copied: stable/12/share/man/man4/qat.4 (from r367386, head/share/man/man4/qat.4)
==============================================================================
--- /dev/null 00:00:00 1970 (empty, because file is newly added)
+++ stable/12/share/man/man4/qat.4 Mon Nov 9 15:36:11 2020 (r367525, copy of r367386, head/share/man/man4/qat.4)
@@ -0,0 +1,99 @@
+.\"-
+.\" Copyright (c) 2020 Rubicon Communications, LLC (Netgate)
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions
+.\" are met:
+.\" 1. Redistributions of source code must retain the above copyright
+.\" notice, this list of conditions and the following disclaimer.
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+.\" ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+.\" FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+.\" DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+.\" OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+.\" HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+.\" LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+.\" OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+.\" SUCH DAMAGE.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd November 5, 2020
+.Dt QAT 4
+.Os
+.Sh NAME
+.Nm qat
+.Nd Intel QuickAssist Technology (QAT) driver
+.Sh SYNOPSIS
+To compile this driver into the kernel,
+place the following lines in your
+kernel configuration file:
+.Bd -ragged -offset indent
+.Cd "device crypto"
+.Cd "device cryptodev"
+.Cd "device qat"
+.Ed
+.Pp
+Alternatively, to load the driver as a
+module at boot time, place the following lines in
+.Xr loader.conf 5 :
+.Bd -literal -offset indent
+qat_load="YES"
+qat_c2xxxfw_load="YES"
+qat_c3xxxfw_load="YES"
+qat_c62xfw_load="YES"
+qat_d15xxfw_load="YES"
+qat_dh895xccfw_load="YES"
+.Ed
+.Sh DESCRIPTION
+The
+.Nm
+driver implements
+.Xr crypto 4
+support for some of the cryptographic acceleration functions of the Intel
+QuickAssist device.
+The
+.Nm
+driver supports the QAT devices integrated with Atom C2000 and C3000 and Xeon
+C620 and D-1500 chipsets, and the Intel QAT Adapter 8950.
+It can accelerate AES in CBC, CTR, XTS (except for the C2000) and GCM modes,
+and can perform authenticated encryption combining the CBC, CTR and XTS modes
+with SHA1-HMAC and SHA2-HMAC.
+The
+.Nm
+driver can also compute SHA1 and SHA2 digests.
+.Sh SEE ALSO
+.Xr crypto 4 ,
+.Xr ipsec 4 ,
+.Xr pci 4 ,
+.Xr random 4 ,
+.Xr crypto 7 ,
+.Xr crypto 9
+.Sh HISTORY
+The
+.Nm
+driver first appeared in
+.Fx 13.0 .
+.Sh AUTHORS
+The
+.Nm
+driver was written for
+.Nx
+by
+.An Hikaru Abe Aq Mt hikaru@iij.ad.jp
+and ported to
+.Fx
+by
+.An Mark Johnston Aq Mt markj@FreeBSD.org .
+.Sh BUGS
+Some Atom C2000 QAT devices have two acceleration engines instead of one.
+The
+.Nm
+driver currently misbehaves when both are enabled and thus does not enable
+the second acceleration engine if one is present.
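The synopsis above is enough to attach the driver. As a quick smoke
test of the algorithms it registers, the following userland sketch
pushes a single AES-CBC request through cryptodev(4). It is not part
of this commit; it assumes only the stock stable/12 /dev/crypto ioctl
interface, where a private descriptor is first cloned with CRIOGET:

    #include <sys/types.h>
    #include <sys/ioctl.h>
    #include <crypto/cryptodev.h>
    #include <err.h>
    #include <fcntl.h>
    #include <stdint.h>
    #include <string.h>
    #include <unistd.h>

    int
    main(void)
    {
            struct session_op sess;
            struct crypt_op cop;
            uint8_t key[16] = { 0 }, iv[16] = { 0 }, buf[64] = { 0 };
            int cfd, fd;

            fd = open("/dev/crypto", O_RDWR);
            if (fd < 0)
                    err(1, "open(/dev/crypto)");
            /* stable/12 hands out per-open state via a cloned fd. */
            if (ioctl(fd, CRIOGET, &cfd) == -1)
                    err(1, "CRIOGET");

            memset(&sess, 0, sizeof(sess));
            sess.cipher = CRYPTO_AES_CBC;
            sess.keylen = sizeof(key);      /* bytes at this interface */
            sess.key = (caddr_t)key;
            if (ioctl(cfd, CIOCGSESSION, &sess) == -1)
                    err(1, "CIOCGSESSION");

            memset(&cop, 0, sizeof(cop));
            cop.ses = sess.ses;
            cop.op = COP_ENCRYPT;
            cop.len = sizeof(buf);          /* a multiple of the AES block size */
            cop.src = (caddr_t)buf;
            cop.dst = (caddr_t)buf;         /* encrypt in place */
            cop.iv = (caddr_t)iv;
            if (ioctl(cfd, CIOCCRYPT, &cop) == -1)
                    err(1, "CIOCCRYPT");

            if (ioctl(cfd, CIOCFSESSION, &sess.ses) == -1)
                    err(1, "CIOCFSESSION");
            close(cfd);
            close(fd);
            return (0);
    }

With qat(4) loaded and a supported device present, crypto(4) should
prefer the hardware driver when it creates the session.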
Modified: stable/12/sys/amd64/conf/NOTES
==============================================================================
--- stable/12/sys/amd64/conf/NOTES Mon Nov 9 14:15:06 2020 (r367524)
+++ stable/12/sys/amd64/conf/NOTES Mon Nov 9 15:36:11 2020 (r367525)
@@ -496,6 +496,10 @@ device nvd # expose NVMe namespaces as dis
device pmspcv
#
+# Intel QuickAssist
+device qat
+
+#
# SafeNet crypto driver: can be moved to the MI NOTES as soon as
# it's tested on a big-endian machine
#
Modified: stable/12/sys/conf/files.amd64
==============================================================================
--- stable/12/sys/conf/files.amd64 Mon Nov 9 14:15:06 2020 (r367524)
+++ stable/12/sys/conf/files.amd64 Mon Nov 9 15:36:11 2020 (r367525)
@@ -419,6 +419,15 @@ dev/nvme/nvme_util.c optional nvme
dev/nvram/nvram.c optional nvram isa
dev/random/ivy.c optional rdrand_rng
dev/random/nehemiah.c optional padlock_rng
+dev/qat/qat.c optional qat
+dev/qat/qat_ae.c optional qat
+dev/qat/qat_c2xxx.c optional qat
+dev/qat/qat_c3xxx.c optional qat
+dev/qat/qat_c62x.c optional qat
+dev/qat/qat_d15xx.c optional qat
+dev/qat/qat_dh895xcc.c optional qat
+dev/qat/qat_hw15.c optional qat
+dev/qat/qat_hw17.c optional qat
dev/qlxge/qls_dbg.c optional qlxge pci
dev/qlxge/qls_dump.c optional qlxge pci
dev/qlxge/qls_hw.c optional qlxge pci
Modified: stable/12/sys/dev/qat/qat.c
==============================================================================
--- head/sys/dev/qat/qat.c Thu Nov 5 15:55:23 2020 (r367386)
+++ stable/12/sys/dev/qat/qat.c Mon Nov 9 15:36:11 2020 (r367525)
@@ -281,7 +281,7 @@ static int qat_start(struct device *);
static int qat_detach(device_t);
static int qat_newsession(device_t dev, crypto_session_t cses,
- const struct crypto_session_params *csp);
+ struct cryptoini *cri);
static void qat_freesession(device_t dev, crypto_session_t cses);
static int qat_setup_msix_intr(struct qat_softc *);
@@ -1259,11 +1259,11 @@ static enum hw_cipher_algo
qat_aes_cipher_algo(int klen)
{
switch (klen) {
- case HW_AES_128_KEY_SZ:
+ case HW_AES_128_KEY_SZ * 8:
return HW_CIPHER_ALGO_AES128;
- case HW_AES_192_KEY_SZ:
+ case HW_AES_192_KEY_SZ * 8:
return HW_CIPHER_ALGO_AES192;
- case HW_AES_256_KEY_SZ:
+ case HW_AES_256_KEY_SZ * 8:
return HW_CIPHER_ALGO_AES256;
default:
panic("invalid key length %d", klen);
@@ -1341,10 +1341,33 @@ qat_crypto_load_auth_session(const struct qat_crypto_d
(*hash_def)->qshd_alg->qshai_digest_len);
}
+static bool
+qat_is_hash(int alg)
+{
+ switch (alg) {
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_AES_128_NIST_GMAC:
+ case CRYPTO_AES_192_NIST_GMAC:
+ case CRYPTO_AES_256_NIST_GMAC:
+ return true;
+ default:
+ return false;
+ }
+}
+
struct qat_crypto_load_cb_arg {
struct qat_session *qs;
struct qat_sym_cookie *qsc;
struct cryptop *crp;
+ struct cryptodesc *enc;
+ struct cryptodesc *mac;
int error;
};
@@ -1353,6 +1376,7 @@ qat_crypto_load_cb(void *_arg, bus_dma_segment_t *segs
int error)
{
struct cryptop *crp;
+ struct cryptodesc *enc, *mac;
struct flat_buffer_desc *flatbuf;
struct qat_crypto_load_cb_arg *arg;
struct qat_session *qs;
@@ -1368,6 +1392,8 @@ qat_crypto_load_cb(void *_arg, bus_dma_segment_t *segs
}
crp = arg->crp;
+ enc = arg->enc;
+ mac = arg->mac;
qs = arg->qs;
qsc = arg->qsc;
@@ -1377,16 +1403,16 @@ qat_crypto_load_cb(void *_arg, bus_dma_segment_t *segs
* padded to a multiple of 16 bytes. To satisfy these
* constraints we bounce the AAD into a per-request buffer.
*/
- crypto_copydata(crp, crp->crp_aad_start, crp->crp_aad_length,
- qsc->qsc_gcm_aad);
- memset(qsc->qsc_gcm_aad + crp->crp_aad_length, 0,
- roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) -
- crp->crp_aad_length);
- skip = crp->crp_payload_start;
- } else if (crp->crp_aad_length > 0) {
- skip = crp->crp_aad_start;
+ crypto_copydata(crp->crp_flags, crp->crp_buf, mac->crd_skip,
+ mac->crd_len, qsc->qsc_gcm_aad);
+ memset(qsc->qsc_gcm_aad + mac->crd_len, 0,
+ roundup2(mac->crd_len, QAT_AES_GCM_AAD_ALIGN) -
+ mac->crd_len);
+ skip = enc->crd_skip;
+ } else if (mac != NULL) {
+ skip = mac->crd_skip;
} else {
- skip = crp->crp_payload_start;
+ skip = enc->crd_skip;
}
for (iseg = oseg = 0; iseg < nseg; iseg++) {
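For orientation: under the old API an AES-GCM request describes its AAD
with the MAC descriptor, so mac->crd_skip/crd_len above play the role
that crp_aad_start/crp_aad_length play in head:

    /*
     * Old-API AES-GCM request layout, as consumed above:
     *   mac descriptor: crd_skip/crd_len cover the AAD region
     *   enc descriptor: crd_skip/crd_len cover the payload proper
     * The AAD is bounced into qsc_gcm_aad and zero-padded to the next
     * QAT_AES_GCM_AAD_ALIGN (16-byte) boundary.
     */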
@@ -1411,21 +1437,59 @@ qat_crypto_load_cb(void *_arg, bus_dma_segment_t *segs
qsc->qsc_buf_list.num_buffers = oseg;
}
+static void
+qat_crypto_load_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
+ bus_size_t mapsize __unused, int error)
+{
+ qat_crypto_load_cb(arg, segs, nseg, error);
+}
+
static int
qat_crypto_load(struct qat_session *qs, struct qat_sym_cookie *qsc,
- struct qat_crypto_desc const *desc, struct cryptop *crp)
+ struct cryptop *crp, struct cryptodesc *enc, struct cryptodesc *mac)
{
struct qat_crypto_load_cb_arg arg;
int error;
- crypto_read_iv(crp, qsc->qsc_iv_buf);
+ if (enc != NULL && (enc->crd_flags & CRD_F_ENCRYPT) != 0) {
+ if ((enc->crd_flags & CRD_F_IV_EXPLICIT) != 0)
+ memcpy(qsc->qsc_iv_buf, enc->crd_iv, qs->qs_ivlen);
+ else
+ arc4rand(qsc->qsc_iv_buf, qs->qs_ivlen, 0);
+ if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0) {
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ enc->crd_inject, qs->qs_ivlen, qsc->qsc_iv_buf);
+ }
+ } else if (enc != NULL) {
+ if ((enc->crd_flags & CRD_F_IV_EXPLICIT) != 0) {
+ memcpy(qsc->qsc_iv_buf, enc->crd_iv, qs->qs_ivlen);
+ } else {
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ enc->crd_inject, qs->qs_ivlen, qsc->qsc_iv_buf);
+ }
+ }
+
arg.crp = crp;
+ arg.enc = enc;
+ arg.mac = mac;
arg.qs = qs;
arg.qsc = qsc;
arg.error = 0;
- error = bus_dmamap_load_crp(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
- crp, qat_crypto_load_cb, &arg, BUS_DMA_NOWAIT);
+
+ if ((crp->crp_flags & CRYPTO_F_IOV) != 0) {
+ error = bus_dmamap_load_uio(qsc->qsc_buf_dma_tag,
+ qsc->qsc_buf_dmamap, crp->crp_uio,
+ qat_crypto_load_cb2, &arg, BUS_DMA_NOWAIT);
+ } else if ((crp->crp_flags & CRYPTO_F_IMBUF) != 0) {
+ error = bus_dmamap_load_mbuf(qsc->qsc_buf_dma_tag,
+ qsc->qsc_buf_dmamap, crp->crp_mbuf,
+ qat_crypto_load_cb2, &arg, BUS_DMA_NOWAIT);
+ } else {
+ error = bus_dmamap_load(qsc->qsc_buf_dma_tag,
+ qsc->qsc_buf_dmamap, crp->crp_buf, crp->crp_ilen,
+ qat_crypto_load_cb, &arg, BUS_DMA_NOWAIT);
+ }
if (error == 0)
error = arg.error;
return error;
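Two old-API conventions meet in this hunk. IV placement is driven by
descriptor flags rather than head's crypto_read_iv(), and stable/12 has
no bus_dmamap_load_crp(), so the buffer type is dispatched by hand on
CRYPTO_F_IOV and CRYPTO_F_IMBUF (load_uio and load_mbuf take a
bus_dmamap_callback2_t, hence the qat_crypto_load_cb2 trampoline). The
IV rules implemented above are:

    /*
     * stable/12 IV handling:
     *   encrypt, CRD_F_IV_EXPLICIT:       IV supplied in crd_iv
     *   encrypt, otherwise:               driver generates one (arc4rand)
     *   encrypt, CRD_F_IV_PRESENT clear:  the IV used is also written
     *                                     back to the buffer at crd_inject
     *   decrypt, CRD_F_IV_EXPLICIT:       IV supplied in crd_iv
     *   decrypt, otherwise:               IV read from the buffer at
     *                                     crd_inject
     */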
@@ -1615,6 +1679,12 @@ qat_crypto_deinit(struct qat_softc *sc)
}
}
+static void
+qat_crypto_register(struct qat_softc *sc, int alg)
+{
+ (void)crypto_register(sc->sc_crypto.qcy_cid, alg, QAT_MAXLEN, 0);
+}
+
static int
qat_crypto_start(struct qat_softc *sc)
{
@@ -1629,6 +1699,22 @@ qat_crypto_start(struct qat_softc *sc)
return ENOENT;
}
+ qat_crypto_register(sc, CRYPTO_AES_CBC);
+ qat_crypto_register(sc, CRYPTO_AES_ICM);
+ qat_crypto_register(sc, CRYPTO_AES_XTS);
+ qat_crypto_register(sc, CRYPTO_AES_NIST_GCM_16);
+ qat_crypto_register(sc, CRYPTO_AES_128_NIST_GMAC);
+ qat_crypto_register(sc, CRYPTO_AES_192_NIST_GMAC);
+ qat_crypto_register(sc, CRYPTO_AES_256_NIST_GMAC);
+ qat_crypto_register(sc, CRYPTO_SHA1);
+ qat_crypto_register(sc, CRYPTO_SHA1_HMAC);
+ qat_crypto_register(sc, CRYPTO_SHA2_256);
+ qat_crypto_register(sc, CRYPTO_SHA2_256_HMAC);
+ qat_crypto_register(sc, CRYPTO_SHA2_384);
+ qat_crypto_register(sc, CRYPTO_SHA2_384_HMAC);
+ qat_crypto_register(sc, CRYPTO_SHA2_512);
+ qat_crypto_register(sc, CRYPTO_SHA2_512_HMAC);
+
return 0;
}
@@ -1671,16 +1757,19 @@ qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg,
error = 0;
if ((auth_sz = qs->qs_auth_mlen) != 0) {
- if ((crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) != 0) {
- crypto_copydata(crp, crp->crp_digest_start,
- auth_sz, icv);
+ if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128 &&
+ (qsc->qsc_enc->crd_flags & CRD_F_ENCRYPT) == 0) {
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ qsc->qsc_mac->crd_inject, auth_sz,
+ icv);
if (timingsafe_bcmp(icv, qsc->qsc_auth_res,
auth_sz) != 0) {
error = EBADMSG;
}
} else {
- crypto_copyback(crp, crp->crp_digest_start,
- auth_sz, qsc->qsc_auth_res);
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ qsc->qsc_mac->crd_inject, auth_sz,
+ qsc->qsc_auth_res);
}
}
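Digest handling inverts here because the old framework has no
CRYPTO_OP_VERIFY_DIGEST request flag; the driver decides from session
state instead:

    /*
     * Completion path on stable/12, as above:
     *   GCM decryption:  read the expected tag from the buffer at
     *                    mac->crd_inject and compare it with the
     *                    computed one via timingsafe_bcmp(); fail the
     *                    request with EBADMSG on mismatch.
     *   everything else: copy the computed digest out to crd_inject
     *                    and leave verification to the caller.
     */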
@@ -1706,9 +1795,9 @@ qat_crypto_sym_rxintr(struct qat_softc *sc, void *arg,
}
static int
-qat_probesession(device_t dev, const struct crypto_session_params *csp)
+qat_probesession(device_t dev, struct cryptoini *enc, struct cryptoini *mac)
{
- if (csp->csp_cipher_alg == CRYPTO_AES_XTS &&
+ if (enc != NULL && enc->cri_alg == CRYPTO_AES_XTS &&
qat_lookup(dev)->qatp_chip == QAT_CHIP_C2XXX) {
/*
* AES-XTS is not supported by the NanoQAT.
@@ -1716,24 +1805,30 @@ qat_probesession(device_t dev, const struct crypto_ses
return EINVAL;
}
- switch (csp->csp_mode) {
- case CSP_MODE_CIPHER:
- switch (csp->csp_cipher_alg) {
- case CRYPTO_AES_CBC:
- case CRYPTO_AES_ICM:
- if (csp->csp_ivlen != AES_BLOCK_LEN)
+ if (enc != NULL) {
+ switch (enc->cri_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ if (mac == NULL ||
+ (mac->cri_alg != CRYPTO_AES_128_NIST_GMAC &&
+ mac->cri_alg != CRYPTO_AES_192_NIST_GMAC &&
+ mac->cri_alg != CRYPTO_AES_256_NIST_GMAC))
return EINVAL;
break;
+ case CRYPTO_AES_CBC:
+ case CRYPTO_AES_ICM:
case CRYPTO_AES_XTS:
- if (csp->csp_ivlen != AES_XTS_IV_LEN)
+ if (mac != NULL &&
+ mac->cri_alg != CRYPTO_SHA1_HMAC &&
+ mac->cri_alg != CRYPTO_SHA2_256_HMAC &&
+ mac->cri_alg != CRYPTO_SHA2_384_HMAC &&
+ mac->cri_alg != CRYPTO_SHA2_512_HMAC)
return EINVAL;
break;
default:
return EINVAL;
}
- break;
- case CSP_MODE_DIGEST:
- switch (csp->csp_auth_alg) {
+ } else {
+ switch (mac->cri_alg) {
case CRYPTO_SHA1:
case CRYPTO_SHA1_HMAC:
case CRYPTO_SHA2_256:
@@ -1743,59 +1838,18 @@ qat_probesession(device_t dev, const struct crypto_ses
case CRYPTO_SHA2_512:
case CRYPTO_SHA2_512_HMAC:
break;
- case CRYPTO_AES_NIST_GMAC:
- if (csp->csp_ivlen != AES_GCM_IV_LEN)
- return EINVAL;
- break;
default:
return EINVAL;
}
- break;
- case CSP_MODE_AEAD:
- switch (csp->csp_cipher_alg) {
- case CRYPTO_AES_NIST_GCM_16:
- if (csp->csp_ivlen != AES_GCM_IV_LEN)
- return EINVAL;
- break;
- default:
- return EINVAL;
- }
- break;
- case CSP_MODE_ETA:
- switch (csp->csp_auth_alg) {
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- switch (csp->csp_cipher_alg) {
- case CRYPTO_AES_CBC:
- case CRYPTO_AES_ICM:
- if (csp->csp_ivlen != AES_BLOCK_LEN)
- return EINVAL;
- break;
- case CRYPTO_AES_XTS:
- if (csp->csp_ivlen != AES_XTS_IV_LEN)
- return EINVAL;
- break;
- default:
- return EINVAL;
- }
- break;
- default:
- return EINVAL;
- }
- break;
- default:
- return EINVAL;
}
- return CRYPTODEV_PROBE_HARDWARE;
+ return 0;
}
static int
-qat_newsession(device_t dev, crypto_session_t cses,
- const struct crypto_session_params *csp)
+qat_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
{
+ struct cryptoini *enc, *mac;
struct qat_crypto *qcy;
struct qat_dmamem *qdm;
struct qat_session *qs;
@@ -1807,6 +1861,26 @@ qat_newsession(device_t dev, crypto_session_t cses,
qs = crypto_get_driver_session(cses);
qcy = &sc->sc_crypto;
+ enc = mac = NULL;
+ if (qat_is_hash(cri->cri_alg))
+ mac = cri;
+ else
+ enc = cri;
+ cri = cri->cri_next;
+
+ if (cri != NULL) {
+ if (enc == NULL && !qat_is_hash(cri->cri_alg))
+ enc = cri;
+ if (mac == NULL && qat_is_hash(cri->cri_alg))
+ mac = cri;
+ if (cri->cri_next != NULL || !(enc != NULL && mac != NULL))
+ return EINVAL;
+ }
+
+ error = qat_probesession(dev, enc, mac);
+ if (error != 0)
+ return error;
+
qdm = &qs->qs_desc_mem;
error = qat_alloc_dmamem(sc, qdm, QAT_MAXSEG,
sizeof(struct qat_crypto_desc) * 2, QAT_OPTIMAL_ALIGN);
@@ -1830,91 +1904,103 @@ qat_newsession(device_t dev, crypto_session_t cses,
qs->qs_status = QAT_SESSION_STATUS_ACTIVE;
qs->qs_inflight = 0;
- qs->qs_cipher_key = csp->csp_cipher_key;
- qs->qs_cipher_klen = csp->csp_cipher_klen;
- qs->qs_auth_key = csp->csp_auth_key;
- qs->qs_auth_klen = csp->csp_auth_klen;
+ if (enc != NULL) {
+ qs->qs_cipher_key = enc->cri_key;
+ qs->qs_cipher_klen = enc->cri_klen / 8;
+ }
+ if (mac != NULL) {
+ qs->qs_auth_key = mac->cri_key;
+ qs->qs_auth_klen = mac->cri_klen / 8;
+ }
- switch (csp->csp_cipher_alg) {
- case CRYPTO_AES_CBC:
- qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
- qs->qs_cipher_mode = HW_CIPHER_CBC_MODE;
- break;
- case CRYPTO_AES_ICM:
- qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
- qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
- break;
- case CRYPTO_AES_XTS:
- qs->qs_cipher_algo =
- qat_aes_cipher_algo(csp->csp_cipher_klen / 2);
- qs->qs_cipher_mode = HW_CIPHER_XTS_MODE;
- break;
- case CRYPTO_AES_NIST_GCM_16:
- qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_cipher_klen);
- qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
- qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
- qs->qs_auth_mode = HW_AUTH_MODE1;
- break;
- case 0:
- break;
- default:
- panic("%s: unhandled cipher algorithm %d", __func__,
- csp->csp_cipher_alg);
+ if (enc != NULL) {
+ switch (enc->cri_alg) {
+ case CRYPTO_AES_CBC:
+ qs->qs_cipher_algo = qat_aes_cipher_algo(enc->cri_klen);
+ qs->qs_cipher_mode = HW_CIPHER_CBC_MODE;
+ qs->qs_ivlen = AES_BLOCK_LEN;
+ break;
+ case CRYPTO_AES_ICM:
+ qs->qs_cipher_algo = qat_aes_cipher_algo(enc->cri_klen);
+ qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
+ qs->qs_ivlen = AES_BLOCK_LEN;
+ break;
+ case CRYPTO_AES_XTS:
+ qs->qs_cipher_algo =
+ qat_aes_cipher_algo(enc->cri_klen / 2);
+ qs->qs_cipher_mode = HW_CIPHER_XTS_MODE;
+ qs->qs_ivlen = AES_XTS_IV_LEN;
+ break;
+ case CRYPTO_AES_NIST_GCM_16:
+ qs->qs_cipher_algo = qat_aes_cipher_algo(enc->cri_klen);
+ qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
+ qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+ qs->qs_ivlen = AES_GCM_IV_LEN;
+ break;
+ case 0:
+ break;
+ default:
+ panic("%s: unhandled cipher algorithm %d", __func__,
+ enc->cri_alg);
+ }
}
- switch (csp->csp_auth_alg) {
- case CRYPTO_SHA1_HMAC:
- qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
- qs->qs_auth_mode = HW_AUTH_MODE1;
- break;
- case CRYPTO_SHA1:
- qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
- qs->qs_auth_mode = HW_AUTH_MODE0;
- break;
- case CRYPTO_SHA2_256_HMAC:
- qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
- qs->qs_auth_mode = HW_AUTH_MODE1;
- break;
- case CRYPTO_SHA2_256:
- qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
- qs->qs_auth_mode = HW_AUTH_MODE0;
- break;
- case CRYPTO_SHA2_384_HMAC:
- qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
- qs->qs_auth_mode = HW_AUTH_MODE1;
- break;
- case CRYPTO_SHA2_384:
- qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
- qs->qs_auth_mode = HW_AUTH_MODE0;
- break;
- case CRYPTO_SHA2_512_HMAC:
- qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
- qs->qs_auth_mode = HW_AUTH_MODE1;
- break;
- case CRYPTO_SHA2_512:
- qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
- qs->qs_auth_mode = HW_AUTH_MODE0;
- break;
- case CRYPTO_AES_NIST_GMAC:
- qs->qs_cipher_algo = qat_aes_cipher_algo(csp->csp_auth_klen);
- qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
- qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
- qs->qs_auth_mode = HW_AUTH_MODE1;
+ if (mac != NULL) {
+ switch (mac->cri_alg) {
+ case CRYPTO_SHA1_HMAC:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+ break;
+ case CRYPTO_SHA1:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA1;
+ qs->qs_auth_mode = HW_AUTH_MODE0;
+ break;
+ case CRYPTO_SHA2_256_HMAC:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+ break;
+ case CRYPTO_SHA2_256:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA256;
+ qs->qs_auth_mode = HW_AUTH_MODE0;
+ break;
+ case CRYPTO_SHA2_384_HMAC:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+ break;
+ case CRYPTO_SHA2_384:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA384;
+ qs->qs_auth_mode = HW_AUTH_MODE0;
+ break;
+ case CRYPTO_SHA2_512_HMAC:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
+ break;
+ case CRYPTO_SHA2_512:
+ qs->qs_auth_algo = HW_AUTH_ALGO_SHA512;
+ qs->qs_auth_mode = HW_AUTH_MODE0;
+ break;
+ case CRYPTO_AES_128_NIST_GMAC:
+ case CRYPTO_AES_192_NIST_GMAC:
+ case CRYPTO_AES_256_NIST_GMAC:
+ qs->qs_cipher_algo = qat_aes_cipher_algo(mac->cri_klen);
+ qs->qs_cipher_mode = HW_CIPHER_CTR_MODE;
+ qs->qs_auth_algo = HW_AUTH_ALGO_GALOIS_128;
+ qs->qs_auth_mode = HW_AUTH_MODE1;
- qs->qs_cipher_key = qs->qs_auth_key;
- qs->qs_cipher_klen = qs->qs_auth_klen;
- break;
- case 0:
- break;
- default:
- panic("%s: unhandled auth algorithm %d", __func__,
- csp->csp_auth_alg);
+ qs->qs_cipher_key = qs->qs_auth_key;
+ qs->qs_cipher_klen = qs->qs_auth_klen;
+ break;
+ case 0:
+ break;
+ default:
+ panic("%s: unhandled auth algorithm %d", __func__,
+ mac->cri_alg);
+ }
}
slices = 0;
- switch (csp->csp_mode) {
- case CSP_MODE_AEAD:
- case CSP_MODE_ETA:
+ if (enc != NULL && mac != NULL) {
/* auth then decrypt */
ddesc->qcd_slices[0] = FW_SLICE_AUTH;
ddesc->qcd_slices[1] = FW_SLICE_CIPHER;
@@ -1926,8 +2012,7 @@ qat_newsession(device_t dev, crypto_session_t cses,
edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
edesc->qcd_cmd_id = FW_LA_CMD_CIPHER_HASH;
slices = 2;
- break;
- case CSP_MODE_CIPHER:
+ } else if (enc != NULL) {
/* decrypt */
ddesc->qcd_slices[0] = FW_SLICE_CIPHER;
ddesc->qcd_cipher_dir = HW_CIPHER_DECRYPT;
@@ -1937,8 +2022,7 @@ qat_newsession(device_t dev, crypto_session_t cses,
edesc->qcd_cipher_dir = HW_CIPHER_ENCRYPT;
edesc->qcd_cmd_id = FW_LA_CMD_CIPHER;
slices = 1;
- break;
- case CSP_MODE_DIGEST:
+ } else if (mac != NULL) {
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
/* auth then decrypt */
ddesc->qcd_slices[0] = FW_SLICE_AUTH;
@@ -1958,10 +2042,6 @@ qat_newsession(device_t dev, crypto_session_t cses,
edesc->qcd_cmd_id = FW_LA_CMD_AUTH;
slices = 1;
}
- break;
- default:
- panic("%s: unhandled crypto algorithm %d, %d", __func__,
- csp->csp_cipher_alg, csp->csp_auth_alg);
}
ddesc->qcd_slices[slices] = FW_SLICE_DRAM_WR;
edesc->qcd_slices[slices] = FW_SLICE_DRAM_WR;
@@ -1969,13 +2049,13 @@ qat_newsession(device_t dev, crypto_session_t cses,
qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, ddesc);
qcy->qcy_sc->sc_hw.qhw_crypto_setup_desc(qcy, qs, edesc);
- if (csp->csp_auth_mlen != 0)
- qs->qs_auth_mlen = csp->csp_auth_mlen;
+ if (mac != NULL && mac->cri_mlen != 0)
+ qs->qs_auth_mlen = mac->cri_mlen;
else
qs->qs_auth_mlen = edesc->qcd_auth_sz;
/* Compute the GMAC by specifying a null cipher payload. */
- if (csp->csp_auth_alg == CRYPTO_AES_NIST_GMAC)
+ if (mac != NULL && mac->cri_alg == CRYPTO_AES_NIST_GMAC)
ddesc->qcd_cmd_id = edesc->qcd_cmd_id = FW_LA_CMD_AUTH;
return 0;
@@ -2008,6 +2088,7 @@ qat_freesession(device_t dev, crypto_session_t cses)
static int
qat_process(device_t dev, struct cryptop *crp, int hint)
{
+ struct cryptodesc *crd, *enc, *mac;
struct qat_crypto *qcy;
struct qat_crypto_bank *qcb;
struct qat_crypto_desc const *desc;
@@ -2022,14 +2103,31 @@ qat_process(device_t dev, struct cryptop *crp, int hin
qs = crypto_get_driver_session(crp->crp_session);
qsc = NULL;
- if (__predict_false(crypto_buffer_len(&crp->crp_buf) > QAT_MAXLEN)) {
+ if (__predict_false(crp->crp_ilen > QAT_MAXLEN)) {
error = E2BIG;
goto fail1;
}
+ crd = crp->crp_desc;
+ enc = mac = NULL;
+ if (qat_is_hash(crd->crd_alg))
+ mac = crd;
+ else
+ enc = crd;
+ crd = crd->crd_next;
+
+ if (crd != NULL) {
+ if (enc == NULL && !qat_is_hash(crd->crd_alg))
+ enc = crd;
+ if (mac == NULL && qat_is_hash(crd->crd_alg))
+ mac = crd;
+ if (crd->crd_next != NULL || !(enc != NULL && mac != NULL))
+ return EINVAL;
+ }
+
mtx_lock(&qs->qs_session_mtx);
if (qs->qs_auth_algo == HW_AUTH_ALGO_GALOIS_128) {
- if (crp->crp_aad_length > QAT_GCM_AAD_SIZE_MAX) {
+ if (mac->crd_len > QAT_GCM_AAD_SIZE_MAX) {
error = E2BIG;
mtx_unlock(&qs->qs_session_mtx);
goto fail1;
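The classification added in this hunk mirrors how consumers build
chained requests under the old API. A hypothetical encrypt-then-MAC
caller (names and offsets illustrative, not part of this commit) looks
roughly like:

    struct cryptop *crp;
    struct cryptodesc *crde, *crda;

    crp = crypto_getreq(2);         /* two descriptors linked via crd_next */
    crp->crp_session = cses;        /* session from cryptodev_newsession */
    crp->crp_ilen = buflen;
    crp->crp_buf = (caddr_t)buf;    /* contiguous; or CRYPTO_F_IMBUF/_IOV */
    crp->crp_callback = my_done;    /* hypothetical completion handler */

    crde = crp->crp_desc;           /* cipher descriptor */
    crde->crd_alg = CRYPTO_AES_CBC;
    crde->crd_flags = CRD_F_ENCRYPT;
    crde->crd_skip = hdrlen;        /* payload offset within the buffer */
    crde->crd_len = payloadlen;
    crde->crd_inject = ivoff;       /* where a generated IV is written */

    crda = crde->crd_next;          /* MAC descriptor */
    crda->crd_alg = CRYPTO_SHA2_256_HMAC;
    crda->crd_skip = 0;
    crda->crd_len = hdrlen + payloadlen;
    crda->crd_inject = hdrlen + payloadlen; /* digest destination */

    crypto_dispatch(crp);

qat_process() accepts the two descriptors in either order and sorts
them into (enc, mac) using qat_is_hash().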
@@ -2044,13 +2142,13 @@ qat_process(device_t dev, struct cryptop *crp, int hin
* size updates here so that the device code can safely update
* the session's recorded AAD size.
*/
- if (__predict_false(crp->crp_aad_length != qs->qs_aad_length)) {
+ if (__predict_false(mac->crd_len != qs->qs_aad_length)) {
if (qs->qs_inflight == 0) {
if (qs->qs_aad_length != -1) {
counter_u64_add(sc->sc_gcm_aad_updates,
1);
}
- qs->qs_aad_length = crp->crp_aad_length;
+ qs->qs_aad_length = mac->crd_len;
} else {
qs->qs_need_wakeup = true;
mtx_unlock(&qs->qs_session_mtx);
@@ -2071,12 +2169,14 @@ qat_process(device_t dev, struct cryptop *crp, int hin
goto fail2;
}
- if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
+ if (enc != NULL && (enc->crd_flags & CRD_F_ENCRYPT) != 0)
desc = qs->qs_enc_desc;
else
desc = qs->qs_dec_desc;
- error = qat_crypto_load(qs, qsc, desc, crp);
+ qsc->qsc_enc = enc;
+ qsc->qsc_mac = mac;
+ error = qat_crypto_load(qs, qsc, crp, enc, mac);
if (error != 0)
goto fail2;
@@ -2085,7 +2185,7 @@ qat_process(device_t dev, struct cryptop *crp, int hin
qsbc->qsbc_session = qs;
qsbc->qsbc_cb_tag = crp;
- sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, crp);
+ sc->sc_hw.qhw_crypto_setup_req_params(qcb, qs, desc, qsc, enc, mac);
bus_dmamap_sync(qsc->qsc_buf_dma_tag, qsc->qsc_buf_dmamap,
BUS_DMASYNC_PREWRITE | BUS_DMASYNC_PREREAD);
@@ -2118,7 +2218,6 @@ static device_method_t qat_methods[] = {
DEVMETHOD(device_detach, qat_detach),
/* Cryptodev interface */
- DEVMETHOD(cryptodev_probesession, qat_probesession),
DEVMETHOD(cryptodev_newsession, qat_newsession),
DEVMETHOD(cryptodev_freesession, qat_freesession),
DEVMETHOD(cryptodev_process, qat_process),
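Note the dropped cryptodev_probesession method: stable/12's cryptodev
interface does not define it, which is why the capability checks now
run inside qat_newsession() via the direct qat_probesession() call
added earlier in this diff.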
Modified: stable/12/sys/dev/qat/qat_hw15.c
==============================================================================
--- head/sys/dev/qat/qat_hw15.c Thu Nov 5 15:55:23 2020 (r367386)
+++ stable/12/sys/dev/qat/qat_hw15.c Mon Nov 9 15:36:11 2020 (r367525)
@@ -546,19 +546,16 @@ qat_hw15_crypto_setup_cipher_desc(struct qat_crypto_de
static void
qat_hw15_crypto_setup_cipher_config(const struct qat_crypto_desc *desc,
- const struct qat_session *qs, const struct cryptop *crp,
+ const struct qat_session *qs, const uint8_t *key,
struct hw_cipher_config *cipher_config)
{
- const uint8_t *key;
uint8_t *cipher_key;
cipher_config->val = qat_crypto_load_cipher_session(desc, qs);
cipher_config->reserved = 0;
cipher_key = (uint8_t *)(cipher_config + 1);
- if (crp != NULL && crp->crp_cipher_key != NULL)
- key = crp->crp_cipher_key;
- else
+ if (key == NULL)
key = qs->qs_cipher_key;
memcpy(cipher_key, key, qs->qs_cipher_klen);
}
@@ -610,11 +607,10 @@ qat_hw15_crypto_setup_auth_desc(struct qat_crypto_desc
static void
qat_hw15_crypto_setup_auth_setup(const struct qat_crypto_desc *desc,
- const struct qat_session *qs, const struct cryptop *crp,
+ const struct qat_session *qs, const uint8_t *key,
struct hw_auth_setup *auth_setup)
{
const struct qat_sym_hash_def *hash_def;
- const uint8_t *key;
uint8_t *state1, *state2;
uint32_t state_sz, state1_sz, state2_sz, state1_pad_len, state2_pad_len;
@@ -657,9 +653,7 @@ qat_hw15_crypto_setup_auth_setup(const struct qat_cryp
auth_setup->auth_counter.counter = 0;
break;
case HW_AUTH_MODE1:
- if (crp != NULL && crp->crp_auth_key != NULL)
- key = crp->crp_auth_key;
- else
+ if (key == NULL)
key = qs->qs_auth_key;
if (key != NULL) {
qat_crypto_hmac_precompute(desc, key,
@@ -769,7 +763,8 @@ qat_hw15_crypto_setup_desc(struct qat_crypto *qcy, str
static void
qat_hw15_crypto_req_setkey(const struct qat_crypto_desc *desc,
const struct qat_session *qs, struct qat_sym_cookie *qsc,
- struct fw_la_bulk_req *bulk_req, struct cryptop *crp)
+ struct fw_la_bulk_req *bulk_req, const struct cryptodesc *enc,
+ const struct cryptodesc *mac)
{
struct hw_auth_setup *auth_setup;
struct hw_cipher_config *cipher_config;
@@ -783,14 +778,14 @@ qat_hw15_crypto_req_setkey(const struct qat_crypto_des
case FW_SLICE_CIPHER:
cipher_config = (struct hw_cipher_config *)
(cdesc + desc->qcd_cipher_offset);
- qat_hw15_crypto_setup_cipher_config(desc, qs, crp,
- cipher_config);
+ qat_hw15_crypto_setup_cipher_config(desc, qs,
+ enc->crd_key, cipher_config);
break;
case FW_SLICE_AUTH:
auth_setup = (struct hw_auth_setup *)
(cdesc + desc->qcd_auth_offset);
- qat_hw15_crypto_setup_auth_setup(desc, qs, crp,
- auth_setup);
+ qat_hw15_crypto_setup_auth_setup(desc, qs,
+ mac->crd_key, auth_setup);
break;
case FW_SLICE_DRAM_WR:
i = MAX_FW_SLICE; /* end of chain */
@@ -806,7 +801,7 @@ qat_hw15_crypto_req_setkey(const struct qat_crypto_des
void
qat_hw15_crypto_setup_req_params(struct qat_crypto_bank *qcb,
struct qat_session *qs, struct qat_crypto_desc const *desc,
- struct qat_sym_cookie *qsc, struct cryptop *crp)
+ struct qat_sym_cookie *qsc, struct cryptodesc *enc, struct cryptodesc *mac)
{
struct qat_sym_bulk_cookie *qsbc;
struct fw_la_bulk_req *bulk_req;
@@ -830,9 +825,10 @@ qat_hw15_crypto_setup_req_params(struct qat_crypto_ban
bulk_req->req_params_addr = qsc->qsc_bulk_req_params_buf_paddr;
bulk_req->comn_ftr.next_request_addr = 0;
bulk_req->comn_mid.opaque_data = (uint64_t)(uintptr_t)qsc;
- if (__predict_false(crp->crp_cipher_key != NULL ||
- crp->crp_auth_key != NULL)) {
- qat_hw15_crypto_req_setkey(desc, qs, qsc, bulk_req, crp);
+ if (__predict_false(
+ (enc != NULL && (enc->crd_flags & CRD_F_KEY_EXPLICIT) != 0) ||
+ (mac != NULL && (mac->crd_flags & CRD_F_KEY_EXPLICIT) != 0))) {
+ qat_hw15_crypto_req_setkey(desc, qs, qsc, bulk_req, enc, mac);
}
digest_paddr = 0;
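Per-request rekeying also changes shape: head passes fresh keys via
crp->crp_cipher_key and crp->crp_auth_key, while the old API flags them
on the descriptor, which is what the CRD_F_KEY_EXPLICIT test above
catches. A hypothetical caller supplying a one-off key would set:

    crde->crd_flags |= CRD_F_KEY_EXPLICIT;
    crde->crd_key = (caddr_t)newkey;    /* overrides the session key */
    crde->crd_klen = 128;               /* in bits, per OCF convention */

qat_hw15_crypto_req_setkey() then rebuilds the content descriptor from
crd_key instead of the cached session key.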
@@ -865,7 +861,7 @@ qat_hw15_crypto_setup_req_params(struct qat_crypto_ban
* only.
*/
cipher_req->cipher_off = 0;
- cipher_req->cipher_len = crp->crp_payload_length;
+ cipher_req->cipher_len = enc->crd_len;
}
auth_req->curr_id = FW_SLICE_AUTH;
if (cmd_id == FW_LA_CMD_HASH_CIPHER || cmd_id == FW_LA_CMD_AUTH)
@@ -877,11 +873,11 @@ qat_hw15_crypto_setup_req_params(struct qat_crypto_ban
auth_req->auth_res_sz = desc->qcd_auth_sz;
auth_req->auth_off = 0;
- auth_req->auth_len = crp->crp_payload_length;
+ auth_req->auth_len = enc->crd_len;
auth_req->hash_state_sz =
- roundup2(crp->crp_aad_length, QAT_AES_GCM_AAD_ALIGN) >> 3;
- auth_req->u1.aad_addr = crp->crp_aad_length > 0 ?
+ roundup2(mac->crd_len, QAT_AES_GCM_AAD_ALIGN) >> 3;
+ auth_req->u1.aad_addr = mac->crd_len > 0 ?
qsc->qsc_gcm_aad_paddr : 0;
/*
@@ -889,7 +885,7 @@ qat_hw15_crypto_setup_req_params(struct qat_crypto_ban
* when the AAD length changes between requests in a session and
* is synchronized by qat_process().
*/
- aad_sz = htobe32(crp->crp_aad_length);
+ aad_sz = htobe32(mac->crd_len);
aad_szp1 = (uint32_t *)(
__DECONST(uint8_t *, desc->qcd_content_desc) +
desc->qcd_gcm_aad_sz_offset1);
@@ -897,7 +893,7 @@ qat_hw15_crypto_setup_req_params(struct qat_crypto_ban
desc->qcd_gcm_aad_sz_offset2;
if (__predict_false(*aad_szp1 != aad_sz)) {
*aad_szp1 = aad_sz;
- *aad_szp2 = (uint8_t)roundup2(crp->crp_aad_length,
+ *aad_szp2 = (uint8_t)roundup2(mac->crd_len,
QAT_AES_GCM_AAD_ALIGN);
bus_dmamap_sync(qs->qs_desc_mem.qdm_dma_tag,
qs->qs_desc_mem.qdm_dma_map,
@@ -918,9 +914,9 @@ qat_hw15_crypto_setup_req_params(struct qat_crypto_ban
cipher_req->curr_id = FW_SLICE_CIPHER;
cipher_req->next_id = next_slice;
- cipher_req->cipher_off = crp->crp_aad_length == 0 ? 0 :
- crp->crp_payload_start - crp->crp_aad_start;
- cipher_req->cipher_len = crp->crp_payload_length;
+ cipher_req->cipher_off = mac != NULL ?
+ mac->crd_len - enc->crd_len : 0;
+ cipher_req->cipher_len = enc->crd_len;
cipher_req->state_address = qsc->qsc_iv_buf_paddr;
}
if (cmd_id != FW_LA_CMD_CIPHER) {
@@ -941,8 +937,7 @@ qat_hw15_crypto_setup_req_params(struct qat_crypto_ban
*** DIFF OUTPUT TRUNCATED AT 1000 LINES ***