D23677.diff

Index: ObsoleteFiles.inc
===================================================================
--- ObsoleteFiles.inc
+++ ObsoleteFiles.inc
@@ -36,6 +36,11 @@
# xargs -n1 | sort | uniq -d;
# done
+# 2020xxxx: OCF refactoring
+OLD_FILES+=usr/share/man/man9/crypto_find_driver.9.gz
+OLD_FILES+=usr/share/man/man9/crypto_register.9.gz
+OLD_FILES+=usr/share/man/man9/crypto_unregister.9.gz
+
# 20200320: cx and ctau drivers retired
OLD_FILES+=usr/share/man/man4/ctau.4.gz
OLD_FILES+=usr/share/man/man4/cx.4.gz
Index: notes
===================================================================
--- /dev/null
+++ notes
@@ -0,0 +1,257 @@
+The goal of this branch is to simplify the data structures used to describe
+session and crypto op parameters.
+
+- replace linked list of cryptoini for session init with a structure
+ - explicit IV length?
+- replace linked list of cryptodesc with members in struct cryptop
+ - maybe require data to always be laid out as
+ AAD || IV (if not sideband) || DATA || TAG/MAC
+ (had to scrap this due to IPsec AH and permit arbitrary IV and
+ digest offsets)
+ - generate IV if needed in common code, not drivers
+- add flag for hash verify rather than hash compute
+- change copyto/copyback to operate on the crp instead of the fields
+- permit separate in/out buffers?
+
+- need to ensure crypto_getreq() zeroes the crp (it does)
+- change for AEAD:
+ - previously with AES+HMAC crda covered all of data to auth, now
+ need to hash AAD region + payload. If IV needs to be hashed,
+ AAD should cover IV.
+
+- Notes on IVs:
+ - CRYPTO_F_IV_GENERATE (encrypt only)
+ - generate full IV and copy to crp_iv_start before encryption
+ - CRYPTO_F_IV_SEPARATE (both, cannot use with GENERATE)
+ - IV is in crp_iv, crp_iv_start should be ignored
+ - neither flag
+ - full IV is in data stream at crp_iv_start
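+
+  A sketch of how the three IV cases above resolve to an IV for the
+  operation (illustrative only; per the note above, the generation in
+  the first case may end up in common code rather than each driver):
+
+    #include <sys/param.h>
+    #include <sys/libkern.h>
+    #include <opencrypto/cryptodev.h>
+
+    static void
+    driver_read_iv(struct cryptop *crp,
+        const struct crypto_session_params *csp, uint8_t *iv)
+    {
+            if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+                    /* encrypt only: fresh IV, written into the
+                       data stream at crp_iv_start */
+                    arc4rand(iv, csp->csp_ivlen, 0);
+                    crypto_copyback(crp, crp->crp_iv_start,
+                        csp->csp_ivlen, iv);
+            } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
+                    /* IV is out of band in crp_iv */
+                    memcpy(iv, crp->crp_iv, csp->csp_ivlen);
+            } else {
+                    /* full IV is in the stream at crp_iv_start */
+                    crypto_copydata(crp, crp->crp_iv_start,
+                        csp->csp_ivlen, iv);
+            }
+    }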
+
+- AES-GCM now uses CIPHER_MODE_AEAD which ignores most of csp_auth_*
+ except for csp_auth_mlen
+
+- Eventually retire 3 separate GMAC algs and repurpose NIST_GMAC as a single
+ MAC?
+
+- Maybe retire driver HIDs with 'struct cryptocap' pointers instead? We
+ could just use a TAILQ instead of an array
+
+- Separate session operation to rekey instead of implicit rekeying?
+ Implicit klen is not really great for auth, for example
+
+- XXX: Need to think about how to define GMAC in the new world order. I
+ think it should be CSP_MODE_DIGEST, but with csp_cipher_ivlen set?
+
+- Yes, GMAC is CSP_MODE_DIGEST with payload area describing data to hash.
+ It uses csp_ivlen.
+
+- Maybe change crp_cipher_key to not change session key but only the
+ specific operation?
+
+- Need to figure out what to do about accelerated software vs plain
+ software
+ - maybe need "priority" and a "session probe/test" instead of just
+ "newsession"
+
+- Add a reference count on sessions
+ - crypto_getop would take a session pointer and set crp_session along
+ with bumping refcount now
+
+- Kill the UIO usage in cryptodev.c, it's silly (done)
+
+- GCM IV length is kind of busted, perhaps need IV length as part of crp
+ instead of csp?
+
+- Testing
+ + cryptocheck
+ + soft
+ + ccr
+ - ipsec
+ - auth
+ - hmac-md5
+ - hmac-sha1
+ - keyed-md5
+ - keyed-sha1
+ - null
+ - hmac-sha2-256
+ - hmac-sha2-384
+ - hmac-sha2-512
+ - hmac-ripemd160
+ - aes-xcbc-mac
+ - tcp-md5
+ - encrypt
+ - des-cbc
+            - 3des-cbc
+ - null
+ - blowfish-cbc
+ - cast128-cbc
+ - des-deriv
+ - des-32iv
+ - rijndael-cbc
+ - aes-ctr
+ - aes-gcm16
+ - camellia-cbc
+
+ - 3des-hmac-md5
+ + soft
+ - aes-cbc-hmac-sha
+ + soft
+ - aes-cbc
+ + soft
+ - aes-ctr-hmac-sha2-512
+ + soft
+ - aes-ctr
+ + soft
+ - aes-gcm-256
+ + soft
+ - aes-gcm
+ + soft
+ - blowfish
+ + soft
+ - camellia
+ + soft
+ - cast128
+ + soft
+ - des-32iv
+ + soft
+ - des-deriv
+ + soft
+ - hmac-sha1
+ + soft
+ - hmac-sha2-256
+ + soft
+ - hmac-sha2-384
+ + soft
+ - hmac-sha2-512
+ + soft
+ - ipcomp
+ - doesn't work with stock FreeBSD
+ - null-ipcomp
+ - doesn't work with stock FreeBSD
+ - null
+ + soft
+ - rijndael-hmac-sha2-256
+ + soft
+ - rijndael
+ + soft
+ - tcp-md5
+ - needs kernel with TCP_SIGNATURE
+
+- Tested with cryptocheck (re-check)
+ - cryptosoft
+ - aesni
+ - blake2
+ - ccr
+
+- Needs testing:
+ - ccp
+ - aesni with sha
+ - hifn
+ - kgssapi_krb5
+ - ubsec
+ - padlock
+ - safe
+ - GELI
+ - armv8_crypto (aarch64)
+ - glxsb (i386)
+ - sec (ppc)
+ - cesa (armv7)
+ - cryptocteon (mips64)
+ - nlmsec (mips64)
+
+- Benchmarks
+ - aes-gcm setup
+ - frodo
+ - kldload ipsec
+ - ifconfig cc0 inet 172.16.0.1/24 alias
+ - setkey -f /mnt/jhb/work/git/netperf/IPSEC/Configs/source-aes-gcm-256.conf
+ - netperf -H 172.16.0.2 -cC
+ - sam
+ - kldload ipsec
+ - ifconfig cc0 inet 172.16.0.2/24 alias
+ - setkey -f /mnt/jhb/work/git/netperf/IPSEC/Configs/dut-aes-gcm-256.conf
+ - netserver
+ - clean svn (r348883)
+ - no ipsec 44000 18% 25%
+ - null 5500 28% 28%
+ - aes-gcm
+ - cryptosoft 550 26% 17%
+ - aesni 3000 28% 26%
+ - ccr 4300 45% 45%
+ - ocf_rework (r348883)
+ - no ipsec 44000 18% 25%
+ - null 5800 28% 28%
+ - aes-gcm
+ - cryptosoft 580 26% 17%
+ - aesni 3000 28% 26%
+ - ccr 4300 45% 45%
+
+ - ktls using aes-gcm
+ - frodo
+ - ktls.sh
+ - kldload ktls_ocf
+ - service nginx start
+ - sam
+ - normal ktls client
+ - clean svn (r354096)
+ - cryptosoft 2.9 Gbps 76-80% CPU
+ - aesni 27 Gbps 63-66% CPU
+ - ccr 30 Gbps 46-44% CPU
+ - ocf_rework (r354096)
+ - cryptosoft 2.9 Gbps 100% CPU
+ - aesni 27 Gbps 63-66% CPU
+ - ccr 30 Gbps 40-44% CPU
+
+ - dd /dev/zero to 4g md geli using AES-XTS / HMAC/SHA256
+ - svn
+ - software 43.002561 secs (49938506 bytes/sec)
+ - ccr 17.595058 secs (122050390 bytes/sec)
+ - ocf_rework
+ - software 43.544508 secs (49316981 bytes/sec)
+ - ccr 19.096170 secs (112456250 bytes/sec)
+
+ - dd /dev/zero to 4g md geli using AES-XTS
+ - svn
+ - software 34.214655 secs (125530048 bytes/sec)
+ - aesni 10.575568 secs (406121652 bytes/sec)
+ - ccr 10.530135 secs (407873915 bytes/sec)
+ - ocf_rework
+ - software 34.942433 secs (122915518 bytes/sec)
+ - aesni 10.498557 secs (409100708 bytes/sec)
+ - ccr 11.955181 secs (359255745 bytes/sec)
+
+- steps to convert a driver
+ - add probesession
+ - cri -> csp
+ - crd -> crp fields
+ - axe crypto_register
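+
+  Rough before/after of the fields such a conversion touches for an
+  encrypt-then-authenticate session (illustrative; the old descriptor
+  order varied by direction, cipher shown first here):
+
+    struct cryptodesc *crde, *crda;
+    int enc_skip, enc_len, iv_off, aad_skip, aad_len, mac_off;
+
+    /* before: walk the cryptodesc list */
+    crde = crp->crp_desc;
+    crda = crde->crd_next;
+    enc_skip = crde->crd_skip;
+    enc_len = crde->crd_len;
+    iv_off = crde->crd_inject;
+    mac_off = crda->crd_inject;
+
+    /* after: flat fields in struct cryptop */
+    enc_skip = crp->crp_payload_start;
+    enc_len = crp->crp_payload_length;
+    iv_off = crp->crp_iv_start;
+    aad_skip = crp->crp_aad_start;
+    aad_len = crp->crp_aad_length;
+    mac_off = crp->crp_digest_start;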
+
++ rename CRYPTO_BUF_IOV -> CRYPTO_BUF_UIO
++ make key pointers const in csp and crp
+ (keep drivers from XOR'ing keys with IPAD/OPAD)
++ kill klen in crp (assume they match session)
+ + allow NULL keys during setup but require klen to be
+ set during probesession / newsession, also have to require a key
+ when processing
++ change klen to bytes?
+- sysctl node to list crypto drivers
+ - update python test to use this
+- update places for new sysctl names
+
++ csp_lookup_hash() returns auth_hash *?
++ csp_lookup_cipher() returns enc_xform *?
+
++ pull checks into crypto_newsession
+ + checks on mlen
+ + csp_auth_klen 0 vs !0
+
++ kill CSP_IV_VALID and update drivers to all bail for flags != 0 to
+ permit adding new flags in the future
+- update crypto.9
+
++ bus_dmamap_load_crp()
+ + hifn
+ + sec (ppc)
+ + safe
+ + cesa (armv7)
+ + ubsec
Index: share/man/man4/crypto.4
===================================================================
--- share/man/man4/crypto.4
+++ share/man/man4/crypto.4
@@ -156,7 +156,7 @@
The symmetric-key operation mode provides a context-based API
to traditional symmetric-key encryption (or privacy) algorithms,
or to keyed and unkeyed one-way hash (HMAC and MAC) algorithms.
-The symmetric-key mode also permits fused operation,
+The symmetric-key mode also permits encrypt-then-authenticate fused operation,
where the hardware performs both a privacy algorithm and an integrity-check
algorithm in a single pass over the data: either a fused
encrypt/HMAC-generate operation, or a fused HMAC-verify/decrypt operation.
@@ -314,16 +314,14 @@
.Fa cr_op-\*[Gt]iv
supply the addresses of the input buffer, output buffer,
one-way hash, and initialization vector, respectively.
-If a session is using both a privacy algorithm and a hash algorithm,
-the request will generate a hash of the input buffer before
-generating the output buffer by default.
-If the
-.Dv COP_F_CIPHER_FIRST
-flag is included in the
-.Fa cr_op-\*[Gt]flags
-field,
-then the request will generate a hash of the output buffer after
-executing the privacy algorithm.
+.Pp
+If a session is using either fused encrypt-then-authenticate or
+an AEAD algorithm,
+decryption operations require the associated hash as an input.
+If the hash is incorrect, the
+operation will fail with
+.Dv EBADMSG
+and the output buffer will remain unchanged.
.It Dv CIOCCRYPTAEAD Fa struct crypt_aead *cr_aead
.Bd -literal
struct crypt_aead {
Index: share/man/man7/crypto.7
===================================================================
--- share/man/man7/crypto.7
+++ share/man/man7/crypto.7
@@ -68,19 +68,13 @@
.El
.Pp
This algorithm implements Galois/Counter Mode.
-This is the cipher part of an AEAD
+This cipher uses AEAD
.Pq Authenticated Encryption with Associated Data
mode.
-This requires use of the use of a proper authentication mode, one of
-.Dv CRYPTO_AES_128_NIST_GMAC ,
-.Dv CRYPTO_AES_192_NIST_GMAC
-or
-.Dv CRYPTO_AES_256_NIST_GMAC ,
-that corresponds with the number of bits in the key that you are using.
.Pp
-The associated data (if any) must be provided by the authentication mode op.
-The authentication tag will be read/written from/to the offset crd_inject
-specified in the descriptor for the authentication mode.
+The authentication tag will be read/written from/to the offset
+.Va crp_digest_start
+specified in the request.
.Pp
Note: You must provide an IV on every call.
.It Dv CRYPTO_AES_ICM
@@ -118,22 +112,6 @@
NOTE: The ciphertext stealing part is not implemented which is why this cipher
is listed as having a block size of 16 instead of 1.
.El
-.Pp
-Authentication algorithms:
-.Bl -tag -width ".Dv CRYPTO_AES_256_NIST_GMAC"
-.It CRYPTO_AES_128_NIST_GMAC
-See
-.Dv CRYPTO_AES_NIST_GCM_16
-in the cipher mode section.
-.It CRYPTO_AES_192_NIST_GMAC
-See
-.Dv CRYPTO_AES_NIST_GCM_16
-in the cipher mode section.
-.It CRYPTO_AES_256_NIST_GMAC
-See
-.Dv CRYPTO_AES_NIST_GCM_16
-in the cipher mode section.
-.El
.Sh SEE ALSO
.Xr crypto 4 ,
.Xr crypto 9
Index: share/man/man9/Makefile
===================================================================
--- share/man/man9/Makefile
+++ share/man/man9/Makefile
@@ -71,6 +71,10 @@
cr_seeothergids.9 \
cr_seeotheruids.9 \
crypto.9 \
+ crypto_asym.9 \
+ crypto_driver.9 \
+ crypto_request.9 \
+ crypto_session.9 \
CTASSERT.9 \
DB_COMMAND.9 \
DECLARE_GEOM_CLASS.9 \
@@ -889,20 +893,33 @@
cpuset.9 CPU_COPY_STORE_REL.9
MLINKS+=critical_enter.9 critical.9 \
critical_enter.9 critical_exit.9
-MLINKS+=crypto.9 crypto_dispatch.9 \
- crypto.9 crypto_done.9 \
- crypto.9 crypto_freereq.9 \
- crypto.9 crypto_freesession.9 \
- crypto.9 crypto_get_driverid.9 \
- crypto.9 crypto_getreq.9 \
- crypto.9 crypto_kdispatch.9 \
- crypto.9 crypto_kdone.9 \
- crypto.9 crypto_kregister.9 \
- crypto.9 crypto_newsession.9 \
- crypto.9 crypto_register.9 \
- crypto.9 crypto_unblock.9 \
- crypto.9 crypto_unregister.9 \
- crypto.9 crypto_unregister_all.9
+MLINKS+=crypto_asym.9 crypto_kdispatch.9 \
+ crypto_asym.9 crypto_kdone.9 \
+ crypto_asym.9 crypto_kregister.9 \
+ crypto_asym.9 CRYPTODEV_KPROCESS.9
+MLINKS+=crypto_driver.9 crypto_apply.9 \
+	crypto_driver.9 crypto_contiguous_subsegment.9 \
+ crypto_driver.9 crypto_copyback.9 \
+ crypto_driver.9 crypto_copydata.9 \
+ crypto_driver.9 crypto_done.9 \
+ crypto_driver.9 crypto_get_driverid.9 \
+ crypto_driver.9 crypto_get_driver_session.9 \
+ crypto_driver.9 crypto_unblock.9 \
+ crypto_driver.9 crypto_unregister_all.9 \
+ crypto_driver.9 CRYPTODEV_FREESESSION.9 \
+ crypto_driver.9 CRYPTODEV_NEWSESSION.9 \
+ crypto_driver.9 CRYPTODEV_PROBESESSION.9 \
+ crypto_driver.9 CRYPTODEV_PROCESS.9 \
+ crypto_driver.9 hmac_init_ipad.9 \
+ crypto_driver.9 hmac_init_opad.9
+MLINKS+=crypto_request.9 crypto_dispatch.9 \
+ crypto_request.9 crypto_freereq.9 \
+ crypto_request.9 crypto_getreq.9
+MLINKS+=crypto_session.9 crypto_auth_hash.9 \
+ crypto_session.9 crypto_cipher.9 \
+ crypto_session.9 crypto_get_params.9 \
+ crypto_session.9 crypto_newsession.9 \
+ crypto_session.9 crypto_freesession.9
MLINKS+=DB_COMMAND.9 DB_SHOW_ALL_COMMAND.9 \
DB_COMMAND.9 DB_SHOW_COMMAND.9
MLINKS+=DECLARE_MODULE.9 DECLARE_MODULE_TIED.9
Index: share/man/man9/bus_dma.9
===================================================================
--- share/man/man9/bus_dma.9
+++ share/man/man9/bus_dma.9
@@ -68,6 +68,7 @@
.Nm bus_dmamap_load ,
.Nm bus_dmamap_load_bio ,
.Nm bus_dmamap_load_ccb ,
+.Nm bus_dmamap_load_crp ,
.Nm bus_dmamap_load_mbuf ,
.Nm bus_dmamap_load_mbuf_sg ,
.Nm bus_dmamap_load_uio ,
@@ -118,6 +119,10 @@
"union ccb *ccb" "bus_dmamap_callback_t *callback" "void *callback_arg" \
"int flags"
.Ft int
+.Fn bus_dmamap_load_crp "bus_dma_tag_t dmat" "bus_dmamap_t map" \
+"struct crypto *crp" "bus_dmamap_callback_t *callback" "void *callback_arg" \
+"int flags"
+.Ft int
.Fn bus_dmamap_load_mbuf "bus_dma_tag_t dmat" "bus_dmamap_t map" \
"struct mbuf *mbuf" "bus_dmamap_callback2_t *callback" "void *callback_arg" \
"int flags"
@@ -387,9 +392,10 @@
.Vt bus_dmamap_t
via
.Fn bus_dmamap_load ,
-.Fn bus_dmamap_load_bio
+.Fn bus_dmamap_load_bio ,
+.Fn bus_dmamap_load_ccb ,
or
-.Fn bus_dmamap_load_ccb .
+.Fn bus_dmamap_load_crp .
Callbacks are of the format:
.Bl -tag -width indent
.It Ft void
@@ -879,6 +885,15 @@
.It
XPT_SCSI_IO
.El
+.It Fn bus_dmamap_load_crp "dmat" "map" "crp" "callback" "callback_arg" "flags"
+This is a variation of
+.Fn bus_dmamap_load
+which maps buffers pointed to by
+.Fa crp
+for DMA transfers.
+The
+.Dv BUS_DMA_NOWAIT
+flag is implied, thus no callback deferral will happen.
.It Fn bus_dmamap_load_mbuf "dmat" "map" "mbuf" "callback2" "callback_arg" \
"flags"
This is a variation of
Index: share/man/man9/crypto.9
===================================================================
--- share/man/man9/crypto.9
+++ share/man/man9/crypto.9
@@ -25,120 +25,50 @@
.Nd API for cryptographic services in the kernel
.Sh SYNOPSIS
.In opencrypto/cryptodev.h
-.Ft int32_t
-.Fn crypto_get_driverid "device_t dev" "size_t session_size" "int flags"
-.Ft int
-.Fn crypto_register "uint32_t driverid" "int alg" "uint16_t maxoplen" "uint32_t flags"
-.Ft int
-.Fn crypto_kregister "uint32_t driverid" "int kalg" "uint32_t flags"
-.Ft int
-.Fn crypto_unregister "uint32_t driverid" "int alg"
-.Ft int
-.Fn crypto_unregister_all "uint32_t driverid"
-.Ft void
-.Fn crypto_done "struct cryptop *crp"
-.Ft void
-.Fn crypto_kdone "struct cryptkop *krp"
-.Ft int
-.Fn crypto_find_driver "const char *match"
-.Ft int
-.Fn crypto_newsession "crypto_session_t *cses" "struct cryptoini *cri" "int crid"
-.Ft int
-.Fn crypto_freesession "crypto_session_t cses"
-.Ft int
-.Fn crypto_dispatch "struct cryptop *crp"
-.Ft int
-.Fn crypto_kdispatch "struct cryptkop *krp"
-.Ft int
-.Fn crypto_unblock "uint32_t driverid" "int what"
-.Ft "struct cryptop *"
-.Fn crypto_getreq "int num"
-.Ft void
-.Fn crypto_freereq "struct cryptop *crp"
-.Bd -literal
-#define CRYPTO_SYMQ 0x1
-#define CRYPTO_ASYMQ 0x2
-
-#define EALG_MAX_BLOCK_LEN 16
-
-struct cryptoini {
- int cri_alg;
- int cri_klen;
- int cri_mlen;
- caddr_t cri_key;
- uint8_t cri_iv[EALG_MAX_BLOCK_LEN];
- struct cryptoini *cri_next;
-};
-
-struct cryptodesc {
- int crd_skip;
- int crd_len;
- int crd_inject;
- int crd_flags;
- struct cryptoini CRD_INI;
-#define crd_iv CRD_INI.cri_iv
-#define crd_key CRD_INI.cri_key
-#define crd_alg CRD_INI.cri_alg
-#define crd_klen CRD_INI.cri_klen
- struct cryptodesc *crd_next;
-};
-
-struct cryptop {
- TAILQ_ENTRY(cryptop) crp_next;
- crypto_session_t crp_session;
- int crp_ilen;
- int crp_olen;
- int crp_etype;
- int crp_flags;
- caddr_t crp_buf;
- caddr_t crp_opaque;
- struct cryptodesc *crp_desc;
- int (*crp_callback) (struct cryptop *);
- caddr_t crp_mac;
-};
-
-struct crparam {
- caddr_t crp_p;
- u_int crp_nbits;
-};
-
-#define CRK_MAXPARAM 8
-
-struct cryptkop {
- TAILQ_ENTRY(cryptkop) krp_next;
- u_int krp_op; /* ie. CRK_MOD_EXP or other */
- u_int krp_status; /* return status */
- u_short krp_iparams; /* # of input parameters */
- u_short krp_oparams; /* # of output parameters */
- uint32_t krp_hid;
- struct crparam krp_param[CRK_MAXPARAM];
- int (*krp_callback)(struct cryptkop *);
-};
-.Ed
.Sh DESCRIPTION
.Nm
-is a framework for drivers of cryptographic hardware to register with
-the kernel so
-.Dq consumers
-(other kernel subsystems, and
-users through the
+is a framework for in-kernel cryptography.
+It permits in-kernel consumers to encrypt and decrypt data
+and also enables userland applications to use cryptographic hardware
+through the
.Pa /dev/crypto
-device) are able to make use of it.
-Drivers register with the framework the algorithms they support,
-and provide entry points (functions) the framework may call to
-establish, use, and tear down sessions.
-Sessions are used to cache cryptographic information in a particular driver
-(or associated hardware), so initialization is not needed with every request.
-Consumers of cryptographic services pass a set of
-descriptors that instruct the framework (and the drivers registered
-with it) of the operations that should be applied on the data (more
-than one cryptographic operation can be requested).
+device.
.Pp
-Keying operations are supported as well.
-Unlike the symmetric operators described above,
-these sessionless commands perform mathematical operations using
-input and output parameters.
+.Nm
+supports two modes of operation:
+one mode for symmetric-keyed cryptographic requests and digests,
+and a second mode for asymmetric-key requests and modular arithmetic.
+.Ss Symmetric-Key Mode
+Symmetric-key operations include encryption and decryption
+using block and stream ciphers as well as computation and verification
+of message authentication codes (MACs).
+In this mode,
+consumers allocate sessions to describe a transform as discussed in
+.Xr crypto_session 9 .
+Consumers then allocate request objects to describe each individual
+operation,
+such as encrypting a network packet or decrypting a disk sector.
+Requests are described in
+.Xr crypto_request 9 .
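+.Pp
+As an illustration,
+a consumer might encrypt a contiguous buffer with AES-CBC as sketched
+below.
+This is a minimal sketch with error handling omitted;
+the key, IV, data buffer, and completion callback are assumed to be
+supplied by the caller.
+.Bd -literal -offset indent
+struct crypto_session_params csp;
+crypto_session_t cses;
+struct cryptop *crp;
+int error;
+
+memset(&csp, 0, sizeof(csp));
+csp.csp_mode = CSP_MODE_CIPHER;
+csp.csp_cipher_alg = CRYPTO_AES_CBC;
+csp.csp_cipher_key = key;
+csp.csp_cipher_klen = 32;	/* AES-256 key length in bytes */
+csp.csp_ivlen = 16;
+error = crypto_newsession(&cses, &csp,
+    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
+
+crp = crypto_getreq(cses, M_WAITOK);
+crp->crp_op = CRYPTO_OP_ENCRYPT;
+crp->crp_flags = CRYPTO_F_IV_SEPARATE;
+memcpy(crp->crp_iv, iv, 16);
+crp->crp_buf_type = CRYPTO_BUF_CONTIG;	/* contiguous kernel buffer */
+crp->crp_buf = buf;
+crp->crp_ilen = len;
+crp->crp_payload_start = 0;
+crp->crp_payload_length = len;
+crp->crp_callback = done_cb;
+error = crypto_dispatch(crp);
+.Ed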
.Pp
+Device drivers are responsible for processing requests submitted by
+consumers.
+.Xr crypto_driver 9
+describes the interfaces drivers use to register with the framework,
+helper routines the framework provides to facilitate request processing,
+and the interfaces drivers are required to provide.
+.Ss Asymmetric-Key Mode
+Asymmetric-key operations do not use sessions.
+Instead,
+these operations perform individual mathematical operations using a set
+of input and output parameters.
+These operations are described in
+.Xr crypto_asym 9 .
+Drivers that support asymmetric operations must provide the
+additional interfaces described in
+.Xr crypto_asym 9
+beyond the base interfaces described in
+.Xr crypto_driver 9 .
+.Ss Callbacks
Since the consumers may not be associated with a process, drivers may
not
.Xr sleep 9 .
@@ -148,88 +78,38 @@
callback is specified by the consumer on a per-request basis).
The callback is invoked by the framework whether the request was
successfully completed or not.
-An error indication is provided in the latter case.
-A specific error code,
+Errors are reported to the callback function.
+.Pp
+Session initialization does not use callbacks and returns errors
+synchronously.
+.Ss Session Migration
+For symmetric-key operations,
+a specific error code,
.Er EAGAIN ,
is used to indicate that a session handle has changed and that the
request may be re-submitted immediately with the new session.
-Errors are only returned to the invoking function if not
-enough information to call the callback is available (meaning, there
-was a fatal error in verifying the arguments).
-For session initialization and teardown no callback mechanism is used.
+The consumer should update its saved copy of the session handle
+to the value of
+.Fa crp_session
+so that future requests use the new session.
+.Ss Supported Algorithms
+More details on some algorithms may be found in
+.Xr crypto 7 .
+These algorithms are used for symmetric-mode operations.
+The operations supported in asymmetric mode are described in
+.Xr crypto_asym 9 .
.Pp
-The
-.Fn crypto_find_driver
-returns the driver id of the device whose name matches
-.Fa match .
-.Fa match
-can either be the exact name of a device including the unit
-or the driver name without a unit.
-In the latter case,
-the id of the first device with the matching driver name is returned.
-If no matching device is found,
-the value -1 is returned.
+The following authentication algorithms are supported:
.Pp
-The
-.Fn crypto_newsession
-routine is called by consumers of cryptographic services (such as the
-.Xr ipsec 4
-stack) that wish to establish a new session with the framework.
-The
-.Fa cri
-argument points to a
-.Vt cryptoini
-structure containing all the necessary information for
-the driver to establish the session.
-The
-.Fa crid
-argument is either a specific driver id or a bitmask of flags.
-The flags are
-.Dv CRYPTOCAP_F_HARDWARE ,
-to select hardware devices,
-or
-.Dv CRYPTOCAP_F_SOFTWARE ,
-to select software devices.
-If both are specified, hardware devices are preferred over software
-devices.
-On success, the opaque session handle of the new session will be stored in
-.Fa *cses .
-The
-.Vt cryptoini
-structure pointed to by
-.Fa cri
-contains these fields:
-.Bl -tag -width ".Va cri_next"
-.It Va cri_alg
-An algorithm identifier.
-Currently supported algorithms are:
-.Pp
-.Bl -tag -width ".Dv CRYPTO_RIPEMD160_HMAC" -compact
-.It Dv CRYPTO_AES_128_NIST_GMAC
-.It Dv CRYPTO_AES_192_NIST_GMAC
-.It Dv CRYPTO_AES_256_NIST_GMAC
-.It Dv CRYPTO_AES_CBC
-.It Dv CRYPTO_AES_CCM_16
+.Bl -tag -offset indent -width CRYPTO_AES_CCM_CBC_MAC -compact
.It Dv CRYPTO_AES_CCM_CBC_MAC
-.It Dv CRYPTO_AES_ICM
-.It Dv CRYPTO_AES_NIST_GCM_16
.It Dv CRYPTO_AES_NIST_GMAC
-.It Dv CRYPTO_AES_XTS
-.It Dv CRYPTO_ARC4
.It Dv CRYPTO_BLAKE2B
.It Dv CRYPTO_BLAKE2S
-.It Dv CRYPTO_BLF_CBC
-.It Dv CRYPTO_CAMELLIA_CBC
-.It Dv CRYPTO_CAST_CBC
-.It Dv CRYPTO_CHACHA20
-.It Dv CRYPTO_DEFLATE_COMP
-.It Dv CRYPTO_DES_CBC
-.It Dv CRYPTO_3DES_CBC
.It Dv CRYPTO_MD5
.It Dv CRYPTO_MD5_HMAC
.It Dv CRYPTO_MD5_KPDK
.It Dv CRYPTO_NULL_HMAC
-.It Dv CRYPTO_NULL_CBC
.It Dv CRYPTO_POLY1305
.It Dv CRYPTO_RIPEMD160
.It Dv CRYPTO_RIPEMD160_HMAC
@@ -244,488 +124,38 @@
.It Dv CRYPTO_SHA2_384_HMAC
.It Dv CRYPTO_SHA2_512
.It Dv CRYPTO_SHA2_512_HMAC
+.El
+.Pp
+The following encryption algorithms are supported:
+.Pp
+.Bl -tag -offset indent -width CRYPTO_CAMELLIA_CBC -compact
+.It Dv CRYPTO_AES_CBC
+.It Dv CRYPTO_AES_ICM
+.It Dv CRYPTO_AES_XTS
+.It Dv CRYPTO_ARC4
+.It Dv CRYPTO_BLF_CBC
+.It Dv CRYPTO_CAMELLIA_CBC
+.It Dv CRYPTO_CAST_CBC
+.It Dv CRYPTO_CHACHA20
+.It Dv CRYPTO_DES_CBC
+.It Dv CRYPTO_3DES_CBC
+.It Dv CRYPTO_NULL_CBC
.It Dv CRYPTO_SKIPJACK_CBC
.El
-.It Va cri_klen
-For variable-size key algorithms, the length of the key in bits.
-.It Va cri_mlen
-If non-zero, truncate the calculated hash to this many bytes.
-.It Va cri_key
-The key to be used.
-.It Va cri_iv
-An explicit initialization vector if it does not prefix
-the data.
-This field is ignored during initialization
-.Pq Nm crypto_newsession .
-If no IV is explicitly passed (see below on details), a random IV is used
-by the device driver processing the request.
-.It Va cri_next
-Pointer to another
-.Vt cryptoini
-structure.
-This is used to establish dual-algorithm sessions, such as combining a
-cipher with a MAC.
-.El
-.Pp
-The
-.Vt cryptoini
-structure and its contents will not be modified or referenced by the
-framework or any cryptographic drivers.
-The memory associated with
-.Fa cri
-can be released once
-.Fn crypto_newsession
-returns.
-.Pp
-.Fn crypto_freesession
-is called with the session handle returned by
-.Fn crypto_newsession
-to free the session.
.Pp
-.Fn crypto_dispatch
-is called to process a request.
-The various fields in the
-.Vt cryptop
-structure are:
-.Bl -tag -width ".Va crp_callback"
-.It Va crp_session
-The session handle.
-.It Va crp_ilen
-The total length in bytes of the buffer to be processed.
-.It Va crp_olen
-On return, contains the total length of the result.
-For symmetric crypto operations, this will be the same as the input length.
-This will be used if the framework needs to allocate a new
-buffer for the result (or for re-formatting the input).
-.It Va crp_callback
-Callback routine invoked when a request is completed via
-.Fn crypto_done .
-The callback routine should inspect the
-.Va crp_etype
-to determine if the request was successfully completed.
-.It Va crp_etype
-The error type, if any errors were encountered, or zero if
-the request was successfully processed.
-If the
-.Er EAGAIN
-error code is returned, the session handle has changed (and has been recorded
-in the
-.Va crp_session
-field).
-The consumer should record the new session handle and use it in all subsequent
-requests.
-In this case, the request may be re-submitted immediately.
-This mechanism is used by the framework to perform
-session migration (move a session from one driver to another, because
-of availability, performance, or other considerations).
+The following authenticated encryption with additional data (AEAD)
+algorithms are supported:
.Pp
-This field is only valid in the context of the callback routine specified by
-.Va crp_callback .
-Errors are returned to the invoker of
-.Fn crypto_process
-only when enough information is not present to call the callback
-routine (i.e., if the pointer passed is
-.Dv NULL
-or if no callback routine was specified).
-.It Va crp_flags
-A bitmask of flags associated with this request.
-Currently defined flags are:
-.Bl -tag -width ".Dv CRYPTO_F_CBIFSYNC"
-.It Dv CRYPTO_F_IMBUF
-The buffer is an mbuf chain pointed to by
-.Va crp_mbuf .
-.It Dv CRYPTO_F_IOV
-The buffer is a
-.Vt uio
-structure pointed to by
-.Va crp_uio .
-.It Dv CRYPTO_F_BATCH
-Batch operation if possible.
-.It Dv CRYPTO_F_CBIMM
-Do callback immediately instead of doing it from a dedicated kernel thread.
-.It Dv CRYPTO_F_DONE
-Operation completed.
-.It Dv CRYPTO_F_CBIFSYNC
-Do callback immediately if operation is synchronous (that the driver
-specified the
-.Dv CRYPTOCAP_F_SYNC
-flag).
-.It Dv CRYPTO_F_ASYNC
-Try to do the crypto operation in a pool of workers
-if the operation is synchronous (that is, if the driver specified the
-.Dv CRYPTOCAP_F_SYNC
-flag).
-It aims to speed up processing by dispatching crypto operations
-on different processors.
-.It Dv CRYPTO_F_ASYNC_KEEPORDER
-Dispatch callbacks in the same order they are posted.
-Only relevant if the
-.Dv CRYPTO_F_ASYNC
-flag is set and if the operation is synchronous.
-.El
-.It Va crp_buf
-Data buffer unless
-.Dv CRYPTO_F_IMBUF
-or
-.Dv CRYPTO_F_IOV
-is set in
-.Va crp_flags .
-The length in bytes is set in
-.Va crp_ilen .
-.It Va crp_mbuf
-Data buffer mbuf chain when
-.Dv CRYPTO_F_IMBUF
-is set in
-.Va crp_flags .
-.It Va crp_uio
-.Vt struct uio
-data buffer when
-.Dv CRYPTO_F_IOV
-is set in
-.Va crp_flags .
-.It Va crp_opaque
-Cookie passed through the crypto framework untouched.
-It is
-intended for the invoking application's use.
-.It Va crp_desc
-A linked list of descriptors.
-Each descriptor provides
-information about what type of cryptographic operation should be done
-on the input buffer.
-The various fields are:
-.Bl -tag -width ".Va crd_inject"
-.It Va crd_iv
-When the flag
-.Dv CRD_F_IV_EXPLICIT
-is set, this field contains the IV.
-.It Va crd_key
-When the
-.Dv CRD_F_KEY_EXPLICIT
-flag is set, the
-.Va crd_key
-points to a buffer with encryption or authentication key.
-.It Va crd_alg
-An algorithm to use.
-Must be the same as the one given at newsession time.
-.It Va crd_klen
-The
-.Va crd_key
-key length.
-.It Va crd_skip
-The offset in the input buffer where processing should start.
-.It Va crd_len
-How many bytes, after
-.Va crd_skip ,
-should be processed.
-.It Va crd_inject
-The
-.Va crd_inject
-field specifies an offset in bytes from the beginning of the buffer.
-For encryption algorithms, this may be where the IV will be inserted
-when encrypting or where the IV may be found for
-decryption (subject to
-.Va crd_flags ) .
-For MAC algorithms, this is where the result of the keyed hash will be
-inserted.
-.It Va crd_flags
-The following flags are defined:
-.Bl -tag -width 3n
-.It Dv CRD_F_ENCRYPT
-For encryption algorithms, this bit is set when encryption is required
-(when not set, decryption is performed).
-.It Dv CRD_F_IV_PRESENT
-.\" This flag name has nothing to do w/ it's behavior, fix the name.
-For encryption, if this bit is not set the IV used to encrypt the packet
-will be written at the location pointed to by
-.Va crd_inject .
-The IV length is assumed to be equal to the blocksize of the
-encryption algorithm.
-For encryption, if this bit is set, nothing is done.
-For decryption, this flag has no meaning.
-Applications that do special
-.Dq "IV cooking" ,
-such as the half-IV mode in
-.Xr ipsec 4 ,
-can use this flag to indicate that the IV should not be written on the packet.
-This flag is typically used in conjunction with the
-.Dv CRD_F_IV_EXPLICIT
-flag.
-.It Dv CRD_F_IV_EXPLICIT
-This bit is set when the IV is explicitly
-provided by the consumer in the
-.Va crd_iv
-field.
-Otherwise, for encryption operations the IV is provided for by
-the driver used to perform the operation, whereas for decryption
-operations the offset of the IV is provided by the
-.Va crd_inject
-field.
-This flag is typically used when the IV is calculated
-.Dq "on the fly"
-by the consumer, and does not precede the data.
-.It Dv CRD_F_KEY_EXPLICIT
-For encryption and authentication (MAC) algorithms, this bit is set when the key
-is explicitly provided by the consumer in the
-.Va crd_key
-field for the given operation.
-Otherwise, the key is taken at newsession time from the
-.Va cri_key
-field.
-As calculating the key schedule may take a while, it is recommended that often
-used keys are given their own session.
-.It Dv CRD_F_COMP
-For compression algorithms, this bit is set when compression is required (when
-not set, decompression is performed).
-.El
-.It Va CRD_INI
-This
-.Vt cryptoini
-structure will not be modified by the framework or the device drivers.
-Since this information accompanies every cryptographic
-operation request, drivers may re-initialize state on-demand
-(typically an expensive operation).
-Furthermore, the cryptographic
-framework may re-route requests as a result of full queues or hardware
-failure, as described above.
-.It Va crd_next
-Point to the next descriptor.
-Linked operations are useful in protocols such as
-.Xr ipsec 4 ,
-where multiple cryptographic transforms may be applied on the same
-block of data.
+.Bl -tag -offset indent -width CRYPTO_AES_NIST_GCM_16 -compact
+.It Dv CRYPTO_AES_CCM_16
+.It Dv CRYPTO_AES_NIST_GCM_16
.El
-.El
-.Pp
-.Fn crypto_getreq
-allocates a
-.Vt cryptop
-structure with a linked list of
-.Fa num
-.Vt cryptodesc
-structures.
.Pp
-.Fn crypto_freereq
-deallocates a structure
-.Vt cryptop
-and any
-.Vt cryptodesc
-structures linked to it.
-Note that it is the responsibility of the
-callback routine to do the necessary cleanups associated with the
-opaque field in the
-.Vt cryptop
-structure.
+The following compression algorithms are supported:
.Pp
-.Fn crypto_kdispatch
-is called to perform a keying operation.
-The various fields in the
-.Vt cryptkop
-structure are:
-.Bl -tag -width ".Va krp_callback"
-.It Va krp_op
-Operation code, such as
-.Dv CRK_MOD_EXP .
-.It Va krp_status
-Return code.
-This
-.Va errno Ns -style
-variable indicates whether lower level reasons
-for operation failure.
-.It Va krp_iparams
-Number of input parameters to the specified operation.
-Note that each operation has a (typically hardwired) number of such parameters.
-.It Va krp_oparams
-Number of output parameters from the specified operation.
-Note that each operation has a (typically hardwired) number of such parameters.
-.It Va krp_kvp
-An array of kernel memory blocks containing the parameters.
-.It Va krp_hid
-Identifier specifying which low-level driver is being used.
-.It Va krp_callback
-Callback called on completion of a keying operation.
+.Bl -tag -offset indent -width CRYPTO_DEFLATE_COMP -compact
+.It Dv CRYPTO_DEFLATE_COMP
.El
-.Sh DRIVER-SIDE API
-The
-.Fn crypto_get_driverid ,
-.Fn crypto_get_driver_session ,
-.Fn crypto_register ,
-.Fn crypto_kregister ,
-.Fn crypto_unregister ,
-.Fn crypto_unblock ,
-and
-.Fn crypto_done
-routines are used by drivers that provide support for cryptographic
-primitives to register and unregister with the kernel crypto services
-framework.
-.Pp
-Drivers must first use the
-.Fn crypto_get_driverid
-function to acquire a driver identifier, specifying the
-.Fa flags
-as an argument.
-One of
-.Dv CRYPTOCAP_F_SOFTWARE
-or
-.Dv CRYPTOCAP_F_HARDWARE
-must be specified.
-The
-.Dv CRYPTOCAP_F_SYNC
-may also be specified, and should be specified if the driver does all of
-it's operations synchronously.
-Drivers must pass the size of their session structure as the second argument.
-An appropriately sized memory will be allocated by the framework, zeroed, and
-passed to the driver's
-.Fn newsession
-method.
-.Pp
-For each algorithm the driver supports, it must then call
-.Fn crypto_register .
-The first two arguments are the driver and algorithm identifiers.
-The next two arguments specify the largest possible operator length (in bits,
-important for public key operations) and flags for this algorithm.
-.Pp
-.Fn crypto_unregister
-is called by drivers that wish to withdraw support for an algorithm.
-The two arguments are the driver and algorithm identifiers, respectively.
-Typically, drivers for
-PCMCIA
-crypto cards that are being ejected will invoke this routine for all
-algorithms supported by the card.
-.Fn crypto_unregister_all
-will unregister all algorithms registered by a driver
-and the driver will be disabled (no new sessions will be allocated on
-that driver, and any existing sessions will be migrated to other
-drivers).
-The same will be done if all algorithms associated with a driver are
-unregistered one by one.
-After a call to
-.Fn crypto_unregister_all
-there will be no threads in either the newsession or freesession function
-of the driver.
-.Pp
-The calling convention for the driver-supplied routines are:
-.Pp
-.Bl -item -compact
-.It
-.Ft int
-.Fn \*[lp]*newsession\*[rp] "device_t" "crypto_session_t" "struct cryptoini *" ;
-.It
-.Ft void
-.Fn \*[lp]*freesession\*[rp] "device_t" "crypto_session_t" ;
-.It
-.Ft int
-.Fn \*[lp]*process\*[rp] "device_t" "struct cryptop *" "int" ;
-.It
-.Ft int
-.Fn \*[lp]*kprocess\*[rp] "device_t" "struct cryptkop *" "int" ;
-.El
-.Pp
-On invocation, the first argument to
-all routines is the
-.Fa device_t
-that was provided to
-.Fn crypto_get_driverid .
-The second argument to
-.Fn newsession
-is the opaque session handle for the new session.
-The third argument is identical to that of
-.Fn crypto_newsession .
-.Pp
-Drivers obtain a pointer to their session memory by invoking
-.Fn crypto_get_driver_session
-on the opaque
-.Vt crypto_session_t
-handle.
-.Pp
-The
-.Fn freesession
-routine takes as arguments the opaque data value and the session handle.
-It should clear any context associated with the session (clear hardware
-registers, memory, etc.).
-If no resources need to be released other than the contents of session memory,
-the method is optional.
-The
-.Nm
-framework will zero and release the allocated session memory (after running the
-.Fn freesession
-method, if one exists).
-.Pp
-The
-.Fn process
-routine is invoked with a request to perform crypto processing.
-This routine must not block or sleep, but should queue the request and return
-immediately or process the request to completion.
-In case of an unrecoverable error, the error indication must be placed in the
-.Va crp_etype
-field of the
-.Vt cryptop
-structure.
-When the request is completed, or an error is detected, the
-.Fn process
-routine must invoke
-.Fn crypto_done .
-Session migration may be performed, as mentioned previously.
-.Pp
-In case of a temporary resource exhaustion, the
-.Fn process
-routine may return
-.Er ERESTART
-in which case the crypto services will requeue the request, mark the driver
-as
-.Dq blocked ,
-and stop submitting requests for processing.
-The driver is then responsible for notifying the crypto services
-when it is again able to process requests through the
-.Fn crypto_unblock
-routine.
-This simple flow control mechanism should only be used for short-lived
-resource exhaustion as it causes operations to be queued in the crypto
-layer.
-Doing so is preferable to returning an error in such cases as
-it can cause network protocols to degrade performance by treating the
-failure much like a lost packet.
-.Pp
-The
-.Fn kprocess
-routine is invoked with a request to perform crypto key processing.
-This routine must not block, but should queue the request and return
-immediately.
-Upon processing the request, the callback routine should be invoked.
-In case of an unrecoverable error, the error indication must be placed in the
-.Va krp_status
-field of the
-.Vt cryptkop
-structure.
-When the request is completed, or an error is detected, the
-.Fn kprocess
-routine should invoked
-.Fn crypto_kdone .
-.Sh RETURN VALUES
-.Fn crypto_register ,
-.Fn crypto_kregister ,
-.Fn crypto_unregister ,
-.Fn crypto_newsession ,
-.Fn crypto_freesession ,
-and
-.Fn crypto_unblock
-return 0 on success, or an error code on failure.
-.Fn crypto_get_driverid
-returns a non-negative value on error, and \-1 on failure.
-.Fn crypto_getreq
-returns a pointer to a
-.Vt cryptop
-structure and
-.Dv NULL
-on failure.
-.Fn crypto_dispatch
-returns
-.Er EINVAL
-if its argument or the callback function was
-.Dv NULL ,
-and 0 otherwise.
-The callback is provided with an error code in case of failure, in the
-.Va crp_etype
-field.
.Sh FILES
.Bl -tag -width ".Pa sys/opencrypto/crypto.c"
.It Pa sys/opencrypto/crypto.c
@@ -735,7 +165,10 @@
.Xr crypto 4 ,
.Xr ipsec 4 ,
.Xr crypto 7 ,
-.Xr malloc 9 ,
+.Xr crypto_asym 9 ,
+.Xr crypto_driver 9 ,
+.Xr crypto_request 9 ,
+.Xr crypto_session 9 ,
.Xr sleep 9
.Sh HISTORY
The cryptographic framework first appeared in
@@ -743,14 +176,6 @@
and was written by
.An Angelos D. Keromytis Aq Mt angelos@openbsd.org .
.Sh BUGS
-The framework currently assumes that all the algorithms in a
-.Fn crypto_newsession
-operation must be available by the same driver.
-If that is not the case, session initialization will fail.
-.Pp
-The framework also needs a mechanism for determining which driver is
+The framework needs a mechanism for determining which driver is
best for a specific set of algorithms associated with a session.
Some type of benchmarking is in order here.
-.Pp
-Multiple instances of the same algorithm in the same session are not
-supported.
Index: share/man/man9/crypto_asym.9
===================================================================
--- /dev/null
+++ share/man/man9/crypto_asym.9
@@ -0,0 +1,178 @@
+.\" Copyright (c) 2020, Chelsio Inc
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright notice,
+.\" this list of conditions and the following disclaimer.
+.\"
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" 3. Neither the name of the Chelsio Inc nor the names of its
+.\" contributors may be used to endorse or promote products derived from
+.\" this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+.\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+.\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" * Other names and brands may be claimed as the property of others.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd January 31, 2020
+.Dt CRYPTO_ASYM 9
+.Os
+.Sh NAME
+.Nm crypto_asym
+.Nd asymmetric cryptographic operations
+.Sh SYNOPSIS
+.In opencrypto/cryptodev.h
+.Ft int
+.Fn crypto_kdispatch "struct cryptkop *krp"
+.Ft void
+.Fn crypto_kdone "struct cryptkop *krp"
+.Ft int
+.Fn crypto_kregister "uint32_t driverid" "int kalg" "uint32_t flags"
+.Ft int
+.Fn CRYPTODEV_KPROCESS "device_t dev" "struct cryptkop *krp" "int flags"
+.Sh DESCRIPTION
+The in-kernel cryptographic kernel framework supports asymmetric
+requests (keying requests) in addition to symmetric operations.
+There are currently no in-kernel users of these requests,
+but applications can make requests of hardware drivers via the
+.Pa /dev/crypto
+device.
+.Pp
+Some APIs are shared with the framework's symmetric request support.
+This manual describes the APIs and data structures unique to
+asymmetric requests.
+.Ss Request Objects
+A request is described by a
+.Vt struct cryptkop
+containing the following fields:
+.Bl -tag -width "krp_callback"
+.It Fa krp_op
+Operation to perform.
+Available operations include
+.Dv CRK_MOD_EXP ,
+.Dv CRK_MOD_EXP_CRT ,
+.Dv CRK_DSA_SIGN ,
+.Dv CRK_DSA_VERIFY ,
+and
+.Dv CRK_DH_COMPUTE_KEY .
+.It Fa krp_status
+Error status.
+Either zero on success,
+or an error if an operation fails.
+Set by drivers prior to completing a request via
+.Fn crypto_kdone .
+.It Fa krp_iparams
+Count of input parameters.
+.It Fa krp_oparams
+Count of output parameters.
+.It Fa krp_crid
+Requested device.
+.It Fa krp_hid
+Device used to complete the request.
+.It Fa krp_param
+Array of parameters.
+The array contains the input parameters first followed by the output
+parameters.
+Each parameter is stored as a bignum.
+Each bignum is described by a
+.Vt struct crparam
+containing the following fields:
+.Bl -tag -width "crp_nbits"
+.It Fa crp_p
+Pointer to array of packed bytes.
+.It Fa crp_nbits
+Size of bignum in bits.
+.El
+.It Fa krp_callback
+Callback function.
+This must point to a callback function of type
+.Vt void (*)(struct cryptkop *) .
+The callback function should inspect
+.Fa krp_status
+to determine the status of the completed operation.
+.El
+.Pp
+New requests should be initialized to zero before setting fields to
+appropriate values.
+Once the request has been populated,
+it should be passed to
+.Fn crypto_kdispatch .
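+.Pp
+For example,
+a modular exponentiation computing res = base^exp mod m might be
+requested as in the following sketch,
+which assumes the conventional parameter order of base, exponent,
+and modulus followed by the result,
+and requests any hardware driver:
+.Bd -literal -offset indent
+struct cryptkop *krp;
+int error;
+
+krp = malloc(sizeof(*krp), M_DEVBUF, M_WAITOK | M_ZERO);
+krp->krp_op = CRK_MOD_EXP;
+krp->krp_iparams = 3;
+krp->krp_oparams = 1;
+krp->krp_param[0].crp_p = base;		/* input: base */
+krp->krp_param[0].crp_nbits = base_bits;
+krp->krp_param[1].crp_p = exp;		/* input: exponent */
+krp->krp_param[1].crp_nbits = exp_bits;
+krp->krp_param[2].crp_p = mod;		/* input: modulus */
+krp->krp_param[2].crp_nbits = mod_bits;
+krp->krp_param[3].crp_p = res;		/* output: result */
+krp->krp_param[3].crp_nbits = mod_bits;
+krp->krp_crid = CRYPTOCAP_F_HARDWARE;
+krp->krp_callback = done_cb;
+error = crypto_kdispatch(krp);
+.Ed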
+.Pp
+.Fn crypto_kdispatch
+will choose a device driver to perform the operation described by
+.Fa krp
+and invoke that driver's
+.Fn CRYPTODEV_KPROCESS
+method.
+.Ss Driver API
+Drivers register support for asymmetric operations by calling
+.Fn crypto_kregister
+for each supported algorithm.
+.Fa driverid
+should be the value returned by an earlier call to
+.Fn crypto_get_driverid .
+.Fa kalg
+should list one of the operations that can be set in
+.Fa krp_op .
+.Fa flags
+is a bitmask of zero or more of the following values:
+.Bl -tag -width "CRYPTO_ALG_FLAG_RNG_ENABLE"
+.It Dv CRYPTO_ALG_FLAG_RNG_ENABLE
+Device has a hardware RNG for DH/DSA.
+.It Dv CRYPTO_ALG_FLAG_DSA_SHA
+Device can compute a SHA digest of a message.
+.El
+.Pp
+Drivers unregister with the framework via
+.Fn crypto_unregister_all .
+.Pp
+Similar to
+.Fn CRYPTODEV_PROCESS ,
+.Fn CRYPTODEV_KPROCESS
+should complete the request or schedule it for asynchronous
+completion.
+If this method is not able to complete a request due to insufficient
+resources,
+it can defer the request (and future asymmetric requests) by returning
+.Dv ERESTART .
+Once resources are available,
+the driver should invoke
+.Fn crypto_unblock
+with
+.Dv CRYPTO_ASYMQ
+to resume processing of asymmetric requests.
+.Pp
+Once a request is completed,
+the driver should set
+.Fa krp_status
+and then call
+.Fn crypto_kdone .
+.Sh RETURN VALUES
+.Fn crypto_kdispatch ,
+.Fn crypto_kregister ,
+and
+.Fn CRYPTODEV_KPROCESS
+return zero on success or an error on failure.
+.Sh SEE ALSO
+.Xr crypto 7 ,
+.Xr crypto 9 ,
+.Xr crypto_driver 9 ,
+.Xr crypto_request 9 ,
+.Xr crypto_session 9
Index: share/man/man9/crypto_driver.9
===================================================================
--- /dev/null
+++ share/man/man9/crypto_driver.9
@@ -0,0 +1,392 @@
+.\" Copyright (c) 2020, Chelsio Inc
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright notice,
+.\" this list of conditions and the following disclaimer.
+.\"
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" 3. Neither the name of the Chelsio Inc nor the names of its
+.\" contributors may be used to endorse or promote products derived from
+.\" this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+.\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+.\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" * Other names and brands may be claimed as the property of others.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd January 31, 2020
+.Dt CRYPTO_DRIVER 9
+.Os
+.Sh NAME
+.Nm crypto_driver
+.Nd interface for symmetric cryptographic drivers
+.Sh SYNOPSIS
+.In opencrypto/cryptodev.h
+.Ft int
+.Fo crypto_apply
+.Fa "struct cryptop *crp"
+.Fa "int off"
+.Fa "int len"
+.Fa "int (*f)(void *, void *, u_int)"
+.Fa "void *arg"
+.Fc
+.Ft void *
+.Fo crypto_contiguous_subsegment
+.Fa "struct cryptop *crp"
+.Fa "size_t skip"
+.Fa "size_t len"
+.Fc
+.Ft void
+.Fn crypto_copyback "struct cryptop *crp" "int off" "int size" "const void *src"
+.Ft void
+.Fn crypto_copydata "struct cryptop *crp" "int off" "int size" "void *dst"
+.Ft void
+.Fn crypto_done "struct cryptop *crp"
+.Ft int32_t
+.Fn crypto_get_driverid "device_t dev" "size_t session_size" "int flags"
+.Ft void *
+.Fn crypto_get_driver_session "crypto_session_t crypto_session"
+.Ft int
+.Fn crypto_unblock "uint32_t driverid" "int what"
+.Ft int
+.Fn crypto_unregister_all "uint32_t driverid"
+.Ft int
+.Fn CRYPTODEV_FREESESSION "device_t dev" "crypto_session_t crypto_session"
+.Ft int
+.Fo CRYPTODEV_NEWSESSION
+.Fa "device_t dev"
+.Fa "crypto_session_t crypto_session"
+.Fa "const struct crypto_session_params *csp"
+.Fc
+.Ft int
+.Fo CRYPTODEV_PROBESESSION
+.Fa "device_t dev"
+.Fa "const struct crypto_session_params *csp"
+.Fc
+.Ft int
+.Fn CRYPTODEV_PROCESS "device_t dev" "struct cryptop *crp" "int flags"
+.Ft void
+.Fo hmac_init_ipad
+.Fa "struct auth_hash *axf"
+.Fa "const char *key"
+.Fa "int klen"
+.Fa "void *auth_ctx"
+.Fc
+.Ft void
+.Fo hmac_init_opad
+.Fa "struct auth_hash *axf"
+.Fa "const char *key"
+.Fa "int klen"
+.Fa "void *auth_ctx"
+.Fc
+.Sh DESCRIPTION
+Symmetric cryptographic drivers process cryptographic requests
+submitted to sessions associated with the driver.
+.Pp
+Cryptographic drivers call
+.Fn crypto_get_driverid
+to register with the cryptographic framework.
+.Fa dev
+is the device used to service requests.
+The
+.Fn CRYPTODEV
+methods are defined in the method table for the device driver attached to
+.Fa dev .
+.Fa session_size
+specifies the size of a driver-specific per-session structure allocated by
+the cryptographic framework.
+.Fa flags
+is a bitmask of properties about the driver.
+Exactly one of
+.Dv CRYPTOCAP_F_SOFTWARE
+or
+.Dv CRYPTOCAP_F_HARDWARE
+must be specified.
+.Dv CRYPTOCAP_F_SOFTWARE
+should be used for drivers which process requests using host CPUs.
+.Dv CRYPTOCAP_F_HARDWARE
+should be used for drivers which process requests on separate co-processors.
+.Dv CRYPTOCAP_F_SYNC
+should be set for drivers which process requests synchronously in
+.Fn CRYPTODEV_PROCESS .
+.Fn crypto_get_driverid
+returns an opaque driver id.
+.Pp
+.Fn crypto_unregister_all
+unregisters a driver from the cryptographic framework.
+If there are any pending operations or open sessions,
+this function will sleep.
+.Fa driverid
+is the value returned by an earlier call to
+.Fn crypto_get_driverid .
+.Pp
+When a new session is created by
+.Fn crypto_newsession ,
+.Fn CRYPTODEV_PROBESESSION
+is invoked by the cryptographic framework on each active driver to
+determine the best driver to use for the session.
+This method should inspect the session parameters in
+.Fa csp .
+If a driver does not support requests described by
+.Fa csp ,
+this method should return an error value.
+If the driver does support requests described by
+.Fa csp ,
+it should return a negative value.
+The framework prefers drivers with the largest negative value,
+similar to
+.Xr DEVICE_PROBE 9 .
+The following values are defined for non-error return values from this
+method:
+.Bl -tag -width "CRYPTODEV_PROBE_ACCEL_SOFTWARE"
+.It Dv CRYPTODEV_PROBE_HARDWARE
+The driver processes requests via a co-processor.
+.It Dv CRYPTODEV_PROBE_ACCEL_SOFTWARE
+The driver processes requests on the host CPU using optimized instructions
+such as AES-NI.
+.It Dv CRYPTODEV_PROBE_SOFTWARE
+The driver processes requests on the host CPU.
+.El
+.Pp
+This method should not sleep.
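+.Pp
+A minimal sketch of this method for a hypothetical hardware driver
+that only supports AES-CBC:
+.Bd -literal -offset indent
+static int
+foo_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+
+	if (csp->csp_flags != 0)
+		return (EINVAL);
+	if (csp->csp_mode != CSP_MODE_CIPHER ||
+	    csp->csp_cipher_alg != CRYPTO_AES_CBC)
+		return (EINVAL);
+	return (CRYPTODEV_PROBE_HARDWARE);
+}
+.Ed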
+.Pp
+Once the framework has chosen a driver for a session,
+the framework invokes the
+.Fn CRYPTODEV_NEWSESSION
+method to initialize driver-specific session state.
+Prior to calling this method,
+the framework allocates a per-session driver-specific data structure.
+This structure is initialized with zeroes,
+and its size is set by the
+.Fa session_size
+passed to
+.Fn crypto_get_driverid .
+This method can retrieve a pointer to this data structure by passing
+.Fa crypto_session
+to
+.Fn crypto_get_driver_session .
+Session parameters are described in
+.Fa csp .
+.Pp
+This method should not sleep.
+.Pp
+.Fn CRYPTODEV_FREESESSION
+is invoked to release any driver-specific state when a session is
+destroyed.
+The per-session driver-specific data structure is explicitly zeroed
+and freed by the framework after this method returns.
+If a driver requires no additional tear-down steps, it can leave
+this method undefined.
+.Pp
+This method should not sleep.
+.Pp
+.Fn CRYPTODEV_PROCESS
+is invoked for each request submitted to an active session.
+This method can either complete a request synchronously or
+schedule it to be completed asynchronously,
+but it must not sleep.
+.Pp
+If this method is not able to complete a request due to insufficient
+resources such as a full command queue,
+it can defer the request by returning
+.Dv ERESTART .
+The request will be queued by the framework and retried once the
+driver releases pending requests via
+.Fn crypto_unblock .
+Any requests submitted to sessions belonging to the driver will also
+be queued until
+.Fn crypto_unblock
+is called.
+.Pp
+If a driver encounters errors while processing a request,
+it should report them via the
+.Fa crp_etype
+field of
+.Fa crp
+rather than returning an error directly.
+.Pp
+.Fa flags
+may be set to
+.Dv CRYPTO_HINT_MORE
+if there are additional requests queued for this driver.
+The driver can use this as a hint to batch completion interrupts.
+Note that these additional requests may be from different sessions.
+.Pp
+.Fn crypto_get_driver_session
+returns a pointer to the driver-specific per-session data structure
+for the session
+.Fa crypto_session .
+This function can be used in the
+.Fn CRYPTODEV_NEWSESSION ,
+.Fn CRYPTODEV_PROCESS ,
+and
+.Fn CRYPTODEV_FREESESSION
+callbacks.
+.Pp
+.Fn crypto_copydata
+copies
+.Fa size
+bytes out of the data buffer for
+.Fa crp
+into a local buffer pointed to by
+.Fa dst .
+The bytes are read starting at an offset of
+.Fa off
+bytes in the request's data buffer.
+.Pp
+.Fn crypto_copyback
+copies
+.Fa size
+bytes from the local buffer pointed to by
+.Fa src
+into the data buffer for
+.Fa crp .
+The bytes are written starting at an offset of
+.Fa off
+bytes in the request's data buffer.
+.Pp
+A driver calls
+.Fn crypto_done
+to mark the request
+.Fa crp
+as completed.
+Any errors should be set in
+.Fa crp_etype
+prior to calling this function.
+.Pp
+If a driver defers a request by returning
+.Dv ERESTART
+from
+.Fn CRYPTODEV_PROCESS ,
+the framework will queue all requests for the driver until the driver calls
+.Fn crypto_unblock
+to indicate that the temporary resource shortage has been relieved.
+For example,
+if a driver returns
+.Dv ERESTART
+due to a full command ring,
+it would invoke
+.Fn crypto_unblock
+from a command completion interrupt that makes a command ring entry available.
+.Fa driverid
+is the value returned by
+.Fn crypto_get_driverid .
+.Fa what
+indicates which types of requests the driver is able to handle again:
+.Bl -tag -width "CRYPTO_ASYMQ"
+.It Dv CRYPTO_SYMQ
+indicates that the driver is able to handle symmetric requests passed to
+.Fn CRYPTODEV_PROCESS .
+.It Dv CRYPTO_ASYMQ
+indicates that the driver is able to handle asymmetric requests passed to
+.Fn CRYPTODEV_KPROCESS .
+.El
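+.Pp
+The following sketch shows the deferral handshake for a hypothetical
+driver with a command ring that can fill;
+the softc fields and
+.Fn foo_enqueue
+are assumptions for illustration:
+.Bd -literal -offset indent
+static int
+foo_process(device_t dev, struct cryptop *crp, int hint)
+{
+	struct foo_softc *sc = device_get_softc(dev);
+	int error;
+
+	mtx_lock(&sc->sc_lock);
+	if (sc->sc_ring_full) {
+		/* Defer; retried after crypto_unblock(). */
+		sc->sc_blocked = true;
+		mtx_unlock(&sc->sc_lock);
+		return (ERESTART);
+	}
+	error = foo_enqueue(sc, crp);	/* queue on the hardware ring */
+	mtx_unlock(&sc->sc_lock);
+	if (error != 0) {
+		/* Report errors via crp_etype, not the return value. */
+		crp->crp_etype = error;
+		crypto_done(crp);
+	}
+	return (0);
+}
+
+/* Invoked from the command completion interrupt. */
+static void
+foo_ring_drained(struct foo_softc *sc)
+{
+
+	if (sc->sc_blocked) {
+		sc->sc_blocked = false;
+		crypto_unblock(sc->sc_cid, CRYPTO_SYMQ);
+	}
+}
+.Ed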
+.Pp
+.Fn crypto_apply
+is a helper routine that can be used to apply a caller-supplied function
+to a region of the data buffer for
+.Fa crp .
+The function
+.Fa f
+is called one or more times.
+For each invocation,
+the first argument to
+.Fa f
+is the value of
+.Fa arg
+passed to
+.Fn crypto_apply .
+The second and third arguments to
+.Fa f
+are a pointer and length to a segment of the buffer mapped into the kernel.
+The function is called enough times to cover the
+.Fa len
+bytes of the data buffer which starts at an offset
+.Fa off .
+If any invocation of
+.Fa f
+returns a non-zero value,
+.Fn crypto_apply
+immediately returns that value without invoking
+.Fa f
+on any remaining segments of the region;
+otherwise
+.Fn crypto_apply
+returns the value from the final call to
+.Fa f .
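+.Pp
+For example,
+a software driver might hash the payload region using the kernel
+SHA-256 implementation in a sketch like the following:
+.Bd -literal -offset indent
+#include <crypto/sha2/sha256.h>
+
+static int
+sha256_update_cb(void *arg, void *data, u_int len)
+{
+
+	SHA256_Update(arg, data, len);
+	return (0);
+}
+
+static int
+hash_payload(struct cryptop *crp, u_char digest[SHA256_DIGEST_LENGTH])
+{
+	SHA256_CTX ctx;
+	int error;
+
+	SHA256_Init(&ctx);
+	error = crypto_apply(crp, crp->crp_payload_start,
+	    crp->crp_payload_length, sha256_update_cb, &ctx);
+	if (error == 0)
+		SHA256_Final(digest, &ctx);
+	return (error);
+}
+.Ed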
+.Pp
+.Fn crypto_contiguous_subsegment
+attempts to locate a single, virtually-contiguous segment of the data buffer
+for
+.Fa crp .
+The segment must be
+.Fa len
+bytes long and start at an offset of
+.Fa skip
+bytes.
+If a segment is found,
+a pointer to the start of the segment is returned.
+Otherwise,
+.Dv NULL
+is returned.
+.Pp
+.Fn hmac_init_ipad
+prepares an authentication context to generate the inner hash of an HMAC.
+.Fa axf
+is a software implementation of an authentication algorithm such as the
+value returned by
+.Fn crypto_auth_hash .
+.Fa key
+is a pointer to an HMAC key of
+.Fa klen
+bytes.
+.Fa auth_ctx
+points to a valid authentication context for the desired algorithm.
+The function initializes the context with the supplied key.
+.Pp
+.Fn hmac_init_opad
+is similar to
+.Fn hmac_init_ipad
+except that it prepares an authentication context to generate the
+outer hash of an HMAC.
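+.Pp
+For example,
+a driver might precompute the inner and outer hash states in its
+.Fn CRYPTODEV_NEWSESSION
+method.
+In this sketch the session structure
+.Va s
+and its context members are hypothetical:
+.Bd -literal -offset indent
+struct auth_hash *axf;
+
+axf = crypto_auth_hash(csp);
+s->hash_len = axf->hashsize;
+hmac_init_ipad(axf, csp->csp_auth_key, csp->csp_auth_klen,
+    &s->ipad_ctx);
+hmac_init_opad(axf, csp->csp_auth_key, csp->csp_auth_klen,
+    &s->opad_ctx);
+.Ed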
+.Sh RETURN VALUES
+.Fn crypto_apply
+returns the return value from the caller-supplied callback function.
+.Pp
+.Fn crypto_contiguous_subsegment
+returns a pointer to a contiguous segment or
+.Dv NULL .
+.Pp
+.Fn crypto_get_driverid
+returns a driver identifier on success or -1 on error.
+.Pp
+.Fn crypto_unblock ,
+.Fn crypto_unregister_all ,
+.Fn CRYPTODEV_FREESESSION ,
+.Fn CRYPTODEV_NEWSESSION ,
+and
+.Fn CRYPTODEV_PROCESS
+return zero on success or an error on failure.
+.Pp
+.Fn CRYPTODEV_PROBESESSION
+returns a negative value on success or an error on failure.
+.Sh SEE ALSO
+.Xr crypto 7 ,
+.Xr crypto 9 ,
+.Xr crypto_request 9 ,
+.Xr crypto_session 9
Index: share/man/man9/crypto_request.9
===================================================================
--- /dev/null
+++ share/man/man9/crypto_request.9
@@ -0,0 +1,419 @@
+.\" Copyright (c) 2020, Chelsio Inc
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright notice,
+.\" this list of conditions and the following disclaimer.
+.\"
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" 3. Neither the name of the Chelsio Inc nor the names of its
+.\" contributors may be used to endorse or promote products derived from
+.\" this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+.\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+.\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" * Other names and brands may be claimed as the property of others.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd January 14, 2020
+.Dt CRYPTO_REQUEST 9
+.Os
+.Sh NAME
+.Nm crypto_request
+.Nd symmetric cryptographic operations
+.Sh SYNOPSIS
+.In opencrypto/cryptodev.h
+.Ft int
+.Fn crypto_dispatch "struct cryptop *crp"
+.Ft void
+.Fn crypto_freereq "struct cryptop *crp"
+.Ft "struct cryptop *"
+.Fn crypto_getreq "crypto_session_t cses" "int how"
+.Sh DESCRIPTION
+Each symmetric cryptographic operation in the kernel is described by
+an instance of
+.Vt struct cryptop
+and is associated with an active session.
+.Pp
+New requests are allocated by
+.Fn crypto_getreq .
+.Fa cses
+is a reference to an active session.
+.Fa how
+is passed to
+.Xr malloc 9
+and should be set to either
+.Dv M_NOWAIT
+or
+.Dv M_WAITOK .
+The caller should then set fields in the returned structure to describe
+request-specific parameters.
+Unused fields should be left as-is.
+.Pp
+.Fn crypto_dispatch
+passes a crypto request to the driver attached to the request's session.
+If there are errors in the request's fields, this function may return
+an error to the caller.
+If errors are encountered while servicing the request, they will instead
+be reported to the request's callback function
+.Pq Fa crp_callback
+via
+.Fa crp_etype .
+.Pp
+Note that a request's callback function may be invoked before
+.Fn crypto_dispatch
+returns.
+.Pp
+Once a request has signaled completion by invoking its callback function,
+it should be freed via
+.Fn crypto_freereq .
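+.Pp
+A typical consumer allocates a request,
+fills in the request-specific fields,
+dispatches it,
+and frees it from the callback.
+The following sketch assumes a digest session over a contiguous buffer
+holding
+.Va datalen
+bytes of data followed by room for the digest;
+the callback and variable names are hypothetical:
+.Bd -literal -offset indent
+static void
+mycb(struct cryptop *crp)
+{
+
+	/* Inspect crp->crp_etype here, then release the request. */
+	crypto_freereq(crp);
+}
+
+crp = crypto_getreq(cses, M_WAITOK);
+crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
+crp->crp_buf_type = CRYPTO_BUF_CONTIG;
+crp->crp_buf = buf;
+crp->crp_ilen = buflen;
+crp->crp_payload_start = 0;
+crp->crp_payload_length = datalen;
+crp->crp_digest_start = datalen;
+crp->crp_callback = mycb;
+error = crypto_dispatch(crp);
+.Ed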
+.Pp
+Cryptographic operations include several fields to describe the request.
+.Ss Buffer Types
+Requests are associated with a single data buffer that is modified in place.
+The type of the data buffer and the buffer itself are described by the
+following fields:
+.Bl -tag -width crp_buf_type
+.It Fa crp_buf_type
+The type of the data buffer.
+The following types are supported:
+.Bl -tag -width CRYPTO_BUF_CONTIG
+.It Dv CRYPTO_BUF_CONTIG
+An array of bytes mapped into the kernel's address space.
+.It Dv CRYPTO_BUF_UIO
+A scatter/gather list of kernel buffers as described in
+.Xr uio 9 .
+.It Dv CRYPTO_BUF_MBUF
+A network memory buffer as described in
+.Xr mbuf 9 .
+.El
+.It Fa crp_buf
+A pointer to the start of a
+.Dv CRYPTO_BUF_CONTIG
+data buffer.
+.It Fa crp_ilen
+The length of a
+.Dv CRYPTO_BUF_CONTIG
+data buffer.
+.It Fa crp_mbuf
+A pointer to a
+.Vt struct mbuf
+for
+.Dv CRYPTO_BUF_MBUF .
+.It Fa crp_uio
+A pointer to a
+.Vt struct uio
+for
+.Dv CRYPTO_BUF_UIO .
+.It Fa crp_olen
+Used with compression and decompression requests to describe the updated
+length of the payload region in the data buffer.
+.Pp
+If a compression request increases the size of the payload,
+then the data buffer is unmodified, the request completes successfully,
+and
+.Fa crp_olen
+is set to the size the compressed data would have used.
+Callers can compare this to the payload region length to determine if
+the compressed data was discarded.
+.El
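+.Pp
+For example,
+a consumer operating on a packet stored in an mbuf chain,
+here a hypothetical
+.Va m ,
+might set:
+.Bd -literal -offset indent
+crp->crp_buf_type = CRYPTO_BUF_MBUF;
+crp->crp_mbuf = m;
+.Ed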
+.Ss Request Regions
+Each request describes one or more regions in the data buffer.
+Each region is described by an offset relative to the start of the
+data buffer and a length.
+The length of some regions is the same for all requests belonging to
+a session.
+Those lengths are set in the session parameters of the associated
+session.
+All requests must define a payload region.
+Other regions are only required for specific session modes.
+The following regions are defined:
+.Bl -column "Payload" "crp_payload_start" "crp_payload_length"
+.It Sy Region Ta Sy Start Ta Sy Length Ta Sy Description
+.It AAD Ta Fa crp_aad_start Ta Fa crp_aad_length Ta
+Additional Authenticated Data
+.It IV Ta Fa crp_iv_start Ta Fa csp_ivlen Ta
+Embedded IV or nonce
+.It Payload Ta Fa crp_payload_start Ta Fa crp_payload_length Ta
+Data to encrypt, decrypt, compress, or decompress
+.It Digest Ta Fa crp_digest_start Ta Fa csp_auth_mlen Ta
+Authentication digest, hash, or tag
+.El
+.Pp
+Requests are permitted to operate on only a subset of the data buffer.
+For example,
+requests from IPsec operate on network packets that include headers not
+used as either additional authentication data (AAD) or payload data.
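+.Pp
+As an illustration,
+an encrypt-then-authenticate request for a packet laid out as
+headers, AAD, embedded IV, payload, and digest might describe its
+regions as follows,
+where the length variables are hypothetical:
+.Bd -literal -offset indent
+crp->crp_aad_start = hdrlen;
+crp->crp_aad_length = aadlen;
+crp->crp_iv_start = hdrlen + aadlen;
+crp->crp_payload_start = hdrlen + aadlen + ivlen;
+crp->crp_payload_length = datalen;
+crp->crp_digest_start = hdrlen + aadlen + ivlen + datalen;
+.Ed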
+.Ss Request Operations
+All requests must specify the type of operation to perform in
+.Fa crp_op .
+Available operations depend on the session's mode.
+.Pp
+Compression requests support the following operations:
+.Bl -tag -width CRYPTO_OP_DECOMPRESS
+.It Dv CRYPTO_OP_COMPRESS
+Compress the data in the payload region of the data buffer.
+.It Dv CRYPTO_OP_DECOMPRESS
+Decompress the data in the payload region of the data buffer.
+.El
+.Pp
+Cipher requests support the following operations:
+.Bl -tag -width CRYPTO_OP_DECRYPT
+.It Dv CRYPTO_OP_ENCRYPT
+Encrypt the data in the payload region of the data buffer.
+.It Dv CRYPTO_OP_DECRYPT
+Decrypt the data in the payload region of the data buffer.
+.El
+.Pp
+Digest requests support the following operations:
+.Bl -tag -width CRYPTO_OP_COMPUTE_DIGEST
+.It Dv CRYPTO_OP_COMPUTE_DIGEST
+Calculate a digest over the payload region of the data buffer
+and store the result in the digest region.
+.It Dv CRYPTO_OP_VERIFY_DIGEST
+Calculate a digest over the payload region of the data buffer.
+Compare the calculated digest to the existing digest from the digest region.
+If the digests match,
+complete the request successfully.
+If the digests do not match,
+fail the request with
+.Er EBADMSG .
+.El
+.Pp
+AEAD and Encrypt-then-Authenticate requests support the following
+operations:
+.Bl -tag -width CRYPTO_OP
+.It Dv CRYPTO_OP_ENCRYPT | Dv CRYPTO_OP_COMPUTE_DIGEST
+Encrypt the data in the payload region of the data buffer.
+Calculate a digest over the AAD and payload regions and store the
+result in the digest region of the data buffer.
+.It Dv CRYPTO_OP_DECRYPT | Dv CRYPTO_OP_VERIFY_DIGEST
+Calculate a digest over the AAD and payload regions of the data buffer.
+Compare the calculated digest to the existing digest from the digest region.
+If the digests match,
+decrypt the payload region.
+If the digests do not match,
+fail the request with
+.Er EBADMSG .
+.El
+.Ss Request IV and/or Nonce
+Some cryptographic operations require an IV or nonce as an input.
+An IV may be stored either in the IV region of the data buffer or in
+.Fa crp_iv .
+By default,
+the IV is assumed to be stored in the IV region.
+If the IV is stored in
+.Fa crp_iv ,
+.Dv CRYPTO_F_IV_SEPARATE
+should be set in
+.Fa crp_flags
+and
+.Fa crp_iv_start
+should be left as zero.
+.Pp
+An encryption request using an IV stored in the IV region may set
+.Dv CRYPTO_F_IV_GENERATE
+in
+.Fa crp_flags
+to request that the driver generate a random IV.
+Note that
+.Dv CRYPTO_F_IV_GENERATE
+cannot be used with decryption operations or in combination with
+.Dv CRYPTO_F_IV_SEPARATE .
+.Pp
+Requests that store part, but not all, of the IV in the data buffer should
+store the partial IV in the data buffer and pass the full IV separately in
+.Fa crp_iv .
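+.Pp
+For example,
+a request supplying a nonce out of band might be initialized as in
+this sketch,
+where
+.Va nonce
+is caller-provided and
+.Va ivlen
+matches the session's
+.Fa csp_ivlen :
+.Bd -literal -offset indent
+crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
+memcpy(crp->crp_iv, nonce, ivlen);
+.Ed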
+.Ss Request and Callback Scheduling
+The crypto framework provides multiple methods of scheduling the dispatch
+of requests to drivers along with the processing of driver callbacks.
+Requests use flags in
+.Fa crp_flags
+to select the desired scheduling methods.
+.Pp
+.Fn crypto_dispatch
+can pass the request to the session's driver via three different methods:
+.Bl -enum
+.It
+The request is queued to a taskqueue backed by a pool of worker threads.
+By default the pool is sized to provide one thread for each CPU.
+Worker threads dequeue requests and pass them to the driver
+asynchronously.
+.It
+The request is passed to the driver synchronously in the context of the
+thread invoking
+.Fn crypto_dispatch .
+.It
+The request is queued to a queue of pending requests.
+A single worker thread dequeues requests and passes them to the driver
+asynchronously.
+.El
+.Pp
+To select the first method (taskqueue backed by multiple threads),
+requests should set
+.Dv CRYPTO_F_ASYNC .
+To always use the third method (queue to single worker thread),
+requests should set
+.Dv CRYPTO_F_BATCH .
+If both flags are set,
+.Dv CRYPTO_F_ASYNC
+takes precedence.
+If neither flag is set,
+.Fn crypto_dispatch
+will first attempt the second method (invoke driver synchronously).
+If the driver is blocked,
+the request will be queued using the third method.
+One caveat is that the first method is only used for requests using
+software drivers, which use host CPUs to process requests.
+Requests whose session is associated with a hardware driver will ignore
+.Dv CRYPTO_F_ASYNC
+and only use
+.Dv CRYPTO_F_BATCH
+to determine how requests should be scheduled.
+.Pp
+In addition to bypassing synchronous dispatch in
+.Fn crypto_dispatch ,
+.Dv CRYPTO_F_BATCH
+requests additional changes aimed at optimizing batches of requests to
+the same driver.
+When the worker thread processes a request with
+.Dv CRYPTO_F_BATCH ,
+it will search the pending request queue for any other requests for the same
+driver,
+including requests from different sessions.
+If any other requests are present,
+.Dv CRYPTO_HINT_MORE
+is passed to the driver's process method.
+Drivers may use this to batch completion interrupts.
+.Pp
+Callback function scheduling is simpler than request scheduling.
+Callbacks can either be invoked synchronously from
+.Fn crypto_done ,
+or they can be queued to a pool of worker threads.
+This pool of worker threads is also sized to provide one worker thread
+for each CPU by default.
+Note that a callback function invoked synchronously from
+.Fn crypto_done
+must follow the same restrictions placed on threaded interrupt handlers.
+.Pp
+By default,
+callbacks are invoked asynchronously by a worker thread.
+If
+.Dv CRYPTO_F_CBIMM
+is set,
+the callback is always invoked synchronously from
+.Fn crypto_done .
+If
+.Dv CRYPTO_F_CBIFSYNC
+is set,
+the callback is invoked synchronously if the request was processed by a
+software driver or asynchronously if the request was processed by a
+hardware driver.
+.Pp
+If a request was scheduled to the taskqueue via
+.Dv CRYPTO_F_ASYNC ,
+callbacks are always invoked asynchronously, ignoring
+.Dv CRYPTO_F_CBIMM
+and
+.Dv CRYPTO_F_CBIFSYNC .
+In this case,
+.Dv CRYPTO_F_ASYNC_KEEPORDER
+may be set to ensure that callbacks for requests on a given session are
+invoked in the same order that requests were queued to the session via
+.Fn crypto_dispatch .
+This flag is used by IPsec to ensure that decrypted network packets are
+passed up the network stack in roughly the same order they were received.
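+.Pp
+For example,
+a consumer that wants parallel dispatch across the worker thread pool
+while preserving per-session completion order might set:
+.Bd -literal -offset indent
+crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER;
+error = crypto_dispatch(crp);
+.Ed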
+.Ss Other Request Fields
+In addition to the fields and flags enumerated above,
+.Vt struct cryptop
+includes the following:
+.Bl -tag -width crp_payload_length
+.It Fa crp_session
+A reference to the active session.
+This is set when the request is created by
+.Fn crypto_getreq
+and should not be modified.
+Drivers can use this to fetch driver-specific session state or
+session parameters.
+.It Fa crp_etype
+Error status.
+Either zero on success, or an error if a request fails.
+Set by drivers prior to completing a request via
+.Fn crypto_done .
+.It Fa crp_flags
+A bitmask of flags.
+The following flags are available in addition to flags discussed previously:
+.Bl -tag -width CRYPTO_F_DONE
+.It Dv CRYPTO_F_DONE
+Set by
+.Fn crypto_done
+before calling
+.Fa crp_callback .
+This flag is not very useful and will likely be removed in the future.
+It can only be safely checked from the callback routine at which point
+it is always set.
+.El
+.It Fa crp_cipher_key
+Pointer to a request-specific encryption key.
+If this value is not set,
+the request uses the session encryption key.
+.It Fa crp_auth_key
+Pointer to a request-specific authentication key.
+If this value is not set,
+the request uses the session authentication key.
+.It Fa crp_opaque
+An opaque pointer.
+This pointer permits users of the cryptographic framework to store
+information about a request to be used in the callback.
+.It Fa crp_callback
+Callback function.
+This must point to a callback function of type
+.Vt void (*)(struct cryptop *) .
+The callback function should inspect
+.Fa crp_etype
+to determine the status of the completed operation.
+It should also arrange for the request to be freed via
+.Fn crypto_freereq .
+.El
+.Sh RETURN VALUES
+.Fn crypto_dispatch
+returns an error if the request contained invalid fields,
+or zero if the request was valid.
+.Fn crypto_getreq
+returns a pointer to a new request structure on success,
+or
+.Dv NULL
+on failure.
+.Dv NULL
+can only be returned if
+.Dv M_NOWAIT
+was passed in
+.Fa how .
+.Sh SEE ALSO
+.Xr ipsec 4 ,
+.Xr crypto 7 ,
+.Xr crypto 9 ,
+.Xr crypto_session 9 ,
+.Xr mbuf 9 ,
+.Xr uio 9
+.Sh BUGS
+Not all drivers properly handle mixing session and per-request keys
+within a single session.
+Consumers should either use a single key for a session specified in
+the session parameters or always use per-request keys.
Index: share/man/man9/crypto_session.9
===================================================================
--- /dev/null
+++ share/man/man9/crypto_session.9
@@ -0,0 +1,245 @@
+.\" Copyright (c) 2020, Chelsio Inc
+.\"
+.\" Redistribution and use in source and binary forms, with or without
+.\" modification, are permitted provided that the following conditions are met:
+.\"
+.\" 1. Redistributions of source code must retain the above copyright notice,
+.\" this list of conditions and the following disclaimer.
+.\"
+.\" 2. Redistributions in binary form must reproduce the above copyright
+.\" notice, this list of conditions and the following disclaimer in the
+.\" documentation and/or other materials provided with the distribution.
+.\"
+.\" 3. Neither the name of the Chelsio Inc nor the names of its
+.\" contributors may be used to endorse or promote products derived from
+.\" this software without specific prior written permission.
+.\"
+.\" THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+.\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+.\" IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+.\" ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+.\" LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+.\" CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+.\" SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+.\" INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+.\" CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+.\" ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+.\" POSSIBILITY OF SUCH DAMAGE.
+.\"
+.\" * Other names and brands may be claimed as the property of others.
+.\"
+.\" $FreeBSD$
+.\"
+.Dd January 13, 2020
+.Dt CRYPTO_SESSION 9
+.Os
+.Sh NAME
+.Nm crypto_session
+.Nd state used for symmetric cryptographic services
+.Sh SYNOPSIS
+.In opencrypto/cryptodev.h
+.Ft struct auth_hash *
+.Fn crypto_auth_hash "const struct crypto_session_params *csp"
+.Ft struct enc_xform *
+.Fn crypto_cipher "const struct crypto_session_params *csp"
+.Ft const struct crypto_session_params *
+.Fn crypto_get_params "crypto_session_t cses"
+.Ft int
+.Fo crypto_newsession
+.Fa "crypto_session_t *cses"
+.Fa "const struct crypto_session_params *csp"
+.Fa "int crid"
+.Fc
+.Ft int
+.Fn crypto_freesession "crypto_session_t cses"
+.Sh DESCRIPTION
+Symmetric cryptographic operations in the kernel are associated with
+cryptographic sessions.
+Sessions hold state shared across multiple requests.
+Active sessions are associated with a single cryptographic driver.
+.Pp
+The
+.Vt crypto_session_t
+type represents an opaque reference to an active session.
+Session objects are allocated and managed by the cryptographic
+framework.
+.Pp
+New sessions are created by
+.Fn crypto_newsession .
+.Fa csp
+describes various parameters associated with the new session such as
+the algorithms to use and any session-wide keys.
+.Fa crid
+can be used to request either a specific cryptographic driver or
+classes of drivers.
+For the latter case,
+.Fa crid
+should be set to a mask of the following values:
+.Bl -tag -width "CRYPTOCAP_F_HARDWARE"
+.It Dv CRYPTOCAP_F_HARDWARE
+Request hardware drivers.
+Hardware drivers do not use the host CPU to perform operations.
+Typically, a separate co-processor performs the operations asynchronously.
+.It Dv CRYPTOCAP_F_SOFTWARE
+Request software drivers.
+Software drivers use the host CPU to perform operations.
+The kernel includes a simple, yet portable implementation of each supported
+algorithm in the
+.Xr cryptosoft 4
+driver.
+Additional software drivers may also be available on architectures which
+provide instructions designed to accelerate cryptographic operations.
+.El
+.Pp
+If both hardware and software drivers are requested,
+hardware drivers are preferred over software drivers.
+Accelerated software drivers are preferred over the baseline software driver.
+If multiple hardware drivers are available,
+the framework will distribute sessions across these drivers in a round-robin
+fashion.
+.Pp
+On success,
+.Fn crypto_newsession
+saves a reference to the newly created session in
+.Fa cses .
+.Pp
+.Fn crypto_freesession
+is used to free the resources associated with the session
+.Fa cses .
+.Pp
+.Fn crypto_auth_hash
+returns a structure describing the baseline software implementation of an
+authentication algorithm requested by
+.Fa csp .
+If
+.Fa csp
+does not specify an authentication algorithm,
+or requests an invalid algorithm,
+.Dv NULL
+is returned.
+.Pp
+.Fn crypto_cipher
+returns a structure describing the baseline software implementation of an
+encryption algorithm requested by
+.Fa csp .
+If
+.Fa csp
+does not specify an encryption algorithm,
+or requests an invalid algorithm,
+.Dv NULL
+is returned.
+.Pp
+.Fn crypto_get_params
+returns a pointer to the session parameters used by
+.Fa cses .
+.Ss Session Parameters
+Session parameters are used to describe the cryptographic operations
+performed by cryptographic requests.
+Parameters are stored in an instance of
+.Vt struct crypto_session_params .
+When initializing parameters to pass to
+.Fn crypto_newsession ,
+the entire structure should first be zeroed.
+Needed fields should then be set leaving unused fields as zero.
+This structure contains the following fields:
+.Bl -tag -width csp_cipher_klen
+.It Fa csp_mode
+Type of operation to perform.
+This field must be set to one of the following:
+.Bl -tag -width CSP_MODE_COMPRESS
+.It Dv CSP_MODE_COMPRESS
+Compress or decompress request payload.
+.Pp
+The compression algorithm is specified in
+.Fa csp_cipher_alg .
+.It Dv CSP_MODE_CIPHER
+Encrypt or decrypt request payload.
+.Pp
+The encryption algorithm is specified in
+.Fa csp_cipher_alg .
+.It Dv CSP_MODE_DIGEST
+Compute or verify a digest, or hash, of request payload.
+.Pp
+The authentication algorithm is specified in
+.Fa csp_auth_alg .
+.It Dv CSP_MODE_AEAD
+Authenticated encryption with additional data.
+Decryption operations require the digest, or tag,
+and fail if it does not match.
+.Pp
+The AEAD algorithm is specified in
+.Fa csp_cipher_alg .
+.It Dv CSP_MODE_ETA
+Encrypt-then-Authenticate.
+In this mode, encryption operations encrypt the payload and then
+compute an authentication digest over the request additional authentication
+data followed by the encrypted payload.
+Decryption operations fail without decrypting the data if the provided digest
+does not match.
+.Pp
+The encryption algorithm is specified in
+.Fa csp_cipher_alg
+and the authentication algorithm is specified in
+.Fa csp_auth_alg .
+.El
+.It Fa csp_flags
+Currently, no additional flags are defined and this field should be set to
+zero.
+.It Fa csp_ivlen
+If either the cipher or authentication algorithms require an explicit
+initialization vector (IV) or nonce,
+this specifies the length in bytes.
+All requests for a session use the same IV length.
+.It Fa csp_cipher_alg
+Encryption or compression algorithm.
+.It Fa csp_cipher_klen
+Length of encryption or decryption key in bytes.
+All requests for a session use the same key length.
+.It Fa csp_cipher_key
+Pointer to encryption or decryption key.
+If all requests for a session use request-specific keys,
+this field should be left as
+.Dv NULL .
+This pointer and associated key must remain valid for the duration of the
+crypto session.
+.It Fa csp_auth_alg
+Authentication algorithm.
+.It Fa csp_auth_klen
+Length of authentication key in bytes.
+If the authentication algorithm does not use a key,
+this field should be left as zero.
+.It Fa csp_auth_key
+Pointer to the authentication key.
+If all requests for a session use request-specific keys,
+this field should be left as
+.Dv NULL .
+This pointer and associated key must remain valid for the duration of the
+crypto session.
+.It Fa csp_auth_mlen
+The length in bytes of the digest.
+If zero, the full length of the digest is used.
+If non-zero, the first
+.Fa csp_auth_mlen
+bytes of the digest are used.
+.El
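+.Pp
+As an example,
+the following sketch creates an Encrypt-then-Authenticate session
+using AES-CBC and HMAC-SHA-256 with session-wide keys;
+.Va cipher_key
+and
+.Va auth_key
+are caller-provided buffers that must remain valid for the duration
+of the session:
+.Bd -literal -offset indent
+struct crypto_session_params csp;
+crypto_session_t cses;
+int error;
+
+memset(&csp, 0, sizeof(csp));
+csp.csp_mode = CSP_MODE_ETA;
+csp.csp_cipher_alg = CRYPTO_AES_CBC;
+csp.csp_cipher_klen = 32;	/* AES-256 */
+csp.csp_cipher_key = cipher_key;
+csp.csp_ivlen = 16;		/* AES block size */
+csp.csp_auth_alg = CRYPTO_SHA2_256_HMAC;
+csp.csp_auth_klen = 32;
+csp.csp_auth_key = auth_key;
+error = crypto_newsession(&cses, &csp,
+    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
+.Ed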
+.Sh RETURN VALUES
+.Fn crypto_newsession
+returns a non-zero value if an error occurs or zero on success.
+.Pp
+.Fn crypto_auth_hash
+and
+.Fn crypto_cipher
+return
+.Dv NULL
+if the session parameters are invalid,
+or a pointer to a structure on success.
+.Sh SEE ALSO
+.Xr crypto 7 ,
+.Xr crypto 9 ,
+.Xr crypto_request 9
+.Sh BUGS
+The current implementation of
+.Fn crypto_freesession
+does not provide a way for the caller to know that there are no other
+references to the keys stored in the session's associated parameters.
+This function should probably sleep until any in-flight cryptographic
+operations associated with the session are completed.
Index: sys/crypto/aesni/aesni.h
===================================================================
--- sys/crypto/aesni/aesni.h
+++ sys/crypto/aesni/aesni.h
@@ -56,16 +56,16 @@
uint8_t enc_schedule[AES_SCHED_LEN] __aligned(16);
uint8_t dec_schedule[AES_SCHED_LEN] __aligned(16);
uint8_t xts_schedule[AES_SCHED_LEN] __aligned(16);
- /* Same as the SHA256 Blocksize. */
- uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16);
- int algo;
int rounds;
/* uint8_t *ses_ictx; */
/* uint8_t *ses_octx; */
- /* int ses_mlen; */
int used;
- int auth_algo;
int mlen;
+ int hash_len;
+ void (*hash_init)(void *);
+ void (*hash_update)(void *, const void *, unsigned);
+ void (*hash_finalize)(void *, void *);
+ bool hmac;
};
/*
@@ -120,7 +120,7 @@
const unsigned char *addt, const unsigned char *ivec,
const unsigned char *tag, uint32_t nbytes, uint32_t abytes, int ibytes,
const unsigned char *key, int nr);
-int aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
- int keylen);
+void aesni_cipher_setup_common(struct aesni_session *ses,
+ const struct crypto_session_params *csp, const uint8_t *key, int keylen);
#endif /* _AESNI_H_ */
Index: sys/crypto/aesni/aesni.c
===================================================================
--- sys/crypto/aesni/aesni.c
+++ sys/crypto/aesni/aesni.c
@@ -88,16 +88,13 @@
(ctx) = NULL; \
} while (0)
-static int aesni_newsession(device_t, crypto_session_t cses,
- struct cryptoini *cri);
static int aesni_cipher_setup(struct aesni_session *ses,
- struct cryptoini *encini, struct cryptoini *authini);
-static int aesni_cipher_process(struct aesni_session *ses,
- struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp);
-static int aesni_cipher_crypt(struct aesni_session *ses,
- struct cryptodesc *enccrd, struct cryptodesc *authcrd, struct cryptop *crp);
-static int aesni_cipher_mac(struct aesni_session *ses, struct cryptodesc *crd,
- struct cryptop *crp);
+ const struct crypto_session_params *csp);
+static int aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp);
+static int aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
+ const struct crypto_session_params *csp);
+static int aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
+ const struct crypto_session_params *csp);
MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");
@@ -170,7 +167,7 @@
sc = device_get_softc(dev);
sc->cid = crypto_get_driverid(dev, sizeof(struct aesni_session),
- CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC);
+ CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
if (sc->cid < 0) {
device_printf(dev, "Could not get crypto driver id.\n");
return (ENOMEM);
@@ -187,25 +184,6 @@
}
detect_cpu_features(&sc->has_aes, &sc->has_sha);
- if (sc->has_aes) {
- crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_CCM_16, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_CCM_CBC_MAC, 0, 0);
- }
- if (sc->has_sha) {
- crypto_register(sc->cid, CRYPTO_SHA1, 0, 0);
- crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_SHA2_224, 0, 0);
- crypto_register(sc->cid, CRYPTO_SHA2_224_HMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_SHA2_256, 0, 0);
- crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0);
- }
return (0);
}
@@ -223,115 +201,125 @@
return (0);
}
+static bool
+aesni_auth_supported(struct aesni_softc *sc,
+ const struct crypto_session_params *csp)
+{
+
+ if (!sc->has_sha)
+ return (false);
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA2_224:
+ case CRYPTO_SHA2_256:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_224_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ break;
+ default:
+ return (false);
+ }
+
+ return (true);
+}
+
+static bool
+aesni_cipher_supported(struct aesni_softc *sc,
+ const struct crypto_session_params *csp)
+{
+
+ if (!sc->has_aes)
+ return (false);
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ case CRYPTO_AES_ICM:
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return (false);
+ return (sc->has_aes);
+ case CRYPTO_AES_XTS:
+ if (csp->csp_ivlen != AES_XTS_IV_LEN)
+ return (false);
+ return (sc->has_aes);
+ default:
+ return (false);
+ }
+}
+
static int
-aesni_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+aesni_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+ struct aesni_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ if (!aesni_auth_supported(sc, csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!aesni_cipher_supported(sc, csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_AEAD:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ if (csp->csp_auth_mlen != 0 &&
+ csp->csp_auth_mlen != GMAC_DIGEST_LEN)
+ return (EINVAL);
+ if (csp->csp_ivlen != AES_GCM_IV_LEN ||
+ !sc->has_aes)
+ return (EINVAL);
+ break;
+ case CRYPTO_AES_CCM_16:
+ if (csp->csp_auth_mlen != 0 &&
+ csp->csp_auth_mlen != AES_CBC_MAC_HASH_LEN)
+ return (EINVAL);
+ if (csp->csp_ivlen != AES_CCM_IV_LEN ||
+ !sc->has_aes)
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+ break;
+ case CSP_MODE_ETA:
+ if (!aesni_auth_supported(sc, csp) ||
+ !aesni_cipher_supported(sc, csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
+}
+
+static int
+aesni_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
{
struct aesni_softc *sc;
struct aesni_session *ses;
- struct cryptoini *encini, *authini;
- bool gcm_hash, gcm;
- bool cbc_hash, ccm;
int error;
- KASSERT(cses != NULL, ("EDOOFUS"));
- if (cri == NULL) {
- CRYPTDEB("no cri");
- return (EINVAL);
- }
-
sc = device_get_softc(dev);
ses = crypto_get_driver_session(cses);
- authini = NULL;
- encini = NULL;
- gcm = false;
- gcm_hash = false;
- ccm = cbc_hash = false;
-
- for (; cri != NULL; cri = cri->cri_next) {
- switch (cri->cri_alg) {
- case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_CCM_16:
- if (cri->cri_alg == CRYPTO_AES_NIST_GCM_16) {
- gcm = true;
- } else if (cri->cri_alg == CRYPTO_AES_CCM_16) {
- ccm = true;
- }
- /* FALLTHROUGH */
- case CRYPTO_AES_CBC:
- case CRYPTO_AES_ICM:
- case CRYPTO_AES_XTS:
- if (!sc->has_aes)
- goto unhandled;
- if (encini != NULL) {
- CRYPTDEB("encini already set");
- return (EINVAL);
- }
- encini = cri;
- break;
- case CRYPTO_AES_CCM_CBC_MAC:
- cbc_hash = true;
- authini = cri;
- break;
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- /*
- * nothing to do here, maybe in the future cache some
- * values for GHASH
- */
- if (authini != NULL) {
- CRYPTDEB("authini already set");
- return (EINVAL);
- }
- gcm_hash = true;
- authini = cri;
- break;
- case CRYPTO_SHA1:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_224:
- case CRYPTO_SHA2_224_HMAC:
- case CRYPTO_SHA2_256:
- case CRYPTO_SHA2_256_HMAC:
- if (!sc->has_sha)
- goto unhandled;
- if (authini != NULL) {
- CRYPTDEB("authini already set");
- return (EINVAL);
- }
- authini = cri;
- break;
- default:
-unhandled:
- CRYPTDEB("unhandled algorithm");
- return (EINVAL);
- }
- }
- if (encini == NULL && authini == NULL) {
- CRYPTDEB("no cipher");
- return (EINVAL);
- }
- /*
- * GMAC algorithms are only supported with simultaneous GCM. Likewise
- * GCM is not supported without GMAC.
- */
- if (gcm_hash != gcm) {
- CRYPTDEB("gcm_hash != gcm");
- return (EINVAL);
- }
-
- if (cbc_hash != ccm) {
- CRYPTDEB("cbc_hash != ccm");
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ case CSP_MODE_CIPHER:
+ case CSP_MODE_AEAD:
+ case CSP_MODE_ETA:
+ break;
+ default:
return (EINVAL);
}
-
- if (encini != NULL)
- ses->algo = encini->cri_alg;
- if (authini != NULL)
- ses->auth_algo = authini->cri_alg;
-
- error = aesni_cipher_setup(ses, encini, authini);
+ error = aesni_cipher_setup(ses, csp);
if (error != 0) {
CRYPTDEB("setup failed");
return (error);
@@ -344,108 +332,31 @@
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
struct aesni_session *ses;
- struct cryptodesc *crd, *enccrd, *authcrd;
- int error, needauth;
-
- ses = NULL;
- error = 0;
- enccrd = NULL;
- authcrd = NULL;
- needauth = 0;
-
- /* Sanity check. */
- if (crp == NULL)
- return (EINVAL);
-
- if (crp->crp_callback == NULL || crp->crp_desc == NULL ||
- crp->crp_session == NULL) {
- error = EINVAL;
- goto out;
- }
-
- for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
- switch (crd->crd_alg) {
- case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_CCM_16:
- needauth = 1;
- /* FALLTHROUGH */
- case CRYPTO_AES_CBC:
- case CRYPTO_AES_ICM:
- case CRYPTO_AES_XTS:
- if (enccrd != NULL) {
- error = EINVAL;
- goto out;
- }
- enccrd = crd;
- break;
-
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- case CRYPTO_AES_CCM_CBC_MAC:
- case CRYPTO_SHA1:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_224:
- case CRYPTO_SHA2_224_HMAC:
- case CRYPTO_SHA2_256:
- case CRYPTO_SHA2_256_HMAC:
- if (authcrd != NULL) {
- error = EINVAL;
- goto out;
- }
- authcrd = crd;
- break;
-
- default:
- error = EINVAL;
- goto out;
- }
- }
-
- if ((enccrd == NULL && authcrd == NULL) ||
- (needauth && authcrd == NULL)) {
- error = EINVAL;
- goto out;
- }
-
- /* CBC & XTS can only handle full blocks for now */
- if (enccrd != NULL && (enccrd->crd_alg == CRYPTO_AES_CBC ||
- enccrd->crd_alg == CRYPTO_AES_XTS) &&
- (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
- error = EINVAL;
- goto out;
- }
+ int error;
ses = crypto_get_driver_session(crp->crp_session);
- KASSERT(ses != NULL, ("EDOOFUS"));
- error = aesni_cipher_process(ses, enccrd, authcrd, crp);
- if (error != 0)
- goto out;
+ error = aesni_cipher_process(ses, crp);
-out:
crp->crp_etype = error;
crypto_done(crp);
- return (error);
+ return (0);
}
static uint8_t *
-aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
- bool *allocated)
+aesni_cipher_alloc(struct cryptop *crp, int start, int length, bool *allocated)
{
uint8_t *addr;
- addr = crypto_contiguous_subsegment(crp->crp_flags,
- crp->crp_buf, enccrd->crd_skip, enccrd->crd_len);
+ addr = crypto_contiguous_subsegment(crp, start, length);
if (addr != NULL) {
*allocated = false;
return (addr);
}
- addr = malloc(enccrd->crd_len, M_AESNI, M_NOWAIT);
+ addr = malloc(length, M_AESNI, M_NOWAIT);
if (addr != NULL) {
*allocated = true;
- crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
- enccrd->crd_len, addr);
+ crypto_copydata(crp, start, length, addr);
} else
*allocated = false;
return (addr);
@@ -457,6 +368,7 @@
DEVMETHOD(device_attach, aesni_attach),
DEVMETHOD(device_detach, aesni_detach),
+ DEVMETHOD(cryptodev_probesession, aesni_probesession),
DEVMETHOD(cryptodev_newsession, aesni_newsession),
DEVMETHOD(cryptodev_process, aesni_process),
@@ -474,63 +386,7 @@
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);
-static int
-aesni_authprepare(struct aesni_session *ses, int klen, const void *cri_key)
-{
- int keylen;
-
- if (klen % 8 != 0)
- return (EINVAL);
- keylen = klen / 8;
- if (keylen > sizeof(ses->hmac_key))
- return (EINVAL);
- if (ses->auth_algo == CRYPTO_SHA1 && keylen > 0)
- return (EINVAL);
- memcpy(ses->hmac_key, cri_key, keylen);
- return (0);
-}
-
-static int
-aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini,
- struct cryptoini *authini)
-{
- struct fpu_kern_ctx *ctx;
- int kt, ctxidx, error;
-
- switch (ses->auth_algo) {
- case CRYPTO_SHA1:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_224:
- case CRYPTO_SHA2_224_HMAC:
- case CRYPTO_SHA2_256:
- case CRYPTO_SHA2_256_HMAC:
- error = aesni_authprepare(ses, authini->cri_klen,
- authini->cri_key);
- if (error != 0)
- return (error);
- ses->mlen = authini->cri_mlen;
- }
-
- kt = is_fpu_kern_thread(0) || (encini == NULL);
- if (!kt) {
- ACQUIRE_CTX(ctxidx, ctx);
- fpu_kern_enter(curthread, ctx,
- FPU_KERN_NORMAL | FPU_KERN_KTHR);
- }
-
- error = 0;
- if (encini != NULL)
- error = aesni_cipher_setup_common(ses, encini->cri_key,
- encini->cri_klen);
-
- if (!kt) {
- fpu_kern_leave(curthread, ctx);
- RELEASE_CTX(ctxidx, ctx);
- }
- return (error);
-}
-
-static int
+static void
intel_sha1_update(void *vctx, const void *vdata, u_int datalen)
{
struct sha1_ctxt *ctx = vctx;
@@ -563,7 +419,6 @@
intel_sha1_step(ctx->h.b32, (void *)ctx->m.b8, 1);
off += copysiz;
}
- return (0);
}
static void
@@ -578,7 +433,7 @@
sha1_result(ctx, digest);
}
-static int
+static void
intel_sha256_update(void *vctx, const void *vdata, u_int len)
{
SHA256_CTX *ctx = vctx;
@@ -599,7 +454,7 @@
/* Handle the case where we don't need to perform any transforms */
if (len < 64 - r) {
memcpy(&ctx->buf[r], src, len);
- return (0);
+ return;
}
/* Finish the current block */
@@ -618,7 +473,6 @@
/* Copy left over data into buffer */
memcpy(ctx->buf, src, len);
- return (0);
}
static void
@@ -645,42 +499,145 @@
SHA256_Final(digest, ctx);
}
-/*
- * Compute the HASH( (key ^ xorbyte) || buf )
- */
-static void
-hmac_internal(void *ctx, uint32_t *res,
- int (*update)(void *, const void *, u_int),
- void (*finalize)(void *, void *), uint8_t *key, uint8_t xorbyte,
- const void *buf, size_t off, size_t buflen, int crpflags)
+static int
+aesni_authprepare(struct aesni_session *ses, int klen)
+{
+
+ if (klen > SHA1_BLOCK_LEN)
+ return (EINVAL);
+ if ((ses->hmac && klen == 0) || (!ses->hmac && klen != 0))
+ return (EINVAL);
+ return (0);
+}
+
+static int
+aesni_cipherprepare(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_ICM:
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ case CRYPTO_AES_CBC:
+ switch (csp->csp_cipher_klen * 8) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ CRYPTDEB("invalid CBC/ICM/GCM key length");
+ return (EINVAL);
+ }
+ break;
+ case CRYPTO_AES_XTS:
+ switch (csp->csp_cipher_klen * 8) {
+ case 256:
+ case 512:
+ break;
+ default:
+ CRYPTDEB("invalid XTS key length");
+ return (EINVAL);
+ }
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (0);
+}
+
+static int
+aesni_cipher_setup(struct aesni_session *ses,
+ const struct crypto_session_params *csp)
{
- size_t i;
+ struct fpu_kern_ctx *ctx;
+ int kt, ctxidx, error;
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA1_HMAC:
+ ses->hmac = true;
+ /* FALLTHROUGH */
+ case CRYPTO_SHA1:
+ ses->hash_len = SHA1_HASH_LEN;
+ ses->hash_init = SHA1_Init_fn;
+ ses->hash_update = intel_sha1_update;
+ ses->hash_finalize = SHA1_Finalize_fn;
+ break;
+ case CRYPTO_SHA2_224_HMAC:
+ ses->hmac = true;
+ /* FALLTHROUGH */
+ case CRYPTO_SHA2_224:
+ ses->hash_len = SHA2_224_HASH_LEN;
+ ses->hash_init = SHA224_Init_fn;
+ ses->hash_update = intel_sha256_update;
+ ses->hash_finalize = SHA224_Finalize_fn;
+ break;
+ case CRYPTO_SHA2_256_HMAC:
+ ses->hmac = true;
+ /* FALLTHROUGH */
+ case CRYPTO_SHA2_256:
+ ses->hash_len = SHA2_256_HASH_LEN;
+ ses->hash_init = SHA256_Init_fn;
+ ses->hash_update = intel_sha256_update;
+ ses->hash_finalize = SHA256_Finalize_fn;
+ break;
+ }
+
+ if (ses->hash_len != 0) {
+ if (csp->csp_auth_mlen == 0)
+ ses->mlen = ses->hash_len;
+ else
+ ses->mlen = csp->csp_auth_mlen;
+
+ error = aesni_authprepare(ses, csp->csp_auth_klen);
+ if (error != 0)
+ return (error);
+ }
+
+ error = aesni_cipherprepare(csp);
+ if (error != 0)
+ return (error);
+
+ kt = is_fpu_kern_thread(0) || (csp->csp_cipher_alg == 0);
+ if (!kt) {
+ ACQUIRE_CTX(ctxidx, ctx);
+ fpu_kern_enter(curthread, ctx,
+ FPU_KERN_NORMAL | FPU_KERN_KTHR);
+ }
- for (i = 0; i < 64; i++)
- key[i] ^= xorbyte;
- update(ctx, key, 64);
- for (i = 0; i < 64; i++)
- key[i] ^= xorbyte;
+ error = 0;
+ if (csp->csp_cipher_key != NULL)
+ aesni_cipher_setup_common(ses, csp, csp->csp_cipher_key,
+ csp->csp_cipher_klen);
- crypto_apply(crpflags, __DECONST(void *, buf), off, buflen,
- __DECONST(int (*)(void *, void *, u_int), update), ctx);
- finalize(res, ctx);
+ if (!kt) {
+ fpu_kern_leave(curthread, ctx);
+ RELEASE_CTX(ctxidx, ctx);
+ }
+ return (error);
}
static int
-aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
- struct cryptodesc *authcrd, struct cryptop *crp)
+aesni_cipher_process(struct aesni_session *ses, struct cryptop *crp)
{
+ const struct crypto_session_params *csp;
struct fpu_kern_ctx *ctx;
int error, ctxidx;
bool kt;
- if (enccrd != NULL) {
- if ((enccrd->crd_alg == CRYPTO_AES_ICM ||
- enccrd->crd_alg == CRYPTO_AES_CCM_16 ||
- enccrd->crd_alg == CRYPTO_AES_NIST_GCM_16) &&
- (enccrd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
+ csp = crypto_get_params(crp->crp_session);
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_ICM:
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
return (EINVAL);
+ break;
+ case CRYPTO_AES_CBC:
+ case CRYPTO_AES_XTS:
+ /* CBC & XTS can only handle full blocks for now */
+ if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0)
+ return (EINVAL);
+ break;
}
ctx = NULL;
@@ -694,28 +651,21 @@
}
/* Do work */
- if (enccrd != NULL && authcrd != NULL) {
- /* Perform the first operation */
- if (crp->crp_desc == enccrd)
- error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
- else
- error = aesni_cipher_mac(ses, authcrd, crp);
- if (error != 0)
- goto out;
- /* Perform the second operation */
- if (crp->crp_desc == enccrd)
- error = aesni_cipher_mac(ses, authcrd, crp);
- else
- error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
- } else if (enccrd != NULL)
- error = aesni_cipher_crypt(ses, enccrd, authcrd, crp);
+ if (csp->csp_mode == CSP_MODE_ETA) {
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ error = aesni_cipher_crypt(ses, crp, csp);
+ if (error == 0)
+ error = aesni_cipher_mac(ses, crp, csp);
+ } else {
+ error = aesni_cipher_mac(ses, crp, csp);
+ if (error == 0)
+ error = aesni_cipher_crypt(ses, crp, csp);
+ }
+ } else if (csp->csp_mode == CSP_MODE_DIGEST)
+ error = aesni_cipher_mac(ses, crp, csp);
else
- error = aesni_cipher_mac(ses, authcrd, crp);
+ error = aesni_cipher_crypt(ses, crp, csp);
- if (error != 0)
- goto out;
-
-out:
if (!kt) {
fpu_kern_leave(curthread, ctx);
RELEASE_CTX(ctxidx, ctx);
@@ -724,28 +674,24 @@
}
static int
-aesni_cipher_crypt(struct aesni_session *ses, struct cryptodesc *enccrd,
- struct cryptodesc *authcrd, struct cryptop *crp)
+aesni_cipher_crypt(struct aesni_session *ses, struct cryptop *crp,
+ const struct crypto_session_params *csp)
{
uint8_t iv[AES_BLOCK_LEN], tag[GMAC_DIGEST_LEN], *buf, *authbuf;
- int error, ivlen;
+ int error;
bool encflag, allocated, authallocated;
- KASSERT((ses->algo != CRYPTO_AES_NIST_GCM_16 &&
- ses->algo != CRYPTO_AES_CCM_16) || authcrd != NULL,
- ("AES_NIST_GCM_16/AES_CCM_16 must include MAC descriptor"));
-
- ivlen = 0;
- authbuf = NULL;
-
- buf = aesni_cipher_alloc(enccrd, crp, &allocated);
+ buf = aesni_cipher_alloc(crp, crp->crp_payload_start,
+ crp->crp_payload_length, &allocated);
if (buf == NULL)
return (ENOMEM);
authallocated = false;
- if (ses->algo == CRYPTO_AES_NIST_GCM_16 ||
- ses->algo == CRYPTO_AES_CCM_16) {
- authbuf = aesni_cipher_alloc(authcrd, crp, &authallocated);
+ authbuf = NULL;
+ if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16 ||
+ csp->csp_cipher_alg == CRYPTO_AES_CCM_16) {
+ authbuf = aesni_cipher_alloc(crp, crp->crp_aad_start,
+ crp->crp_aad_length, &authallocated);
if (authbuf == NULL) {
error = ENOMEM;
goto out;
@@ -753,221 +699,161 @@
}
error = 0;
- encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT;
- if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
- error = aesni_cipher_setup_common(ses, enccrd->crd_key,
- enccrd->crd_klen);
- if (error != 0)
- goto out;
- }
-
- switch (enccrd->crd_alg) {
- case CRYPTO_AES_CBC:
- case CRYPTO_AES_ICM:
- ivlen = AES_BLOCK_LEN;
- break;
- case CRYPTO_AES_XTS:
- ivlen = 8;
- break;
- case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_CCM_16:
- ivlen = 12; /* should support arbitarily larger */
- break;
- }
+ encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
+ if (crp->crp_cipher_key != NULL)
+ aesni_cipher_setup_common(ses, csp, crp->crp_cipher_key,
+ csp->csp_cipher_klen);
/* Setup iv */
- if (encflag) {
- if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
- bcopy(enccrd->crd_iv, iv, ivlen);
- else
- arc4rand(iv, ivlen, 0);
-
- if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, ivlen, iv);
- } else {
- if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
- bcopy(enccrd->crd_iv, iv, ivlen);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, ivlen, iv);
- }
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(iv, csp->csp_ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(iv, crp->crp_iv, csp->csp_ivlen);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
- switch (ses->algo) {
+ switch (csp->csp_cipher_alg) {
case CRYPTO_AES_CBC:
if (encflag)
aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
- enccrd->crd_len, buf, buf, iv);
+ crp->crp_payload_length, buf, buf, iv);
else
aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
- enccrd->crd_len, buf, iv);
+ crp->crp_payload_length, buf, iv);
break;
case CRYPTO_AES_ICM:
/* encryption & decryption are the same */
aesni_encrypt_icm(ses->rounds, ses->enc_schedule,
- enccrd->crd_len, buf, buf, iv);
+ crp->crp_payload_length, buf, buf, iv);
break;
case CRYPTO_AES_XTS:
if (encflag)
aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
- ses->xts_schedule, enccrd->crd_len, buf, buf,
- iv);
+ ses->xts_schedule, crp->crp_payload_length, buf,
+ buf, iv);
else
aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
- ses->xts_schedule, enccrd->crd_len, buf, buf,
- iv);
+ ses->xts_schedule, crp->crp_payload_length, buf,
+ buf, iv);
break;
case CRYPTO_AES_NIST_GCM_16:
- if (!encflag)
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- authcrd->crd_inject, sizeof(tag), tag);
- else
- bzero(tag, sizeof tag);
-
if (encflag) {
+ memset(tag, 0, sizeof(tag));
AES_GCM_encrypt(buf, buf, authbuf, iv, tag,
- enccrd->crd_len, authcrd->crd_len, ivlen,
- ses->enc_schedule, ses->rounds);
-
- if (authcrd != NULL)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- authcrd->crd_inject, sizeof(tag), tag);
+ crp->crp_payload_length, crp->crp_aad_length,
+ csp->csp_ivlen, ses->enc_schedule, ses->rounds);
+ crypto_copyback(crp, crp->crp_digest_start, sizeof(tag),
+ tag);
} else {
+ crypto_copydata(crp, crp->crp_digest_start, sizeof(tag),
+ tag);
if (!AES_GCM_decrypt(buf, buf, authbuf, iv, tag,
- enccrd->crd_len, authcrd->crd_len, ivlen,
- ses->enc_schedule, ses->rounds))
+ crp->crp_payload_length, crp->crp_aad_length,
+ csp->csp_ivlen, ses->enc_schedule, ses->rounds))
error = EBADMSG;
}
break;
case CRYPTO_AES_CCM_16:
- if (!encflag)
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- authcrd->crd_inject, sizeof(tag), tag);
- else
- bzero(tag, sizeof tag);
if (encflag) {
+ memset(tag, 0, sizeof(tag));
AES_CCM_encrypt(buf, buf, authbuf, iv, tag,
- enccrd->crd_len, authcrd->crd_len, ivlen,
- ses->enc_schedule, ses->rounds);
- if (authcrd != NULL)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- authcrd->crd_inject, sizeof(tag), tag);
+ crp->crp_payload_length, crp->crp_aad_length,
+ csp->csp_ivlen, ses->enc_schedule, ses->rounds);
+ crypto_copyback(crp, crp->crp_digest_start, sizeof(tag),
+ tag);
} else {
+ crypto_copydata(crp, crp->crp_digest_start, sizeof(tag),
+ tag);
if (!AES_CCM_decrypt(buf, buf, authbuf, iv, tag,
- enccrd->crd_len, authcrd->crd_len, ivlen,
- ses->enc_schedule, ses->rounds))
+ crp->crp_payload_length, crp->crp_aad_length,
+ csp->csp_ivlen, ses->enc_schedule, ses->rounds))
error = EBADMSG;
}
break;
}
if (allocated && error == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
- enccrd->crd_len, buf);
+ crypto_copyback(crp, crp->crp_payload_start,
+ crp->crp_payload_length, buf);
out:
if (allocated) {
- explicit_bzero(buf, enccrd->crd_len);
+ explicit_bzero(buf, crp->crp_payload_length);
free(buf, M_AESNI);
}
if (authallocated) {
- explicit_bzero(authbuf, authcrd->crd_len);
+ explicit_bzero(authbuf, crp->crp_aad_length);
free(authbuf, M_AESNI);
}
return (error);
}
static int
-aesni_cipher_mac(struct aesni_session *ses, struct cryptodesc *crd,
- struct cryptop *crp)
+aesni_cipher_mac(struct aesni_session *ses, struct cryptop *crp,
+ const struct crypto_session_params *csp)
{
union {
struct SHA256Context sha2 __aligned(16);
struct sha1_ctxt sha1 __aligned(16);
} sctx;
+ uint8_t hmac_key[SHA1_BLOCK_LEN] __aligned(16);
uint32_t res[SHA2_256_HASH_LEN / sizeof(uint32_t)];
- int hashlen, error;
- void *ctx;
- void (*InitFn)(void *);
- int (*UpdateFn)(void *, const void *, unsigned);
- void (*FinalizeFn)(void *, void *);
+ uint32_t res2[SHA2_256_HASH_LEN / sizeof(uint32_t)];
+ const uint8_t *key;
+ int i, keylen;
- bool hmac;
+ if (crp->crp_auth_key != NULL)
+ key = crp->crp_auth_key;
+ else
+ key = csp->csp_auth_key;
+ keylen = csp->csp_auth_klen;
- if ((crd->crd_flags & ~CRD_F_KEY_EXPLICIT) != 0) {
- CRYPTDEB("%s: Unsupported MAC flags: 0x%x", __func__,
- (crd->crd_flags & ~CRD_F_KEY_EXPLICIT));
- return (EINVAL);
- }
- if ((crd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
- error = aesni_authprepare(ses, crd->crd_klen, crd->crd_key);
- if (error != 0)
- return (error);
- }
-
- hmac = false;
- switch (ses->auth_algo) {
- case CRYPTO_SHA1_HMAC:
- hmac = true;
- /* FALLTHROUGH */
- case CRYPTO_SHA1:
- hashlen = SHA1_HASH_LEN;
- InitFn = SHA1_Init_fn;
- UpdateFn = intel_sha1_update;
- FinalizeFn = SHA1_Finalize_fn;
- ctx = &sctx.sha1;
- break;
-
- case CRYPTO_SHA2_256_HMAC:
- hmac = true;
- /* FALLTHROUGH */
- case CRYPTO_SHA2_256:
- hashlen = SHA2_256_HASH_LEN;
- InitFn = SHA256_Init_fn;
- UpdateFn = intel_sha256_update;
- FinalizeFn = SHA256_Finalize_fn;
- ctx = &sctx.sha2;
- break;
-
- case CRYPTO_SHA2_224_HMAC:
- hmac = true;
- /* FALLTHROUGH */
- case CRYPTO_SHA2_224:
- hashlen = SHA2_224_HASH_LEN;
- InitFn = SHA224_Init_fn;
- UpdateFn = intel_sha256_update;
- FinalizeFn = SHA224_Finalize_fn;
- ctx = &sctx.sha2;
- break;
- default:
- /*
- * AES-GMAC authentication is verified while processing the
- * enccrd
- */
- return (0);
- }
-
- if (hmac) {
+ if (ses->hmac) {
/* Inner hash: (K ^ IPAD) || data */
- InitFn(ctx);
- hmac_internal(ctx, res, UpdateFn, FinalizeFn, ses->hmac_key,
- 0x36, crp->crp_buf, crd->crd_skip, crd->crd_len,
- crp->crp_flags);
+ ses->hash_init(&sctx);
+ for (i = 0; i < keylen; i++)
+ hmac_key[i] = key[i] ^ HMAC_IPAD_VAL;
+ for (i = keylen; i < sizeof(hmac_key); i++)
+ hmac_key[i] = 0 ^ HMAC_IPAD_VAL;
+ ses->hash_update(&sctx, hmac_key, sizeof(hmac_key));
+
+ crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
+ __DECONST(int (*)(void *, void *, u_int), ses->hash_update),
+ &sctx);
+ crypto_apply(crp, crp->crp_payload_start,
+ crp->crp_payload_length,
+ __DECONST(int (*)(void *, void *, u_int), ses->hash_update),
+ &sctx);
+ ses->hash_finalize(res, &sctx);
+
/* Outer hash: (K ^ OPAD) || inner hash */
- InitFn(ctx);
- hmac_internal(ctx, res, UpdateFn, FinalizeFn, ses->hmac_key,
- 0x5C, res, 0, hashlen, 0);
+ ses->hash_init(&sctx);
+ for (i = 0; i < keylen; i++)
+ hmac_key[i] = key[i] ^ HMAC_OPAD_VAL;
+ for (i = keylen; i < sizeof(hmac_key); i++)
+ hmac_key[i] = 0 ^ HMAC_OPAD_VAL;
+ ses->hash_update(&sctx, hmac_key, sizeof(hmac_key));
+ ses->hash_update(&sctx, res, ses->hash_len);
+ ses->hash_finalize(res, &sctx);
} else {
- InitFn(ctx);
- crypto_apply(crp->crp_flags, crp->crp_buf, crd->crd_skip,
- crd->crd_len, __DECONST(int (*)(void *, void *, u_int),
- UpdateFn), ctx);
- FinalizeFn(res, ctx);
+ ses->hash_init(&sctx);
+
+ crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
+ __DECONST(int (*)(void *, void *, u_int), ses->hash_update),
+ &sctx);
+ crypto_apply(crp, crp->crp_payload_start,
+ crp->crp_payload_length,
+ __DECONST(int (*)(void *, void *, u_int), ses->hash_update),
+ &sctx);
+
+ ses->hash_finalize(res, &sctx);
}
- if (ses->mlen != 0 && ses->mlen < hashlen)
- hashlen = ses->mlen;
-
- crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, hashlen,
- (void *)res);
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, ses->mlen, res2);
+ if (timingsafe_bcmp(res, res2, ses->mlen) != 0)
+ return (EBADMSG);
+ } else
+ crypto_copyback(crp, crp->crp_digest_start, ses->mlen, res);
return (0);
}
Index: sys/crypto/aesni/aesni_wrap.c
===================================================================
--- sys/crypto/aesni/aesni_wrap.c
+++ sys/crypto/aesni/aesni_wrap.c
@@ -435,51 +435,37 @@
iv, 0);
}
-int
-aesni_cipher_setup_common(struct aesni_session *ses, const uint8_t *key,
- int keylen)
+void
+aesni_cipher_setup_common(struct aesni_session *ses,
+ const struct crypto_session_params *csp, const uint8_t *key, int keylen)
{
int decsched;
decsched = 1;
- switch (ses->algo) {
+ switch (csp->csp_cipher_alg) {
case CRYPTO_AES_ICM:
case CRYPTO_AES_NIST_GCM_16:
case CRYPTO_AES_CCM_16:
decsched = 0;
- /* FALLTHROUGH */
- case CRYPTO_AES_CBC:
- switch (keylen) {
- case 128:
- ses->rounds = AES128_ROUNDS;
- break;
- case 192:
- ses->rounds = AES192_ROUNDS;
- break;
- case 256:
- ses->rounds = AES256_ROUNDS;
- break;
- default:
- CRYPTDEB("invalid CBC/ICM/GCM key length");
- return (EINVAL);
- }
break;
- case CRYPTO_AES_XTS:
- switch (keylen) {
- case 256:
- ses->rounds = AES128_ROUNDS;
- break;
- case 512:
- ses->rounds = AES256_ROUNDS;
- break;
- default:
- CRYPTDEB("invalid XTS key length");
- return (EINVAL);
- }
+ }
+
+ if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
+ keylen /= 2;
+
+ switch (keylen * 8) {
+ case 128:
+ ses->rounds = AES128_ROUNDS;
+ break;
+ case 192:
+ ses->rounds = AES192_ROUNDS;
+ break;
+ case 256:
+ ses->rounds = AES256_ROUNDS;
break;
default:
- return (EINVAL);
+ panic("shouldn't happen");
}
aesni_set_enckey(key, ses->enc_schedule, ses->rounds);
@@ -487,9 +473,7 @@
aesni_set_deckey(ses->enc_schedule, ses->dec_schedule,
ses->rounds);
- if (ses->algo == CRYPTO_AES_XTS)
- aesni_set_enckey(key + keylen / 16, ses->xts_schedule,
+ if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
+ aesni_set_enckey(key + keylen, ses->xts_schedule,
ses->rounds);
-
- return (0);
}
Index: sys/crypto/armv8/armv8_crypto.c
===================================================================
--- sys/crypto/armv8/armv8_crypto.c
+++ sys/crypto/armv8/armv8_crypto.c
@@ -85,7 +85,7 @@
} while (0)
static int armv8_crypto_cipher_process(struct armv8_crypto_session *,
- struct cryptodesc *, struct cryptop *);
+ struct cryptop *);
MALLOC_DEFINE(M_ARMV8_CRYPTO, "armv8_crypto", "ARMv8 Crypto Data");
@@ -131,7 +131,7 @@
sc->dieing = 0;
sc->cid = crypto_get_driverid(dev, sizeof(struct armv8_crypto_session),
- CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC);
+ CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
if (sc->cid < 0) {
device_printf(dev, "Could not get crypto driver id.\n");
return (ENOMEM);
@@ -149,8 +149,6 @@
mtx_init(&ctx_mtx[i], "armv8cryptoctx", NULL, MTX_DEF|MTX_NEW);
}
- crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
-
return (0);
}
@@ -185,83 +183,74 @@
}
static int
+armv8_crypto_probesession(device_t dev,
+ const struct crypto_session_params *csp)
+{
+
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_CIPHER:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return (EINVAL);
+ switch (csp->csp_cipher_klen * 8) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ return (EINVAL);
+ }
+ break;
+ default:
+ return (EINVAL);
+		}
+		break;
+	default:
+ return (EINVAL);
+ }
+ return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
+}
+
+static void
armv8_crypto_cipher_setup(struct armv8_crypto_session *ses,
- struct cryptoini *encini)
+ const struct crypto_session_params *csp)
{
int i;
- switch (ses->algo) {
- case CRYPTO_AES_CBC:
- switch (encini->cri_klen) {
- case 128:
- ses->rounds = AES128_ROUNDS;
- break;
- case 192:
- ses->rounds = AES192_ROUNDS;
- break;
- case 256:
- ses->rounds = AES256_ROUNDS;
- break;
- default:
- CRYPTDEB("invalid CBC/ICM/GCM key length");
- return (EINVAL);
- }
+ switch (csp->csp_cipher_klen * 8) {
+ case 128:
+ ses->rounds = AES128_ROUNDS;
+ break;
+ case 192:
+ ses->rounds = AES192_ROUNDS;
+ break;
+ case 256:
+ ses->rounds = AES256_ROUNDS;
break;
default:
- return (EINVAL);
+ panic("invalid CBC key length");
}
- rijndaelKeySetupEnc(ses->enc_schedule, encini->cri_key,
- encini->cri_klen);
- rijndaelKeySetupDec(ses->dec_schedule, encini->cri_key,
- encini->cri_klen);
+ rijndaelKeySetupEnc(ses->enc_schedule, csp->csp_cipher_key,
+ csp->csp_cipher_klen * 8);
+ rijndaelKeySetupDec(ses->dec_schedule, csp->csp_cipher_key,
+ csp->csp_cipher_klen * 8);
for (i = 0; i < nitems(ses->enc_schedule); i++) {
ses->enc_schedule[i] = bswap32(ses->enc_schedule[i]);
ses->dec_schedule[i] = bswap32(ses->dec_schedule[i]);
}
-
- return (0);
}
static int
armv8_crypto_newsession(device_t dev, crypto_session_t cses,
- struct cryptoini *cri)
+ const struct crypto_session_params *csp)
{
struct armv8_crypto_softc *sc;
struct armv8_crypto_session *ses;
- struct cryptoini *encini;
- int error;
-
- if (cri == NULL) {
- CRYPTDEB("no cri");
- return (EINVAL);
- }
sc = device_get_softc(dev);
- if (sc->dieing)
- return (EINVAL);
-
- ses = NULL;
- encini = NULL;
- for (; cri != NULL; cri = cri->cri_next) {
- switch (cri->cri_alg) {
- case CRYPTO_AES_CBC:
- if (encini != NULL) {
- CRYPTDEB("encini already set");
- return (EINVAL);
- }
- encini = cri;
- break;
- default:
- CRYPTDEB("unhandled algorithm");
- return (EINVAL);
- }
- }
- if (encini == NULL) {
- CRYPTDEB("no cipher");
- return (EINVAL);
- }
-
rw_wlock(&sc->lock);
if (sc->dieing) {
rw_wunlock(&sc->lock);
@@ -269,15 +258,7 @@
}
ses = crypto_get_driver_session(cses);
- ses->algo = encini->cri_alg;
-
- error = armv8_crypto_cipher_setup(ses, encini);
- if (error != 0) {
- CRYPTDEB("setup failed");
- rw_wunlock(&sc->lock);
- return (error);
- }
-
+ armv8_crypto_cipher_setup(ses, csp);
rw_wunlock(&sc->lock);
return (0);
}
@@ -285,50 +266,17 @@
static int
armv8_crypto_process(device_t dev, struct cryptop *crp, int hint __unused)
{
- struct cryptodesc *crd, *enccrd;
struct armv8_crypto_session *ses;
int error;
- error = 0;
- enccrd = NULL;
-
- /* Sanity check. */
- if (crp == NULL)
- return (EINVAL);
-
- if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
- error = EINVAL;
- goto out;
- }
-
- for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
- switch (crd->crd_alg) {
- case CRYPTO_AES_CBC:
- if (enccrd != NULL) {
- error = EINVAL;
- goto out;
- }
- enccrd = crd;
- break;
- default:
- error = EINVAL;
- goto out;
- }
- }
-
- if (enccrd == NULL) {
- error = EINVAL;
- goto out;
- }
-
/* We can only handle full blocks for now */
- if ((enccrd->crd_len % AES_BLOCK_LEN) != 0) {
+ if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) {
error = EINVAL;
goto out;
}
ses = crypto_get_driver_session(crp->crp_session);
- error = armv8_crypto_cipher_process(ses, enccrd, crp);
+ error = armv8_crypto_cipher_process(ses, crp);
out:
crp->crp_etype = error;
@@ -337,37 +285,21 @@
}
static uint8_t *
-armv8_crypto_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
- int *allocated)
+armv8_crypto_cipher_alloc(struct cryptop *crp, int *allocated)
{
- struct mbuf *m;
- struct uio *uio;
- struct iovec *iov;
uint8_t *addr;
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
- m = (struct mbuf *)crp->crp_buf;
- if (m->m_next != NULL)
- goto alloc;
- addr = mtod(m, uint8_t *);
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
- uio = (struct uio *)crp->crp_buf;
- if (uio->uio_iovcnt != 1)
- goto alloc;
- iov = uio->uio_iov;
- addr = (uint8_t *)iov->iov_base;
- } else
- addr = (uint8_t *)crp->crp_buf;
- *allocated = 0;
- addr += enccrd->crd_skip;
- return (addr);
-
-alloc:
- addr = malloc(enccrd->crd_len, M_ARMV8_CRYPTO, M_NOWAIT);
+ addr = crypto_contiguous_subsegment(crp, crp->crp_payload_start,
+ crp->crp_payload_length);
+ if (addr != NULL) {
+ *allocated = 0;
+ return (addr);
+ }
+ addr = malloc(crp->crp_payload_length, M_ARMV8_CRYPTO, M_NOWAIT);
if (addr != NULL) {
*allocated = 1;
- crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
- enccrd->crd_len, addr);
+ crypto_copydata(crp, crp->crp_payload_start,
+ crp->crp_payload_length, addr);
} else
*allocated = 0;
return (addr);
@@ -375,18 +307,20 @@
static int
armv8_crypto_cipher_process(struct armv8_crypto_session *ses,
- struct cryptodesc *enccrd, struct cryptop *crp)
+ struct cryptop *crp)
{
+ const struct crypto_session_params *csp;
struct fpu_kern_ctx *ctx;
uint8_t *buf;
uint8_t iv[AES_BLOCK_LEN];
int allocated, i;
- int encflag, ivlen;
+ int encflag;
int kt;
- encflag = (enccrd->crd_flags & CRD_F_ENCRYPT) == CRD_F_ENCRYPT;
+ csp = crypto_get_params(crp->crp_session);
+ encflag = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
- buf = armv8_crypto_cipher_alloc(enccrd, crp, &allocated);
+ buf = armv8_crypto_cipher_alloc(crp, &allocated);
if (buf == NULL)
return (ENOMEM);
@@ -397,56 +331,41 @@
FPU_KERN_NORMAL | FPU_KERN_KTHR);
}
- if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
- panic("CRD_F_KEY_EXPLICIT");
- }
-
- switch (enccrd->crd_alg) {
- case CRYPTO_AES_CBC:
- ivlen = AES_BLOCK_LEN;
- break;
+ if (crp->crp_cipher_key != NULL) {
+ panic("armv8: new cipher key");
}
/* Setup iv */
- if (encflag) {
- if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
- bcopy(enccrd->crd_iv, iv, ivlen);
- else
- arc4rand(iv, ivlen, 0);
-
- if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, ivlen, iv);
- } else {
- if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
- bcopy(enccrd->crd_iv, iv, ivlen);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, ivlen, iv);
- }
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(iv, csp->csp_ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(iv, crp->crp_iv, csp->csp_ivlen);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
/* Do work */
- switch (ses->algo) {
+ switch (csp->csp_cipher_alg) {
case CRYPTO_AES_CBC:
if (encflag)
armv8_aes_encrypt_cbc(ses->rounds, ses->enc_schedule,
- enccrd->crd_len, buf, buf, iv);
+ crp->crp_payload_length, buf, buf, iv);
else
armv8_aes_decrypt_cbc(ses->rounds, ses->dec_schedule,
- enccrd->crd_len, buf, iv);
+ crp->crp_payload_length, buf, iv);
break;
}
if (allocated)
- crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
- enccrd->crd_len, buf);
+ crypto_copyback(crp, crp->crp_payload_start,
+ crp->crp_payload_length, buf);
if (!kt) {
fpu_kern_leave(curthread, ctx);
RELEASE_CTX(i, ctx);
}
if (allocated) {
- bzero(buf, enccrd->crd_len);
+ bzero(buf, crp->crp_payload_length);
free(buf, M_ARMV8_CRYPTO);
}
return (0);
@@ -458,6 +377,7 @@
DEVMETHOD(device_attach, armv8_crypto_attach),
DEVMETHOD(device_detach, armv8_crypto_detach),
+ DEVMETHOD(cryptodev_probesession, armv8_crypto_probesession),
DEVMETHOD(cryptodev_newsession, armv8_crypto_newsession),
DEVMETHOD(cryptodev_process, armv8_crypto_process),
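The IV setup in armv8_crypto_cipher_process above is the convention this refactor establishes for every driver; condensed into a standalone sketch (the helper name is invented, the flags and accessors are exactly the ones in the hunk):

    /*
     * CRYPTO_F_IV_GENERATE: create the IV and write it into the buffer;
     * CRYPTO_F_IV_SEPARATE: the IV arrives out of band in crp_iv;
     * otherwise the IV sits in the data stream at crp_iv_start.
     */
    static void
    collect_request_iv(struct cryptop *crp, int ivlen, uint8_t *iv)
    {
        if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
            arc4rand(iv, ivlen, 0);
            crypto_copyback(crp, crp->crp_iv_start, ivlen, iv);
        } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
            memcpy(iv, crp->crp_iv, ivlen);
        else
            crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);
    }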
Index: sys/crypto/blake2/blake2_cryptodev.c
===================================================================
--- sys/crypto/blake2/blake2_cryptodev.c
+++ sys/crypto/blake2/blake2_cryptodev.c
@@ -50,10 +50,7 @@
#endif
struct blake2_session {
- int algo;
- size_t klen;
size_t mlen;
- uint8_t key[BLAKE2B_KEYBYTES];
};
CTASSERT((size_t)BLAKE2B_KEYBYTES > (size_t)BLAKE2S_KEYBYTES);
@@ -79,10 +76,8 @@
(ctx) = NULL; \
} while (0)
-static int blake2_newsession(device_t, crypto_session_t cses,
- struct cryptoini *cri);
static int blake2_cipher_setup(struct blake2_session *ses,
- struct cryptoini *authini);
+ const struct crypto_session_params *csp);
static int blake2_cipher_process(struct blake2_session *ses,
struct cryptop *crp);
@@ -134,7 +129,7 @@
sc->dying = false;
sc->cid = crypto_get_driverid(dev, sizeof(struct blake2_session),
- CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SYNC);
+ CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
if (sc->cid < 0) {
device_printf(dev, "Could not get crypto driver id.\n");
return (ENOMEM);
@@ -152,8 +147,6 @@
rw_init(&sc->lock, "blake2_lock");
- crypto_register(sc->cid, CRYPTO_BLAKE2B, 0, 0);
- crypto_register(sc->cid, CRYPTO_BLAKE2S, 0, 0);
return (0);
}
@@ -177,52 +170,47 @@
}
static int
-blake2_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+blake2_probesession(device_t dev, const struct crypto_session_params *csp)
{
- struct blake2_softc *sc;
- struct blake2_session *ses;
- struct cryptoini *authini;
- int error;
- if (cri == NULL) {
- CRYPTDEB("no cri");
+ if (csp->csp_flags != 0)
return (EINVAL);
- }
-
- sc = device_get_softc(dev);
-
- authini = NULL;
- for (; cri != NULL; cri = cri->cri_next) {
- switch (cri->cri_alg) {
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ switch (csp->csp_auth_alg) {
case CRYPTO_BLAKE2B:
case CRYPTO_BLAKE2S:
- if (authini != NULL) {
- CRYPTDEB("authini already set");
- return (EINVAL);
- }
- authini = cri;
break;
default:
- CRYPTDEB("unhandled algorithm");
return (EINVAL);
}
- }
- if (authini == NULL) {
- CRYPTDEB("no cipher");
+ break;
+ default:
return (EINVAL);
}
+ return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
+}
- rw_wlock(&sc->lock);
- if (sc->dying) {
- rw_wunlock(&sc->lock);
- return (EINVAL);
- }
- rw_wunlock(&sc->lock);
+static int
+blake2_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
+{
+ struct blake2_softc *sc;
+ struct blake2_session *ses;
+ int error;
+
+ sc = device_get_softc(dev);
ses = crypto_get_driver_session(cses);
- ses->algo = authini->cri_alg;
- error = blake2_cipher_setup(ses, authini);
+ rw_rlock(&sc->lock);
+ if (sc->dying) {
+ rw_runlock(&sc->lock);
+ return (EINVAL);
+ }
+ rw_runlock(&sc->lock);
+
+ error = blake2_cipher_setup(ses, csp);
if (error != 0) {
CRYPTDEB("setup failed");
return (error);
@@ -235,48 +223,14 @@
blake2_process(device_t dev, struct cryptop *crp, int hint __unused)
{
struct blake2_session *ses;
- struct cryptodesc *crd, *authcrd;
int error;
- ses = NULL;
- error = 0;
- authcrd = NULL;
-
- /* Sanity check. */
- if (crp == NULL)
- return (EINVAL);
-
- if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
- error = EINVAL;
- goto out;
- }
-
- for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
- switch (crd->crd_alg) {
- case CRYPTO_BLAKE2B:
- case CRYPTO_BLAKE2S:
- if (authcrd != NULL) {
- error = EINVAL;
- goto out;
- }
- authcrd = crd;
- break;
-
- default:
- error = EINVAL;
- goto out;
- }
- }
-
ses = crypto_get_driver_session(crp->crp_session);
error = blake2_cipher_process(ses, crp);
- if (error != 0)
- goto out;
-out:
crp->crp_etype = error;
crypto_done(crp);
- return (error);
+ return (0);
}
static device_method_t blake2_methods[] = {
@@ -285,6 +239,7 @@
DEVMETHOD(device_attach, blake2_attach),
DEVMETHOD(device_detach, blake2_detach),
+ DEVMETHOD(cryptodev_probesession, blake2_probesession),
DEVMETHOD(cryptodev_newsession, blake2_newsession),
DEVMETHOD(cryptodev_process, blake2_process),
@@ -302,37 +257,48 @@
MODULE_VERSION(blake2, 1);
MODULE_DEPEND(blake2, crypto, 1, 1, 1);
+static bool
+blake2_check_klen(const struct crypto_session_params *csp, unsigned klen)
+{
+
+ if (csp->csp_auth_alg == CRYPTO_BLAKE2S)
+ return (klen <= BLAKE2S_KEYBYTES);
+ else
+ return (klen <= BLAKE2B_KEYBYTES);
+}
+
static int
-blake2_cipher_setup(struct blake2_session *ses, struct cryptoini *authini)
+blake2_cipher_setup(struct blake2_session *ses,
+ const struct crypto_session_params *csp)
{
- int keylen;
+ int hashlen;
CTASSERT((size_t)BLAKE2S_OUTBYTES <= (size_t)BLAKE2B_OUTBYTES);
- if (authini->cri_mlen < 0)
+ if (!blake2_check_klen(csp, csp->csp_auth_klen))
return (EINVAL);
- switch (ses->algo) {
+ if (csp->csp_auth_mlen < 0)
+ return (EINVAL);
+
+ switch (csp->csp_auth_alg) {
case CRYPTO_BLAKE2S:
- if (authini->cri_mlen != 0 &&
- authini->cri_mlen > BLAKE2S_OUTBYTES)
- return (EINVAL);
- /* FALLTHROUGH */
+ hashlen = BLAKE2S_OUTBYTES;
+ break;
case CRYPTO_BLAKE2B:
- if (authini->cri_mlen != 0 &&
- authini->cri_mlen > BLAKE2B_OUTBYTES)
- return (EINVAL);
-
- if (authini->cri_klen % 8 != 0)
- return (EINVAL);
- keylen = authini->cri_klen / 8;
- if (keylen > sizeof(ses->key) ||
- (ses->algo == CRYPTO_BLAKE2S && keylen > BLAKE2S_KEYBYTES))
- return (EINVAL);
- ses->klen = keylen;
- memcpy(ses->key, authini->cri_key, keylen);
- ses->mlen = authini->cri_mlen;
+ hashlen = BLAKE2B_OUTBYTES;
+ break;
+ default:
+ return (EINVAL);
}
+
+ if (csp->csp_auth_mlen > hashlen)
+ return (EINVAL);
+
+ if (csp->csp_auth_mlen == 0)
+ ses->mlen = hashlen;
+ else
+ ses->mlen = csp->csp_auth_mlen;
return (0);
}
@@ -365,15 +331,15 @@
blake2b_state sb;
blake2s_state ss;
} bctx;
- char res[BLAKE2B_OUTBYTES];
+ char res[BLAKE2B_OUTBYTES], res2[BLAKE2B_OUTBYTES];
+ const struct crypto_session_params *csp;
struct fpu_kern_ctx *ctx;
+ const void *key;
int ctxidx;
bool kt;
- struct cryptodesc *crd;
int error, rc;
- size_t hashlen;
+ unsigned klen;
- crd = crp->crp_desc;
ctx = NULL;
ctxidx = 0;
error = EINVAL;
@@ -385,47 +351,42 @@
FPU_KERN_NORMAL | FPU_KERN_KTHR);
}
- if (crd->crd_flags != 0)
- goto out;
-
- switch (ses->algo) {
+ csp = crypto_get_params(crp->crp_session);
+ if (crp->crp_auth_key != NULL)
+ key = crp->crp_auth_key;
+ else
+ key = csp->csp_auth_key;
+ klen = csp->csp_auth_klen;
+ switch (csp->csp_auth_alg) {
case CRYPTO_BLAKE2B:
- if (ses->mlen != 0)
- hashlen = ses->mlen;
+ if (klen > 0)
+ rc = blake2b_init_key(&bctx.sb, ses->mlen, key, klen);
else
- hashlen = BLAKE2B_OUTBYTES;
- if (ses->klen > 0)
- rc = blake2b_init_key(&bctx.sb, hashlen, ses->key, ses->klen);
- else
- rc = blake2b_init(&bctx.sb, hashlen);
+ rc = blake2b_init(&bctx.sb, ses->mlen);
if (rc != 0)
goto out;
- error = crypto_apply(crp->crp_flags, crp->crp_buf, crd->crd_skip,
- crd->crd_len, blake2b_applicator, &bctx.sb);
+ error = crypto_apply(crp, crp->crp_payload_start,
+ crp->crp_payload_length, blake2b_applicator, &bctx.sb);
if (error != 0)
goto out;
- rc = blake2b_final(&bctx.sb, res, hashlen);
+ rc = blake2b_final(&bctx.sb, res, ses->mlen);
if (rc != 0) {
error = EINVAL;
goto out;
}
break;
case CRYPTO_BLAKE2S:
- if (ses->mlen != 0)
- hashlen = ses->mlen;
+ if (klen > 0)
+ rc = blake2s_init_key(&bctx.ss, ses->mlen, key, klen);
else
- hashlen = BLAKE2S_OUTBYTES;
- if (ses->klen > 0)
- rc = blake2s_init_key(&bctx.ss, hashlen, ses->key, ses->klen);
- else
- rc = blake2s_init(&bctx.ss, hashlen);
+ rc = blake2s_init(&bctx.ss, ses->mlen);
if (rc != 0)
goto out;
- error = crypto_apply(crp->crp_flags, crp->crp_buf, crd->crd_skip,
- crd->crd_len, blake2s_applicator, &bctx.ss);
+ error = crypto_apply(crp, crp->crp_payload_start,
+ crp->crp_payload_length, blake2s_applicator, &bctx.ss);
if (error != 0)
goto out;
- rc = blake2s_final(&bctx.ss, res, hashlen);
+ rc = blake2s_final(&bctx.ss, res, ses->mlen);
if (rc != 0) {
error = EINVAL;
goto out;
@@ -435,8 +396,12 @@
panic("unreachable");
}
- crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject, hashlen,
- (void *)res);
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, ses->mlen, res2);
+ if (timingsafe_bcmp(res, res2, ses->mlen) != 0)
+ error = EBADMSG;
+ } else
+ crypto_copyback(crp, crp->crp_digest_start, ses->mlen, res);
out:
if (!kt) {
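The tail of blake2_cipher_process shows the new digest convention: compute into a local buffer, then either verify against the digest already present in the request or copy the result out. As a self-contained sketch, where res holds a freshly computed digest of mlen bytes:

    static int
    finish_digest(struct cryptop *crp, const uint8_t *res, size_t mlen)
    {
        uint8_t res2[BLAKE2B_OUTBYTES]; /* upper bound on mlen here */

        if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
            crypto_copydata(crp, crp->crp_digest_start, mlen, res2);
            if (timingsafe_bcmp(res, res2, mlen) != 0)
                return (EBADMSG);
            return (0);
        }
        crypto_copyback(crp, crp->crp_digest_start, mlen, res);
        return (0);
    }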
Index: sys/crypto/ccp/ccp.h
===================================================================
--- sys/crypto/ccp/ccp.h
+++ sys/crypto/ccp/ccp.h
@@ -58,14 +58,18 @@
SHA2_256, SHA2_384, SHA2_512
};
+/*
+ * XXX: The hmac.res, gmac.final_block, and blkcipher.iv fields are
+ * used by individual requests, meaning that sessions cannot have more
+ * than a single request in flight at a time.
+ */
struct ccp_session_hmac {
struct auth_hash *auth_hash;
int hash_len;
- unsigned int partial_digest_len;
unsigned int auth_mode;
- unsigned int mk_size;
char ipad[CCP_HASH_MAX_BLOCK_SIZE];
char opad[CCP_HASH_MAX_BLOCK_SIZE];
+ char res[CCP_HASH_MAX_BLOCK_SIZE];
};
struct ccp_session_gmac {
@@ -77,14 +81,12 @@
unsigned cipher_mode;
unsigned cipher_type;
unsigned key_len;
- unsigned iv_len;
char enckey[CCP_AES_MAX_KEY_LEN];
char iv[CCP_MAX_CRYPTO_IV_LEN];
};
struct ccp_session {
- bool active : 1;
- bool cipher_first : 1;
+ bool active;
int pending;
enum { HMAC, BLKCIPHER, AUTHENC, GCM } mode;
unsigned queue;
@@ -217,12 +219,11 @@
* Internal hardware crypt-op submission routines.
*/
int ccp_authenc(struct ccp_queue *sc, struct ccp_session *s,
- struct cryptop *crp, struct cryptodesc *crda, struct cryptodesc *crde)
- __must_check;
+ struct cryptop *crp) __must_check;
int ccp_blkcipher(struct ccp_queue *sc, struct ccp_session *s,
struct cryptop *crp) __must_check;
-int ccp_gcm(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp,
- struct cryptodesc *crda, struct cryptodesc *crde) __must_check;
+int ccp_gcm(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp)
+ __must_check;
int ccp_hmac(struct ccp_queue *sc, struct ccp_session *s, struct cryptop *crp)
__must_check;
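The XXX comment at the top of this header is what motivates the s->pending checks added to ccp_process below: the per-session scratch fields are reused by each request, so the driver cannot overlap requests on one session. A hypothetical wrapper showing the shape of that check:

    /*
     * Hypothetical sketch: serialize submissions because hmac.res,
     * gmac.final_block, and blkcipher.iv are per-session scratch.
     */
    static int
    ccp_submit_serialized(struct ccp_queue *qp, struct ccp_session *s,
        struct cryptop *crp, int (*submit)(struct ccp_queue *,
        struct ccp_session *, struct cryptop *))
    {
        if (s->pending != 0)
            return (EAGAIN);    /* prior request still owns the scratch */
        return (submit(qp, s, crp));
    }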
Index: sys/crypto/ccp/ccp.c
===================================================================
--- sys/crypto/ccp/ccp.c
+++ sys/crypto/ccp/ccp.c
@@ -96,22 +96,28 @@
int error;
sglist_reset(sg);
- if (crp->crp_flags & CRYPTO_F_IMBUF)
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
error = sglist_append_mbuf(sg, crp->crp_mbuf);
- else if (crp->crp_flags & CRYPTO_F_IOV)
+ break;
+ case CRYPTO_BUF_UIO:
error = sglist_append_uio(sg, crp->crp_uio);
- else
+ break;
+ case CRYPTO_BUF_CONTIG:
error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
+ break;
+ default:
+ error = EINVAL;
+ }
return (error);
}
/*
* Handle a GCM request with an empty payload by performing the
- * operation in software. Derived from swcr_authenc().
+ * operation in software.
*/
static void
-ccp_gcm_soft(struct ccp_session *s, struct cryptop *crp,
- struct cryptodesc *crda, struct cryptodesc *crde)
+ccp_gcm_soft(struct ccp_session *s, struct cryptop *crp)
{
struct aes_gmac_ctx gmac_ctx;
char block[GMAC_BLOCK_LEN];
@@ -123,21 +129,11 @@
* This assumes a 12-byte IV from the crp. See longer comment
* above in ccp_gcm() for more details.
*/
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crde->crd_iv, 12);
- else
- arc4rand(iv, 12, 0);
- if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, 12, iv);
- } else {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crde->crd_iv, 12);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, 12, iv);
+ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) {
+ crp->crp_etype = EINVAL;
+ goto out;
}
+ memcpy(iv, crp->crp_iv, 12);
*(uint32_t *)&iv[12] = htobe32(1);
/* Initialize the MAC. */
@@ -146,34 +142,34 @@
AES_GMAC_Reinit(&gmac_ctx, iv, sizeof(iv));
/* MAC the AAD. */
- for (i = 0; i < crda->crd_len; i += sizeof(block)) {
- len = imin(crda->crd_len - i, sizeof(block));
- crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
- i, len, block);
+ for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) {
+ len = imin(crp->crp_aad_length - i, sizeof(block));
+ crypto_copydata(crp, crp->crp_aad_start + i, len, block);
bzero(block + len, sizeof(block) - len);
AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
}
/* Length block. */
bzero(block, sizeof(block));
- ((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
+ ((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8);
AES_GMAC_Update(&gmac_ctx, block, sizeof(block));
AES_GMAC_Final(digest, &gmac_ctx);
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
- sizeof(digest), digest);
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ crypto_copyback(crp, crp->crp_digest_start, sizeof(digest),
+ digest);
crp->crp_etype = 0;
} else {
char digest2[GMAC_DIGEST_LEN];
- crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
- sizeof(digest2), digest2);
+ crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2),
+ digest2);
if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0)
crp->crp_etype = 0;
else
crp->crp_etype = EBADMSG;
}
+out:
crypto_done(crp);
}
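ccp_gcm_soft leans on two GCM details visible above: a 12-byte IV becomes the 16-byte J0 block by appending a big-endian block counter of one, and the final GHASH input encodes the AAD and ciphertext lengths in bits. Both as sketches, using be32enc/be64enc from sys/endian.h (the ccp_gcm hunk below uses be64enc the same way):

    /* Sketch: build GCM's J0 from a 12-byte IV; the counter starts at 1. */
    static void
    gcm_build_j0(const uint8_t iv[12], uint8_t j0[16])
    {
        memcpy(j0, iv, 12);
        be32enc(&j0[12], 1);    /* same effect as the htobe32(1) store */
    }

    /* Sketch: the 16-byte length block fed to the final GHASH. */
    static void
    gcm_length_block(uint8_t block[16], uint64_t aad_len, uint64_t ct_len)
    {
        be64enc(&block[0], aad_len * 8);    /* AAD length in bits */
        be64enc(&block[8], ct_len * 8);     /* ciphertext length in bits */
    }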
@@ -259,22 +255,6 @@
random_source_register(&random_ccp);
}
- if ((sc->hw_features & VERSION_CAP_AES) != 0) {
- crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_ICM, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
- }
- if ((sc->hw_features & VERSION_CAP_SHA) != 0) {
- crypto_register(sc->cid, CRYPTO_SHA1_HMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_SHA2_256_HMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_SHA2_384_HMAC, 0, 0);
- crypto_register(sc->cid, CRYPTO_SHA2_512_HMAC, 0, 0);
- }
-
return (0);
}
@@ -304,8 +284,7 @@
}
static void
-ccp_init_hmac_digest(struct ccp_session *s, int cri_alg, char *key,
- int klen)
+ccp_init_hmac_digest(struct ccp_session *s, const char *key, int klen)
{
union authctx auth_ctx;
struct auth_hash *axf;
@@ -316,7 +295,6 @@
* the key as the key instead.
*/
axf = s->hmac.auth_hash;
- klen /= 8;
if (klen > axf->blocksize) {
axf->Init(&auth_ctx);
axf->Update(&auth_ctx, key, klen);
@@ -335,26 +313,26 @@
}
}
-static int
+static bool
ccp_aes_check_keylen(int alg, int klen)
{
- switch (klen) {
+ switch (klen * 8) {
case 128:
case 192:
if (alg == CRYPTO_AES_XTS)
- return (EINVAL);
+ return (false);
break;
case 256:
break;
case 512:
if (alg != CRYPTO_AES_XTS)
- return (EINVAL);
+ return (false);
break;
default:
- return (EINVAL);
+ return (false);
}
- return (0);
+ return (true);
}
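With csp_cipher_klen now counted in bytes, ccp_aes_check_keylen multiplies by eight before matching; a few illustrative cases, written as a hypothetical self-test rather than driver code:

    static void
    ccp_keylen_selftest(void)
    {
        /* A 64-byte (512-bit) key is valid only for XTS... */
        KASSERT(ccp_aes_check_keylen(CRYPTO_AES_XTS, 64), ("xts-512"));
        KASSERT(!ccp_aes_check_keylen(CRYPTO_AES_CBC, 64), ("cbc-512"));
        /* ...while 128-bit keys are plain AES only. */
        KASSERT(!ccp_aes_check_keylen(CRYPTO_AES_XTS, 16), ("xts-128"));
        KASSERT(ccp_aes_check_keylen(CRYPTO_AES_CBC, 16), ("cbc-128"));
    }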
static void
@@ -363,9 +341,9 @@
unsigned kbits;
if (alg == CRYPTO_AES_XTS)
- kbits = klen / 2;
+ kbits = (klen / 2) * 8;
else
- kbits = klen;
+ kbits = klen * 8;
switch (kbits) {
case 128:
@@ -381,123 +359,154 @@
panic("should not get here");
}
- s->blkcipher.key_len = klen / 8;
+ s->blkcipher.key_len = klen;
memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
}
+static bool
+ccp_auth_supported(struct ccp_softc *sc,
+ const struct crypto_session_params *csp)
+{
+
+ if ((sc->hw_features & VERSION_CAP_SHA) == 0)
+ return (false);
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ if (csp->csp_auth_key == NULL)
+ return (false);
+ break;
+ default:
+ return (false);
+ }
+ return (true);
+}
+
+static bool
+ccp_cipher_supported(struct ccp_softc *sc,
+ const struct crypto_session_params *csp)
+{
+
+ if ((sc->hw_features & VERSION_CAP_AES) == 0)
+ return (false);
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return (false);
+ break;
+ case CRYPTO_AES_ICM:
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return (false);
+ break;
+ case CRYPTO_AES_XTS:
+ if (csp->csp_ivlen != AES_XTS_IV_LEN)
+ return (false);
+ break;
+ default:
+ return (false);
+ }
+ return (ccp_aes_check_keylen(csp->csp_cipher_alg,
+ csp->csp_cipher_klen));
+}
+
static int
-ccp_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+ccp_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+ struct ccp_softc *sc;
+
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ sc = device_get_softc(dev);
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ if (!ccp_auth_supported(sc, csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!ccp_cipher_supported(sc, csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_AEAD:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ if (csp->csp_ivlen != AES_GCM_IV_LEN)
+ return (EINVAL);
+ if (csp->csp_auth_mlen < 0 ||
+ csp->csp_auth_mlen > AES_GMAC_HASH_LEN)
+ return (EINVAL);
+ if ((sc->hw_features & VERSION_CAP_AES) == 0)
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+ break;
+ case CSP_MODE_ETA:
+ if (!ccp_auth_supported(sc, csp) ||
+ !ccp_cipher_supported(sc, csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (CRYPTODEV_PROBE_HARDWARE);
+}
+
+static int
+ccp_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
{
struct ccp_softc *sc;
struct ccp_session *s;
struct auth_hash *auth_hash;
- struct cryptoini *c, *hash, *cipher;
enum ccp_aes_mode cipher_mode;
- unsigned auth_mode, iv_len;
- unsigned partial_digest_len;
+ unsigned auth_mode;
unsigned q;
- int error;
- bool gcm_hash;
- if (cri == NULL)
- return (EINVAL);
-
- s = crypto_get_driver_session(cses);
-
- gcm_hash = false;
- cipher = NULL;
- hash = NULL;
- auth_hash = NULL;
/* XXX reconcile auth_mode with use by ccp_sha */
- auth_mode = 0;
- cipher_mode = CCP_AES_MODE_ECB;
- iv_len = 0;
- partial_digest_len = 0;
- for (c = cri; c != NULL; c = c->cri_next) {
- switch (c->cri_alg) {
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- if (hash)
- return (EINVAL);
- hash = c;
- switch (c->cri_alg) {
- case CRYPTO_SHA1_HMAC:
- auth_hash = &auth_hash_hmac_sha1;
- auth_mode = SHA1;
- partial_digest_len = SHA1_HASH_LEN;
- break;
- case CRYPTO_SHA2_256_HMAC:
- auth_hash = &auth_hash_hmac_sha2_256;
- auth_mode = SHA2_256;
- partial_digest_len = SHA2_256_HASH_LEN;
- break;
- case CRYPTO_SHA2_384_HMAC:
- auth_hash = &auth_hash_hmac_sha2_384;
- auth_mode = SHA2_384;
- partial_digest_len = SHA2_512_HASH_LEN;
- break;
- case CRYPTO_SHA2_512_HMAC:
- auth_hash = &auth_hash_hmac_sha2_512;
- auth_mode = SHA2_512;
- partial_digest_len = SHA2_512_HASH_LEN;
- break;
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- gcm_hash = true;
-#if 0
- auth_mode = CHCR_SCMD_AUTH_MODE_GHASH;
-#endif
- break;
- }
- break;
- case CRYPTO_AES_CBC:
- case CRYPTO_AES_ICM:
- case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_XTS:
- if (cipher)
- return (EINVAL);
- cipher = c;
- switch (c->cri_alg) {
- case CRYPTO_AES_CBC:
- cipher_mode = CCP_AES_MODE_CBC;
- iv_len = AES_BLOCK_LEN;
- break;
- case CRYPTO_AES_ICM:
- cipher_mode = CCP_AES_MODE_CTR;
- iv_len = AES_BLOCK_LEN;
- break;
- case CRYPTO_AES_NIST_GCM_16:
- cipher_mode = CCP_AES_MODE_GCTR;
- iv_len = AES_GCM_IV_LEN;
- break;
- case CRYPTO_AES_XTS:
- cipher_mode = CCP_AES_MODE_XTS;
- iv_len = AES_BLOCK_LEN;
- break;
- }
- if (c->cri_key != NULL) {
- error = ccp_aes_check_keylen(c->cri_alg,
- c->cri_klen);
- if (error != 0)
- return (error);
- }
- break;
- default:
- return (EINVAL);
- }
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA1_HMAC:
+ auth_hash = &auth_hash_hmac_sha1;
+ auth_mode = SHA1;
+ break;
+ case CRYPTO_SHA2_256_HMAC:
+ auth_hash = &auth_hash_hmac_sha2_256;
+ auth_mode = SHA2_256;
+ break;
+ case CRYPTO_SHA2_384_HMAC:
+ auth_hash = &auth_hash_hmac_sha2_384;
+ auth_mode = SHA2_384;
+ break;
+ case CRYPTO_SHA2_512_HMAC:
+ auth_hash = &auth_hash_hmac_sha2_512;
+ auth_mode = SHA2_512;
+ break;
+ default:
+ auth_hash = NULL;
+ auth_mode = 0;
+ break;
+ }
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ cipher_mode = CCP_AES_MODE_CBC;
+ break;
+ case CRYPTO_AES_ICM:
+ cipher_mode = CCP_AES_MODE_CTR;
+ break;
+ case CRYPTO_AES_NIST_GCM_16:
+ cipher_mode = CCP_AES_MODE_GCTR;
+ break;
+ case CRYPTO_AES_XTS:
+ cipher_mode = CCP_AES_MODE_XTS;
+ break;
+ default:
+ cipher_mode = CCP_AES_MODE_ECB;
+ break;
}
- if (gcm_hash != (cipher_mode == CCP_AES_MODE_GCTR))
- return (EINVAL);
- if (hash == NULL && cipher == NULL)
- return (EINVAL);
- if (hash != NULL && hash->cri_key == NULL)
- return (EINVAL);
sc = device_get_softc(dev);
mtx_lock(&sc->lock);
@@ -506,6 +515,8 @@
return (ENXIO);
}
+ s = crypto_get_driver_session(cses);
+
/* Just grab the first usable queue for now. */
for (q = 0; q < nitems(sc->queues); q++)
if ((sc->valid_queues & (1 << q)) != 0)
@@ -516,38 +527,40 @@
}
s->queue = q;
- if (gcm_hash)
+ switch (csp->csp_mode) {
+ case CSP_MODE_AEAD:
s->mode = GCM;
- else if (hash != NULL && cipher != NULL)
+ break;
+ case CSP_MODE_ETA:
s->mode = AUTHENC;
- else if (hash != NULL)
+ break;
+ case CSP_MODE_DIGEST:
s->mode = HMAC;
- else {
- MPASS(cipher != NULL);
+ break;
+ case CSP_MODE_CIPHER:
s->mode = BLKCIPHER;
+ break;
}
- if (gcm_hash) {
- if (hash->cri_mlen == 0)
+
+ if (s->mode == GCM) {
+ if (csp->csp_auth_mlen == 0)
s->gmac.hash_len = AES_GMAC_HASH_LEN;
else
- s->gmac.hash_len = hash->cri_mlen;
- } else if (hash != NULL) {
+ s->gmac.hash_len = csp->csp_auth_mlen;
+ } else if (auth_hash != NULL) {
s->hmac.auth_hash = auth_hash;
s->hmac.auth_mode = auth_mode;
- s->hmac.partial_digest_len = partial_digest_len;
- if (hash->cri_mlen == 0)
+ if (csp->csp_auth_mlen == 0)
s->hmac.hash_len = auth_hash->hashsize;
else
- s->hmac.hash_len = hash->cri_mlen;
- ccp_init_hmac_digest(s, hash->cri_alg, hash->cri_key,
- hash->cri_klen);
+ s->hmac.hash_len = csp->csp_auth_mlen;
+ ccp_init_hmac_digest(s, csp->csp_auth_key, csp->csp_auth_klen);
}
- if (cipher != NULL) {
+ if (cipher_mode != CCP_AES_MODE_ECB) {
s->blkcipher.cipher_mode = cipher_mode;
- s->blkcipher.iv_len = iv_len;
- if (cipher->cri_key != NULL)
- ccp_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
- cipher->cri_klen);
+ if (csp->csp_cipher_key != NULL)
+ ccp_aes_setkey(s, csp->csp_cipher_alg,
+ csp->csp_cipher_key, csp->csp_cipher_klen);
}
s->active = true;
@@ -573,19 +586,17 @@
static int
ccp_process(device_t dev, struct cryptop *crp, int hint)
{
+ const struct crypto_session_params *csp;
struct ccp_softc *sc;
struct ccp_queue *qp;
struct ccp_session *s;
- struct cryptodesc *crd, *crda, *crde;
int error;
bool qpheld;
qpheld = false;
qp = NULL;
- if (crp == NULL)
- return (EINVAL);
- crd = crp->crp_desc;
+ csp = crypto_get_params(crp->crp_session);
s = crypto_get_driver_session(crp->crp_session);
sc = device_get_softc(dev);
mtx_lock(&sc->lock);
@@ -600,89 +611,47 @@
if (error != 0)
goto out;
+ if (crp->crp_auth_key != NULL) {
+ KASSERT(s->hmac.auth_hash != NULL, ("auth key without HMAC"));
+ ccp_init_hmac_digest(s, crp->crp_auth_key, csp->csp_auth_klen);
+ }
+ if (crp->crp_cipher_key != NULL)
+ ccp_aes_setkey(s, csp->csp_cipher_alg, crp->crp_cipher_key,
+ csp->csp_cipher_klen);
+
switch (s->mode) {
case HMAC:
- if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
- ccp_init_hmac_digest(s, crd->crd_alg, crd->crd_key,
- crd->crd_klen);
+ if (s->pending != 0) {
+ error = EAGAIN;
+ break;
+ }
error = ccp_hmac(qp, s, crp);
break;
case BLKCIPHER:
- if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
- error = ccp_aes_check_keylen(crd->crd_alg,
- crd->crd_klen);
- if (error != 0)
- break;
- ccp_aes_setkey(s, crd->crd_alg, crd->crd_key,
- crd->crd_klen);
+ if (s->pending != 0) {
+ error = EAGAIN;
+ break;
}
error = ccp_blkcipher(qp, s, crp);
break;
case AUTHENC:
- error = 0;
- switch (crd->crd_alg) {
- case CRYPTO_AES_CBC:
- case CRYPTO_AES_ICM:
- case CRYPTO_AES_XTS:
- /* Only encrypt-then-authenticate supported. */
- crde = crd;
- crda = crd->crd_next;
- if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
- error = EINVAL;
- break;
- }
- s->cipher_first = true;
+ if (s->pending != 0) {
+ error = EAGAIN;
break;
- default:
- crda = crd;
- crde = crd->crd_next;
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- error = EINVAL;
- break;
- }
- s->cipher_first = false;
- break;
- }
- if (error != 0)
- break;
- if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
- ccp_init_hmac_digest(s, crda->crd_alg, crda->crd_key,
- crda->crd_klen);
- if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
- error = ccp_aes_check_keylen(crde->crd_alg,
- crde->crd_klen);
- if (error != 0)
- break;
- ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
- crde->crd_klen);
}
- error = ccp_authenc(qp, s, crp, crda, crde);
+ error = ccp_authenc(qp, s, crp);
break;
case GCM:
- error = 0;
- if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
- crde = crd;
- crda = crd->crd_next;
- s->cipher_first = true;
- } else {
- crda = crd;
- crde = crd->crd_next;
- s->cipher_first = false;
- }
- if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
- error = ccp_aes_check_keylen(crde->crd_alg,
- crde->crd_klen);
- if (error != 0)
- break;
- ccp_aes_setkey(s, crde->crd_alg, crde->crd_key,
- crde->crd_klen);
- }
- if (crde->crd_len == 0) {
+ if (crp->crp_payload_length == 0) {
mtx_unlock(&qp->cq_lock);
- ccp_gcm_soft(s, crp, crda, crde);
+ ccp_gcm_soft(s, crp);
return (0);
}
- error = ccp_gcm(qp, s, crp, crda, crde);
+ if (s->pending != 0) {
+ error = EAGAIN;
+ break;
+ }
+ error = ccp_gcm(qp, s, crp);
break;
}
@@ -716,6 +685,7 @@
DEVMETHOD(device_attach, ccp_attach),
DEVMETHOD(device_detach, ccp_detach),
+ DEVMETHOD(cryptodev_probesession, ccp_probesession),
DEVMETHOD(cryptodev_newsession, ccp_newsession),
DEVMETHOD(cryptodev_freesession, ccp_freesession),
DEVMETHOD(cryptodev_process, ccp_process),
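The rekey block near the top of ccp_process above is the replacement for CRD_F_KEY_EXPLICIT: a non-NULL crp_cipher_key or crp_auth_key rekeys the session state for this one request, with lengths still taken from the session parameters. The blake2 driver applies the same convention without mutating the session; as a sketch:

    /* Sketch: choose the per-request key override when one is supplied. */
    static const void *
    effective_cipher_key(const struct cryptop *crp,
        const struct crypto_session_params *csp)
    {
        if (crp->crp_cipher_key != NULL)
            return (crp->crp_cipher_key);   /* request override */
        return (csp->csp_cipher_key);       /* session key */
    }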
Index: sys/crypto/ccp/ccp_hardware.c
===================================================================
--- sys/crypto/ccp/ccp_hardware.c
+++ sys/crypto/ccp/ccp_hardware.c
@@ -895,7 +895,7 @@
remain = len;
for (i = 0; i < sgl->sg_nseg && remain != 0; i++) {
seg = &sgl->sg_segs[i];
- /* crd_len is int, so 32-bit min() is ok. */
+ /* crp lengths are int, so 32-bit min() is ok. */
nb = min(remain, seg->ss_len);
if (tolsb)
@@ -1116,7 +1116,7 @@
lsbaddr = ccp_queue_lsb_address(qp, LSB_ENTRY_SHA);
for (i = 0; i < sgl_dst->sg_nseg; i++) {
seg = &sgl_dst->sg_segs[i];
- /* crd_len is int, so 32-bit min() is ok. */
+ /* crp lengths are int, so 32-bit min() is ok. */
nb = min(remaining, seg->ss_len);
error = ccp_passthrough(qp, seg->ss_paddr, CCP_MEMTYPE_SYSTEM,
@@ -1202,7 +1202,7 @@
static void
ccp_do_hmac_done(struct ccp_queue *qp, struct ccp_session *s,
- struct cryptop *crp, struct cryptodesc *crd, int error)
+ struct cryptop *crp, int error)
{
char ihash[SHA2_512_HASH_LEN /* max hash len */];
union authctx auth_ctx;
@@ -1220,21 +1220,26 @@
/* Do remaining outer hash over small inner hash in software */
axf->Init(&auth_ctx);
axf->Update(&auth_ctx, s->hmac.opad, axf->blocksize);
- ccp_sha_copy_result(ihash, s->hmac.ipad, s->hmac.auth_mode);
+ ccp_sha_copy_result(ihash, s->hmac.res, s->hmac.auth_mode);
#if 0
INSECURE_DEBUG(dev, "%s sha intermediate=%64D\n", __func__,
(u_char *)ihash, " ");
#endif
axf->Update(&auth_ctx, ihash, axf->hashsize);
- axf->Final(s->hmac.ipad, &auth_ctx);
+ axf->Final(s->hmac.res, &auth_ctx);
- crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
- s->hmac.hash_len, s->hmac.ipad);
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
+ ihash);
+ if (timingsafe_bcmp(s->hmac.res, ihash, s->hmac.hash_len) != 0)
+ crp->crp_etype = EBADMSG;
+ } else
+ crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
+ s->hmac.res);
/* Avoid leaking key material */
explicit_bzero(&auth_ctx, sizeof(auth_ctx));
- explicit_bzero(s->hmac.ipad, sizeof(s->hmac.ipad));
- explicit_bzero(s->hmac.opad, sizeof(s->hmac.opad));
+ explicit_bzero(s->hmac.res, sizeof(s->hmac.res));
out:
crypto_done(crp);
@@ -1244,17 +1249,15 @@
ccp_hmac_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp,
int error)
{
- struct cryptodesc *crd;
struct cryptop *crp;
crp = vcrp;
- crd = crp->crp_desc;
- ccp_do_hmac_done(qp, s, crp, crd, error);
+ ccp_do_hmac_done(qp, s, crp, error);
}
static int __must_check
ccp_do_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp,
- struct cryptodesc *crd, const struct ccp_completion_ctx *cctx)
+ const struct ccp_completion_ctx *cctx)
{
device_t dev;
struct auth_hash *axf;
@@ -1272,15 +1275,21 @@
error = sglist_append(qp->cq_sg_ulptx, s->hmac.ipad, axf->blocksize);
if (error != 0)
return (error);
+ if (crp->crp_aad_length != 0) {
+ error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
+ crp->crp_aad_start, crp->crp_aad_length);
+ if (error != 0)
+ return (error);
+ }
error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
- crd->crd_skip, crd->crd_len);
+ crp->crp_payload_start, crp->crp_payload_length);
if (error != 0) {
DPRINTF(dev, "%s: sglist too short\n", __func__);
return (error);
}
- /* Populate SGL for output -- just reuse hmac.ipad buffer. */
+ /* Populate SGL for output -- use hmac.res buffer. */
sglist_reset(qp->cq_sg_dst);
- error = sglist_append(qp->cq_sg_dst, s->hmac.ipad,
+ error = sglist_append(qp->cq_sg_dst, s->hmac.res,
roundup2(axf->hashsize, LSB_ENTRY_SIZE));
if (error != 0)
return (error);
@@ -1298,15 +1307,12 @@
ccp_hmac(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
{
struct ccp_completion_ctx ctx;
- struct cryptodesc *crd;
-
- crd = crp->crp_desc;
ctx.callback_fn = ccp_hmac_done;
ctx.callback_arg = crp;
ctx.session = s;
- return (ccp_do_hmac(qp, s, crp, crd, &ctx));
+ return (ccp_do_hmac(qp, s, crp, &ctx));
}
static void
@@ -1329,7 +1335,7 @@
{
struct cryptop *crp;
- explicit_bzero(&s->blkcipher, sizeof(s->blkcipher));
+ explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv));
crp = vcrp;
@@ -1343,57 +1349,39 @@
}
static void
-ccp_collect_iv(struct ccp_session *s, struct cryptop *crp,
- struct cryptodesc *crd)
-{
+ccp_collect_iv(struct cryptop *crp, const struct crypto_session_params *csp,
+ char *iv)
+{
- if (crd->crd_flags & CRD_F_ENCRYPT) {
- if (crd->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(s->blkcipher.iv, crd->crd_iv,
- s->blkcipher.iv_len);
- else
- arc4rand(s->blkcipher.iv, s->blkcipher.iv_len, 0);
- if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, s->blkcipher.iv_len,
- s->blkcipher.iv);
- } else {
- if (crd->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(s->blkcipher.iv, crd->crd_iv,
- s->blkcipher.iv_len);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, s->blkcipher.iv_len,
- s->blkcipher.iv);
- }
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(iv, csp->csp_ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(iv, crp->crp_iv, csp->csp_ivlen);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
/*
* If the input IV is 12 bytes, append an explicit counter of 1.
*/
- if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16 &&
- s->blkcipher.iv_len == 12) {
- *(uint32_t *)&s->blkcipher.iv[12] = htobe32(1);
- s->blkcipher.iv_len = AES_BLOCK_LEN;
- }
+ if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16 &&
+ csp->csp_ivlen == 12)
+ *(uint32_t *)&iv[12] = htobe32(1);
- if (crd->crd_alg == CRYPTO_AES_XTS && s->blkcipher.iv_len != AES_BLOCK_LEN) {
- DPRINTF(NULL, "got ivlen != 16: %u\n", s->blkcipher.iv_len);
- if (s->blkcipher.iv_len < AES_BLOCK_LEN)
- memset(&s->blkcipher.iv[s->blkcipher.iv_len], 0,
- AES_BLOCK_LEN - s->blkcipher.iv_len);
- s->blkcipher.iv_len = AES_BLOCK_LEN;
- }
+ if (csp->csp_cipher_alg == CRYPTO_AES_XTS &&
+ csp->csp_ivlen < AES_BLOCK_LEN)
+ memset(&iv[csp->csp_ivlen], 0, AES_BLOCK_LEN - csp->csp_ivlen);
/* Reverse order of IV material for HW */
- INSECURE_DEBUG(NULL, "%s: IV: %16D len: %u\n", __func__,
- s->blkcipher.iv, " ", s->blkcipher.iv_len);
+ INSECURE_DEBUG(NULL, "%s: IV: %16D len: %u\n", __func__, iv, " ",
+ csp->csp_ivlen);
/*
* For unknown reasons, XTS mode expects the IV in the reverse byte
* order to every other AES mode.
*/
- if (crd->crd_alg != CRYPTO_AES_XTS)
- ccp_byteswap(s->blkcipher.iv, s->blkcipher.iv_len);
+ if (csp->csp_cipher_alg != CRYPTO_AES_XTS)
+ ccp_byteswap(iv, AES_BLOCK_LEN);
}
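ccp_collect_iv finishes by byte-reversing the IV for every mode except XTS; ccp_byteswap is assumed here to reverse a buffer in place, roughly:

    /* Sketch of an in-place byte reversal as ccp_byteswap is assumed to do. */
    static void
    reverse_bytes(uint8_t *buf, size_t len)
    {
        size_t i;
        uint8_t t;

        for (i = 0; i < len / 2; i++) {
            t = buf[i];
            buf[i] = buf[len - 1 - i];
            buf[len - 1 - i] = t;
        }
    }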
static int __must_check
@@ -1414,8 +1402,7 @@
static int __must_check
ccp_do_xts(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp,
- struct cryptodesc *crd, enum ccp_cipher_dir dir,
- const struct ccp_completion_ctx *cctx)
+ enum ccp_cipher_dir dir, const struct ccp_completion_ctx *cctx)
{
struct ccp_desc *desc;
device_t dev;
@@ -1427,7 +1414,8 @@
dev = qp->cq_softc->dev;
for (i = 0; i < nitems(ccp_xts_unitsize_map); i++)
- if (ccp_xts_unitsize_map[i].cxu_size == crd->crd_len) {
+ if (ccp_xts_unitsize_map[i].cxu_size ==
+ crp->crp_payload_length) {
usize = ccp_xts_unitsize_map[i].cxu_id;
break;
}
@@ -1484,25 +1472,26 @@
static int __must_check
ccp_do_blkcipher(struct ccp_queue *qp, struct ccp_session *s,
- struct cryptop *crp, struct cryptodesc *crd,
- const struct ccp_completion_ctx *cctx)
+ struct cryptop *crp, const struct ccp_completion_ctx *cctx)
{
+ const struct crypto_session_params *csp;
struct ccp_desc *desc;
char *keydata;
device_t dev;
enum ccp_cipher_dir dir;
- int error;
+ int error, iv_len;
size_t keydata_len;
unsigned i, j;
dev = qp->cq_softc->dev;
- if (s->blkcipher.key_len == 0 || crd->crd_len == 0) {
+ if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0) {
DPRINTF(dev, "%s: empty\n", __func__);
return (EINVAL);
}
- if ((crd->crd_len % AES_BLOCK_LEN) != 0) {
- DPRINTF(dev, "%s: len modulo: %d\n", __func__, crd->crd_len);
+ if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) {
+ DPRINTF(dev, "%s: len modulo: %d\n", __func__,
+ crp->crp_payload_length);
return (EINVAL);
}
@@ -1519,16 +1508,20 @@
}
/* Gather IV/nonce data */
- ccp_collect_iv(s, crp, crd);
+ csp = crypto_get_params(crp->crp_session);
+ ccp_collect_iv(crp, csp, s->blkcipher.iv);
+ iv_len = csp->csp_ivlen;
+ if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
+ iv_len = AES_BLOCK_LEN;
- if ((crd->crd_flags & CRD_F_ENCRYPT) != 0)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
dir = CCP_CIPHER_DIR_ENCRYPT;
else
dir = CCP_CIPHER_DIR_DECRYPT;
/* Set up passthrough op(s) to copy IV into LSB */
error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_IV),
- s->blkcipher.iv, s->blkcipher.iv_len);
+ s->blkcipher.iv, iv_len);
if (error != 0)
return (error);
@@ -1539,15 +1532,16 @@
keydata_len = 0;
keydata = NULL;
- switch (crd->crd_alg) {
+ switch (csp->csp_cipher_alg) {
case CRYPTO_AES_XTS:
for (j = 0; j < nitems(ccp_xts_unitsize_map); j++)
- if (ccp_xts_unitsize_map[j].cxu_size == crd->crd_len)
+ if (ccp_xts_unitsize_map[j].cxu_size ==
+ crp->crp_payload_length)
break;
/* Input buffer must be a supported UnitSize */
if (j >= nitems(ccp_xts_unitsize_map)) {
device_printf(dev, "%s: rejected block size: %u\n",
- __func__, crd->crd_len);
+ __func__, crp->crp_payload_length);
return (EOPNOTSUPP);
}
/* FALLTHROUGH */
@@ -1560,14 +1554,14 @@
INSECURE_DEBUG(dev, "%s: KEY(%zu): %16D\n", __func__, keydata_len,
keydata, " ");
- if (crd->crd_alg == CRYPTO_AES_XTS)
+ if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
INSECURE_DEBUG(dev, "%s: KEY(XTS): %64D\n", __func__, keydata, " ");
/* Reverse order of key material for HW */
ccp_byteswap(keydata, keydata_len);
/* Store key material into LSB to avoid page boundaries */
- if (crd->crd_alg == CRYPTO_AES_XTS) {
+ if (csp->csp_cipher_alg == CRYPTO_AES_XTS) {
/*
* XTS mode uses 2 256-bit vectors for the primary key and the
* tweak key. For 128-bit keys, the vectors are zero-padded.
@@ -1611,7 +1605,7 @@
*/
sglist_reset(qp->cq_sg_ulptx);
error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
- crd->crd_skip, crd->crd_len);
+ crp->crp_payload_start, crp->crp_payload_length);
if (error != 0)
return (error);
@@ -1623,8 +1617,8 @@
if (ccp_queue_get_ring_space(qp) < qp->cq_sg_ulptx->sg_nseg)
return (EAGAIN);
- if (crd->crd_alg == CRYPTO_AES_XTS)
- return (ccp_do_xts(qp, s, crp, crd, dir, cctx));
+ if (csp->csp_cipher_alg == CRYPTO_AES_XTS)
+ return (ccp_do_xts(qp, s, crp, dir, cctx));
for (i = 0; i < qp->cq_sg_ulptx->sg_nseg; i++) {
struct sglist_seg *seg;
@@ -1647,7 +1641,7 @@
desc->aes.encrypt = dir;
desc->aes.mode = s->blkcipher.cipher_mode;
desc->aes.type = s->blkcipher.cipher_type;
- if (crd->crd_alg == CRYPTO_AES_ICM)
+ if (csp->csp_cipher_alg == CRYPTO_AES_ICM)
/*
* Size of CTR value in bits, minus 1. ICM mode uses all
* 128 bits as counter.
@@ -1684,38 +1678,29 @@
ccp_blkcipher(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
{
struct ccp_completion_ctx ctx;
- struct cryptodesc *crd;
-
- crd = crp->crp_desc;
ctx.callback_fn = ccp_blkcipher_done;
ctx.session = s;
ctx.callback_arg = crp;
- return (ccp_do_blkcipher(qp, s, crp, crd, &ctx));
+ return (ccp_do_blkcipher(qp, s, crp, &ctx));
}
static void
ccp_authenc_done(struct ccp_queue *qp, struct ccp_session *s, void *vcrp,
int error)
{
- struct cryptodesc *crda;
struct cryptop *crp;
- explicit_bzero(&s->blkcipher, sizeof(s->blkcipher));
+ explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv));
crp = vcrp;
- if (s->cipher_first)
- crda = crp->crp_desc->crd_next;
- else
- crda = crp->crp_desc;
- ccp_do_hmac_done(qp, s, crp, crda, error);
+ ccp_do_hmac_done(qp, s, crp, error);
}
int __must_check
-ccp_authenc(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp,
- struct cryptodesc *crda, struct cryptodesc *crde)
+ccp_authenc(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
{
struct ccp_completion_ctx ctx;
int error;
@@ -1725,18 +1710,18 @@
ctx.callback_arg = crp;
/* Perform first operation */
- if (s->cipher_first)
- error = ccp_do_blkcipher(qp, s, crp, crde, NULL);
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
+ error = ccp_do_blkcipher(qp, s, crp, NULL);
else
- error = ccp_do_hmac(qp, s, crp, crda, NULL);
+ error = ccp_do_hmac(qp, s, crp, NULL);
if (error != 0)
return (error);
/* Perform second operation */
- if (s->cipher_first)
- error = ccp_do_hmac(qp, s, crp, crda, &ctx);
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
+ error = ccp_do_hmac(qp, s, crp, &ctx);
else
- error = ccp_do_blkcipher(qp, s, crp, crde, &ctx);
+ error = ccp_do_blkcipher(qp, s, crp, &ctx);
return (error);
}
@@ -1853,17 +1838,9 @@
int error)
{
char tag[GMAC_DIGEST_LEN];
- struct cryptodesc *crde, *crda;
struct cryptop *crp;
crp = vcrp;
- if (s->cipher_first) {
- crde = crp->crp_desc;
- crda = crp->crp_desc->crd_next;
- } else {
- crde = crp->crp_desc->crd_next;
- crda = crp->crp_desc;
- }
s->pending--;
@@ -1873,27 +1850,26 @@
}
/* Encrypt is done. Decrypt needs to verify tag. */
- if ((crde->crd_flags & CRD_F_ENCRYPT) != 0)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
goto out;
/* Copy in message tag. */
- crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
- sizeof(tag), tag);
+ crypto_copydata(crp, crp->crp_digest_start, s->gmac.hash_len, tag);
/* Verify tag against computed GMAC */
if (timingsafe_bcmp(tag, s->gmac.final_block, s->gmac.hash_len) != 0)
crp->crp_etype = EBADMSG;
out:
- explicit_bzero(&s->blkcipher, sizeof(s->blkcipher));
- explicit_bzero(&s->gmac, sizeof(s->gmac));
+ explicit_bzero(&s->blkcipher.iv, sizeof(s->blkcipher.iv));
+ explicit_bzero(&s->gmac.final_block, sizeof(s->gmac.final_block));
crypto_done(crp);
}
int __must_check
-ccp_gcm(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp,
- struct cryptodesc *crda, struct cryptodesc *crde)
+ccp_gcm(struct ccp_queue *qp, struct ccp_session *s, struct cryptop *crp)
{
+ const struct crypto_session_params *csp;
struct ccp_completion_ctx ctx;
enum ccp_cipher_dir dir;
device_t dev;
@@ -1903,16 +1879,9 @@
if (s->blkcipher.key_len == 0)
return (EINVAL);
- /*
- * AAD is only permitted before the cipher/plain text, not
- * after.
- */
- if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
- return (EINVAL);
-
dev = qp->cq_softc->dev;
- if ((crde->crd_flags & CRD_F_ENCRYPT) != 0)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
dir = CCP_CIPHER_DIR_ENCRYPT;
else
dir = CCP_CIPHER_DIR_DECRYPT;
@@ -1921,14 +1890,15 @@
memset(s->blkcipher.iv, 0, sizeof(s->blkcipher.iv));
/* Gather IV data */
- ccp_collect_iv(s, crp, crde);
+ csp = crypto_get_params(crp->crp_session);
+ ccp_collect_iv(crp, csp, s->blkcipher.iv);
/* Reverse order of key material for HW */
ccp_byteswap(s->blkcipher.enckey, s->blkcipher.key_len);
/* Prepare input buffer of concatenated lengths for final GHASH */
- be64enc(s->gmac.final_block, (uint64_t)crda->crd_len * 8);
- be64enc(&s->gmac.final_block[8], (uint64_t)crde->crd_len * 8);
+ be64enc(s->gmac.final_block, (uint64_t)crp->crp_aad_length * 8);
+ be64enc(&s->gmac.final_block[8], (uint64_t)crp->crp_payload_length * 8);
/* Send IV + initial zero GHASH, key data, and lengths buffer to LSB */
error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_IV),
@@ -1946,10 +1916,10 @@
return (error);
/* First step - compute GHASH over AAD */
- if (crda->crd_len != 0) {
+ if (crp->crp_aad_length != 0) {
sglist_reset(qp->cq_sg_ulptx);
error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
- crda->crd_skip, crda->crd_len);
+ crp->crp_aad_start, crp->crp_aad_length);
if (error != 0)
return (error);
@@ -1971,7 +1941,7 @@
/* Feed data piece by piece into GCTR */
sglist_reset(qp->cq_sg_ulptx);
error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
- crde->crd_skip, crde->crd_len);
+ crp->crp_payload_start, crp->crp_payload_length);
if (error != 0)
return (error);
@@ -1997,7 +1967,7 @@
seg = &qp->cq_sg_ulptx->sg_segs[i];
error = ccp_do_gctr(qp, s, dir, seg,
- (i == 0 && crda->crd_len == 0),
+ (i == 0 && crp->crp_aad_length == 0),
i == (qp->cq_sg_ulptx->sg_nseg - 1));
if (error != 0)
return (error);
@@ -2005,7 +1975,7 @@
/* Send just initial IV (not GHASH!) to LSB again */
error = ccp_do_pst_to_lsb(qp, ccp_queue_lsb_address(qp, LSB_ENTRY_IV),
- s->blkcipher.iv, s->blkcipher.iv_len);
+ s->blkcipher.iv, AES_BLOCK_LEN);
if (error != 0)
return (error);
@@ -2022,7 +1992,7 @@
sglist_reset(qp->cq_sg_ulptx);
if (dir == CCP_CIPHER_DIR_ENCRYPT)
error = sglist_append_sglist(qp->cq_sg_ulptx, qp->cq_sg_crp,
- crda->crd_inject, s->gmac.hash_len);
+ crp->crp_digest_start, s->gmac.hash_len);
else
/*
* For decrypting, copy the computed tag out to our session
Index: sys/crypto/via/padlock.h
===================================================================
--- sys/crypto/via/padlock.h
+++ sys/crypto/via/padlock.h
@@ -68,7 +68,6 @@
union padlock_cw ses_cw __aligned(16);
uint32_t ses_ekey[4 * (RIJNDAEL_MAXNR + 1) + 4] __aligned(16); /* 128 bit aligned */
uint32_t ses_dkey[4 * (RIJNDAEL_MAXNR + 1) + 4] __aligned(16); /* 128 bit aligned */
- uint8_t ses_iv[16] __aligned(16); /* 128 bit aligned */
struct auth_hash *ses_axf;
uint8_t *ses_ictx;
uint8_t *ses_octx;
@@ -79,13 +78,14 @@
#define PADLOCK_ALIGN(p) (void *)(roundup2((uintptr_t)(p), 16))
int padlock_cipher_setup(struct padlock_session *ses,
- struct cryptoini *encini);
+ const struct crypto_session_params *csp);
int padlock_cipher_process(struct padlock_session *ses,
- struct cryptodesc *enccrd, struct cryptop *crp);
+ struct cryptop *crp, const struct crypto_session_params *csp);
+bool padlock_hash_check(const struct crypto_session_params *csp);
int padlock_hash_setup(struct padlock_session *ses,
- struct cryptoini *macini);
+ const struct crypto_session_params *csp);
int padlock_hash_process(struct padlock_session *ses,
- struct cryptodesc *maccrd, struct cryptop *crp);
+ struct cryptop *crp, const struct crypto_session_params *csp);
void padlock_hash_free(struct padlock_session *ses);
#endif /* !_PADLOCK_H_ */
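PADLOCK_ALIGN above pairs with the over-allocation in padlock_cipher_alloc further down: the engine wants 16-byte aligned data, so unaligned requests go through a scratch buffer allocated with 16 spare bytes. The idiom, as a sketch with an invented helper name:

    /*
     * Sketch: return a 16-byte aligned view of a fresh allocation.
     * The caller frees *rawp, not the returned pointer.
     */
    static uint8_t *
    padlock_aligned_scratch(size_t len, uint8_t **rawp)
    {
        uint8_t *raw;

        raw = malloc(len + 16, M_PADLOCK, M_NOWAIT);
        if (raw == NULL)
            return (NULL);
        *rawp = raw;
        return (PADLOCK_ALIGN(raw));
    }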
Index: sys/crypto/via/padlock.c
===================================================================
--- sys/crypto/via/padlock.c
+++ sys/crypto/via/padlock.c
@@ -60,7 +60,9 @@
int32_t sc_cid;
};
-static int padlock_newsession(device_t, crypto_session_t cses, struct cryptoini *cri);
+static int padlock_probesession(device_t, const struct crypto_session_params *);
+static int padlock_newsession(device_t, crypto_session_t cses,
+ const struct crypto_session_params *);
static void padlock_freesession(device_t, crypto_session_t cses);
static void padlock_freesession_one(struct padlock_softc *sc,
struct padlock_session *ses);
@@ -123,13 +125,6 @@
return (ENOMEM);
}
- crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_RIPEMD160_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0);
return (0);
}
@@ -143,63 +138,65 @@
}
static int
-padlock_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+padlock_probesession(device_t dev, const struct crypto_session_params *csp)
{
- struct padlock_softc *sc = device_get_softc(dev);
- struct padlock_session *ses = NULL;
- struct cryptoini *encini, *macini;
- struct thread *td;
- int error;
- if (cri == NULL)
+ if (csp->csp_flags != 0)
return (EINVAL);
- encini = macini = NULL;
- for (; cri != NULL; cri = cri->cri_next) {
- switch (cri->cri_alg) {
- case CRYPTO_NULL_HMAC:
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_RIPEMD160_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- if (macini != NULL)
- return (EINVAL);
- macini = cri;
- break;
- case CRYPTO_AES_CBC:
- if (encini != NULL)
- return (EINVAL);
- encini = cri;
- break;
- default:
- return (EINVAL);
- }
- }
-
/*
* We only support HMAC algorithms to be able to work with
* ipsec(4), so if we are asked only for authentication without
- * encryption, don't pretend we can accellerate it.
+ * encryption, don't pretend we can accelerate it.
+ *
+ * XXX: For CPUs with SHA instructions we should probably
+ * permit CSP_MODE_DIGEST so that those can be tested.
*/
- if (encini == NULL)
+ switch (csp->csp_mode) {
+ case CSP_MODE_ETA:
+ if (!padlock_hash_check(csp))
+ return (EINVAL);
+ /* FALLTHROUGH */
+ case CSP_MODE_CIPHER:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+ break;
+ default:
return (EINVAL);
+ }
+
+ return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
+}
+
+static int
+padlock_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
+{
+ struct padlock_softc *sc = device_get_softc(dev);
+ struct padlock_session *ses = NULL;
+ struct thread *td;
+ int error;
ses = crypto_get_driver_session(cses);
ses->ses_fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);
- error = padlock_cipher_setup(ses, encini);
+ error = padlock_cipher_setup(ses, csp);
if (error != 0) {
padlock_freesession_one(sc, ses);
return (error);
}
- if (macini != NULL) {
+ if (csp->csp_mode == CSP_MODE_ETA) {
td = curthread;
fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL |
FPU_KERN_KTHR);
- error = padlock_hash_setup(ses, macini);
+ error = padlock_hash_setup(ses, csp);
fpu_kern_leave(td, ses->ses_fpu_ctx);
if (error != 0) {
padlock_freesession_one(sc, ses);
@@ -231,68 +228,34 @@
static int
padlock_process(device_t dev, struct cryptop *crp, int hint __unused)
{
- struct padlock_session *ses = NULL;
- struct cryptodesc *crd, *enccrd, *maccrd;
- int error = 0;
+ const struct crypto_session_params *csp;
+ struct padlock_session *ses;
+ int error;
- enccrd = maccrd = NULL;
-
- /* Sanity check. */
- if (crp == NULL)
- return (EINVAL);
-
- if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
- error = EINVAL;
- goto out;
- }
-
- for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
- switch (crd->crd_alg) {
- case CRYPTO_NULL_HMAC:
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_RIPEMD160_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- if (maccrd != NULL) {
- error = EINVAL;
- goto out;
- }
- maccrd = crd;
- break;
- case CRYPTO_AES_CBC:
- if (enccrd != NULL) {
- error = EINVAL;
- goto out;
- }
- enccrd = crd;
- break;
- default:
- return (EINVAL);
- }
- }
- if (enccrd == NULL || (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
+ if ((crp->crp_payload_length % AES_BLOCK_LEN) != 0) {
error = EINVAL;
goto out;
}
ses = crypto_get_driver_session(crp->crp_session);
+ csp = crypto_get_params(crp->crp_session);
- /* Perform data authentication if requested before encryption. */
- if (maccrd != NULL && maccrd->crd_next == enccrd) {
- error = padlock_hash_process(ses, maccrd, crp);
+ /* Perform data authentication if requested before decryption. */
+ if (csp->csp_mode == CSP_MODE_ETA &&
+ !CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ error = padlock_hash_process(ses, crp, csp);
if (error != 0)
goto out;
}
- error = padlock_cipher_process(ses, enccrd, crp);
+ error = padlock_cipher_process(ses, crp, csp);
if (error != 0)
goto out;
/* Perform data authentication if requested after encryption. */
- if (maccrd != NULL && enccrd->crd_next == maccrd) {
- error = padlock_hash_process(ses, maccrd, crp);
+ if (csp->csp_mode == CSP_MODE_ETA &&
+ CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ error = padlock_hash_process(ses, crp, csp);
if (error != 0)
goto out;
}
@@ -320,6 +283,7 @@
DEVMETHOD(device_attach, padlock_attach),
DEVMETHOD(device_detach, padlock_detach),
+ DEVMETHOD(cryptodev_probesession, padlock_probesession),
DEVMETHOD(cryptodev_newsession, padlock_newsession),
DEVMETHOD(cryptodev_freesession,padlock_freesession),
DEVMETHOD(cryptodev_process, padlock_process),
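padlock_process orders the two halves of an ETA request around the cipher direction: on decrypt the MAC is verified before deciphering, on encrypt it is computed over the fresh ciphertext. The control flow as a standalone sketch with hypothetical callbacks:

    static int
    eta_dispatch(struct cryptop *crp,
        int (*do_cipher)(struct cryptop *), int (*do_mac)(struct cryptop *))
    {
        int error;

        if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
            /* Decrypt: authenticate the ciphertext first. */
            error = do_mac(crp);
            if (error != 0)
                return (error);
        }
        error = do_cipher(crp);
        if (error != 0)
            return (error);
        if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
            /* Encrypt: MAC the freshly produced ciphertext. */
            error = do_mac(crp);
        return (error);
    }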
Index: sys/crypto/via/padlock_cipher.c
===================================================================
--- sys/crypto/via/padlock_cipher.c
+++ sys/crypto/via/padlock_cipher.c
@@ -98,7 +98,7 @@
}
static void
-padlock_cipher_key_setup(struct padlock_session *ses, caddr_t key, int klen)
+padlock_cipher_key_setup(struct padlock_session *ses, const void *key, int klen)
{
union padlock_cw *cw;
int i;
@@ -106,8 +106,8 @@
cw = &ses->ses_cw;
if (cw->cw_key_generation == PADLOCK_KEY_GENERATION_SW) {
/* Build expanded keys for both directions */
- rijndaelKeySetupEnc(ses->ses_ekey, key, klen);
- rijndaelKeySetupDec(ses->ses_dkey, key, klen);
+ rijndaelKeySetupEnc(ses->ses_ekey, key, klen * 8);
+ rijndaelKeySetupDec(ses->ses_dkey, key, klen * 8);
for (i = 0; i < 4 * (RIJNDAEL_MAXNR + 1); i++) {
ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
@@ -119,12 +119,13 @@
}
int
-padlock_cipher_setup(struct padlock_session *ses, struct cryptoini *encini)
+padlock_cipher_setup(struct padlock_session *ses,
+ const struct crypto_session_params *csp)
{
union padlock_cw *cw;
- if (encini->cri_klen != 128 && encini->cri_klen != 192 &&
- encini->cri_klen != 256) {
+ if (csp->csp_cipher_klen != 16 && csp->csp_cipher_klen != 24 &&
+ csp->csp_cipher_klen != 32) {
return (EINVAL);
}
@@ -133,7 +134,7 @@
cw->cw_algorithm_type = PADLOCK_ALGORITHM_TYPE_AES;
cw->cw_key_generation = PADLOCK_KEY_GENERATION_SW;
cw->cw_intermediate = 0;
- switch (encini->cri_klen) {
+ switch (csp->csp_cipher_klen * 8) {
case 128:
cw->cw_round_count = PADLOCK_ROUND_COUNT_AES128;
cw->cw_key_size = PADLOCK_KEY_SIZE_128;
@@ -151,12 +152,10 @@
cw->cw_key_size = PADLOCK_KEY_SIZE_256;
break;
}
- if (encini->cri_key != NULL) {
- padlock_cipher_key_setup(ses, encini->cri_key,
- encini->cri_klen);
+ if (csp->csp_cipher_key != NULL) {
+ padlock_cipher_key_setup(ses, csp->csp_cipher_key,
+ csp->csp_cipher_klen);
}
-
- arc4rand(ses->ses_iv, sizeof(ses->ses_iv), 0);
return (0);
}
@@ -166,56 +165,60 @@
 * If it isn't, a new buffer is allocated.
*/
static u_char *
-padlock_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
- int *allocated)
+padlock_cipher_alloc(struct cryptop *crp, int *allocated)
{
u_char *addr;
- if (crp->crp_flags & CRYPTO_F_IMBUF)
- goto alloc;
- else {
- if (crp->crp_flags & CRYPTO_F_IOV) {
- struct uio *uio;
- struct iovec *iov;
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ break;
+ case CRYPTO_BUF_UIO: {
+ struct uio *uio;
+ struct iovec *iov;
- uio = (struct uio *)crp->crp_buf;
- if (uio->uio_iovcnt != 1)
- goto alloc;
- iov = uio->uio_iov;
- addr = (u_char *)iov->iov_base + enccrd->crd_skip;
- } else {
- addr = (u_char *)crp->crp_buf;
- }
+ uio = crp->crp_uio;
+ if (uio->uio_iovcnt != 1)
+ break;
+ iov = uio->uio_iov;
+ addr = (u_char *)iov->iov_base + crp->crp_payload_start;
if (((uintptr_t)addr & 0xf) != 0) /* 16 bytes aligned? */
- goto alloc;
+ break;
*allocated = 0;
return (addr);
}
-alloc:
+ case CRYPTO_BUF_CONTIG:
+ addr = (u_char *)crp->crp_buf + crp->crp_payload_start;
+ if (((uintptr_t)addr & 0xf) != 0) /* 16 bytes aligned? */
+ break;
+ *allocated = 0;
+ return (addr);
+ }
+
*allocated = 1;
- addr = malloc(enccrd->crd_len + 16, M_PADLOCK, M_NOWAIT);
+ addr = malloc(crp->crp_payload_length + 16, M_PADLOCK, M_NOWAIT);
return (addr);
}
int
-padlock_cipher_process(struct padlock_session *ses, struct cryptodesc *enccrd,
- struct cryptop *crp)
+padlock_cipher_process(struct padlock_session *ses, struct cryptop *crp,
+ const struct crypto_session_params *csp)
{
union padlock_cw *cw;
struct thread *td;
u_char *buf, *abuf;
uint32_t *key;
+ uint8_t iv[AES_BLOCK_LEN] __aligned(16);
int allocated;
- buf = padlock_cipher_alloc(enccrd, crp, &allocated);
+ buf = padlock_cipher_alloc(crp, &allocated);
if (buf == NULL)
return (ENOMEM);
/* Buffer has to be 16 bytes aligned. */
abuf = PADLOCK_ALIGN(buf);
- if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
- padlock_cipher_key_setup(ses, enccrd->crd_key,
- enccrd->crd_klen);
+ if (crp->crp_cipher_key != NULL) {
+ padlock_cipher_key_setup(ses, crp->crp_cipher_key,
+ csp->csp_cipher_klen);
}
cw = &ses->ses_cw;
@@ -223,52 +226,39 @@
cw->cw_filler1 = 0;
cw->cw_filler2 = 0;
cw->cw_filler3 = 0;
- if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
+
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(iv, AES_BLOCK_LEN, 0);
+ crypto_copyback(crp, crp->crp_iv_start, AES_BLOCK_LEN, iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(iv, crp->crp_iv, AES_BLOCK_LEN);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, AES_BLOCK_LEN, iv);
+
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
cw->cw_direction = PADLOCK_DIRECTION_ENCRYPT;
key = ses->ses_ekey;
- if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
- bcopy(enccrd->crd_iv, ses->ses_iv, AES_BLOCK_LEN);
-
- if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, AES_BLOCK_LEN, ses->ses_iv);
- }
} else {
cw->cw_direction = PADLOCK_DIRECTION_DECRYPT;
key = ses->ses_dkey;
- if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
- bcopy(enccrd->crd_iv, ses->ses_iv, AES_BLOCK_LEN);
- else {
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, AES_BLOCK_LEN, ses->ses_iv);
- }
}
if (allocated) {
- crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
- enccrd->crd_len, abuf);
+ crypto_copydata(crp, crp->crp_payload_start,
+ crp->crp_payload_length, abuf);
}
td = curthread;
fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR);
- padlock_cbc(abuf, abuf, enccrd->crd_len / AES_BLOCK_LEN, key, cw,
- ses->ses_iv);
+ padlock_cbc(abuf, abuf, crp->crp_payload_length / AES_BLOCK_LEN, key,
+ cw, iv);
fpu_kern_leave(td, ses->ses_fpu_ctx);
if (allocated) {
- crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
- enccrd->crd_len, abuf);
- }
+ crypto_copyback(crp, crp->crp_payload_start,
+ crp->crp_payload_length, abuf);
- /* copy out last block for use as next session IV */
- if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
- AES_BLOCK_LEN, ses->ses_iv);
- }
-
- if (allocated) {
- bzero(buf, enccrd->crd_len + 16);
+ explicit_bzero(buf, crp->crp_payload_length + 16);
free(buf, M_PADLOCK);
}
return (0);
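
The same three-way IV sourcing recurs in every driver converted in this change
(cesa_process() and the ccr request builders below use it verbatim):
CRYPTO_F_IV_GENERATE asks the driver to create the IV and write it back at
crp_iv_start, CRYPTO_F_IV_SEPARATE passes the IV out of band in crp_iv, and
with neither flag set the IV is read from the data stream at crp_iv_start.
A sketch of the shared pattern for an AES-sized IV:

	/* Shared IV-sourcing pattern; all fields are introduced by this diff. */
	static void
	fetch_iv(struct cryptop *crp, uint8_t iv[AES_BLOCK_LEN])
	{

		if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
			/* Generate the IV and store it for the consumer. */
			arc4rand(iv, AES_BLOCK_LEN, 0);
			crypto_copyback(crp, crp->crp_iv_start, AES_BLOCK_LEN,
			    iv);
		} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
			/* IV supplied out of band in the request. */
			memcpy(iv, crp->crp_iv, AES_BLOCK_LEN);
		} else {
			/* IV is part of the data buffer. */
			crypto_copydata(crp, crp->crp_iv_start, AES_BLOCK_LEN,
			    iv);
		}
	}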
Index: sys/crypto/via/padlock_hash.c
===================================================================
--- sys/crypto/via/padlock_hash.c
+++ sys/crypto/via/padlock_hash.c
@@ -44,7 +44,6 @@
#include <machine/pcb.h>
#include <opencrypto/cryptodev.h>
-#include <opencrypto/cryptosoft.h> /* for hmac_ipad_buffer and hmac_opad_buffer */
#include <opencrypto/xform.h>
#include <crypto/via/padlock.h>
@@ -249,12 +248,11 @@
}
static void
-padlock_hash_key_setup(struct padlock_session *ses, caddr_t key, int klen)
+padlock_hash_key_setup(struct padlock_session *ses, const uint8_t *key,
+ int klen)
{
struct auth_hash *axf;
- int i;
- klen /= 8;
axf = ses->ses_axf;
/*
@@ -265,32 +263,17 @@
padlock_free_ctx(axf, ses->ses_ictx);
padlock_free_ctx(axf, ses->ses_octx);
- for (i = 0; i < klen; i++)
- key[i] ^= HMAC_IPAD_VAL;
-
- axf->Init(ses->ses_ictx);
- axf->Update(ses->ses_ictx, key, klen);
- axf->Update(ses->ses_ictx, hmac_ipad_buffer, axf->blocksize - klen);
-
- for (i = 0; i < klen; i++)
- key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
-
- axf->Init(ses->ses_octx);
- axf->Update(ses->ses_octx, key, klen);
- axf->Update(ses->ses_octx, hmac_opad_buffer, axf->blocksize - klen);
-
- for (i = 0; i < klen; i++)
- key[i] ^= HMAC_OPAD_VAL;
+ hmac_init_ipad(axf, key, klen, ses->ses_ictx);
+ hmac_init_opad(axf, key, klen, ses->ses_octx);
}
/*
* Compute keyed-hash authenticator.
*/
static int
-padlock_authcompute(struct padlock_session *ses, struct cryptodesc *crd,
- caddr_t buf, int flags)
+padlock_authcompute(struct padlock_session *ses, struct cryptop *crp)
{
- u_char hash[HASH_MAX_LEN];
+ u_char hash[HASH_MAX_LEN], hash2[HASH_MAX_LEN];
struct auth_hash *axf;
union authctx ctx;
int error;
@@ -298,7 +281,14 @@
axf = ses->ses_axf;
padlock_copy_ctx(axf, ses->ses_ictx, &ctx);
- error = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
+ error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
+ (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
+ if (error != 0) {
+ padlock_free_ctx(axf, &ctx);
+ return (error);
+ }
+ error = crypto_apply(crp, crp->crp_payload_start,
+ crp->crp_payload_length,
(int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
if (error != 0) {
padlock_free_ctx(axf, &ctx);
@@ -310,48 +300,75 @@
axf->Update(&ctx, hash, axf->hashsize);
axf->Final(hash, &ctx);
- /* Inject the authentication data */
- crypto_copyback(flags, buf, crd->crd_inject,
- ses->ses_mlen == 0 ? axf->hashsize : ses->ses_mlen, hash);
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, ses->ses_mlen,
+ hash2);
+ if (timingsafe_bcmp(hash, hash2, ses->ses_mlen) != 0)
+ return (EBADMSG);
+ } else
+ crypto_copyback(crp, crp->crp_digest_start, ses->ses_mlen,
+ hash);
return (0);
}
-int
-padlock_hash_setup(struct padlock_session *ses, struct cryptoini *macini)
+/* Find software structure which describes HMAC algorithm. */
+static struct auth_hash *
+padlock_hash_lookup(int alg)
{
+ struct auth_hash *axf;
- ses->ses_mlen = macini->cri_mlen;
-
- /* Find software structure which describes HMAC algorithm. */
- switch (macini->cri_alg) {
+ switch (alg) {
case CRYPTO_NULL_HMAC:
- ses->ses_axf = &auth_hash_null;
+ axf = &auth_hash_null;
break;
case CRYPTO_MD5_HMAC:
- ses->ses_axf = &auth_hash_hmac_md5;
+ axf = &auth_hash_hmac_md5;
break;
case CRYPTO_SHA1_HMAC:
if ((via_feature_xcrypt & VIA_HAS_SHA) != 0)
- ses->ses_axf = &padlock_hmac_sha1;
+ axf = &padlock_hmac_sha1;
else
- ses->ses_axf = &auth_hash_hmac_sha1;
+ axf = &auth_hash_hmac_sha1;
break;
case CRYPTO_RIPEMD160_HMAC:
- ses->ses_axf = &auth_hash_hmac_ripemd_160;
+ axf = &auth_hash_hmac_ripemd_160;
break;
case CRYPTO_SHA2_256_HMAC:
if ((via_feature_xcrypt & VIA_HAS_SHA) != 0)
- ses->ses_axf = &padlock_hmac_sha256;
+ axf = &padlock_hmac_sha256;
else
- ses->ses_axf = &auth_hash_hmac_sha2_256;
+ axf = &auth_hash_hmac_sha2_256;
break;
case CRYPTO_SHA2_384_HMAC:
- ses->ses_axf = &auth_hash_hmac_sha2_384;
+ axf = &auth_hash_hmac_sha2_384;
break;
case CRYPTO_SHA2_512_HMAC:
- ses->ses_axf = &auth_hash_hmac_sha2_512;
+ axf = &auth_hash_hmac_sha2_512;
+ break;
+ default:
+ axf = NULL;
break;
}
+ return (axf);
+}
+
+bool
+padlock_hash_check(const struct crypto_session_params *csp)
+{
+
+ return (padlock_hash_lookup(csp->csp_auth_alg) != NULL);
+}
+
+int
+padlock_hash_setup(struct padlock_session *ses,
+ const struct crypto_session_params *csp)
+{
+
+ ses->ses_axf = padlock_hash_lookup(csp->csp_auth_alg);
+ if (csp->csp_auth_mlen == 0)
+ ses->ses_mlen = ses->ses_axf->hashsize;
+ else
+ ses->ses_mlen = csp->csp_auth_mlen;
/* Allocate memory for HMAC inner and outer contexts. */
ses->ses_ictx = malloc(ses->ses_axf->ctxsize, M_PADLOCK,
@@ -362,26 +379,27 @@
return (ENOMEM);
/* Setup key if given. */
- if (macini->cri_key != NULL) {
- padlock_hash_key_setup(ses, macini->cri_key,
- macini->cri_klen);
+ if (csp->csp_auth_key != NULL) {
+ padlock_hash_key_setup(ses, csp->csp_auth_key,
+ csp->csp_auth_klen);
}
return (0);
}
int
-padlock_hash_process(struct padlock_session *ses, struct cryptodesc *maccrd,
- struct cryptop *crp)
+padlock_hash_process(struct padlock_session *ses, struct cryptop *crp,
+ const struct crypto_session_params *csp)
{
struct thread *td;
int error;
td = curthread;
fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL | FPU_KERN_KTHR);
- if ((maccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0)
- padlock_hash_key_setup(ses, maccrd->crd_key, maccrd->crd_klen);
+ if (crp->crp_auth_key != NULL)
+ padlock_hash_key_setup(ses, crp->crp_auth_key,
+ csp->csp_auth_klen);
- error = padlock_authcompute(ses, maccrd, crp->crp_buf, crp->crp_flags);
+ error = padlock_authcompute(ses, crp);
fpu_kern_leave(td, ses->ses_fpu_ctx);
return (error);
}
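
hmac_init_ipad() and hmac_init_opad() absorb the pad handling that was
open-coded here (and again in cesa_set_mkey() below). Reconstructed from the
removed loops, each helper is assumed to XOR the key with the pad byte, fill
the rest of the block with the bare pad value, and run Init/Update so the
per-request code can start from the precomputed inner or outer state. A
sketch of the ipad half under those assumptions:

	/*
	 * Assumed shape of hmac_init_ipad(), reconstructed from the code
	 * removed above; hmac_init_opad() is identical with HMAC_OPAD_VAL.
	 * Assumes klen <= axf->blocksize (longer keys are expected to be
	 * pre-hashed by the caller).
	 */
	static void
	sketch_hmac_init_ipad(struct auth_hash *axf, const uint8_t *key,
	    int klen, void *ctx)
	{
		uint8_t buf[HMAC_MAX_BLOCK_LEN];
		int i;

		for (i = 0; i < klen; i++)
			buf[i] = key[i] ^ HMAC_IPAD_VAL;
		memset(buf + klen, HMAC_IPAD_VAL, axf->blocksize - klen);
		axf->Init(ctx);
		axf->Update(ctx, buf, axf->blocksize);
		explicit_bzero(buf, sizeof(buf));
	}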
Index: sys/dev/cesa/cesa.h
===================================================================
--- sys/dev/cesa/cesa.h
+++ sys/dev/cesa/cesa.h
@@ -194,7 +194,6 @@
struct cesa_session {
uint32_t cs_config;
- unsigned int cs_klen;
unsigned int cs_ivlen;
unsigned int cs_hlen;
unsigned int cs_mblen;
@@ -208,8 +207,6 @@
struct cesa_sa_data *cr_csd;
bus_addr_t cr_csd_paddr;
struct cryptop *cr_crp;
- struct cryptodesc *cr_enc;
- struct cryptodesc *cr_mac;
struct cesa_session *cr_cs;
bus_dmamap_t cr_dmap;
int cr_dmap_loaded;
@@ -272,8 +269,6 @@
struct cesa_chain_info {
struct cesa_softc *cci_sc;
struct cesa_request *cci_cr;
- struct cryptodesc *cci_enc;
- struct cryptodesc *cci_mac;
uint32_t cci_config;
int cci_error;
};
Index: sys/dev/cesa/cesa.c
===================================================================
--- sys/dev/cesa/cesa.c
+++ sys/dev/cesa/cesa.c
@@ -69,6 +69,7 @@
#include <crypto/sha2/sha256.h>
#include <crypto/rijndael/rijndael.h>
#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform.h>
#include "cryptodev_if.h"
#include <arm/mv/mvreg.h>
@@ -80,7 +81,10 @@
static int cesa_attach_late(device_t);
static int cesa_detach(device_t);
static void cesa_intr(void *);
-static int cesa_newsession(device_t, crypto_session_t, struct cryptoini *);
+static int cesa_probesession(device_t,
+ const struct crypto_session_params *);
+static int cesa_newsession(device_t, crypto_session_t,
+ const struct crypto_session_params *);
static int cesa_process(device_t, struct cryptop *, int);
static struct resource_spec cesa_res_spec[] = {
@@ -97,6 +101,7 @@
DEVMETHOD(device_detach, cesa_detach),
/* Crypto device methods */
+ DEVMETHOD(cryptodev_probesession, cesa_probesession),
DEVMETHOD(cryptodev_newsession, cesa_newsession),
DEVMETHOD(cryptodev_process, cesa_process),
@@ -417,78 +422,68 @@
return (0);
}
-static int
+static void
cesa_set_mkey(struct cesa_session *cs, int alg, const uint8_t *mkey, int mklen)
{
- uint8_t ipad[CESA_MAX_HMAC_BLOCK_LEN];
- uint8_t opad[CESA_MAX_HMAC_BLOCK_LEN];
- SHA1_CTX sha1ctx;
- SHA256_CTX sha256ctx;
- MD5_CTX md5ctx;
+ union authctx auth_ctx;
uint32_t *hout;
uint32_t *hin;
int i;
- memset(ipad, HMAC_IPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN);
- memset(opad, HMAC_OPAD_VAL, CESA_MAX_HMAC_BLOCK_LEN);
- for (i = 0; i < mklen; i++) {
- ipad[i] ^= mkey[i];
- opad[i] ^= mkey[i];
- }
-
hin = (uint32_t *)cs->cs_hiv_in;
hout = (uint32_t *)cs->cs_hiv_out;
switch (alg) {
case CRYPTO_MD5_HMAC:
- MD5Init(&md5ctx);
- MD5Update(&md5ctx, ipad, MD5_BLOCK_LEN);
- memcpy(hin, md5ctx.state, sizeof(md5ctx.state));
- MD5Init(&md5ctx);
- MD5Update(&md5ctx, opad, MD5_BLOCK_LEN);
- memcpy(hout, md5ctx.state, sizeof(md5ctx.state));
+ hmac_init_ipad(&auth_hash_hmac_md5, mkey, mklen, &auth_ctx);
+ memcpy(hin, auth_ctx.md5ctx.state,
+ sizeof(auth_ctx.md5ctx.state));
+ hmac_init_opad(&auth_hash_hmac_md5, mkey, mklen, &auth_ctx);
+ memcpy(hout, auth_ctx.md5ctx.state,
+ sizeof(auth_ctx.md5ctx.state));
break;
case CRYPTO_SHA1_HMAC:
- SHA1Init(&sha1ctx);
- SHA1Update(&sha1ctx, ipad, SHA1_BLOCK_LEN);
- memcpy(hin, sha1ctx.h.b32, sizeof(sha1ctx.h.b32));
- SHA1Init(&sha1ctx);
- SHA1Update(&sha1ctx, opad, SHA1_BLOCK_LEN);
- memcpy(hout, sha1ctx.h.b32, sizeof(sha1ctx.h.b32));
+ hmac_init_ipad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx);
+ memcpy(hin, auth_ctx.sha1ctx.h.b32,
+ sizeof(auth_ctx.sha1ctx.h.b32));
+ hmac_init_opad(&auth_hash_hmac_sha1, mkey, mklen, &auth_ctx);
+ memcpy(hout, auth_ctx.sha1ctx.h.b32,
+ sizeof(auth_ctx.sha1ctx.h.b32));
break;
case CRYPTO_SHA2_256_HMAC:
- SHA256_Init(&sha256ctx);
- SHA256_Update(&sha256ctx, ipad, SHA2_256_BLOCK_LEN);
- memcpy(hin, sha256ctx.state, sizeof(sha256ctx.state));
- SHA256_Init(&sha256ctx);
- SHA256_Update(&sha256ctx, opad, SHA2_256_BLOCK_LEN);
- memcpy(hout, sha256ctx.state, sizeof(sha256ctx.state));
+ hmac_init_ipad(&auth_hash_hmac_sha2_256, mkey, mklen,
+ &auth_ctx);
+ memcpy(hin, auth_ctx.sha256ctx.state,
+ sizeof(auth_ctx.sha256ctx.state));
+ hmac_init_opad(&auth_hash_hmac_sha2_256, mkey, mklen,
+ &auth_ctx);
+ memcpy(hout, auth_ctx.sha256ctx.state,
+ sizeof(auth_ctx.sha256ctx.state));
break;
default:
- return (EINVAL);
+ panic("shouldn't get here");
}
for (i = 0; i < CESA_MAX_HASH_LEN / sizeof(uint32_t); i++) {
hin[i] = htobe32(hin[i]);
hout[i] = htobe32(hout[i]);
}
-
- return (0);
}
static int
-cesa_prep_aes_key(struct cesa_session *cs)
+cesa_prep_aes_key(struct cesa_session *cs,
+ const struct crypto_session_params *csp)
{
uint32_t ek[4 * (RIJNDAEL_MAXNR + 1)];
uint32_t *dkey;
int i;
- rijndaelKeySetupEnc(ek, cs->cs_key, cs->cs_klen * 8);
+ rijndaelKeySetupEnc(ek, cs->cs_key, csp->csp_cipher_klen * 8);
cs->cs_config &= ~CESA_CSH_AES_KLEN_MASK;
dkey = (uint32_t *)cs->cs_aes_dkey;
- switch (cs->cs_klen) {
+ switch (csp->csp_cipher_klen) {
case 16:
cs->cs_config |= CESA_CSH_AES_KLEN_128;
for (i = 0; i < 4; i++)
@@ -515,22 +510,6 @@
return (0);
}
-static int
-cesa_is_hash(int alg)
-{
-
- switch (alg) {
- case CRYPTO_MD5:
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- return (1);
- default:
- return (0);
- }
-}
-
static void
cesa_start_packet(struct cesa_packet *cp, unsigned int size)
{
@@ -584,6 +563,7 @@
unsigned int skip, len;
struct cesa_sa_desc *csd;
struct cesa_request *cr;
+ struct cryptop *crp;
struct cesa_softc *sc;
struct cesa_packet cp;
bus_dma_segment_t seg;
@@ -593,73 +573,107 @@
cci = arg;
sc = cci->cci_sc;
cr = cci->cci_cr;
+ crp = cr->cr_crp;
if (error) {
cci->cci_error = error;
return;
}
- elen = cci->cci_enc ? cci->cci_enc->crd_len : 0;
- eskip = cci->cci_enc ? cci->cci_enc->crd_skip : 0;
- mlen = cci->cci_mac ? cci->cci_mac->crd_len : 0;
- mskip = cci->cci_mac ? cci->cci_mac->crd_skip : 0;
-
- if (elen && mlen &&
- ((eskip > mskip && ((eskip - mskip) & (cr->cr_cs->cs_ivlen - 1))) ||
- (mskip > eskip && ((mskip - eskip) & (cr->cr_cs->cs_mblen - 1))) ||
- (eskip > (mskip + mlen)) || (mskip > (eskip + elen)))) {
+ /*
+ * Only do a combined op if the AAD is adjacent to the payload
+ * and the AAD length is a multiple of the IV length. The
+ * checks against 'config' are to avoid recursing when the
+ * logic below invokes separate operations.
+ */
+ config = cci->cci_config;
+ if (((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC ||
+ (config & CESA_CSHD_OP_MASK) == CESA_CSHD_ENC_AND_MAC) &&
+ crp->crp_aad_length != 0 &&
+ (crp->crp_aad_length & (cr->cr_cs->cs_ivlen - 1)) != 0) {
/*
 * Data alignment in the request does not meet CESA requirements
 * for combined encryption/decryption and hashing. We have to
 * split the request into separate operations and process them
* one by one.
*/
- config = cci->cci_config;
if ((config & CESA_CSHD_OP_MASK) == CESA_CSHD_MAC_AND_ENC) {
config &= ~CESA_CSHD_OP_MASK;
cci->cci_config = config | CESA_CSHD_MAC;
- cci->cci_enc = NULL;
- cci->cci_mac = cr->cr_mac;
- cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
+ cesa_create_chain_cb(cci, segs, nseg, 0);
cci->cci_config = config | CESA_CSHD_ENC;
- cci->cci_enc = cr->cr_enc;
- cci->cci_mac = NULL;
- cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
+ cesa_create_chain_cb(cci, segs, nseg, 0);
} else {
config &= ~CESA_CSHD_OP_MASK;
cci->cci_config = config | CESA_CSHD_ENC;
- cci->cci_enc = cr->cr_enc;
- cci->cci_mac = NULL;
- cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
+ cesa_create_chain_cb(cci, segs, nseg, 0);
cci->cci_config = config | CESA_CSHD_MAC;
- cci->cci_enc = NULL;
- cci->cci_mac = cr->cr_mac;
- cesa_create_chain_cb(cci, segs, nseg, cci->cci_error);
+ cesa_create_chain_cb(cci, segs, nseg, 0);
}
return;
}
+ mskip = mlen = eskip = elen = 0;
+
+ if (crp->crp_aad_length == 0) {
+ skip = crp->crp_payload_start;
+ len = crp->crp_payload_length;
+ switch (config & CESA_CSHD_OP_MASK) {
+ case CESA_CSHD_ENC:
+ eskip = skip;
+ elen = len;
+ break;
+ case CESA_CSHD_MAC:
+ mskip = skip;
+ mlen = len;
+ break;
+ default:
+ eskip = skip;
+ elen = len;
+ mskip = skip;
+ mlen = len;
+ break;
+ }
+ } else {
+ /*
+ * For an encryption-only separate request, only
+ * process the payload. For combined requests and
+ * hash-only requests, process the entire region.
+ */
+ switch (config & CESA_CSHD_OP_MASK) {
+ case CESA_CSHD_ENC:
+ skip = crp->crp_payload_start;
+ len = crp->crp_payload_length;
+ eskip = skip;
+ elen = len;
+ break;
+ case CESA_CSHD_MAC:
+ skip = crp->crp_aad_start;
+ len = crp->crp_aad_length + crp->crp_payload_length;
+ mskip = skip;
+ mlen = len;
+ break;
+ default:
+ skip = crp->crp_aad_start;
+ len = crp->crp_aad_length + crp->crp_payload_length;
+ mskip = skip;
+ mlen = len;
+ eskip = crp->crp_payload_start;
+ elen = crp->crp_payload_length;
+ break;
+ }
+ }
+
tmlen = mlen;
fragmented = 0;
mpsize = CESA_MAX_PACKET_SIZE;
mpsize &= ~((cr->cr_cs->cs_ivlen - 1) | (cr->cr_cs->cs_mblen - 1));
- if (elen && mlen) {
- skip = MIN(eskip, mskip);
- len = MAX(elen + eskip, mlen + mskip) - skip;
- } else if (elen) {
- skip = eskip;
- len = elen;
- } else {
- skip = mskip;
- len = mlen;
- }
-
/* Start first packet in chain */
cesa_start_packet(&cp, MIN(mpsize, len));
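
As a worked example of the region setup above: an ETA request with 16 bytes
of AAD at offset 0 followed by a 64-byte payload at offset 16 (the AAD length
is a multiple of the AES IV length, so the combined path applies) computes
skip = 0 and len = 80, yielding mskip = 0, mlen = 80 (the MAC covers AAD plus
payload) and eskip = 16, elen = 64 (the cipher covers only the payload) for
the packet chain built by cesa_start_packet().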
@@ -777,16 +791,9 @@
}
}
-static void
-cesa_create_chain_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
- bus_size_t size, int error)
-{
-
- cesa_create_chain_cb(arg, segs, nseg, error);
-}
-
static int
-cesa_create_chain(struct cesa_softc *sc, struct cesa_request *cr)
+cesa_create_chain(struct cesa_softc *sc,
+ const struct crypto_session_params *csp, struct cesa_request *cr)
{
struct cesa_chain_info cci;
struct cesa_tdma_desc *ctd;
@@ -797,17 +804,17 @@
CESA_LOCK_ASSERT(sc, sessions);
/* Create request metadata */
- if (cr->cr_enc) {
- if (cr->cr_enc->crd_alg == CRYPTO_AES_CBC &&
- (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
+ if (csp->csp_cipher_klen != 0) {
+ if (csp->csp_cipher_alg == CRYPTO_AES_CBC &&
+ !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op))
memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_aes_dkey,
- cr->cr_cs->cs_klen);
+ csp->csp_cipher_klen);
else
memcpy(cr->cr_csd->csd_key, cr->cr_cs->cs_key,
- cr->cr_cs->cs_klen);
+ csp->csp_cipher_klen);
}
- if (cr->cr_mac) {
+ if (csp->csp_auth_klen != 0) {
memcpy(cr->cr_csd->csd_hiv_in, cr->cr_cs->cs_hiv_in,
CESA_MAX_HASH_LEN);
memcpy(cr->cr_csd->csd_hiv_out, cr->cr_cs->cs_hiv_out,
@@ -823,37 +830,30 @@
/* Prepare SA configuration */
config = cr->cr_cs->cs_config;
- if (cr->cr_enc && (cr->cr_enc->crd_flags & CRD_F_ENCRYPT) == 0)
+ if (csp->csp_cipher_alg != 0 &&
+ !CRYPTO_OP_IS_ENCRYPT(cr->cr_crp->crp_op))
config |= CESA_CSHD_DECRYPT;
- if (cr->cr_enc && !cr->cr_mac)
+ switch (csp->csp_mode) {
+ case CSP_MODE_CIPHER:
config |= CESA_CSHD_ENC;
- if (!cr->cr_enc && cr->cr_mac)
+ break;
+ case CSP_MODE_DIGEST:
config |= CESA_CSHD_MAC;
- if (cr->cr_enc && cr->cr_mac)
+ break;
+ case CSP_MODE_ETA:
config |= (config & CESA_CSHD_DECRYPT) ? CESA_CSHD_MAC_AND_ENC :
CESA_CSHD_ENC_AND_MAC;
+ break;
+ }
/* Create data packets */
cci.cci_sc = sc;
cci.cci_cr = cr;
- cci.cci_enc = cr->cr_enc;
- cci.cci_mac = cr->cr_mac;
cci.cci_config = config;
cci.cci_error = 0;
- if (cr->cr_crp->crp_flags & CRYPTO_F_IOV)
- error = bus_dmamap_load_uio(sc->sc_data_dtag,
- cr->cr_dmap, (struct uio *)cr->cr_crp->crp_buf,
- cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
- else if (cr->cr_crp->crp_flags & CRYPTO_F_IMBUF)
- error = bus_dmamap_load_mbuf(sc->sc_data_dtag,
- cr->cr_dmap, (struct mbuf *)cr->cr_crp->crp_buf,
- cesa_create_chain_cb2, &cci, BUS_DMA_NOWAIT);
- else
- error = bus_dmamap_load(sc->sc_data_dtag,
- cr->cr_dmap, cr->cr_crp->crp_buf,
- cr->cr_crp->crp_ilen, cesa_create_chain_cb, &cci,
- BUS_DMA_NOWAIT);
+ error = bus_dmamap_load_crp(sc->sc_data_dtag, cr->cr_dmap, cr->cr_crp,
+ cesa_create_chain_cb, &cci, BUS_DMA_NOWAIT);
if (!error)
cr->cr_dmap_loaded = 1;
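
bus_dmamap_load_crp() collapses the open-coded three-way load deleted above
into a single helper. A sketch of the dispatch it is assumed to perform,
mirroring the removed branches; note that the mbuf and uio loaders take the
callback2 variant, which is the role the deleted cesa_create_chain_cb2() shim
used to play:

	/*
	 * Sketch only: assumed shape of bus_dmamap_load_crp().  The stack
	 * shim is safe here because BUS_DMA_NOWAIT loads do not defer the
	 * callback.
	 */
	struct load_crp_shim {
		bus_dmamap_callback_t *cb;
		void *arg;
	};

	static void
	load_crp_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
	    bus_size_t size, int error)
	{
		struct load_crp_shim *shim = arg;

		shim->cb(shim->arg, segs, nseg, error);
	}

	static int
	sketch_load_crp(bus_dma_tag_t tag, bus_dmamap_t map,
	    struct cryptop *crp, bus_dmamap_callback_t *cb, void *arg,
	    int flags)
	{
		struct load_crp_shim shim = { cb, arg };

		switch (crp->crp_buf_type) {
		case CRYPTO_BUF_MBUF:
			return (bus_dmamap_load_mbuf(tag, map, crp->crp_mbuf,
			    load_crp_cb2, &shim, flags));
		case CRYPTO_BUF_UIO:
			return (bus_dmamap_load_uio(tag, map, crp->crp_uio,
			    load_crp_cb2, &shim, flags));
		case CRYPTO_BUF_CONTIG:
			return (bus_dmamap_load(tag, map, crp->crp_buf,
			    crp->crp_ilen, cb, arg, flags));
		default:
			return (EINVAL);
		}
	}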
@@ -1385,18 +1385,6 @@
goto err8;
}
- crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
- if (sc->sc_soc_id == MV_DEV_88F6828 ||
- sc->sc_soc_id == MV_DEV_88F6820 ||
- sc->sc_soc_id == MV_DEV_88F6810)
- crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
-
return (0);
err8:
for (i = 0; i < CESA_REQUESTS; i++)
@@ -1487,6 +1475,7 @@
struct cesa_request *cr, *tmp;
struct cesa_softc *sc;
uint32_t ecr, icr;
+ uint8_t hash[HASH_MAX_LEN];
int blocked;
sc = arg;
@@ -1547,11 +1536,19 @@
BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
cr->cr_crp->crp_etype = sc->sc_error;
- if (cr->cr_mac)
- crypto_copyback(cr->cr_crp->crp_flags,
- cr->cr_crp->crp_buf, cr->cr_mac->crd_inject,
- cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash);
-
+ if (cr->cr_cs->cs_hlen != 0 && cr->cr_crp->crp_etype == 0) {
+ if (cr->cr_crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(cr->cr_crp,
+ cr->cr_crp->crp_digest_start,
+ cr->cr_cs->cs_hlen, hash);
+ if (timingsafe_bcmp(hash, cr->cr_csd->csd_hash,
+ cr->cr_cs->cs_hlen) != 0)
+ cr->cr_crp->crp_etype = EBADMSG;
+ } else
+ crypto_copyback(cr->cr_crp,
+ cr->cr_crp->crp_digest_start,
+ cr->cr_cs->cs_hlen, cr->cr_csd->csd_hash);
+ }
crypto_done(cr->cr_crp);
cesa_free_request(sc, cr);
}
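
The interrupt-path change above is one instance of the digest contract every
driver in this diff now follows: with CRYPTO_OP_VERIFY_DIGEST set, the driver
compares its computed digest against the one already in the buffer using
timingsafe_bcmp() and fails the request with EBADMSG on mismatch; otherwise
it writes the digest out as before. The shared shape:

	/* Verify-or-store digest pattern common to padlock, cesa and ccr. */
	static int
	finish_digest(struct cryptop *crp, uint8_t *computed, u_int mlen)
	{
		uint8_t expected[HASH_MAX_LEN];

		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
			/* Consumer supplied the digest; compare it in
			 * constant time. */
			crypto_copydata(crp, crp->crp_digest_start, mlen,
			    expected);
			if (timingsafe_bcmp(computed, expected, mlen) != 0)
				return (EBADMSG);
			return (0);
		}
		/* Otherwise store the computed digest in the buffer. */
		crypto_copyback(crp, crp->crp_digest_start, mlen, computed);
		return (0);
	}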
@@ -1571,43 +1568,99 @@
crypto_unblock(sc->sc_cid, blocked);
}
+static bool
+cesa_cipher_supported(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return (false);
+ break;
+ case CRYPTO_DES_CBC:
+ if (csp->csp_ivlen != DES_BLOCK_LEN)
+ return (false);
+ break;
+ case CRYPTO_3DES_CBC:
+ if (csp->csp_ivlen != DES3_BLOCK_LEN)
+ return (false);
+ break;
+ default:
+ return (false);
+ }
+
+ if (csp->csp_cipher_klen > CESA_MAX_KEY_LEN)
+ return (false);
+
+ return (true);
+}
+
+static bool
+cesa_auth_supported(struct cesa_softc *sc,
+ const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA2_256_HMAC:
+ if (!(sc->sc_soc_id == MV_DEV_88F6828 ||
+ sc->sc_soc_id == MV_DEV_88F6820 ||
+ sc->sc_soc_id == MV_DEV_88F6810))
+ return (false);
+ /* FALLTHROUGH */
+ case CRYPTO_MD5:
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA1_HMAC:
+ break;
+ default:
+ return (false);
+ }
+
+ if (csp->csp_auth_klen > CESA_MAX_MKEY_LEN)
+ return (false);
+
+ return (true);
+}
+
static int
-cesa_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+cesa_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+ struct cesa_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ if (!cesa_auth_supported(sc, csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!cesa_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_ETA:
+ if (!cesa_auth_supported(sc, csp) ||
+ !cesa_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (CRYPTODEV_PROBE_HARDWARE);
+}
+
+static int
+cesa_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
{
struct cesa_session *cs;
struct cesa_softc *sc;
- struct cryptoini *enc;
- struct cryptoini *mac;
int error;
sc = device_get_softc(dev);
- enc = NULL;
- mac = NULL;
error = 0;
- /* Check and parse input */
- if (cesa_is_hash(cri->cri_alg))
- mac = cri;
- else
- enc = cri;
-
- cri = cri->cri_next;
-
- if (cri) {
- if (!enc && !cesa_is_hash(cri->cri_alg))
- enc = cri;
-
- if (!mac && cesa_is_hash(cri->cri_alg))
- mac = cri;
-
- if (cri->cri_next || !(enc && mac))
- return (EINVAL);
- }
-
- if ((enc && (enc->cri_klen / 8) > CESA_MAX_KEY_LEN) ||
- (mac && (mac->cri_klen / 8) > CESA_MAX_MKEY_LEN))
- return (E2BIG);
-
/* Allocate session */
cs = crypto_get_driver_session(cses);
@@ -1616,106 +1669,89 @@
cs->cs_ivlen = 1;
cs->cs_mblen = 1;
- if (enc) {
- switch (enc->cri_alg) {
- case CRYPTO_AES_CBC:
- cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC;
- cs->cs_ivlen = AES_BLOCK_LEN;
- break;
- case CRYPTO_DES_CBC:
- cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC;
- cs->cs_ivlen = DES_BLOCK_LEN;
- break;
- case CRYPTO_3DES_CBC:
- cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE |
- CESA_CSHD_CBC;
- cs->cs_ivlen = DES3_BLOCK_LEN;
- break;
- default:
- error = EINVAL;
- break;
- }
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ cs->cs_config |= CESA_CSHD_AES | CESA_CSHD_CBC;
+ cs->cs_ivlen = AES_BLOCK_LEN;
+ break;
+ case CRYPTO_DES_CBC:
+ cs->cs_config |= CESA_CSHD_DES | CESA_CSHD_CBC;
+ cs->cs_ivlen = DES_BLOCK_LEN;
+ break;
+ case CRYPTO_3DES_CBC:
+ cs->cs_config |= CESA_CSHD_3DES | CESA_CSHD_3DES_EDE |
+ CESA_CSHD_CBC;
+ cs->cs_ivlen = DES3_BLOCK_LEN;
+ break;
}
- if (!error && mac) {
- switch (mac->cri_alg) {
- case CRYPTO_MD5:
- cs->cs_mblen = 1;
- cs->cs_hlen = (mac->cri_mlen == 0) ? MD5_HASH_LEN :
- mac->cri_mlen;
- cs->cs_config |= CESA_CSHD_MD5;
- break;
- case CRYPTO_MD5_HMAC:
- cs->cs_mblen = MD5_BLOCK_LEN;
- cs->cs_hlen = (mac->cri_mlen == 0) ? MD5_HASH_LEN :
- mac->cri_mlen;
- cs->cs_config |= CESA_CSHD_MD5_HMAC;
- if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
- cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
- break;
- case CRYPTO_SHA1:
- cs->cs_mblen = 1;
- cs->cs_hlen = (mac->cri_mlen == 0) ? SHA1_HASH_LEN :
- mac->cri_mlen;
- cs->cs_config |= CESA_CSHD_SHA1;
- break;
- case CRYPTO_SHA1_HMAC:
- cs->cs_mblen = SHA1_BLOCK_LEN;
- cs->cs_hlen = (mac->cri_mlen == 0) ? SHA1_HASH_LEN :
- mac->cri_mlen;
- cs->cs_config |= CESA_CSHD_SHA1_HMAC;
- if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
- cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
- break;
- case CRYPTO_SHA2_256_HMAC:
- cs->cs_mblen = SHA2_256_BLOCK_LEN;
- cs->cs_hlen = (mac->cri_mlen == 0) ? SHA2_256_HASH_LEN :
- mac->cri_mlen;
- cs->cs_config |= CESA_CSHD_SHA2_256_HMAC;
- break;
- default:
- error = EINVAL;
- break;
- }
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5:
+ cs->cs_mblen = 1;
+ cs->cs_hlen = (csp->csp_auth_mlen == 0) ? MD5_HASH_LEN :
+ csp->csp_auth_mlen;
+ cs->cs_config |= CESA_CSHD_MD5;
+ break;
+ case CRYPTO_MD5_HMAC:
+ cs->cs_mblen = MD5_BLOCK_LEN;
+ cs->cs_hlen = (csp->csp_auth_mlen == 0) ? MD5_HASH_LEN :
+ csp->csp_auth_mlen;
+ cs->cs_config |= CESA_CSHD_MD5_HMAC;
+ if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
+ cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
+ break;
+ case CRYPTO_SHA1:
+ cs->cs_mblen = 1;
+ cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN :
+ csp->csp_auth_mlen;
+ cs->cs_config |= CESA_CSHD_SHA1;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ cs->cs_mblen = SHA1_BLOCK_LEN;
+ cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA1_HASH_LEN :
+ csp->csp_auth_mlen;
+ cs->cs_config |= CESA_CSHD_SHA1_HMAC;
+ if (cs->cs_hlen == CESA_HMAC_TRUNC_LEN)
+ cs->cs_config |= CESA_CSHD_96_BIT_HMAC;
+ break;
+ case CRYPTO_SHA2_256_HMAC:
+ cs->cs_mblen = SHA2_256_BLOCK_LEN;
+ cs->cs_hlen = (csp->csp_auth_mlen == 0) ? SHA2_256_HASH_LEN :
+ csp->csp_auth_mlen;
+ cs->cs_config |= CESA_CSHD_SHA2_256_HMAC;
+ break;
}
/* Save cipher key */
- if (!error && enc && enc->cri_key) {
- cs->cs_klen = enc->cri_klen / 8;
- memcpy(cs->cs_key, enc->cri_key, cs->cs_klen);
- if (enc->cri_alg == CRYPTO_AES_CBC)
- error = cesa_prep_aes_key(cs);
+ if (csp->csp_cipher_key != NULL) {
+ memcpy(cs->cs_key, csp->csp_cipher_key,
+ csp->csp_cipher_klen);
+ if (csp->csp_cipher_alg == CRYPTO_AES_CBC)
+ error = cesa_prep_aes_key(cs, csp);
}
/* Save digest key */
- if (!error && mac && mac->cri_key)
- error = cesa_set_mkey(cs, mac->cri_alg, mac->cri_key,
- mac->cri_klen / 8);
+ if (csp->csp_auth_key != NULL)
+ cesa_set_mkey(cs, csp->csp_auth_alg, csp->csp_auth_key,
+ csp->csp_auth_klen);
- if (error)
- return (error);
-
- return (0);
+ return (error);
}
static int
cesa_process(device_t dev, struct cryptop *crp, int hint)
{
+ const struct crypto_session_params *csp;
struct cesa_request *cr;
struct cesa_session *cs;
- struct cryptodesc *crd;
- struct cryptodesc *enc;
- struct cryptodesc *mac;
struct cesa_softc *sc;
int error;
sc = device_get_softc(dev);
- crd = crp->crp_desc;
- enc = NULL;
- mac = NULL;
error = 0;
cs = crypto_get_driver_session(crp->crp_session);
+ csp = crypto_get_params(crp->crp_session);
/* Check and parse input */
if (crp->crp_ilen > CESA_MAX_REQUEST_SIZE) {
@@ -1724,25 +1760,16 @@
return (0);
}
- if (cesa_is_hash(crd->crd_alg))
- mac = crd;
- else
- enc = crd;
-
- crd = crd->crd_next;
-
- if (crd) {
- if (!enc && !cesa_is_hash(crd->crd_alg))
- enc = crd;
-
- if (!mac && cesa_is_hash(crd->crd_alg))
- mac = crd;
-
- if (crd->crd_next || !(enc && mac)) {
- crp->crp_etype = EINVAL;
- crypto_done(crp);
- return (0);
- }
+ /*
+ * For requests with AAD, only requests where the AAD is
+ * immediately adjacent to the payload are supported.
+ */
+ if (crp->crp_aad_length != 0 &&
+ (crp->crp_aad_start + crp->crp_aad_length) !=
+ crp->crp_payload_start) {
+ crp->crp_etype = EINVAL;
+ crypto_done(crp);
+ return (0);
}
/*
@@ -1759,51 +1786,37 @@
/* Prepare request */
cr->cr_crp = crp;
- cr->cr_enc = enc;
- cr->cr_mac = mac;
cr->cr_cs = cs;
CESA_LOCK(sc, sessions);
cesa_sync_desc(sc, BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
- if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
- if (enc->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
+ if (csp->csp_cipher_alg != 0) {
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(cr->cr_csd->csd_iv, csp->csp_ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen,
+ cr->cr_csd->csd_iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(cr->cr_csd->csd_iv, crp->crp_iv, csp->csp_ivlen);
else
- arc4rand(cr->cr_csd->csd_iv, cs->cs_ivlen, 0);
-
- if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
- } else if (enc) {
- if (enc->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(cr->cr_csd->csd_iv, enc->crd_iv, cs->cs_ivlen);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- enc->crd_inject, cs->cs_ivlen, cr->cr_csd->csd_iv);
+ crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen,
+ cr->cr_csd->csd_iv);
}
- if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
- if ((enc->crd_klen / 8) <= CESA_MAX_KEY_LEN) {
- cs->cs_klen = enc->crd_klen / 8;
- memcpy(cs->cs_key, enc->crd_key, cs->cs_klen);
- if (enc->crd_alg == CRYPTO_AES_CBC)
- error = cesa_prep_aes_key(cs);
- } else
- error = E2BIG;
+ if (crp->crp_cipher_key != NULL) {
+ memcpy(cs->cs_key, crp->crp_cipher_key,
+ csp->csp_cipher_klen);
+ if (csp->csp_cipher_alg == CRYPTO_AES_CBC)
+ error = cesa_prep_aes_key(cs, csp);
}
- if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
- if ((mac->crd_klen / 8) <= CESA_MAX_MKEY_LEN)
- error = cesa_set_mkey(cs, mac->crd_alg, mac->crd_key,
- mac->crd_klen / 8);
- else
- error = E2BIG;
- }
+ if (!error && crp->crp_auth_key != NULL)
+ cesa_set_mkey(cs, csp->csp_auth_alg, crp->crp_auth_key,
+ csp->csp_auth_klen);
/* Convert request to chain of TDMA and SA descriptors */
if (!error)
- error = cesa_create_chain(sc, cr);
+ error = cesa_create_chain(sc, csp, cr);
cesa_sync_desc(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
CESA_UNLOCK(sc, sessions);
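
cesa_probesession() above shows the new two-step session model: probesession
validates a parameter set without allocating anything and returns a probe
priority (CRYPTODEV_PROBE_HARDWARE here), and only the driver that wins the
probe receives the matching newsession call. A minimal probesession for a
hypothetical AES-CBC-only driver, following the same structure:

	/* Hypothetical single-algorithm driver's probesession hook. */
	static int
	example_probesession(device_t dev,
	    const struct crypto_session_params *csp)
	{

		if (csp->csp_flags != 0)
			return (EINVAL);
		if (csp->csp_mode != CSP_MODE_CIPHER)
			return (EINVAL);
		if (csp->csp_cipher_alg != CRYPTO_AES_CBC ||
		    csp->csp_ivlen != AES_BLOCK_LEN)
			return (EINVAL);
		switch (csp->csp_cipher_klen) {
		case 16:
		case 24:
		case 32:
			break;
		default:
			return (EINVAL);
		}
		return (CRYPTODEV_PROBE_HARDWARE);
	}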
Index: sys/dev/cxgbe/adapter.h
===================================================================
--- sys/dev/cxgbe/adapter.h
+++ sys/dev/cxgbe/adapter.h
@@ -1204,7 +1204,7 @@
void t4_aes_getdeckey(void *, const void *, unsigned int);
void t4_copy_partial_hash(int, union authctx *, void *);
void t4_init_gmac_hash(const char *, int, char *);
-void t4_init_hmac_digest(struct auth_hash *, u_int, char *, int, char *);
+void t4_init_hmac_digest(struct auth_hash *, u_int, const char *, int, char *);
#ifdef DEV_NETMAP
/* t4_netmap.c */
Index: sys/dev/cxgbe/crypto/t4_crypto.c
===================================================================
--- sys/dev/cxgbe/crypto/t4_crypto.c
+++ sys/dev/cxgbe/crypto/t4_crypto.c
@@ -165,7 +165,7 @@
struct ccr_session {
bool active;
int pending;
- enum { HASH, HMAC, BLKCIPHER, AUTHENC, GCM, CCM } mode;
+ enum { HASH, HMAC, BLKCIPHER, ETA, GCM, CCM } mode;
union {
struct ccr_session_hmac hmac;
struct ccr_session_gmac gmac;
@@ -208,8 +208,8 @@
uint64_t stats_blkcipher_decrypt;
uint64_t stats_hash;
uint64_t stats_hmac;
- uint64_t stats_authenc_encrypt;
- uint64_t stats_authenc_decrypt;
+ uint64_t stats_eta_encrypt;
+ uint64_t stats_eta_decrypt;
uint64_t stats_gcm_encrypt;
uint64_t stats_gcm_decrypt;
uint64_t stats_ccm_encrypt;
@@ -230,9 +230,9 @@
* Non-hash-only requests require a PHYS_DSGL that describes the
* location to store the results of the encryption or decryption
* operation. This SGL uses a different format (PHYS_DSGL) and should
- * exclude the crd_skip bytes at the start of the data as well as
- * any AAD or IV. For authenticated encryption requests it should
- * cover include the destination of the hash or tag.
+ * exclude the skip bytes at the start of the data as well as any AAD
+ * or IV. For authenticated encryption requests it should include the
+ * destination of the hash or tag.
*
* The input payload may either be supplied inline as immediate data,
* or via a standard ULP_TX SGL. This SGL should include AAD,
@@ -251,12 +251,19 @@
int error;
sglist_reset(sg);
- if (crp->crp_flags & CRYPTO_F_IMBUF)
- error = sglist_append_mbuf(sg, (struct mbuf *)crp->crp_buf);
- else if (crp->crp_flags & CRYPTO_F_IOV)
- error = sglist_append_uio(sg, (struct uio *)crp->crp_buf);
- else
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ error = sglist_append_mbuf(sg, crp->crp_mbuf);
+ break;
+ case CRYPTO_BUF_UIO:
+ error = sglist_append_uio(sg, crp->crp_uio);
+ break;
+ case CRYPTO_BUF_CONTIG:
error = sglist_append(sg, crp->crp_buf, crp->crp_ilen);
+ break;
+ default:
+ error = EINVAL;
+ }
return (error);
}
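
The request builders below locate everything through the new offset/length
pairs rather than a cryptodesc chain. As a concrete illustration, a GCM
encrypt over 8 bytes of AAD and 64 bytes of plaintext might be described as
follows (the offsets and the encrypt/compute op names are assumptions for
illustration only):

	static void
	example_gcm_layout(struct cryptop *crp)
	{

		/* Hypothetical layout: AAD || payload || tag. */
		crp->crp_aad_start = 0;
		crp->crp_aad_length = 8;
		crp->crp_payload_start = 8;
		crp->crp_payload_length = 64;
		crp->crp_digest_start = 72;	/* tag follows the payload */
		/* The GCM nonce travels out of band, per ccr_gcm() below. */
		crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
		crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	}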
@@ -436,16 +443,13 @@
struct chcr_wr *crwr;
struct wrqe *wr;
struct auth_hash *axf;
- struct cryptodesc *crd;
char *dst;
u_int hash_size_in_response, kctx_flits, kctx_len, transhdr_len, wr_len;
u_int hmac_ctrl, imm_len, iopad_size;
int error, sgl_nsegs, sgl_len, use_opad;
- crd = crp->crp_desc;
-
/* Reject requests with too large of an input buffer. */
- if (crd->crd_len > MAX_REQUEST_SIZE)
+ if (crp->crp_payload_length > MAX_REQUEST_SIZE)
return (EFBIG);
axf = s->hmac.auth_hash;
@@ -471,19 +475,19 @@
hash_size_in_response = axf->hashsize;
transhdr_len = HASH_TRANSHDR_SIZE(kctx_len);
- if (crd->crd_len == 0) {
+ if (crp->crp_payload_length == 0) {
imm_len = axf->blocksize;
sgl_nsegs = 0;
sgl_len = 0;
- } else if (ccr_use_imm_data(transhdr_len, crd->crd_len)) {
- imm_len = crd->crd_len;
+ } else if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length)) {
+ imm_len = crp->crp_payload_length;
sgl_nsegs = 0;
sgl_len = 0;
} else {
imm_len = 0;
sglist_reset(sc->sg_ulptx);
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crd->crd_skip, crd->crd_len);
+ crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
sgl_nsegs = sc->sg_ulptx->sg_nseg;
@@ -512,8 +516,8 @@
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(0));
- crwr->sec_cpl.pldlen = htobe32(crd->crd_len == 0 ? axf->blocksize :
- crd->crd_len);
+ crwr->sec_cpl.pldlen = htobe32(crp->crp_payload_length == 0 ?
+ axf->blocksize : crp->crp_payload_length);
crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
V_CPL_TX_SEC_PDU_AUTHSTART(1) | V_CPL_TX_SEC_PDU_AUTHSTOP(0));
@@ -527,7 +531,8 @@
V_SCMD_HMAC_CTRL(hmac_ctrl));
crwr->sec_cpl.ivgen_hdrlen = htobe32(
V_SCMD_LAST_FRAG(0) |
- V_SCMD_MORE_FRAGS(crd->crd_len == 0 ? 1 : 0) | V_SCMD_MAC_ONLY(1));
+ V_SCMD_MORE_FRAGS(crp->crp_payload_length == 0 ? 1 : 0) |
+ V_SCMD_MAC_ONLY(1));
memcpy(crwr->key_ctx.key, s->hmac.pads, kctx_len);
@@ -540,14 +545,14 @@
V_KEY_CONTEXT_MK_SIZE(s->hmac.mk_size) | V_KEY_CONTEXT_VALID(1));
dst = (char *)(crwr + 1) + kctx_len + DUMMY_BYTES;
- if (crd->crd_len == 0) {
+ if (crp->crp_payload_length == 0) {
dst[0] = 0x80;
if (s->mode == HMAC)
*(uint64_t *)(dst + axf->blocksize - sizeof(uint64_t)) =
htobe64(axf->blocksize << 3);
} else if (imm_len != 0)
- crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
- crd->crd_len, dst);
+ crypto_copydata(crp, crp->crp_payload_start,
+ crp->crp_payload_length, dst);
else
ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
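
In the crp_payload_length == 0 case above, the driver hands the engine a
single pre-padded final block instead of an empty payload: dst[0] = 0x80 is
the hash padding terminator for the empty message, and in HMAC mode the
trailing big-endian quadword holds the bit count of data already absorbed,
namely the one ipad block, so for a 64-byte block it is
htobe64(64 << 3) = htobe64(512).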
@@ -561,15 +566,20 @@
ccr_hash_done(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
const struct cpl_fw6_pld *cpl, int error)
{
- struct cryptodesc *crd;
+ uint8_t hash[HASH_MAX_LEN];
- crd = crp->crp_desc;
- if (error == 0) {
- crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
- s->hmac.hash_len, (c_caddr_t)(cpl + 1));
- }
+ if (error)
+ return (error);
- return (error);
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, s->hmac.hash_len,
+ hash);
+ if (timingsafe_bcmp((cpl + 1), hash, s->hmac.hash_len) != 0)
+ return (EBADMSG);
+ } else
+ crypto_copyback(crp, crp->crp_digest_start, s->hmac.hash_len,
+ (cpl + 1));
+ return (0);
}
static int
@@ -578,34 +588,31 @@
char iv[CHCR_MAX_CRYPTO_IV_LEN];
struct chcr_wr *crwr;
struct wrqe *wr;
- struct cryptodesc *crd;
char *dst;
u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
- u_int imm_len;
+ u_int imm_len, iv_len;
int dsgl_nsegs, dsgl_len;
int sgl_nsegs, sgl_len;
int error;
- crd = crp->crp_desc;
-
- if (s->blkcipher.key_len == 0 || crd->crd_len == 0)
+ if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0)
return (EINVAL);
- if (crd->crd_alg == CRYPTO_AES_CBC &&
- (crd->crd_len % AES_BLOCK_LEN) != 0)
+ if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
+ (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
return (EINVAL);
/* Reject requests with too large of an input buffer. */
- if (crd->crd_len > MAX_REQUEST_SIZE)
+ if (crp->crp_payload_length > MAX_REQUEST_SIZE)
return (EFBIG);
- if (crd->crd_flags & CRD_F_ENCRYPT)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
op_type = CHCR_ENCRYPT_OP;
else
op_type = CHCR_DECRYPT_OP;
sglist_reset(sc->sg_dsgl);
- error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crd->crd_skip,
- crd->crd_len);
+ error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
+ crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
dsgl_nsegs = ccr_count_sgl(sc->sg_dsgl, DSGL_SGE_MAXLEN);
@@ -617,23 +624,28 @@
kctx_len = roundup2(s->blkcipher.key_len, 16);
transhdr_len = CIPHER_TRANSHDR_SIZE(kctx_len, dsgl_len);
- if (ccr_use_imm_data(transhdr_len, crd->crd_len +
- s->blkcipher.iv_len)) {
- imm_len = crd->crd_len;
+ /* For AES-XTS we send a 16-byte IV in the work request. */
+ if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
+ iv_len = AES_BLOCK_LEN;
+ else
+ iv_len = s->blkcipher.iv_len;
+
+ if (ccr_use_imm_data(transhdr_len, crp->crp_payload_length + iv_len)) {
+ imm_len = crp->crp_payload_length;
sgl_nsegs = 0;
sgl_len = 0;
} else {
imm_len = 0;
sglist_reset(sc->sg_ulptx);
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crd->crd_skip, crd->crd_len);
+ crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
sgl_nsegs = sc->sg_ulptx->sg_nseg;
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
- wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
+ wr_len = roundup2(transhdr_len, 16) + iv_len +
roundup2(imm_len, 16) + sgl_len;
if (wr_len > SGE_MAX_WR_LEN)
return (EFBIG);
@@ -647,24 +659,20 @@
/*
* Read the existing IV from the request or generate a random
- * one if none is provided. Optionally copy the generated IV
- * into the output buffer if requested.
+ * one if none is provided.
*/
- if (op_type == CHCR_ENCRYPT_OP) {
- if (crd->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
- else
- arc4rand(iv, s->blkcipher.iv_len, 0);
- if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, s->blkcipher.iv_len, iv);
- } else {
- if (crd->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crd->crd_iv, s->blkcipher.iv_len);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, s->blkcipher.iv_len, iv);
- }
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(iv, s->blkcipher.iv_len, 0);
+ crypto_copyback(crp, crp->crp_iv_start, s->blkcipher.iv_len,
+ iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(iv, crp->crp_iv, s->blkcipher.iv_len);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, s->blkcipher.iv_len,
+ iv);
+
+ /* Zero the remainder of the IV for AES-XTS. */
+ memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len);
ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
crp);
@@ -677,10 +685,10 @@
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
- crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + crd->crd_len);
+ crwr->sec_cpl.pldlen = htobe32(iv_len + crp->crp_payload_length);
crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
- V_CPL_TX_SEC_PDU_CIPHERSTART(s->blkcipher.iv_len + 1) |
+ V_CPL_TX_SEC_PDU_CIPHERSTART(iv_len + 1) |
V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(0));
crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(0));
@@ -693,7 +701,7 @@
V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
V_SCMD_AUTH_MODE(SCMD_AUTH_MODE_NOP) |
V_SCMD_HMAC_CTRL(SCMD_HMAC_CTRL_NOP) |
- V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
+ V_SCMD_IV_SIZE(iv_len / 2) |
V_SCMD_NUM_IVS(0));
crwr->sec_cpl.ivgen_hdrlen = htobe32(
V_SCMD_IV_GEN_CTRL(0) |
@@ -701,24 +709,24 @@
V_SCMD_AADIVDROP(1) | V_SCMD_HDR_LEN(dsgl_len));
crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
- switch (crd->crd_alg) {
- case CRYPTO_AES_CBC:
- if (crd->crd_flags & CRD_F_ENCRYPT)
+ switch (s->blkcipher.cipher_mode) {
+ case SCMD_CIPH_MODE_AES_CBC:
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
s->blkcipher.key_len);
else
memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
s->blkcipher.key_len);
break;
- case CRYPTO_AES_ICM:
+ case SCMD_CIPH_MODE_AES_CTR:
memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
s->blkcipher.key_len);
break;
- case CRYPTO_AES_XTS:
+ case SCMD_CIPH_MODE_AES_XTS:
key_half = s->blkcipher.key_len / 2;
memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
key_half);
- if (crd->crd_flags & CRD_F_ENCRYPT)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
memcpy(crwr->key_ctx.key + key_half,
s->blkcipher.enckey, key_half);
else
@@ -730,11 +738,11 @@
dst = (char *)(crwr + 1) + kctx_len;
ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
- memcpy(dst, iv, s->blkcipher.iv_len);
- dst += s->blkcipher.iv_len;
+ memcpy(dst, iv, iv_len);
+ dst += iv_len;
if (imm_len != 0)
- crypto_copydata(crp->crp_flags, crp->crp_buf, crd->crd_skip,
- crd->crd_len, dst);
+ crypto_copydata(crp, crp->crp_payload_start,
+ crp->crp_payload_length, dst);
else
ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
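
An AES-XTS session carries a tweak shorter than the 16-byte value the crypto
engine expects in the work request, so the remainder of the IV is
zero-extended above; the other cipher modes send the session's native IV
length unchanged.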
@@ -775,8 +783,7 @@
}
static int
-ccr_authenc(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
- struct cryptodesc *crda, struct cryptodesc *crde)
+ccr_eta(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
char iv[CHCR_MAX_CRYPTO_IV_LEN];
struct chcr_wr *crwr;
@@ -784,9 +791,9 @@
struct auth_hash *axf;
char *dst;
u_int kctx_len, key_half, op_type, transhdr_len, wr_len;
- u_int hash_size_in_response, imm_len, iopad_size;
- u_int aad_start, aad_len, aad_stop;
- u_int auth_start, auth_stop, auth_insert;
+ u_int hash_size_in_response, imm_len, iopad_size, iv_len;
+ u_int aad_start, aad_stop;
+ u_int auth_insert;
u_int cipher_start, cipher_stop;
u_int hmac_ctrl, input_len;
int dsgl_nsegs, dsgl_len;
@@ -797,34 +804,24 @@
* If there is a need in the future, requests with an empty
* payload could be supported as HMAC-only requests.
*/
- if (s->blkcipher.key_len == 0 || crde->crd_len == 0)
+ if (s->blkcipher.key_len == 0 || crp->crp_payload_length == 0)
return (EINVAL);
- if (crde->crd_alg == CRYPTO_AES_CBC &&
- (crde->crd_len % AES_BLOCK_LEN) != 0)
+ if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_CBC &&
+ (crp->crp_payload_length % AES_BLOCK_LEN) != 0)
return (EINVAL);
- /*
- * Compute the length of the AAD (data covered by the
- * authentication descriptor but not the encryption
- * descriptor). To simplify the logic, AAD is only permitted
- * before the cipher/plain text, not after. This is true of
- * all currently-generated requests.
- */
- if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
- return (EINVAL);
- if (crda->crd_skip < crde->crd_skip) {
- if (crda->crd_skip + crda->crd_len > crde->crd_skip)
- aad_len = (crde->crd_skip - crda->crd_skip);
- else
- aad_len = crda->crd_len;
- } else
- aad_len = 0;
- if (aad_len + s->blkcipher.iv_len > MAX_AAD_LEN)
+ /* For AES-XTS we send a 16-byte IV in the work request. */
+ if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
+ iv_len = AES_BLOCK_LEN;
+ else
+ iv_len = s->blkcipher.iv_len;
+
+ if (crp->crp_aad_length + iv_len > MAX_AAD_LEN)
return (EINVAL);
axf = s->hmac.auth_hash;
hash_size_in_response = s->hmac.hash_len;
- if (crde->crd_flags & CRD_F_ENCRYPT)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
op_type = CHCR_ENCRYPT_OP;
else
op_type = CHCR_DECRYPT_OP;
@@ -839,26 +836,26 @@
* output buffer.
*/
if (op_type == CHCR_ENCRYPT_OP) {
- if (s->blkcipher.iv_len + aad_len + crde->crd_len +
+ if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
hash_size_in_response > MAX_REQUEST_SIZE)
return (EFBIG);
} else {
- if (s->blkcipher.iv_len + aad_len + crde->crd_len >
+ if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
MAX_REQUEST_SIZE)
return (EFBIG);
}
sglist_reset(sc->sg_dsgl);
error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0,
- s->blkcipher.iv_len + aad_len);
+ iv_len + crp->crp_aad_length);
if (error)
return (error);
- error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
- crde->crd_len);
+ error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
+ crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_ENCRYPT_OP) {
error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
- crda->crd_inject, hash_size_in_response);
+ crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
@@ -888,7 +885,7 @@
* inside of the AAD region, so a second copy is always
* required.
*/
- input_len = aad_len + crde->crd_len;
+ input_len = crp->crp_aad_length + crp->crp_payload_length;
/*
* The firmware hangs if sent a request which is a
@@ -902,26 +899,27 @@
return (EFBIG);
if (op_type == CHCR_DECRYPT_OP)
input_len += hash_size_in_response;
- if (ccr_use_imm_data(transhdr_len, s->blkcipher.iv_len + input_len)) {
+
+ if (ccr_use_imm_data(transhdr_len, iv_len + input_len)) {
imm_len = input_len;
sgl_nsegs = 0;
sgl_len = 0;
} else {
imm_len = 0;
sglist_reset(sc->sg_ulptx);
- if (aad_len != 0) {
+ if (crp->crp_aad_length != 0) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crda->crd_skip, aad_len);
+ crp->crp_aad_start, crp->crp_aad_length);
if (error)
return (error);
}
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crde->crd_skip, crde->crd_len);
+ crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_DECRYPT_OP) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crda->crd_inject, hash_size_in_response);
+ crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
@@ -934,37 +932,25 @@
* Auth-data that overlaps with the cipher region is placed in
* the auth section.
*/
- if (aad_len != 0) {
- aad_start = s->blkcipher.iv_len + 1;
- aad_stop = aad_start + aad_len - 1;
+ if (crp->crp_aad_length != 0) {
+ aad_start = iv_len + 1;
+ aad_stop = aad_start + crp->crp_aad_length - 1;
} else {
aad_start = 0;
aad_stop = 0;
}
- cipher_start = s->blkcipher.iv_len + aad_len + 1;
+ cipher_start = iv_len + crp->crp_aad_length + 1;
if (op_type == CHCR_DECRYPT_OP)
cipher_stop = hash_size_in_response;
else
cipher_stop = 0;
- if (aad_len == crda->crd_len) {
- auth_start = 0;
- auth_stop = 0;
- } else {
- if (aad_len != 0)
- auth_start = cipher_start;
- else
- auth_start = s->blkcipher.iv_len + crda->crd_skip -
- crde->crd_skip + 1;
- auth_stop = (crde->crd_skip + crde->crd_len) -
- (crda->crd_skip + crda->crd_len) + cipher_stop;
- }
if (op_type == CHCR_DECRYPT_OP)
auth_insert = hash_size_in_response;
else
auth_insert = 0;
- wr_len = roundup2(transhdr_len, 16) + s->blkcipher.iv_len +
- roundup2(imm_len, 16) + sgl_len;
+ wr_len = roundup2(transhdr_len, 16) + iv_len + roundup2(imm_len, 16) +
+ sgl_len;
if (wr_len > SGE_MAX_WR_LEN)
return (EFBIG);
wr = alloc_wrqe(wr_len, sc->txq);
@@ -977,24 +963,20 @@
/*
* Read the existing IV from the request or generate a random
- * one if none is provided. Optionally copy the generated IV
- * into the output buffer if requested.
+ * one if none is provided.
*/
- if (op_type == CHCR_ENCRYPT_OP) {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
- else
- arc4rand(iv, s->blkcipher.iv_len, 0);
- if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, s->blkcipher.iv_len, iv);
- } else {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, s->blkcipher.iv_len, iv);
- }
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(iv, s->blkcipher.iv_len, 0);
+ crypto_copyback(crp, crp->crp_iv_start, s->blkcipher.iv_len,
+ iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(iv, crp->crp_iv, s->blkcipher.iv_len);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, s->blkcipher.iv_len,
+ iv);
+
+ /* Zero the remainder of the IV for AES-XTS. */
+ memset(iv + s->blkcipher.iv_len, 0, iv_len - s->blkcipher.iv_len);
ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len,
op_type == CHCR_DECRYPT_OP ? hash_size_in_response : 0, crp);
@@ -1007,7 +989,7 @@
V_CPL_TX_SEC_PDU_CPLLEN(2) | V_CPL_TX_SEC_PDU_PLACEHOLDER(0) |
V_CPL_TX_SEC_PDU_IVINSRTOFST(1));
- crwr->sec_cpl.pldlen = htobe32(s->blkcipher.iv_len + input_len);
+ crwr->sec_cpl.pldlen = htobe32(iv_len + input_len);
crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
@@ -1016,8 +998,8 @@
V_CPL_TX_SEC_PDU_CIPHERSTOP_HI(cipher_stop >> 4));
crwr->sec_cpl.cipherstop_lo_authinsert = htobe32(
V_CPL_TX_SEC_PDU_CIPHERSTOP_LO(cipher_stop & 0xf) |
- V_CPL_TX_SEC_PDU_AUTHSTART(auth_start) |
- V_CPL_TX_SEC_PDU_AUTHSTOP(auth_stop) |
+ V_CPL_TX_SEC_PDU_AUTHSTART(cipher_start) |
+ V_CPL_TX_SEC_PDU_AUTHSTOP(cipher_stop) |
V_CPL_TX_SEC_PDU_AUTHINSERT(auth_insert));
/* These two flits are actually a CPL_TLS_TX_SCMD_FMT. */
@@ -1030,7 +1012,7 @@
V_SCMD_CIPH_MODE(s->blkcipher.cipher_mode) |
V_SCMD_AUTH_MODE(s->hmac.auth_mode) |
V_SCMD_HMAC_CTRL(hmac_ctrl) |
- V_SCMD_IV_SIZE(s->blkcipher.iv_len / 2) |
+ V_SCMD_IV_SIZE(iv_len / 2) |
V_SCMD_NUM_IVS(0));
crwr->sec_cpl.ivgen_hdrlen = htobe32(
V_SCMD_IV_GEN_CTRL(0) |
@@ -1038,24 +1020,24 @@
V_SCMD_AADIVDROP(0) | V_SCMD_HDR_LEN(dsgl_len));
crwr->key_ctx.ctx_hdr = s->blkcipher.key_ctx_hdr;
- switch (crde->crd_alg) {
- case CRYPTO_AES_CBC:
- if (crde->crd_flags & CRD_F_ENCRYPT)
+ switch (s->blkcipher.cipher_mode) {
+ case SCMD_CIPH_MODE_AES_CBC:
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
s->blkcipher.key_len);
else
memcpy(crwr->key_ctx.key, s->blkcipher.deckey,
s->blkcipher.key_len);
break;
- case CRYPTO_AES_ICM:
+ case SCMD_CIPH_MODE_AES_CTR:
memcpy(crwr->key_ctx.key, s->blkcipher.enckey,
s->blkcipher.key_len);
break;
- case CRYPTO_AES_XTS:
+ case SCMD_CIPH_MODE_AES_XTS:
key_half = s->blkcipher.key_len / 2;
memcpy(crwr->key_ctx.key, s->blkcipher.enckey + key_half,
key_half);
- if (crde->crd_flags & CRD_F_ENCRYPT)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
memcpy(crwr->key_ctx.key + key_half,
s->blkcipher.enckey, key_half);
else
@@ -1070,20 +1052,20 @@
dst = (char *)(crwr + 1) + kctx_len;
ccr_write_phys_dsgl(sc, dst, dsgl_nsegs);
dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
- memcpy(dst, iv, s->blkcipher.iv_len);
- dst += s->blkcipher.iv_len;
+ memcpy(dst, iv, iv_len);
+ dst += iv_len;
if (imm_len != 0) {
- if (aad_len != 0) {
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crda->crd_skip, aad_len, dst);
- dst += aad_len;
+ if (crp->crp_aad_length != 0) {
+ crypto_copydata(crp, crp->crp_aad_start,
+ crp->crp_aad_length, dst);
+ dst += crp->crp_aad_length;
}
- crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
- crde->crd_len, dst);
- dst += crde->crd_len;
+ crypto_copydata(crp, crp->crp_payload_start,
+ crp->crp_payload_length, dst);
+ dst += crp->crp_payload_length;
if (op_type == CHCR_DECRYPT_OP)
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crda->crd_inject, hash_size_in_response, dst);
+ crypto_copydata(crp, crp->crp_digest_start,
+ hash_size_in_response, dst);
} else
ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
@@ -1094,38 +1076,19 @@
}
static int
-ccr_authenc_done(struct ccr_softc *sc, struct ccr_session *s,
+ccr_eta_done(struct ccr_softc *sc, struct ccr_session *s,
struct cryptop *crp, const struct cpl_fw6_pld *cpl, int error)
{
- struct cryptodesc *crd;
/*
* The updated IV to permit chained requests is at
* cpl->data[2], but OCF doesn't permit chained requests.
- *
- * For a decryption request, the hardware may do a verification
- * of the HMAC which will fail if the existing HMAC isn't in the
- * buffer. If that happens, clear the error and copy the HMAC
- * from the CPL reply into the buffer.
- *
- * For encryption requests, crd should be the cipher request
- * which will have CRD_F_ENCRYPT set. For decryption
- * requests, crp_desc will be the HMAC request which should
- * not have this flag set.
*/
- crd = crp->crp_desc;
- if (error == EBADMSG && !CHK_PAD_ERR_BIT(be64toh(cpl->data[0])) &&
- !(crd->crd_flags & CRD_F_ENCRYPT)) {
- crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
- s->hmac.hash_len, (c_caddr_t)(cpl + 1));
- error = 0;
- }
return (error);
}
static int
-ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
- struct cryptodesc *crda, struct cryptodesc *crde)
+ccr_gcm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
char iv[CHCR_MAX_CRYPTO_IV_LEN];
struct chcr_wr *crwr;
@@ -1146,21 +1109,14 @@
* The crypto engine doesn't handle GCM requests with an empty
* payload, so handle those in software instead.
*/
- if (crde->crd_len == 0)
+ if (crp->crp_payload_length == 0)
return (EMSGSIZE);
- /*
- * AAD is only permitted before the cipher/plain text, not
- * after.
- */
- if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
- return (EMSGSIZE);
-
- if (crda->crd_len + AES_BLOCK_LEN > MAX_AAD_LEN)
+ if (crp->crp_aad_length + AES_BLOCK_LEN > MAX_AAD_LEN)
return (EMSGSIZE);
hash_size_in_response = s->gmac.hash_len;
- if (crde->crd_flags & CRD_F_ENCRYPT)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
op_type = CHCR_ENCRYPT_OP;
else
op_type = CHCR_DECRYPT_OP;
@@ -1186,6 +1142,12 @@
else
iv_len = s->blkcipher.iv_len;
+ /*
+ * GCM requests should always provide an explicit IV.
+ */
+ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
+ return (EINVAL);
+
/*
* The output buffer consists of the cipher text followed by
* the tag when encrypting. For decryption it only contains
@@ -1196,25 +1158,26 @@
* output buffer.
*/
if (op_type == CHCR_ENCRYPT_OP) {
- if (iv_len + crda->crd_len + crde->crd_len +
+ if (iv_len + crp->crp_aad_length + crp->crp_payload_length +
hash_size_in_response > MAX_REQUEST_SIZE)
return (EFBIG);
} else {
- if (iv_len + crda->crd_len + crde->crd_len > MAX_REQUEST_SIZE)
+ if (iv_len + crp->crp_aad_length + crp->crp_payload_length >
+ MAX_REQUEST_SIZE)
return (EFBIG);
}
sglist_reset(sc->sg_dsgl);
error = sglist_append_sglist(sc->sg_dsgl, sc->sg_iv_aad, 0, iv_len +
- crda->crd_len);
+ crp->crp_aad_length);
if (error)
return (error);
- error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
- crde->crd_len);
+ error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
+ crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_ENCRYPT_OP) {
error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
- crda->crd_inject, hash_size_in_response);
+ crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
@@ -1241,7 +1204,7 @@
* inside of the AAD region, so a second copy is always
* required.
*/
- input_len = crda->crd_len + crde->crd_len;
+ input_len = crp->crp_aad_length + crp->crp_payload_length;
if (op_type == CHCR_DECRYPT_OP)
input_len += hash_size_in_response;
if (input_len > MAX_REQUEST_SIZE)
@@ -1253,19 +1216,19 @@
} else {
imm_len = 0;
sglist_reset(sc->sg_ulptx);
- if (crda->crd_len != 0) {
+ if (crp->crp_aad_length != 0) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crda->crd_skip, crda->crd_len);
+ crp->crp_aad_start, crp->crp_aad_length);
if (error)
return (error);
}
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crde->crd_skip, crde->crd_len);
+ crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_DECRYPT_OP) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crda->crd_inject, hash_size_in_response);
+ crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
@@ -1273,14 +1236,14 @@
sgl_len = ccr_ulptx_sgl_len(sgl_nsegs);
}
- if (crda->crd_len != 0) {
+ if (crp->crp_aad_length != 0) {
aad_start = iv_len + 1;
- aad_stop = aad_start + crda->crd_len - 1;
+ aad_stop = aad_start + crp->crp_aad_length - 1;
} else {
aad_start = 0;
aad_stop = 0;
}
- cipher_start = iv_len + crda->crd_len + 1;
+ cipher_start = iv_len + crp->crp_aad_length + 1;
if (op_type == CHCR_DECRYPT_OP)
cipher_stop = hash_size_in_response;
else
@@ -1302,29 +1265,7 @@
crwr = wrtod(wr);
memset(crwr, 0, wr_len);
- /*
- * Read the existing IV from the request or generate a random
- * one if none is provided. Optionally copy the generated IV
- * into the output buffer if requested.
- *
- * If the input IV is 12 bytes, append an explicit 4-byte
- * counter of 1.
- */
- if (op_type == CHCR_ENCRYPT_OP) {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
- else
- arc4rand(iv, s->blkcipher.iv_len, 0);
- if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, s->blkcipher.iv_len, iv);
- } else {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crde->crd_iv, s->blkcipher.iv_len);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, s->blkcipher.iv_len, iv);
- }
+ memcpy(iv, crp->crp_iv, s->blkcipher.iv_len);
if (s->blkcipher.iv_len == 12)
*(uint32_t *)&iv[12] = htobe32(1);
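
These two lines are the J0 construction from NIST SP 800-38D: with a 96-bit IV, the
initial GCM counter block is IV || 0^31 || 1. A minimal standalone sketch of the same
derivation (gcm_j0 is a hypothetical name, not part of this patch):

	#include <stdint.h>
	#include <string.h>

	static void
	gcm_j0(uint8_t j0[16], const uint8_t iv[12])
	{
		memcpy(j0, iv, 12);
		j0[12] = 0x00;
		j0[13] = 0x00;
		j0[14] = 0x00;
		j0[15] = 0x01;	/* 32-bit big-endian counter set to 1 */
	}

Since this driver now rejects GCM requests without CRYPTO_F_IV_SEPARATE, the 12-byte
IV always arrives out of band in crp->crp_iv.
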
@@ -1343,13 +1284,12 @@
/*
* NB: cipherstop is explicitly set to 0. On encrypt it
- * should normally be set to 0 anyway (as the encrypt crd ends
- * at the end of the input). However, for decrypt the cipher
- * ends before the tag in the AUTHENC case (and authstop is
- * set to stop before the tag), but for GCM the cipher still
- * runs to the end of the buffer. Not sure if this is
- * intentional or a firmware quirk, but it is required for
- * working tag validation with GCM decryption.
+ * should normally be set to 0 anyway. However, for decrypt
+ * the cipher ends before the tag in the ETA case (and
+ * authstop is set to stop before the tag), but for GCM the
+ * cipher still runs to the end of the buffer. Not sure if
+ * this is intentional or a firmware quirk, but it is required
+ * for working tag validation with GCM decryption.
*/
crwr->sec_cpl.aadstart_cipherstop_hi = htobe32(
V_CPL_TX_SEC_PDU_AADSTART(aad_start) |
@@ -1390,17 +1330,17 @@
memcpy(dst, iv, iv_len);
dst += iv_len;
if (imm_len != 0) {
- if (crda->crd_len != 0) {
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crda->crd_skip, crda->crd_len, dst);
- dst += crda->crd_len;
+ if (crp->crp_aad_length != 0) {
+ crypto_copydata(crp, crp->crp_aad_start,
+ crp->crp_aad_length, dst);
+ dst += crp->crp_aad_length;
}
- crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
- crde->crd_len, dst);
- dst += crde->crd_len;
+ crypto_copydata(crp, crp->crp_payload_start,
+ crp->crp_payload_length, dst);
+ dst += crp->crp_payload_length;
if (op_type == CHCR_DECRYPT_OP)
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crda->crd_inject, hash_size_in_response, dst);
+ crypto_copydata(crp, crp->crp_digest_start,
+ hash_size_in_response, dst);
} else
ccr_write_ulptx_sgl(sc, dst, sgl_nsegs);
@@ -1429,8 +1369,7 @@
* performing the operation in software. Derived from swcr_authenc().
*/
static void
-ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp,
- struct cryptodesc *crda, struct cryptodesc *crde)
+ccr_gcm_soft(struct ccr_session *s, struct cryptop *crp)
{
struct auth_hash *axf;
struct enc_xform *exf;
@@ -1478,30 +1417,19 @@
* This assumes a 12-byte IV from the crp. See longer comment
* above in ccr_gcm() for more details.
*/
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crde->crd_iv, 12);
- else
- arc4rand(iv, 12, 0);
- if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, 12, iv);
- } else {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crde->crd_iv, 12);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, 12, iv);
+ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) {
+ error = EINVAL;
+ goto out;
}
+ memcpy(iv, crp->crp_iv, 12);
*(uint32_t *)&iv[12] = htobe32(1);
axf->Reinit(auth_ctx, iv, sizeof(iv));
/* MAC the AAD. */
- for (i = 0; i < crda->crd_len; i += sizeof(block)) {
- len = imin(crda->crd_len - i, sizeof(block));
- crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
- i, len, block);
+ for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) {
+ len = imin(crp->crp_aad_length - i, sizeof(block));
+ crypto_copydata(crp, crp->crp_aad_start + i, len, block);
bzero(block + len, sizeof(block) - len);
axf->Update(auth_ctx, block, sizeof(block));
}
@@ -1509,16 +1437,15 @@
exf->reinit(kschedule, iv);
/* Do encryption with MAC */
- for (i = 0; i < crde->crd_len; i += sizeof(block)) {
- len = imin(crde->crd_len - i, sizeof(block));
- crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip +
- i, len, block);
+ for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) {
+ len = imin(crp->crp_payload_length - i, sizeof(block));
+ crypto_copydata(crp, crp->crp_payload_start + i, len, block);
bzero(block + len, sizeof(block) - len);
- if (crde->crd_flags & CRD_F_ENCRYPT) {
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
exf->encrypt(kschedule, block);
axf->Update(auth_ctx, block, len);
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crde->crd_skip + i, len, block);
+ crypto_copyback(crp, crp->crp_payload_start + i, len,
+ block);
} else {
axf->Update(auth_ctx, block, len);
}
@@ -1526,35 +1453,37 @@
/* Length block. */
bzero(block, sizeof(block));
- ((uint32_t *)block)[1] = htobe32(crda->crd_len * 8);
- ((uint32_t *)block)[3] = htobe32(crde->crd_len * 8);
+ ((uint32_t *)block)[1] = htobe32(crp->crp_aad_length * 8);
+ ((uint32_t *)block)[3] = htobe32(crp->crp_payload_length * 8);
axf->Update(auth_ctx, block, sizeof(block));
/* Finalize MAC. */
axf->Final(digest, auth_ctx);
/* Inject or validate tag. */
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
- sizeof(digest), digest);
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ crypto_copyback(crp, crp->crp_digest_start, sizeof(digest),
+ digest);
error = 0;
} else {
char digest2[GMAC_DIGEST_LEN];
- crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
- sizeof(digest2), digest2);
+ crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2),
+ digest2);
if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
error = 0;
/* Tag matches, decrypt data. */
- for (i = 0; i < crde->crd_len; i += sizeof(block)) {
- len = imin(crde->crd_len - i, sizeof(block));
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crde->crd_skip + i, len, block);
+ for (i = 0; i < crp->crp_payload_length;
+ i += sizeof(block)) {
+ len = imin(crp->crp_payload_length - i,
+ sizeof(block));
+ crypto_copydata(crp, crp->crp_payload_start + i,
+ len, block);
bzero(block + len, sizeof(block) - len);
exf->decrypt(kschedule, block);
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crde->crd_skip + i, len, block);
+ crypto_copyback(crp, crp->crp_payload_start + i,
+ len, block);
}
} else
error = EBADMSG;
@@ -1571,8 +1500,8 @@
}
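
The length block assembled in ccr_gcm_soft() encodes len(AAD) and len(C) in bits, each
as a 64-bit big-endian integer per NIST SP 800-38D; the code above stores only the low
32-bit words because the bzero() already cleared the upper halves. An equivalent
self-contained sketch (gcm_length_block is a hypothetical name):

	#include <stdint.h>

	static void
	gcm_length_block(uint8_t block[16], uint64_t aad_len,
	    uint64_t payload_len)
	{
		uint64_t abits = aad_len * 8, pbits = payload_len * 8;
		int i;

		for (i = 0; i < 8; i++) {
			block[7 - i] = abits >> (i * 8);	/* bytes 0..7 */
			block[15 - i] = pbits >> (i * 8);	/* bytes 8..15 */
		}
	}
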
static void
-generate_ccm_b0(struct cryptodesc *crda, struct cryptodesc *crde,
- u_int hash_size_in_response, const char *iv, char *b0)
+generate_ccm_b0(struct cryptop *crp, u_int hash_size_in_response,
+ const char *iv, char *b0)
{
u_int i, payload_len;
@@ -1583,7 +1512,7 @@
b0[0] |= (((hash_size_in_response - 2) / 2) << 3);
/* Store the payload length as a big-endian value. */
- payload_len = crde->crd_len;
+ payload_len = crp->crp_payload_length;
for (i = 0; i < iv[0]; i++) {
b0[CCM_CBC_BLOCK_LEN - 1 - i] = payload_len;
payload_len >>= 8;
@@ -1595,15 +1524,14 @@
* start of block 1. This only assumes a 16-bit AAD length
* since T6 doesn't support large AAD sizes.
*/
- if (crda->crd_len != 0) {
+ if (crp->crp_aad_length != 0) {
b0[0] |= (1 << 6);
- *(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crda->crd_len);
+ *(uint16_t *)(b0 + CCM_B0_SIZE) = htobe16(crp->crp_aad_length);
}
}
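
generate_ccm_b0() follows RFC 3610: bit 6 of the flags byte marks the presence of AAD,
bits 5-3 encode the tag length as (M - 2) / 2, and bits 2-0 hold L - 1, where
L = 15 - nonce_len is the number of trailing message-length octets (the caller stashes
L - 1 in iv[0]). A self-contained sketch under the RFC's definitions (ccm_b0 is a
hypothetical name):

	#include <stdint.h>
	#include <string.h>

	static void
	ccm_b0(uint8_t b0[16], const uint8_t *nonce, int nonce_len,
	    int tag_len, uint64_t payload_len, int aad_present)
	{
		int L = 15 - nonce_len;		/* length octets */
		int i;

		b0[0] = (aad_present ? 0x40 : 0x00) |
		    (((tag_len - 2) / 2) << 3) | (L - 1);
		memcpy(b0 + 1, nonce, nonce_len);
		for (i = 0; i < L; i++) {
			b0[15 - i] = payload_len & 0xff;
			payload_len >>= 8;
		}
	}

The 16-bit AAD length written right after B0 matches the RFC 3610 encoding for AAD
shorter than 2^16 - 2^8 bytes; as the comment notes, T6 cannot handle larger AAD anyway.
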
static int
-ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp,
- struct cryptodesc *crda, struct cryptodesc *crde)
+ccr_ccm(struct ccr_softc *sc, struct ccr_session *s, struct cryptop *crp)
{
char iv[CHCR_MAX_CRYPTO_IV_LEN];
struct ulptx_idata *idata;
@@ -1625,14 +1553,7 @@
* The crypto engine doesn't handle CCM requests with an empty
* payload, so handle those in software instead.
*/
- if (crde->crd_len == 0)
- return (EMSGSIZE);
-
- /*
- * AAD is only permitted before the cipher/plain text, not
- * after.
- */
- if (crda->crd_len + crda->crd_skip > crde->crd_len + crde->crd_skip)
+ if (crp->crp_payload_length == 0)
return (EMSGSIZE);
/*
@@ -1640,14 +1561,21 @@
* request.
*/
b0_len = CCM_B0_SIZE;
- if (crda->crd_len != 0)
+ if (crp->crp_aad_length != 0)
b0_len += CCM_AAD_FIELD_SIZE;
- aad_len = b0_len + crda->crd_len;
+ aad_len = b0_len + crp->crp_aad_length;
/*
- * Always assume a 12 byte input IV for now since that is what
- * OCF always generates. The full IV in the work request is
- * 16 bytes.
+ * CCM requests should always provide an explicit IV (really
+ * the nonce).
+ */
+ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
+ return (EINVAL);
+
+ /*
+ * Always assume a 12 byte input nonce for now since that is
+ * what OCF always generates. The full IV in the work request
+ * is 16 bytes.
*/
iv_len = AES_BLOCK_LEN;
@@ -1655,7 +1583,7 @@
return (EMSGSIZE);
hash_size_in_response = s->ccm_mac.hash_len;
- if (crde->crd_flags & CRD_F_ENCRYPT)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
op_type = CHCR_ENCRYPT_OP;
else
op_type = CHCR_DECRYPT_OP;
@@ -1670,11 +1598,12 @@
* output buffer.
*/
if (op_type == CHCR_ENCRYPT_OP) {
- if (iv_len + aad_len + crde->crd_len + hash_size_in_response >
- MAX_REQUEST_SIZE)
+ if (iv_len + aad_len + crp->crp_payload_length +
+ hash_size_in_response > MAX_REQUEST_SIZE)
return (EFBIG);
} else {
- if (iv_len + aad_len + crde->crd_len > MAX_REQUEST_SIZE)
+ if (iv_len + aad_len + crp->crp_payload_length >
+ MAX_REQUEST_SIZE)
return (EFBIG);
}
sglist_reset(sc->sg_dsgl);
@@ -1682,13 +1611,13 @@
aad_len);
if (error)
return (error);
- error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp, crde->crd_skip,
- crde->crd_len);
+ error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
+ crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_ENCRYPT_OP) {
error = sglist_append_sglist(sc->sg_dsgl, sc->sg_crp,
- crda->crd_inject, hash_size_in_response);
+ crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
@@ -1715,7 +1644,7 @@
* inside of the AAD region, so a second copy is always
* required.
*/
- input_len = aad_len + crde->crd_len;
+ input_len = aad_len + crp->crp_payload_length;
if (op_type == CHCR_DECRYPT_OP)
input_len += hash_size_in_response;
if (input_len > MAX_REQUEST_SIZE)
@@ -1729,19 +1658,19 @@
imm_len = b0_len;
sglist_reset(sc->sg_ulptx);
- if (crda->crd_len != 0) {
+ if (crp->crp_aad_length != 0) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crda->crd_skip, crda->crd_len);
+ crp->crp_aad_start, crp->crp_aad_length);
if (error)
return (error);
}
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crde->crd_skip, crde->crd_len);
+ crp->crp_payload_start, crp->crp_payload_length);
if (error)
return (error);
if (op_type == CHCR_DECRYPT_OP) {
error = sglist_append_sglist(sc->sg_ulptx, sc->sg_crp,
- crda->crd_inject, hash_size_in_response);
+ crp->crp_digest_start, hash_size_in_response);
if (error)
return (error);
}
@@ -1774,27 +1703,12 @@
memset(crwr, 0, wr_len);
/*
- * Read the nonce from the request or generate a random one if
- * none is provided. Use the nonce to generate the full IV
- * with the counter set to 0.
+ * Read the nonce from the request. Use the nonce to generate
+ * the full IV with the counter set to 0.
*/
memset(iv, 0, iv_len);
iv[0] = (15 - AES_CCM_IV_LEN) - 1;
- if (op_type == CHCR_ENCRYPT_OP) {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv + 1, crde->crd_iv, AES_CCM_IV_LEN);
- else
- arc4rand(iv + 1, AES_CCM_IV_LEN, 0);
- if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, AES_CCM_IV_LEN, iv + 1);
- } else {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv + 1, crde->crd_iv, AES_CCM_IV_LEN);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, AES_CCM_IV_LEN, iv + 1);
- }
+ memcpy(iv + 1, crp->crp_iv, AES_CCM_IV_LEN);
ccr_populate_wreq(sc, crwr, kctx_len, wr_len, imm_len, sgl_len, 0,
crp);
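
The counter block A0 assembled here shares the nonce with B0 but carries only L - 1 in
its flags byte and leaves the counter octets zero; with the 12-byte nonce OCF supplies,
L = 3, so iv[0] = 2. Sketch (ccm_a0 is a hypothetical name):

	#include <stdint.h>
	#include <string.h>

	static void
	ccm_a0(uint8_t a0[16], const uint8_t *nonce, int nonce_len)
	{
		memset(a0, 0, 16);
		a0[0] = (15 - nonce_len) - 1;	/* L - 1 */
		memcpy(a0 + 1, nonce, nonce_len);
	}
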
@@ -1851,20 +1765,20 @@
dst += sizeof(struct cpl_rx_phys_dsgl) + dsgl_len;
memcpy(dst, iv, iv_len);
dst += iv_len;
- generate_ccm_b0(crda, crde, hash_size_in_response, iv, dst);
+ generate_ccm_b0(crp, hash_size_in_response, iv, dst);
if (sgl_nsegs == 0) {
dst += b0_len;
- if (crda->crd_len != 0) {
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crda->crd_skip, crda->crd_len, dst);
- dst += crda->crd_len;
+ if (crp->crp_aad_length != 0) {
+ crypto_copydata(crp, crp->crp_aad_start,
+ crp->crp_aad_length, dst);
+ dst += crp->crp_aad_length;
}
- crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip,
- crde->crd_len, dst);
- dst += crde->crd_len;
+ crypto_copydata(crp, crp->crp_payload_start,
+ crp->crp_payload_length, dst);
+ dst += crp->crp_payload_length;
if (op_type == CHCR_DECRYPT_OP)
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crda->crd_inject, hash_size_in_response, dst);
+ crypto_copydata(crp, crp->crp_digest_start,
+ hash_size_in_response, dst);
} else {
dst += CCM_B0_SIZE;
if (b0_len > CCM_B0_SIZE) {
@@ -1911,8 +1825,7 @@
* performing the operation in software. Derived from swcr_authenc().
*/
static void
-ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp,
- struct cryptodesc *crda, struct cryptodesc *crde)
+ccr_ccm_soft(struct ccr_session *s, struct cryptop *crp)
{
struct auth_hash *axf;
struct enc_xform *exf;
@@ -1956,31 +1869,20 @@
if (error)
goto out;
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crde->crd_iv, AES_CCM_IV_LEN);
- else
- arc4rand(iv, AES_CCM_IV_LEN, 0);
- if ((crde->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, AES_CCM_IV_LEN, iv);
- } else {
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(iv, crde->crd_iv, AES_CCM_IV_LEN);
- else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crde->crd_inject, AES_CCM_IV_LEN, iv);
+ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0) {
+ error = EINVAL;
+ goto out;
}
+ memcpy(iv, crp->crp_iv, AES_CCM_IV_LEN);
- auth_ctx->aes_cbc_mac_ctx.authDataLength = crda->crd_len;
- auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crde->crd_len;
+ auth_ctx->aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
+ auth_ctx->aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;
axf->Reinit(auth_ctx, iv, sizeof(iv));
/* MAC the AAD. */
- for (i = 0; i < crda->crd_len; i += sizeof(block)) {
- len = imin(crda->crd_len - i, sizeof(block));
- crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_skip +
- i, len, block);
+ for (i = 0; i < crp->crp_aad_length; i += sizeof(block)) {
+ len = imin(crp->crp_aad_length - i, sizeof(block));
+ crypto_copydata(crp, crp->crp_aad_start + i, len, block);
bzero(block + len, sizeof(block) - len);
axf->Update(auth_ctx, block, sizeof(block));
}
@@ -1988,16 +1890,15 @@
exf->reinit(kschedule, iv);
/* Do encryption/decryption with MAC */
- for (i = 0; i < crde->crd_len; i += sizeof(block)) {
- len = imin(crde->crd_len - i, sizeof(block));
- crypto_copydata(crp->crp_flags, crp->crp_buf, crde->crd_skip +
- i, len, block);
+ for (i = 0; i < crp->crp_payload_length; i += sizeof(block)) {
+ len = imin(crp->crp_payload_length - i, sizeof(block));
+ crypto_copydata(crp, crp->crp_payload_start + i, len, block);
bzero(block + len, sizeof(block) - len);
- if (crde->crd_flags & CRD_F_ENCRYPT) {
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
axf->Update(auth_ctx, block, len);
exf->encrypt(kschedule, block);
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crde->crd_skip + i, len, block);
+ crypto_copyback(crp, crp->crp_payload_start + i, len,
+ block);
} else {
exf->decrypt(kschedule, block);
axf->Update(auth_ctx, block, len);
@@ -2008,28 +1909,30 @@
axf->Final(digest, auth_ctx);
/* Inject or validate tag. */
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- crypto_copyback(crp->crp_flags, crp->crp_buf, crda->crd_inject,
- sizeof(digest), digest);
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ crypto_copyback(crp, crp->crp_digest_start, sizeof(digest),
+ digest);
error = 0;
} else {
- char digest2[GMAC_DIGEST_LEN];
+ char digest2[AES_CBC_MAC_HASH_LEN];
- crypto_copydata(crp->crp_flags, crp->crp_buf, crda->crd_inject,
- sizeof(digest2), digest2);
+ crypto_copydata(crp, crp->crp_digest_start, sizeof(digest2),
+ digest2);
if (timingsafe_bcmp(digest, digest2, sizeof(digest)) == 0) {
error = 0;
/* Tag matches, decrypt data. */
exf->reinit(kschedule, iv);
- for (i = 0; i < crde->crd_len; i += sizeof(block)) {
- len = imin(crde->crd_len - i, sizeof(block));
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crde->crd_skip + i, len, block);
+ for (i = 0; i < crp->crp_payload_length;
+ i += sizeof(block)) {
+ len = imin(crp->crp_payload_length - i,
+ sizeof(block));
+ crypto_copydata(crp, crp->crp_payload_start + i,
+ len, block);
bzero(block + len, sizeof(block) - len);
exf->decrypt(kschedule, block);
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crde->crd_skip + i, len, block);
+ crypto_copyback(crp, crp->crp_payload_start + i,
+ len, block);
}
} else
error = EBADMSG;
@@ -2096,11 +1999,11 @@
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "cipher_decrypt", CTLFLAG_RD,
&sc->stats_blkcipher_decrypt, 0,
"Cipher decryption requests submitted");
- SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_encrypt", CTLFLAG_RD,
- &sc->stats_authenc_encrypt, 0,
+ SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_encrypt", CTLFLAG_RD,
+ &sc->stats_eta_encrypt, 0,
"Combined AES+HMAC encryption requests submitted");
- SYSCTL_ADD_U64(ctx, children, OID_AUTO, "authenc_decrypt", CTLFLAG_RD,
- &sc->stats_authenc_decrypt, 0,
+ SYSCTL_ADD_U64(ctx, children, OID_AUTO, "eta_decrypt", CTLFLAG_RD,
+ &sc->stats_eta_decrypt, 0,
"Combined AES+HMAC decryption requests submitted");
SYSCTL_ADD_U64(ctx, children, OID_AUTO, "gcm_encrypt", CTLFLAG_RD,
&sc->stats_gcm_encrypt, 0, "AES-GCM encryption requests submitted");
@@ -2161,25 +2064,6 @@
sc->sg_iv_aad = sglist_build(sc->iv_aad_buf, MAX_AAD_LEN, M_WAITOK);
ccr_sysctls(sc);
- crypto_register(cid, CRYPTO_SHA1, 0, 0);
- crypto_register(cid, CRYPTO_SHA2_224, 0, 0);
- crypto_register(cid, CRYPTO_SHA2_256, 0, 0);
- crypto_register(cid, CRYPTO_SHA2_384, 0, 0);
- crypto_register(cid, CRYPTO_SHA2_512, 0, 0);
- crypto_register(cid, CRYPTO_SHA1_HMAC, 0, 0);
- crypto_register(cid, CRYPTO_SHA2_224_HMAC, 0, 0);
- crypto_register(cid, CRYPTO_SHA2_256_HMAC, 0, 0);
- crypto_register(cid, CRYPTO_SHA2_384_HMAC, 0, 0);
- crypto_register(cid, CRYPTO_SHA2_512_HMAC, 0, 0);
- crypto_register(cid, CRYPTO_AES_CBC, 0, 0);
- crypto_register(cid, CRYPTO_AES_ICM, 0, 0);
- crypto_register(cid, CRYPTO_AES_NIST_GCM_16, 0, 0);
- crypto_register(cid, CRYPTO_AES_128_NIST_GMAC, 0, 0);
- crypto_register(cid, CRYPTO_AES_192_NIST_GMAC, 0, 0);
- crypto_register(cid, CRYPTO_AES_256_NIST_GMAC, 0, 0);
- crypto_register(cid, CRYPTO_AES_XTS, 0, 0);
- crypto_register(cid, CRYPTO_AES_CCM_16, 0, 0);
- crypto_register(cid, CRYPTO_AES_CCM_CBC_MAC, 0, 0);
return (0);
}
@@ -2207,48 +2091,48 @@
}
static void
-ccr_init_hash_digest(struct ccr_session *s, int cri_alg)
+ccr_init_hash_digest(struct ccr_session *s)
{
union authctx auth_ctx;
struct auth_hash *axf;
axf = s->hmac.auth_hash;
axf->Init(&auth_ctx);
- t4_copy_partial_hash(cri_alg, &auth_ctx, s->hmac.pads);
+ t4_copy_partial_hash(axf->type, &auth_ctx, s->hmac.pads);
}
-static int
+static bool
ccr_aes_check_keylen(int alg, int klen)
{
- switch (klen) {
+ switch (klen * 8) {
case 128:
case 192:
if (alg == CRYPTO_AES_XTS)
- return (EINVAL);
+ return (false);
break;
case 256:
break;
case 512:
if (alg != CRYPTO_AES_XTS)
- return (EINVAL);
+ return (false);
break;
default:
- return (EINVAL);
+ return (false);
}
- return (0);
+ return (true);
}
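
Note that klen and csp_cipher_klen are byte counts in the new API, hence the klen * 8
above. AES-XTS carries two AES keys back to back, which is why only 256- and 512-bit
totals pass for XTS. Illustrative calls against the patched function:

	ccr_aes_check_keylen(CRYPTO_AES_CBC, 32);  /* true:  AES-256-CBC */
	ccr_aes_check_keylen(CRYPTO_AES_XTS, 64);  /* true:  AES-256-XTS */
	ccr_aes_check_keylen(CRYPTO_AES_XTS, 16);  /* false: XTS needs two keys */
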
static void
-ccr_aes_setkey(struct ccr_session *s, int alg, const void *key, int klen)
+ccr_aes_setkey(struct ccr_session *s, const void *key, int klen)
{
unsigned int ck_size, iopad_size, kctx_flits, kctx_len, kbits, mk_size;
unsigned int opad_present;
- if (alg == CRYPTO_AES_XTS)
- kbits = klen / 2;
+ if (s->blkcipher.cipher_mode == SCMD_CIPH_MODE_AES_XTS)
+ kbits = (klen / 2) * 8;
else
- kbits = klen;
+ kbits = klen * 8;
switch (kbits) {
case 128:
ck_size = CHCR_KEYCTX_CIPHER_KEY_SIZE_128;
@@ -2263,18 +2147,18 @@
panic("should not get here");
}
- s->blkcipher.key_len = klen / 8;
+ s->blkcipher.key_len = klen;
memcpy(s->blkcipher.enckey, key, s->blkcipher.key_len);
- switch (alg) {
- case CRYPTO_AES_CBC:
- case CRYPTO_AES_XTS:
+ switch (s->blkcipher.cipher_mode) {
+ case SCMD_CIPH_MODE_AES_CBC:
+ case SCMD_CIPH_MODE_AES_XTS:
t4_aes_getdeckey(s->blkcipher.deckey, key, kbits);
break;
}
kctx_len = roundup2(s->blkcipher.key_len, 16);
switch (s->mode) {
- case AUTHENC:
+ case ETA:
mk_size = s->hmac.mk_size;
opad_present = 1;
iopad_size = roundup2(s->hmac.partial_digest_len, 16);
@@ -2309,171 +2193,220 @@
}
kctx_flits = (sizeof(struct _key_ctx) + kctx_len) / 16;
s->blkcipher.key_ctx_hdr = htobe32(V_KEY_CONTEXT_CTX_LEN(kctx_flits) |
- V_KEY_CONTEXT_DUAL_CK(alg == CRYPTO_AES_XTS) |
+ V_KEY_CONTEXT_DUAL_CK(s->blkcipher.cipher_mode ==
+ SCMD_CIPH_MODE_AES_XTS) |
V_KEY_CONTEXT_OPAD_PRESENT(opad_present) |
V_KEY_CONTEXT_SALT_PRESENT(1) | V_KEY_CONTEXT_CK_SIZE(ck_size) |
V_KEY_CONTEXT_MK_SIZE(mk_size) | V_KEY_CONTEXT_VALID(1));
}
+static bool
+ccr_auth_supported(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA2_224:
+ case CRYPTO_SHA2_256:
+ case CRYPTO_SHA2_384:
+ case CRYPTO_SHA2_512:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_224_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ break;
+ default:
+ return (false);
+ }
+ return (true);
+}
+
+static bool
+ccr_cipher_supported(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return (false);
+ break;
+ case CRYPTO_AES_ICM:
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return (false);
+ break;
+ case CRYPTO_AES_XTS:
+ if (csp->csp_ivlen != AES_XTS_IV_LEN)
+ return (false);
+ break;
+ default:
+ return (false);
+ }
+ return (ccr_aes_check_keylen(csp->csp_cipher_alg,
+ csp->csp_cipher_klen));
+}
+
static int
-ccr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
-{
- struct ccr_softc *sc;
- struct ccr_session *s;
- struct auth_hash *auth_hash;
- struct cryptoini *c, *hash, *cipher;
- unsigned int auth_mode, cipher_mode, iv_len, mk_size;
- unsigned int partial_digest_len;
- int error;
- bool gcm_hash, hmac;
-
- if (cri == NULL)
+ccr_cipher_mode(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ return (SCMD_CIPH_MODE_AES_CBC);
+ case CRYPTO_AES_ICM:
+ return (SCMD_CIPH_MODE_AES_CTR);
+ case CRYPTO_AES_NIST_GCM_16:
+ return (SCMD_CIPH_MODE_AES_GCM);
+ case CRYPTO_AES_XTS:
+ return (SCMD_CIPH_MODE_AES_XTS);
+ case CRYPTO_AES_CCM_16:
+ return (SCMD_CIPH_MODE_AES_CCM);
+ default:
+ return (SCMD_CIPH_MODE_NOP);
+ }
+}
+
+static int
+ccr_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+ unsigned int cipher_mode;
+
+ if (csp->csp_flags != 0)
return (EINVAL);
-
- gcm_hash = false;
- hmac = false;
- cipher = NULL;
- hash = NULL;
- auth_hash = NULL;
- auth_mode = SCMD_AUTH_MODE_NOP;
- cipher_mode = SCMD_CIPH_MODE_NOP;
- iv_len = 0;
- mk_size = 0;
- partial_digest_len = 0;
- for (c = cri; c != NULL; c = c->cri_next) {
- switch (c->cri_alg) {
- case CRYPTO_SHA1:
- case CRYPTO_SHA2_224:
- case CRYPTO_SHA2_256:
- case CRYPTO_SHA2_384:
- case CRYPTO_SHA2_512:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_224_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- case CRYPTO_AES_CCM_CBC_MAC:
- if (hash)
- return (EINVAL);
- hash = c;
- switch (c->cri_alg) {
- case CRYPTO_SHA1:
- case CRYPTO_SHA1_HMAC:
- auth_hash = &auth_hash_hmac_sha1;
- auth_mode = SCMD_AUTH_MODE_SHA1;
- mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
- partial_digest_len = SHA1_HASH_LEN;
- break;
- case CRYPTO_SHA2_224:
- case CRYPTO_SHA2_224_HMAC:
- auth_hash = &auth_hash_hmac_sha2_224;
- auth_mode = SCMD_AUTH_MODE_SHA224;
- mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
- partial_digest_len = SHA2_256_HASH_LEN;
- break;
- case CRYPTO_SHA2_256:
- case CRYPTO_SHA2_256_HMAC:
- auth_hash = &auth_hash_hmac_sha2_256;
- auth_mode = SCMD_AUTH_MODE_SHA256;
- mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
- partial_digest_len = SHA2_256_HASH_LEN;
- break;
- case CRYPTO_SHA2_384:
- case CRYPTO_SHA2_384_HMAC:
- auth_hash = &auth_hash_hmac_sha2_384;
- auth_mode = SCMD_AUTH_MODE_SHA512_384;
- mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
- partial_digest_len = SHA2_512_HASH_LEN;
- break;
- case CRYPTO_SHA2_512:
- case CRYPTO_SHA2_512_HMAC:
- auth_hash = &auth_hash_hmac_sha2_512;
- auth_mode = SCMD_AUTH_MODE_SHA512_512;
- mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
- partial_digest_len = SHA2_512_HASH_LEN;
- break;
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- gcm_hash = true;
- auth_mode = SCMD_AUTH_MODE_GHASH;
- mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_128;
- break;
- case CRYPTO_AES_CCM_CBC_MAC:
- auth_mode = SCMD_AUTH_MODE_CBCMAC;
- break;
- }
- switch (c->cri_alg) {
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_224_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- hmac = true;
- break;
- }
- break;
- case CRYPTO_AES_CBC:
- case CRYPTO_AES_ICM:
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ if (!ccr_auth_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!ccr_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_AEAD:
+ switch (csp->csp_cipher_alg) {
case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_XTS:
+ if (csp->csp_ivlen != AES_GCM_IV_LEN)
+ return (EINVAL);
+ if (csp->csp_auth_mlen < 0 ||
+ csp->csp_auth_mlen > AES_GMAC_HASH_LEN)
+ return (EINVAL);
+ break;
case CRYPTO_AES_CCM_16:
- if (cipher)
+ if (csp->csp_ivlen != AES_CCM_IV_LEN)
+ return (EINVAL);
+ if (csp->csp_auth_mlen < 0 ||
+ csp->csp_auth_mlen > AES_CBC_MAC_HASH_LEN)
return (EINVAL);
- cipher = c;
- switch (c->cri_alg) {
- case CRYPTO_AES_CBC:
- cipher_mode = SCMD_CIPH_MODE_AES_CBC;
- iv_len = AES_BLOCK_LEN;
- break;
- case CRYPTO_AES_ICM:
- cipher_mode = SCMD_CIPH_MODE_AES_CTR;
- iv_len = AES_BLOCK_LEN;
- break;
- case CRYPTO_AES_NIST_GCM_16:
- cipher_mode = SCMD_CIPH_MODE_AES_GCM;
- iv_len = AES_GCM_IV_LEN;
- break;
- case CRYPTO_AES_XTS:
- cipher_mode = SCMD_CIPH_MODE_AES_XTS;
- iv_len = AES_BLOCK_LEN;
- break;
- case CRYPTO_AES_CCM_16:
- cipher_mode = SCMD_CIPH_MODE_AES_CCM;
- iv_len = AES_CCM_IV_LEN;
- break;
- }
- if (c->cri_key != NULL) {
- error = ccr_aes_check_keylen(c->cri_alg,
- c->cri_klen);
- if (error)
- return (error);
- }
break;
default:
return (EINVAL);
}
- }
- if (gcm_hash != (cipher_mode == SCMD_CIPH_MODE_AES_GCM))
- return (EINVAL);
- if ((auth_mode == SCMD_AUTH_MODE_CBCMAC) !=
- (cipher_mode == SCMD_CIPH_MODE_AES_CCM))
- return (EINVAL);
- if (hash == NULL && cipher == NULL)
+ break;
+ case CSP_MODE_ETA:
+ if (!ccr_auth_supported(csp) || !ccr_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ default:
return (EINVAL);
- if (hash != NULL) {
- if (hmac || gcm_hash || auth_mode == SCMD_AUTH_MODE_CBCMAC) {
- if (hash->cri_key == NULL)
- return (EINVAL);
- } else {
- if (hash->cri_key != NULL)
- return (EINVAL);
- }
}
+ if (csp->csp_cipher_klen != 0) {
+ cipher_mode = ccr_cipher_mode(csp);
+ if (cipher_mode == SCMD_CIPH_MODE_NOP)
+ return (EINVAL);
+ }
+
+ return (CRYPTODEV_PROBE_HARDWARE);
+}
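
ccr_probesession() is the driver half of the new two-step session setup: the framework
asks every driver to grade the immutable crypto_session_params and selects the best
CRYPTODEV_PROBE_* priority before any per-session state exists. A consumer-side sketch
of what drives this path (field values are illustrative, and the exact
crypto_newsession() contract should be checked against crypto(9)):

	struct crypto_session_params csp;
	crypto_session_t ses;
	int error;

	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_CIPHER;
	csp.csp_cipher_alg = CRYPTO_AES_CBC;
	csp.csp_cipher_klen = 32;		/* bytes, not bits */
	csp.csp_ivlen = AES_BLOCK_LEN;
	error = crypto_newsession(&ses, &csp, CRYPTOCAP_F_HARDWARE);
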
+
+static int
+ccr_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
+{
+ struct ccr_softc *sc;
+ struct ccr_session *s;
+ struct auth_hash *auth_hash;
+ unsigned int auth_mode, cipher_mode, mk_size;
+ unsigned int partial_digest_len;
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA1_HMAC:
+ auth_hash = &auth_hash_hmac_sha1;
+ auth_mode = SCMD_AUTH_MODE_SHA1;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_160;
+ partial_digest_len = SHA1_HASH_LEN;
+ break;
+ case CRYPTO_SHA2_224:
+ case CRYPTO_SHA2_224_HMAC:
+ auth_hash = &auth_hash_hmac_sha2_224;
+ auth_mode = SCMD_AUTH_MODE_SHA224;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ partial_digest_len = SHA2_256_HASH_LEN;
+ break;
+ case CRYPTO_SHA2_256:
+ case CRYPTO_SHA2_256_HMAC:
+ auth_hash = &auth_hash_hmac_sha2_256;
+ auth_mode = SCMD_AUTH_MODE_SHA256;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_256;
+ partial_digest_len = SHA2_256_HASH_LEN;
+ break;
+ case CRYPTO_SHA2_384:
+ case CRYPTO_SHA2_384_HMAC:
+ auth_hash = &auth_hash_hmac_sha2_384;
+ auth_mode = SCMD_AUTH_MODE_SHA512_384;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
+ partial_digest_len = SHA2_512_HASH_LEN;
+ break;
+ case CRYPTO_SHA2_512:
+ case CRYPTO_SHA2_512_HMAC:
+ auth_hash = &auth_hash_hmac_sha2_512;
+ auth_mode = SCMD_AUTH_MODE_SHA512_512;
+ mk_size = CHCR_KEYCTX_MAC_KEY_SIZE_512;
+ partial_digest_len = SHA2_512_HASH_LEN;
+ break;
+ default:
+ auth_hash = NULL;
+ auth_mode = SCMD_AUTH_MODE_NOP;
+ mk_size = 0;
+ partial_digest_len = 0;
+ break;
+ }
+
+ cipher_mode = ccr_cipher_mode(csp);
+
+#ifdef INVARIANTS
+ switch (csp->csp_mode) {
+ case CSP_MODE_CIPHER:
+ if (cipher_mode == SCMD_CIPH_MODE_NOP ||
+ cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
+ cipher_mode == SCMD_CIPH_MODE_AES_CCM)
+ panic("invalid cipher algo");
+ break;
+ case CSP_MODE_DIGEST:
+ if (auth_mode == SCMD_AUTH_MODE_NOP)
+ panic("invalid auth algo");
+ break;
+ case CSP_MODE_AEAD:
+ if (cipher_mode != SCMD_CIPH_MODE_AES_GCM &&
+ cipher_mode != SCMD_CIPH_MODE_AES_CCM)
+ panic("invalid aead cipher algo");
+ if (auth_mode != SCMD_AUTH_MODE_NOP)
+			panic("invalid aead auth algo");
+ break;
+ case CSP_MODE_ETA:
+ if (cipher_mode == SCMD_CIPH_MODE_NOP ||
+ cipher_mode == SCMD_CIPH_MODE_AES_GCM ||
+ cipher_mode == SCMD_CIPH_MODE_AES_CCM)
+ panic("invalid cipher algo");
+ if (auth_mode == SCMD_AUTH_MODE_NOP)
+ panic("invalid auth algo");
+ break;
+ default:
+ panic("invalid csp mode");
+ }
+#endif
+
sc = device_get_softc(dev);
/*
@@ -2493,54 +2426,61 @@
s = crypto_get_driver_session(cses);
- if (gcm_hash)
- s->mode = GCM;
- else if (cipher_mode == SCMD_CIPH_MODE_AES_CCM)
- s->mode = CCM;
- else if (hash != NULL && cipher != NULL)
- s->mode = AUTHENC;
- else if (hash != NULL) {
- if (hmac)
+ switch (csp->csp_mode) {
+ case CSP_MODE_AEAD:
+ if (cipher_mode == SCMD_CIPH_MODE_AES_CCM)
+ s->mode = CCM;
+ else
+ s->mode = GCM;
+ break;
+ case CSP_MODE_ETA:
+ s->mode = ETA;
+ break;
+ case CSP_MODE_DIGEST:
+ if (csp->csp_auth_klen != 0)
s->mode = HMAC;
else
s->mode = HASH;
- } else {
- MPASS(cipher != NULL);
+ break;
+ case CSP_MODE_CIPHER:
s->mode = BLKCIPHER;
+ break;
}
- if (gcm_hash) {
- if (hash->cri_mlen == 0)
+
+ if (s->mode == GCM) {
+ if (csp->csp_auth_mlen == 0)
s->gmac.hash_len = AES_GMAC_HASH_LEN;
else
- s->gmac.hash_len = hash->cri_mlen;
- t4_init_gmac_hash(hash->cri_key, hash->cri_klen,
+ s->gmac.hash_len = csp->csp_auth_mlen;
+ t4_init_gmac_hash(csp->csp_cipher_key, csp->csp_cipher_klen,
s->gmac.ghash_h);
- } else if (auth_mode == SCMD_AUTH_MODE_CBCMAC) {
- if (hash->cri_mlen == 0)
+ } else if (s->mode == CCM) {
+ if (csp->csp_auth_mlen == 0)
s->ccm_mac.hash_len = AES_CBC_MAC_HASH_LEN;
else
- s->ccm_mac.hash_len = hash->cri_mlen;
- } else if (hash != NULL) {
+ s->ccm_mac.hash_len = csp->csp_auth_mlen;
+ } else if (auth_mode != SCMD_AUTH_MODE_NOP) {
s->hmac.auth_hash = auth_hash;
s->hmac.auth_mode = auth_mode;
s->hmac.mk_size = mk_size;
s->hmac.partial_digest_len = partial_digest_len;
- if (hash->cri_mlen == 0)
+ if (csp->csp_auth_mlen == 0)
s->hmac.hash_len = auth_hash->hashsize;
else
- s->hmac.hash_len = hash->cri_mlen;
- if (hmac)
+ s->hmac.hash_len = csp->csp_auth_mlen;
+ if (csp->csp_auth_key != NULL)
t4_init_hmac_digest(auth_hash, partial_digest_len,
- hash->cri_key, hash->cri_klen, s->hmac.pads);
+ csp->csp_auth_key, csp->csp_auth_klen,
+ s->hmac.pads);
else
- ccr_init_hash_digest(s, hash->cri_alg);
+ ccr_init_hash_digest(s);
}
- if (cipher != NULL) {
+ if (cipher_mode != SCMD_CIPH_MODE_NOP) {
s->blkcipher.cipher_mode = cipher_mode;
- s->blkcipher.iv_len = iv_len;
- if (cipher->cri_key != NULL)
- ccr_aes_setkey(s, cipher->cri_alg, cipher->cri_key,
- cipher->cri_klen);
+ s->blkcipher.iv_len = csp->csp_ivlen;
+ if (csp->csp_cipher_key != NULL)
+ ccr_aes_setkey(s, csp->csp_cipher_key,
+ csp->csp_cipher_klen);
}
s->active = true;
@@ -2568,15 +2508,12 @@
static int
ccr_process(device_t dev, struct cryptop *crp, int hint)
{
+ const struct crypto_session_params *csp;
struct ccr_softc *sc;
struct ccr_session *s;
- struct cryptodesc *crd, *crda, *crde;
int error;
- if (crp == NULL)
- return (EINVAL);
-
- crd = crp->crp_desc;
+ csp = crypto_get_params(crp->crp_session);
s = crypto_get_driver_session(crp->crp_session);
sc = device_get_softc(dev);
@@ -2594,141 +2531,82 @@
sc->stats_hash++;
break;
case HMAC:
- if (crd->crd_flags & CRD_F_KEY_EXPLICIT)
+ if (crp->crp_auth_key != NULL)
t4_init_hmac_digest(s->hmac.auth_hash,
- s->hmac.partial_digest_len, crd->crd_key,
- crd->crd_klen, s->hmac.pads);
+ s->hmac.partial_digest_len, crp->crp_auth_key,
+ csp->csp_auth_klen, s->hmac.pads);
error = ccr_hash(sc, s, crp);
if (error == 0)
sc->stats_hmac++;
break;
case BLKCIPHER:
- if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
- error = ccr_aes_check_keylen(crd->crd_alg,
- crd->crd_klen);
- if (error)
- break;
- ccr_aes_setkey(s, crd->crd_alg, crd->crd_key,
- crd->crd_klen);
- }
+ if (crp->crp_cipher_key != NULL)
+ ccr_aes_setkey(s, crp->crp_cipher_key,
+ csp->csp_cipher_klen);
error = ccr_blkcipher(sc, s, crp);
if (error == 0) {
- if (crd->crd_flags & CRD_F_ENCRYPT)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
sc->stats_blkcipher_encrypt++;
else
sc->stats_blkcipher_decrypt++;
}
break;
- case AUTHENC:
- error = 0;
- switch (crd->crd_alg) {
- case CRYPTO_AES_CBC:
- case CRYPTO_AES_ICM:
- case CRYPTO_AES_XTS:
- /* Only encrypt-then-authenticate supported. */
- crde = crd;
- crda = crd->crd_next;
- if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
- error = EINVAL;
- break;
- }
- break;
- default:
- crda = crd;
- crde = crd->crd_next;
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- error = EINVAL;
- break;
- }
- break;
- }
- if (error)
- break;
- if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
+ case ETA:
+ if (crp->crp_auth_key != NULL)
t4_init_hmac_digest(s->hmac.auth_hash,
- s->hmac.partial_digest_len, crda->crd_key,
- crda->crd_klen, s->hmac.pads);
- if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
- error = ccr_aes_check_keylen(crde->crd_alg,
- crde->crd_klen);
- if (error)
- break;
- ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
- crde->crd_klen);
- }
- error = ccr_authenc(sc, s, crp, crda, crde);
+ s->hmac.partial_digest_len, crp->crp_auth_key,
+ csp->csp_auth_klen, s->hmac.pads);
+ if (crp->crp_cipher_key != NULL)
+ ccr_aes_setkey(s, crp->crp_cipher_key,
+ csp->csp_cipher_klen);
+ error = ccr_eta(sc, s, crp);
if (error == 0) {
- if (crde->crd_flags & CRD_F_ENCRYPT)
- sc->stats_authenc_encrypt++;
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
+ sc->stats_eta_encrypt++;
else
- sc->stats_authenc_decrypt++;
+ sc->stats_eta_decrypt++;
}
break;
case GCM:
- error = 0;
- if (crd->crd_alg == CRYPTO_AES_NIST_GCM_16) {
- crde = crd;
- crda = crd->crd_next;
- } else {
- crda = crd;
- crde = crd->crd_next;
+ if (crp->crp_cipher_key != NULL) {
+ t4_init_gmac_hash(crp->crp_cipher_key,
+ csp->csp_cipher_klen, s->gmac.ghash_h);
+ ccr_aes_setkey(s, crp->crp_cipher_key,
+ csp->csp_cipher_klen);
}
- if (crda->crd_flags & CRD_F_KEY_EXPLICIT)
- t4_init_gmac_hash(crda->crd_key, crda->crd_klen,
- s->gmac.ghash_h);
- if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
- error = ccr_aes_check_keylen(crde->crd_alg,
- crde->crd_klen);
- if (error)
- break;
- ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
- crde->crd_klen);
- }
- if (crde->crd_len == 0) {
+ if (crp->crp_payload_length == 0) {
mtx_unlock(&sc->lock);
- ccr_gcm_soft(s, crp, crda, crde);
+ ccr_gcm_soft(s, crp);
return (0);
}
- error = ccr_gcm(sc, s, crp, crda, crde);
+ error = ccr_gcm(sc, s, crp);
if (error == EMSGSIZE) {
sc->stats_sw_fallback++;
mtx_unlock(&sc->lock);
- ccr_gcm_soft(s, crp, crda, crde);
+ ccr_gcm_soft(s, crp);
return (0);
}
if (error == 0) {
- if (crde->crd_flags & CRD_F_ENCRYPT)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
sc->stats_gcm_encrypt++;
else
sc->stats_gcm_decrypt++;
}
break;
case CCM:
- error = 0;
- if (crd->crd_alg == CRYPTO_AES_CCM_16) {
- crde = crd;
- crda = crd->crd_next;
- } else {
- crda = crd;
- crde = crd->crd_next;
+ if (crp->crp_cipher_key != NULL) {
+ ccr_aes_setkey(s, crp->crp_cipher_key,
+ csp->csp_cipher_klen);
}
- if (crde->crd_flags & CRD_F_KEY_EXPLICIT) {
- error = ccr_aes_check_keylen(crde->crd_alg,
- crde->crd_klen);
- if (error)
- break;
- ccr_aes_setkey(s, crde->crd_alg, crde->crd_key,
- crde->crd_klen);
- }
- error = ccr_ccm(sc, s, crp, crda, crde);
+ error = ccr_ccm(sc, s, crp);
if (error == EMSGSIZE) {
sc->stats_sw_fallback++;
mtx_unlock(&sc->lock);
- ccr_ccm_soft(s, crp, crda, crde);
+ ccr_ccm_soft(s, crp);
return (0);
}
if (error == 0) {
- if (crde->crd_flags & CRD_F_ENCRYPT)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
sc->stats_ccm_encrypt++;
else
sc->stats_ccm_decrypt++;
@@ -2789,8 +2667,8 @@
case BLKCIPHER:
error = ccr_blkcipher_done(sc, s, crp, cpl, error);
break;
- case AUTHENC:
- error = ccr_authenc_done(sc, s, crp, cpl, error);
+ case ETA:
+ error = ccr_eta_done(sc, s, crp, cpl, error);
break;
case GCM:
error = ccr_gcm_done(sc, s, crp, cpl, error);
@@ -2835,6 +2713,7 @@
DEVMETHOD(device_attach, ccr_attach),
DEVMETHOD(device_detach, ccr_detach),
+ DEVMETHOD(cryptodev_probesession, ccr_probesession),
DEVMETHOD(cryptodev_newsession, ccr_newsession),
DEVMETHOD(cryptodev_freesession, ccr_freesession),
DEVMETHOD(cryptodev_process, ccr_process),
Index: sys/dev/cxgbe/crypto/t4_keyctx.c
===================================================================
--- sys/dev/cxgbe/crypto/t4_keyctx.c
+++ sys/dev/cxgbe/crypto/t4_keyctx.c
@@ -73,7 +73,7 @@
uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
int rounds;
- rounds = rijndaelKeySetupEnc(keysched, key, klen);
+ rounds = rijndaelKeySetupEnc(keysched, key, klen * 8);
rijndaelEncrypt(keysched, rounds, zeroes, ghash);
}
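
t4_init_gmac_hash() computes the GHASH hash subkey H = E_K(0^128) from NIST SP 800-38D;
since klen is now a byte count, the bit count rijndaelKeySetupEnc() expects is rebuilt
with klen * 8. The same derivation as a standalone sketch (assuming the in-kernel
rijndael interface):

	static void
	compute_ghash_h(const uint8_t *key, int klen /* bytes */,
	    uint8_t ghash[GMAC_BLOCK_LEN])
	{
		uint32_t keysched[4 * (RIJNDAEL_MAXNR + 1)];
		uint8_t zeroes[GMAC_BLOCK_LEN] = { 0 };
		int rounds;

		rounds = rijndaelKeySetupEnc(keysched, key, klen * 8);
		rijndaelEncrypt(keysched, rounds, zeroes, ghash);
	}
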
@@ -118,45 +118,19 @@
void
t4_init_hmac_digest(struct auth_hash *axf, u_int partial_digest_len,
- char *key, int klen, char *dst)
+ const char *key, int klen, char *dst)
{
union authctx auth_ctx;
- char ipad[SHA2_512_BLOCK_LEN], opad[SHA2_512_BLOCK_LEN];
- u_int i;
- /*
- * If the key is larger than the block size, use the digest of
- * the key as the key instead.
- */
- klen /= 8;
- if (klen > axf->blocksize) {
- axf->Init(&auth_ctx);
- axf->Update(&auth_ctx, key, klen);
- axf->Final(ipad, &auth_ctx);
- klen = axf->hashsize;
- } else
- memcpy(ipad, key, klen);
-
- memset(ipad + klen, 0, axf->blocksize - klen);
- memcpy(opad, ipad, axf->blocksize);
-
- for (i = 0; i < axf->blocksize; i++) {
- ipad[i] ^= HMAC_IPAD_VAL;
- opad[i] ^= HMAC_OPAD_VAL;
- }
-
- /*
- * Hash the raw ipad and opad and store the partial results in
- * the key context.
- */
- axf->Init(&auth_ctx);
- axf->Update(&auth_ctx, ipad, axf->blocksize);
+ hmac_init_ipad(axf, key, klen, &auth_ctx);
t4_copy_partial_hash(axf->type, &auth_ctx, dst);
dst += roundup2(partial_digest_len, 16);
- axf->Init(&auth_ctx);
- axf->Update(&auth_ctx, opad, axf->blocksize);
+
+ hmac_init_opad(axf, key, klen, &auth_ctx);
t4_copy_partial_hash(axf->type, &auth_ctx, dst);
+
+ explicit_bzero(&auth_ctx, sizeof(auth_ctx));
}
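
hmac_init_ipad() and hmac_init_opad() are the shared helpers this hunk adopts in place
of the open-coded pad handling deleted above. Per RFC 2104 each presumably hashes one
block of key ^ 0x36 (ipad) or key ^ 0x5c (opad), after first replacing an over-long key
with its digest; in effect the removed code, factored into one routine (hmac_pad_init
is a hypothetical consolidation):

	static void
	hmac_pad_init(struct auth_hash *axf, const char *key, int klen,
	    union authctx *ctx, uint8_t pad_val)
	{
		char pad[SHA2_512_BLOCK_LEN];
		u_int i;

		if (klen > axf->blocksize) {
			axf->Init(ctx);
			axf->Update(ctx, key, klen);
			axf->Final(pad, ctx);
			klen = axf->hashsize;
		} else
			memcpy(pad, key, klen);
		memset(pad + klen, 0, axf->blocksize - klen);
		for (i = 0; i < axf->blocksize; i++)
			pad[i] ^= pad_val;	/* 0x36 ipad, 0x5c opad */
		axf->Init(ctx);
		axf->Update(ctx, pad, axf->blocksize);
		explicit_bzero(pad, sizeof(pad));
	}
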
/*
Index: sys/dev/cxgbe/tom/t4_tls.c
===================================================================
--- sys/dev/cxgbe/tom/t4_tls.c
+++ sys/dev/cxgbe/tom/t4_tls.c
@@ -892,7 +892,7 @@
k_ctx->tx_key_info_size += GMAC_BLOCK_LEN;
memcpy(k_ctx->tx.salt, tls->params.iv, SALT_SIZE);
t4_init_gmac_hash(tls->params.cipher_key,
- tls->params.cipher_key_len * 8, hash);
+ tls->params.cipher_key_len, hash);
} else {
switch (tls->params.auth_algorithm) {
case CRYPTO_SHA1_HMAC:
@@ -920,7 +920,7 @@
k_ctx->tx_key_info_size += roundup2(mac_key_size, 16) * 2;
k_ctx->mac_secret_size = mac_key_size;
t4_init_hmac_digest(axf, mac_key_size, tls->params.auth_key,
- tls->params.auth_key_len * 8, hash);
+ tls->params.auth_key_len, hash);
}
k_ctx->frag_size = tls->params.max_frame_len;
Index: sys/dev/glxsb/glxsb.h
===================================================================
--- sys/dev/glxsb/glxsb.h
+++ sys/dev/glxsb/glxsb.h
@@ -37,8 +37,6 @@
struct glxsb_session {
uint32_t ses_key[4]; /* key */
- uint8_t ses_iv[SB_AES_BLOCK_SIZE]; /* initialization vector */
- int ses_klen; /* key len */
struct auth_hash *ses_axf;
uint8_t *ses_ictx;
uint8_t *ses_octx;
@@ -46,10 +44,10 @@
};
int glxsb_hash_setup(struct glxsb_session *ses,
- struct cryptoini *macini);
+ const struct crypto_session_params *csp);
int glxsb_hash_process(struct glxsb_session *ses,
- struct cryptodesc *maccrd, struct cryptop *crp);
+ const struct crypto_session_params *csp, struct cryptop *crp);
void glxsb_hash_free(struct glxsb_session *ses);
Index: sys/dev/glxsb/glxsb.c
===================================================================
--- sys/dev/glxsb/glxsb.c
+++ sys/dev/glxsb/glxsb.c
@@ -51,7 +51,6 @@
#include <dev/pci/pcireg.h>
#include <opencrypto/cryptodev.h>
-#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>
#include "cryptodev_if.h"
@@ -172,8 +171,6 @@
struct glxsb_taskop {
struct glxsb_session *to_ses; /* crypto session */
 	struct cryptop *to_crp;		/* cryptop to perform */
- struct cryptodesc *to_enccrd; /* enccrd to perform */
- struct cryptodesc *to_maccrd; /* maccrd to perform */
};
struct glxsb_softc {
@@ -204,13 +201,16 @@
static void glxsb_rnd(void *);
static int glxsb_crypto_setup(struct glxsb_softc *);
-static int glxsb_crypto_newsession(device_t, crypto_session_t, struct cryptoini *);
+static int glxsb_crypto_probesession(device_t,
+ const struct crypto_session_params *);
+static int glxsb_crypto_newsession(device_t, crypto_session_t,
+ const struct crypto_session_params *);
static void glxsb_crypto_freesession(device_t, crypto_session_t);
static int glxsb_aes(struct glxsb_softc *, uint32_t, uint32_t,
- uint32_t, void *, int, void *);
+ uint32_t, const void *, int, const void *);
-static int glxsb_crypto_encdec(struct cryptop *, struct cryptodesc *,
- struct glxsb_session *, struct glxsb_softc *);
+static int glxsb_crypto_encdec(struct cryptop *, struct glxsb_session *,
+ struct glxsb_softc *);
static void glxsb_crypto_task(void *, int);
static int glxsb_crypto_process(device_t, struct cryptop *, int);
@@ -222,6 +222,7 @@
DEVMETHOD(device_detach, glxsb_detach),
/* crypto device methods */
+ DEVMETHOD(cryptodev_probesession, glxsb_crypto_probesession),
DEVMETHOD(cryptodev_newsession, glxsb_crypto_newsession),
DEVMETHOD(cryptodev_freesession, glxsb_crypto_freesession),
DEVMETHOD(cryptodev_process, glxsb_crypto_process),
@@ -477,91 +478,67 @@
mtx_init(&sc->sc_task_mtx, "glxsb_crypto_mtx", NULL, MTX_DEF);
- if (crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0) != 0)
- goto crypto_fail;
- if (crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0) != 0)
- goto crypto_fail;
- if (crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0) != 0)
- goto crypto_fail;
- if (crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0) != 0)
- goto crypto_fail;
- if (crypto_register(sc->sc_cid, CRYPTO_RIPEMD160_HMAC, 0, 0) != 0)
- goto crypto_fail;
- if (crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0) != 0)
- goto crypto_fail;
- if (crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0) != 0)
- goto crypto_fail;
- if (crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0) != 0)
- goto crypto_fail;
-
return (0);
-
-crypto_fail:
- device_printf(sc->sc_dev, "cannot register crypto\n");
- crypto_unregister_all(sc->sc_cid);
- mtx_destroy(&sc->sc_task_mtx);
- return (ENOMEM);
}
static int
-glxsb_crypto_newsession(device_t dev, crypto_session_t cses,
- struct cryptoini *cri)
+glxsb_crypto_probesession(device_t dev, const struct crypto_session_params *csp)
{
- struct glxsb_softc *sc = device_get_softc(dev);
- struct glxsb_session *ses;
- struct cryptoini *encini, *macini;
- int error;
- if (sc == NULL || cri == NULL)
+ if (csp->csp_flags != 0)
return (EINVAL);
- encini = macini = NULL;
- for (; cri != NULL; cri = cri->cri_next) {
- switch(cri->cri_alg) {
- case CRYPTO_NULL_HMAC:
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_RIPEMD160_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- if (macini != NULL)
- return (EINVAL);
- macini = cri;
- break;
- case CRYPTO_AES_CBC:
- if (encini != NULL)
- return (EINVAL);
- encini = cri;
- break;
- default:
- return (EINVAL);
- }
- }
-
/*
* We only support HMAC algorithms to be able to work with
* ipsec(4), so if we are asked only for authentication without
- * encryption, don't pretend we can accellerate it.
+ * encryption, don't pretend we can accelerate it.
*/
- if (encini == NULL)
+ switch (csp->csp_mode) {
+ case CSP_MODE_ETA:
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_NULL_HMAC:
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ break;
+ default:
+ return (EINVAL);
+ }
+ /* FALLTHROUGH */
+ case CSP_MODE_CIPHER:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ if (csp->csp_cipher_klen * 8 != 128)
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+		}
+		break;
+	default:
return (EINVAL);
-
- ses = crypto_get_driver_session(cses);
- if (encini->cri_alg == CRYPTO_AES_CBC) {
- if (encini->cri_klen != 128) {
- glxsb_crypto_freesession(sc->sc_dev, cses);
- return (EINVAL);
- }
- arc4rand(ses->ses_iv, sizeof(ses->ses_iv), 0);
- ses->ses_klen = encini->cri_klen;
-
- /* Copy the key (Geode LX wants the primary key only) */
- bcopy(encini->cri_key, ses->ses_key, sizeof(ses->ses_key));
}
+ return (CRYPTODEV_PROBE_HARDWARE);
+}
- if (macini != NULL) {
- error = glxsb_hash_setup(ses, macini);
+static int
+glxsb_crypto_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
+{
+ struct glxsb_softc *sc = device_get_softc(dev);
+ struct glxsb_session *ses;
+ int error;
+
+ ses = crypto_get_driver_session(cses);
+
+ /* Copy the key (Geode LX wants the primary key only) */
+ if (csp->csp_cipher_key != NULL)
+ bcopy(csp->csp_cipher_key, ses->ses_key, sizeof(ses->ses_key));
+
+ if (csp->csp_auth_alg != 0) {
+ error = glxsb_hash_setup(ses, csp);
if (error != 0) {
glxsb_crypto_freesession(sc->sc_dev, cses);
return (error);
@@ -574,19 +551,15 @@
static void
glxsb_crypto_freesession(device_t dev, crypto_session_t cses)
{
- struct glxsb_softc *sc = device_get_softc(dev);
struct glxsb_session *ses;
- if (sc == NULL)
- return;
-
ses = crypto_get_driver_session(cses);
glxsb_hash_free(ses);
}
static int
glxsb_aes(struct glxsb_softc *sc, uint32_t control, uint32_t psrc,
- uint32_t pdst, void *key, int len, void *iv)
+ uint32_t pdst, const void *key, int len, const void *iv)
{
uint32_t status;
int i;
@@ -652,23 +625,24 @@
}
static int
-glxsb_crypto_encdec(struct cryptop *crp, struct cryptodesc *crd,
- struct glxsb_session *ses, struct glxsb_softc *sc)
+glxsb_crypto_encdec(struct cryptop *crp, struct glxsb_session *ses,
+ struct glxsb_softc *sc)
{
char *op_src, *op_dst;
+ const void *key;
uint32_t op_psrc, op_pdst;
- uint8_t op_iv[SB_AES_BLOCK_SIZE], *piv;
+ uint8_t op_iv[SB_AES_BLOCK_SIZE];
int error;
int len, tlen, xlen;
int offset;
uint32_t control;
- if (crd == NULL || (crd->crd_len % SB_AES_BLOCK_SIZE) != 0)
+ if ((crp->crp_payload_length % SB_AES_BLOCK_SIZE) != 0)
return (EINVAL);
/* How much of our buffer will we need to use? */
- xlen = crd->crd_len > GLXSB_MAX_AES_LEN ?
- GLXSB_MAX_AES_LEN : crd->crd_len;
+ xlen = crp->crp_payload_length > GLXSB_MAX_AES_LEN ?
+ GLXSB_MAX_AES_LEN : crp->crp_payload_length;
/*
* XXX Check if we can have input == output on Geode LX.
@@ -680,73 +654,57 @@
op_psrc = sc->sc_dma.dma_paddr;
op_pdst = sc->sc_dma.dma_paddr + xlen;
- if (crd->crd_flags & CRD_F_ENCRYPT) {
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
control = SB_CTL_ENC;
- if (crd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(crd->crd_iv, op_iv, sizeof(op_iv));
- else
- bcopy(ses->ses_iv, op_iv, sizeof(op_iv));
-
- if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, sizeof(op_iv), op_iv);
- }
- } else {
+ else
control = SB_CTL_DEC;
- if (crd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(crd->crd_iv, op_iv, sizeof(op_iv));
- else {
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, sizeof(op_iv), op_iv);
- }
- }
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(op_iv, sizeof(op_iv), 0);
+ crypto_copyback(crp, crp->crp_iv_start, sizeof(op_iv), op_iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(op_iv, crp->crp_iv, sizeof(op_iv));
+ else
+ crypto_copydata(crp, crp->crp_iv_start, sizeof(op_iv), op_iv);
+
offset = 0;
- tlen = crd->crd_len;
- piv = op_iv;
+ tlen = crp->crp_payload_length;
+
+ if (crp->crp_cipher_key != NULL)
+ key = crp->crp_cipher_key;
+ else
+ key = ses->ses_key;
/* Process the data in GLXSB_MAX_AES_LEN chunks */
while (tlen > 0) {
len = (tlen > GLXSB_MAX_AES_LEN) ? GLXSB_MAX_AES_LEN : tlen;
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crd->crd_skip + offset, len, op_src);
+ crypto_copydata(crp, crp->crp_payload_start + offset, len,
+ op_src);
glxsb_dma_pre_op(sc, &sc->sc_dma);
- error = glxsb_aes(sc, control, op_psrc, op_pdst, ses->ses_key,
- len, op_iv);
+ error = glxsb_aes(sc, control, op_psrc, op_pdst, key, len,
+ op_iv);
glxsb_dma_post_op(sc, &sc->sc_dma);
if (error != 0)
return (error);
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crd->crd_skip + offset, len, op_dst);
+ crypto_copyback(crp, crp->crp_payload_start + offset, len,
+ op_dst);
offset += len;
tlen -= len;
- if (tlen <= 0) { /* Ideally, just == 0 */
- /* Finished - put the IV in session IV */
- piv = ses->ses_iv;
- }
-
/*
- * Copy out last block for use as next iteration/session IV.
- *
- * piv is set to op_iv[] before the loop starts, but is
- * set to ses->ses_iv if we're going to exit the loop this
- * time.
+ * Copy out last block for use as next iteration IV.
*/
- if (crd->crd_flags & CRD_F_ENCRYPT)
- bcopy(op_dst + len - sizeof(op_iv), piv, sizeof(op_iv));
- else {
- /* Decryption, only need this if another iteration */
- if (tlen > 0) {
- bcopy(op_src + len - sizeof(op_iv), piv,
- sizeof(op_iv));
- }
- }
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
+ bcopy(op_dst + len - sizeof(op_iv), op_iv,
+ sizeof(op_iv));
+ else
+ bcopy(op_src + len - sizeof(op_iv), op_iv,
+ sizeof(op_iv));
} /* while */
/* All AES processing has now been done. */
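
Two details in the rewritten loop are worth calling out. First, the request IV now has
exactly three sources: generated by the driver and copied back into the buffer for
CRYPTO_F_IV_GENERATE, passed out of band in crp->crp_iv for CRYPTO_F_IV_SEPARATE, or
read from the data stream at crp_iv_start otherwise. Second, CBC chaining across
GLXSB_MAX_AES_LEN chunks takes the next IV from the last ciphertext block of the
current chunk, which is the output when encrypting and the input when decrypting. The
chaining rule in isolation (cbc_next_iv is a hypothetical name):

	#include <stdbool.h>
	#include <stdint.h>
	#include <string.h>

	#define AES_BLK	16

	static void
	cbc_next_iv(uint8_t iv[AES_BLK], const uint8_t *in,
	    const uint8_t *out, size_t len, bool encrypting)
	{
		const uint8_t *last = (encrypting ? out : in) + len - AES_BLK;

		memcpy(iv, last, AES_BLK);
	}
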
@@ -759,30 +717,31 @@
glxsb_crypto_task(void *arg, int pending)
{
struct glxsb_softc *sc = arg;
+ const struct crypto_session_params *csp;
struct glxsb_session *ses;
struct cryptop *crp;
- struct cryptodesc *enccrd, *maccrd;
int error;
- maccrd = sc->sc_to.to_maccrd;
- enccrd = sc->sc_to.to_enccrd;
crp = sc->sc_to.to_crp;
ses = sc->sc_to.to_ses;
+ csp = crypto_get_params(crp->crp_session);
/* Perform data authentication if requested before encryption */
- if (maccrd != NULL && maccrd->crd_next == enccrd) {
- error = glxsb_hash_process(ses, maccrd, crp);
+ if (csp->csp_mode == CSP_MODE_ETA &&
+ !CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ error = glxsb_hash_process(ses, csp, crp);
if (error != 0)
goto out;
}
- error = glxsb_crypto_encdec(crp, enccrd, ses, sc);
+ error = glxsb_crypto_encdec(crp, ses, sc);
if (error != 0)
goto out;
/* Perform data authentication if requested after encryption */
- if (maccrd != NULL && enccrd->crd_next == maccrd) {
- error = glxsb_hash_process(ses, maccrd, crp);
+ if (csp->csp_mode == CSP_MODE_ETA &&
+ CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ error = glxsb_hash_process(ses, csp, crp);
if (error != 0)
goto out;
}
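
glxsb only accelerates the cipher, so the ETA ordering is enforced by hand in this
task: verify the MAC before decrypting, generate it after encrypting, with the MAC
always covering the ciphertext. Reduced to a sketch (mac_verify, mac_generate, and
cipher_op are hypothetical stand-ins for glxsb_hash_process() and
glxsb_crypto_encdec()):

	#include <errno.h>
	#include <stdbool.h>

	int mac_verify(void);	/* hypothetical */
	int mac_generate(void);	/* hypothetical */
	int cipher_op(void);	/* hypothetical */

	static int
	eta_process(bool encrypting)
	{
		int error;

		if (!encrypting && (error = mac_verify()) != 0)
			return (error);		/* typically EBADMSG */
		if ((error = cipher_op()) != 0)
			return (error);
		if (encrypting)
			return (mac_generate());
		return (0);
	}
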
@@ -801,52 +760,6 @@
{
struct glxsb_softc *sc = device_get_softc(dev);
struct glxsb_session *ses;
- struct cryptodesc *crd, *enccrd, *maccrd;
- int error = 0;
-
- enccrd = maccrd = NULL;
-
- /* Sanity check. */
- if (crp == NULL)
- return (EINVAL);
-
- if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
- error = EINVAL;
- goto fail;
- }
-
- for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
- switch (crd->crd_alg) {
- case CRYPTO_NULL_HMAC:
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_RIPEMD160_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- if (maccrd != NULL) {
- error = EINVAL;
- goto fail;
- }
- maccrd = crd;
- break;
- case CRYPTO_AES_CBC:
- if (enccrd != NULL) {
- error = EINVAL;
- goto fail;
- }
- enccrd = crd;
- break;
- default:
- error = EINVAL;
- goto fail;
- }
- }
-
- if (enccrd == NULL || enccrd->crd_len % AES_BLOCK_LEN != 0) {
- error = EINVAL;
- goto fail;
- }
ses = crypto_get_driver_session(crp->crp_session);
@@ -857,17 +770,10 @@
}
sc->sc_task_count++;
- sc->sc_to.to_maccrd = maccrd;
- sc->sc_to.to_enccrd = enccrd;
sc->sc_to.to_crp = crp;
sc->sc_to.to_ses = ses;
mtx_unlock(&sc->sc_task_mtx);
taskqueue_enqueue(sc->sc_tq, &sc->sc_cryptotask);
return(0);
-
-fail:
- crp->crp_etype = error;
- crypto_done(crp);
- return (error);
}
Index: sys/dev/glxsb/glxsb_hash.c
===================================================================
--- sys/dev/glxsb/glxsb_hash.c
+++ sys/dev/glxsb/glxsb_hash.c
@@ -33,7 +33,6 @@
#include <sys/systm.h>
#include <sys/malloc.h>
-#include <opencrypto/cryptosoft.h> /* for hmac_ipad_buffer and hmac_opad_buffer */
#include <opencrypto/xform.h>
#include "glxsb.h"
@@ -51,92 +50,66 @@
MALLOC_DECLARE(M_GLXSB);
static void
-glxsb_hash_key_setup(struct glxsb_session *ses, caddr_t key, int klen)
+glxsb_hash_key_setup(struct glxsb_session *ses, const char *key, int klen)
{
struct auth_hash *axf;
- int i;
- klen /= 8;
axf = ses->ses_axf;
-
- for (i = 0; i < klen; i++)
- key[i] ^= HMAC_IPAD_VAL;
-
- axf->Init(ses->ses_ictx);
- axf->Update(ses->ses_ictx, key, klen);
- axf->Update(ses->ses_ictx, hmac_ipad_buffer, axf->blocksize - klen);
-
- for (i = 0; i < klen; i++)
- key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
-
- axf->Init(ses->ses_octx);
- axf->Update(ses->ses_octx, key, klen);
- axf->Update(ses->ses_octx, hmac_opad_buffer, axf->blocksize - klen);
-
- for (i = 0; i < klen; i++)
- key[i] ^= HMAC_OPAD_VAL;
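+	/* Precompute the keyed inner and outer HMAC contexts. */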
+ hmac_init_ipad(axf, key, klen, ses->ses_ictx);
+ hmac_init_opad(axf, key, klen, ses->ses_octx);
}
/*
* Compute keyed-hash authenticator.
*/
static int
-glxsb_authcompute(struct glxsb_session *ses, struct cryptodesc *crd,
- caddr_t buf, int flags)
+glxsb_authcompute(struct glxsb_session *ses, struct cryptop *crp)
{
- u_char hash[HASH_MAX_LEN];
+ u_char hash[HASH_MAX_LEN], hash2[HASH_MAX_LEN];
struct auth_hash *axf;
union authctx ctx;
int error;
axf = ses->ses_axf;
bcopy(ses->ses_ictx, &ctx, axf->ctxsize);
- error = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
+ error = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
(int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
if (error != 0)
return (error);
+ error = crypto_apply(crp, crp->crp_payload_start,
+ crp->crp_payload_length,
+ (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
+ if (error != 0)
+ return (error);
+
axf->Final(hash, &ctx);
bcopy(ses->ses_octx, &ctx, axf->ctxsize);
axf->Update(&ctx, hash, axf->hashsize);
axf->Final(hash, &ctx);
- /* Inject the authentication data */
- crypto_copyback(flags, buf, crd->crd_inject,
- ses->ses_mlen == 0 ? axf->hashsize : ses->ses_mlen, hash);
+ /* Verify or inject the authentication data */
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, ses->ses_mlen,
+ hash2);
+ if (timingsafe_bcmp(hash, hash2, ses->ses_mlen) != 0)
+ return (EBADMSG);
+ } else
+ crypto_copyback(crp, crp->crp_digest_start, ses->ses_mlen,
+ hash);
return (0);
}
int
-glxsb_hash_setup(struct glxsb_session *ses, struct cryptoini *macini)
+glxsb_hash_setup(struct glxsb_session *ses,
+ const struct crypto_session_params *csp)
{
- ses->ses_mlen = macini->cri_mlen;
-
- /* Find software structure which describes HMAC algorithm. */
- switch (macini->cri_alg) {
- case CRYPTO_NULL_HMAC:
- ses->ses_axf = &auth_hash_null;
- break;
- case CRYPTO_MD5_HMAC:
- ses->ses_axf = &auth_hash_hmac_md5;
- break;
- case CRYPTO_SHA1_HMAC:
- ses->ses_axf = &auth_hash_hmac_sha1;
- break;
- case CRYPTO_RIPEMD160_HMAC:
- ses->ses_axf = &auth_hash_hmac_ripemd_160;
- break;
- case CRYPTO_SHA2_256_HMAC:
- ses->ses_axf = &auth_hash_hmac_sha2_256;
- break;
- case CRYPTO_SHA2_384_HMAC:
- ses->ses_axf = &auth_hash_hmac_sha2_384;
- break;
- case CRYPTO_SHA2_512_HMAC:
- ses->ses_axf = &auth_hash_hmac_sha2_512;
- break;
- }
+ ses->ses_axf = crypto_auth_hash(csp);
+ if (csp->csp_auth_mlen == 0)
+ ses->ses_mlen = ses->ses_axf->hashsize;
+ else
+ ses->ses_mlen = csp->csp_auth_mlen;
/* Allocate memory for HMAC inner and outer contexts. */
ses->ses_ictx = malloc(ses->ses_axf->ctxsize, M_GLXSB,
@@ -147,23 +120,24 @@
return (ENOMEM);
/* Setup key if given. */
- if (macini->cri_key != NULL) {
- glxsb_hash_key_setup(ses, macini->cri_key,
- macini->cri_klen);
+ if (csp->csp_auth_key != NULL) {
+ glxsb_hash_key_setup(ses, csp->csp_auth_key,
+ csp->csp_auth_klen);
}
return (0);
}
int
-glxsb_hash_process(struct glxsb_session *ses, struct cryptodesc *maccrd,
- struct cryptop *crp)
+glxsb_hash_process(struct glxsb_session *ses,
+ const struct crypto_session_params *csp, struct cryptop *crp)
{
int error;
- if ((maccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0)
- glxsb_hash_key_setup(ses, maccrd->crd_key, maccrd->crd_klen);
+ if (crp->crp_auth_key != NULL)
+ glxsb_hash_key_setup(ses, crp->crp_auth_key,
+ csp->csp_auth_klen);
- error = glxsb_authcompute(ses, maccrd, crp->crp_buf, crp->crp_flags);
+ error = glxsb_authcompute(ses, crp);
return (error);
}
Index: sys/dev/hifn/hifn7751.c
===================================================================
--- sys/dev/hifn/hifn7751.c
+++ sys/dev/hifn/hifn7751.c
@@ -61,6 +61,7 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
+#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
@@ -71,6 +72,7 @@
#include <sys/rman.h>
#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform_auth.h>
#include <sys/random.h>
#include <sys/kobj.h>
@@ -102,7 +104,9 @@
static int hifn_resume(device_t);
static int hifn_shutdown(device_t);
-static int hifn_newsession(device_t, crypto_session_t, struct cryptoini *);
+static int hifn_probesession(device_t, const struct crypto_session_params *);
+static int hifn_newsession(device_t, crypto_session_t,
+ const struct crypto_session_params *);
static int hifn_process(device_t, struct cryptop *, int);
static device_method_t hifn_methods[] = {
@@ -115,6 +119,7 @@
DEVMETHOD(device_shutdown, hifn_shutdown),
/* crypto device methods */
+ DEVMETHOD(cryptodev_probesession, hifn_probesession),
DEVMETHOD(cryptodev_newsession, hifn_newsession),
DEVMETHOD(cryptodev_process, hifn_process),
@@ -356,7 +361,7 @@
caddr_t kva;
int rseg, rid;
char rbase;
- u_int16_t ena, rev;
+ uint16_t rev;
sc->sc_dev = dev;
@@ -558,33 +563,22 @@
2 + 2*((sc->sc_pllconfig & HIFN_PLL_ND) >> 11));
printf("\n");
- sc->sc_cid = crypto_get_driverid(dev, sizeof(struct hifn_session),
- CRYPTOCAP_F_HARDWARE);
- if (sc->sc_cid < 0) {
- device_printf(dev, "could not get crypto driver id\n");
- goto fail_intr;
- }
-
WRITE_REG_0(sc, HIFN_0_PUCNFG,
READ_REG_0(sc, HIFN_0_PUCNFG) | HIFN_PUCNFG_CHIPID);
- ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
+ sc->sc_ena = READ_REG_0(sc, HIFN_0_PUSTAT) & HIFN_PUSTAT_CHIPENA;
- switch (ena) {
+ switch (sc->sc_ena) {
case HIFN_PUSTAT_ENA_2:
- crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0);
- if (sc->sc_flags & HIFN_HAS_AES)
- crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
- /*FALLTHROUGH*/
case HIFN_PUSTAT_ENA_1:
- crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
+ sc->sc_cid = crypto_get_driverid(dev,
+ sizeof(struct hifn_session), CRYPTOCAP_F_HARDWARE);
+ if (sc->sc_cid < 0) {
+ device_printf(dev, "could not get crypto driver id\n");
+ goto fail_intr;
+ }
break;
}
-
+
bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -1547,6 +1541,7 @@
static u_int
hifn_write_command(struct hifn_command *cmd, u_int8_t *buf)
{
+ struct cryptop *crp;
u_int8_t *buf_pos;
hifn_base_command_t *base_cmd;
hifn_mac_command_t *mac_cmd;
@@ -1554,6 +1549,7 @@
int using_mac, using_crypt, len, ivlen;
u_int32_t dlen, slen;
+ crp = cmd->crp;
buf_pos = buf;
using_mac = cmd->base_masks & HIFN_BASE_CMD_MAC;
using_crypt = cmd->base_masks & HIFN_BASE_CMD_CRYPT;
@@ -1576,24 +1572,27 @@
if (using_mac) {
mac_cmd = (hifn_mac_command_t *)buf_pos;
- dlen = cmd->maccrd->crd_len;
+ dlen = crp->crp_aad_length + crp->crp_payload_length;
mac_cmd->source_count = htole16(dlen & 0xffff);
dlen >>= 16;
mac_cmd->masks = htole16(cmd->mac_masks |
((dlen << HIFN_MAC_CMD_SRCLEN_S) & HIFN_MAC_CMD_SRCLEN_M));
- mac_cmd->header_skip = htole16(cmd->maccrd->crd_skip);
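+		/* Hashing starts at the AAD when present, else at the payload. */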
+ if (crp->crp_aad_length != 0)
+ mac_cmd->header_skip = htole16(crp->crp_aad_start);
+ else
+ mac_cmd->header_skip = htole16(crp->crp_payload_start);
mac_cmd->reserved = 0;
buf_pos += sizeof(hifn_mac_command_t);
}
if (using_crypt) {
cry_cmd = (hifn_crypt_command_t *)buf_pos;
- dlen = cmd->enccrd->crd_len;
+ dlen = crp->crp_payload_length;
cry_cmd->source_count = htole16(dlen & 0xffff);
dlen >>= 16;
cry_cmd->masks = htole16(cmd->cry_masks |
((dlen << HIFN_CRYPT_CMD_SRCLEN_S) & HIFN_CRYPT_CMD_SRCLEN_M));
- cry_cmd->header_skip = htole16(cmd->enccrd->crd_skip);
+		cry_cmd->header_skip = htole16(crp->crp_payload_start);
cry_cmd->reserved = 0;
buf_pos += sizeof(hifn_crypt_command_t);
}
@@ -1782,15 +1781,30 @@
return (idx);
}
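+/*
+ * Return the total length of a request's data buffer regardless of
+ * whether it is backed by an mbuf chain, a uio, or a contiguous buffer.
+ */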
+static bus_size_t
+hifn_crp_length(struct cryptop *crp)
+{
+
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ return (crp->crp_mbuf->m_pkthdr.len);
+ case CRYPTO_BUF_UIO:
+ return (crp->crp_uio->uio_resid);
+ case CRYPTO_BUF_CONTIG:
+ return (crp->crp_ilen);
+ default:
+ panic("bad crp buffer type");
+ }
+}
+
static void
-hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
+hifn_op_cb(void* arg, bus_dma_segment_t *seg, int nsegs, int error)
{
struct hifn_operand *op = arg;
KASSERT(nsegs <= MAX_SCATTER,
("hifn_op_cb: too many DMA segments (%u > %u) "
"returned when mapping operand", nsegs, MAX_SCATTER));
- op->mapsize = mapsize;
op->nsegs = nsegs;
bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}
@@ -1832,130 +1846,110 @@
return (ENOMEM);
}
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
- if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->src_map,
- cmd->src_m, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
- hifnstats.hst_nomem_load++;
- err = ENOMEM;
- goto err_srcmap1;
- }
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
- if (bus_dmamap_load_uio(sc->sc_dmat, cmd->src_map,
- cmd->src_io, hifn_op_cb, &cmd->src, BUS_DMA_NOWAIT)) {
- hifnstats.hst_nomem_load++;
- err = ENOMEM;
- goto err_srcmap1;
- }
- } else {
- err = EINVAL;
+ if (bus_dmamap_load_crp(sc->sc_dmat, cmd->src_map, crp, hifn_op_cb,
+ &cmd->src, BUS_DMA_NOWAIT)) {
+ hifnstats.hst_nomem_load++;
+ err = ENOMEM;
goto err_srcmap1;
}
+ cmd->src_mapsize = hifn_crp_length(crp);
if (hifn_dmamap_aligned(&cmd->src)) {
cmd->sloplen = cmd->src_mapsize & 3;
cmd->dst = cmd->src;
- } else {
- if (crp->crp_flags & CRYPTO_F_IOV) {
- err = EINVAL;
+ } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
+ int totlen, len;
+ struct mbuf *m, *m0, *mlast;
+
+ KASSERT(cmd->dst_m == NULL,
+ ("hifn_crypto: dst_m initialized improperly"));
+ hifnstats.hst_unaligned++;
+
+ /*
+ * Source is not aligned on a longword boundary.
+		 * Copy the data to ensure alignment.  If we fail
+		 * to allocate mbufs or clusters while doing this
+		 * we return ERESTART so the operation is requeued
+		 * at the crypto layer, but only if there are
+ * ops already posted to the hardware; otherwise we
+ * have no guarantee that we'll be re-entered.
+ */
+ totlen = cmd->src_mapsize;
+ if (crp->crp_mbuf->m_flags & M_PKTHDR) {
+ len = MHLEN;
+ MGETHDR(m0, M_NOWAIT, MT_DATA);
+ if (m0 && !m_dup_pkthdr(m0, crp->crp_mbuf, M_NOWAIT)) {
+ m_free(m0);
+ m0 = NULL;
+ }
+ } else {
+ len = MLEN;
+ MGET(m0, M_NOWAIT, MT_DATA);
+ }
+ if (m0 == NULL) {
+ hifnstats.hst_nomem_mbuf++;
+ err = sc->sc_cmdu ? ERESTART : ENOMEM;
goto err_srcmap;
- } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
- int totlen, len;
- struct mbuf *m, *m0, *mlast;
-
- KASSERT(cmd->dst_m == cmd->src_m,
- ("hifn_crypto: dst_m initialized improperly"));
- hifnstats.hst_unaligned++;
- /*
- * Source is not aligned on a longword boundary.
- * Copy the data to insure alignment. If we fail
- * to allocate mbufs or clusters while doing this
- * we return ERESTART so the operation is requeued
- * at the crypto later, but only if there are
- * ops already posted to the hardware; otherwise we
- * have no guarantee that we'll be re-entered.
- */
- totlen = cmd->src_mapsize;
- if (cmd->src_m->m_flags & M_PKTHDR) {
- len = MHLEN;
- MGETHDR(m0, M_NOWAIT, MT_DATA);
- if (m0 && !m_dup_pkthdr(m0, cmd->src_m, M_NOWAIT)) {
- m_free(m0);
- m0 = NULL;
- }
- } else {
- len = MLEN;
- MGET(m0, M_NOWAIT, MT_DATA);
+ }
+ if (totlen >= MINCLSIZE) {
+ if (!(MCLGET(m0, M_NOWAIT))) {
+ hifnstats.hst_nomem_mcl++;
+ err = sc->sc_cmdu ? ERESTART : ENOMEM;
+ m_freem(m0);
+ goto err_srcmap;
}
- if (m0 == NULL) {
+ len = MCLBYTES;
+ }
+ totlen -= len;
+ m0->m_pkthdr.len = m0->m_len = len;
+ mlast = m0;
+
+ while (totlen > 0) {
+ MGET(m, M_NOWAIT, MT_DATA);
+ if (m == NULL) {
hifnstats.hst_nomem_mbuf++;
err = sc->sc_cmdu ? ERESTART : ENOMEM;
+ m_freem(m0);
goto err_srcmap;
}
+ len = MLEN;
if (totlen >= MINCLSIZE) {
- if (!(MCLGET(m0, M_NOWAIT))) {
+ if (!(MCLGET(m, M_NOWAIT))) {
hifnstats.hst_nomem_mcl++;
err = sc->sc_cmdu ? ERESTART : ENOMEM;
+ mlast->m_next = m;
m_freem(m0);
goto err_srcmap;
}
len = MCLBYTES;
}
+
+ m->m_len = len;
+ m0->m_pkthdr.len += len;
totlen -= len;
- m0->m_pkthdr.len = m0->m_len = len;
- mlast = m0;
- while (totlen > 0) {
- MGET(m, M_NOWAIT, MT_DATA);
- if (m == NULL) {
- hifnstats.hst_nomem_mbuf++;
- err = sc->sc_cmdu ? ERESTART : ENOMEM;
- m_freem(m0);
- goto err_srcmap;
- }
- len = MLEN;
- if (totlen >= MINCLSIZE) {
- if (!(MCLGET(m, M_NOWAIT))) {
- hifnstats.hst_nomem_mcl++;
- err = sc->sc_cmdu ? ERESTART : ENOMEM;
- mlast->m_next = m;
- m_freem(m0);
- goto err_srcmap;
- }
- len = MCLBYTES;
- }
-
- m->m_len = len;
- m0->m_pkthdr.len += len;
- totlen -= len;
-
- mlast->m_next = m;
- mlast = m;
- }
- cmd->dst_m = m0;
+ mlast->m_next = m;
+ mlast = m;
}
- }
+ cmd->dst_m = m0;
- if (cmd->dst_map == NULL) {
- if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT, &cmd->dst_map)) {
+ if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
+ &cmd->dst_map)) {
hifnstats.hst_nomem_map++;
err = ENOMEM;
goto err_srcmap;
}
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
- if (bus_dmamap_load_mbuf(sc->sc_dmat, cmd->dst_map,
- cmd->dst_m, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
- hifnstats.hst_nomem_map++;
- err = ENOMEM;
- goto err_dstmap1;
- }
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
- if (bus_dmamap_load_uio(sc->sc_dmat, cmd->dst_map,
- cmd->dst_io, hifn_op_cb, &cmd->dst, BUS_DMA_NOWAIT)) {
- hifnstats.hst_nomem_load++;
- err = ENOMEM;
- goto err_dstmap1;
- }
+
+ if (bus_dmamap_load_mbuf_sg(sc->sc_dmat, cmd->dst_map, m0,
+ cmd->dst_segs, &cmd->dst_nsegs, 0)) {
+ hifnstats.hst_nomem_map++;
+ err = ENOMEM;
+ goto err_dstmap1;
}
+ cmd->dst_mapsize = m0->m_pkthdr.len;
+ } else {
+ err = EINVAL;
+ goto err_srcmap;
}
#ifdef HIFN_DEBUG
@@ -2111,8 +2105,8 @@
if (cmd->src_map != cmd->dst_map)
bus_dmamap_destroy(sc->sc_dmat, cmd->dst_map);
err_srcmap:
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
- if (cmd->src_m != cmd->dst_m)
+ if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
+ if (cmd->dst_m != NULL)
m_freem(cmd->dst_m);
}
bus_dmamap_unload(sc->sc_dmat, cmd->src_map);
@@ -2307,67 +2301,121 @@
}
}
+static bool
+hifn_auth_supported(struct hifn_softc *sc,
+ const struct crypto_session_params *csp)
+{
+
+ switch (sc->sc_ena) {
+ case HIFN_PUSTAT_ENA_2:
+ case HIFN_PUSTAT_ENA_1:
+ break;
+ default:
+ return (false);
+ }
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5:
+ case CRYPTO_SHA1:
+ break;
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ if (csp->csp_auth_klen > HIFN_MAC_KEY_LENGTH)
+ return (false);
+ break;
+ default:
+ return (false);
+ }
+
+ return (true);
+}
+
+static bool
+hifn_cipher_supported(struct hifn_softc *sc,
+ const struct crypto_session_params *csp)
+{
+
+ if (csp->csp_cipher_klen == 0)
+ return (false);
+ if (csp->csp_ivlen > HIFN_MAX_IV_LENGTH)
+ return (false);
+ switch (sc->sc_ena) {
+ case HIFN_PUSTAT_ENA_2:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_3DES_CBC:
+ case CRYPTO_ARC4:
+			return (true);
+ case CRYPTO_AES_CBC:
+ if ((sc->sc_flags & HIFN_HAS_AES) == 0)
+ return (false);
+ switch (csp->csp_cipher_klen) {
+			case 128 / 8:
+			case 192 / 8:
+			case 256 / 8:
+ break;
+ default:
+ return (false);
+ }
+ return (true);
+ }
+ /*FALLTHROUGH*/
+ case HIFN_PUSTAT_ENA_1:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DES_CBC:
+ return (true);
+ }
+ break;
+ }
+ return (false);
+}
+
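+/*
+ * Check whether the supplied session parameters describe a request
+ * this engine can handle.  No session state is allocated here; a
+ * positive answer is reported as CRYPTODEV_PROBE_HARDWARE so the
+ * framework can prefer this driver over software transforms.
+ */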
+static int
+hifn_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+ struct hifn_softc *sc;
+
+ sc = device_get_softc(dev);
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ if (!hifn_auth_supported(sc, csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!hifn_cipher_supported(sc, csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_ETA:
+ if (!hifn_auth_supported(sc, csp) ||
+ !hifn_cipher_supported(sc, csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (CRYPTODEV_PROBE_HARDWARE);
+}
+
/*
- * Allocate a new 'session' and return an encoded session id. 'sidp'
- * contains our registration id, and should contain an encoded session
- * id on successful allocation.
+ * Allocate a new 'session'.
*/
static int
-hifn_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+hifn_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
{
- struct hifn_softc *sc = device_get_softc(dev);
- struct cryptoini *c;
- int mac = 0, cry = 0;
struct hifn_session *ses;
- KASSERT(sc != NULL, ("hifn_newsession: null softc"));
- if (cri == NULL || sc == NULL)
- return (EINVAL);
-
ses = crypto_get_driver_session(cses);
- for (c = cri; c != NULL; c = c->cri_next) {
- switch (c->cri_alg) {
- case CRYPTO_MD5:
- case CRYPTO_SHA1:
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1_HMAC:
- if (mac)
- return (EINVAL);
- mac = 1;
- ses->hs_mlen = c->cri_mlen;
- if (ses->hs_mlen == 0) {
- switch (c->cri_alg) {
- case CRYPTO_MD5:
- case CRYPTO_MD5_HMAC:
- ses->hs_mlen = 16;
- break;
- case CRYPTO_SHA1:
- case CRYPTO_SHA1_HMAC:
- ses->hs_mlen = 20;
- break;
- }
- }
- break;
- case CRYPTO_DES_CBC:
- case CRYPTO_3DES_CBC:
- case CRYPTO_AES_CBC:
- /* XXX this may read fewer, does it matter? */
- read_random(ses->hs_iv,
- c->cri_alg == CRYPTO_AES_CBC ?
- HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
- /*FALLTHROUGH*/
- case CRYPTO_ARC4:
- if (cry)
- return (EINVAL);
- cry = 1;
- break;
- default:
- return (EINVAL);
- }
+ if (csp->csp_auth_alg != 0) {
+ if (csp->csp_auth_mlen == 0)
+ ses->hs_mlen = crypto_auth_hash(csp)->hashsize;
+ else
+ ses->hs_mlen = csp->csp_auth_mlen;
}
- if (mac == 0 && cry == 0)
- return (EINVAL);
+
return (0);
}
@@ -2379,18 +2427,15 @@
static int
hifn_process(device_t dev, struct cryptop *crp, int hint)
{
+ const struct crypto_session_params *csp;
struct hifn_softc *sc = device_get_softc(dev);
struct hifn_command *cmd = NULL;
- int err, ivlen;
- struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
+ const void *mackey;
+ int err, ivlen, keylen;
struct hifn_session *ses;
- if (crp == NULL || crp->crp_callback == NULL) {
- hifnstats.hst_invalid++;
- return (EINVAL);
- }
-
ses = crypto_get_driver_session(crp->crp_session);
+
cmd = malloc(sizeof(struct hifn_command), M_DEVBUF, M_NOWAIT | M_ZERO);
if (cmd == NULL) {
hifnstats.hst_nomem++;
@@ -2398,80 +2443,26 @@
goto errout;
}
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
- cmd->src_m = (struct mbuf *)crp->crp_buf;
- cmd->dst_m = (struct mbuf *)crp->crp_buf;
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
- cmd->src_io = (struct uio *)crp->crp_buf;
- cmd->dst_io = (struct uio *)crp->crp_buf;
- } else {
- err = EINVAL;
- goto errout; /* XXX we don't handle contiguous buffers! */
- }
+ csp = crypto_get_params(crp->crp_session);
- crd1 = crp->crp_desc;
- if (crd1 == NULL) {
+ /*
+ * The driver only supports ETA requests where there is no
+ * gap between the AAD and payload.
+ */
+ if (csp->csp_mode == CSP_MODE_ETA && crp->crp_aad_length != 0 &&
+ crp->crp_aad_start + crp->crp_aad_length !=
+ crp->crp_payload_start) {
err = EINVAL;
goto errout;
}
- crd2 = crd1->crd_next;
- if (crd2 == NULL) {
- if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
- crd1->crd_alg == CRYPTO_SHA1_HMAC ||
- crd1->crd_alg == CRYPTO_SHA1 ||
- crd1->crd_alg == CRYPTO_MD5) {
- maccrd = crd1;
- enccrd = NULL;
- } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
- crd1->crd_alg == CRYPTO_3DES_CBC ||
- crd1->crd_alg == CRYPTO_AES_CBC ||
- crd1->crd_alg == CRYPTO_ARC4) {
- if ((crd1->crd_flags & CRD_F_ENCRYPT) == 0)
- cmd->base_masks |= HIFN_BASE_CMD_DECODE;
- maccrd = NULL;
- enccrd = crd1;
- } else {
- err = EINVAL;
- goto errout;
- }
- } else {
- if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
- crd1->crd_alg == CRYPTO_SHA1_HMAC ||
- crd1->crd_alg == CRYPTO_MD5 ||
- crd1->crd_alg == CRYPTO_SHA1) &&
- (crd2->crd_alg == CRYPTO_DES_CBC ||
- crd2->crd_alg == CRYPTO_3DES_CBC ||
- crd2->crd_alg == CRYPTO_AES_CBC ||
- crd2->crd_alg == CRYPTO_ARC4) &&
- ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
- cmd->base_masks = HIFN_BASE_CMD_DECODE;
- maccrd = crd1;
- enccrd = crd2;
- } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
- crd1->crd_alg == CRYPTO_ARC4 ||
- crd1->crd_alg == CRYPTO_3DES_CBC ||
- crd1->crd_alg == CRYPTO_AES_CBC) &&
- (crd2->crd_alg == CRYPTO_MD5_HMAC ||
- crd2->crd_alg == CRYPTO_SHA1_HMAC ||
- crd2->crd_alg == CRYPTO_MD5 ||
- crd2->crd_alg == CRYPTO_SHA1) &&
- (crd1->crd_flags & CRD_F_ENCRYPT)) {
- enccrd = crd1;
- maccrd = crd2;
- } else {
- /*
- * We cannot order the 7751 as requested
- */
- err = EINVAL;
- goto errout;
- }
- }
-
- if (enccrd) {
- cmd->enccrd = enccrd;
+ switch (csp->csp_mode) {
+ case CSP_MODE_CIPHER:
+ case CSP_MODE_ETA:
+ if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
+ cmd->base_masks |= HIFN_BASE_CMD_DECODE;
cmd->base_masks |= HIFN_BASE_CMD_CRYPT;
- switch (enccrd->crd_alg) {
+ switch (csp->csp_cipher_alg) {
case CRYPTO_ARC4:
cmd->cry_masks |= HIFN_CRYPT_CMD_ALG_RC4;
break;
@@ -2494,36 +2485,24 @@
err = EINVAL;
goto errout;
}
- if (enccrd->crd_alg != CRYPTO_ARC4) {
- ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
- HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
- if (enccrd->crd_flags & CRD_F_ENCRYPT) {
- if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(enccrd->crd_iv, cmd->iv, ivlen);
- else
- bcopy(ses->hs_iv, cmd->iv, ivlen);
-
- if ((enccrd->crd_flags & CRD_F_IV_PRESENT)
- == 0) {
- crypto_copyback(crp->crp_flags,
- crp->crp_buf, enccrd->crd_inject,
- ivlen, cmd->iv);
- }
- } else {
- if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(enccrd->crd_iv, cmd->iv, ivlen);
- else {
- crypto_copydata(crp->crp_flags,
- crp->crp_buf, enccrd->crd_inject,
- ivlen, cmd->iv);
- }
- }
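+		/*
+		 * The IV is either generated here (and copied back out
+		 * to the caller), passed separately in crp_iv, or read
+		 * from the data stream at crp_iv_start.
+		 */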
+ if (csp->csp_cipher_alg != CRYPTO_ARC4) {
+ ivlen = csp->csp_ivlen;
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(cmd->iv, ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, ivlen,
+ cmd->iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(cmd->iv, crp->crp_iv, ivlen);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, ivlen,
+ cmd->iv);
}
- if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
- cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
- cmd->ck = enccrd->crd_key;
- cmd->cklen = enccrd->crd_klen >> 3;
+ if (crp->crp_cipher_key != NULL)
+ cmd->ck = crp->crp_cipher_key;
+ else
+ cmd->ck = csp->csp_cipher_key;
+ cmd->cklen = csp->csp_cipher_klen;
cmd->cry_masks |= HIFN_CRYPT_CMD_NEW_KEY;
/*
@@ -2546,13 +2525,15 @@
goto errout;
}
}
+ break;
}
- if (maccrd) {
- cmd->maccrd = maccrd;
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ case CSP_MODE_ETA:
cmd->base_masks |= HIFN_BASE_CMD_MAC;
- switch (maccrd->crd_alg) {
+ switch (csp->csp_auth_alg) {
case CRYPTO_MD5:
cmd->mac_masks |= HIFN_MAC_CMD_ALG_MD5 |
HIFN_MAC_CMD_RESULT | HIFN_MAC_CMD_MODE_HASH |
@@ -2575,12 +2556,16 @@
break;
}
- if (maccrd->crd_alg == CRYPTO_SHA1_HMAC ||
- maccrd->crd_alg == CRYPTO_MD5_HMAC) {
+ if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC ||
+ csp->csp_auth_alg == CRYPTO_MD5_HMAC) {
cmd->mac_masks |= HIFN_MAC_CMD_NEW_KEY;
- bcopy(maccrd->crd_key, cmd->mac, maccrd->crd_klen >> 3);
- bzero(cmd->mac + (maccrd->crd_klen >> 3),
- HIFN_MAC_KEY_LENGTH - (maccrd->crd_klen >> 3));
+ if (crp->crp_auth_key != NULL)
+ mackey = crp->crp_auth_key;
+ else
+ mackey = csp->csp_auth_key;
+ keylen = csp->csp_auth_klen;
+ bcopy(mackey, cmd->mac, keylen);
+ bzero(cmd->mac + keylen, HIFN_MAC_KEY_LENGTH - keylen);
}
}
@@ -2655,9 +2640,8 @@
BUS_DMASYNC_POSTREAD);
}
- if (cmd->src_m != cmd->dst_m) {
- m_freem(cmd->src_m);
- crp->crp_buf = (caddr_t)cmd->dst_m;
+ if (cmd->dst_m != NULL) {
+ m_freem(cmd->dst_m);
}
/* non-shared buffers cannot be restarted */
@@ -2696,9 +2680,9 @@
{
struct hifn_dma *dma = sc->sc_dma;
struct cryptop *crp = cmd->crp;
- struct cryptodesc *crd;
+ uint8_t macbuf2[SHA1_HASH_LEN];
struct mbuf *m;
- int totlen, i, u, ivlen;
+ int totlen, i, u;
if (cmd->src_map == cmd->dst_map) {
bus_dmamap_sync(sc->sc_dmat, cmd->src_map,
@@ -2710,9 +2694,8 @@
BUS_DMASYNC_POSTREAD);
}
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
- if (cmd->src_m != cmd->dst_m) {
- crp->crp_buf = (caddr_t)cmd->dst_m;
+ if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
+ if (cmd->dst_m != NULL) {
totlen = cmd->src_mapsize;
for (m = cmd->dst_m; m != NULL; m = m->m_next) {
if (totlen < m->m_len) {
@@ -2721,15 +2704,15 @@
} else
totlen -= m->m_len;
}
- cmd->dst_m->m_pkthdr.len = cmd->src_m->m_pkthdr.len;
- m_freem(cmd->src_m);
+ cmd->dst_m->m_pkthdr.len = crp->crp_mbuf->m_pkthdr.len;
+ m_freem(crp->crp_mbuf);
+ crp->crp_mbuf = cmd->dst_m;
}
}
if (cmd->sloplen != 0) {
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- cmd->src_mapsize - cmd->sloplen, cmd->sloplen,
- (caddr_t)&dma->slop[cmd->slopidx]);
+ crypto_copyback(crp, cmd->src_mapsize - cmd->sloplen,
+ cmd->sloplen, &dma->slop[cmd->slopidx]);
}
i = sc->sc_dstk; u = sc->sc_dstu;
@@ -2749,37 +2732,16 @@
hifnstats.hst_obytes += cmd->dst_mapsize;
- if ((cmd->base_masks & (HIFN_BASE_CMD_CRYPT | HIFN_BASE_CMD_DECODE)) ==
- HIFN_BASE_CMD_CRYPT) {
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- if (crd->crd_alg != CRYPTO_DES_CBC &&
- crd->crd_alg != CRYPTO_3DES_CBC &&
- crd->crd_alg != CRYPTO_AES_CBC)
- continue;
- ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
- HIFN_AES_IV_LENGTH : HIFN_IV_LENGTH);
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crd->crd_skip + crd->crd_len - ivlen, ivlen,
- cmd->session->hs_iv);
- break;
- }
- }
-
if (macbuf != NULL) {
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- int len;
-
- if (crd->crd_alg != CRYPTO_MD5 &&
- crd->crd_alg != CRYPTO_SHA1 &&
- crd->crd_alg != CRYPTO_MD5_HMAC &&
- crd->crd_alg != CRYPTO_SHA1_HMAC) {
- continue;
- }
- len = cmd->session->hs_mlen;
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, len, macbuf);
- break;
- }
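+		/*
+		 * Verify the computed digest against the one supplied in
+		 * the request, or copy the digest out, as requested.
+		 */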
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start,
+ cmd->session->hs_mlen, macbuf2);
+ if (timingsafe_bcmp(macbuf, macbuf2,
+ cmd->session->hs_mlen) != 0)
+ crp->crp_etype = EBADMSG;
+ } else
+ crypto_copyback(crp, crp->crp_digest_start,
+ cmd->session->hs_mlen, macbuf);
}
if (cmd->src_map != cmd->dst_map) {
Index: sys/dev/hifn/hifn7751var.h
===================================================================
--- sys/dev/hifn/hifn7751var.h
+++ sys/dev/hifn/hifn7751var.h
@@ -105,7 +105,6 @@
struct hifn_session {
- u_int8_t hs_iv[HIFN_MAX_IV_LENGTH];
int hs_mlen;
};
@@ -160,6 +159,7 @@
int sc_cmdk, sc_srck, sc_dstk, sc_resk;
int32_t sc_cid;
+ uint16_t sc_ena;
int sc_maxses;
int sc_ramsize;
int sc_flags;
@@ -257,10 +257,6 @@
*
*/
struct hifn_operand {
- union {
- struct mbuf *m;
- struct uio *io;
- } u;
bus_dmamap_t map;
bus_size_t mapsize;
int nsegs;
@@ -269,27 +265,24 @@
struct hifn_command {
struct hifn_session *session;
u_int16_t base_masks, cry_masks, mac_masks;
- u_int8_t iv[HIFN_MAX_IV_LENGTH], *ck, mac[HIFN_MAC_KEY_LENGTH];
+ u_int8_t iv[HIFN_MAX_IV_LENGTH], mac[HIFN_MAC_KEY_LENGTH];
+ const uint8_t *ck;
int cklen;
int sloplen, slopidx;
struct hifn_operand src;
struct hifn_operand dst;
+ struct mbuf *dst_m;
struct hifn_softc *softc;
struct cryptop *crp;
- struct cryptodesc *enccrd, *maccrd;
};
-#define src_m src.u.m
-#define src_io src.u.io
#define src_map src.map
#define src_mapsize src.mapsize
#define src_segs src.segs
#define src_nsegs src.nsegs
-#define dst_m dst.u.m
-#define dst_io dst.u.io
#define dst_map dst.map
#define dst_mapsize dst.mapsize
#define dst_segs dst.segs
Index: sys/dev/safe/safe.c
===================================================================
--- sys/dev/safe/safe.c
+++ sys/dev/safe/safe.c
@@ -47,6 +47,7 @@
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
+#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
@@ -56,10 +57,8 @@
#include <sys/bus.h>
#include <sys/rman.h>
-#include <crypto/sha1.h>
#include <opencrypto/cryptodev.h>
-#include <opencrypto/cryptosoft.h>
-#include <sys/md5.h>
+#include <opencrypto/xform_auth.h>
#include <sys/random.h>
#include <sys/kobj.h>
@@ -88,7 +87,9 @@
static int safe_resume(device_t);
static int safe_shutdown(device_t);
-static int safe_newsession(device_t, crypto_session_t, struct cryptoini *);
+static int safe_probesession(device_t, const struct crypto_session_params *);
+static int safe_newsession(device_t, crypto_session_t,
+ const struct crypto_session_params *);
static int safe_process(device_t, struct cryptop *, int);
static device_method_t safe_methods[] = {
@@ -101,6 +102,7 @@
DEVMETHOD(device_shutdown, safe_shutdown),
/* crypto device methods */
+ DEVMETHOD(cryptodev_probesession, safe_probesession),
DEVMETHOD(cryptodev_newsession, safe_newsession),
DEVMETHOD(cryptodev_process, safe_process),
@@ -221,7 +223,7 @@
{
struct safe_softc *sc = device_get_softc(dev);
u_int32_t raddr;
- u_int32_t i, devinfo;
+ u_int32_t i;
int rid;
bzero(sc, sizeof (*sc));
@@ -374,12 +376,12 @@
device_printf(sc->sc_dev, "%s", safe_partname(sc));
- devinfo = READ_REG(sc, SAFE_DEVINFO);
- if (devinfo & SAFE_DEVINFO_RNG) {
+ sc->sc_devinfo = READ_REG(sc, SAFE_DEVINFO);
+ if (sc->sc_devinfo & SAFE_DEVINFO_RNG) {
sc->sc_flags |= SAFE_FLAGS_RNG;
printf(" rng");
}
- if (devinfo & SAFE_DEVINFO_PKEY) {
+ if (sc->sc_devinfo & SAFE_DEVINFO_PKEY) {
#if 0
printf(" key");
sc->sc_flags |= SAFE_FLAGS_KEY;
@@ -387,26 +389,18 @@
crypto_kregister(sc->sc_cid, CRK_MOD_EXP_CRT, 0);
#endif
}
- if (devinfo & SAFE_DEVINFO_DES) {
+ if (sc->sc_devinfo & SAFE_DEVINFO_DES) {
printf(" des/3des");
- crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
}
- if (devinfo & SAFE_DEVINFO_AES) {
+ if (sc->sc_devinfo & SAFE_DEVINFO_AES) {
printf(" aes");
- crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
}
- if (devinfo & SAFE_DEVINFO_MD5) {
+ if (sc->sc_devinfo & SAFE_DEVINFO_MD5) {
printf(" md5");
- crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
}
- if (devinfo & SAFE_DEVINFO_SHA1) {
+ if (sc->sc_devinfo & SAFE_DEVINFO_SHA1) {
printf(" sha1");
- crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
}
- printf(" null");
- crypto_register(sc->sc_cid, CRYPTO_NULL_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_NULL_HMAC, 0, 0);
/* XXX other supported algorithms */
printf("\n");
@@ -629,11 +623,11 @@
#define N(a) (sizeof(a) / sizeof (a[0]))
static void
-safe_setup_enckey(struct safe_session *ses, caddr_t key)
+safe_setup_enckey(struct safe_session *ses, const void *key)
{
int i;
- bcopy(key, ses->ses_key, ses->ses_klen / 8);
+ bcopy(key, ses->ses_key, ses->ses_klen);
/* PE is little-endian, insure proper byte order */
for (i = 0; i < N(ses->ses_key); i++)
@@ -641,47 +635,30 @@
}
static void
-safe_setup_mackey(struct safe_session *ses, int algo, caddr_t key, int klen)
+safe_setup_mackey(struct safe_session *ses, int algo, const uint8_t *key,
+ int klen)
{
MD5_CTX md5ctx;
SHA1_CTX sha1ctx;
int i;
-
- for (i = 0; i < klen; i++)
- key[i] ^= HMAC_IPAD_VAL;
-
if (algo == CRYPTO_MD5_HMAC) {
- MD5Init(&md5ctx);
- MD5Update(&md5ctx, key, klen);
- MD5Update(&md5ctx, hmac_ipad_buffer, MD5_BLOCK_LEN - klen);
+ hmac_init_ipad(&auth_hash_hmac_md5, key, klen, &md5ctx);
bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state));
- } else {
- SHA1Init(&sha1ctx);
- SHA1Update(&sha1ctx, key, klen);
- SHA1Update(&sha1ctx, hmac_ipad_buffer,
- SHA1_BLOCK_LEN - klen);
- bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
- }
- for (i = 0; i < klen; i++)
- key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
-
- if (algo == CRYPTO_MD5_HMAC) {
- MD5Init(&md5ctx);
- MD5Update(&md5ctx, key, klen);
- MD5Update(&md5ctx, hmac_opad_buffer, MD5_BLOCK_LEN - klen);
+ hmac_init_opad(&auth_hash_hmac_md5, key, klen, &md5ctx);
bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state));
+
+ explicit_bzero(&md5ctx, sizeof(md5ctx));
} else {
- SHA1Init(&sha1ctx);
- SHA1Update(&sha1ctx, key, klen);
- SHA1Update(&sha1ctx, hmac_opad_buffer,
- SHA1_BLOCK_LEN - klen);
+ hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
+ bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
+
+ hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
- }
- for (i = 0; i < klen; i++)
- key[i] ^= HMAC_OPAD_VAL;
+ explicit_bzero(&sha1ctx, sizeof(sha1ctx));
+ }
/* PE is little-endian, insure proper byte order */
for (i = 0; i < N(ses->ses_hminner); i++) {
@@ -691,90 +668,140 @@
}
#undef N
+static bool
+safe_auth_supported(struct safe_softc *sc,
+ const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ if ((sc->sc_devinfo & SAFE_DEVINFO_MD5) == 0)
+ return (false);
+ break;
+ case CRYPTO_SHA1_HMAC:
+ if ((sc->sc_devinfo & SAFE_DEVINFO_SHA1) == 0)
+ return (false);
+ break;
+ default:
+ return (false);
+ }
+ return (true);
+}
+
+static bool
+safe_cipher_supported(struct safe_softc *sc,
+ const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ if ((sc->sc_devinfo & SAFE_DEVINFO_DES) == 0)
+ return (false);
+ if (csp->csp_ivlen != 8)
+ return (false);
+ if (csp->csp_cipher_alg == CRYPTO_DES_CBC) {
+ if (csp->csp_cipher_klen != 8)
+ return (false);
+ } else {
+ if (csp->csp_cipher_klen != 24)
+ return (false);
+ }
+ break;
+ case CRYPTO_AES_CBC:
+ if ((sc->sc_devinfo & SAFE_DEVINFO_AES) == 0)
+ return (false);
+ if (csp->csp_ivlen != 16)
+ return (false);
+ if (csp->csp_cipher_klen != 16 &&
+ csp->csp_cipher_klen != 24 &&
+ csp->csp_cipher_klen != 32)
+ return (false);
+ break;
+	default:
+		return (false);
+	}
+ return (true);
+}
+
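+/*
+ * Check that the session parameters describe an operation this
+ * device can perform before the framework creates a session.
+ */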
+static int
+safe_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+ struct safe_softc *sc = device_get_softc(dev);
+
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ if (!safe_auth_supported(sc, csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!safe_cipher_supported(sc, csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_ETA:
+ if (!safe_auth_supported(sc, csp) ||
+ !safe_cipher_supported(sc, csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (CRYPTODEV_PROBE_HARDWARE);
+}
+
/*
- * Allocate a new 'session' and return an encoded session id. 'sidp'
- * contains our registration id, and should contain an encoded session
- * id on successful allocation.
+ * Allocate a new 'session'.
*/
static int
-safe_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+safe_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
{
- struct safe_softc *sc = device_get_softc(dev);
- struct cryptoini *c, *encini = NULL, *macini = NULL;
- struct safe_session *ses = NULL;
-
- if (cri == NULL || sc == NULL)
- return (EINVAL);
-
- for (c = cri; c != NULL; c = c->cri_next) {
- if (c->cri_alg == CRYPTO_MD5_HMAC ||
- c->cri_alg == CRYPTO_SHA1_HMAC ||
- c->cri_alg == CRYPTO_NULL_HMAC) {
- if (macini)
- return (EINVAL);
- macini = c;
- } else if (c->cri_alg == CRYPTO_DES_CBC ||
- c->cri_alg == CRYPTO_3DES_CBC ||
- c->cri_alg == CRYPTO_AES_CBC ||
- c->cri_alg == CRYPTO_NULL_CBC) {
- if (encini)
- return (EINVAL);
- encini = c;
- } else
- return (EINVAL);
- }
- if (encini == NULL && macini == NULL)
- return (EINVAL);
- if (encini) { /* validate key length */
- switch (encini->cri_alg) {
- case CRYPTO_DES_CBC:
- if (encini->cri_klen != 64)
- return (EINVAL);
- break;
- case CRYPTO_3DES_CBC:
- if (encini->cri_klen != 192)
- return (EINVAL);
- break;
- case CRYPTO_AES_CBC:
- if (encini->cri_klen != 128 &&
- encini->cri_klen != 192 &&
- encini->cri_klen != 256)
- return (EINVAL);
- break;
- }
- }
+ struct safe_session *ses;
ses = crypto_get_driver_session(cses);
- if (encini) {
- /* get an IV */
- /* XXX may read fewer than requested */
- read_random(ses->ses_iv, sizeof(ses->ses_iv));
-
- ses->ses_klen = encini->cri_klen;
- if (encini->cri_key != NULL)
- safe_setup_enckey(ses, encini->cri_key);
+ if (csp->csp_cipher_alg != 0) {
+ ses->ses_klen = csp->csp_cipher_klen;
+ if (csp->csp_cipher_key != NULL)
+ safe_setup_enckey(ses, csp->csp_cipher_key);
}
- if (macini) {
- ses->ses_mlen = macini->cri_mlen;
+ if (csp->csp_auth_alg != 0) {
+ ses->ses_mlen = csp->csp_auth_mlen;
if (ses->ses_mlen == 0) {
- if (macini->cri_alg == CRYPTO_MD5_HMAC)
+ if (csp->csp_auth_alg == CRYPTO_MD5_HMAC)
ses->ses_mlen = MD5_HASH_LEN;
else
ses->ses_mlen = SHA1_HASH_LEN;
}
- if (macini->cri_key != NULL) {
- safe_setup_mackey(ses, macini->cri_alg, macini->cri_key,
- macini->cri_klen / 8);
+ if (csp->csp_auth_key != NULL) {
+ safe_setup_mackey(ses, csp->csp_auth_alg,
+ csp->csp_auth_key, csp->csp_auth_klen);
}
}
return (0);
}
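+/*
+ * Return the total length of a request's data buffer for any of the
+ * supported buffer types.
+ */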
+static bus_size_t
+safe_crp_length(struct cryptop *crp)
+{
+
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ return (crp->crp_mbuf->m_pkthdr.len);
+ case CRYPTO_BUF_UIO:
+ return (crp->crp_uio->uio_resid);
+ case CRYPTO_BUF_CONTIG:
+ return (crp->crp_ilen);
+ default:
+ panic("bad crp buffer type");
+ }
+}
+
static void
-safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
+safe_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error)
{
struct safe_operand *op = arg;
@@ -782,7 +809,6 @@
-	    (u_int) mapsize, nsegs, error));
+	    nsegs, error));
if (error != 0)
return;
- op->mapsize = mapsize;
op->nsegs = nsegs;
bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}
@@ -790,11 +816,10 @@
static int
safe_process(device_t dev, struct cryptop *crp, int hint)
{
- struct safe_softc *sc = device_get_softc(dev);
+ struct safe_softc *sc = device_get_softc(dev);
+ const struct crypto_session_params *csp;
int err = 0, i, nicealign, uniform;
- struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
- int bypass, oplen, ivsize;
- caddr_t iv;
+ int bypass, oplen;
int16_t coffset;
struct safe_session *ses;
struct safe_ringentry *re;
@@ -802,11 +827,6 @@
struct safe_pdesc *pd;
u_int32_t cmd0, cmd1, staterec;
- if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
- safestats.st_invalid++;
- return (EINVAL);
- }
-
mtx_lock(&sc->sc_ringmtx);
if (sc->sc_front == sc->sc_back && sc->sc_nqchip != 0) {
safestats.st_ringfull++;
@@ -823,104 +843,46 @@
re->re_crp = crp;
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
- re->re_src_m = (struct mbuf *)crp->crp_buf;
- re->re_dst_m = (struct mbuf *)crp->crp_buf;
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
- re->re_src_io = (struct uio *)crp->crp_buf;
- re->re_dst_io = (struct uio *)crp->crp_buf;
- } else {
- safestats.st_badflags++;
- err = EINVAL;
- goto errout; /* XXX we don't handle contiguous blocks! */
- }
-
sa = &re->re_sa;
ses = crypto_get_driver_session(crp->crp_session);
-
- crd1 = crp->crp_desc;
- if (crd1 == NULL) {
- safestats.st_nodesc++;
- err = EINVAL;
- goto errout;
- }
- crd2 = crd1->crd_next;
+ csp = crypto_get_params(crp->crp_session);
cmd0 = SAFE_SA_CMD0_BASIC; /* basic group operation */
cmd1 = 0;
- if (crd2 == NULL) {
- if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
- crd1->crd_alg == CRYPTO_SHA1_HMAC ||
- crd1->crd_alg == CRYPTO_NULL_HMAC) {
- maccrd = crd1;
- enccrd = NULL;
- cmd0 |= SAFE_SA_CMD0_OP_HASH;
- } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
- crd1->crd_alg == CRYPTO_3DES_CBC ||
- crd1->crd_alg == CRYPTO_AES_CBC ||
- crd1->crd_alg == CRYPTO_NULL_CBC) {
- maccrd = NULL;
- enccrd = crd1;
- cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
- } else {
- safestats.st_badalg++;
- err = EINVAL;
- goto errout;
- }
- } else {
- if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
- crd1->crd_alg == CRYPTO_SHA1_HMAC ||
- crd1->crd_alg == CRYPTO_NULL_HMAC) &&
- (crd2->crd_alg == CRYPTO_DES_CBC ||
- crd2->crd_alg == CRYPTO_3DES_CBC ||
- crd2->crd_alg == CRYPTO_AES_CBC ||
- crd2->crd_alg == CRYPTO_NULL_CBC) &&
- ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
- maccrd = crd1;
- enccrd = crd2;
- } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
- crd1->crd_alg == CRYPTO_3DES_CBC ||
- crd1->crd_alg == CRYPTO_AES_CBC ||
- crd1->crd_alg == CRYPTO_NULL_CBC) &&
- (crd2->crd_alg == CRYPTO_MD5_HMAC ||
- crd2->crd_alg == CRYPTO_SHA1_HMAC ||
- crd2->crd_alg == CRYPTO_NULL_HMAC) &&
- (crd1->crd_flags & CRD_F_ENCRYPT)) {
- enccrd = crd1;
- maccrd = crd2;
- } else {
- safestats.st_badalg++;
- err = EINVAL;
- goto errout;
- }
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ cmd0 |= SAFE_SA_CMD0_OP_HASH;
+ break;
+ case CSP_MODE_CIPHER:
+ cmd0 |= SAFE_SA_CMD0_OP_CRYPT;
+ break;
+ case CSP_MODE_ETA:
cmd0 |= SAFE_SA_CMD0_OP_BOTH;
+ break;
}
- if (enccrd) {
- if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT)
- safe_setup_enckey(ses, enccrd->crd_key);
+ if (csp->csp_cipher_alg != 0) {
+ if (crp->crp_cipher_key != NULL)
+ safe_setup_enckey(ses, crp->crp_cipher_key);
- if (enccrd->crd_alg == CRYPTO_DES_CBC) {
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DES_CBC:
cmd0 |= SAFE_SA_CMD0_DES;
cmd1 |= SAFE_SA_CMD1_CBC;
- ivsize = 2*sizeof(u_int32_t);
- } else if (enccrd->crd_alg == CRYPTO_3DES_CBC) {
+ break;
+ case CRYPTO_3DES_CBC:
cmd0 |= SAFE_SA_CMD0_3DES;
cmd1 |= SAFE_SA_CMD1_CBC;
- ivsize = 2*sizeof(u_int32_t);
- } else if (enccrd->crd_alg == CRYPTO_AES_CBC) {
+ break;
+ case CRYPTO_AES_CBC:
cmd0 |= SAFE_SA_CMD0_AES;
cmd1 |= SAFE_SA_CMD1_CBC;
- if (ses->ses_klen == 128)
+ if (ses->ses_klen * 8 == 128)
cmd1 |= SAFE_SA_CMD1_AES128;
- else if (ses->ses_klen == 192)
+ else if (ses->ses_klen * 8 == 192)
cmd1 |= SAFE_SA_CMD1_AES192;
else
cmd1 |= SAFE_SA_CMD1_AES256;
- ivsize = 4*sizeof(u_int32_t);
- } else {
- cmd0 |= SAFE_SA_CMD0_CRYPT_NULL;
- ivsize = 0;
}
/*
@@ -932,32 +894,28 @@
* in the state record and set the hash/crypt offset to
* copy both the header+IV.
*/
- if (enccrd->crd_flags & CRD_F_ENCRYPT) {
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(re->re_sastate.sa_saved_iv, csp->csp_ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen,
+ re->re_sastate.sa_saved_iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(re->re_sastate.sa_saved_iv, crp->crp_iv,
+ csp->csp_ivlen);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen,
+ re->re_sastate.sa_saved_iv);
+ cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
+
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
cmd0 |= SAFE_SA_CMD0_OUTBOUND;
- if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
- iv = enccrd->crd_iv;
- else
- iv = (caddr_t) ses->ses_iv;
- if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, ivsize, iv);
- }
- bcopy(iv, re->re_sastate.sa_saved_iv, ivsize);
- cmd0 |= SAFE_SA_CMD0_IVLD_STATE | SAFE_SA_CMD0_SAVEIV;
- re->re_flags |= SAFE_QFLAGS_COPYOUTIV;
+ /*
+ * XXX: I suspect we don't need this since we
+ * don't save the returned IV.
+ */
+ cmd0 |= SAFE_SA_CMD0_SAVEIV;
} else {
cmd0 |= SAFE_SA_CMD0_INBOUND;
-
- if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
- bcopy(enccrd->crd_iv,
- re->re_sastate.sa_saved_iv, ivsize);
- } else {
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, ivsize,
- (caddr_t)re->re_sastate.sa_saved_iv);
- }
- cmd0 |= SAFE_SA_CMD0_IVLD_STATE;
}
/*
* For basic encryption use the zero pad algorithm.
@@ -973,21 +931,23 @@
bcopy(ses->ses_key, sa->sa_key, sizeof(sa->sa_key));
}
- if (maccrd) {
- if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
- safe_setup_mackey(ses, maccrd->crd_alg,
- maccrd->crd_key, maccrd->crd_klen / 8);
+ if (csp->csp_auth_alg != 0) {
+ if (crp->crp_auth_key != NULL) {
+ safe_setup_mackey(ses, csp->csp_auth_alg,
+ crp->crp_auth_key, csp->csp_auth_klen);
}
- if (maccrd->crd_alg == CRYPTO_MD5_HMAC) {
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
cmd0 |= SAFE_SA_CMD0_MD5;
cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
- } else if (maccrd->crd_alg == CRYPTO_SHA1_HMAC) {
+ break;
+ case CRYPTO_SHA1_HMAC:
cmd0 |= SAFE_SA_CMD0_SHA1;
cmd1 |= SAFE_SA_CMD1_HMAC; /* NB: enable HMAC */
- } else {
- cmd0 |= SAFE_SA_CMD0_HASH_NULL;
+ break;
}
+
/*
* Digest data is loaded from the SA and the hash
* result is saved to the state block where we
@@ -1003,38 +963,32 @@
re->re_flags |= SAFE_QFLAGS_COPYOUTICV;
}
- if (enccrd && maccrd) {
+ if (csp->csp_mode == CSP_MODE_ETA) {
/*
- * The offset from hash data to the start of
- * crypt data is the difference in the skips.
+ * The driver only supports ETA requests where there
+ * is no gap between the AAD and payload.
*/
- bypass = maccrd->crd_skip;
- coffset = enccrd->crd_skip - maccrd->crd_skip;
- if (coffset < 0) {
- DPRINTF(("%s: hash does not precede crypt; "
- "mac skip %u enc skip %u\n",
- __func__, maccrd->crd_skip, enccrd->crd_skip));
- safestats.st_skipmismatch++;
- err = EINVAL;
- goto errout;
- }
- oplen = enccrd->crd_skip + enccrd->crd_len;
- if (maccrd->crd_skip + maccrd->crd_len != oplen) {
- DPRINTF(("%s: hash amount %u != crypt amount %u\n",
- __func__, maccrd->crd_skip + maccrd->crd_len,
- oplen));
+ if (crp->crp_aad_length != 0 &&
+ crp->crp_aad_start + crp->crp_aad_length !=
+ crp->crp_payload_start) {
safestats.st_lenmismatch++;
err = EINVAL;
goto errout;
}
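+		/*
+		 * bypass is the data the engine skips entirely, coffset
+		 * is the distance from the start of hashing to the start
+		 * of encryption (the AAD length), and oplen marks the
+		 * end of the processed region.
+		 */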
+ if (crp->crp_aad_length != 0)
+ bypass = crp->crp_aad_start;
+ else
+ bypass = crp->crp_payload_start;
+ coffset = crp->crp_aad_length;
+ oplen = crp->crp_payload_start + crp->crp_payload_length;
#ifdef SAFE_DEBUG
if (safe_debug) {
- printf("mac: skip %d, len %d, inject %d\n",
- maccrd->crd_skip, maccrd->crd_len,
- maccrd->crd_inject);
- printf("enc: skip %d, len %d, inject %d\n",
- enccrd->crd_skip, enccrd->crd_len,
- enccrd->crd_inject);
+ printf("AAD: skip %d, len %d, digest %d\n",
+ crp->crp_aad_start, crp->crp_aad_length,
+ crp->crp_digest_start);
+ printf("payload: skip %d, len %d, IV %d\n",
+ crp->crp_payload_start, crp->crp_payload_length,
+ crp->crp_iv_start);
printf("bypass %d coffset %d oplen %d\n",
bypass, coffset, oplen);
}
@@ -1070,13 +1024,8 @@
*/
cmd1 |= SAFE_SA_CMD1_MUTABLE;
} else {
- if (enccrd) {
- bypass = enccrd->crd_skip;
- oplen = bypass + enccrd->crd_len;
- } else {
- bypass = maccrd->crd_skip;
- oplen = bypass + maccrd->crd_len;
- }
+ bypass = crp->crp_payload_start;
+ oplen = bypass + crp->crp_payload_length;
coffset = 0;
}
/* XXX verify multiple of 4 when using s/g */
@@ -1092,27 +1041,15 @@
err = ENOMEM;
goto errout;
}
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
- if (bus_dmamap_load_mbuf(sc->sc_srcdmat, re->re_src_map,
- re->re_src_m, safe_op_cb,
- &re->re_src, BUS_DMA_NOWAIT) != 0) {
- bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
- re->re_src_map = NULL;
- safestats.st_noload++;
- err = ENOMEM;
- goto errout;
- }
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
- if (bus_dmamap_load_uio(sc->sc_srcdmat, re->re_src_map,
- re->re_src_io, safe_op_cb,
- &re->re_src, BUS_DMA_NOWAIT) != 0) {
- bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
- re->re_src_map = NULL;
- safestats.st_noload++;
- err = ENOMEM;
- goto errout;
- }
+ if (bus_dmamap_load_crp(sc->sc_srcdmat, re->re_src_map, crp, safe_op_cb,
+ &re->re_src, BUS_DMA_NOWAIT) != 0) {
+ bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
+ re->re_src_map = NULL;
+ safestats.st_noload++;
+ err = ENOMEM;
+ goto errout;
}
+ re->re_src_mapsize = safe_crp_length(crp);
nicealign = safe_dmamap_aligned(&re->re_src);
uniform = safe_dmamap_uniform(&re->re_src);
@@ -1143,211 +1080,175 @@
re->re_desc.d_src = re->re_src_segs[0].ds_addr;
}
- if (enccrd == NULL && maccrd != NULL) {
+ if (csp->csp_mode == CSP_MODE_DIGEST) {
/*
* Hash op; no destination needed.
*/
} else {
- if (crp->crp_flags & CRYPTO_F_IOV) {
- if (!nicealign) {
- safestats.st_iovmisaligned++;
- err = EINVAL;
+ if (nicealign && uniform == 1) {
+ /*
+ * Source layout is suitable for direct
+ * sharing of the DMA map and segment list.
+ */
+ re->re_dst = re->re_src;
+ } else if (nicealign && uniform == 2) {
+ /*
+ * The source is properly aligned but requires a
+ * different particle list to handle DMA of the
+ * result. Create a new map and do the load to
+ * create the segment list. The particle
+ * descriptor setup code below will handle the
+ * rest.
+ */
+ if (bus_dmamap_create(sc->sc_dstdmat, BUS_DMA_NOWAIT,
+ &re->re_dst_map)) {
+ safestats.st_nomap++;
+ err = ENOMEM;
goto errout;
}
- if (uniform != 1) {
- /*
- * Source is not suitable for direct use as
- * the destination. Create a new scatter/gather
- * list based on the destination requirements
- * and check if that's ok.
- */
- if (bus_dmamap_create(sc->sc_dstdmat,
- BUS_DMA_NOWAIT, &re->re_dst_map)) {
- safestats.st_nomap++;
- err = ENOMEM;
- goto errout;
- }
- if (bus_dmamap_load_uio(sc->sc_dstdmat,
- re->re_dst_map, re->re_dst_io,
- safe_op_cb, &re->re_dst,
- BUS_DMA_NOWAIT) != 0) {
- bus_dmamap_destroy(sc->sc_dstdmat,
- re->re_dst_map);
- re->re_dst_map = NULL;
- safestats.st_noload++;
- err = ENOMEM;
- goto errout;
- }
- uniform = safe_dmamap_uniform(&re->re_dst);
- if (!uniform) {
- /*
- * There's no way to handle the DMA
- * requirements with this uio. We
- * could create a separate DMA area for
- * the result and then copy it back,
- * but for now we just bail and return
- * an error. Note that uio requests
- * > SAFE_MAX_DSIZE are handled because
- * the DMA map and segment list for the
- * destination wil result in a
- * destination particle list that does
- * the necessary scatter DMA.
- */
- safestats.st_iovnotuniform++;
- err = EINVAL;
- goto errout;
- }
- } else
- re->re_dst = re->re_src;
- } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
- if (nicealign && uniform == 1) {
- /*
- * Source layout is suitable for direct
- * sharing of the DMA map and segment list.
- */
- re->re_dst = re->re_src;
- } else if (nicealign && uniform == 2) {
- /*
- * The source is properly aligned but requires a
- * different particle list to handle DMA of the
- * result. Create a new map and do the load to
- * create the segment list. The particle
- * descriptor setup code below will handle the
- * rest.
- */
- if (bus_dmamap_create(sc->sc_dstdmat,
- BUS_DMA_NOWAIT, &re->re_dst_map)) {
- safestats.st_nomap++;
- err = ENOMEM;
- goto errout;
+ if (bus_dmamap_load_crp(sc->sc_dstdmat, re->re_dst_map,
+ crp, safe_op_cb, &re->re_dst, BUS_DMA_NOWAIT) !=
+ 0) {
+ bus_dmamap_destroy(sc->sc_dstdmat,
+ re->re_dst_map);
+ re->re_dst_map = NULL;
+ safestats.st_noload++;
+ err = ENOMEM;
+ goto errout;
+ }
+ } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
+ int totlen, len;
+ struct mbuf *m, *top, **mp;
+
+ /*
+ * DMA constraints require that we allocate a
+ * new mbuf chain for the destination. We
+ * allocate an entire new set of mbufs of
+ * optimal/required size and then tell the
+ * hardware to copy any bits that are not
+ * created as a byproduct of the operation.
+ */
+ if (!nicealign)
+ safestats.st_unaligned++;
+ if (!uniform)
+ safestats.st_notuniform++;
+ totlen = re->re_src_mapsize;
+ if (crp->crp_mbuf->m_flags & M_PKTHDR) {
+ len = MHLEN;
+ MGETHDR(m, M_NOWAIT, MT_DATA);
+ if (m && !m_dup_pkthdr(m, crp->crp_mbuf,
+ M_NOWAIT)) {
+ m_free(m);
+ m = NULL;
}
- if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
- re->re_dst_map, re->re_dst_m,
- safe_op_cb, &re->re_dst,
- BUS_DMA_NOWAIT) != 0) {
- bus_dmamap_destroy(sc->sc_dstdmat,
- re->re_dst_map);
- re->re_dst_map = NULL;
- safestats.st_noload++;
- err = ENOMEM;
+ } else {
+ len = MLEN;
+ MGET(m, M_NOWAIT, MT_DATA);
+ }
+ if (m == NULL) {
+ safestats.st_nombuf++;
+ err = sc->sc_nqchip ? ERESTART : ENOMEM;
+ goto errout;
+ }
+ if (totlen >= MINCLSIZE) {
+ if (!(MCLGET(m, M_NOWAIT))) {
+ m_free(m);
+ safestats.st_nomcl++;
+ err = sc->sc_nqchip ?
+ ERESTART : ENOMEM;
goto errout;
}
- } else { /* !(aligned and/or uniform) */
- int totlen, len;
- struct mbuf *m, *top, **mp;
+ len = MCLBYTES;
+ }
+ m->m_len = len;
+ top = NULL;
+ mp = &top;
- /*
- * DMA constraints require that we allocate a
- * new mbuf chain for the destination. We
- * allocate an entire new set of mbufs of
- * optimal/required size and then tell the
- * hardware to copy any bits that are not
- * created as a byproduct of the operation.
- */
- if (!nicealign)
- safestats.st_unaligned++;
- if (!uniform)
- safestats.st_notuniform++;
- totlen = re->re_src_mapsize;
- if (re->re_src_m->m_flags & M_PKTHDR) {
- len = MHLEN;
- MGETHDR(m, M_NOWAIT, MT_DATA);
- if (m && !m_dup_pkthdr(m, re->re_src_m,
- M_NOWAIT)) {
- m_free(m);
- m = NULL;
- }
- } else {
- len = MLEN;
+ while (totlen > 0) {
+ if (top) {
MGET(m, M_NOWAIT, MT_DATA);
+ if (m == NULL) {
+ m_freem(top);
+ safestats.st_nombuf++;
+ err = sc->sc_nqchip ?
+ ERESTART : ENOMEM;
+ goto errout;
+ }
+ len = MLEN;
}
- if (m == NULL) {
- safestats.st_nombuf++;
- err = sc->sc_nqchip ? ERESTART : ENOMEM;
- goto errout;
- }
- if (totlen >= MINCLSIZE) {
+ if (top && totlen >= MINCLSIZE) {
if (!(MCLGET(m, M_NOWAIT))) {
- m_free(m);
+ *mp = m;
+ m_freem(top);
safestats.st_nomcl++;
err = sc->sc_nqchip ?
- ERESTART : ENOMEM;
+ ERESTART : ENOMEM;
goto errout;
}
len = MCLBYTES;
}
- m->m_len = len;
- top = NULL;
- mp = &top;
-
- while (totlen > 0) {
- if (top) {
- MGET(m, M_NOWAIT, MT_DATA);
- if (m == NULL) {
- m_freem(top);
- safestats.st_nombuf++;
- err = sc->sc_nqchip ?
- ERESTART : ENOMEM;
- goto errout;
- }
- len = MLEN;
- }
- if (top && totlen >= MINCLSIZE) {
- if (!(MCLGET(m, M_NOWAIT))) {
- *mp = m;
- m_freem(top);
- safestats.st_nomcl++;
- err = sc->sc_nqchip ?
- ERESTART : ENOMEM;
- goto errout;
- }
- len = MCLBYTES;
- }
- m->m_len = len = min(totlen, len);
- totlen -= len;
- *mp = m;
- mp = &m->m_next;
- }
- re->re_dst_m = top;
- if (bus_dmamap_create(sc->sc_dstdmat,
- BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
- safestats.st_nomap++;
- err = ENOMEM;
- goto errout;
- }
- if (bus_dmamap_load_mbuf(sc->sc_dstdmat,
- re->re_dst_map, re->re_dst_m,
- safe_op_cb, &re->re_dst,
- BUS_DMA_NOWAIT) != 0) {
- bus_dmamap_destroy(sc->sc_dstdmat,
- re->re_dst_map);
- re->re_dst_map = NULL;
- safestats.st_noload++;
- err = ENOMEM;
- goto errout;
- }
- if (re->re_src.mapsize > oplen) {
- /*
- * There's data following what the
- * hardware will copy for us. If this
- * isn't just the ICV (that's going to
- * be written on completion), copy it
- * to the new mbufs
- */
- if (!(maccrd &&
- (re->re_src.mapsize-oplen) == 12 &&
- maccrd->crd_inject == oplen))
- safe_mcopy(re->re_src_m,
- re->re_dst_m,
- oplen);
- else
- safestats.st_noicvcopy++;
- }
+ m->m_len = len = min(totlen, len);
+ totlen -= len;
+ *mp = m;
+ mp = &m->m_next;
+ }
+ re->re_dst_m = top;
+ if (bus_dmamap_create(sc->sc_dstdmat,
+ BUS_DMA_NOWAIT, &re->re_dst_map) != 0) {
+ safestats.st_nomap++;
+ err = ENOMEM;
+ goto errout;
+ }
+ if (bus_dmamap_load_mbuf_sg(sc->sc_dstdmat,
+ re->re_dst_map, top, re->re_dst_segs,
+ &re->re_dst_nsegs, 0) != 0) {
+ bus_dmamap_destroy(sc->sc_dstdmat,
+ re->re_dst_map);
+ re->re_dst_map = NULL;
+ safestats.st_noload++;
+ err = ENOMEM;
+ goto errout;
+ }
+ re->re_dst_mapsize = re->re_src_mapsize;
+ if (re->re_src.mapsize > oplen) {
+ /*
+ * There's data following what the
+ * hardware will copy for us. If this
+ * isn't just the ICV (that's going to
+ * be written on completion), copy it
+ * to the new mbufs
+ */
+ if (!(csp->csp_mode == CSP_MODE_ETA &&
+ (re->re_src.mapsize-oplen) == ses->ses_mlen &&
+ crp->crp_digest_start == oplen))
+ safe_mcopy(crp->crp_mbuf, re->re_dst_m,
+ oplen);
+ else
+ safestats.st_noicvcopy++;
}
} else {
- safestats.st_badflags++;
- err = EINVAL;
- goto errout;
+ if (!nicealign) {
+ safestats.st_iovmisaligned++;
+ err = EINVAL;
+ goto errout;
+ } else {
+ /*
+ * There's no way to handle the DMA
+ * requirements with this uio. We
+ * could create a separate DMA area for
+ * the result and then copy it back,
+ * but for now we just bail and return
+ * an error. Note that uio requests
+ * > SAFE_MAX_DSIZE are handled because
+ * the DMA map and segment list for the
+			 * destination will result in a
+ * destination particle list that does
+ * the necessary scatter DMA.
+ */
+ safestats.st_iovnotuniform++;
+ err = EINVAL;
+ goto errout;
+ }
}
if (re->re_dst.nsegs > 1) {
@@ -1393,7 +1294,7 @@
* ready for processing.
*/
re->re_desc.d_csr = SAFE_PE_CSR_READY | SAFE_PE_CSR_SAPCI;
- if (maccrd)
+ if (csp->csp_auth_alg != 0)
re->re_desc.d_csr |= SAFE_PE_CSR_LOADSA | SAFE_PE_CSR_HASHFINAL;
re->re_desc.d_len = oplen
| SAFE_PE_LEN_READY
@@ -1412,7 +1313,7 @@
return (0);
errout:
- if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
+ if (re->re_dst_m != NULL)
m_freem(re->re_dst_m);
if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
@@ -1436,11 +1337,13 @@
static void
safe_callback(struct safe_softc *sc, struct safe_ringentry *re)
{
+ const struct crypto_session_params *csp;
struct cryptop *crp = (struct cryptop *)re->re_crp;
struct safe_session *ses;
- struct cryptodesc *crd;
+ uint8_t hash[HASH_MAX_LEN];
ses = crypto_get_driver_session(crp->crp_session);
+ csp = crypto_get_params(crp->crp_session);
safestats.st_opackets++;
safestats.st_obytes += re->re_dst.mapsize;
@@ -1454,6 +1357,9 @@
safestats.st_peoperr++;
crp->crp_etype = EIO; /* something more meaningful? */
}
+
+ /* XXX: Should crp_mbuf be updated to re->re_dst_m if it is non-NULL? */
+
if (re->re_dst_map != NULL && re->re_dst_map != re->re_src_map) {
bus_dmamap_sync(sc->sc_dstdmat, re->re_dst_map,
BUS_DMASYNC_POSTREAD);
@@ -1464,58 +1370,29 @@
bus_dmamap_unload(sc->sc_srcdmat, re->re_src_map);
bus_dmamap_destroy(sc->sc_srcdmat, re->re_src_map);
- /*
- * If result was written to a differet mbuf chain, swap
- * it in as the return value and reclaim the original.
- */
- if ((crp->crp_flags & CRYPTO_F_IMBUF) && re->re_src_m != re->re_dst_m) {
- m_freem(re->re_src_m);
- crp->crp_buf = (caddr_t)re->re_dst_m;
- }
-
- if (re->re_flags & SAFE_QFLAGS_COPYOUTIV) {
- /* copy out IV for future use */
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- int ivsize;
-
- if (crd->crd_alg == CRYPTO_DES_CBC ||
- crd->crd_alg == CRYPTO_3DES_CBC) {
- ivsize = 2*sizeof(u_int32_t);
- } else if (crd->crd_alg == CRYPTO_AES_CBC) {
- ivsize = 4*sizeof(u_int32_t);
- } else
- continue;
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crd->crd_skip + crd->crd_len - ivsize, ivsize,
- (caddr_t)ses->ses_iv);
- break;
- }
- }
-
if (re->re_flags & SAFE_QFLAGS_COPYOUTICV) {
- /* copy out ICV result */
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- if (!(crd->crd_alg == CRYPTO_MD5_HMAC ||
- crd->crd_alg == CRYPTO_SHA1_HMAC ||
- crd->crd_alg == CRYPTO_NULL_HMAC))
- continue;
- if (crd->crd_alg == CRYPTO_SHA1_HMAC) {
- /*
- * SHA-1 ICV's are byte-swapped; fix 'em up
- * before copy them to their destination.
- */
- re->re_sastate.sa_saved_indigest[0] =
- bswap32(re->re_sastate.sa_saved_indigest[0]);
- re->re_sastate.sa_saved_indigest[1] =
- bswap32(re->re_sastate.sa_saved_indigest[1]);
- re->re_sastate.sa_saved_indigest[2] =
- bswap32(re->re_sastate.sa_saved_indigest[2]);
- }
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, ses->ses_mlen,
- (caddr_t)re->re_sastate.sa_saved_indigest);
- break;
+ if (csp->csp_auth_alg == CRYPTO_SHA1_HMAC) {
+ /*
+			 * SHA-1 ICVs are byte-swapped; fix 'em up
+ * before copying them to their destination.
+ */
+ re->re_sastate.sa_saved_indigest[0] =
+ bswap32(re->re_sastate.sa_saved_indigest[0]);
+ re->re_sastate.sa_saved_indigest[1] =
+ bswap32(re->re_sastate.sa_saved_indigest[1]);
+ re->re_sastate.sa_saved_indigest[2] =
+ bswap32(re->re_sastate.sa_saved_indigest[2]);
}
+
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start,
+ ses->ses_mlen, hash);
+ if (timingsafe_bcmp(re->re_sastate.sa_saved_indigest,
+ hash, ses->ses_mlen) != 0)
+ crp->crp_etype = EBADMSG;
+ } else
+ crypto_copyback(crp, crp->crp_digest_start,
+ ses->ses_mlen, re->re_sastate.sa_saved_indigest);
}
crypto_done(crp);
}
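
The verify-versus-copyback branch above is the pattern this refactoring asks of every driver: when CRYPTO_OP_VERIFY_DIGEST is set, the driver compares its computed MAC against the one already present in the request buffer, in constant time, instead of writing the MAC out. A minimal sketch of the shared shape; drv_handle_digest is a hypothetical helper name, and dig is assumed to hold the driver's computed digest of mlen bytes:

	#include <sys/libkern.h>		/* timingsafe_bcmp */
	#include <opencrypto/cryptodev.h>
	#include <opencrypto/xform_auth.h>	/* HASH_MAX_LEN */

	static void
	drv_handle_digest(struct cryptop *crp, const void *dig, int mlen)
	{
		uint8_t expected[HASH_MAX_LEN];

		if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
			/* Fetch the digest supplied with the request... */
			crypto_copydata(crp, crp->crp_digest_start, mlen,
			    expected);
			/* ...and compare without leaking timing. */
			if (timingsafe_bcmp(dig, expected, mlen) != 0)
				crp->crp_etype = EBADMSG;
		} else {
			/* Compute mode: write the digest into the buffer. */
			crypto_copyback(crp, crp->crp_digest_start, mlen, dig);
		}
	}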
@@ -1921,7 +1798,7 @@
/*
* Free header MCR
*/
- if ((re->re_dst_m != NULL) && (re->re_src_m != re->re_dst_m))
+ if (re->re_dst_m != NULL)
m_freem(re->re_dst_m);
crp = (struct cryptop *)re->re_crp;
Index: sys/dev/safe/safevar.h
===================================================================
--- sys/dev/safe/safevar.h
+++ sys/dev/safe/safevar.h
@@ -75,10 +75,6 @@
* where each is mapped for DMA.
*/
struct safe_operand {
- union {
- struct mbuf *m;
- struct uio *io;
- } u;
bus_dmamap_t map;
bus_size_t mapsize;
int nsegs;
@@ -109,22 +105,18 @@
struct safe_operand re_src; /* source operand */
struct safe_operand re_dst; /* destination operand */
+ struct mbuf *re_dst_m;
int unused;
int re_flags;
-#define SAFE_QFLAGS_COPYOUTIV 0x1 /* copy back on completion */
#define SAFE_QFLAGS_COPYOUTICV 0x2 /* copy back on completion */
};
-#define re_src_m re_src.u.m
-#define re_src_io re_src.u.io
#define re_src_map re_src.map
#define re_src_nsegs re_src.nsegs
#define re_src_segs re_src.segs
#define re_src_mapsize re_src.mapsize
-#define re_dst_m re_dst.u.m
-#define re_dst_io re_dst.u.io
#define re_dst_map re_dst.map
#define re_dst_nsegs re_dst.nsegs
#define re_dst_segs re_dst.segs
@@ -138,7 +130,6 @@
u_int32_t ses_mlen; /* hmac length in bytes */
u_int32_t ses_hminner[5]; /* hmac inner state */
u_int32_t ses_hmouter[5]; /* hmac outer state */
- u_int32_t ses_iv[4]; /* DES/3DES/AES iv */
};
struct safe_softc {
@@ -157,6 +148,7 @@
int sc_suspended;
int sc_needwakeup; /* notify crypto layer */
int32_t sc_cid; /* crypto tag */
+ uint32_t sc_devinfo;
struct safe_dma_alloc sc_ringalloc; /* PE ring allocation state */
struct safe_ringentry *sc_ring; /* PE ring */
struct safe_ringentry *sc_ringtop; /* PE ring top */
Index: sys/dev/sec/sec.h
===================================================================
--- sys/dev/sec/sec.h
+++ sys/dev/sec/sec.h
@@ -98,6 +98,7 @@
uint8_t shd_iv[SEC_MAX_IV_LEN];
uint8_t shd_key[SEC_MAX_KEY_LEN];
uint8_t shd_mkey[SEC_MAX_KEY_LEN];
+ uint8_t shd_digest[HASH_MAX_LEN];
} __packed__;
#define shd_eu_sel0 shd_control.request.eu_sel0
@@ -144,21 +145,17 @@
};
struct sec_eu_methods {
- int (*sem_newsession)(struct sec_softc *sc,
- struct sec_session *ses, struct cryptoini *enc,
- struct cryptoini *mac);
+ bool (*sem_newsession)(const struct crypto_session_params *csp);
int (*sem_make_desc)(struct sec_softc *sc,
- struct sec_session *ses, struct sec_desc *desc,
- struct cryptop *crp, int buftype);
+ const struct crypto_session_params *csp, struct sec_desc *desc,
+ struct cryptop *crp);
};
struct sec_session {
struct sec_eu_methods *ss_eu;
uint8_t ss_key[SEC_MAX_KEY_LEN];
uint8_t ss_mkey[SEC_MAX_KEY_LEN];
- u_int ss_klen;
- u_int ss_mklen;
- u_int ss_ivlen;
+ int ss_mlen;
};
struct sec_desc_map_info {
@@ -319,11 +316,6 @@
(((sc)->sc_lt_free_cnt - (sc)->sc_lt_alloc_cnt - 1) \
& (SEC_LT_ENTRIES - 1))
-/* DMA Maping defines */
-#define SEC_MEMORY 0
-#define SEC_UIO 1
-#define SEC_MBUF 2
-
/* Size of SEC registers area */
#define SEC_IO_SIZE 0x10000
Index: sys/dev/sec/sec.c
===================================================================
--- sys/dev/sec/sec.c
+++ sys/dev/sec/sec.c
@@ -51,6 +51,7 @@
#include <machine/resource.h>
#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform_auth.h>
#include "cryptodev_if.h"
#include <dev/ofw/ofw_bus_subr.h>
@@ -74,7 +75,7 @@
static int sec_alloc_dma_mem(struct sec_softc *sc,
struct sec_dma_mem *dma_mem, bus_size_t size);
static int sec_desc_map_dma(struct sec_softc *sc,
- struct sec_dma_mem *dma_mem, void *mem, bus_size_t size, int type,
+ struct sec_dma_mem *dma_mem, struct cryptop *crp, bus_size_t size,
struct sec_desc_map_info *sdmi);
static void sec_free_dma_mem(struct sec_dma_mem *dma_mem);
static void sec_enqueue(struct sec_softc *sc);
@@ -82,48 +83,43 @@
int channel);
static int sec_eu_channel(struct sec_softc *sc, int eu);
static int sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
- u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype);
+ u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize);
static int sec_make_pointer_direct(struct sec_softc *sc,
struct sec_desc *desc, u_int n, bus_addr_t data, bus_size_t dsize);
+static int sec_probesession(device_t dev,
+ const struct crypto_session_params *csp);
static int sec_newsession(device_t dev, crypto_session_t cses,
- struct cryptoini *cri);
+ const struct crypto_session_params *csp);
static int sec_process(device_t dev, struct cryptop *crp, int hint);
-static int sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
- struct cryptoini **mac);
-static int sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
- struct cryptodesc **mac);
static int sec_build_common_ns_desc(struct sec_softc *sc,
- struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
- struct cryptodesc *enc, int buftype);
+ struct sec_desc *desc, const struct crypto_session_params *csp,
+ struct cryptop *crp);
static int sec_build_common_s_desc(struct sec_softc *sc,
- struct sec_desc *desc, struct sec_session *ses, struct cryptop *crp,
- struct cryptodesc *enc, struct cryptodesc *mac, int buftype);
+ struct sec_desc *desc, const struct crypto_session_params *csp,
+ struct cryptop *crp);
static struct sec_desc *sec_find_desc(struct sec_softc *sc, bus_addr_t paddr);
/* AESU */
-static int sec_aesu_newsession(struct sec_softc *sc,
- struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
+static bool sec_aesu_newsession(const struct crypto_session_params *csp);
static int sec_aesu_make_desc(struct sec_softc *sc,
- struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
- int buftype);
+ const struct crypto_session_params *csp, struct sec_desc *desc,
+ struct cryptop *crp);
/* DEU */
-static int sec_deu_newsession(struct sec_softc *sc,
- struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
+static bool sec_deu_newsession(const struct crypto_session_params *csp);
static int sec_deu_make_desc(struct sec_softc *sc,
- struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
- int buftype);
+ const struct crypto_session_params *csp, struct sec_desc *desc,
+ struct cryptop *crp);
/* MDEU */
-static int sec_mdeu_can_handle(u_int alg);
-static int sec_mdeu_config(struct cryptodesc *crd,
+static bool sec_mdeu_can_handle(u_int alg);
+static int sec_mdeu_config(const struct crypto_session_params *csp,
u_int *eu, u_int *mode, u_int *hashlen);
-static int sec_mdeu_newsession(struct sec_softc *sc,
- struct sec_session *ses, struct cryptoini *enc, struct cryptoini *mac);
+static bool sec_mdeu_newsession(const struct crypto_session_params *csp);
static int sec_mdeu_make_desc(struct sec_softc *sc,
- struct sec_session *ses, struct sec_desc *desc, struct cryptop *crp,
- int buftype);
+ const struct crypto_session_params *csp, struct sec_desc *desc,
+ struct cryptop *crp);
static device_method_t sec_methods[] = {
/* Device interface */
@@ -136,6 +132,7 @@
DEVMETHOD(device_shutdown, sec_shutdown),
/* Crypto methods */
+ DEVMETHOD(cryptodev_probesession, sec_probesession),
DEVMETHOD(cryptodev_newsession, sec_newsession),
DEVMETHOD(cryptodev_process, sec_process),
@@ -362,24 +359,6 @@
if (error)
goto fail6;
- /* Register in OCF (AESU) */
- crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
-
- /* Register in OCF (DEU) */
- crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
-
- /* Register in OCF (MDEU) */
- crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA2_256_HMAC, 0, 0);
- if (sc->sc_version >= 3) {
- crypto_register(sc->sc_cid, CRYPTO_SHA2_384_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA2_512_HMAC, 0, 0);
- }
-
return (0);
fail6:
@@ -545,9 +524,12 @@
static void
sec_primary_intr(void *arg)
{
+ struct sec_session *ses;
struct sec_softc *sc = arg;
struct sec_desc *desc;
+ struct cryptop *crp;
uint64_t isr;
+ uint8_t hash[HASH_MAX_LEN];
int i, wakeup = 0;
SEC_LOCK(sc, controller);
@@ -595,7 +577,26 @@
SEC_DESC_SYNC_POINTERS(desc, BUS_DMASYNC_PREREAD |
BUS_DMASYNC_PREWRITE);
- desc->sd_crp->crp_etype = desc->sd_error;
+ crp = desc->sd_crp;
+ crp->crp_etype = desc->sd_error;
+ if (crp->crp_etype == 0) {
+ ses = crypto_get_driver_session(crp->crp_session);
+ if (ses->ss_mlen != 0) {
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp,
+ crp->crp_digest_start,
+ ses->ss_mlen, hash);
+ if (timingsafe_bcmp(
+ desc->sd_desc->shd_digest,
+ hash, ses->ss_mlen) != 0)
+ crp->crp_etype = EBADMSG;
+ } else
+ crypto_copyback(crp,
+ crp->crp_digest_start,
+ ses->ss_mlen,
+ desc->sd_desc->shd_digest);
+ }
+ }
crypto_done(desc->sd_crp);
SEC_DESC_FREE_POINTERS(desc);
@@ -786,14 +787,6 @@
sdmi->sdmi_lt_last = lt;
}
-static void
-sec_dma_map_desc_cb2(void *arg, bus_dma_segment_t *segs, int nseg,
- bus_size_t size, int error)
-{
-
- sec_dma_map_desc_cb(arg, segs, nseg, error);
-}
-
static int
sec_alloc_dma_mem(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
bus_size_t size)
@@ -851,22 +844,22 @@
}
static int
-sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem, void *mem,
- bus_size_t size, int type, struct sec_desc_map_info *sdmi)
+sec_desc_map_dma(struct sec_softc *sc, struct sec_dma_mem *dma_mem,
+ struct cryptop *crp, bus_size_t size, struct sec_desc_map_info *sdmi)
{
int error;
if (dma_mem->dma_vaddr != NULL)
return (EBUSY);
- switch (type) {
- case SEC_MEMORY:
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_CONTIG:
break;
- case SEC_UIO:
+ case CRYPTO_BUF_UIO:
size = SEC_FREE_LT_CNT(sc) * SEC_MAX_DMA_BLOCK_SIZE;
break;
- case SEC_MBUF:
- size = m_length((struct mbuf*)mem, NULL);
+ case CRYPTO_BUF_MBUF:
+ size = m_length(crp->crp_mbuf, NULL);
break;
default:
return (EINVAL);
@@ -899,20 +892,8 @@
return (error);
}
- switch (type) {
- case SEC_MEMORY:
- error = bus_dmamap_load(dma_mem->dma_tag, dma_mem->dma_map,
- mem, size, sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
- break;
- case SEC_UIO:
- error = bus_dmamap_load_uio(dma_mem->dma_tag, dma_mem->dma_map,
- mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
- break;
- case SEC_MBUF:
- error = bus_dmamap_load_mbuf(dma_mem->dma_tag, dma_mem->dma_map,
- mem, sec_dma_map_desc_cb2, sdmi, BUS_DMA_NOWAIT);
- break;
- }
+ error = bus_dmamap_load_crp(dma_mem->dma_tag, dma_mem->dma_map, crp,
+ sec_dma_map_desc_cb, sdmi, BUS_DMA_NOWAIT);
if (error) {
device_printf(sc->sc_dev, "cannot get address of the DMA"
@@ -923,7 +904,7 @@
}
dma_mem->dma_is_map = 1;
- dma_mem->dma_vaddr = mem;
+ dma_mem->dma_vaddr = crp;
return (0);
}
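
The hunk above shows the point of bus_dmamap_load_crp(): the three per-buffer-type loads (flat memory, uio, mbuf) collapse into one call that dispatches on crp_buf_type internally, and the callback is a plain bus_dmamap_callback_t with no mapsize argument (which is also why ubsec_op_cb loses that parameter later in this diff). A condensed sketch of what a driver call site reduces to; drv_dma_cb and struct drv_operand are illustrative names, not from the patch:

	static void
	drv_dma_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
	{
		struct drv_operand *op = arg;	/* per-request segment state */

		if (error != 0)
			return;
		op->nsegs = nsegs;
		memcpy(op->segs, segs, nsegs * sizeof(segs[0]));
	}

	...
	/* One loader now covers contiguous, mbuf, and uio requests. */
	if (bus_dmamap_load_crp(sc->sc_dmat, op->map, crp, drv_dma_cb, op,
	    BUS_DMA_NOWAIT) != 0)
		return (ENOMEM);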
@@ -1130,7 +1111,7 @@
static int
sec_make_pointer(struct sec_softc *sc, struct sec_desc *desc,
- u_int n, void *data, bus_size_t doffset, bus_size_t dsize, int dtype)
+ u_int n, struct cryptop *crp, bus_size_t doffset, bus_size_t dsize)
{
struct sec_desc_map_info sdmi = { sc, dsize, doffset, NULL, NULL, 0 };
struct sec_hw_desc_ptr *ptr;
@@ -1138,14 +1119,8 @@
SEC_LOCK_ASSERT(sc, descriptors);
- /* For flat memory map only requested region */
- if (dtype == SEC_MEMORY) {
- data = (uint8_t*)(data) + doffset;
- sdmi.sdmi_offset = 0;
- }
-
- error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), data, dsize,
- dtype, &sdmi);
+ error = sec_desc_map_dma(sc, &(desc->sd_ptr_dmem[n]), crp, dsize,
+ &sdmi);
if (error)
return (error);
@@ -1162,115 +1137,116 @@
return (0);
}
-static int
-sec_split_cri(struct cryptoini *cri, struct cryptoini **enc,
- struct cryptoini **mac)
+static bool
+sec_cipher_supported(const struct crypto_session_params *csp)
{
- struct cryptoini *e, *m;
-
- e = cri;
- m = cri->cri_next;
-
- /* We can haldle only two operations */
- if (m && m->cri_next)
- return (EINVAL);
- if (sec_mdeu_can_handle(e->cri_alg)) {
- cri = m;
- m = e;
- e = cri;
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_CBC:
+ /* AESU */
+ if (csp->csp_ivlen != AES_BLOCK_LEN)
+ return (false);
+ break;
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ /* DEU */
+ if (csp->csp_ivlen != DES_BLOCK_LEN)
+ return (false);
+ break;
+ default:
+ return (false);
}
- if (m && !sec_mdeu_can_handle(m->cri_alg))
- return (EINVAL);
+ if (csp->csp_cipher_klen == 0 || csp->csp_cipher_klen > SEC_MAX_KEY_LEN)
+ return (false);
- *enc = e;
- *mac = m;
-
- return (0);
+ return (true);
}
-static int
-sec_split_crp(struct cryptop *crp, struct cryptodesc **enc,
- struct cryptodesc **mac)
+static bool
+sec_auth_supported(struct sec_softc *sc,
+ const struct crypto_session_params *csp)
{
- struct cryptodesc *e, *m, *t;
- e = crp->crp_desc;
- m = e->crd_next;
-
- /* We can haldle only two operations */
- if (m && m->crd_next)
- return (EINVAL);
-
- if (sec_mdeu_can_handle(e->crd_alg)) {
- t = m;
- m = e;
- e = t;
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ if (sc->sc_version < 3)
+ return (false);
+ /* FALLTHROUGH */
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ if (csp->csp_auth_klen > SEC_MAX_KEY_LEN)
+ return (false);
+ break;
+ case CRYPTO_MD5:
+ case CRYPTO_SHA1:
+ break;
+ default:
+ return (false);
}
-
- if (m && !sec_mdeu_can_handle(m->crd_alg))
- return (EINVAL);
-
- *enc = e;
- *mac = m;
-
- return (0);
+ return (true);
}
static int
-sec_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+sec_probesession(device_t dev, const struct crypto_session_params *csp)
{
struct sec_softc *sc = device_get_softc(dev);
+
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ if (!sec_auth_supported(sc, csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!sec_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_ETA:
+ if (!sec_auth_supported(sc, csp) || !sec_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (CRYPTODEV_PROBE_HARDWARE);
+}
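
sec_probesession() illustrates the new two-step session setup: probesession must be side-effect free, reject unsupported parameter combinations with an errno, and otherwise bid with a CRYPTODEV_PROBE_* priority; only the winning driver's newsession is then called. From the consumer side the pairing looks like this (a minimal sketch; the parameter values are illustrative):

	struct crypto_session_params csp;
	crypto_session_t sid;
	int error;

	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_CIPHER;
	csp.csp_cipher_alg = CRYPTO_AES_CBC;
	csp.csp_ivlen = AES_BLOCK_LEN;
	csp.csp_cipher_key = key;	/* caller-supplied key buffer */
	csp.csp_cipher_klen = 32;	/* key length is now in bytes */

	/*
	 * The framework probes every eligible driver with this csp and
	 * calls newsession only on the best bidder.
	 */
	error = crypto_newsession(&sid, &csp, CRYPTOCAP_F_HARDWARE);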
+
+static int
+sec_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
+{
struct sec_eu_methods *eu = sec_eus;
- struct cryptoini *enc = NULL;
- struct cryptoini *mac = NULL;
struct sec_session *ses;
- int error = -1;
-
- error = sec_split_cri(cri, &enc, &mac);
- if (error)
- return (error);
-
- /* Check key lengths */
- if (enc && enc->cri_key && (enc->cri_klen / 8) > SEC_MAX_KEY_LEN)
- return (E2BIG);
-
- if (mac && mac->cri_key && (mac->cri_klen / 8) > SEC_MAX_KEY_LEN)
- return (E2BIG);
-
- /* Only SEC 3.0 supports digests larger than 256 bits */
- if (sc->sc_version < 3 && mac && mac->cri_klen > 256)
- return (E2BIG);
ses = crypto_get_driver_session(cses);
/* Find EU for this session */
while (eu->sem_make_desc != NULL) {
- error = eu->sem_newsession(sc, ses, enc, mac);
- if (error >= 0)
+ if (eu->sem_newsession(csp))
break;
-
eu++;
}
-
- /* If not found, return EINVAL */
- if (error < 0)
- return (EINVAL);
+ KASSERT(eu->sem_make_desc != NULL, ("failed to find eu for session"));
/* Save cipher key */
- if (enc && enc->cri_key) {
- ses->ss_klen = enc->cri_klen / 8;
- memcpy(ses->ss_key, enc->cri_key, ses->ss_klen);
- }
+ if (csp->csp_cipher_key != NULL)
+ memcpy(ses->ss_key, csp->csp_cipher_key, csp->csp_cipher_klen);
/* Save digest key */
- if (mac && mac->cri_key) {
- ses->ss_mklen = mac->cri_klen / 8;
- memcpy(ses->ss_mkey, mac->cri_key, ses->ss_mklen);
+ if (csp->csp_auth_key != NULL)
+ memcpy(ses->ss_mkey, csp->csp_auth_key, csp->csp_auth_klen);
+
+ if (csp->csp_auth_alg != 0) {
+ if (csp->csp_auth_mlen == 0)
+ ses->ss_mlen = crypto_auth_hash(csp)->hashsize;
+ else
+ ses->ss_mlen = csp->csp_auth_mlen;
}
- ses->ss_eu = eu;
return (0);
}
@@ -1279,11 +1255,12 @@
{
struct sec_softc *sc = device_get_softc(dev);
struct sec_desc *desc = NULL;
- struct cryptodesc *mac, *enc;
+ const struct crypto_session_params *csp;
struct sec_session *ses;
- int buftype, error = 0;
+ int error = 0;
ses = crypto_get_driver_session(crp->crp_session);
+ csp = crypto_get_params(crp->crp_session);
/* Check for input length */
if (crp->crp_ilen > SEC_MAX_DMA_BLOCK_SIZE) {
@@ -1292,13 +1269,6 @@
return (0);
}
- /* Get descriptors */
- if (sec_split_crp(crp, &enc, &mac)) {
- crp->crp_etype = EINVAL;
- crypto_done(crp);
- return (0);
- }
-
SEC_LOCK(sc, descriptors);
SEC_DESC_SYNC(sc, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
@@ -1315,56 +1285,29 @@
desc->sd_error = 0;
desc->sd_crp = crp;
- if (crp->crp_flags & CRYPTO_F_IOV)
- buftype = SEC_UIO;
- else if (crp->crp_flags & CRYPTO_F_IMBUF)
- buftype = SEC_MBUF;
- else
- buftype = SEC_MEMORY;
-
- if (enc && enc->crd_flags & CRD_F_ENCRYPT) {
- if (enc->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
- ses->ss_ivlen);
- else
- arc4rand(desc->sd_desc->shd_iv, ses->ss_ivlen, 0);
-
- if ((enc->crd_flags & CRD_F_IV_PRESENT) == 0)
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- enc->crd_inject, ses->ss_ivlen,
+ if (csp->csp_cipher_alg != 0) {
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(desc->sd_desc->shd_iv, csp->csp_ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen,
desc->sd_desc->shd_iv);
- } else if (enc) {
- if (enc->crd_flags & CRD_F_IV_EXPLICIT)
- memcpy(desc->sd_desc->shd_iv, enc->crd_iv,
- ses->ss_ivlen);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(desc->sd_desc->shd_iv, crp->crp_iv,
+ csp->csp_ivlen);
else
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- enc->crd_inject, ses->ss_ivlen,
+ crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen,
desc->sd_desc->shd_iv);
}
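
This three-way IV selection replaces the old CRD_F_IV_EXPLICIT/CRD_F_IV_PRESENT flag juggling and recurs in each converted driver. The contract, condensed (iv here is assumed to be a driver-local buffer of csp->csp_ivlen bytes):

	if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
		/* Encrypt-only: the driver invents the IV and writes it back. */
		arc4rand(iv, csp->csp_ivlen, 0);
		crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
	} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
		/* IV travels out of band in crp_iv; crp_iv_start is unused. */
		memcpy(iv, crp->crp_iv, csp->csp_ivlen);
	} else {
		/* IV sits in the data stream at crp_iv_start. */
		crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
	}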
- if (enc && enc->crd_flags & CRD_F_KEY_EXPLICIT) {
- if ((enc->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
- ses->ss_klen = enc->crd_klen / 8;
- memcpy(ses->ss_key, enc->crd_key, ses->ss_klen);
- } else
- error = E2BIG;
- }
+ if (crp->crp_cipher_key != NULL)
+ memcpy(ses->ss_key, crp->crp_cipher_key, csp->csp_cipher_klen);
- if (!error && mac && mac->crd_flags & CRD_F_KEY_EXPLICIT) {
- if ((mac->crd_klen / 8) <= SEC_MAX_KEY_LEN) {
- ses->ss_mklen = mac->crd_klen / 8;
- memcpy(ses->ss_mkey, mac->crd_key, ses->ss_mklen);
- } else
- error = E2BIG;
- }
+ if (crp->crp_auth_key != NULL)
+ memcpy(ses->ss_mkey, crp->crp_auth_key, csp->csp_auth_klen);
- if (!error) {
- memcpy(desc->sd_desc->shd_key, ses->ss_key, ses->ss_klen);
- memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, ses->ss_mklen);
+ memcpy(desc->sd_desc->shd_key, ses->ss_key, csp->csp_cipher_klen);
+ memcpy(desc->sd_desc->shd_mkey, ses->ss_mkey, csp->csp_auth_klen);
- error = ses->ss_eu->sem_make_desc(sc, ses, desc, crp, buftype);
- }
+ error = ses->ss_eu->sem_make_desc(sc, csp, desc, crp);
if (error) {
SEC_DESC_FREE_POINTERS(desc);
@@ -1400,8 +1343,7 @@
static int
sec_build_common_ns_desc(struct sec_softc *sc, struct sec_desc *desc,
- struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
- int buftype)
+ const struct crypto_session_params *csp, struct cryptop *crp)
{
struct sec_hw_desc *hd = desc->sd_desc;
int error;
@@ -1417,25 +1359,25 @@
/* Pointer 1: IV IN */
error = sec_make_pointer_direct(sc, desc, 1, desc->sd_desc_paddr +
- offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
+ offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
if (error)
return (error);
/* Pointer 2: Cipher Key */
error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
- offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
+ offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
if (error)
return (error);
/* Pointer 3: Data IN */
- error = sec_make_pointer(sc, desc, 3, crp->crp_buf, enc->crd_skip,
- enc->crd_len, buftype);
+ error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
+ crp->crp_payload_length);
if (error)
return (error);
/* Pointer 4: Data OUT */
- error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
- enc->crd_len, buftype);
+ error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
+ crp->crp_payload_length);
if (error)
return (error);
@@ -1452,20 +1394,13 @@
static int
sec_build_common_s_desc(struct sec_softc *sc, struct sec_desc *desc,
- struct sec_session *ses, struct cryptop *crp, struct cryptodesc *enc,
- struct cryptodesc *mac, int buftype)
+ const struct crypto_session_params *csp, struct cryptop *crp)
{
struct sec_hw_desc *hd = desc->sd_desc;
u_int eu, mode, hashlen;
int error;
- if (mac->crd_len < enc->crd_len)
- return (EINVAL);
-
- if (mac->crd_skip + mac->crd_len != enc->crd_skip + enc->crd_len)
- return (EINVAL);
-
- error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
+ error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
if (error)
return (error);
@@ -1475,144 +1410,107 @@
/* Pointer 0: HMAC Key */
error = sec_make_pointer_direct(sc, desc, 0, desc->sd_desc_paddr +
- offsetof(struct sec_hw_desc, shd_mkey), ses->ss_mklen);
+ offsetof(struct sec_hw_desc, shd_mkey), csp->csp_auth_klen);
if (error)
return (error);
/* Pointer 1: HMAC-Only Data IN */
- error = sec_make_pointer(sc, desc, 1, crp->crp_buf, mac->crd_skip,
- mac->crd_len - enc->crd_len, buftype);
+ error = sec_make_pointer(sc, desc, 1, crp, crp->crp_aad_start,
+ crp->crp_aad_length);
if (error)
return (error);
/* Pointer 2: Cipher Key */
error = sec_make_pointer_direct(sc, desc, 2, desc->sd_desc_paddr +
- offsetof(struct sec_hw_desc, shd_key), ses->ss_klen);
+ offsetof(struct sec_hw_desc, shd_key), csp->csp_cipher_klen);
if (error)
return (error);
/* Pointer 3: IV IN */
error = sec_make_pointer_direct(sc, desc, 3, desc->sd_desc_paddr +
- offsetof(struct sec_hw_desc, shd_iv), ses->ss_ivlen);
+ offsetof(struct sec_hw_desc, shd_iv), csp->csp_ivlen);
if (error)
return (error);
/* Pointer 4: Data IN */
- error = sec_make_pointer(sc, desc, 4, crp->crp_buf, enc->crd_skip,
- enc->crd_len, buftype);
+ error = sec_make_pointer(sc, desc, 4, crp, crp->crp_payload_start,
+ crp->crp_payload_length);
if (error)
return (error);
/* Pointer 5: Data OUT */
- error = sec_make_pointer(sc, desc, 5, crp->crp_buf, enc->crd_skip,
- enc->crd_len, buftype);
+ error = sec_make_pointer(sc, desc, 5, crp, crp->crp_payload_start,
+ crp->crp_payload_length);
if (error)
return (error);
/* Pointer 6: HMAC OUT */
- error = sec_make_pointer(sc, desc, 6, crp->crp_buf, mac->crd_inject,
- hashlen, buftype);
+ error = sec_make_pointer_direct(sc, desc, 6, desc->sd_desc_paddr +
+ offsetof(struct sec_hw_desc, shd_digest), hashlen);
return (error);
}
/* AESU */
-static int
-sec_aesu_newsession(struct sec_softc *sc, struct sec_session *ses,
- struct cryptoini *enc, struct cryptoini *mac)
+static bool
+sec_aesu_newsession(const struct crypto_session_params *csp)
{
- if (enc == NULL)
- return (-1);
-
- if (enc->cri_alg != CRYPTO_AES_CBC)
- return (-1);
-
- ses->ss_ivlen = AES_BLOCK_LEN;
-
- return (0);
+ return (csp->csp_cipher_alg == CRYPTO_AES_CBC);
}
static int
-sec_aesu_make_desc(struct sec_softc *sc, struct sec_session *ses,
- struct sec_desc *desc, struct cryptop *crp, int buftype)
+sec_aesu_make_desc(struct sec_softc *sc,
+ const struct crypto_session_params *csp, struct sec_desc *desc,
+ struct cryptop *crp)
{
struct sec_hw_desc *hd = desc->sd_desc;
- struct cryptodesc *enc, *mac;
int error;
- error = sec_split_crp(crp, &enc, &mac);
- if (error)
- return (error);
-
- if (!enc)
- return (EINVAL);
-
hd->shd_eu_sel0 = SEC_EU_AESU;
hd->shd_mode0 = SEC_AESU_MODE_CBC;
- if (enc->crd_alg != CRYPTO_AES_CBC)
- return (EINVAL);
-
- if (enc->crd_flags & CRD_F_ENCRYPT) {
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
hd->shd_mode0 |= SEC_AESU_MODE_ED;
hd->shd_dir = 0;
} else
hd->shd_dir = 1;
- if (mac)
- error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
- buftype);
+ if (csp->csp_mode == CSP_MODE_ETA)
+ error = sec_build_common_s_desc(sc, desc, csp, crp);
else
- error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
- buftype);
+ error = sec_build_common_ns_desc(sc, desc, csp, crp);
return (error);
}
/* DEU */
-static int
-sec_deu_newsession(struct sec_softc *sc, struct sec_session *ses,
- struct cryptoini *enc, struct cryptoini *mac)
+static bool
+sec_deu_newsession(const struct crypto_session_params *csp)
{
- if (enc == NULL)
- return (-1);
-
- switch (enc->cri_alg) {
+ switch (csp->csp_cipher_alg) {
case CRYPTO_DES_CBC:
case CRYPTO_3DES_CBC:
- break;
+ return (true);
default:
- return (-1);
+ return (false);
}
-
- ses->ss_ivlen = DES_BLOCK_LEN;
-
- return (0);
}
static int
-sec_deu_make_desc(struct sec_softc *sc, struct sec_session *ses,
- struct sec_desc *desc, struct cryptop *crp, int buftype)
+sec_deu_make_desc(struct sec_softc *sc, const struct crypto_session_params *csp,
+ struct sec_desc *desc, struct cryptop *crp)
{
struct sec_hw_desc *hd = desc->sd_desc;
- struct cryptodesc *enc, *mac;
int error;
- error = sec_split_crp(crp, &enc, &mac);
- if (error)
- return (error);
-
- if (!enc)
- return (EINVAL);
-
hd->shd_eu_sel0 = SEC_EU_DEU;
hd->shd_mode0 = SEC_DEU_MODE_CBC;
- switch (enc->crd_alg) {
+ switch (csp->csp_cipher_alg) {
case CRYPTO_3DES_CBC:
hd->shd_mode0 |= SEC_DEU_MODE_TS;
break;
@@ -1622,25 +1520,23 @@
return (EINVAL);
}
- if (enc->crd_flags & CRD_F_ENCRYPT) {
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
hd->shd_mode0 |= SEC_DEU_MODE_ED;
hd->shd_dir = 0;
} else
hd->shd_dir = 1;
- if (mac)
- error = sec_build_common_s_desc(sc, desc, ses, crp, enc, mac,
- buftype);
+ if (csp->csp_mode == CSP_MODE_ETA)
+ error = sec_build_common_s_desc(sc, desc, csp, crp);
else
- error = sec_build_common_ns_desc(sc, desc, ses, crp, enc,
- buftype);
+ error = sec_build_common_ns_desc(sc, desc, csp, crp);
return (error);
}
/* MDEU */
-static int
+static bool
sec_mdeu_can_handle(u_int alg)
{
switch (alg) {
@@ -1651,20 +1547,21 @@
case CRYPTO_SHA2_256_HMAC:
case CRYPTO_SHA2_384_HMAC:
case CRYPTO_SHA2_512_HMAC:
- return (1);
+ return (true);
default:
- return (0);
+ return (false);
}
}
static int
-sec_mdeu_config(struct cryptodesc *crd, u_int *eu, u_int *mode, u_int *hashlen)
+sec_mdeu_config(const struct crypto_session_params *csp, u_int *eu, u_int *mode,
+ u_int *hashlen)
{
*mode = SEC_MDEU_MODE_PD | SEC_MDEU_MODE_INIT;
*eu = SEC_EU_NONE;
- switch (crd->crd_alg) {
+ switch (csp->csp_auth_alg) {
case CRYPTO_MD5_HMAC:
*mode |= SEC_MDEU_MODE_HMAC;
/* FALLTHROUGH */
@@ -1703,34 +1600,23 @@
return (0);
}
-static int
-sec_mdeu_newsession(struct sec_softc *sc, struct sec_session *ses,
- struct cryptoini *enc, struct cryptoini *mac)
+static bool
+sec_mdeu_newsession(const struct crypto_session_params *csp)
{
- if (mac && sec_mdeu_can_handle(mac->cri_alg))
- return (0);
-
- return (-1);
+ return (sec_mdeu_can_handle(csp->csp_auth_alg));
}
static int
-sec_mdeu_make_desc(struct sec_softc *sc, struct sec_session *ses,
- struct sec_desc *desc, struct cryptop *crp, int buftype)
+sec_mdeu_make_desc(struct sec_softc *sc,
+ const struct crypto_session_params *csp,
+ struct sec_desc *desc, struct cryptop *crp)
{
- struct cryptodesc *enc, *mac;
struct sec_hw_desc *hd = desc->sd_desc;
u_int eu, mode, hashlen;
int error;
- error = sec_split_crp(crp, &enc, &mac);
- if (error)
- return (error);
-
- if (enc)
- return (EINVAL);
-
- error = sec_mdeu_config(mac, &eu, &mode, &hashlen);
+ error = sec_mdeu_config(csp, &eu, &mode, &hashlen);
if (error)
return (error);
@@ -1754,7 +1640,7 @@
if (hd->shd_mode0 & SEC_MDEU_MODE_HMAC)
error = sec_make_pointer_direct(sc, desc, 2,
desc->sd_desc_paddr + offsetof(struct sec_hw_desc,
- shd_mkey), ses->ss_mklen);
+ shd_mkey), csp->csp_auth_klen);
else
error = sec_make_pointer_direct(sc, desc, 2, 0, 0);
@@ -1762,8 +1648,8 @@
return (error);
/* Pointer 3: Input Data */
- error = sec_make_pointer(sc, desc, 3, crp->crp_buf, mac->crd_skip,
- mac->crd_len, buftype);
+ error = sec_make_pointer(sc, desc, 3, crp, crp->crp_payload_start,
+ crp->crp_payload_length);
if (error)
return (error);
@@ -1773,8 +1659,8 @@
return (error);
/* Pointer 5: Hash out */
- error = sec_make_pointer(sc, desc, 5, crp->crp_buf,
- mac->crd_inject, hashlen, buftype);
+ error = sec_make_pointer_direct(sc, desc, 5, desc->sd_desc_paddr +
+ offsetof(struct sec_hw_desc, shd_digest), hashlen);
if (error)
return (error);
Index: sys/dev/ubsec/ubsec.c
===================================================================
--- sys/dev/ubsec/ubsec.c
+++ sys/dev/ubsec/ubsec.c
@@ -61,6 +61,7 @@
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/endian.h>
+#include <sys/uio.h>
#include <vm/vm.h>
#include <vm/pmap.h>
@@ -70,10 +71,8 @@
#include <sys/bus.h>
#include <sys/rman.h>
-#include <crypto/sha1.h>
#include <opencrypto/cryptodev.h>
-#include <opencrypto/cryptosoft.h>
-#include <sys/md5.h>
+#include <opencrypto/xform_auth.h>
#include <sys/random.h>
#include <sys/kobj.h>
@@ -111,7 +110,9 @@
static int ubsec_resume(device_t);
static int ubsec_shutdown(device_t);
-static int ubsec_newsession(device_t, crypto_session_t, struct cryptoini *);
+static int ubsec_probesession(device_t, const struct crypto_session_params *);
+static int ubsec_newsession(device_t, crypto_session_t,
+ const struct crypto_session_params *);
static int ubsec_process(device_t, struct cryptop *, int);
static int ubsec_kprocess(device_t, struct cryptkop *, int);
@@ -125,6 +126,7 @@
DEVMETHOD(device_shutdown, ubsec_shutdown),
/* crypto device methods */
+ DEVMETHOD(cryptodev_probesession, ubsec_probesession),
DEVMETHOD(cryptodev_newsession, ubsec_newsession),
DEVMETHOD(cryptodev_process, ubsec_process),
DEVMETHOD(cryptodev_kprocess, ubsec_kprocess),
@@ -348,13 +350,6 @@
goto bad2;
}
- sc->sc_cid = crypto_get_driverid(dev, sizeof(struct ubsec_session),
- CRYPTOCAP_F_HARDWARE);
- if (sc->sc_cid < 0) {
- device_printf(dev, "could not get crypto driver id\n");
- goto bad3;
- }
-
/*
* Setup DMA descriptor area.
*/
@@ -370,7 +365,7 @@
NULL, NULL, /* lockfunc, lockarg */
&sc->sc_dmat)) {
device_printf(dev, "cannot allocate DMA tag\n");
- goto bad4;
+ goto bad3;
}
SIMPLEQ_INIT(&sc->sc_freequeue);
dmap = sc->sc_dmaa;
@@ -404,11 +399,6 @@
device_printf(sc->sc_dev, "%s\n", ubsec_partname(sc));
- crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
-
/*
* Reset Broadcom chip
*/
@@ -424,6 +414,13 @@
*/
ubsec_init_board(sc);
+ sc->sc_cid = crypto_get_driverid(dev, sizeof(struct ubsec_session),
+ CRYPTOCAP_F_HARDWARE);
+ if (sc->sc_cid < 0) {
+ device_printf(dev, "could not get crypto driver id\n");
+ goto bad4;
+ }
+
#ifndef UBSEC_NO_RNG
if (sc->sc_flags & UBS_FLAGS_RNG) {
sc->sc_statmask |= BS_STAT_MCR2_DONE;
@@ -477,7 +474,15 @@
}
return (0);
bad4:
- crypto_unregister_all(sc->sc_cid);
+ while (!SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
+ struct ubsec_q *q;
+
+ q = SIMPLEQ_FIRST(&sc->sc_freequeue);
+ SIMPLEQ_REMOVE_HEAD(&sc->sc_freequeue, q_next);
+ ubsec_dma_free(sc, &q->q_dma->d_alloc);
+ free(q, M_DEVBUF);
+ }
+ bus_dma_tag_destroy(sc->sc_dmat);
bad3:
bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bad2:
@@ -498,13 +503,14 @@
/* XXX wait/abort active ops */
+ crypto_unregister_all(sc->sc_cid);
+
/* disable interrupts */
WRITE_REG(sc, BS_CTRL, READ_REG(sc, BS_CTRL) &~
(BS_CTRL_MCR2INT | BS_CTRL_MCR1INT | BS_CTRL_DMAERR));
callout_stop(&sc->sc_rngto);
-
- crypto_unregister_all(sc->sc_cid);
+ bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
#ifdef UBSEC_RNDTEST
if (sc->sc_rndtest)
@@ -531,7 +537,6 @@
mtx_destroy(&sc->sc_mcr2lock);
bus_generic_detach(dev);
- bus_teardown_intr(dev, sc->sc_irq, sc->sc_ih);
bus_release_resource(dev, SYS_RES_IRQ, 0, sc->sc_irq);
bus_dma_tag_destroy(sc->sc_dmat);
@@ -826,7 +831,7 @@
}
static void
-ubsec_setup_enckey(struct ubsec_session *ses, int algo, caddr_t key)
+ubsec_setup_enckey(struct ubsec_session *ses, int algo, const void *key)
{
/* Go ahead and compute key in ubsec's byte order */
@@ -846,112 +851,134 @@
}
static void
-ubsec_setup_mackey(struct ubsec_session *ses, int algo, caddr_t key, int klen)
+ubsec_setup_mackey(struct ubsec_session *ses, int algo, const char *key,
+ int klen)
{
MD5_CTX md5ctx;
SHA1_CTX sha1ctx;
- int i;
-
- for (i = 0; i < klen; i++)
- key[i] ^= HMAC_IPAD_VAL;
if (algo == CRYPTO_MD5_HMAC) {
- MD5Init(&md5ctx);
- MD5Update(&md5ctx, key, klen);
- MD5Update(&md5ctx, hmac_ipad_buffer, MD5_BLOCK_LEN - klen);
+ hmac_init_ipad(&auth_hash_hmac_md5, key, klen, &md5ctx);
bcopy(md5ctx.state, ses->ses_hminner, sizeof(md5ctx.state));
- } else {
- SHA1Init(&sha1ctx);
- SHA1Update(&sha1ctx, key, klen);
- SHA1Update(&sha1ctx, hmac_ipad_buffer,
- SHA1_BLOCK_LEN - klen);
- bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
- }
- for (i = 0; i < klen; i++)
- key[i] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
-
- if (algo == CRYPTO_MD5_HMAC) {
- MD5Init(&md5ctx);
- MD5Update(&md5ctx, key, klen);
- MD5Update(&md5ctx, hmac_opad_buffer, MD5_BLOCK_LEN - klen);
+ hmac_init_opad(&auth_hash_hmac_md5, key, klen, &md5ctx);
bcopy(md5ctx.state, ses->ses_hmouter, sizeof(md5ctx.state));
+
+ explicit_bzero(&md5ctx, sizeof(md5ctx));
} else {
- SHA1Init(&sha1ctx);
- SHA1Update(&sha1ctx, key, klen);
- SHA1Update(&sha1ctx, hmac_opad_buffer,
- SHA1_BLOCK_LEN - klen);
+ hmac_init_ipad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
+ bcopy(sha1ctx.h.b32, ses->ses_hminner, sizeof(sha1ctx.h.b32));
+
+ hmac_init_opad(&auth_hash_hmac_sha1, key, klen, &sha1ctx);
bcopy(sha1ctx.h.b32, ses->ses_hmouter, sizeof(sha1ctx.h.b32));
+
+ explicit_bzero(&sha1ctx, sizeof(sha1ctx));
+ }
+}
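
hmac_init_ipad()/hmac_init_opad() from opencrypto/xform_auth.h absorb the key-XOR-pad boilerplate that ubsec_setup_mackey() used to open-code, leaving the driver to snapshot the two midstates. Roughly what hmac_init_ipad() does internally (a sketch under that assumption, not the exact kernel code):

	static void
	sketch_hmac_init_ipad(struct auth_hash *axf, const char *key, int klen,
	    void *ctx)
	{
		char buf[HMAC_MAX_BLOCK_LEN];
		int i;

		/* Zero-pad the key to one hash block, XOR in the inner pad. */
		memset(buf, HMAC_IPAD_VAL, axf->blocksize);
		for (i = 0; i < klen; i++)
			buf[i] = key[i] ^ HMAC_IPAD_VAL;
		axf->Init(ctx);
		axf->Update(ctx, buf, axf->blocksize);
		explicit_bzero(buf, sizeof(buf));
	}

hmac_init_opad() is the same with HMAC_OPAD_VAL; the saved inner and outer contexts are the HMAC midstates the hardware resumes from.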
+
+static bool
+ubsec_auth_supported(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ return (true);
+ default:
+ return (false);
+ }
+}
+
+static bool
+ubsec_cipher_supported(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ return (csp->csp_ivlen == 8);
+ default:
+ return (false);
+ }
+}
+
+static int
+ubsec_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ if (!ubsec_auth_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!ubsec_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_ETA:
+ if (!ubsec_auth_supported(csp) ||
+ !ubsec_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
}
- for (i = 0; i < klen; i++)
- key[i] ^= HMAC_OPAD_VAL;
+ return (CRYPTODEV_PROBE_HARDWARE);
}
/*
- * Allocate a new 'session' and return an encoded session id. 'sidp'
- * contains our registration id, and should contain an encoded session
- * id on successful allocation.
+ * Allocate a new 'session'.
*/
static int
-ubsec_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+ubsec_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
{
- struct ubsec_softc *sc = device_get_softc(dev);
- struct cryptoini *c, *encini = NULL, *macini = NULL;
- struct ubsec_session *ses = NULL;
-
- if (cri == NULL || sc == NULL)
- return (EINVAL);
-
- for (c = cri; c != NULL; c = c->cri_next) {
- if (c->cri_alg == CRYPTO_MD5_HMAC ||
- c->cri_alg == CRYPTO_SHA1_HMAC) {
- if (macini)
- return (EINVAL);
- macini = c;
- } else if (c->cri_alg == CRYPTO_DES_CBC ||
- c->cri_alg == CRYPTO_3DES_CBC) {
- if (encini)
- return (EINVAL);
- encini = c;
- } else
- return (EINVAL);
- }
- if (encini == NULL && macini == NULL)
- return (EINVAL);
+ struct ubsec_session *ses;
ses = crypto_get_driver_session(cses);
- if (encini) {
- /* get an IV, network byte order */
- /* XXX may read fewer than requested */
- read_random(ses->ses_iv, sizeof(ses->ses_iv));
+ if (csp->csp_cipher_alg != 0 && csp->csp_cipher_key != NULL)
+ ubsec_setup_enckey(ses, csp->csp_cipher_alg,
+ csp->csp_cipher_key);
- if (encini->cri_key != NULL) {
- ubsec_setup_enckey(ses, encini->cri_alg,
- encini->cri_key);
- }
- }
-
- if (macini) {
- ses->ses_mlen = macini->cri_mlen;
+ if (csp->csp_auth_alg != 0) {
+ ses->ses_mlen = csp->csp_auth_mlen;
if (ses->ses_mlen == 0) {
- if (macini->cri_alg == CRYPTO_MD5_HMAC)
+ if (csp->csp_auth_alg == CRYPTO_MD5_HMAC)
ses->ses_mlen = MD5_HASH_LEN;
else
ses->ses_mlen = SHA1_HASH_LEN;
}
- if (macini->cri_key != NULL) {
- ubsec_setup_mackey(ses, macini->cri_alg,
- macini->cri_key, macini->cri_klen / 8);
+ if (csp->csp_auth_key != NULL) {
+ ubsec_setup_mackey(ses, csp->csp_auth_alg,
+ csp->csp_auth_key, csp->csp_auth_klen);
}
}
return (0);
}
+static bus_size_t
+ubsec_crp_length(struct cryptop *crp)
+{
+
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ return (crp->crp_mbuf->m_pkthdr.len);
+ case CRYPTO_BUF_UIO:
+ return (crp->crp_uio->uio_resid);
+ case CRYPTO_BUF_CONTIG:
+ return (crp->crp_ilen);
+ default:
+ panic("bad crp buffer type");
+ }
+}
+
static void
-ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, bus_size_t mapsize, int error)
+ubsec_op_cb(void *arg, bus_dma_segment_t *seg, int nsegs, int error)
{
struct ubsec_operand *op = arg;
@@ -959,12 +986,11 @@
("Too many DMA segments returned when mapping operand"));
#ifdef UBSEC_DEBUG
if (ubsec_debug)
- printf("ubsec_op_cb: mapsize %u nsegs %d error %d\n",
- (u_int) mapsize, nsegs, error);
+ printf("ubsec_op_cb: nsegs %d error %d\n",
+ nsegs, error);
#endif
if (error != 0)
return;
- op->mapsize = mapsize;
op->nsegs = nsegs;
bcopy(seg, op->segs, nsegs * sizeof (seg[0]));
}
@@ -972,22 +998,17 @@
static int
ubsec_process(device_t dev, struct cryptop *crp, int hint)
{
+ const struct crypto_session_params *csp;
struct ubsec_softc *sc = device_get_softc(dev);
struct ubsec_q *q = NULL;
int err = 0, i, j, nicealign;
- struct cryptodesc *crd1, *crd2, *maccrd, *enccrd;
- int encoffset = 0, macoffset = 0, cpskip, cpoffset;
+ int cpskip, cpoffset;
int sskip, dskip, stheend, dtheend;
int16_t coffset;
struct ubsec_session *ses;
struct ubsec_pktctx ctx;
struct ubsec_dma *dmap = NULL;
- if (crp == NULL || crp->crp_callback == NULL || sc == NULL) {
- ubsecstats.hst_invalid++;
- return (EINVAL);
- }
-
mtx_lock(&sc->sc_freeqlock);
if (SIMPLEQ_EMPTY(&sc->sc_freequeue)) {
ubsecstats.hst_queuefull++;
@@ -1006,103 +1027,34 @@
q->q_dma = dmap;
ses = crypto_get_driver_session(crp->crp_session);
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
- q->q_src_m = (struct mbuf *)crp->crp_buf;
- q->q_dst_m = (struct mbuf *)crp->crp_buf;
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
- q->q_src_io = (struct uio *)crp->crp_buf;
- q->q_dst_io = (struct uio *)crp->crp_buf;
- } else {
- ubsecstats.hst_badflags++;
- err = EINVAL;
- goto errout; /* XXX we don't handle contiguous blocks! */
- }
-
bzero(&dmap->d_dma->d_mcr, sizeof(struct ubsec_mcr));
dmap->d_dma->d_mcr.mcr_pkts = htole16(1);
dmap->d_dma->d_mcr.mcr_flags = 0;
q->q_crp = crp;
- crd1 = crp->crp_desc;
- if (crd1 == NULL) {
- ubsecstats.hst_nodesc++;
- err = EINVAL;
- goto errout;
- }
- crd2 = crd1->crd_next;
+ csp = crypto_get_params(crp->crp_session);
- if (crd2 == NULL) {
- if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
- crd1->crd_alg == CRYPTO_SHA1_HMAC) {
- maccrd = crd1;
- enccrd = NULL;
- } else if (crd1->crd_alg == CRYPTO_DES_CBC ||
- crd1->crd_alg == CRYPTO_3DES_CBC) {
- maccrd = NULL;
- enccrd = crd1;
- } else {
- ubsecstats.hst_badalg++;
- err = EINVAL;
- goto errout;
- }
- } else {
- if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
- crd1->crd_alg == CRYPTO_SHA1_HMAC) &&
- (crd2->crd_alg == CRYPTO_DES_CBC ||
- crd2->crd_alg == CRYPTO_3DES_CBC) &&
- ((crd2->crd_flags & CRD_F_ENCRYPT) == 0)) {
- maccrd = crd1;
- enccrd = crd2;
- } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
- crd1->crd_alg == CRYPTO_3DES_CBC) &&
- (crd2->crd_alg == CRYPTO_MD5_HMAC ||
- crd2->crd_alg == CRYPTO_SHA1_HMAC) &&
- (crd1->crd_flags & CRD_F_ENCRYPT)) {
- enccrd = crd1;
- maccrd = crd2;
- } else {
- /*
- * We cannot order the ubsec as requested
- */
- ubsecstats.hst_badalg++;
- err = EINVAL;
- goto errout;
- }
- }
-
- if (enccrd) {
- if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
- ubsec_setup_enckey(ses, enccrd->crd_alg,
- enccrd->crd_key);
+ if (csp->csp_cipher_alg != 0) {
+ if (crp->crp_cipher_key != NULL) {
+ ubsec_setup_enckey(ses, csp->csp_cipher_alg,
+ crp->crp_cipher_key);
}
- encoffset = enccrd->crd_skip;
ctx.pc_flags |= htole16(UBS_PKTCTX_ENC_3DES);
- if (enccrd->crd_flags & CRD_F_ENCRYPT) {
- q->q_flags |= UBSEC_QFLAGS_COPYOUTIV;
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(ctx.pc_iv, csp->csp_ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start,
+ csp->csp_ivlen, ctx.pc_iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(ctx.pc_iv, crp->crp_iv, csp->csp_ivlen);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen,
+ ctx.pc_iv);
- if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
- else {
- ctx.pc_iv[0] = ses->ses_iv[0];
- ctx.pc_iv[1] = ses->ses_iv[1];
- }
-
- if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv);
- }
- } else {
+ if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
ctx.pc_flags |= htole16(UBS_PKTCTX_INBOUND);
-
- if (enccrd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(enccrd->crd_iv, ctx.pc_iv, 8);
- else {
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, 8, (caddr_t)ctx.pc_iv);
- }
}
ctx.pc_deskey[0] = ses->ses_deskey[0];
@@ -1115,15 +1067,13 @@
SWAP32(ctx.pc_iv[1]);
}
- if (maccrd) {
- if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
- ubsec_setup_mackey(ses, maccrd->crd_alg,
- maccrd->crd_key, maccrd->crd_klen / 8);
+ if (csp->csp_auth_alg != 0) {
+ if (crp->crp_auth_key != NULL) {
+ ubsec_setup_mackey(ses, csp->csp_auth_alg,
+ crp->crp_auth_key, csp->csp_auth_klen);
}
- macoffset = maccrd->crd_skip;
-
- if (maccrd->crd_alg == CRYPTO_MD5_HMAC)
+ if (csp->csp_auth_alg == CRYPTO_MD5_HMAC)
ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_MD5);
else
ctx.pc_flags |= htole16(UBS_PKTCTX_AUTH_SHA1);
@@ -1137,35 +1087,37 @@
}
}
- if (enccrd && maccrd) {
+ if (csp->csp_mode == CSP_MODE_ETA) {
/*
- * ubsec cannot handle packets where the end of encryption
- * and authentication are not the same, or where the
- * encrypted part begins before the authenticated part.
+ * ubsec only supports ETA requests where there is no
+ * gap between the AAD and payload.
*/
- if ((encoffset + enccrd->crd_len) !=
- (macoffset + maccrd->crd_len)) {
+ if (crp->crp_aad_length != 0 &&
+ crp->crp_aad_start + crp->crp_aad_length !=
+ crp->crp_payload_start) {
ubsecstats.hst_lenmismatch++;
err = EINVAL;
goto errout;
}
- if (enccrd->crd_skip < maccrd->crd_skip) {
- ubsecstats.hst_skipmismatch++;
- err = EINVAL;
- goto errout;
+
+ if (crp->crp_aad_length != 0) {
+ sskip = crp->crp_aad_start;
+ } else {
+ sskip = crp->crp_payload_start;
}
- sskip = maccrd->crd_skip;
- cpskip = dskip = enccrd->crd_skip;
- stheend = maccrd->crd_len;
- dtheend = enccrd->crd_len;
- coffset = enccrd->crd_skip - maccrd->crd_skip;
+ cpskip = dskip = crp->crp_payload_start;
+ stheend = crp->crp_aad_length + crp->crp_payload_length;
+ dtheend = crp->crp_payload_length;
+ coffset = crp->crp_aad_length;
cpoffset = cpskip + dtheend;
#ifdef UBSEC_DEBUG
if (ubsec_debug) {
- printf("mac: skip %d, len %d, inject %d\n",
- maccrd->crd_skip, maccrd->crd_len, maccrd->crd_inject);
- printf("enc: skip %d, len %d, inject %d\n",
- enccrd->crd_skip, enccrd->crd_len, enccrd->crd_inject);
+ printf("AAD: start %d, len %d, digest %d\n",
+ crp->crp_aad_start, crp->crp_aad_length,
+ crp->crp_digest_start);
+ printf("payload: start %d, len %d, IV %d\n",
+ crp->crp_payload_start, crp->crp_payload_length,
+ crp->crp_iv_start);
printf("src: skip %d, len %d\n", sskip, stheend);
printf("dst: skip %d, len %d\n", dskip, dtheend);
printf("ubs: coffset %d, pktlen %d, cpskip %d, cpoffset %d\n",
@@ -1173,8 +1125,8 @@
}
#endif
} else {
- cpskip = dskip = sskip = macoffset + encoffset;
- dtheend = stheend = (enccrd)?enccrd->crd_len:maccrd->crd_len;
+ cpskip = dskip = sskip = crp->crp_payload_start;
+ dtheend = stheend = crp->crp_payload_length;
cpoffset = cpskip + dtheend;
coffset = 0;
}
@@ -1185,25 +1137,15 @@
err = ENOMEM;
goto errout;
}
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
- if (bus_dmamap_load_mbuf(sc->sc_dmat, q->q_src_map,
- q->q_src_m, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
- bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
- q->q_src_map = NULL;
- ubsecstats.hst_noload++;
- err = ENOMEM;
- goto errout;
- }
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
- if (bus_dmamap_load_uio(sc->sc_dmat, q->q_src_map,
- q->q_src_io, ubsec_op_cb, &q->q_src, BUS_DMA_NOWAIT) != 0) {
- bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
- q->q_src_map = NULL;
- ubsecstats.hst_noload++;
- err = ENOMEM;
- goto errout;
- }
+ if (bus_dmamap_load_crp(sc->sc_dmat, q->q_src_map, crp, ubsec_op_cb,
+ &q->q_src, BUS_DMA_NOWAIT) != 0) {
+ bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
+ q->q_src_map = NULL;
+ ubsecstats.hst_noload++;
+ err = ENOMEM;
+ goto errout;
}
+ q->q_src_mapsize = ubsec_crp_length(crp);
nicealign = ubsec_dmamap_aligned(&q->q_src);
dmap->d_dma->d_mcr.mcr_pktlen = htole16(stheend);
@@ -1257,7 +1199,7 @@
j++;
}
- if (enccrd == NULL && maccrd != NULL) {
+ if (csp->csp_mode == CSP_MODE_DIGEST) {
dmap->d_dma->d_mcr.mcr_opktbuf.pb_addr = 0;
dmap->d_dma->d_mcr.mcr_opktbuf.pb_len = 0;
dmap->d_dma->d_mcr.mcr_opktbuf.pb_next = htole32(dmap->d_alloc.dma_paddr +
@@ -1270,104 +1212,79 @@
dmap->d_dma->d_mcr.mcr_opktbuf.pb_next);
#endif
} else {
- if (crp->crp_flags & CRYPTO_F_IOV) {
- if (!nicealign) {
- ubsecstats.hst_iovmisaligned++;
- err = EINVAL;
- goto errout;
- }
- if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
- &q->q_dst_map)) {
- ubsecstats.hst_nomap++;
- err = ENOMEM;
- goto errout;
- }
- if (bus_dmamap_load_uio(sc->sc_dmat, q->q_dst_map,
- q->q_dst_io, ubsec_op_cb, &q->q_dst, BUS_DMA_NOWAIT) != 0) {
- bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
- q->q_dst_map = NULL;
- ubsecstats.hst_noload++;
- err = ENOMEM;
- goto errout;
- }
- } else if (crp->crp_flags & CRYPTO_F_IMBUF) {
- if (nicealign) {
- q->q_dst = q->q_src;
+ if (nicealign) {
+ q->q_dst = q->q_src;
+ } else if (crp->crp_buf_type == CRYPTO_BUF_MBUF) {
+ int totlen, len;
+ struct mbuf *m, *top, **mp;
+
+ ubsecstats.hst_unaligned++;
+ totlen = q->q_src_mapsize;
+ if (totlen >= MINCLSIZE) {
+ m = m_getcl(M_NOWAIT, MT_DATA,
+ crp->crp_mbuf->m_flags & M_PKTHDR);
+ len = MCLBYTES;
+ } else if (crp->crp_mbuf->m_flags & M_PKTHDR) {
+ m = m_gethdr(M_NOWAIT, MT_DATA);
+ len = MHLEN;
} else {
- int totlen, len;
- struct mbuf *m, *top, **mp;
+ m = m_get(M_NOWAIT, MT_DATA);
+ len = MLEN;
+ }
+ if (m && crp->crp_mbuf->m_flags & M_PKTHDR &&
+ !m_dup_pkthdr(m, crp->crp_mbuf, M_NOWAIT)) {
+ m_free(m);
+ m = NULL;
+ }
+ if (m == NULL) {
+ ubsecstats.hst_nombuf++;
+ err = sc->sc_nqueue ? ERESTART : ENOMEM;
+ goto errout;
+ }
+ m->m_len = len = min(totlen, len);
+ totlen -= len;
+ top = m;
+ mp = &top;
- ubsecstats.hst_unaligned++;
- totlen = q->q_src_mapsize;
+ while (totlen > 0) {
if (totlen >= MINCLSIZE) {
- m = m_getcl(M_NOWAIT, MT_DATA,
- q->q_src_m->m_flags & M_PKTHDR);
+ m = m_getcl(M_NOWAIT, MT_DATA, 0);
len = MCLBYTES;
- } else if (q->q_src_m->m_flags & M_PKTHDR) {
- m = m_gethdr(M_NOWAIT, MT_DATA);
- len = MHLEN;
} else {
m = m_get(M_NOWAIT, MT_DATA);
len = MLEN;
}
- if (m && q->q_src_m->m_flags & M_PKTHDR &&
- !m_dup_pkthdr(m, q->q_src_m, M_NOWAIT)) {
- m_free(m);
- m = NULL;
- }
if (m == NULL) {
+ m_freem(top);
ubsecstats.hst_nombuf++;
err = sc->sc_nqueue ? ERESTART : ENOMEM;
goto errout;
}
m->m_len = len = min(totlen, len);
totlen -= len;
- top = m;
- mp = &top;
-
- while (totlen > 0) {
- if (totlen >= MINCLSIZE) {
- m = m_getcl(M_NOWAIT,
- MT_DATA, 0);
- len = MCLBYTES;
- } else {
- m = m_get(M_NOWAIT, MT_DATA);
- len = MLEN;
- }
- if (m == NULL) {
- m_freem(top);
- ubsecstats.hst_nombuf++;
- err = sc->sc_nqueue ? ERESTART : ENOMEM;
- goto errout;
- }
- m->m_len = len = min(totlen, len);
- totlen -= len;
- *mp = m;
- mp = &m->m_next;
- }
- q->q_dst_m = top;
- ubsec_mcopy(q->q_src_m, q->q_dst_m,
- cpskip, cpoffset);
- if (bus_dmamap_create(sc->sc_dmat,
- BUS_DMA_NOWAIT, &q->q_dst_map) != 0) {
- ubsecstats.hst_nomap++;
- err = ENOMEM;
- goto errout;
- }
- if (bus_dmamap_load_mbuf(sc->sc_dmat,
- q->q_dst_map, q->q_dst_m,
- ubsec_op_cb, &q->q_dst,
- BUS_DMA_NOWAIT) != 0) {
- bus_dmamap_destroy(sc->sc_dmat,
- q->q_dst_map);
- q->q_dst_map = NULL;
- ubsecstats.hst_noload++;
- err = ENOMEM;
- goto errout;
- }
+ *mp = m;
+ mp = &m->m_next;
}
+ q->q_dst_m = top;
+ ubsec_mcopy(crp->crp_mbuf, q->q_dst_m, cpskip, cpoffset);
+ if (bus_dmamap_create(sc->sc_dmat, BUS_DMA_NOWAIT,
+ &q->q_dst_map) != 0) {
+ ubsecstats.hst_nomap++;
+ err = ENOMEM;
+ goto errout;
+ }
+ if (bus_dmamap_load_mbuf_sg(sc->sc_dmat,
+ q->q_dst_map, q->q_dst_m, q->q_dst_segs,
+ &q->q_dst_nsegs, 0) != 0) {
+ bus_dmamap_destroy(sc->sc_dmat, q->q_dst_map);
+ q->q_dst_map = NULL;
+ ubsecstats.hst_noload++;
+ err = ENOMEM;
+ goto errout;
+ }
+ q->q_dst_mapsize = q->q_src_mapsize;
} else {
- ubsecstats.hst_badflags++;
+ ubsecstats.hst_iovmisaligned++;
err = EINVAL;
goto errout;
}
@@ -1414,7 +1331,7 @@
pb->pb_len = htole32(packl);
if ((i + 1) == q->q_dst_nsegs) {
- if (maccrd)
+ if (csp->csp_auth_alg != 0)
pb->pb_next = htole32(dmap->d_alloc.dma_paddr +
offsetof(struct ubsec_dmachunk, d_macbuf[0]));
else
@@ -1465,7 +1382,7 @@
errout:
if (q != NULL) {
- if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
+ if (q->q_dst_m != NULL)
m_freem(q->q_dst_m);
if (q->q_dst_map != NULL && q->q_dst_map != q->q_src_map) {
@@ -1495,12 +1412,14 @@
static void
ubsec_callback(struct ubsec_softc *sc, struct ubsec_q *q)
{
+ const struct crypto_session_params *csp;
struct cryptop *crp = (struct cryptop *)q->q_crp;
struct ubsec_session *ses;
- struct cryptodesc *crd;
struct ubsec_dma *dmap = q->q_dma;
+ char hash[SHA1_HASH_LEN];
ses = crypto_get_driver_session(crp->crp_session);
+ csp = crypto_get_params(crp->crp_session);
ubsecstats.hst_opackets++;
ubsecstats.hst_obytes += dmap->d_alloc.dma_size;
@@ -1517,31 +1436,21 @@
bus_dmamap_unload(sc->sc_dmat, q->q_src_map);
bus_dmamap_destroy(sc->sc_dmat, q->q_src_map);
- if ((crp->crp_flags & CRYPTO_F_IMBUF) && (q->q_src_m != q->q_dst_m)) {
- m_freem(q->q_src_m);
- crp->crp_buf = (caddr_t)q->q_dst_m;
+ if (q->q_dst_m != NULL) {
+ m_freem(crp->crp_mbuf);
+ crp->crp_mbuf = q->q_dst_m;
}
- /* copy out IV for future use */
- if (q->q_flags & UBSEC_QFLAGS_COPYOUTIV) {
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- if (crd->crd_alg != CRYPTO_DES_CBC &&
- crd->crd_alg != CRYPTO_3DES_CBC)
- continue;
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crd->crd_skip + crd->crd_len - 8, 8,
- (caddr_t)ses->ses_iv);
- break;
- }
- }
-
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- if (crd->crd_alg != CRYPTO_MD5_HMAC &&
- crd->crd_alg != CRYPTO_SHA1_HMAC)
- continue;
- crypto_copyback(crp->crp_flags, crp->crp_buf, crd->crd_inject,
- ses->ses_mlen, (caddr_t)dmap->d_dma->d_macbuf);
- break;
+ if (csp->csp_auth_alg != 0) {
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start,
+ ses->ses_mlen, hash);
+ if (timingsafe_bcmp(dmap->d_dma->d_macbuf, hash,
+ ses->ses_mlen) != 0)
+ crp->crp_etype = EBADMSG;
+ } else
+ crypto_copyback(crp, crp->crp_digest_start,
+ ses->ses_mlen, dmap->d_dma->d_macbuf);
}
mtx_lock(&sc->sc_freeqlock);
SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
@@ -1942,7 +1851,7 @@
if(q->q_stacked_mcr[i]) {
q2 = q->q_stacked_mcr[i];
- if ((q2->q_dst_m != NULL) && (q2->q_src_m != q2->q_dst_m))
+ if (q2->q_dst_m != NULL)
m_freem(q2->q_dst_m);
crp = (struct cryptop *)q2->q_crp;
@@ -1959,7 +1868,7 @@
/*
* Free header MCR
*/
- if ((q->q_dst_m != NULL) && (q->q_src_m != q->q_dst_m))
+ if (q->q_dst_m != NULL)
m_freem(q->q_dst_m);
crp = (struct cryptop *)q->q_crp;
Index: sys/dev/ubsec/ubsecvar.h
===================================================================
--- sys/dev/ubsec/ubsecvar.h
+++ sys/dev/ubsec/ubsecvar.h
@@ -134,10 +134,6 @@
#define UBS_FLAGS_RNG 0x10 /* hardware rng */
struct ubsec_operand {
- union {
- struct mbuf *m;
- struct uio *io;
- } u;
bus_dmamap_t map;
bus_size_t mapsize;
int nsegs;
@@ -153,19 +149,16 @@
struct ubsec_operand q_src;
struct ubsec_operand q_dst;
+ struct mbuf *q_dst_m;
int q_flags;
};
-#define q_src_m q_src.u.m
-#define q_src_io q_src.u.io
#define q_src_map q_src.map
#define q_src_nsegs q_src.nsegs
#define q_src_segs q_src.segs
#define q_src_mapsize q_src.mapsize
-#define q_dst_m q_dst.u.m
-#define q_dst_io q_dst.u.io
#define q_dst_map q_dst.map
#define q_dst_nsegs q_dst.nsegs
#define q_dst_segs q_dst.segs
@@ -215,7 +208,6 @@
u_int32_t ses_mlen; /* hmac length */
u_int32_t ses_hminner[5]; /* hmac inner state */
u_int32_t ses_hmouter[5]; /* hmac outer state */
- u_int32_t ses_iv[2]; /* [3]DES iv */
};
#endif /* _KERNEL */
Index: sys/geom/eli/g_eli.h
===================================================================
--- sys/geom/eli/g_eli.h
+++ sys/geom/eli/g_eli.h
@@ -163,6 +163,7 @@
struct g_eli_worker {
struct g_eli_softc *w_softc;
struct proc *w_proc;
+ void *w_first_key;
u_int w_number;
crypto_session_t w_sid;
boolean_t w_active;
@@ -573,6 +574,25 @@
}
}
+static __inline u_int
+g_eli_ivlen(u_int algo)
+{
+
+ switch (algo) {
+ case CRYPTO_AES_XTS:
+ return (AES_XTS_IV_LEN);
+ case CRYPTO_AES_CBC:
+ return (AES_BLOCK_LEN);
+ case CRYPTO_BLF_CBC:
+ return (BLOWFISH_BLOCK_LEN);
+ case CRYPTO_CAMELLIA_CBC:
+ return (CAMELLIA_BLOCK_LEN);
+ case CRYPTO_3DES_CBC:
+ return (DES3_BLOCK_LEN);
+ }
+ return (0);
+}
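
g_eli_ivlen() exists because the reworked session API wants the IV length declared explicitly in csp_ivlen rather than inferred by OCF from the algorithm. Worked through for one configuration (values follow from the constants above; a sketch, not code from the patch):

	/* An AES-XTS volume with a 256-bit user key: */
	csp.csp_cipher_alg = CRYPTO_AES_XTS;
	csp.csp_ivlen = g_eli_ivlen(CRYPTO_AES_XTS);	/* AES_XTS_IV_LEN */
	csp.csp_cipher_klen = 256 / 8;	/* 32 bytes... */
	csp.csp_cipher_klen <<= 1;	/* ...doubled: XTS consumes two AES keys */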
+
static __inline u_int
g_eli_hashlen(u_int algo)
{
Index: sys/geom/eli/g_eli.c
===================================================================
--- sys/geom/eli/g_eli.c
+++ sys/geom/eli/g_eli.c
@@ -488,41 +488,44 @@
g_eli_newsession(struct g_eli_worker *wr)
{
struct g_eli_softc *sc;
- struct cryptoini crie, cria;
+ struct crypto_session_params csp;
int error;
+ void *key;
sc = wr->w_softc;
- bzero(&crie, sizeof(crie));
- crie.cri_alg = sc->sc_ealgo;
- crie.cri_klen = sc->sc_ekeylen;
+ memset(&csp, 0, sizeof(csp));
+ csp.csp_mode = CSP_MODE_CIPHER;
+ csp.csp_cipher_alg = sc->sc_ealgo;
+ csp.csp_ivlen = g_eli_ivlen(sc->sc_ealgo);
+ csp.csp_cipher_klen = sc->sc_ekeylen / 8;
if (sc->sc_ealgo == CRYPTO_AES_XTS)
- crie.cri_klen <<= 1;
+ csp.csp_cipher_klen <<= 1;
if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
- crie.cri_key = g_eli_key_hold(sc, 0,
+ key = g_eli_key_hold(sc, 0,
LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize);
+ csp.csp_cipher_key = key;
} else {
- crie.cri_key = sc->sc_ekey;
+ key = NULL;
+ csp.csp_cipher_key = sc->sc_ekey;
}
if (sc->sc_flags & G_ELI_FLAG_AUTH) {
- bzero(&cria, sizeof(cria));
- cria.cri_alg = sc->sc_aalgo;
- cria.cri_klen = sc->sc_akeylen;
- cria.cri_key = sc->sc_akey;
- crie.cri_next = &cria;
+ csp.csp_mode = CSP_MODE_ETA;
+ csp.csp_auth_alg = sc->sc_aalgo;
+ csp.csp_auth_klen = G_ELI_AUTH_SECKEYLEN;
}
switch (sc->sc_crypto) {
case G_ELI_CRYPTO_SW:
- error = crypto_newsession(&wr->w_sid, &crie,
+ error = crypto_newsession(&wr->w_sid, &csp,
CRYPTOCAP_F_SOFTWARE);
break;
case G_ELI_CRYPTO_HW:
- error = crypto_newsession(&wr->w_sid, &crie,
+ error = crypto_newsession(&wr->w_sid, &csp,
CRYPTOCAP_F_HARDWARE);
break;
case G_ELI_CRYPTO_UNKNOWN:
- error = crypto_newsession(&wr->w_sid, &crie,
+ error = crypto_newsession(&wr->w_sid, &csp,
CRYPTOCAP_F_HARDWARE);
if (error == 0) {
mtx_lock(&sc->sc_queue_mtx);
@@ -530,7 +533,7 @@
sc->sc_crypto = G_ELI_CRYPTO_HW;
mtx_unlock(&sc->sc_queue_mtx);
} else {
- error = crypto_newsession(&wr->w_sid, &crie,
+ error = crypto_newsession(&wr->w_sid, &csp,
CRYPTOCAP_F_SOFTWARE);
mtx_lock(&sc->sc_queue_mtx);
if (sc->sc_crypto == G_ELI_CRYPTO_UNKNOWN)
@@ -542,8 +545,12 @@
panic("%s: invalid condition", __func__);
}
- if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0)
- g_eli_key_drop(sc, crie.cri_key);
+ if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) != 0) {
+ if (error)
+ g_eli_key_drop(sc, key);
+ else
+ wr->w_first_key = key;
+ }
return (error);
}
@@ -551,8 +558,14 @@
static void
g_eli_freesession(struct g_eli_worker *wr)
{
+ struct g_eli_softc *sc;
crypto_freesession(wr->w_sid);
+ if (wr->w_first_key != NULL) {
+ sc = wr->w_softc;
+ g_eli_key_drop(sc, wr->w_first_key);
+ wr->w_first_key = NULL;
+ }
}
static void
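The conversion above collapses the crie -> cria cryptoini chain into one flat parameter block: CSP_MODE_ETA expresses the encrypt-then-authenticate pairing directly, and key lengths are now bytes rather than bits (hence the new / 8). A condensed sketch of the resulting session setup, gathered from the fragments above:

	struct crypto_session_params csp;

	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_ETA;		/* cipher + auth in one session */
	csp.csp_cipher_alg = sc->sc_ealgo;
	csp.csp_ivlen = g_eli_ivlen(sc->sc_ealgo);
	csp.csp_cipher_klen = sc->sc_ekeylen / 8;	/* bytes, not bits */
	csp.csp_cipher_key = sc->sc_ekey;
	csp.csp_auth_alg = sc->sc_aalgo;
	csp.csp_auth_klen = G_ELI_AUTH_SECKEYLEN;
	/* csp_auth_key stays NULL; geli supplies per-request auth keys. */
	error = crypto_newsession(&wr->w_sid, &csp, CRYPTOCAP_F_SOFTWARE);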
Index: sys/geom/eli/g_eli_crypto.c
===================================================================
--- sys/geom/eli/g_eli_crypto.c
+++ sys/geom/eli/g_eli_crypto.c
@@ -61,50 +61,40 @@
g_eli_crypto_cipher(u_int algo, int enc, u_char *data, size_t datasize,
const u_char *key, size_t keysize)
{
- struct cryptoini cri;
+ struct crypto_session_params csp;
struct cryptop *crp;
- struct cryptodesc *crd;
crypto_session_t sid;
- u_char *p;
int error;
KASSERT(algo != CRYPTO_AES_XTS,
("%s: CRYPTO_AES_XTS unexpected here", __func__));
- bzero(&cri, sizeof(cri));
- cri.cri_alg = algo;
- cri.cri_key = __DECONST(void *, key);
- cri.cri_klen = keysize;
- error = crypto_newsession(&sid, &cri, CRYPTOCAP_F_SOFTWARE);
+ memset(&csp, 0, sizeof(csp));
+ csp.csp_mode = CSP_MODE_CIPHER;
+ csp.csp_cipher_alg = algo;
+ csp.csp_ivlen = g_eli_ivlen(algo);
+ csp.csp_cipher_key = key;
+ csp.csp_cipher_klen = keysize / 8;
+ error = crypto_newsession(&sid, &csp, CRYPTOCAP_F_SOFTWARE);
if (error != 0)
return (error);
- p = malloc(sizeof(*crp) + sizeof(*crd), M_ELI, M_NOWAIT | M_ZERO);
- if (p == NULL) {
+ crp = crypto_getreq(sid, M_NOWAIT);
+ if (crp == NULL) {
crypto_freesession(sid);
return (ENOMEM);
}
- crp = (struct cryptop *)p; p += sizeof(*crp);
- crd = (struct cryptodesc *)p; p += sizeof(*crd);
- crd->crd_skip = 0;
- crd->crd_len = datasize;
- crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
- if (enc)
- crd->crd_flags |= CRD_F_ENCRYPT;
- crd->crd_alg = algo;
- crd->crd_key = __DECONST(void *, key);
- crd->crd_klen = keysize;
- bzero(crd->crd_iv, sizeof(crd->crd_iv));
- crd->crd_next = NULL;
+ crp->crp_payload_start = 0;
+ crp->crp_payload_length = datasize;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_IV_SEPARATE;
+ crp->crp_op = enc ? CRYPTO_OP_ENCRYPT : CRYPTO_OP_DECRYPT;
+ memset(crp->crp_iv, 0, sizeof(crp->crp_iv));
- crp->crp_session = sid;
- crp->crp_ilen = datasize;
- crp->crp_olen = datasize;
crp->crp_opaque = NULL;
crp->crp_callback = g_eli_crypto_done;
+ crp->crp_buf_type = CRYPTO_BUF_CONTIG;
+ crp->crp_ilen = datasize;
crp->crp_buf = (void *)data;
- crp->crp_flags = CRYPTO_F_CBIFSYNC;
- crp->crp_desc = crd;
error = crypto_dispatch(crp);
if (error == 0) {
@@ -113,7 +103,7 @@
error = crp->crp_etype;
}
- free(crp, M_ELI);
+ crypto_freereq(crp);
crypto_freesession(sid);
return (error);
}
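g_eli_crypto_cipher() now follows the canonical one-shot request lifecycle: crypto_getreq() returns a zeroed request already bound to the session, and crypto_freereq() releases it, replacing the hand-rolled malloc of cryptop plus cryptodesc. Condensed from the code above (the sleep-until-callback logic is elided):

	struct cryptop *crp;

	crp = crypto_getreq(sid, M_NOWAIT);	/* zeroed, bound to sid */
	if (crp == NULL)
		return (ENOMEM);
	crp->crp_op = enc ? CRYPTO_OP_ENCRYPT : CRYPTO_OP_DECRYPT;
	crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_IV_SEPARATE;
	crp->crp_buf_type = CRYPTO_BUF_CONTIG;
	crp->crp_buf = (void *)data;
	crp->crp_ilen = datasize;
	crp->crp_payload_start = 0;
	crp->crp_payload_length = datasize;
	memset(crp->crp_iv, 0, sizeof(crp->crp_iv));	/* IV rides in the request */
	crp->crp_callback = g_eli_crypto_done;
	error = crypto_dispatch(crp);
	/* ... wait for the callback, harvest crp->crp_etype ... */
	crypto_freereq(crp);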
Index: sys/geom/eli/g_eli_integrity.c
===================================================================
--- sys/geom/eli/g_eli_integrity.c
+++ sys/geom/eli/g_eli_integrity.c
@@ -140,31 +140,51 @@
}
bp = (struct bio *)crp->crp_opaque;
bp->bio_inbed++;
+ sc = bp->bio_to->geom->softc;
if (crp->crp_etype == 0) {
- bp->bio_completed += crp->crp_olen;
- G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%jd completed=%jd).",
- bp->bio_inbed, bp->bio_children, (intmax_t)crp->crp_olen, (intmax_t)bp->bio_completed);
+ bp->bio_completed += crp->crp_payload_length;
+ G_ELI_DEBUG(3, "Crypto READ request done (%d/%d) (add=%d completed=%jd).",
+ bp->bio_inbed, bp->bio_children, crp->crp_payload_length, (intmax_t)bp->bio_completed);
} else {
- G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
+ u_int nsec, decr_secsize, encr_secsize, rel_sec;
+ int *errorp;
+
+ /* Sectorsize of decrypted provider eg. 4096. */
+ decr_secsize = bp->bio_to->sectorsize;
+ /* The real sectorsize of encrypted provider, eg. 512. */
+ encr_secsize =
+ LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
+ /* Number of sectors from decrypted provider, eg. 2. */
+ nsec = bp->bio_length / decr_secsize;
+ /* Number of sectors from encrypted provider, eg. 18. */
+ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
+ /* Which relative sector this request decrypted. */
+ rel_sec = ((crp->crp_buf + crp->crp_payload_start) -
+ (char *)bp->bio_driver2) / encr_secsize;
+
+ errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec +
+ sizeof(int) * rel_sec);
+ *errorp = crp->crp_etype;
+ G_ELI_DEBUG(1,
+ "Crypto READ request failed (%d/%d) error=%d.",
bp->bio_inbed, bp->bio_children, crp->crp_etype);
- if (bp->bio_error == 0)
- bp->bio_error = crp->crp_etype;
+ if (bp->bio_error == 0 || bp->bio_error == EINTEGRITY)
+ bp->bio_error = crp->crp_etype == EBADMSG ?
+ EINTEGRITY : crp->crp_etype;
}
- sc = bp->bio_to->geom->softc;
- g_eli_key_drop(sc, crp->crp_desc->crd_next->crd_key);
+ if (crp->crp_cipher_key != NULL)
+ g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
+ crypto_freereq(crp);
/*
* Do we have all sectors already?
*/
if (bp->bio_inbed < bp->bio_children)
return (0);
+
if (bp->bio_error == 0) {
u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
- u_char *srcdata, *dstdata, *auth;
- off_t coroff, corsize;
+ u_char *srcdata, *dstdata;
- /*
- * Verify data integrity based on calculated and read HMACs.
- */
/* Sectorsize of decrypted provider eg. 4096. */
decr_secsize = bp->bio_to->sectorsize;
/* The real sectorsize of encrypted provider, eg. 512. */
@@ -180,30 +200,54 @@
srcdata = bp->bio_driver2;
dstdata = bp->bio_data;
- auth = srcdata + encr_secsize * nsec;
+
+ for (i = 1; i <= nsec; i++) {
+ data_secsize = sc->sc_data_per_sector;
+ if ((i % lsec) == 0)
+ data_secsize = decr_secsize % data_secsize;
+ bcopy(srcdata + sc->sc_alen, dstdata, data_secsize);
+ srcdata += encr_secsize;
+ dstdata += data_secsize;
+ }
+ } else if (bp->bio_error == EINTEGRITY) {
+ u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
+ int *errorp;
+ off_t coroff, corsize, dstoff;
+
+ /* Sectorsize of decrypted provider eg. 4096. */
+ decr_secsize = bp->bio_to->sectorsize;
+ /* The real sectorsize of encrypted provider, eg. 512. */
+ encr_secsize = LIST_FIRST(&sc->sc_geom->consumer)->provider->sectorsize;
+ /* Number of data bytes in one encrypted sector, eg. 480. */
+ data_secsize = sc->sc_data_per_sector;
+ /* Number of sectors from decrypted provider, eg. 2. */
+ nsec = bp->bio_length / decr_secsize;
+ /* Number of sectors from encrypted provider, eg. 18. */
+ nsec = (nsec * sc->sc_bytes_per_sector) / encr_secsize;
+ /* Last sector number in every big sector, eg. 9. */
+ lsec = sc->sc_bytes_per_sector / encr_secsize;
+
+ errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec);
coroff = -1;
corsize = 0;
+ dstoff = bp->bio_offset;
for (i = 1; i <= nsec; i++) {
data_secsize = sc->sc_data_per_sector;
if ((i % lsec) == 0)
data_secsize = decr_secsize % data_secsize;
- if (bcmp(srcdata, auth, sc->sc_alen) != 0) {
+ if (errorp[i - 1] == EBADMSG) {
/*
- * Curruption detected, remember the offset if
+ * Corruption detected, remember the offset if
* this is the first corrupted sector and
* increase size.
*/
- if (bp->bio_error == 0)
- bp->bio_error = -1;
- if (coroff == -1) {
- coroff = bp->bio_offset +
- (dstdata - (u_char *)bp->bio_data);
- }
+ if (coroff == -1)
+ coroff = dstoff;
corsize += data_secsize;
} else {
/*
- * No curruption, good.
+ * No corruption, good.
* Report previous corruption if there was one.
*/
if (coroff != -1) {
@@ -214,12 +258,8 @@
coroff = -1;
corsize = 0;
}
- bcopy(srcdata + sc->sc_alen, dstdata,
- data_secsize);
}
- srcdata += encr_secsize;
- dstdata += data_secsize;
- auth += sc->sc_alen;
+ dstoff += data_secsize;
}
/* Report previous corruption if there was one. */
if (coroff != -1) {
@@ -231,9 +271,7 @@
free(bp->bio_driver2, M_ELI);
bp->bio_driver2 = NULL;
if (bp->bio_error != 0) {
- if (bp->bio_error == -1)
- bp->bio_error = EINTEGRITY;
- else {
+ if (bp->bio_error != EINTEGRITY) {
G_ELI_LOGREQ(0, bp,
"Crypto READ request failed (error=%d).",
bp->bio_error);
@@ -277,7 +315,9 @@
bp->bio_error = crp->crp_etype;
}
sc = bp->bio_to->geom->softc;
- g_eli_key_drop(sc, crp->crp_desc->crd_key);
+ if (crp->crp_cipher_key != NULL)
+ g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
+ crypto_freereq(crp);
/*
* All sectors are already encrypted?
*/
@@ -361,14 +401,16 @@
cbp->bio_length = cp->provider->sectorsize * nsec;
size = cbp->bio_length;
- size += sc->sc_alen * nsec;
- size += sizeof(struct cryptop) * nsec;
- size += sizeof(struct cryptodesc) * nsec * 2;
+ size += sizeof(int) * nsec;
size += G_ELI_AUTH_SECKEYLEN * nsec;
cbp->bio_offset = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;
bp->bio_driver2 = malloc(size, M_ELI, M_WAITOK);
cbp->bio_data = bp->bio_driver2;
+ /* Clear the error array. */
+ memset((char *)bp->bio_driver2 + cbp->bio_length, 0,
+ sizeof(int) * nsec);
+
/*
* We read more than what is requested, so we have to be ready to read
* more than MAXPHYS.
@@ -408,10 +450,9 @@
{
struct g_eli_softc *sc;
struct cryptop *crp;
- struct cryptodesc *crde, *crda;
u_int i, lsec, nsec, data_secsize, decr_secsize, encr_secsize;
off_t dstoff;
- u_char *p, *data, *auth, *authkey, *plaindata;
+ u_char *p, *data, *authkey, *plaindata;
int error;
G_ELI_LOGREQ(3, bp, "%s", __func__);
@@ -433,19 +474,15 @@
/* Destination offset, used for IV generation. */
dstoff = (bp->bio_offset / bp->bio_to->sectorsize) * sc->sc_bytes_per_sector;
- auth = NULL; /* Silence compiler warning. */
plaindata = bp->bio_data;
if (bp->bio_cmd == BIO_READ) {
data = bp->bio_driver2;
- auth = data + encr_secsize * nsec;
- p = auth + sc->sc_alen * nsec;
+ p = data + encr_secsize * nsec;
+ p += sizeof(int) * nsec;
} else {
size_t size;
size = encr_secsize * nsec;
- size += sizeof(*crp) * nsec;
- size += sizeof(*crde) * nsec;
- size += sizeof(*crda) * nsec;
size += G_ELI_AUTH_SECKEYLEN * nsec;
size += sizeof(uintptr_t); /* Space for alignment. */
data = malloc(size, M_ELI, M_WAITOK);
@@ -460,9 +497,7 @@
#endif
for (i = 1; i <= nsec; i++, dstoff += encr_secsize) {
- crp = (struct cryptop *)p; p += sizeof(*crp);
- crde = (struct cryptodesc *)p; p += sizeof(*crde);
- crda = (struct cryptodesc *)p; p += sizeof(*crda);
+ crp = crypto_getreq(wr->w_sid, M_WAITOK);
authkey = (u_char *)p; p += G_ELI_AUTH_SECKEYLEN;
data_secsize = sc->sc_data_per_sector;
@@ -477,21 +512,14 @@
encr_secsize - sc->sc_alen - data_secsize);
}
- if (bp->bio_cmd == BIO_READ) {
- /* Remember read HMAC. */
- bcopy(data, auth, sc->sc_alen);
- auth += sc->sc_alen;
- /* TODO: bzero(9) can be commented out later. */
- bzero(data, sc->sc_alen);
- } else {
+ if (bp->bio_cmd == BIO_WRITE) {
bcopy(plaindata, data + sc->sc_alen, data_secsize);
plaindata += data_secsize;
}
- crp->crp_session = wr->w_sid;
crp->crp_ilen = sc->sc_alen + data_secsize;
- crp->crp_olen = data_secsize;
crp->crp_opaque = (void *)bp;
+ crp->crp_buf_type = CRYPTO_BUF_CONTIG;
crp->crp_buf = (void *)data;
data += encr_secsize;
crp->crp_flags = CRYPTO_F_CBIFSYNC;
@@ -499,41 +527,28 @@
crp->crp_flags |= CRYPTO_F_BATCH;
if (bp->bio_cmd == BIO_WRITE) {
crp->crp_callback = g_eli_auth_write_done;
- crp->crp_desc = crde;
- crde->crd_next = crda;
- crda->crd_next = NULL;
+ crp->crp_op = CRYPTO_OP_ENCRYPT |
+ CRYPTO_OP_COMPUTE_DIGEST;
} else {
crp->crp_callback = g_eli_auth_read_done;
- crp->crp_desc = crda;
- crda->crd_next = crde;
- crde->crd_next = NULL;
+ crp->crp_op = CRYPTO_OP_DECRYPT |
+ CRYPTO_OP_VERIFY_DIGEST;
}
- crde->crd_skip = sc->sc_alen;
- crde->crd_len = data_secsize;
- crde->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
- if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0)
- crde->crd_flags |= CRD_F_KEY_EXPLICIT;
- if (bp->bio_cmd == BIO_WRITE)
- crde->crd_flags |= CRD_F_ENCRYPT;
- crde->crd_alg = sc->sc_ealgo;
- crde->crd_key = g_eli_key_hold(sc, dstoff, encr_secsize);
- crde->crd_klen = sc->sc_ekeylen;
- if (sc->sc_ealgo == CRYPTO_AES_XTS)
- crde->crd_klen <<= 1;
- g_eli_crypto_ivgen(sc, dstoff, crde->crd_iv,
- sizeof(crde->crd_iv));
+ crp->crp_digest_start = 0;
+ crp->crp_payload_start = sc->sc_alen;
+ crp->crp_payload_length = data_secsize;
+ crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
+ if ((sc->sc_flags & G_ELI_FLAG_FIRST_KEY) == 0) {
+ crp->crp_cipher_key = g_eli_key_hold(sc, dstoff,
+ encr_secsize);
+ }
+ g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv,
+ sizeof(crp->crp_iv));
- crda->crd_skip = sc->sc_alen;
- crda->crd_len = data_secsize;
- crda->crd_inject = 0;
- crda->crd_flags = CRD_F_KEY_EXPLICIT;
- crda->crd_alg = sc->sc_aalgo;
g_eli_auth_keygen(sc, dstoff, authkey);
- crda->crd_key = authkey;
- crda->crd_klen = G_ELI_AUTH_SECKEYLEN * 8;
+ crp->crp_auth_key = authkey;
- crp->crp_etype = 0;
error = crypto_dispatch(crp);
KASSERT(error == 0, ("crypto_dispatch() failed (error=%d)",
error));
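The integrity read path above replaces the staging copy of on-disk HMACs with a per-sector error array appended to bio_driver2: verification now happens in the driver via CRYPTO_OP_VERIFY_DIGEST, and the completion handler just records which sector failed. Layout and slot arithmetic, condensed from the code above:

	/*
	 * bio_driver2 layout for an integrity READ:
	 *
	 *   nsec * encr_secsize bytes    encrypted sectors
	 *   nsec * sizeof(int)           per-sector error slots
	 */
	rel_sec = ((crp->crp_buf + crp->crp_payload_start) -
	    (char *)bp->bio_driver2) / encr_secsize;
	errorp = (int *)((char *)bp->bio_driver2 + encr_secsize * nsec +
	    sizeof(int) * rel_sec);
	*errorp = crp->crp_etype;	/* EBADMSG marks a corrupt sector */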
Index: sys/geom/eli/g_eli_privacy.c
===================================================================
--- sys/geom/eli/g_eli_privacy.c
+++ sys/geom/eli/g_eli_privacy.c
@@ -82,7 +82,7 @@
if (crp->crp_etype == 0) {
G_ELI_DEBUG(3, "Crypto READ request done (%d/%d).",
bp->bio_inbed, bp->bio_children);
- bp->bio_completed += crp->crp_olen;
+ bp->bio_completed += crp->crp_ilen;
} else {
G_ELI_DEBUG(1, "Crypto READ request failed (%d/%d) error=%d.",
bp->bio_inbed, bp->bio_children, crp->crp_etype);
@@ -90,8 +90,9 @@
bp->bio_error = crp->crp_etype;
}
sc = bp->bio_to->geom->softc;
- if (sc != NULL)
- g_eli_key_drop(sc, crp->crp_desc->crd_key);
+ if (sc != NULL && crp->crp_cipher_key != NULL)
+ g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
+ crypto_freereq(crp);
/*
* Do we have all sectors already?
*/
@@ -143,7 +144,9 @@
}
gp = bp->bio_to->geom;
sc = gp->softc;
- g_eli_key_drop(sc, crp->crp_desc->crd_key);
+ if (crp->crp_cipher_key != NULL)
+ g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
+ crypto_freereq(crp);
/*
* All sectors are already encrypted?
*/
@@ -233,11 +236,9 @@
{
struct g_eli_softc *sc;
struct cryptop *crp;
- struct cryptodesc *crd;
u_int i, nsec, secsize;
off_t dstoff;
- size_t size;
- u_char *p, *data;
+ u_char *data;
int error;
G_ELI_LOGREQ(3, bp, "%s", __func__);
@@ -247,71 +248,49 @@
secsize = LIST_FIRST(&sc->sc_geom->provider)->sectorsize;
nsec = bp->bio_length / secsize;
- /*
- * Calculate how much memory do we need.
- * We need separate crypto operation for every single sector.
- * It is much faster to calculate total amount of needed memory here and
- * do the allocation once instead of allocating memory in pieces (many,
- * many pieces).
- */
- size = sizeof(*crp) * nsec;
- size += sizeof(*crd) * nsec;
+ bp->bio_inbed = 0;
+ bp->bio_children = nsec;
+
/*
* If we write the data we cannot destroy current bio_data content,
* so we need to allocate more memory for encrypted data.
*/
- if (bp->bio_cmd == BIO_WRITE)
- size += bp->bio_length;
- p = malloc(size, M_ELI, M_WAITOK);
-
- bp->bio_inbed = 0;
- bp->bio_children = nsec;
- bp->bio_driver2 = p;
-
- if (bp->bio_cmd == BIO_READ)
- data = bp->bio_data;
- else {
- data = p;
- p += bp->bio_length;
+ if (bp->bio_cmd == BIO_WRITE) {
+ data = malloc(bp->bio_length, M_ELI, M_WAITOK);
+ bp->bio_driver2 = data;
bcopy(bp->bio_data, data, bp->bio_length);
- }
+ } else
+ data = bp->bio_data;
for (i = 0, dstoff = bp->bio_offset; i < nsec; i++, dstoff += secsize) {
- crp = (struct cryptop *)p; p += sizeof(*crp);
- crd = (struct cryptodesc *)p; p += sizeof(*crd);
+ crp = crypto_getreq(wr->w_sid, M_WAITOK);
- crp->crp_session = wr->w_sid;
crp->crp_ilen = secsize;
- crp->crp_olen = secsize;
crp->crp_opaque = (void *)bp;
+ crp->crp_buf_type = CRYPTO_BUF_CONTIG;
crp->crp_buf = (void *)data;
data += secsize;
- if (bp->bio_cmd == BIO_WRITE)
+ if (bp->bio_cmd == BIO_WRITE) {
+ crp->crp_op = CRYPTO_OP_ENCRYPT;
crp->crp_callback = g_eli_crypto_write_done;
- else /* if (bp->bio_cmd == BIO_READ) */
+ } else /* if (bp->bio_cmd == BIO_READ) */ {
+ crp->crp_op = CRYPTO_OP_DECRYPT;
crp->crp_callback = g_eli_crypto_read_done;
+ }
crp->crp_flags = CRYPTO_F_CBIFSYNC;
if (g_eli_batch)
crp->crp_flags |= CRYPTO_F_BATCH;
- crp->crp_desc = crd;
- crd->crd_skip = 0;
- crd->crd_len = secsize;
- crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
- if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0)
- crd->crd_flags |= CRD_F_KEY_EXPLICIT;
- if (bp->bio_cmd == BIO_WRITE)
- crd->crd_flags |= CRD_F_ENCRYPT;
- crd->crd_alg = sc->sc_ealgo;
- crd->crd_key = g_eli_key_hold(sc, dstoff, secsize);
- crd->crd_klen = sc->sc_ekeylen;
- if (sc->sc_ealgo == CRYPTO_AES_XTS)
- crd->crd_klen <<= 1;
- g_eli_crypto_ivgen(sc, dstoff, crd->crd_iv,
- sizeof(crd->crd_iv));
- crd->crd_next = NULL;
+ crp->crp_payload_start = 0;
+ crp->crp_payload_length = secsize;
+ crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
+ if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0) {
+ crp->crp_cipher_key = g_eli_key_hold(sc, dstoff,
+ secsize);
+ }
+ g_eli_crypto_ivgen(sc, dstoff, crp->crp_iv,
+ sizeof(crp->crp_iv));
- crp->crp_etype = 0;
error = crypto_dispatch(crp);
KASSERT(error == 0, ("crypto_dispatch() failed (error=%d)",
error));
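As in the integrity path, the per-sector key no longer rides in a cryptodesc: the run loop installs it as crp_cipher_key only when the single-key optimization is off, and the completion side pairs the key drop with crypto_freereq(). The two halves, condensed:

	/* Submit side (g_eli_crypto_run): */
	if ((sc->sc_flags & G_ELI_FLAG_SINGLE_KEY) == 0)
		crp->crp_cipher_key = g_eli_key_hold(sc, dstoff, secsize);

	/* Completion side: */
	if (crp->crp_cipher_key != NULL)
		g_eli_key_drop(sc, __DECONST(void *, crp->crp_cipher_key));
	crypto_freereq(crp);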
Index: sys/kern/subr_bus_dma.c
===================================================================
--- sys/kern/subr_bus_dma.c
+++ sys/kern/subr_bus_dma.c
@@ -54,6 +54,8 @@
#include <cam/cam.h>
#include <cam/cam_ccb.h>
+#include <opencrypto/cryptodev.h>
+
#include <machine/bus.h>
/*
@@ -635,3 +637,52 @@
return (0);
}
+
+int
+bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map, struct cryptop *crp,
+ bus_dmamap_callback_t *callback, void *callback_arg, int flags)
+{
+ bus_dma_segment_t *segs;
+ int error;
+ int nsegs;
+
+ flags |= BUS_DMA_NOWAIT;
+ nsegs = -1;
+ error = 0;
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_CONTIG:
+ error = _bus_dmamap_load_buffer(dmat, map, crp->crp_buf,
+ crp->crp_ilen, kernel_pmap, flags, NULL, &nsegs);
+ break;
+ case CRYPTO_BUF_MBUF:
+ error = _bus_dmamap_load_mbuf_sg(dmat, map, crp->crp_mbuf,
+ NULL, &nsegs, flags);
+ break;
+ case CRYPTO_BUF_UIO:
+ error = _bus_dmamap_load_uio(dmat, map, crp->crp_uio, &nsegs,
+ flags);
+ break;
+ }
+ nsegs++;
+
+ CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
+ __func__, dmat, flags, error, nsegs);
+
+ if (error == EINPROGRESS)
+ return (error);
+
+ segs = _bus_dmamap_complete(dmat, map, NULL, nsegs, error);
+ if (error)
+ (*callback)(callback_arg, segs, 0, error);
+ else
+ (*callback)(callback_arg, segs, nsegs, 0);
+
+ /*
+ * Return ENOMEM to the caller so that it can pass it up the stack.
+ * This error only happens when NOWAIT is set, so deferral is disabled.
+ */
+ if (error == ENOMEM)
+ return (error);
+
+ return (0);
+}
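bus_dmamap_load_crp() gives DMA drivers a single entry point that dispatches on crp_buf_type; it forces BUS_DMA_NOWAIT, so a load never defers and ENOMEM comes straight back to the caller. A hedged usage sketch with hypothetical driver names (drv_req, q_map, q_nsegs):

	static void
	drv_load_cb(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
	{
		struct drv_req *q = arg;	/* hypothetical per-request state */

		if (error != 0)
			return;			/* submit path sees the error */
		q->q_nsegs = nsegs;
		/* ... program the engine from segs[0 .. nsegs - 1] ... */
	}

	/* In the submit path: */
	error = bus_dmamap_load_crp(sc->sc_dmat, q->q_map, crp,
	    drv_load_cb, q, 0);
	if (error != 0)
		return (error);	/* ENOMEM is the only expected failure here */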
Index: sys/kern/uipc_ktls.c
===================================================================
--- sys/kern/uipc_ktls.c
+++ sys/kern/uipc_ktls.c
@@ -437,9 +437,12 @@
*/
switch (en->auth_algorithm) {
case 0:
+#ifdef COMPAT_FREEBSD12
+ /* XXX: Really 13.0-current COMPAT. */
case CRYPTO_AES_128_NIST_GMAC:
case CRYPTO_AES_192_NIST_GMAC:
case CRYPTO_AES_256_NIST_GMAC:
+#endif
break;
default:
return (EINVAL);
Index: sys/kgssapi/krb5/kcrypto_aes.c
===================================================================
--- sys/kgssapi/krb5/kcrypto_aes.c
+++ sys/kgssapi/krb5/kcrypto_aes.c
@@ -77,7 +77,7 @@
{
void *kp = ks->ks_key;
struct aes_state *as = ks->ks_priv;
- struct cryptoini cri;
+ struct crypto_session_params csp;
if (kp != in)
bcopy(in, kp, ks->ks_class->ec_keylen);
@@ -90,22 +90,22 @@
/*
* We only want the first 96 bits of the HMAC.
*/
- bzero(&cri, sizeof(cri));
- cri.cri_alg = CRYPTO_SHA1_HMAC;
- cri.cri_klen = ks->ks_class->ec_keybits;
- cri.cri_mlen = 12;
- cri.cri_key = ks->ks_key;
- cri.cri_next = NULL;
- crypto_newsession(&as->as_session_sha1, &cri,
+ memset(&csp, 0, sizeof(csp));
+ csp.csp_mode = CSP_MODE_DIGEST;
+ csp.csp_auth_alg = CRYPTO_SHA1_HMAC;
+ csp.csp_auth_klen = ks->ks_class->ec_keybits / 8;
+ csp.csp_auth_mlen = 12;
+ csp.csp_auth_key = ks->ks_key;
+ crypto_newsession(&as->as_session_sha1, &csp,
CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
- bzero(&cri, sizeof(cri));
- cri.cri_alg = CRYPTO_AES_CBC;
- cri.cri_klen = ks->ks_class->ec_keybits;
- cri.cri_mlen = 0;
- cri.cri_key = ks->ks_key;
- cri.cri_next = NULL;
- crypto_newsession(&as->as_session_aes, &cri,
+ memset(&csp, 0, sizeof(csp));
+ csp.csp_mode = CSP_MODE_CIPHER;
+ csp.csp_cipher_alg = CRYPTO_AES_CBC;
+ csp.csp_cipher_klen = ks->ks_class->ec_keybits / 8;
+ csp.csp_cipher_key = ks->ks_key;
+ csp.csp_ivlen = 16;
+ crypto_newsession(&as->as_session_aes, &csp,
CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
}
@@ -138,31 +138,27 @@
static void
aes_encrypt_1(const struct krb5_key_state *ks, int buftype, void *buf,
- size_t skip, size_t len, void *ivec, int encdec)
+ size_t skip, size_t len, void *ivec, bool encrypt)
{
struct aes_state *as = ks->ks_priv;
struct cryptop *crp;
- struct cryptodesc *crd;
int error;
- crp = crypto_getreq(1);
- crd = crp->crp_desc;
+ crp = crypto_getreq(as->as_session_aes, M_WAITOK);
- crd->crd_skip = skip;
- crd->crd_len = len;
- crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT | encdec;
+ crp->crp_payload_start = skip;
+ crp->crp_payload_length = len;
+ crp->crp_op = encrypt ? CRYPTO_OP_ENCRYPT : CRYPTO_OP_DECRYPT;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_IV_SEPARATE;
if (ivec) {
- bcopy(ivec, crd->crd_iv, 16);
+ memcpy(crp->crp_iv, ivec, 16);
} else {
- bzero(crd->crd_iv, 16);
+ memset(crp->crp_iv, 0, 16);
}
- crd->crd_next = NULL;
- crd->crd_alg = CRYPTO_AES_CBC;
- crp->crp_session = as->as_session_aes;
- crp->crp_flags = buftype | CRYPTO_F_CBIFSYNC;
+ crp->crp_buf_type = buftype;
crp->crp_buf = buf;
- crp->crp_opaque = (void *) as;
+ crp->crp_opaque = as;
crp->crp_callback = aes_crypto_cb;
error = crypto_dispatch(crp);
@@ -204,16 +200,16 @@
/*
* Note: caller will ensure len >= blocklen.
*/
- aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec,
- CRD_F_ENCRYPT);
+ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len, ivec,
+ true);
} else if (plen == 0) {
/*
* This is equivalent to CBC mode followed by swapping
* the last two blocks. We assume that neither of the
* last two blocks cross iov boundaries.
*/
- aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec,
- CRD_F_ENCRYPT);
+ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len, ivec,
+ true);
off = skip + len - 2 * blocklen;
m_copydata(inout, off, 2 * blocklen, (void*) &last2);
m_copyback(inout, off, blocklen, last2.cn);
@@ -227,8 +223,8 @@
* the encrypted versions of the last two blocks, we
* reshuffle to create the final result.
*/
- aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len - plen,
- ivec, CRD_F_ENCRYPT);
+ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len - plen,
+ ivec, true);
/*
* Copy out the last two blocks, pad the last block
@@ -241,8 +237,8 @@
m_copydata(inout, off, blocklen + plen, (void*) &last2);
for (i = plen; i < blocklen; i++)
last2.cn[i] = 0;
- aes_encrypt_1(ks, 0, last2.cn, 0, blocklen, last2.cn_1,
- CRD_F_ENCRYPT);
+ aes_encrypt_1(ks, CRYPTO_BUF_CONTIG, last2.cn, 0, blocklen,
+ last2.cn_1, true);
m_copyback(inout, off, blocklen, last2.cn);
m_copyback(inout, off + blocklen, plen, last2.cn_1);
}
@@ -274,7 +270,8 @@
/*
* Note: caller will ensure len >= blocklen.
*/
- aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, 0);
+ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len, ivec,
+ false);
} else if (plen == 0) {
/*
* This is equivalent to CBC mode followed by swapping
@@ -284,7 +281,8 @@
m_copydata(inout, off, 2 * blocklen, (void*) &last2);
m_copyback(inout, off, blocklen, last2.cn);
m_copyback(inout, off + blocklen, blocklen, last2.cn_1);
- aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, 0);
+ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len, ivec,
+ false);
} else {
/*
* This is the difficult case. We first decrypt the
@@ -298,8 +296,8 @@
* decrypted with the rest in CBC mode.
*/
off = skip + len - plen - blocklen;
- aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, off, blocklen,
- NULL, 0);
+ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, off, blocklen,
+ NULL, false);
m_copydata(inout, off, blocklen + plen, (void*) &last2);
for (i = 0; i < plen; i++) {
@@ -309,8 +307,8 @@
}
m_copyback(inout, off, blocklen + plen, (void*) &last2);
- aes_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len - plen,
- ivec, 0);
+ aes_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len - plen,
+ ivec, false);
}
}
@@ -321,26 +319,17 @@
{
struct aes_state *as = ks->ks_priv;
struct cryptop *crp;
- struct cryptodesc *crd;
int error;
- crp = crypto_getreq(1);
- crd = crp->crp_desc;
+ crp = crypto_getreq(as->as_session_sha1, M_WAITOK);
- crd->crd_skip = skip;
- crd->crd_len = inlen;
- crd->crd_inject = skip + inlen;
- crd->crd_flags = 0;
- crd->crd_next = NULL;
- crd->crd_alg = CRYPTO_SHA1_HMAC;
-
- crp->crp_session = as->as_session_sha1;
- crp->crp_ilen = inlen;
- crp->crp_olen = 12;
- crp->crp_etype = 0;
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
- crp->crp_buf = (void *) inout;
- crp->crp_opaque = (void *) as;
+ crp->crp_payload_start = skip;
+ crp->crp_payload_length = inlen;
+ crp->crp_digest_start = skip + inlen;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC;
+ crp->crp_buf_type = CRYPTO_BUF_MBUF;
+ crp->crp_mbuf = inout;
+ crp->crp_opaque = as;
crp->crp_callback = aes_crypto_cb;
error = crypto_dispatch(crp);
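The krb5 AES glue now names the buffer type explicitly instead of encoding it in crp_flags (the old CRYPTO_F_IMBUF): the same helper handles an mbuf chain for the bulk data and a flat on-stack block for the CTS tail. Sketch of the two shapes, as used above:

	/* Mbuf chain (bulk data): */
	crp->crp_buf_type = CRYPTO_BUF_MBUF;
	crp->crp_mbuf = m;

	/* Flat buffer (e.g. the last-two-blocks scratch area): */
	crp->crp_buf_type = CRYPTO_BUF_CONTIG;
	crp->crp_buf = buf;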
Index: sys/kgssapi/krb5/kcrypto_des.c
===================================================================
--- sys/kgssapi/krb5/kcrypto_des.c
+++ sys/kgssapi/krb5/kcrypto_des.c
@@ -78,25 +78,24 @@
static void
des1_set_key(struct krb5_key_state *ks, const void *in)
{
+ struct crypto_session_params csp;
void *kp = ks->ks_key;
struct des1_state *ds = ks->ks_priv;
- struct cryptoini cri[1];
+
+ if (ds->ds_session)
+ crypto_freesession(ds->ds_session);
if (kp != in)
bcopy(in, kp, ks->ks_class->ec_keylen);
- if (ds->ds_session)
- crypto_freesession(ds->ds_session);
+ memset(&csp, 0, sizeof(csp));
+ csp.csp_mode = CSP_MODE_CIPHER;
+ csp.csp_ivlen = 8;
+ csp.csp_cipher_alg = CRYPTO_DES_CBC;
+ csp.csp_cipher_klen = 8;
+ csp.csp_cipher_key = ks->ks_key;
- bzero(cri, sizeof(cri));
-
- cri[0].cri_alg = CRYPTO_DES_CBC;
- cri[0].cri_klen = 64;
- cri[0].cri_mlen = 0;
- cri[0].cri_key = ks->ks_key;
- cri[0].cri_next = NULL;
-
- crypto_newsession(&ds->ds_session, cri,
+ crypto_newsession(&ds->ds_session, &csp,
CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
}
@@ -163,32 +162,27 @@
}
static void
-des1_encrypt_1(const struct krb5_key_state *ks, int buftype, void *buf,
- size_t skip, size_t len, void *ivec, int encdec)
+des1_encrypt_1(const struct krb5_key_state *ks, int buf_type, void *buf,
+ size_t skip, size_t len, void *ivec, bool encrypt)
{
struct des1_state *ds = ks->ks_priv;
struct cryptop *crp;
- struct cryptodesc *crd;
int error;
- crp = crypto_getreq(1);
- crd = crp->crp_desc;
+ crp = crypto_getreq(ds->ds_session, M_WAITOK);
- crd->crd_skip = skip;
- crd->crd_len = len;
- crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT | encdec;
+ crp->crp_payload_start = skip;
+ crp->crp_payload_length = len;
+ crp->crp_op = encrypt ? CRYPTO_OP_ENCRYPT : CRYPTO_OP_DECRYPT;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_IV_SEPARATE;
if (ivec) {
- bcopy(ivec, crd->crd_iv, 8);
+ memcpy(crp->crp_iv, ivec, 8);
} else {
- bzero(crd->crd_iv, 8);
+ memset(crp->crp_iv, 0, 8);
}
- crd->crd_next = NULL;
- crd->crd_alg = CRYPTO_DES_CBC;
-
- crp->crp_session = ds->ds_session;
- crp->crp_flags = buftype | CRYPTO_F_CBIFSYNC;
+ crp->crp_buf_type = buf_type;
crp->crp_buf = buf;
- crp->crp_opaque = (void *) ds;
+ crp->crp_opaque = ds;
crp->crp_callback = des1_crypto_cb;
error = crypto_dispatch(crp);
@@ -208,8 +202,7 @@
size_t skip, size_t len, void *ivec, size_t ivlen)
{
- des1_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec,
- CRD_F_ENCRYPT);
+ des1_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len, ivec, true);
}
static void
@@ -217,7 +210,7 @@
size_t skip, size_t len, void *ivec, size_t ivlen)
{
- des1_encrypt_1(ks, CRYPTO_F_IMBUF, inout, skip, len, ivec, 0);
+ des1_encrypt_1(ks, CRYPTO_BUF_MBUF, inout, skip, len, ivec, false);
}
static int
@@ -244,7 +237,7 @@
m_apply(inout, skip, inlen, MD5Update_int, &md5);
MD5Final(hash, &md5);
- des1_encrypt_1(ks, 0, hash, 0, 16, NULL, CRD_F_ENCRYPT);
+ des1_encrypt_1(ks, CRYPTO_BUF_CONTIG, hash, 0, 16, NULL, true);
m_copyback(inout, skip + inlen, outlen, hash + 8);
}
Index: sys/kgssapi/krb5/kcrypto_des3.c
===================================================================
--- sys/kgssapi/krb5/kcrypto_des3.c
+++ sys/kgssapi/krb5/kcrypto_des3.c
@@ -48,7 +48,8 @@
struct des3_state {
struct mtx ds_lock;
- crypto_session_t ds_session;
+ crypto_session_t ds_cipher_session;
+ crypto_session_t ds_hmac_session;
};
static void
@@ -69,8 +70,10 @@
{
struct des3_state *ds = ks->ks_priv;
- if (ds->ds_session)
- crypto_freesession(ds->ds_session);
+ if (ds->ds_cipher_session) {
+ crypto_freesession(ds->ds_cipher_session);
+ crypto_freesession(ds->ds_hmac_session);
+ }
mtx_destroy(&ds->ds_lock);
free(ks->ks_priv, M_GSSAPI);
}
@@ -78,31 +81,35 @@
static void
des3_set_key(struct krb5_key_state *ks, const void *in)
{
+ struct crypto_session_params csp;
void *kp = ks->ks_key;
struct des3_state *ds = ks->ks_priv;
- struct cryptoini cri[2];
+
+ if (ds->ds_cipher_session) {
+ crypto_freesession(ds->ds_cipher_session);
+ crypto_freesession(ds->ds_hmac_session);
+ }
if (kp != in)
bcopy(in, kp, ks->ks_class->ec_keylen);
- if (ds->ds_session)
- crypto_freesession(ds->ds_session);
+ memset(&csp, 0, sizeof(csp));
+ csp.csp_mode = CSP_MODE_DIGEST;
+ csp.csp_auth_alg = CRYPTO_SHA1_HMAC;
+ csp.csp_auth_klen = 24;
+ csp.csp_auth_key = ks->ks_key;
- bzero(cri, sizeof(cri));
+ crypto_newsession(&ds->ds_hmac_session, &csp,
+ CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
- cri[0].cri_alg = CRYPTO_SHA1_HMAC;
- cri[0].cri_klen = 192;
- cri[0].cri_mlen = 0;
- cri[0].cri_key = ks->ks_key;
- cri[0].cri_next = &cri[1];
+ memset(&csp, 0, sizeof(csp));
+ csp.csp_mode = CSP_MODE_CIPHER;
+ csp.csp_cipher_alg = CRYPTO_3DES_CBC;
+ csp.csp_cipher_klen = 24;
+ csp.csp_cipher_key = ks->ks_key;
+ csp.csp_ivlen = 8;
- cri[1].cri_alg = CRYPTO_3DES_CBC;
- cri[1].cri_klen = 192;
- cri[1].cri_mlen = 0;
- cri[1].cri_key = ks->ks_key;
- cri[1].cri_next = NULL;
-
- crypto_newsession(&ds->ds_session, cri,
+ crypto_newsession(&ds->ds_cipher_session, &csp,
CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);
}
@@ -158,7 +165,7 @@
int error;
struct des3_state *ds = (struct des3_state *) crp->crp_opaque;
- if (crypto_ses2caps(ds->ds_session) & CRYPTOCAP_F_SYNC)
+ if (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)
return (0);
error = crp->crp_etype;
@@ -174,36 +181,31 @@
static void
des3_encrypt_1(const struct krb5_key_state *ks, struct mbuf *inout,
- size_t skip, size_t len, void *ivec, int encdec)
+ size_t skip, size_t len, void *ivec, bool encrypt)
{
struct des3_state *ds = ks->ks_priv;
struct cryptop *crp;
- struct cryptodesc *crd;
int error;
- crp = crypto_getreq(1);
- crd = crp->crp_desc;
+ crp = crypto_getreq(ds->ds_cipher_session, M_WAITOK);
- crd->crd_skip = skip;
- crd->crd_len = len;
- crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT | encdec;
+ crp->crp_payload_start = skip;
+ crp->crp_payload_length = len;
+ crp->crp_op = encrypt ? CRYPTO_OP_ENCRYPT : CRYPTO_OP_DECRYPT;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_IV_SEPARATE;
if (ivec) {
- bcopy(ivec, crd->crd_iv, 8);
+ memcpy(crp->crp_iv, ivec, 8);
} else {
- bzero(crd->crd_iv, 8);
+ memset(crp->crp_iv, 0, 8);
}
- crd->crd_next = NULL;
- crd->crd_alg = CRYPTO_3DES_CBC;
-
- crp->crp_session = ds->ds_session;
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
- crp->crp_buf = (void *) inout;
- crp->crp_opaque = (void *) ds;
+ crp->crp_buf_type = CRYPTO_BUF_MBUF;
+ crp->crp_mbuf = inout;
+ crp->crp_opaque = ds;
crp->crp_callback = des3_crypto_cb;
error = crypto_dispatch(crp);
- if ((crypto_ses2caps(ds->ds_session) & CRYPTOCAP_F_SYNC) == 0) {
+ if ((crypto_ses2caps(ds->ds_cipher_session) & CRYPTOCAP_F_SYNC) == 0) {
mtx_lock(&ds->ds_lock);
if (!error && !(crp->crp_flags & CRYPTO_F_DONE))
error = msleep(crp, &ds->ds_lock, 0, "gssdes3", 0);
@@ -218,7 +220,7 @@
size_t skip, size_t len, void *ivec, size_t ivlen)
{
- des3_encrypt_1(ks, inout, skip, len, ivec, CRD_F_ENCRYPT);
+ des3_encrypt_1(ks, inout, skip, len, ivec, true);
}
static void
@@ -226,7 +228,7 @@
size_t skip, size_t len, void *ivec, size_t ivlen)
{
- des3_encrypt_1(ks, inout, skip, len, ivec, 0);
+ des3_encrypt_1(ks, inout, skip, len, ivec, false);
}
static void
@@ -235,31 +237,23 @@
{
struct des3_state *ds = ks->ks_priv;
struct cryptop *crp;
- struct cryptodesc *crd;
int error;
- crp = crypto_getreq(1);
- crd = crp->crp_desc;
+ crp = crypto_getreq(ds->ds_hmac_session, M_WAITOK);
- crd->crd_skip = skip;
- crd->crd_len = inlen;
- crd->crd_inject = skip + inlen;
- crd->crd_flags = 0;
- crd->crd_next = NULL;
- crd->crd_alg = CRYPTO_SHA1_HMAC;
-
- crp->crp_session = ds->ds_session;
- crp->crp_ilen = inlen;
- crp->crp_olen = 20;
- crp->crp_etype = 0;
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
- crp->crp_buf = (void *) inout;
- crp->crp_opaque = (void *) ds;
+ crp->crp_payload_start = skip;
+ crp->crp_payload_length = inlen;
+ crp->crp_digest_start = skip + inlen;
+ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC;
+ crp->crp_buf_type = CRYPTO_BUF_MBUF;
+ crp->crp_mbuf = inout;
+ crp->crp_opaque = ds;
crp->crp_callback = des3_crypto_cb;
error = crypto_dispatch(crp);
- if ((crypto_ses2caps(ds->ds_session) & CRYPTOCAP_F_SYNC) == 0) {
+ if ((crypto_ses2caps(ds->ds_hmac_session) & CRYPTOCAP_F_SYNC) == 0) {
mtx_lock(&ds->ds_lock);
if (!error && !(crp->crp_flags & CRYPTO_F_DONE))
error = msleep(crp, &ds->ds_lock, 0, "gssdes3", 0);
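Splitting the chained two-entry cryptoini into ds_cipher_session and ds_hmac_session means each request is allocated against the session matching its operation, and the shared callback must consult the request's own session for the sync check. Condensed sketch:

	/* Cipher ops bind to the cipher session: */
	crp = crypto_getreq(ds->ds_cipher_session, M_WAITOK);
	crp->crp_op = encrypt ? CRYPTO_OP_ENCRYPT : CRYPTO_OP_DECRYPT;

	/* Digest ops bind to the HMAC session: */
	crp = crypto_getreq(ds->ds_hmac_session, M_WAITOK);
	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;

	/* The callback can no longer assume a single session: */
	if (crypto_ses2caps(crp->crp_session) & CRYPTOCAP_F_SYNC)
		return (0);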
Index: sys/mips/cavium/cryptocteon/cavium_crypto.c
===================================================================
--- sys/mips/cavium/cryptocteon/cavium_crypto.c
+++ sys/mips/cavium/cryptocteon/cavium_crypto.c
@@ -328,7 +328,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
uint64_t *data;
int data_i, data_l;
@@ -339,8 +339,8 @@
(crypt_off & 0x7) || (crypt_off + crypt_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -387,7 +387,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
uint64_t *data;
int data_i, data_l;
@@ -398,8 +398,8 @@
(crypt_off & 0x7) || (crypt_off + crypt_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -447,7 +447,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
uint64_t *data, *pdata;
int data_i, data_l;
@@ -458,8 +458,8 @@
(crypt_off & 0x7) || (crypt_off + crypt_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -516,7 +516,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
uint64_t *data, *pdata;
int data_i, data_l;
@@ -527,8 +527,8 @@
(crypt_off & 0x7) || (crypt_off + crypt_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -587,7 +587,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
int next = 0;
uint64_t *data;
@@ -600,8 +600,8 @@
(auth_off & 0x7) || (auth_off + auth_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -667,13 +667,9 @@
CVMX_MT_HSH_STARTMD5(tmp1);
/* save the HMAC */
- IOV_INIT(iov, data, data_i, data_l);
- while (icv_off > 0) {
- IOV_CONSUME(iov, data, data_i, data_l);
- icv_off -= 8;
- }
+ data = (uint64_t *)icv;
CVMX_MF_HSH_IV(*data, 0);
- IOV_CONSUME(iov, data, data_i, data_l);
+ data++;
CVMX_MF_HSH_IV(tmp1, 1);
*(uint32_t *)data = (uint32_t) (tmp1 >> 32);
@@ -689,7 +685,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
int next = 0;
uint64_t *data;
@@ -702,8 +698,8 @@
(auth_off & 0x7) || (auth_off + auth_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -772,13 +768,9 @@
CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
/* save the HMAC */
- IOV_INIT(iov, data, data_i, data_l);
- while (icv_off > 0) {
- IOV_CONSUME(iov, data, data_i, data_l);
- icv_off -= 8;
- }
+ data = (uint64_t *)icv;
CVMX_MF_HSH_IV(*data, 0);
- IOV_CONSUME(iov, data, data_i, data_l);
+ data++;
CVMX_MF_HSH_IV(tmp1, 1);
*(uint32_t *)data = (uint32_t) (tmp1 >> 32);
@@ -794,7 +786,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
int next = 0;
union {
@@ -815,8 +807,8 @@
(auth_off & 0x3) || (auth_off + auth_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -920,16 +912,12 @@
CVMX_MT_HSH_STARTMD5(tmp1);
/* save the HMAC */
- IOV_INIT(iov, data32, data_i, data_l);
- while (icv_off > 0) {
- IOV_CONSUME(iov, data32, data_i, data_l);
- icv_off -= 4;
- }
+ data32 = (uint32_t *)icv;
CVMX_MF_HSH_IV(tmp1, 0);
*data32 = (uint32_t) (tmp1 >> 32);
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
*data32 = (uint32_t) tmp1;
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
CVMX_MF_HSH_IV(tmp1, 1);
*data32 = (uint32_t) (tmp1 >> 32);
@@ -942,7 +930,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
int next = 0;
union {
@@ -963,8 +951,8 @@
(auth_off & 0x3) || (auth_off + auth_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -1068,16 +1056,12 @@
CVMX_MT_HSH_STARTMD5(tmp1);
/* save the HMAC */
- IOV_INIT(iov, data32, data_i, data_l);
- while (icv_off > 0) {
- IOV_CONSUME(iov, data32, data_i, data_l);
- icv_off -= 4;
- }
+ data32 = (uint32_t *)icv;
CVMX_MF_HSH_IV(tmp1, 0);
*data32 = (uint32_t) (tmp1 >> 32);
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
*data32 = (uint32_t) tmp1;
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
CVMX_MF_HSH_IV(tmp1, 1);
*data32 = (uint32_t) (tmp1 >> 32);
@@ -1093,7 +1077,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
int next = 0;
union {
@@ -1114,8 +1098,8 @@
(auth_off & 0x3) || (auth_off + auth_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -1222,16 +1206,12 @@
CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
/* save the HMAC */
- IOV_INIT(iov, data32, data_i, data_l);
- while (icv_off > 0) {
- IOV_CONSUME(iov, data32, data_i, data_l);
- icv_off -= 4;
- }
+ data32 = (uint32_t *)icv;
CVMX_MF_HSH_IV(tmp1, 0);
*data32 = (uint32_t) (tmp1 >> 32);
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
*data32 = (uint32_t) tmp1;
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
CVMX_MF_HSH_IV(tmp1, 1);
*data32 = (uint32_t) (tmp1 >> 32);
@@ -1244,7 +1224,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
int next = 0;
union {
@@ -1265,8 +1245,8 @@
(auth_off & 0x3) || (auth_off + auth_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -1372,16 +1352,12 @@
CVMX_MT_HSH_DATZ(6);
CVMX_MT_HSH_STARTSHA((uint64_t) ((64 + 20) << 3));
/* save the HMAC */
- IOV_INIT(iov, data32, data_i, data_l);
- while (icv_off > 0) {
- IOV_CONSUME(iov, data32, data_i, data_l);
- icv_off -= 4;
- }
+ data32 = (uint32_t *)icv;
CVMX_MF_HSH_IV(tmp1, 0);
*data32 = (uint32_t) (tmp1 >> 32);
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
*data32 = (uint32_t) tmp1;
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
CVMX_MF_HSH_IV(tmp1, 1);
*data32 = (uint32_t) (tmp1 >> 32);
@@ -1397,7 +1373,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
int next = 0;
union {
@@ -1419,8 +1395,8 @@
(auth_off & 0x3) || (auth_off + auth_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -1552,16 +1528,12 @@
CVMX_MT_HSH_STARTMD5(tmp1);
/* save the HMAC */
- IOV_INIT(iov, data32, data_i, data_l);
- while (icv_off > 0) {
- IOV_CONSUME(iov, data32, data_i, data_l);
- icv_off -= 4;
- }
+ data32 = (uint32_t *)icv;
CVMX_MF_HSH_IV(tmp1, 0);
*data32 = (uint32_t) (tmp1 >> 32);
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
*data32 = (uint32_t) tmp1;
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
CVMX_MF_HSH_IV(tmp1, 1);
*data32 = (uint32_t) (tmp1 >> 32);
@@ -1574,7 +1546,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
int next = 0;
union {
@@ -1596,8 +1568,8 @@
(auth_off & 0x3) || (auth_off + auth_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -1725,16 +1697,12 @@
CVMX_MT_HSH_STARTMD5(tmp1);
/* save the HMAC */
- IOV_INIT(iov, data32, data_i, data_l);
- while (icv_off > 0) {
- IOV_CONSUME(iov, data32, data_i, data_l);
- icv_off -= 4;
- }
+ data32 = (uint32_t *)icv;
CVMX_MF_HSH_IV(tmp1, 0);
*data32 = (uint32_t) (tmp1 >> 32);
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
*data32 = (uint32_t) tmp1;
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
CVMX_MF_HSH_IV(tmp1, 1);
*data32 = (uint32_t) (tmp1 >> 32);
@@ -1750,7 +1718,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
int next = 0;
union {
@@ -1772,8 +1740,8 @@
(auth_off & 0x3) || (auth_off + auth_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -1924,16 +1892,12 @@
#endif
/* save the HMAC */
- IOV_INIT(iov, data32, data_i, data_l);
- while (icv_off > 0) {
- IOV_CONSUME(iov, data32, data_i, data_l);
- icv_off -= 4;
- }
+ data32 = (uint32_t *)icv;
CVMX_MF_HSH_IV(tmp1, 0);
*data32 = (uint32_t) (tmp1 >> 32);
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
*data32 = (uint32_t) tmp1;
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
CVMX_MF_HSH_IV(tmp1, 1);
*data32 = (uint32_t) (tmp1 >> 32);
@@ -1946,7 +1910,7 @@
struct iovec *iov, size_t iovcnt, size_t iovlen,
int auth_off, int auth_len,
int crypt_off, int crypt_len,
- int icv_off, uint8_t *ivp)
+ uint8_t *icv, uint8_t *ivp)
{
int next = 0;
union {
@@ -1968,8 +1932,8 @@
(auth_off & 0x3) || (auth_off + auth_len > iovlen))) {
dprintf("%s: Bad parameters od=%p iov=%p iovlen=%jd "
"auth_off=%d auth_len=%d crypt_off=%d crypt_len=%d "
- "icv_off=%d ivp=%p\n", __func__, od, iov, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ "icv=%p ivp=%p\n", __func__, od, iov, iovlen,
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
return -EINVAL;
}
@@ -2119,16 +2083,12 @@
#endif
/* save the HMAC */
- IOV_INIT(iov, data32, data_i, data_l);
- while (icv_off > 0) {
- IOV_CONSUME(iov, data32, data_i, data_l);
- icv_off -= 4;
- }
+ data32 = (uint32_t *)icv;
CVMX_MF_HSH_IV(tmp1, 0);
*data32 = (uint32_t) (tmp1 >> 32);
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
*data32 = (uint32_t) tmp1;
- IOV_CONSUME(iov, data32, data_i, data_l);
+ data32++;
CVMX_MF_HSH_IV(tmp1, 1);
*data32 = (uint32_t) (tmp1 >> 32);
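Replacing icv_off with an icv pointer removes the IOV_INIT/IOV_CONSUME walk that previously located the HMAC slot in the iovec chain; the CVMX register reads are unchanged and now store straight through the pointer. Condensed store sequence for a 96-bit truncated HMAC, as used throughout this file:

	uint64_t tmp1;
	uint32_t *data32 = (uint32_t *)icv;

	CVMX_MF_HSH_IV(tmp1, 0);		/* first 64 bits */
	*data32++ = (uint32_t)(tmp1 >> 32);
	*data32++ = (uint32_t)tmp1;
	CVMX_MF_HSH_IV(tmp1, 1);		/* last 32 bits of the 96 */
	*data32 = (uint32_t)(tmp1 >> 32);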
Index: sys/mips/cavium/cryptocteon/cryptocteon.c
===================================================================
--- sys/mips/cavium/cryptocteon/cryptocteon.c
+++ sys/mips/cavium/cryptocteon/cryptocteon.c
@@ -59,7 +59,10 @@
static int cryptocteon_attach(device_t);
static int cryptocteon_process(device_t, struct cryptop *, int);
-static int cryptocteon_newsession(device_t, crypto_session_t, struct cryptoini *);
+static int cryptocteon_probesession(device_t,
+ const struct crypto_session_params *);
+static int cryptocteon_newsession(device_t, crypto_session_t,
+ const struct crypto_session_params *);
static void
cryptocteon_identify(driver_t *drv, device_t parent)
@@ -89,168 +92,187 @@
return (ENXIO);
}
- crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0);
- crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0);
-
return (0);
}
-/*
- * Generate a new octo session. We artifically limit it to a single
- * hash/cipher or hash-cipher combo just to make it easier, most callers
- * do not expect more than this anyway.
- */
+static bool
+cryptocteon_auth_supported(const struct crypto_session_params *csp)
+{
+ u_int hash_len;
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ hash_len = MD5_HASH_LEN;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ hash_len = SHA1_HASH_LEN;
+ break;
+ default:
+ return (false);
+ }
+
+ if (csp->csp_auth_klen > hash_len)
+ return (false);
+ return (true);
+}
+
+static bool
+cryptocteon_cipher_supported(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ if (csp->csp_ivlen != 8)
+ return (false);
+ if (csp->csp_cipher_klen != 8 &&
+ csp->csp_cipher_klen != 24)
+ return (false);
+ break;
+ case CRYPTO_AES_CBC:
+ if (csp->csp_ivlen != 16)
+ return (false);
+ if (csp->csp_cipher_klen != 16 &&
+ csp->csp_cipher_klen != 24 &&
+ csp->csp_cipher_klen != 32)
+ return (false);
+ break;
+ default:
+ return (false);
+ }
+
+ return (true);
+}
+
+static int
+cryptocteon_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ if (!cryptocteon_auth_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!cryptocteon_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_ETA:
+ if (!cryptocteon_auth_supported(csp) ||
+ !cryptocteon_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (CRYPTODEV_PROBE_ACCEL_SOFTWARE);
+}
+
+static void
+cryptocteon_calc_hash(const struct crypto_session_params *csp, const char *key,
+ struct octo_sess *ocd)
+{
+ char hash_key[SHA1_HASH_LEN];
+
+ memset(hash_key, 0, sizeof(hash_key));
+ memcpy(hash_key, key, csp->csp_auth_klen);
+ octo_calc_hash(csp->csp_auth_alg == CRYPTO_SHA1_HMAC, hash_key,
+ ocd->octo_hminner, ocd->octo_hmouter);
+}
+
+/* Generate a new octo session. */
static int
cryptocteon_newsession(device_t dev, crypto_session_t cses,
- struct cryptoini *cri)
+ const struct crypto_session_params *csp)
{
- struct cryptoini *c, *encini = NULL, *macini = NULL;
struct cryptocteon_softc *sc;
struct octo_sess *ocd;
- int i;
sc = device_get_softc(dev);
- if (cri == NULL || sc == NULL)
- return (EINVAL);
-
- /*
- * To keep it simple, we only handle hash, cipher or hash/cipher in a
- * session, you cannot currently do multiple ciphers/hashes in one
- * session even though it would be possibel to code this driver to
- * handle it.
- */
- for (i = 0, c = cri; c && i < 2; i++) {
- if (c->cri_alg == CRYPTO_MD5_HMAC ||
- c->cri_alg == CRYPTO_SHA1_HMAC ||
- c->cri_alg == CRYPTO_NULL_HMAC) {
- if (macini) {
- break;
- }
- macini = c;
- }
- if (c->cri_alg == CRYPTO_DES_CBC ||
- c->cri_alg == CRYPTO_3DES_CBC ||
- c->cri_alg == CRYPTO_AES_CBC ||
- c->cri_alg == CRYPTO_NULL_CBC) {
- if (encini) {
- break;
- }
- encini = c;
- }
- c = c->cri_next;
- }
- if (!macini && !encini) {
- dprintf("%s,%d - EINVAL bad cipher/hash or combination\n",
- __FILE__, __LINE__);
- return EINVAL;
- }
- if (c) {
- dprintf("%s,%d - EINVAL cannot handle chained cipher/hash combos\n",
- __FILE__, __LINE__);
- return EINVAL;
- }
-
- /*
- * So we have something we can do, lets setup the session
- */
ocd = crypto_get_driver_session(cses);
- if (encini && encini->cri_key) {
- ocd->octo_encklen = (encini->cri_klen + 7) / 8;
- memcpy(ocd->octo_enckey, encini->cri_key, ocd->octo_encklen);
- }
-
- if (macini && macini->cri_key) {
- ocd->octo_macklen = (macini->cri_klen + 7) / 8;
- memcpy(ocd->octo_mackey, macini->cri_key, ocd->octo_macklen);
- }
+ ocd->octo_encklen = csp->csp_cipher_klen;
+ if (csp->csp_cipher_key != NULL)
+ memcpy(ocd->octo_enckey, csp->csp_cipher_key,
+ ocd->octo_encklen);
- ocd->octo_mlen = 0;
- if (encini && encini->cri_mlen)
- ocd->octo_mlen = encini->cri_mlen;
- else if (macini && macini->cri_mlen)
- ocd->octo_mlen = macini->cri_mlen;
- else
- ocd->octo_mlen = 12;
+ if (csp->csp_auth_key != NULL)
+ cryptocteon_calc_hash(csp, csp->csp_auth_key, ocd);
- /*
- * point c at the enc if it exists, otherwise the mac
- */
- c = encini ? encini : macini;
+ ocd->octo_mlen = csp->csp_auth_mlen;
+ if (csp->csp_auth_mlen == 0) {
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ ocd->octo_mlen = MD5_HASH_LEN;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ ocd->octo_mlen = SHA1_HASH_LEN;
+ break;
+ }
+ }
- switch (c->cri_alg) {
- case CRYPTO_DES_CBC:
- case CRYPTO_3DES_CBC:
- ocd->octo_ivsize = 8;
- switch (macini ? macini->cri_alg : -1) {
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ switch (csp->csp_auth_alg) {
case CRYPTO_MD5_HMAC:
- ocd->octo_encrypt = octo_des_cbc_md5_encrypt;
- ocd->octo_decrypt = octo_des_cbc_md5_decrypt;
- octo_calc_hash(0, macini->cri_key, ocd->octo_hminner,
- ocd->octo_hmouter);
+ ocd->octo_encrypt = octo_null_md5_encrypt;
+ ocd->octo_decrypt = octo_null_md5_encrypt;
break;
case CRYPTO_SHA1_HMAC:
- ocd->octo_encrypt = octo_des_cbc_sha1_encrypt;
- ocd->octo_decrypt = octo_des_cbc_sha1_encrypt;
- octo_calc_hash(1, macini->cri_key, ocd->octo_hminner,
- ocd->octo_hmouter);
+ ocd->octo_encrypt = octo_null_sha1_encrypt;
+ ocd->octo_decrypt = octo_null_sha1_encrypt;
break;
- case -1:
+ }
+ break;
+ case CSP_MODE_CIPHER:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
ocd->octo_encrypt = octo_des_cbc_encrypt;
ocd->octo_decrypt = octo_des_cbc_decrypt;
break;
- default:
- dprintf("%s,%d: EINVAL\n", __FILE__, __LINE__);
- return EINVAL;
- }
- break;
- case CRYPTO_AES_CBC:
- ocd->octo_ivsize = 16;
- switch (macini ? macini->cri_alg : -1) {
- case CRYPTO_MD5_HMAC:
- ocd->octo_encrypt = octo_aes_cbc_md5_encrypt;
- ocd->octo_decrypt = octo_aes_cbc_md5_decrypt;
- octo_calc_hash(0, macini->cri_key, ocd->octo_hminner,
- ocd->octo_hmouter);
- break;
- case CRYPTO_SHA1_HMAC:
- ocd->octo_encrypt = octo_aes_cbc_sha1_encrypt;
- ocd->octo_decrypt = octo_aes_cbc_sha1_decrypt;
- octo_calc_hash(1, macini->cri_key, ocd->octo_hminner,
- ocd->octo_hmouter);
- break;
- case -1:
+ case CRYPTO_AES_CBC:
ocd->octo_encrypt = octo_aes_cbc_encrypt;
ocd->octo_decrypt = octo_aes_cbc_decrypt;
break;
- default:
- dprintf("%s,%d: EINVAL\n", __FILE__, __LINE__);
- return EINVAL;
}
break;
- case CRYPTO_MD5_HMAC:
- ocd->octo_encrypt = octo_null_md5_encrypt;
- ocd->octo_decrypt = octo_null_md5_encrypt;
- octo_calc_hash(0, macini->cri_key, ocd->octo_hminner,
- ocd->octo_hmouter);
- break;
- case CRYPTO_SHA1_HMAC:
- ocd->octo_encrypt = octo_null_sha1_encrypt;
- ocd->octo_decrypt = octo_null_sha1_encrypt;
- octo_calc_hash(1, macini->cri_key, ocd->octo_hminner,
- ocd->octo_hmouter);
+ case CSP_MODE_ETA:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ ocd->octo_encrypt = octo_des_cbc_md5_encrypt;
+ ocd->octo_decrypt = octo_des_cbc_md5_decrypt;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ ocd->octo_encrypt = octo_des_cbc_sha1_encrypt;
+ ocd->octo_decrypt = octo_des_cbc_sha1_encrypt;
+ break;
+ }
+ break;
+ case CRYPTO_AES_CBC:
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ ocd->octo_encrypt = octo_aes_cbc_md5_encrypt;
+ ocd->octo_decrypt = octo_aes_cbc_md5_decrypt;
+ break;
+ case CRYPTO_SHA1_HMAC:
+ ocd->octo_encrypt = octo_aes_cbc_sha1_encrypt;
+ ocd->octo_decrypt = octo_aes_cbc_sha1_decrypt;
+ break;
+ }
+ break;
+ }
break;
- default:
- dprintf("%s,%d: EINVALn", __FILE__, __LINE__);
- return EINVAL;
}
- ocd->octo_encalg = encini ? encini->cri_alg : -1;
- ocd->octo_macalg = macini ? macini->cri_alg : -1;
+ KASSERT(ocd->octo_encrypt != NULL && ocd->octo_decrypt != NULL,
+ ("%s: missing function pointers", __func__));
return (0);
}
@@ -261,106 +283,107 @@
static int
cryptocteon_process(device_t dev, struct cryptop *crp, int hint)
{
- struct cryptodesc *crd;
+ const struct crypto_session_params *csp;
struct octo_sess *od;
size_t iovcnt, iovlen;
struct mbuf *m = NULL;
struct uio *uiop = NULL;
- struct cryptodesc *enccrd = NULL, *maccrd = NULL;
unsigned char *ivp = NULL;
- unsigned char iv_data[HASH_MAX_LEN];
- int auth_off = 0, auth_len = 0, crypt_off = 0, crypt_len = 0, icv_off = 0;
+ unsigned char iv_data[16];
+ unsigned char icv[SHA1_HASH_LEN], icv2[SHA1_HASH_LEN];
+ int auth_off, auth_len, crypt_off, crypt_len;
struct cryptocteon_softc *sc;
sc = device_get_softc(dev);
- if (sc == NULL || crp == NULL)
- return EINVAL;
-
crp->crp_etype = 0;
- if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
- dprintf("%s,%d: EINVAL\n", __FILE__, __LINE__);
- crp->crp_etype = EINVAL;
- goto done;
- }
-
od = crypto_get_driver_session(crp->crp_session);
+ csp = crypto_get_params(crp->crp_session);
+
+ /*
+ * The crypto routines assume that the regions to auth and
+ * cipher are exactly 8 byte multiples and aligned on 8
+ * byte logical boundaries within the iovecs.
+ */
+ if (crp->crp_aad_length % 8 != 0 || crp->crp_payload_length % 8 != 0) {
+ crp->crp_etype = EFBIG;
+ goto done;
+ }
+
+ /*
+ * As currently written, the crypto routines assume the AAD and
+ * payload are adjacent.
+ */
+ if (crp->crp_aad_length != 0 && crp->crp_payload_start !=
+ crp->crp_aad_start + crp->crp_aad_length) {
+ crp->crp_etype = EFBIG;
+ goto done;
+ }
+
+ crypt_off = crp->crp_payload_start;
+ crypt_len = crp->crp_payload_length;
+ if (crp->crp_aad_length != 0) {
+ auth_off = crp->crp_aad_start;
+ auth_len = crp->crp_aad_length + crp->crp_payload_length;
+ } else {
+ auth_off = crypt_off;
+ auth_len = crypt_len;
+ }
/*
* do some error checking outside of the loop for m and IOV processing
* this leaves us with valid m or uiop pointers for later
*/
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ {
unsigned frags;
- m = (struct mbuf *) crp->crp_buf;
+ m = crp->crp_mbuf;
for (frags = 0; m != NULL; frags++)
m = m->m_next;
if (frags >= UIO_MAXIOV) {
printf("%s,%d: %d frags > UIO_MAXIOV", __FILE__, __LINE__, frags);
+ crp->crp_etype = EFBIG;
goto done;
}
- m = (struct mbuf *) crp->crp_buf;
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
- uiop = (struct uio *) crp->crp_buf;
+ m = crp->crp_mbuf;
+ break;
+ }
+ case CRYPTO_BUF_UIO:
+ uiop = crp->crp_uio;
if (uiop->uio_iovcnt > UIO_MAXIOV) {
printf("%s,%d: %d uio_iovcnt > UIO_MAXIOV", __FILE__, __LINE__,
uiop->uio_iovcnt);
+ crp->crp_etype = EFBIG;
goto done;
}
+ break;
}
- /* point our enccrd and maccrd appropriately */
- crd = crp->crp_desc;
- if (crd->crd_alg == od->octo_encalg)
- enccrd = crd;
- if (crd->crd_alg == od->octo_macalg)
- maccrd = crd;
- crd = crd->crd_next;
- if (crd) {
- if (crd->crd_alg == od->octo_encalg)
- enccrd = crd;
- if (crd->crd_alg == od->octo_macalg)
- maccrd = crd;
- crd = crd->crd_next;
- }
- if (crd) {
- crp->crp_etype = EINVAL;
- dprintf("%s,%d: ENOENT - descriptors do not match session\n",
- __FILE__, __LINE__);
- goto done;
- }
-
- if (enccrd) {
- if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
- ivp = enccrd->crd_iv;
- } else {
+ if (csp->csp_cipher_alg != 0) {
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(iv_data, csp->csp_ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen,
+ iv_data);
+ ivp = iv_data;
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ ivp = crp->crp_iv;
+ else {
+ crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen,
+ iv_data);
ivp = iv_data;
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, od->octo_ivsize, (caddr_t) ivp);
- }
-
- if (maccrd) {
- auth_off = maccrd->crd_skip;
- auth_len = maccrd->crd_len;
- icv_off = maccrd->crd_inject;
}
-
- crypt_off = enccrd->crd_skip;
- crypt_len = enccrd->crd_len;
- } else { /* if (maccrd) */
- auth_off = maccrd->crd_skip;
- auth_len = maccrd->crd_len;
- icv_off = maccrd->crd_inject;
}
/*
* setup the I/O vector to cover the buffer
*/
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
iovcnt = 0;
iovlen = 0;
@@ -371,7 +394,8 @@
m = m->m_next;
iovlen += od->octo_iov[iovcnt++].iov_len;
}
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ break;
+ case CRYPTO_BUF_UIO:
iovlen = 0;
for (iovcnt = 0; iovcnt < uiop->uio_iovcnt; iovcnt++) {
od->octo_iov[iovcnt].iov_base = uiop->uio_iov[iovcnt].iov_base;
@@ -379,44 +403,44 @@
iovlen += od->octo_iov[iovcnt].iov_len;
}
- } else {
+ break;
+ case CRYPTO_BUF_CONTIG:
iovlen = crp->crp_ilen;
od->octo_iov[0].iov_base = crp->crp_buf;
od->octo_iov[0].iov_len = crp->crp_ilen;
iovcnt = 1;
+ break;
+ default:
+ panic("can't happen");
}
/*
* setup a new explicit key
*/
- if (enccrd) {
- if (enccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
- od->octo_encklen = (enccrd->crd_klen + 7) / 8;
- memcpy(od->octo_enckey, enccrd->crd_key, od->octo_encklen);
- }
- }
- if (maccrd) {
- if (maccrd->crd_flags & CRD_F_KEY_EXPLICIT) {
- od->octo_macklen = (maccrd->crd_klen + 7) / 8;
- memcpy(od->octo_mackey, maccrd->crd_key, od->octo_macklen);
- od->octo_mackey_set = 0;
- }
- if (!od->octo_mackey_set) {
- octo_calc_hash(maccrd->crd_alg == CRYPTO_MD5_HMAC ? 0 : 1,
- maccrd->crd_key, od->octo_hminner, od->octo_hmouter);
- od->octo_mackey_set = 1;
- }
- }
+ if (crp->crp_cipher_key != NULL)
+ memcpy(od->octo_enckey, crp->crp_cipher_key, od->octo_encklen);
+ if (crp->crp_auth_key != NULL)
+ cryptocteon_calc_hash(csp, crp->crp_auth_key, od);
- if (!enccrd || (enccrd->crd_flags & CRD_F_ENCRYPT))
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
(*od->octo_encrypt)(od, od->octo_iov, iovcnt, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
else
(*od->octo_decrypt)(od, od->octo_iov, iovcnt, iovlen,
- auth_off, auth_len, crypt_off, crypt_len, icv_off, ivp);
+ auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
+ if (csp->csp_auth_alg != 0) {
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start,
+ od->octo_mlen, icv2);
+ if (timingsafe_bcmp(icv, icv2, od->octo_mlen) != 0)
+ crp->crp_etype = EBADMSG;
+ } else
+ crypto_copyback(crp, crp->crp_digest_start,
+ od->octo_mlen, icv);
+ }
done:
crypto_done(crp);
return (0);
@@ -429,6 +453,7 @@
DEVMETHOD(device_attach, cryptocteon_attach),
/* crypto device methods */
+ DEVMETHOD(cryptodev_probesession, cryptocteon_probesession),
DEVMETHOD(cryptodev_newsession, cryptocteon_newsession),
DEVMETHOD(cryptodev_process, cryptocteon_process),
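
The conversion above leaves cryptocteon_process handling the three IV sources
the reworked framework defines. A minimal sketch of that dispatch, assuming
the csp/crp pair has already been looked up (fetch_iv is a hypothetical helper
name, not part of this patch):

	static void
	fetch_iv(struct cryptop *crp, const struct crypto_session_params *csp,
	    unsigned char *iv)
	{
		if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
			/* Driver creates the IV and stores it at crp_iv_start. */
			arc4rand(iv, csp->csp_ivlen, 0);
			crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
		} else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
			/* IV is passed out of band in crp_iv. */
			memcpy(iv, crp->crp_iv, csp->csp_ivlen);
		} else {
			/* Full IV is already in the data stream at crp_iv_start. */
			crypto_copydata(crp, crp->crp_iv_start, csp->csp_ivlen, iv);
		}
	}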
Index: sys/mips/cavium/cryptocteon/cryptocteonvar.h
===================================================================
--- sys/mips/cavium/cryptocteon/cryptocteonvar.h
+++ sys/mips/cavium/cryptocteon/cryptocteonvar.h
@@ -34,23 +34,15 @@
struct octo_sess;
-typedef int octo_encrypt_t(struct octo_sess *od, struct iovec *iov, size_t iovcnt, size_t iovlen, int auth_off, int auth_len, int crypt_off, int crypt_len, int icv_off, uint8_t *ivp);
-typedef int octo_decrypt_t(struct octo_sess *od, struct iovec *iov, size_t iovcnt, size_t iovlen, int auth_off, int auth_len, int crypt_off, int crypt_len, int icv_off, uint8_t *ivp);
+typedef int octo_encrypt_t(struct octo_sess *od, struct iovec *iov, size_t iovcnt, size_t iovlen, int auth_off, int auth_len, int crypt_off, int crypt_len, uint8_t *icv, uint8_t *ivp);
+typedef int octo_decrypt_t(struct octo_sess *od, struct iovec *iov, size_t iovcnt, size_t iovlen, int auth_off, int auth_len, int crypt_off, int crypt_len, uint8_t *icv, uint8_t *ivp);
struct octo_sess {
- int octo_encalg;
#define MAX_CIPHER_KEYLEN 64
char octo_enckey[MAX_CIPHER_KEYLEN];
int octo_encklen;
- int octo_macalg;
- #define MAX_HASH_KEYLEN 64
- char octo_mackey[MAX_HASH_KEYLEN];
- int octo_macklen;
- int octo_mackey_set;
-
int octo_mlen;
- int octo_ivsize;
octo_encrypt_t *octo_encrypt;
octo_decrypt_t *octo_decrypt;
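
The typedef change is why icv_off disappears from the session: handlers now
write the computed digest into a caller-supplied icv buffer, and the caller
decides what to do with it. A condensed sketch of the caller side from
cryptocteon_process above:

	unsigned char icv[SHA1_HASH_LEN], icv2[SHA1_HASH_LEN];

	(*od->octo_encrypt)(od, od->octo_iov, iovcnt, iovlen,
	    auth_off, auth_len, crypt_off, crypt_len, icv, ivp);
	if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
		/* Compare against the digest carried in the buffer. */
		crypto_copydata(crp, crp->crp_digest_start, od->octo_mlen, icv2);
		if (timingsafe_bcmp(icv, icv2, od->octo_mlen) != 0)
			crp->crp_etype = EBADMSG;
	} else {
		/* Store the freshly computed digest. */
		crypto_copyback(crp, crp->crp_digest_start, od->octo_mlen, icv);
	}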
Index: sys/mips/nlm/dev/sec/nlmrsa.c
===================================================================
--- sys/mips/nlm/dev/sec/nlmrsa.c
+++ sys/mips/nlm/dev/sec/nlmrsa.c
@@ -76,7 +76,6 @@
#endif
static int xlp_rsa_init(struct xlp_rsa_softc *sc, int node);
-static int xlp_rsa_newsession(device_t , crypto_session_t, struct cryptoini *);
static int xlp_rsa_kprocess(device_t , struct cryptkop *, int);
static int xlp_get_rsa_opsize(struct xlp_rsa_command *cmd, unsigned int bits);
static void xlp_free_cmd_params(struct xlp_rsa_command *cmd);
@@ -98,7 +97,6 @@
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* crypto device methods */
- DEVMETHOD(cryptodev_newsession, xlp_rsa_newsession),
DEVMETHOD(cryptodev_kprocess, xlp_rsa_kprocess),
DEVMETHOD_END
@@ -313,20 +311,6 @@
return (0);
}
-/*
- * Allocate a new 'session' (unused).
- */
-static int
-xlp_rsa_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
-{
- struct xlp_rsa_softc *sc = device_get_softc(dev);
-
- if (cri == NULL || sc == NULL)
- return (EINVAL);
-
- return (0);
-}
-
/*
* XXX freesession should run a zero'd mac/encrypt key into context ram.
* XXX to blow away any keys already stored there.
Index: sys/mips/nlm/dev/sec/nlmsec.c
===================================================================
--- sys/mips/nlm/dev/sec/nlmsec.c
+++ sys/mips/nlm/dev/sec/nlmsec.c
@@ -52,6 +52,7 @@
#include <dev/pci/pcivar.h>
#include <opencrypto/cryptodev.h>
+#include <opencrypto/xform_auth.h>
#include "cryptodev_if.h"
@@ -71,13 +72,14 @@
unsigned int creditleft;
-void xlp_sec_print_data(struct cryptop *crp);
-
static int xlp_sec_init(struct xlp_sec_softc *sc);
-static int xlp_sec_newsession(device_t , crypto_session_t, struct cryptoini *);
+static int xlp_sec_probesession(device_t,
+ const struct crypto_session_params *);
+static int xlp_sec_newsession(device_t , crypto_session_t,
+ const struct crypto_session_params *);
static int xlp_sec_process(device_t , struct cryptop *, int);
-static int xlp_copyiv(struct xlp_sec_softc *, struct xlp_sec_command *,
- struct cryptodesc *enccrd);
+static void xlp_copyiv(struct xlp_sec_softc *, struct xlp_sec_command *,
+ const struct crypto_session_params *);
static int xlp_get_nsegs(struct cryptop *, unsigned int *);
static int xlp_alloc_cmd_params(struct xlp_sec_command *, unsigned int);
static void xlp_free_cmd_params(struct xlp_sec_command *);
@@ -97,6 +99,7 @@
DEVMETHOD(bus_driver_added, bus_generic_driver_added),
/* crypto device methods */
+ DEVMETHOD(cryptodev_probesession, xlp_sec_probesession),
DEVMETHOD(cryptodev_newsession, xlp_sec_newsession),
DEVMETHOD(cryptodev_process, xlp_sec_process),
@@ -197,46 +200,6 @@
return;
}
-void
-xlp_sec_print_data(struct cryptop *crp)
-{
- int i, key_len;
- struct cryptodesc *crp_desc;
-
- printf("session = %p, crp_ilen = %d, crp_olen=%d \n", crp->crp_session,
- crp->crp_ilen, crp->crp_olen);
-
- printf("crp_flags = 0x%x\n", crp->crp_flags);
-
- printf("crp buf:\n");
- for (i = 0; i < crp->crp_ilen; i++) {
- printf("%c ", crp->crp_buf[i]);
- if (i % 10 == 0)
- printf("\n");
- }
-
- printf("\n");
- printf("****************** desc ****************\n");
- crp_desc = crp->crp_desc;
- printf("crd_skip=%d, crd_len=%d, crd_flags=0x%x, crd_alg=%d\n",
- crp_desc->crd_skip, crp_desc->crd_len, crp_desc->crd_flags,
- crp_desc->crd_alg);
-
- key_len = crp_desc->crd_klen / 8;
- printf("key(%d) :\n", key_len);
- for (i = 0; i < key_len; i++)
- printf("%d", crp_desc->crd_key[i]);
- printf("\n");
-
- printf(" IV : \n");
- for (i = 0; i < EALG_MAX_BLOCK_LEN; i++)
- printf("%d", crp_desc->crd_iv[i]);
- printf("\n");
-
- printf("crd_next=%p\n", crp_desc->crd_next);
- return;
-}
-
void
print_cmd(struct xlp_sec_command *cmd)
{
@@ -289,8 +252,7 @@
{
struct xlp_sec_command *cmd = NULL;
struct xlp_sec_softc *sc = NULL;
- struct cryptodesc *crd = NULL;
- unsigned int ivlen = 0;
+ uint8_t hash[HASH_MAX_LEN];
KASSERT(code == FMN_SWCODE_CRYPTO,
("%s: bad code = %d, expected code = %d\n", __FUNCTION__,
@@ -310,23 +272,6 @@
(unsigned long long)msg->msg[0], (unsigned long long)msg->msg[1],
(int)CRYPTO_ERROR(msg->msg[1])));
- crd = cmd->enccrd;
- /* Copy the last 8 or 16 bytes to the session iv, so that in few
- * cases this will be used as IV for the next request
- */
- if (crd != NULL) {
- if ((crd->crd_alg == CRYPTO_DES_CBC ||
- crd->crd_alg == CRYPTO_3DES_CBC ||
- crd->crd_alg == CRYPTO_AES_CBC) &&
- (crd->crd_flags & CRD_F_ENCRYPT)) {
- ivlen = ((crd->crd_alg == CRYPTO_AES_CBC) ?
- XLP_SEC_AES_IV_LENGTH : XLP_SEC_DES_IV_LENGTH);
- crypto_copydata(cmd->crp->crp_flags, cmd->crp->crp_buf,
- crd->crd_skip + crd->crd_len - ivlen, ivlen,
- cmd->ses->ses_iv);
- }
- }
-
/* If there are not enough credits to send, then send request
* will fail with ERESTART and the driver will be blocked until it is
* unblocked here after knowing that there are sufficient credits to
@@ -339,10 +284,16 @@
sc->sc_needwakeup &= (~(CRYPTO_SYMQ | CRYPTO_ASYMQ));
}
}
- if(cmd->maccrd) {
- crypto_copyback(cmd->crp->crp_flags,
- cmd->crp->crp_buf, cmd->maccrd->crd_inject,
- cmd->hash_dst_len, cmd->hashdest);
+ if (cmd->hash_dst_len != 0) {
+ if (cmd->crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(cmd->crp, cmd->crp->crp_digest_start,
+ cmd->hash_dst_len, hash);
+ if (timingsafe_bcmp(cmd->hashdest, hash,
+ cmd->hash_dst_len) != 0)
+ cmd->crp->crp_etype = EBADMSG;
+ } else
+ crypto_copyback(cmd->crp, cmd->crp->crp_digest_start,
+ cmd->hash_dst_len, cmd->hashdest);
}
/* This indicates completion of the crypto operation */
@@ -392,29 +343,6 @@
" id\n");
goto error_exit;
}
- if (crypto_register(sc->sc_cid, CRYPTO_DES_CBC, 0, 0) != 0)
- printf("register failed for CRYPTO_DES_CBC\n");
-
- if (crypto_register(sc->sc_cid, CRYPTO_3DES_CBC, 0, 0) != 0)
- printf("register failed for CRYPTO_3DES_CBC\n");
-
- if (crypto_register(sc->sc_cid, CRYPTO_AES_CBC, 0, 0) != 0)
- printf("register failed for CRYPTO_AES_CBC\n");
-
- if (crypto_register(sc->sc_cid, CRYPTO_ARC4, 0, 0) != 0)
- printf("register failed for CRYPTO_ARC4\n");
-
- if (crypto_register(sc->sc_cid, CRYPTO_MD5, 0, 0) != 0)
- printf("register failed for CRYPTO_MD5\n");
-
- if (crypto_register(sc->sc_cid, CRYPTO_SHA1, 0, 0) != 0)
- printf("register failed for CRYPTO_SHA1\n");
-
- if (crypto_register(sc->sc_cid, CRYPTO_MD5_HMAC, 0, 0) != 0)
- printf("register failed for CRYPTO_MD5_HMAC\n");
-
- if (crypto_register(sc->sc_cid, CRYPTO_SHA1_HMAC, 0, 0) != 0)
- printf("register failed for CRYPTO_SHA1_HMAC\n");
base = nlm_get_sec_pcibase(node);
qstart = nlm_qidstart(base);
@@ -443,65 +371,88 @@
return (0);
}
+static bool
+xlp_sec_auth_supported(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5:
+ case CRYPTO_SHA1:
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ break;
+ default:
+ return (false);
+ }
+ return (true);
+}
+
+static bool
+xlp_sec_cipher_supported(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DES_CBC:
+ case CRYPTO_3DES_CBC:
+ if (csp->csp_ivlen != XLP_SEC_DES_IV_LENGTH)
+ return (false);
+ break;
+ case CRYPTO_AES_CBC:
+ if (csp->csp_ivlen != XLP_SEC_AES_IV_LENGTH)
+ return (false);
+ break;
+ case CRYPTO_ARC4:
+ if (csp->csp_ivlen != XLP_SEC_ARC4_IV_LENGTH)
+ return (false);
+ break;
+ default:
+ return (false);
+ }
+
+ return (true);
+}
+
+static int
+xlp_sec_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_DIGEST:
+ if (!xlp_sec_auth_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!xlp_sec_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_ETA:
+ if (!xlp_sec_auth_supported(csp) ||
+ !xlp_sec_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+ return (CRYPTODEV_PROBE_HARDWARE);
+}
+
static int
-xlp_sec_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+xlp_sec_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
{
- struct cryptoini *c;
- struct xlp_sec_softc *sc = device_get_softc(dev);
- int mac = 0, cry = 0;
struct xlp_sec_session *ses;
- struct xlp_sec_command *cmd = NULL;
-
- if (cri == NULL || sc == NULL)
- return (EINVAL);
ses = crypto_get_driver_session(cses);
- cmd = &ses->cmd;
- for (c = cri; c != NULL; c = c->cri_next) {
- switch (c->cri_alg) {
- case CRYPTO_MD5:
- case CRYPTO_SHA1:
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1_HMAC:
- if (mac)
- return (EINVAL);
- mac = 1;
- ses->hs_mlen = c->cri_mlen;
- if (ses->hs_mlen == 0) {
- switch (c->cri_alg) {
- case CRYPTO_MD5:
- case CRYPTO_MD5_HMAC:
- ses->hs_mlen = 16;
- break;
- case CRYPTO_SHA1:
- case CRYPTO_SHA1_HMAC:
- ses->hs_mlen = 20;
- break;
- }
- }
- break;
- case CRYPTO_DES_CBC:
- case CRYPTO_3DES_CBC:
- case CRYPTO_AES_CBC:
- /* XXX this may read fewer, does it matter? */
- read_random(ses->ses_iv, c->cri_alg ==
- CRYPTO_AES_CBC ? XLP_SEC_AES_IV_LENGTH :
- XLP_SEC_DES_IV_LENGTH);
- /* FALLTHROUGH */
- case CRYPTO_ARC4:
- if (cry)
- return (EINVAL);
- cry = 1;
- break;
- default:
- return (EINVAL);
- }
+ if (csp->csp_auth_alg != 0) {
+ if (csp->csp_auth_mlen == 0)
+ ses->hs_mlen = crypto_auth_hash(csp)->hashsize;
+ else
+ ses->hs_mlen = csp->csp_auth_mlen;
}
- if (mac == 0 && cry == 0)
- return (EINVAL);
- cmd->hash_dst_len = ses->hs_mlen;
return (0);
}
@@ -510,54 +461,42 @@
* ram. to blow away any keys already stored there.
*/
-static int
+static void
xlp_copyiv(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd,
- struct cryptodesc *enccrd)
+ const struct crypto_session_params *csp)
{
- unsigned int ivlen = 0;
struct cryptop *crp = NULL;
crp = cmd->crp;
- if (enccrd->crd_alg != CRYPTO_ARC4) {
- ivlen = ((enccrd->crd_alg == CRYPTO_AES_CBC) ?
- XLP_SEC_AES_IV_LENGTH : XLP_SEC_DES_IV_LENGTH);
- if (enccrd->crd_flags & CRD_F_ENCRYPT) {
- if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
- bcopy(enccrd->crd_iv, cmd->iv, ivlen);
- } else {
- bcopy(cmd->ses->ses_iv, cmd->iv, ivlen);
- }
- if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
- crypto_copyback(crp->crp_flags,
- crp->crp_buf, enccrd->crd_inject,
- ivlen, cmd->iv);
- }
- } else {
- if (enccrd->crd_flags & CRD_F_IV_EXPLICIT) {
- bcopy(enccrd->crd_iv, cmd->iv, ivlen);
- } else {
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- enccrd->crd_inject, ivlen, cmd->iv);
- }
- }
+ if (csp->csp_cipher_alg != CRYPTO_ARC4) {
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(cmd->iv, csp->csp_ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, csp->csp_ivlen,
+ cmd->iv);
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ memcpy(cmd->iv, crp->crp_iv, csp->csp_ivlen);
}
- return (0);
}
static int
xlp_get_nsegs(struct cryptop *crp, unsigned int *nsegs)
{
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ {
struct mbuf *m = NULL;
- m = (struct mbuf *)crp->crp_buf;
+ m = crp->crp_mbuf;
while (m != NULL) {
*nsegs += NLM_CRYPTO_NUM_SEGS_REQD(m->m_len);
m = m->m_next;
}
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ break;
+ }
+ case CRYPTO_BUF_UIO:
+ {
struct uio *uio = NULL;
struct iovec *iov = NULL;
int iol = 0;
@@ -570,8 +509,13 @@
iol--;
iov++;
}
- } else {
+ break;
+ }
+ case CRYPTO_BUF_CONTIG:
*nsegs = NLM_CRYPTO_NUM_SEGS_REQD(crp->crp_ilen);
+ break;
+ default:
+ return (EINVAL);
}
return (0);
}
@@ -638,20 +582,24 @@
xlp_sec_process(device_t dev, struct cryptop *crp, int hint)
{
struct xlp_sec_softc *sc = device_get_softc(dev);
+ const struct crypto_session_params *csp;
struct xlp_sec_command *cmd = NULL;
int err = -1, ret = 0;
- struct cryptodesc *crd1, *crd2;
struct xlp_sec_session *ses;
unsigned int nsegs = 0;
- if (crp == NULL || crp->crp_callback == NULL) {
- return (EINVAL);
- }
- if (sc == NULL) {
- err = EINVAL;
- goto errout;
- }
ses = crypto_get_driver_session(crp->crp_session);
+ csp = crypto_get_params(crp->crp_session);
+
+ /*
+ * This device only supports AAD requests where the AAD is
+ * adjacent to the payload.
+ */
+ if (crp->crp_aad_length != 0 && crp->crp_payload_start !=
+ crp->crp_aad_start + crp->crp_aad_length) {
+ err = EFBIG;
+ goto errout;
+ }
if ((cmd = malloc(sizeof(struct xlp_sec_command), M_DEVBUF,
M_NOWAIT | M_ZERO)) == NULL) {
@@ -663,18 +611,12 @@
cmd->ses = ses;
cmd->hash_dst_len = ses->hs_mlen;
- if ((crd1 = crp->crp_desc) == NULL) {
- err = EINVAL;
- goto errout;
- }
- crd2 = crd1->crd_next;
-
if ((ret = xlp_get_nsegs(crp, &nsegs)) != 0) {
err = EINVAL;
goto errout;
}
- if (((crd1 != NULL) && (crd1->crd_flags & CRD_F_IV_EXPLICIT)) ||
- ((crd2 != NULL) && (crd2->crd_flags & CRD_F_IV_EXPLICIT))) {
+
+ if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
/* Since IV is given as separate segment to avoid copy */
nsegs += 1;
}
@@ -683,98 +625,70 @@
if ((err = xlp_alloc_cmd_params(cmd, nsegs)) != 0)
goto errout;
- if ((crd1 != NULL) && (crd2 == NULL)) {
- if (crd1->crd_alg == CRYPTO_DES_CBC ||
- crd1->crd_alg == CRYPTO_3DES_CBC ||
- crd1->crd_alg == CRYPTO_AES_CBC ||
- crd1->crd_alg == CRYPTO_ARC4) {
- cmd->enccrd = crd1;
- cmd->maccrd = NULL;
- if ((ret = nlm_get_cipher_param(cmd)) != 0) {
- err = EINVAL;
- goto errout;
- }
- if (crd1->crd_flags & CRD_F_IV_EXPLICIT)
- cmd->cipheroff = cmd->ivlen;
- else
- cmd->cipheroff = cmd->enccrd->crd_skip;
- cmd->cipherlen = cmd->enccrd->crd_len;
- if (crd1->crd_flags & CRD_F_IV_PRESENT)
- cmd->ivoff = 0;
- else
- cmd->ivoff = cmd->enccrd->crd_inject;
- if ((err = xlp_copyiv(sc, cmd, cmd->enccrd)) != 0)
- goto errout;
- if ((err = nlm_crypto_do_cipher(sc, cmd)) != 0)
- goto errout;
- } else if (crd1->crd_alg == CRYPTO_MD5_HMAC ||
- crd1->crd_alg == CRYPTO_SHA1_HMAC ||
- crd1->crd_alg == CRYPTO_SHA1 ||
- crd1->crd_alg == CRYPTO_MD5) {
- cmd->enccrd = NULL;
- cmd->maccrd = crd1;
- if ((ret = nlm_get_digest_param(cmd)) != 0) {
- err = EINVAL;
- goto errout;
- }
- cmd->hashoff = cmd->maccrd->crd_skip;
- cmd->hashlen = cmd->maccrd->crd_len;
- cmd->hmacpad = 0;
- cmd->hashsrc = 0;
- if ((err = nlm_crypto_do_digest(sc, cmd)) != 0)
- goto errout;
- } else {
+ switch (csp->csp_mode) {
+ case CSP_MODE_CIPHER:
+ if ((ret = nlm_get_cipher_param(cmd, csp)) != 0) {
err = EINVAL;
goto errout;
}
- } else if( (crd1 != NULL) && (crd2 != NULL) ) {
- if ((crd1->crd_alg == CRYPTO_MD5_HMAC ||
- crd1->crd_alg == CRYPTO_SHA1_HMAC ||
- crd1->crd_alg == CRYPTO_MD5 ||
- crd1->crd_alg == CRYPTO_SHA1) &&
- (crd2->crd_alg == CRYPTO_DES_CBC ||
- crd2->crd_alg == CRYPTO_3DES_CBC ||
- crd2->crd_alg == CRYPTO_AES_CBC ||
- crd2->crd_alg == CRYPTO_ARC4)) {
- cmd->maccrd = crd1;
- cmd->enccrd = crd2;
- } else if ((crd1->crd_alg == CRYPTO_DES_CBC ||
- crd1->crd_alg == CRYPTO_ARC4 ||
- crd1->crd_alg == CRYPTO_3DES_CBC ||
- crd1->crd_alg == CRYPTO_AES_CBC) &&
- (crd2->crd_alg == CRYPTO_MD5_HMAC ||
- crd2->crd_alg == CRYPTO_SHA1_HMAC ||
- crd2->crd_alg == CRYPTO_MD5 ||
- crd2->crd_alg == CRYPTO_SHA1)) {
- cmd->enccrd = crd1;
- cmd->maccrd = crd2;
- } else {
+ cmd->cipheroff = crp->crp_payload_start;
+ cmd->cipherlen = crp->crp_payload_length;
+ if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
+ cmd->cipheroff += cmd->ivlen;
+ cmd->ivoff = 0;
+ } else
+ cmd->ivoff = crp->crp_iv_start;
+ xlp_copyiv(sc, cmd, csp);
+ if ((err = nlm_crypto_do_cipher(sc, cmd, csp)) != 0)
+ goto errout;
+ break;
+ case CSP_MODE_DIGEST:
+ if ((ret = nlm_get_digest_param(cmd, csp)) != 0) {
err = EINVAL;
goto errout;
}
- if ((ret = nlm_get_cipher_param(cmd)) != 0) {
+ cmd->hashoff = crp->crp_payload_start;
+ cmd->hashlen = crp->crp_payload_length;
+ cmd->hmacpad = 0;
+ cmd->hashsrc = 0;
+ if ((err = nlm_crypto_do_digest(sc, cmd, csp)) != 0)
+ goto errout;
+ break;
+ case CSP_MODE_ETA:
+ if ((ret = nlm_get_cipher_param(cmd, csp)) != 0) {
err = EINVAL;
goto errout;
}
- if ((ret = nlm_get_digest_param(cmd)) != 0) {
+ if ((ret = nlm_get_digest_param(cmd, csp)) != 0) {
err = EINVAL;
goto errout;
}
- cmd->ivoff = cmd->enccrd->crd_inject;
- cmd->hashoff = cmd->maccrd->crd_skip;
- cmd->hashlen = cmd->maccrd->crd_len;
+ if (crp->crp_aad_length != 0) {
+ cmd->hashoff = crp->crp_aad_start;
+ cmd->hashlen = crp->crp_aad_length +
+ crp->crp_payload_length;
+ } else {
+ cmd->hashoff = crp->crp_payload_start;
+ cmd->hashlen = crp->crp_payload_length;
+ }
cmd->hmacpad = 0;
- if (cmd->enccrd->crd_flags & CRD_F_ENCRYPT)
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op))
cmd->hashsrc = 1;
else
cmd->hashsrc = 0;
- cmd->cipheroff = cmd->enccrd->crd_skip;
- cmd->cipherlen = cmd->enccrd->crd_len;
- if ((err = xlp_copyiv(sc, cmd, cmd->enccrd)) != 0)
+ cmd->cipheroff = crp->crp_payload_start;
+ cmd->cipherlen = crp->crp_payload_length;
+ if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
+ cmd->hashoff += cmd->ivlen;
+ cmd->cipheroff += cmd->ivlen;
+ cmd->ivoff = 0;
+ } else
+ cmd->ivoff = crp->crp_iv_start;
+ xlp_copyiv(sc, cmd, csp);
+ if ((err = nlm_crypto_do_cipher_digest(sc, cmd, csp)) != 0)
goto errout;
- if ((err = nlm_crypto_do_cipher_digest(sc, cmd)) != 0)
- goto errout;
- } else {
+ break;
+ default:
err = EINVAL;
goto errout;
}
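
The probesession/newsession split above separates capability checks from
session setup: the framework offers the proposed crypto_session_params to each
driver's cryptodev_probesession, uses the returned value to rank the
candidates, and calls cryptodev_newsession only on the winner. A minimal
sketch of the pattern for a hypothetical driver "foo" (foo_session and its
keylen field are invented for illustration):

	static int
	foo_probesession(device_t dev, const struct crypto_session_params *csp)
	{
		/* Reject anything the hardware cannot handle. */
		if (csp->csp_flags != 0)
			return (EINVAL);
		if (csp->csp_mode != CSP_MODE_CIPHER ||
		    csp->csp_cipher_alg != CRYPTO_AES_CBC)
			return (EINVAL);
		/* Bid as a hardware implementation. */
		return (CRYPTODEV_PROBE_HARDWARE);
	}

	static int
	foo_newsession(device_t dev, crypto_session_t cses,
	    const struct crypto_session_params *csp)
	{
		struct foo_session *ses;

		/* The framework pre-allocates the driver's per-session state. */
		ses = crypto_get_driver_session(cses);
		ses->keylen = csp->csp_cipher_klen;
		return (0);
	}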
Index: sys/mips/nlm/dev/sec/nlmseclib.h
===================================================================
--- sys/mips/nlm/dev/sec/nlmseclib.h
+++ sys/mips/nlm/dev/sec/nlmseclib.h
@@ -91,7 +91,6 @@
struct xlp_sec_command {
struct cryptop *crp;
- struct cryptodesc *enccrd, *maccrd;
struct xlp_sec_session *ses;
struct nlm_crypto_pkt_ctrl *ctrlp;
struct nlm_crypto_pkt_param *paramp;
@@ -116,8 +115,6 @@
struct xlp_sec_session {
int hs_mlen;
- uint8_t ses_iv[EALG_MAX_BLOCK_LEN];
- struct xlp_sec_command cmd;
};
/*
@@ -135,17 +132,22 @@
#ifdef NLM_SEC_DEBUG
void print_crypto_params(struct xlp_sec_command *cmd, struct nlm_fmn_msg m);
-void xlp_sec_print_data(struct cryptop *crp);
void print_cmd(struct xlp_sec_command *cmd);
#endif
-int nlm_crypto_form_srcdst_segs(struct xlp_sec_command *cmd);
+int nlm_crypto_form_srcdst_segs(struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp);
int nlm_crypto_do_cipher(struct xlp_sec_softc *sc,
- struct xlp_sec_command *cmd);
+ struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp);
int nlm_crypto_do_digest(struct xlp_sec_softc *sc,
- struct xlp_sec_command *cmd);
+ struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp);
int nlm_crypto_do_cipher_digest(struct xlp_sec_softc *sc,
- struct xlp_sec_command *cmd);
-int nlm_get_digest_param(struct xlp_sec_command *cmd);
-int nlm_get_cipher_param(struct xlp_sec_command *cmd);
+ struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp);
+int nlm_get_digest_param(struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp);
+int nlm_get_cipher_param(struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp);
#endif /* _NLMSECLIB_H_ */
Index: sys/mips/nlm/dev/sec/nlmseclib.c
===================================================================
--- sys/mips/nlm/dev/sec/nlmseclib.c
+++ sys/mips/nlm/dev/sec/nlmseclib.c
@@ -92,18 +92,17 @@
}
int
-nlm_crypto_form_srcdst_segs(struct xlp_sec_command *cmd)
+nlm_crypto_form_srcdst_segs(struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp)
{
unsigned int srcseg = 0, dstseg = 0;
- struct cryptodesc *cipdesc = NULL;
struct cryptop *crp = NULL;
crp = cmd->crp;
- cipdesc = cmd->enccrd;
- if (cipdesc != NULL) {
+ if (csp->csp_mode != CSP_MODE_DIGEST) {
/* IV is given as ONE segment to avoid copy */
- if (cipdesc->crd_flags & CRD_F_IV_EXPLICIT) {
+ if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
srcseg = nlm_crypto_fill_src_seg(cmd->paramp, srcseg,
cmd->iv, cmd->ivlen);
dstseg = nlm_crypto_fill_dst_seg(cmd->paramp, dstseg,
@@ -111,32 +110,37 @@
}
}
- if (crp->crp_flags & CRYPTO_F_IMBUF) {
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ {
struct mbuf *m = NULL;
- m = (struct mbuf *)crp->crp_buf;
+ m = crp->crp_mbuf;
while (m != NULL) {
srcseg = nlm_crypto_fill_src_seg(cmd->paramp, srcseg,
mtod(m,caddr_t), m->m_len);
- if (cipdesc != NULL) {
+ if (csp->csp_mode != CSP_MODE_DIGEST) {
dstseg = nlm_crypto_fill_dst_seg(cmd->paramp,
dstseg, mtod(m,caddr_t), m->m_len);
}
m = m->m_next;
}
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ break;
+ }
+ case CRYPTO_BUF_UIO:
+ {
struct uio *uio = NULL;
struct iovec *iov = NULL;
int iol = 0;
- uio = (struct uio *)crp->crp_buf;
- iov = (struct iovec *)uio->uio_iov;
+ uio = crp->crp_uio;
+ iov = uio->uio_iov;
iol = uio->uio_iovcnt;
while (iol > 0) {
srcseg = nlm_crypto_fill_src_seg(cmd->paramp, srcseg,
(caddr_t)iov->iov_base, iov->iov_len);
- if (cipdesc != NULL) {
+ if (csp->csp_mode != CSP_MODE_DIGEST) {
dstseg = nlm_crypto_fill_dst_seg(cmd->paramp,
dstseg, (caddr_t)iov->iov_base,
iov->iov_len);
@@ -144,67 +148,75 @@
iov++;
iol--;
}
- } else {
+ break;
+ }
+ case CRYPTO_BUF_CONTIG:
srcseg = nlm_crypto_fill_src_seg(cmd->paramp, srcseg,
((caddr_t)crp->crp_buf), crp->crp_ilen);
- if (cipdesc != NULL) {
+ if (csp->csp_mode != CSP_MODE_DIGEST) {
dstseg = nlm_crypto_fill_dst_seg(cmd->paramp, dstseg,
((caddr_t)crp->crp_buf), crp->crp_ilen);
}
+ break;
}
return (0);
}
int
-nlm_crypto_do_cipher(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd)
+nlm_crypto_do_cipher(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp)
{
- struct cryptodesc *cipdesc = NULL;
- unsigned char *cipkey = NULL;
+ const unsigned char *cipkey = NULL;
int ret = 0;
- cipdesc = cmd->enccrd;
- cipkey = (unsigned char *)cipdesc->crd_key;
+ if (cmd->crp->crp_cipher_key != NULL)
+ cipkey = cmd->crp->crp_cipher_key;
+ else
+ cipkey = csp->csp_cipher_key;
if (cmd->cipheralg == NLM_CIPHER_3DES) {
- if (!(cipdesc->crd_flags & CRD_F_ENCRYPT)) {
- uint64_t *k, *tkey;
- k = (uint64_t *)cipdesc->crd_key;
+ if (!CRYPTO_OP_IS_ENCRYPT(cmd->crp->crp_op)) {
+ const uint64_t *k;
+ uint64_t *tkey;
+ k = (const uint64_t *)cipkey;
tkey = (uint64_t *)cmd->des3key;
tkey[2] = k[0];
tkey[1] = k[1];
tkey[0] = k[2];
- cipkey = (unsigned char *)tkey;
+ cipkey = (const unsigned char *)tkey;
}
}
nlm_crypto_fill_pkt_ctrl(cmd->ctrlp, 0, NLM_HASH_BYPASS, 0,
cmd->cipheralg, cmd->ciphermode, cipkey,
- (cipdesc->crd_klen >> 3), NULL, 0);
+ csp->csp_cipher_klen, NULL, 0);
nlm_crypto_fill_cipher_pkt_param(cmd->ctrlp, cmd->paramp,
- (cipdesc->crd_flags & CRD_F_ENCRYPT) ? 1 : 0, cmd->ivoff,
+ CRYPTO_OP_IS_ENCRYPT(cmd->crp->crp_op) ? 1 : 0, cmd->ivoff,
cmd->ivlen, cmd->cipheroff, cmd->cipherlen);
- nlm_crypto_form_srcdst_segs(cmd);
+ nlm_crypto_form_srcdst_segs(cmd, csp);
ret = nlm_crypto_complete_sec_request(sc, cmd);
return (ret);
}
int
-nlm_crypto_do_digest(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd)
+nlm_crypto_do_digest(struct xlp_sec_softc *sc, struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp)
{
- struct cryptodesc *digdesc = NULL;
+ const char *key;
int ret=0;
- digdesc = cmd->maccrd;
-
- nlm_crypto_fill_pkt_ctrl(cmd->ctrlp, (digdesc->crd_klen) ? 1 : 0,
+ if (cmd->crp->crp_auth_key != NULL)
+ key = cmd->crp->crp_auth_key;
+ else
+ key = csp->csp_auth_key;
+ nlm_crypto_fill_pkt_ctrl(cmd->ctrlp, csp->csp_auth_klen ? 1 : 0,
cmd->hashalg, cmd->hashmode, NLM_CIPHER_BYPASS, 0,
- NULL, 0, digdesc->crd_key, digdesc->crd_klen >> 3);
+ NULL, 0, key, csp->csp_auth_klen);
nlm_crypto_fill_auth_pkt_param(cmd->ctrlp, cmd->paramp,
cmd->hashoff, cmd->hashlen, cmd->hmacpad,
(unsigned char *)cmd->hashdest);
- nlm_crypto_form_srcdst_segs(cmd);
+ nlm_crypto_form_srcdst_segs(cmd, csp);
ret = nlm_crypto_complete_sec_request(sc, cmd);
@@ -213,48 +225,54 @@
int
nlm_crypto_do_cipher_digest(struct xlp_sec_softc *sc,
- struct xlp_sec_command *cmd)
+ struct xlp_sec_command *cmd, const struct crypto_session_params *csp)
{
- struct cryptodesc *cipdesc=NULL, *digdesc=NULL;
- unsigned char *cipkey = NULL;
+ const unsigned char *cipkey = NULL;
+ const char *authkey;
int ret=0;
- cipdesc = cmd->enccrd;
- digdesc = cmd->maccrd;
-
- cipkey = (unsigned char *)cipdesc->crd_key;
+ if (cmd->crp->crp_cipher_key != NULL)
+ cipkey = cmd->crp->crp_cipher_key;
+ else
+ cipkey = csp->csp_cipher_key;
+ if (cmd->crp->crp_auth_key != NULL)
+ authkey = cmd->crp->crp_auth_key;
+ else
+ authkey = csp->csp_auth_key;
if (cmd->cipheralg == NLM_CIPHER_3DES) {
- if (!(cipdesc->crd_flags & CRD_F_ENCRYPT)) {
- uint64_t *k, *tkey;
- k = (uint64_t *)cipdesc->crd_key;
+ if (!CRYPTO_OP_IS_ENCRYPT(cmd->crp->crp_op)) {
+ const uint64_t *k;
+ uint64_t *tkey;
+ k = (const uint64_t *)cipkey;
tkey = (uint64_t *)cmd->des3key;
tkey[2] = k[0];
tkey[1] = k[1];
tkey[0] = k[2];
- cipkey = (unsigned char *)tkey;
+ cipkey = (const unsigned char *)tkey;
}
}
- nlm_crypto_fill_pkt_ctrl(cmd->ctrlp, (digdesc->crd_klen) ? 1 : 0,
+ nlm_crypto_fill_pkt_ctrl(cmd->ctrlp, csp->csp_auth_klen ? 1 : 0,
cmd->hashalg, cmd->hashmode, cmd->cipheralg, cmd->ciphermode,
- cipkey, (cipdesc->crd_klen >> 3),
- digdesc->crd_key, (digdesc->crd_klen >> 3));
+ cipkey, csp->csp_cipher_klen,
+ authkey, csp->csp_auth_klen);
nlm_crypto_fill_cipher_auth_pkt_param(cmd->ctrlp, cmd->paramp,
- (cipdesc->crd_flags & CRD_F_ENCRYPT) ? 1 : 0, cmd->hashsrc,
+ CRYPTO_OP_IS_ENCRYPT(cmd->crp->crp_op) ? 1 : 0, cmd->hashsrc,
cmd->ivoff, cmd->ivlen, cmd->hashoff, cmd->hashlen,
cmd->hmacpad, cmd->cipheroff, cmd->cipherlen,
(unsigned char *)cmd->hashdest);
- nlm_crypto_form_srcdst_segs(cmd);
+ nlm_crypto_form_srcdst_segs(cmd, csp);
ret = nlm_crypto_complete_sec_request(sc, cmd);
return (ret);
}
int
-nlm_get_digest_param(struct xlp_sec_command *cmd)
+nlm_get_digest_param(struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp)
{
- switch(cmd->maccrd->crd_alg) {
+ switch(csp->csp_auth_alg) {
case CRYPTO_MD5:
cmd->hashalg = NLM_HASH_MD5;
cmd->hashmode = NLM_HASH_MODE_SHA1;
@@ -278,9 +296,10 @@
return (0);
}
int
-nlm_get_cipher_param(struct xlp_sec_command *cmd)
+nlm_get_cipher_param(struct xlp_sec_command *cmd,
+ const struct crypto_session_params *csp)
{
- switch(cmd->enccrd->crd_alg) {
+ switch(csp->csp_cipher_alg) {
case CRYPTO_DES_CBC:
cmd->cipheralg = NLM_CIPHER_DES;
cmd->ciphermode = NLM_CIPHER_MODE_CBC;
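
A recurring pattern in the converted helpers above: a request may carry
per-operation keys that override the session keys, so every key lookup is a
two-way choice between the cryptop and the crypto_session_params:

	/* Per-request key overrides the session key when present. */
	const unsigned char *cipkey;

	if (crp->crp_cipher_key != NULL)
		cipkey = crp->crp_cipher_key;
	else
		cipkey = csp->csp_cipher_key;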
Index: sys/mips/nlm/hal/nlmsaelib.h
===================================================================
--- sys/mips/nlm/hal/nlmsaelib.h
+++ sys/mips/nlm/hal/nlmsaelib.h
@@ -462,8 +462,8 @@
nlm_crypto_fill_pkt_ctrl(struct nlm_crypto_pkt_ctrl *ctrl, unsigned int hmac,
enum nlm_hash_algo hashalg, enum nlm_hash_mode hashmode,
enum nlm_cipher_algo cipheralg, enum nlm_cipher_mode ciphermode,
- unsigned char *cipherkey, unsigned int cipherkeylen,
- unsigned char *hashkey, unsigned int hashkeylen)
+ const unsigned char *cipherkey, unsigned int cipherkeylen,
+ const unsigned char *hashkey, unsigned int hashkeylen)
{
unsigned int taglen = 0, hklen = 0;
Index: sys/netipsec/xform.h
===================================================================
--- sys/netipsec/xform.h
+++ sys/netipsec/xform.h
@@ -107,10 +107,11 @@
void xform_detach(void *);
int xform_init(struct secasvar *, u_short);
-struct cryptoini;
+struct crypto_session_params;
/* XF_AH */
int xform_ah_authsize(const struct auth_hash *);
-extern int ah_init0(struct secasvar *, struct xformsw *, struct cryptoini *);
+int ah_init0(struct secasvar *, struct xformsw *,
+ struct crypto_session_params *);
extern int ah_zeroize(struct secasvar *sav);
extern size_t ah_hdrsiz(struct secasvar *);
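
ah_init0() now fills in a caller-provided crypto_session_params instead of a
cryptoini chain; the caller picks the session mode and opens the session. A
condensed sketch of the flow (it mirrors ah_init() in the next file):

	struct crypto_session_params csp;

	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_DIGEST;
	error = ah_init0(sav, xsp, &csp);	/* fills csp_auth_* */
	if (error == 0)
		error = crypto_newsession(&sav->tdb_cryptoid, &csp,
		    V_crypto_support);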
Index: sys/netipsec/xform_ah.c
===================================================================
--- sys/netipsec/xform_ah.c
+++ sys/netipsec/xform_ah.c
@@ -128,9 +128,7 @@
alen = esph->hashsize / 2; /* RFC4868 2.3 */
break;
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
+ case CRYPTO_AES_NIST_GMAC:
alen = esph->hashsize;
break;
@@ -174,7 +172,8 @@
* NB: public for use by esp_init.
*/
int
-ah_init0(struct secasvar *sav, struct xformsw *xsp, struct cryptoini *cria)
+ah_init0(struct secasvar *sav, struct xformsw *xsp,
+ struct crypto_session_params *csp)
{
const struct auth_hash *thash;
int keylen;
@@ -235,11 +234,10 @@
sav->tdb_authalgxform = thash;
/* Initialize crypto session. */
- bzero(cria, sizeof (*cria));
- cria->cri_alg = sav->tdb_authalgxform->type;
- cria->cri_klen = _KEYBITS(sav->key_auth);
- cria->cri_key = sav->key_auth->key_data;
- cria->cri_mlen = AUTHSIZE(sav);
+ csp->csp_auth_alg = sav->tdb_authalgxform->type;
+ csp->csp_auth_klen = _KEYBITS(sav->key_auth) / 8;
+ csp->csp_auth_key = sav->key_auth->key_data;
+ csp->csp_auth_mlen = AUTHSIZE(sav);
return 0;
}
@@ -250,12 +248,14 @@
static int
ah_init(struct secasvar *sav, struct xformsw *xsp)
{
- struct cryptoini cria;
+ struct crypto_session_params csp;
int error;
- error = ah_init0(sav, xsp, &cria);
+ memset(&csp, 0, sizeof(csp));
+ csp.csp_mode = CSP_MODE_DIGEST;
+ error = ah_init0(sav, xsp, &csp);
return error ? error :
- crypto_newsession(&sav->tdb_cryptoid, &cria, V_crypto_support);
+ crypto_newsession(&sav->tdb_cryptoid, &csp, V_crypto_support);
}
/*
@@ -560,7 +560,6 @@
{
IPSEC_DEBUG_DECLARE(char buf[128]);
const struct auth_hash *ahx;
- struct cryptodesc *crda;
struct cryptop *crp;
struct xform_data *xd;
struct newah *ah;
@@ -628,7 +627,7 @@
AHSTAT_ADD(ahs_ibytes, m->m_pkthdr.len - skip - hl);
/* Get crypto descriptors. */
- crp = crypto_getreq(1);
+ crp = crypto_getreq(cryptoid, M_NOWAIT);
if (crp == NULL) {
DPRINTF(("%s: failed to acquire crypto descriptor\n",
__func__));
@@ -637,17 +636,9 @@
goto bad;
}
- crda = crp->crp_desc;
- IPSEC_ASSERT(crda != NULL, ("null crypto descriptor"));
-
- crda->crd_skip = 0;
- crda->crd_len = m->m_pkthdr.len;
- crda->crd_inject = skip + rplen;
-
- /* Authentication operation. */
- crda->crd_alg = ahx->type;
- crda->crd_klen = _KEYBITS(sav->key_auth);
- crda->crd_key = sav->key_auth->key_data;
+ crp->crp_payload_start = 0;
+ crp->crp_payload_length = m->m_pkthdr.len;
+ crp->crp_digest_start = skip + rplen;
/* Allocate IPsec-specific opaque crypto info. */
xd = malloc(sizeof(*xd) + skip + rplen + authsize, M_XDATA,
@@ -686,13 +677,14 @@
/* Crypto operation descriptor. */
crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC;
if (V_async_crypto)
crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER;
- crp->crp_buf = (caddr_t) m;
+ crp->crp_mbuf = m;
+ crp->crp_buf_type = CRYPTO_BUF_MBUF;
crp->crp_callback = ah_input_cb;
- crp->crp_session = cryptoid;
- crp->crp_opaque = (caddr_t) xd;
+ crp->crp_opaque = xd;
/* These are passed as-is to the callback. */
xd->sav = sav;
@@ -725,8 +717,8 @@
int authsize, rplen, ahsize, error, skip, protoff;
uint8_t nxt;
- m = (struct mbuf *) crp->crp_buf;
- xd = (struct xform_data *) crp->crp_opaque;
+ m = crp->crp_mbuf;
+ xd = crp->crp_opaque;
CURVNET_SET(xd->vnet);
sav = xd->sav;
skip = xd->skip;
@@ -866,7 +858,6 @@
{
IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]);
const struct auth_hash *ahx;
- struct cryptodesc *crda;
struct xform_data *xd;
struct mbuf *mi;
struct cryptop *crp;
@@ -988,7 +979,7 @@
SECASVAR_UNLOCK(sav);
/* Get crypto descriptors. */
- crp = crypto_getreq(1);
+ crp = crypto_getreq(cryptoid, M_NOWAIT);
if (crp == NULL) {
DPRINTF(("%s: failed to acquire crypto descriptors\n",
__func__));
@@ -997,15 +988,9 @@
goto bad;
}
- crda = crp->crp_desc;
- crda->crd_skip = 0;
- crda->crd_inject = skip + rplen;
- crda->crd_len = m->m_pkthdr.len;
-
- /* Authentication operation. */
- crda->crd_alg = ahx->type;
- crda->crd_key = sav->key_auth->key_data;
- crda->crd_klen = _KEYBITS(sav->key_auth);
+ crp->crp_payload_start = 0;
+ crp->crp_payload_length = m->m_pkthdr.len;
+ crp->crp_digest_start = skip + rplen;
/* Allocate IPsec-specific opaque crypto info. */
xd = malloc(sizeof(struct xform_data) + skip, M_XDATA,
@@ -1069,13 +1054,14 @@
/* Crypto operation descriptor. */
crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC;
if (V_async_crypto)
crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER;
- crp->crp_buf = (caddr_t) m;
+ crp->crp_mbuf = m;
+ crp->crp_buf_type = CRYPTO_BUF_MBUF;
crp->crp_callback = ah_output_cb;
- crp->crp_session = cryptoid;
- crp->crp_opaque = (caddr_t) xd;
+ crp->crp_opaque = xd;
/* These are passed as-is to the callback. */
xd->sp = sp;
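
With the descriptor chain gone, an AH request is just a set of ranges on the
cryptop itself, and crypto_getreq() now takes the session, so crp_session no
longer has to be filled in by hand. A condensed sketch of the input-path
request built above:

	crp = crypto_getreq(cryptoid, M_NOWAIT);
	if (crp == NULL)
		return (ENOBUFS);
	crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_CBIFSYNC;
	crp->crp_mbuf = m;
	crp->crp_buf_type = CRYPTO_BUF_MBUF;
	crp->crp_payload_start = 0;
	crp->crp_payload_length = m->m_pkthdr.len;
	crp->crp_digest_start = skip + rplen;	/* where the ICV lives */
	crp->crp_callback = ah_input_cb;
	crp->crp_opaque = xd;
	error = crypto_dispatch(crp);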
Index: sys/netipsec/xform_esp.c
===================================================================
--- sys/netipsec/xform_esp.c
+++ sys/netipsec/xform_esp.c
@@ -137,7 +137,7 @@
esp_init(struct secasvar *sav, struct xformsw *xsp)
{
const struct enc_xform *txform;
- struct cryptoini cria, crie;
+ struct crypto_session_params csp;
int keylen;
int error;
@@ -193,11 +193,13 @@
else
sav->ivlen = txform->ivsize;
+ memset(&csp, 0, sizeof(csp));
+
/*
* Setup AH-related state.
*/
if (sav->alg_auth != 0) {
- error = ah_init0(sav, xsp, &cria);
+ error = ah_init0(sav, xsp, &csp);
if (error)
return error;
}
@@ -231,35 +233,20 @@
keylen, txform->name));
return EINVAL;
}
- bzero(&cria, sizeof(cria));
- cria.cri_alg = sav->tdb_authalgxform->type;
- cria.cri_key = sav->key_enc->key_data;
- cria.cri_klen = _KEYBITS(sav->key_enc) - SAV_ISGCM(sav) * 32;
- }
+ csp.csp_mode = CSP_MODE_AEAD;
+ } else if (sav->alg_auth != 0)
+ csp.csp_mode = CSP_MODE_ETA;
+ else
+ csp.csp_mode = CSP_MODE_CIPHER;
/* Initialize crypto session. */
- bzero(&crie, sizeof(crie));
- crie.cri_alg = sav->tdb_encalgxform->type;
- crie.cri_key = sav->key_enc->key_data;
- crie.cri_klen = _KEYBITS(sav->key_enc) - SAV_ISCTRORGCM(sav) * 32;
+ csp.csp_cipher_alg = sav->tdb_encalgxform->type;
+ csp.csp_cipher_key = sav->key_enc->key_data;
+ csp.csp_cipher_klen = _KEYBITS(sav->key_enc) / 8 -
+ SAV_ISCTRORGCM(sav) * 4;
+ csp.csp_ivlen = txform->ivsize;
- if (sav->tdb_authalgxform && sav->tdb_encalgxform) {
- /* init both auth & enc */
- crie.cri_next = &cria;
- error = crypto_newsession(&sav->tdb_cryptoid,
- &crie, V_crypto_support);
- } else if (sav->tdb_encalgxform) {
- error = crypto_newsession(&sav->tdb_cryptoid,
- &crie, V_crypto_support);
- } else if (sav->tdb_authalgxform) {
- error = crypto_newsession(&sav->tdb_cryptoid,
- &cria, V_crypto_support);
- } else {
- /* XXX cannot happen? */
- DPRINTF(("%s: no encoding OR authentication xform!\n",
- __func__));
- error = EINVAL;
- }
+ error = crypto_newsession(&sav->tdb_cryptoid, &csp, V_crypto_support);
return error;
}
@@ -289,7 +276,6 @@
const struct auth_hash *esph;
const struct enc_xform *espx;
struct xform_data *xd;
- struct cryptodesc *crde;
struct cryptop *crp;
struct newesp *esp;
uint8_t *ivp;
@@ -369,7 +355,7 @@
ESPSTAT_ADD(esps_ibytes, m->m_pkthdr.len - (skip + hlen + alen));
/* Get crypto descriptors */
- crp = crypto_getreq(esph && espx ? 2 : 1);
+ crp = crypto_getreq(cryptoid, M_NOWAIT);
if (crp == NULL) {
DPRINTF(("%s: failed to acquire crypto descriptors\n",
__func__));
@@ -379,7 +365,7 @@
}
/* Get IPsec-specific opaque pointer */
- xd = malloc(sizeof(*xd) + alen, M_XDATA, M_NOWAIT | M_ZERO);
+ xd = malloc(sizeof(*xd), M_XDATA, M_NOWAIT | M_ZERO);
if (xd == NULL) {
DPRINTF(("%s: failed to allocate xform_data\n", __func__));
ESPSTAT_INC(esps_crypto);
@@ -389,39 +375,24 @@
}
if (esph != NULL) {
- struct cryptodesc *crda = crp->crp_desc;
-
- IPSEC_ASSERT(crda != NULL, ("null ah crypto descriptor"));
-
- /* Authentication descriptor */
- crda->crd_skip = skip;
+ crp->crp_op = CRYPTO_OP_VERIFY_DIGEST;
+ crp->crp_aad_start = skip;
if (SAV_ISGCM(sav))
- crda->crd_len = 8; /* RFC4106 5, SPI + SN */
+ crp->crp_aad_length = 8; /* RFC4106 5, SPI + SN */
else
- crda->crd_len = m->m_pkthdr.len - (skip + alen);
- crda->crd_inject = m->m_pkthdr.len - alen;
-
- crda->crd_alg = esph->type;
-
- /* Copy the authenticator */
- m_copydata(m, m->m_pkthdr.len - alen, alen,
- (caddr_t) (xd + 1));
-
- /* Chain authentication request */
- crde = crda->crd_next;
- } else {
- crde = crp->crp_desc;
+ crp->crp_aad_length = hlen;
+ crp->crp_digest_start = m->m_pkthdr.len - alen;
}
/* Crypto operation descriptor */
crp->crp_ilen = m->m_pkthdr.len; /* Total input length */
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC;
if (V_async_crypto)
crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER;
- crp->crp_buf = (caddr_t) m;
+ crp->crp_mbuf = m;
+ crp->crp_buf_type = CRYPTO_BUF_MBUF;
crp->crp_callback = esp_input_cb;
- crp->crp_session = cryptoid;
- crp->crp_opaque = (caddr_t) xd;
+ crp->crp_opaque = xd;
/* These are passed as-is to the callback */
xd->sav = sav;
@@ -431,13 +402,12 @@
xd->vnet = curvnet;
/* Decryption descriptor */
- IPSEC_ASSERT(crde != NULL, ("null esp crypto descriptor"));
- crde->crd_skip = skip + hlen;
- crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen);
- crde->crd_inject = skip + hlen - sav->ivlen;
+ crp->crp_op |= CRYPTO_OP_DECRYPT;
+ crp->crp_payload_start = skip + hlen;
+ crp->crp_payload_length = m->m_pkthdr.len - (skip + hlen + alen);
if (SAV_ISCTRORGCM(sav)) {
- ivp = &crde->crd_iv[0];
+ ivp = &crp->crp_iv[0];
/* GCM IV Format: RFC4106 4 */
/* CTR IV Format: RFC3686 4 */
@@ -452,10 +422,9 @@
}
m_copydata(m, skip + hlen - sav->ivlen, sav->ivlen, &ivp[4]);
- crde->crd_flags |= CRD_F_IV_EXPLICIT;
- }
-
- crde->crd_alg = espx->type;
+ crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
+ } else if (sav->ivlen != 0)
+ crp->crp_iv_start = skip + hlen - sav->ivlen;
return (crypto_dispatch(crp));
bad:
@@ -471,22 +440,17 @@
esp_input_cb(struct cryptop *crp)
{
IPSEC_DEBUG_DECLARE(char buf[128]);
- u_int8_t lastthree[3], aalg[AH_HMAC_MAXHASHLEN];
+ uint8_t lastthree[3];
const struct auth_hash *esph;
struct mbuf *m;
- struct cryptodesc *crd;
struct xform_data *xd;
struct secasvar *sav;
struct secasindex *saidx;
- caddr_t ptr;
crypto_session_t cryptoid;
int hlen, skip, protoff, error, alen;
- crd = crp->crp_desc;
- IPSEC_ASSERT(crd != NULL, ("null crypto descriptor!"));
-
- m = (struct mbuf *) crp->crp_buf;
- xd = (struct xform_data *) crp->crp_opaque;
+ m = crp->crp_mbuf;
+ xd = crp->crp_opaque;
CURVNET_SET(xd->vnet);
sav = xd->sav;
skip = xd->skip;
@@ -505,10 +469,15 @@
CURVNET_RESTORE();
return (crypto_dispatch(crp));
}
- ESPSTAT_INC(esps_noxform);
- DPRINTF(("%s: crypto error %d\n", __func__, crp->crp_etype));
- error = crp->crp_etype;
- goto bad;
+
+ /* EBADMSG indicates authentication failure. */
+ if (!(crp->crp_etype == EBADMSG && esph != NULL)) {
+ ESPSTAT_INC(esps_noxform);
+ DPRINTF(("%s: crypto error %d\n", __func__,
+ crp->crp_etype));
+ error = crp->crp_etype;
+ goto bad;
+ }
}
/* Shouldn't happen... */
@@ -524,12 +493,7 @@
if (esph != NULL) {
alen = xform_ah_authsize(esph);
AHSTAT_INC(ahs_hist[sav->alg_auth]);
- /* Copy the authenticator from the packet */
- m_copydata(m, m->m_pkthdr.len - alen, alen, aalg);
- ptr = (caddr_t) (xd + 1);
-
- /* Verify authenticator */
- if (timingsafe_bcmp(ptr, aalg, alen) != 0) {
+ if (crp->crp_etype == EBADMSG) {
DPRINTF(("%s: authentication hash mismatch for "
"packet in SA %s/%08lx\n", __func__,
ipsec_address(&saidx->dst, buf, sizeof(buf)),
@@ -666,7 +630,6 @@
u_int idx, int skip, int protoff)
{
IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]);
- struct cryptodesc *crde = NULL, *crda = NULL;
struct cryptop *crp;
const struct auth_hash *esph;
const struct enc_xform *espx;
@@ -825,10 +788,10 @@
prot = IPPROTO_ESP;
m_copyback(m, protoff, sizeof(u_int8_t), (u_char *) &prot);
- /* Get crypto descriptors. */
- crp = crypto_getreq(esph != NULL ? 2 : 1);
+ /* Get crypto descriptor. */
+ crp = crypto_getreq(cryptoid, M_NOWAIT);
if (crp == NULL) {
- DPRINTF(("%s: failed to acquire crypto descriptors\n",
+ DPRINTF(("%s: failed to acquire crypto descriptor\n",
__func__));
ESPSTAT_INC(esps_crypto);
error = ENOBUFS;
@@ -845,19 +808,14 @@
goto bad;
}
- crde = crp->crp_desc;
- crda = crde->crd_next;
-
/* Encryption descriptor. */
- crde->crd_skip = skip + hlen;
- crde->crd_len = m->m_pkthdr.len - (skip + hlen + alen);
- crde->crd_flags = CRD_F_ENCRYPT;
- crde->crd_inject = skip + hlen - sav->ivlen;
+ crp->crp_payload_start = skip + hlen;
+ crp->crp_payload_length = m->m_pkthdr.len - (skip + hlen + alen);
+ crp->crp_op = CRYPTO_OP_ENCRYPT;
/* Encryption operation. */
- crde->crd_alg = espx->type;
if (SAV_ISCTRORGCM(sav)) {
- ivp = &crde->crd_iv[0];
+ ivp = &crp->crp_iv[0];
/* GCM IV Format: RFC4106 4 */
/* CTR IV Format: RFC3686 4 */
@@ -873,7 +831,10 @@
}
m_copyback(m, skip + hlen - sav->ivlen, sav->ivlen, &ivp[4]);
- crde->crd_flags |= CRD_F_IV_EXPLICIT|CRD_F_IV_PRESENT;
+ crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
+ } else if (sav->ivlen != 0) {
+ crp->crp_iv_start = skip + hlen - sav->ivlen;
+ crp->crp_flags |= CRYPTO_F_IV_GENERATE;
}
/* Callback parameters */
@@ -885,23 +846,23 @@
/* Crypto operation descriptor. */
crp->crp_ilen = m->m_pkthdr.len; /* Total input length. */
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
+ crp->crp_flags |= CRYPTO_F_CBIFSYNC;
if (V_async_crypto)
crp->crp_flags |= CRYPTO_F_ASYNC | CRYPTO_F_ASYNC_KEEPORDER;
- crp->crp_buf = (caddr_t) m;
+ crp->crp_mbuf = m;
+ crp->crp_buf_type = CRYPTO_BUF_MBUF;
crp->crp_callback = esp_output_cb;
- crp->crp_opaque = (caddr_t) xd;
- crp->crp_session = cryptoid;
+ crp->crp_opaque = xd;
if (esph) {
/* Authentication descriptor. */
- crda->crd_alg = esph->type;
- crda->crd_skip = skip;
+ crp->crp_op |= CRYPTO_OP_COMPUTE_DIGEST;
+ crp->crp_aad_start = skip;
if (SAV_ISGCM(sav))
- crda->crd_len = 8; /* RFC4106 5, SPI + SN */
+ crp->crp_aad_length = 8; /* RFC4106 5, SPI + SN */
else
- crda->crd_len = m->m_pkthdr.len - (skip + alen);
- crda->crd_inject = m->m_pkthdr.len - alen;
+ crp->crp_aad_length = hlen;
+ crp->crp_digest_start = m->m_pkthdr.len - alen;
}
return crypto_dispatch(crp);
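
One behavioral change visible in esp_input_cb above: digest verification moves
into the framework (CRYPTO_OP_VERIFY_DIGEST), so an ICV mismatch now arrives
in the callback as crp_etype == EBADMSG instead of a consumer-side
timingsafe_bcmp. A sketch of the resulting callback shape, with the
bookkeeping elided to comments (my_input_cb is a hypothetical name):

	static int
	my_input_cb(struct cryptop *crp)
	{
		if (crp->crp_etype == EAGAIN) {
			/* Session migrated; reset and retry. */
			crp->crp_etype = 0;
			return (crypto_dispatch(crp));
		}
		if (crp->crp_etype == EBADMSG) {
			/* Authentication (ICV) failure: count and drop. */
		} else if (crp->crp_etype != 0) {
			/* Other crypto errors abort the packet as before. */
		}
		/* ... strip headers and hand the packet up ... */
		return (0);
	}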
Index: sys/netipsec/xform_ipcomp.c
===================================================================
--- sys/netipsec/xform_ipcomp.c
+++ sys/netipsec/xform_ipcomp.c
@@ -156,7 +156,7 @@
ipcomp_init(struct secasvar *sav, struct xformsw *xsp)
{
const struct comp_algo *tcomp;
- struct cryptoini cric;
+ struct crypto_session_params csp;
/* NB: algorithm really comes in alg_enc and not alg_comp! */
tcomp = comp_algorithm_lookup(sav->alg_enc);
@@ -170,10 +170,11 @@
sav->tdb_compalgxform = tcomp;
/* Initialize crypto session */
- bzero(&cric, sizeof (cric));
- cric.cri_alg = sav->tdb_compalgxform->type;
+ memset(&csp, 0, sizeof(csp));
+ csp.csp_mode = CSP_MODE_COMPRESS;
+ csp.csp_cipher_alg = sav->tdb_compalgxform->type;
- return crypto_newsession(&sav->tdb_cryptoid, &cric, V_crypto_support);
+ return crypto_newsession(&sav->tdb_cryptoid, &csp, V_crypto_support);
}
/*
@@ -195,9 +196,9 @@
ipcomp_input(struct mbuf *m, struct secasvar *sav, int skip, int protoff)
{
struct xform_data *xd;
- struct cryptodesc *crdc;
struct cryptop *crp;
struct ipcomp *ipcomp;
+ crypto_session_t cryptoid;
caddr_t addr;
int error, hlen = IPCOMP_HLENGTH;
@@ -222,8 +223,12 @@
goto bad;
}
+ SECASVAR_LOCK(sav);
+ cryptoid = sav->tdb_cryptoid;
+ SECASVAR_UNLOCK(sav);
+
/* Get crypto descriptors */
- crp = crypto_getreq(1);
+ crp = crypto_getreq(cryptoid, M_NOWAIT);
if (crp == NULL) {
DPRINTF(("%s: no crypto descriptors\n", __func__));
IPCOMPSTAT_INC(ipcomps_crypto);
@@ -237,28 +242,26 @@
crypto_freereq(crp);
goto bad;
}
- crdc = crp->crp_desc;
-
- crdc->crd_skip = skip + hlen;
- crdc->crd_len = m->m_pkthdr.len - (skip + hlen);
- crdc->crd_inject = skip;
/* Decompression operation */
- crdc->crd_alg = sav->tdb_compalgxform->type;
-
+ crp->crp_op = CRYPTO_OP_DECOMPRESS;
+ crp->crp_payload_start = skip + hlen;
+ crp->crp_payload_length = m->m_pkthdr.len - (skip + hlen);
/* Crypto operation descriptor */
crp->crp_ilen = m->m_pkthdr.len - (skip + hlen);
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
- crp->crp_buf = (caddr_t) m;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC;
+ crp->crp_mbuf = m;
+ crp->crp_buf_type = CRYPTO_BUF_MBUF;
crp->crp_callback = ipcomp_input_cb;
- crp->crp_opaque = (caddr_t) xd;
+ crp->crp_opaque = xd;
/* These are passed as-is to the callback */
xd->sav = sav;
xd->protoff = protoff;
xd->skip = skip;
xd->vnet = curvnet;
+ xd->cryptoid = cryptoid;
- SECASVAR_LOCK(sav);
- crp->crp_session = xd->cryptoid = sav->tdb_cryptoid;
@@ -288,8 +291,8 @@
int skip, protoff;
uint8_t nproto;
- m = (struct mbuf *) crp->crp_buf;
- xd = (struct xform_data *) crp->crp_opaque;
+ m = crp->crp_mbuf;
+ xd = crp->crp_opaque;
CURVNET_SET(xd->vnet);
sav = xd->sav;
skip = xd->skip;
@@ -396,9 +399,9 @@
{
IPSEC_DEBUG_DECLARE(char buf[IPSEC_ADDRSTRLEN]);
const struct comp_algo *ipcompx;
- struct cryptodesc *crdc;
struct cryptop *crp;
struct xform_data *xd;
+ crypto_session_t cryptoid;
int error, ralen, maxpacketsize;
IPSEC_ASSERT(sav != NULL, ("null SA"));
@@ -466,25 +469,23 @@
}
/* Ok now, we can pass to the crypto processing. */
+ SECASVAR_LOCK(sav);
+ cryptoid = sav->tdb_cryptoid;
+ SECASVAR_UNLOCK(sav);
/* Get crypto descriptors */
- crp = crypto_getreq(1);
+ crp = crypto_getreq(cryptoid, M_NOWAIT);
if (crp == NULL) {
IPCOMPSTAT_INC(ipcomps_crypto);
DPRINTF(("%s: failed to acquire crypto descriptor\n",__func__));
error = ENOBUFS;
goto bad;
}
- crdc = crp->crp_desc;
/* Compression descriptor */
- crdc->crd_skip = skip;
- crdc->crd_len = ralen;
- crdc->crd_flags = CRD_F_COMP;
- crdc->crd_inject = skip;
-
- /* Compression operation */
- crdc->crd_alg = ipcompx->type;
+ crp->crp_op = CRYPTO_OP_COMPRESS;
+ crp->crp_payload_start = skip;
+ crp->crp_payload_length = ralen;
/* IPsec-specific opaque crypto info */
xd = malloc(sizeof(struct xform_data), M_XDATA, M_NOWAIT | M_ZERO);
@@ -502,17 +503,15 @@
xd->skip = skip;
xd->protoff = protoff;
xd->vnet = curvnet;
+ xd->cryptoid = cryptoid;
/* Crypto operation descriptor */
crp->crp_ilen = m->m_pkthdr.len; /* Total input length */
- crp->crp_flags = CRYPTO_F_IMBUF | CRYPTO_F_CBIFSYNC;
- crp->crp_buf = (caddr_t) m;
+ crp->crp_flags = CRYPTO_F_CBIFSYNC;
+ crp->crp_mbuf = m;
+ crp->crp_buf_type = CRYPTO_BUF_MBUF;
crp->crp_callback = ipcomp_output_cb;
- crp->crp_opaque = (caddr_t) xd;
-
- SECASVAR_LOCK(sav);
- crp->crp_session = xd->cryptoid = sav->tdb_cryptoid;
- SECASVAR_UNLOCK(sav);
+ crp->crp_opaque = xd;
return crypto_dispatch(crp);
bad:
@@ -538,8 +537,8 @@
u_int idx;
int error, skip, protoff;
- m = (struct mbuf *) crp->crp_buf;
- xd = (struct xform_data *) crp->crp_opaque;
+ m = crp->crp_mbuf;
+ xd = crp->crp_opaque;
CURVNET_SET(xd->vnet);
idx = xd->idx;
sp = xd->sp;
@@ -572,7 +571,7 @@
}
IPCOMPSTAT_INC(ipcomps_hist[sav->alg_comp]);
- if (crp->crp_ilen - skip > crp->crp_olen) {
+ if (crp->crp_payload_length > crp->crp_olen) {
struct mbuf *mo;
struct ipcomp *ipcomp;
int roff;
@@ -639,8 +638,8 @@
} else {
/* Compression was useless, we have lost time. */
IPCOMPSTAT_INC(ipcomps_uncompr);
- DPRINTF(("%s: compressions was useless %d - %d <= %d\n",
- __func__, crp->crp_ilen, skip, crp->crp_olen));
+ DPRINTF(("%s: compression was useless %d <= %d\n",
+ __func__, crp->crp_payload_length, crp->crp_olen));
/* XXX remember state to not compress the next couple
* of packets, RFC 3173, 2.2. Non-Expansion Policy */
}
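
Compression requests keep the same shape: the payload range goes on the
cryptop, and the driver reports the produced size in crp_olen, so the
usefulness test is a direct comparison. For example (sizes hypothetical), a
1400-byte payload that deflates to 900 bytes gets an IPComp header prepended,
while one that "compresses" to 1402 bytes is sent unmodified:

	crp->crp_op = CRYPTO_OP_COMPRESS;
	crp->crp_payload_start = skip;
	crp->crp_payload_length = ralen;
	/* ... dispatch; then, in the callback: ... */
	if (crp->crp_payload_length > crp->crp_olen) {
		/* Compression won: prepend the IPComp header. */
	} else {
		/* Compression was useless; transmit the original datagram. */
	}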
Index: sys/opencrypto/criov.c
===================================================================
--- sys/opencrypto/criov.c
+++ sys/opencrypto/criov.c
@@ -157,41 +157,62 @@
}
void
-crypto_copyback(int flags, caddr_t buf, int off, int size, c_caddr_t in)
+crypto_copyback(struct cryptop *crp, int off, int size, const void *src)
{
- if ((flags & CRYPTO_F_IMBUF) != 0)
- m_copyback((struct mbuf *)buf, off, size, in);
- else if ((flags & CRYPTO_F_IOV) != 0)
- cuio_copyback((struct uio *)buf, off, size, in);
- else
- bcopy(in, buf + off, size);
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ m_copyback(crp->crp_mbuf, off, size, src);
+ break;
+ case CRYPTO_BUF_UIO:
+ cuio_copyback(crp->crp_uio, off, size, src);
+ break;
+ case CRYPTO_BUF_CONTIG:
+ bcopy(src, crp->crp_buf + off, size);
+ break;
+ default:
+ panic("invalid crp buf type %d", crp->crp_buf_type);
+ }
}
void
-crypto_copydata(int flags, caddr_t buf, int off, int size, caddr_t out)
+crypto_copydata(struct cryptop *crp, int off, int size, void *dst)
{
- if ((flags & CRYPTO_F_IMBUF) != 0)
- m_copydata((struct mbuf *)buf, off, size, out);
- else if ((flags & CRYPTO_F_IOV) != 0)
- cuio_copydata((struct uio *)buf, off, size, out);
- else
- bcopy(buf + off, out, size);
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ m_copydata(crp->crp_mbuf, off, size, dst);
+ break;
+ case CRYPTO_BUF_UIO:
+ cuio_copydata(crp->crp_uio, off, size, dst);
+ break;
+ case CRYPTO_BUF_CONTIG:
+ bcopy(crp->crp_buf + off, dst, size);
+ break;
+ default:
+ panic("invalid crp buf type %d", crp->crp_buf_type);
+ }
}
int
-crypto_apply(int flags, caddr_t buf, int off, int len,
+crypto_apply(struct cryptop *crp, int off, int len,
int (*f)(void *, void *, u_int), void *arg)
{
int error;
- if ((flags & CRYPTO_F_IMBUF) != 0)
- error = m_apply((struct mbuf *)buf, off, len, f, arg);
- else if ((flags & CRYPTO_F_IOV) != 0)
- error = cuio_apply((struct uio *)buf, off, len, f, arg);
- else
- error = (*f)(arg, buf + off, len);
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ error = m_apply(crp->crp_mbuf, off, len, f, arg);
+ break;
+ case CRYPTO_BUF_UIO:
+ error = cuio_apply(crp->crp_uio, off, len, f, arg);
+ break;
+ case CRYPTO_BUF_CONTIG:
+ error = (*f)(arg, crp->crp_buf + off, len);
+ break;
+ default:
+ panic("invalid crp buf type %d", crp->crp_buf_type);
+ }
return (error);
}
@@ -279,17 +300,17 @@
}
void *
-crypto_contiguous_subsegment(int crp_flags, void *crpbuf,
- size_t skip, size_t len)
+crypto_contiguous_subsegment(struct cryptop *crp, size_t skip, size_t len)
{
- if ((crp_flags & CRYPTO_F_IMBUF) != 0)
- return (m_contiguous_subsegment(crpbuf, skip, len));
- else if ((crp_flags & CRYPTO_F_IOV) != 0)
- return (cuio_contiguous_segment(crpbuf, skip, len));
- else {
- MPASS((crp_flags & (CRYPTO_F_IMBUF | CRYPTO_F_IOV)) !=
- (CRYPTO_F_IMBUF | CRYPTO_F_IOV));
- return ((char*)crpbuf + skip);
+
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ return (m_contiguous_subsegment(crp->crp_mbuf, skip, len));
+ case CRYPTO_BUF_UIO:
+ return (cuio_contiguous_segment(crp->crp_uio, skip, len));
+ case CRYPTO_BUF_CONTIG:
+ return (crp->crp_buf + skip);
+ default:
+ panic("invalid crp buf type %d", crp->crp_buf_type);
}
}
-
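The criov helpers above now take the request itself and dispatch on crp_buf_type,
so drivers no longer thread (flags, buf) pairs through their data paths. A minimal
sketch of a driver-side digest path using the new signatures; drv_hash_update,
struct drv_session, and its ctx/mlen members are illustrative names, not part of
this diff:

static int
drv_hash_update(void *arg, void *data, u_int len)
{
	/* Fold 'len' bytes at 'data' into the hash context at 'arg'. */
	return (0);
}

static int
drv_process_digest(struct cryptop *crp, struct drv_session *ses)
{
	uint8_t digest[HASH_MAX_LEN];
	int error;

	/* crypto_apply() walks mbuf, uio, or contiguous buffers alike. */
	error = crypto_apply(crp, crp->crp_payload_start,
	    crp->crp_payload_length, drv_hash_update, &ses->ctx);
	if (error != 0)
		return (error);
	/* ... finalize the context into 'digest' ... */
	crypto_copyback(crp, crp->crp_digest_start, ses->mlen, digest);
	return (0);
}

For CRYPTO_OP_VERIFY_DIGEST a driver would instead crypto_copydata() the expected
tag out of the request buffer and timingsafe_bcmp() it against the computed one.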
Index: sys/opencrypto/crypto.c
===================================================================
--- sys/opencrypto/crypto.c
+++ sys/opencrypto/crypto.c
@@ -56,6 +56,7 @@
#define CRYPTO_TIMING /* enable timing support */
+#include "opt_compat.h"
#include "opt_ddb.h"
#include <sys/param.h>
@@ -69,6 +70,7 @@
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/proc.h>
+#include <sys/refcount.h>
#include <sys/sdt.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
@@ -79,7 +81,8 @@
#include <vm/uma.h>
#include <crypto/intake.h>
#include <opencrypto/cryptodev.h>
-#include <opencrypto/xform.h> /* XXX for M_XDATA */
+#include <opencrypto/xform_auth.h>
+#include <opencrypto/xform_enc.h>
#include <sys/kobj.h>
#include <sys/bus.h>
@@ -89,19 +92,12 @@
#include <machine/pcb.h>
#endif
-struct crypto_session {
- device_t parent;
- void *softc;
- uint32_t hid;
- uint32_t capabilities;
-};
-
SDT_PROVIDER_DEFINE(opencrypto);
/*
* Crypto drivers register themselves by allocating a slot in the
* crypto_drivers table with crypto_get_driverid() and then registering
- * each algorithm they support with crypto_register() and crypto_kregister().
+ * each asym algorithm they support with crypto_kregister().
*/
static struct mtx crypto_drivers_mtx; /* lock on driver table */
#define CRYPTO_DRIVER_LOCK() mtx_lock(&crypto_drivers_mtx)
@@ -117,15 +113,10 @@
* Not tagged fields are read-only.
*/
struct cryptocap {
- device_t cc_dev; /* (d) device/driver */
+ device_t cc_dev;
+ uint32_t cc_hid;
u_int32_t cc_sessions; /* (d) # of sessions */
 u_int32_t cc_koperations; /* (d) # of asym operations */
- /*
- * Largest possible operator length (in bits) for each type of
- * encryption algorithm. XXX not used
- */
- u_int16_t cc_max_op_len[CRYPTO_ALGORITHM_MAX + 1];
- u_int8_t cc_alg[CRYPTO_ALGORITHM_MAX + 1];
u_int8_t cc_kalg[CRK_ALGORITHM_MAX + 1];
int cc_flags; /* (d) flags */
@@ -133,9 +124,17 @@
int cc_qblocked; /* (q) symmetric q blocked */
int cc_kqblocked; /* (q) asymmetric q blocked */
size_t cc_session_size;
+ volatile int cc_refs;
+};
+
+static struct cryptocap **crypto_drivers = NULL;
+static int crypto_drivers_size = 0;
+
+struct crypto_session {
+ struct cryptocap *cap;
+ void *softc;
+ struct crypto_session_params csp;
};
-static struct cryptocap *crypto_drivers = NULL;
-static int crypto_drivers_num = 0;
/*
* There are two queues for crypto requests; one for symmetric (e.g.
@@ -151,6 +150,9 @@
#define CRYPTO_Q_LOCK() mtx_lock(&crypto_q_mtx)
#define CRYPTO_Q_UNLOCK() mtx_unlock(&crypto_q_mtx)
+static SYSCTL_NODE(_kern, OID_AUTO, crypto, CTLFLAG_RW, 0,
+ "In-kernel cryptography");
+
/*
* Taskqueue used to dispatch the crypto requests
* that have the CRYPTO_F_ASYNC flag
@@ -187,22 +189,37 @@
(TAILQ_EMPTY(&w->crp_ret_q) && TAILQ_EMPTY(&w->crp_ret_kq) && TAILQ_EMPTY(&w->crp_ordered_ret_q))
static int crypto_workers_num = 0;
+SYSCTL_INT(_kern_crypto, OID_AUTO, num_workers, CTLFLAG_RDTUN,
+ &crypto_workers_num, 0,
+ "Number of crypto workers used to dispatch crypto jobs");
+#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, crypto_workers_num, CTLFLAG_RDTUN,
&crypto_workers_num, 0,
"Number of crypto workers used to dispatch crypto jobs");
+#endif
static uma_zone_t cryptop_zone;
-static uma_zone_t cryptodesc_zone;
static uma_zone_t cryptoses_zone;
-int crypto_userasymcrypto = 1; /* userland may do asym crypto reqs */
+int crypto_userasymcrypto = 1;
+SYSCTL_INT(_kern_crypto, OID_AUTO, asym_enable, CTLFLAG_RW,
+ &crypto_userasymcrypto, 0,
+ "Enable user-mode access to asymmetric crypto support");
+#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, userasymcrypto, CTLFLAG_RW,
&crypto_userasymcrypto, 0,
"Enable/disable user-mode access to asymmetric crypto support");
-int crypto_devallowsoft = 0; /* only use hardware crypto */
+#endif
+
+int crypto_devallowsoft = 0;
+SYSCTL_INT(_kern_crypto, OID_AUTO, allow_soft, CTLFLAG_RW,
+ &crypto_devallowsoft, 0,
+ "Enable use of software crypto by /dev/crypto");
+#ifdef COMPAT_FREEBSD12
SYSCTL_INT(_kern, OID_AUTO, cryptodevallowsoft, CTLFLAG_RW,
&crypto_devallowsoft, 0,
"Enable/disable use of software crypto by /dev/crypto");
+#endif
MALLOC_DEFINE(M_CRYPTO_DATA, "crypto", "crypto session records");
@@ -211,13 +228,12 @@
static void crypto_ret_proc(struct crypto_ret_worker *ret_worker);
static void crypto_destroy(void);
static int crypto_invoke(struct cryptocap *cap, struct cryptop *crp, int hint);
-static int crypto_kinvoke(struct cryptkop *krp, int flags);
-static void crypto_remove(struct cryptocap *cap);
+static int crypto_kinvoke(struct cryptkop *krp);
static void crypto_task_invoke(void *ctx, int pending);
static void crypto_batch_enqueue(struct cryptop *crp);
static struct cryptostats cryptostats;
-SYSCTL_STRUCT(_kern, OID_AUTO, crypto_stats, CTLFLAG_RW, &cryptostats,
+SYSCTL_STRUCT(_kern_crypto, OID_AUTO, stats, CTLFLAG_RW, &cryptostats,
cryptostats, "Crypto system statistics");
#ifdef CRYPTO_TIMING
@@ -257,6 +273,29 @@
return (keybuf);
}
+static struct cryptocap *
+cap_ref(struct cryptocap *cap)
+{
+
+ refcount_acquire(&cap->cc_refs);
+ return (cap);
+}
+
+static void
+cap_rele(struct cryptocap *cap)
+{
+
+ if (refcount_release(&cap->cc_refs) == 0)
+ return;
+
+ KASSERT(cap->cc_sessions == 0,
+ ("freeing crypto driver with active sessions"));
+ KASSERT(cap->cc_koperations == 0,
+ ("freeing crypto driver with active key operations"));
+
+ free(cap, M_CRYPTO_DATA);
+}
+
static int
crypto_init(void)
{
@@ -273,22 +312,18 @@
cryptop_zone = uma_zcreate("cryptop", sizeof (struct cryptop),
0, 0, 0, 0,
UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
- cryptodesc_zone = uma_zcreate("cryptodesc", sizeof (struct cryptodesc),
- 0, 0, 0, 0,
- UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
cryptoses_zone = uma_zcreate("crypto_session",
sizeof(struct crypto_session), NULL, NULL, NULL, NULL,
UMA_ALIGN_PTR, UMA_ZONE_ZINIT);
- if (cryptodesc_zone == NULL || cryptop_zone == NULL ||
- cryptoses_zone == NULL) {
+ if (cryptop_zone == NULL || cryptoses_zone == NULL) {
printf("crypto_init: cannot setup crypto zones\n");
error = ENOMEM;
goto bad;
}
- crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
- crypto_drivers = malloc(crypto_drivers_num *
+ crypto_drivers_size = CRYPTO_DRIVERS_INITIAL;
+ crypto_drivers = malloc(crypto_drivers_size *
sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT | M_ZERO);
if (crypto_drivers == NULL) {
printf("crypto_init: cannot setup crypto drivers\n");
@@ -379,10 +414,57 @@
}
}
+static void
+hmac_init_pad(struct auth_hash *axf, const char *key, int klen, void *auth_ctx,
+ uint8_t padval)
+{
+ uint8_t hmac_key[HMAC_MAX_BLOCK_LEN];
+ u_int i;
+
+ KASSERT(axf->blocksize <= sizeof(hmac_key),
+ ("Invalid HMAC block size %d", axf->blocksize));
+
+ /*
+ * If the key is larger than the block size, use the digest of
+ * the key as the key instead.
+ */
+ memset(hmac_key, 0, sizeof(hmac_key));
+ if (klen > axf->blocksize) {
+ axf->Init(auth_ctx);
+ axf->Update(auth_ctx, key, klen);
+ axf->Final(hmac_key, auth_ctx);
+ klen = axf->hashsize;
+ } else
+ memcpy(hmac_key, key, klen);
+
+ for (i = 0; i < axf->blocksize; i++)
+ hmac_key[i] ^= padval;
+
+ axf->Init(auth_ctx);
+ axf->Update(auth_ctx, hmac_key, axf->blocksize);
+}
+
+void
+hmac_init_ipad(struct auth_hash *axf, const char *key, int klen,
+ void *auth_ctx)
+{
+
+ hmac_init_pad(axf, key, klen, auth_ctx, HMAC_IPAD_VAL);
+}
+
+void
+hmac_init_opad(struct auth_hash *axf, const char *key, int klen,
+ void *auth_ctx)
+{
+
+ hmac_init_pad(axf, key, klen, auth_ctx, HMAC_OPAD_VAL);
+}
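These helpers let drivers share the standard HMAC ipad/opad setup instead of
open-coding it. A sketch of a driver newsession path using them; 'sc' and its
ictx/octx context members are assumptions for illustration:

	struct auth_hash *axf;

	axf = crypto_auth_hash(csp);
	hmac_init_ipad(axf, csp->csp_auth_key, csp->csp_auth_klen,
	    &sc->ictx);
	hmac_init_opad(axf, csp->csp_auth_key, csp->csp_auth_klen,
	    &sc->octx);

Per operation the driver then copies ictx, runs Update() over the data,
finalizes, and feeds the inner digest through a copy of octx to produce the HMAC.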
+
static void
crypto_destroy(void)
{
struct crypto_ret_worker *ret_worker;
+ int i;
/*
* Terminate any crypto threads.
@@ -400,13 +482,14 @@
/*
* Reclaim dynamically allocated resources.
*/
- if (crypto_drivers != NULL)
- free(crypto_drivers, M_CRYPTO_DATA);
+ for (i = 0; i < crypto_drivers_size; i++) {
+ if (crypto_drivers[i] != NULL)
+ cap_rele(crypto_drivers[i]);
+ }
+ free(crypto_drivers, M_CRYPTO_DATA);
if (cryptoses_zone != NULL)
uma_zdestroy(cryptoses_zone);
- if (cryptodesc_zone != NULL)
- uma_zdestroy(cryptodesc_zone);
if (cryptop_zone != NULL)
uma_zdestroy(cryptop_zone);
mtx_destroy(&crypto_q_mtx);
@@ -421,13 +504,13 @@
uint32_t
crypto_ses2hid(crypto_session_t crypto_session)
{
- return (crypto_session->hid);
+ return (crypto_session->cap->cc_hid);
}
uint32_t
crypto_ses2caps(crypto_session_t crypto_session)
{
- return (crypto_session->capabilities);
+ return (crypto_session->cap->cc_flags & 0xff000000);
}
void *
@@ -436,86 +519,411 @@
return (crypto_session->softc);
}
+const struct crypto_session_params *
+crypto_get_params(crypto_session_t crypto_session)
+{
+ return (&crypto_session->csp);
+}
+
+struct auth_hash *
+crypto_auth_hash(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ return (&auth_hash_hmac_md5);
+ case CRYPTO_SHA1_HMAC:
+ return (&auth_hash_hmac_sha1);
+ case CRYPTO_SHA2_224_HMAC:
+ return (&auth_hash_hmac_sha2_224);
+ case CRYPTO_SHA2_256_HMAC:
+ return (&auth_hash_hmac_sha2_256);
+ case CRYPTO_SHA2_384_HMAC:
+ return (&auth_hash_hmac_sha2_384);
+ case CRYPTO_SHA2_512_HMAC:
+ return (&auth_hash_hmac_sha2_512);
+ case CRYPTO_NULL_HMAC:
+ return (&auth_hash_null);
+ case CRYPTO_RIPEMD160_HMAC:
+ return (&auth_hash_hmac_ripemd_160);
+ case CRYPTO_MD5_KPDK:
+ return (&auth_hash_key_md5);
+ case CRYPTO_SHA1_KPDK:
+ return (&auth_hash_key_sha1);
+#ifdef notyet
+ case CRYPTO_MD5:
+ return (&auth_hash_md5);
+#endif
+ case CRYPTO_SHA1:
+ return (&auth_hash_sha1);
+ case CRYPTO_SHA2_224:
+ return (&auth_hash_sha2_224);
+ case CRYPTO_SHA2_256:
+ return (&auth_hash_sha2_256);
+ case CRYPTO_SHA2_384:
+ return (&auth_hash_sha2_384);
+ case CRYPTO_SHA2_512:
+ return (&auth_hash_sha2_512);
+ case CRYPTO_AES_NIST_GMAC:
+ switch (csp->csp_auth_klen) {
+ case 128 / 8:
+ return (&auth_hash_nist_gmac_aes_128);
+ case 192 / 8:
+ return (&auth_hash_nist_gmac_aes_192);
+ case 256 / 8:
+ return (&auth_hash_nist_gmac_aes_256);
+ default:
+ return (NULL);
+ }
+ case CRYPTO_BLAKE2B:
+ return (&auth_hash_blake2b);
+ case CRYPTO_BLAKE2S:
+ return (&auth_hash_blake2s);
+ case CRYPTO_POLY1305:
+ return (&auth_hash_poly1305);
+ case CRYPTO_AES_CCM_CBC_MAC:
+ switch (csp->csp_auth_klen) {
+ case 128 / 8:
+ return (&auth_hash_ccm_cbc_mac_128);
+ case 192 / 8:
+ return (&auth_hash_ccm_cbc_mac_192);
+ case 256 / 8:
+ return (&auth_hash_ccm_cbc_mac_256);
+ default:
+ return (NULL);
+ }
+ default:
+ return (NULL);
+ }
+}
+
+struct enc_xform *
+crypto_cipher(const struct crypto_session_params *csp)
+{
+
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DES_CBC:
+ return (&enc_xform_des);
+ case CRYPTO_3DES_CBC:
+ return (&enc_xform_3des);
+ case CRYPTO_BLF_CBC:
+ return (&enc_xform_blf);
+ case CRYPTO_CAST_CBC:
+ return (&enc_xform_cast5);
+ case CRYPTO_SKIPJACK_CBC:
+ return (&enc_xform_skipjack);
+ case CRYPTO_RIJNDAEL128_CBC:
+ return (&enc_xform_rijndael128);
+ case CRYPTO_AES_XTS:
+ return (&enc_xform_aes_xts);
+ case CRYPTO_AES_ICM:
+ return (&enc_xform_aes_icm);
+ case CRYPTO_AES_NIST_GCM_16:
+ return (&enc_xform_aes_nist_gcm);
+ case CRYPTO_CAMELLIA_CBC:
+ return (&enc_xform_camellia);
+ case CRYPTO_NULL_CBC:
+ return (&enc_xform_null);
+ case CRYPTO_CHACHA20:
+ return (&enc_xform_chacha20);
+ case CRYPTO_AES_CCM_16:
+ return (&enc_xform_ccm);
+ default:
+ return (NULL);
+ }
+}
+
static struct cryptocap *
crypto_checkdriver(u_int32_t hid)
{
- if (crypto_drivers == NULL)
- return NULL;
- return (hid >= crypto_drivers_num ? NULL : &crypto_drivers[hid]);
-}
-/*
- * Compare a driver's list of supported algorithms against another
- * list; return non-zero if all algorithms are supported.
- */
-static int
-driver_suitable(const struct cryptocap *cap, const struct cryptoini *cri)
-{
- const struct cryptoini *cr;
-
- /* See if all the algorithms are supported. */
- for (cr = cri; cr; cr = cr->cri_next)
- if (cap->cc_alg[cr->cri_alg] == 0)
- return 0;
- return 1;
+ return (hid >= crypto_drivers_size ? NULL : crypto_drivers[hid]);
}
/*
* Select a driver for a new session that supports the specified
* algorithms and, optionally, is constrained according to the flags.
- * The algorithm we use here is pretty stupid; just use the
- * first driver that supports all the algorithms we need. If there
- * are multiple drivers we choose the driver with the fewest active
- * sessions. We prefer hardware-backed drivers to software ones.
- *
- * XXX We need more smarts here (in real life too, but that's
- * XXX another story altogether).
*/
static struct cryptocap *
-crypto_select_driver(const struct cryptoini *cri, int flags)
+crypto_select_driver(const struct crypto_session_params *csp, int flags)
{
struct cryptocap *cap, *best;
- int match, hid;
+ int best_match, error, hid;
CRYPTO_DRIVER_ASSERT();
- /*
- * Look first for hardware crypto devices if permitted.
- */
- if (flags & CRYPTOCAP_F_HARDWARE)
- match = CRYPTOCAP_F_HARDWARE;
- else
- match = CRYPTOCAP_F_SOFTWARE;
best = NULL;
-again:
- for (hid = 0; hid < crypto_drivers_num; hid++) {
- cap = &crypto_drivers[hid];
+ for (hid = 0; hid < crypto_drivers_size; hid++) {
/*
- * If it's not initialized, is in the process of
- * going away, or is not appropriate (hardware
- * or software based on match), then skip.
+ * If there is no driver for this slot, or the driver
+ * is not appropriate (hardware or software based on
+ * match), then skip.
*/
- if (cap->cc_dev == NULL ||
- (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
- (cap->cc_flags & match) == 0)
+ cap = crypto_drivers[hid];
+ if (cap == NULL ||
+ (cap->cc_flags & flags) == 0)
continue;
- /* verify all the algorithms are supported. */
- if (driver_suitable(cap, cri)) {
- if (best == NULL ||
- cap->cc_sessions < best->cc_sessions)
- best = cap;
+ error = CRYPTODEV_PROBESESSION(cap->cc_dev, csp);
+ if (error >= 0)
+ continue;
+
+ /*
+ * Use the driver with the highest probe value.
+ * Hardware drivers use a higher probe value than
+ * software. In case of a tie, prefer the driver with
+ * the fewest active sessions.
+ */
+ if (best == NULL || error > best_match ||
+ (error == best_match &&
+ cap->cc_sessions < best->cc_sessions)) {
+ best = cap;
+ best_match = error;
}
}
- if (best == NULL && match == CRYPTOCAP_F_HARDWARE &&
- (flags & CRYPTOCAP_F_SOFTWARE)) {
- /* sort of an Algol 68-style for loop */
- match = CRYPTOCAP_F_SOFTWARE;
- goto again;
- }
return best;
}
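The probe values come from the new CRYPTODEV_PROBESESSION method (the
CRYPTODEV_PROBE_* constants are defined in cryptodev.h later in this diff). A
hedged sketch of what a hardware driver's method might look like;
drv_probesession is an illustrative name, not part of this diff:

static int
drv_probesession(device_t dev, const struct crypto_session_params *csp)
{
	switch (csp->csp_mode) {
	case CSP_MODE_CIPHER:
		if (crypto_cipher(csp) == NULL)
			return (EINVAL);
		break;
	default:
		return (EINVAL);
	}
	/* Positive errno declines; negative values bid, closer to 0 wins. */
	return (CRYPTODEV_PROBE_HARDWARE);
}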
+static bool
+alg_is_compression(int alg)
+{
+
+ if (alg == CRYPTO_DEFLATE_COMP)
+ return (true);
+ return (false);
+}
+
+static bool
+alg_is_cipher(int alg)
+{
+
+ if (alg >= CRYPTO_DES_CBC && alg <= CRYPTO_SKIPJACK_CBC)
+ return (true);
+ if (alg >= CRYPTO_AES_CBC && alg <= CRYPTO_ARC4)
+ return (true);
+ if (alg == CRYPTO_NULL_CBC)
+ return (true);
+ if (alg >= CRYPTO_CAMELLIA_CBC && alg <= CRYPTO_AES_ICM)
+ return (true);
+ if (alg == CRYPTO_CHACHA20)
+ return (true);
+ return (false);
+}
+
+static bool
+alg_is_digest(int alg)
+{
+
+ if (alg >= CRYPTO_MD5_HMAC && alg <= CRYPTO_SHA1_KPDK)
+ return (true);
+ if (alg >= CRYPTO_MD5 && alg <= CRYPTO_SHA1)
+ return (true);
+ if (alg == CRYPTO_NULL_HMAC)
+ return (true);
+ if (alg >= CRYPTO_SHA2_256_HMAC && alg <= CRYPTO_SHA2_512_HMAC)
+ return (true);
+ if (alg == CRYPTO_AES_NIST_GMAC)
+ return (true);
+ if (alg >= CRYPTO_BLAKE2B && alg <= CRYPTO_BLAKE2S)
+ return (true);
+ if (alg >= CRYPTO_SHA2_224_HMAC && alg <= CRYPTO_POLY1305)
+ return (true);
+ if (alg == CRYPTO_AES_CCM_CBC_MAC)
+ return (true);
+ return (false);
+}
+
+static bool
+alg_is_keyed_digest(int alg)
+{
+
+ if (alg >= CRYPTO_MD5_HMAC && alg <= CRYPTO_SHA1_KPDK)
+ return (true);
+ if (alg >= CRYPTO_SHA2_256_HMAC && alg <= CRYPTO_SHA2_512_HMAC)
+ return (true);
+ if (alg == CRYPTO_AES_NIST_GMAC)
+ return (true);
+ if (alg >= CRYPTO_BLAKE2B && alg <= CRYPTO_BLAKE2S)
+ return (true);
+ if (alg == CRYPTO_SHA2_224_HMAC)
+ return (true);
+ if (alg == CRYPTO_POLY1305)
+ return (true);
+ if (alg == CRYPTO_AES_CCM_CBC_MAC)
+ return (true);
+ return (false);
+}
+
+static bool
+alg_is_aead(int alg)
+{
+
+ if (alg == CRYPTO_AES_NIST_GCM_16)
+ return (true);
+ if (alg == CRYPTO_AES_CCM_16)
+ return (true);
+ return (false);
+}
+
+/* Various sanity checks on crypto session parameters. */
+static bool
+check_csp(const struct crypto_session_params *csp)
+{
+ struct auth_hash *axf;
+
+ /* Mode-independent checks. */
+ if (csp->csp_flags != 0)
+ return (false);
+ if (csp->csp_ivlen < 0 || csp->csp_cipher_klen < 0 ||
+ csp->csp_auth_klen < 0 || csp->csp_auth_mlen < 0)
+ return (false);
+ if (csp->csp_auth_key != NULL && csp->csp_auth_klen == 0)
+ return (false);
+ if (csp->csp_cipher_key != NULL && csp->csp_cipher_klen == 0)
+ return (false);
+
+ switch (csp->csp_mode) {
+ case CSP_MODE_COMPRESS:
+ if (!alg_is_compression(csp->csp_cipher_alg))
+ return (false);
+ if (csp->csp_flags != 0)
+ return (false);
+ if (csp->csp_cipher_klen != 0 || csp->csp_ivlen != 0 ||
+ csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
+ csp->csp_auth_mlen != 0)
+ return (false);
+ break;
+ case CSP_MODE_CIPHER:
+ if (!alg_is_cipher(csp->csp_cipher_alg))
+ return (false);
+ if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
+ if (csp->csp_cipher_klen == 0)
+ return (false);
+ if (csp->csp_cipher_alg != CRYPTO_ARC4) {
+ if (csp->csp_ivlen == 0)
+ return (false);
+ }
+ }
+ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
+ return (false);
+ if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0 ||
+ csp->csp_auth_mlen != 0)
+ return (false);
+ break;
+ case CSP_MODE_DIGEST:
+ if (csp->csp_cipher_alg != 0 || csp->csp_cipher_klen != 0)
+ return (false);
+
+ /* IV is optional for digests (e.g. GMAC). */
+ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
+ return (false);
+ if (!alg_is_digest(csp->csp_auth_alg))
+ return (false);
+
+ /* Key is optional for BLAKE2 digests. */
+ if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
+ csp->csp_auth_alg == CRYPTO_BLAKE2S)
+ ;
+ else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
+ if (csp->csp_auth_klen == 0)
+ return (false);
+ } else {
+ if (csp->csp_auth_klen != 0)
+ return (false);
+ }
+ if (csp->csp_auth_mlen != 0) {
+ axf = crypto_auth_hash(csp);
+ if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
+ return (false);
+ }
+ break;
+ case CSP_MODE_AEAD:
+ if (!alg_is_aead(csp->csp_cipher_alg))
+ return (false);
+ if (csp->csp_cipher_klen == 0)
+ return (false);
+ if (csp->csp_ivlen == 0 ||
+ csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
+ return (false);
+ if (csp->csp_auth_alg != 0 || csp->csp_auth_klen != 0)
+ return (false);
+
+ /*
+ * XXX: Would be nice to have a better way to get this
+ * value.
+ */
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ if (csp->csp_auth_mlen > 16)
+ return (false);
+ break;
+ }
+ break;
+ case CSP_MODE_ETA:
+ if (!alg_is_cipher(csp->csp_cipher_alg))
+ return (false);
+ if (csp->csp_cipher_alg != CRYPTO_NULL_CBC) {
+ if (csp->csp_cipher_klen == 0)
+ return (false);
+ if (csp->csp_cipher_alg != CRYPTO_ARC4) {
+ if (csp->csp_ivlen == 0)
+ return (false);
+ }
+ }
+ if (csp->csp_ivlen >= EALG_MAX_BLOCK_LEN)
+ return (false);
+ if (!alg_is_digest(csp->csp_auth_alg))
+ return (false);
+
+ /* Key is optional for BLAKE2 digests. */
+ if (csp->csp_auth_alg == CRYPTO_BLAKE2B ||
+ csp->csp_auth_alg == CRYPTO_BLAKE2S)
+ ;
+ else if (alg_is_keyed_digest(csp->csp_auth_alg)) {
+ if (csp->csp_auth_klen == 0)
+ return (false);
+ } else {
+ if (csp->csp_auth_klen != 0)
+ return (false);
+ }
+ if (csp->csp_auth_mlen != 0) {
+ axf = crypto_auth_hash(csp);
+ if (axf == NULL || csp->csp_auth_mlen > axf->hashsize)
+ return (false);
+ }
+ break;
+ default:
+ return (false);
+ }
+
+ return (true);
+}
+
+/*
+ * Delete a session after it has been detached from its driver.
+ */
+static void
+crypto_deletesession(crypto_session_t cses)
+{
+ struct cryptocap *cap;
+
+ cap = cses->cap;
+
+ explicit_bzero(cses->softc, cap->cc_session_size);
+ free(cses->softc, M_CRYPTO_DATA);
+ uma_zfree(cryptoses_zone, cses);
+
+ CRYPTO_DRIVER_LOCK();
+ cap->cc_sessions--;
+ if (cap->cc_sessions == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+ wakeup(cap);
+ CRYPTO_DRIVER_UNLOCK();
+ cap_rele(cap);
+}
+
/*
* Create a new session. The crid argument specifies a crypto
* driver to use or constraints on a driver to select (hardware
@@ -523,18 +931,17 @@
* must be capable of the requested crypto algorithms.
*/
int
-crypto_newsession(crypto_session_t *cses, struct cryptoini *cri, int crid)
+crypto_newsession(crypto_session_t *cses,
+ const struct crypto_session_params *csp, int crid)
{
crypto_session_t res;
- void *softc_mem;
struct cryptocap *cap;
- u_int32_t hid;
- size_t softc_size;
int err;
-restart:
+ if (!check_csp(csp))
+ return (EINVAL);
+
res = NULL;
- softc_mem = NULL;
CRYPTO_DRIVER_LOCK();
if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
@@ -542,76 +949,39 @@
* Use specified driver; verify it is capable.
*/
cap = crypto_checkdriver(crid);
- if (cap != NULL && !driver_suitable(cap, cri))
+ if (cap != NULL && CRYPTODEV_PROBESESSION(cap->cc_dev, csp) > 0)
cap = NULL;
} else {
/*
* No requested driver; select based on crid flags.
*/
- cap = crypto_select_driver(cri, crid);
- /*
- * if NULL then can't do everything in one session.
- * XXX Fix this. We need to inject a "virtual" session
- * XXX layer right about here.
- */
+ cap = crypto_select_driver(csp, crid);
}
if (cap == NULL) {
+ CRYPTO_DRIVER_UNLOCK();
CRYPTDEB("no driver");
- err = EOPNOTSUPP;
- goto out;
+ return (EOPNOTSUPP);
}
+ cap_ref(cap);
cap->cc_sessions++;
- softc_size = cap->cc_session_size;
- hid = cap - crypto_drivers;
- cap = NULL;
CRYPTO_DRIVER_UNLOCK();
- softc_mem = malloc(softc_size, M_CRYPTO_DATA, M_WAITOK | M_ZERO);
res = uma_zalloc(cryptoses_zone, M_WAITOK | M_ZERO);
- res->softc = softc_mem;
-
- CRYPTO_DRIVER_LOCK();
- cap = crypto_checkdriver(hid);
- if (cap != NULL && (cap->cc_flags & CRYPTOCAP_F_CLEANUP) != 0) {
- cap->cc_sessions--;
- crypto_remove(cap);
- cap = NULL;
- }
- if (cap == NULL) {
- free(softc_mem, M_CRYPTO_DATA);
- uma_zfree(cryptoses_zone, res);
- CRYPTO_DRIVER_UNLOCK();
- goto restart;
- }
+ res->cap = cap;
+ res->softc = malloc(cap->cc_session_size, M_CRYPTO_DATA, M_WAITOK |
+ M_ZERO);
+ res->csp = *csp;
/* Call the driver initialization routine. */
- err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, cri);
+ err = CRYPTODEV_NEWSESSION(cap->cc_dev, res, csp);
if (err != 0) {
CRYPTDEB("dev newsession failed: %d", err);
- goto out;
+ crypto_deletesession(res);
+ return (err);
}
- res->capabilities = cap->cc_flags & 0xff000000;
- res->hid = hid;
*cses = res;
-
-out:
- CRYPTO_DRIVER_UNLOCK();
- if (err != 0) {
- free(softc_mem, M_CRYPTO_DATA);
- if (res != NULL)
- uma_zfree(cryptoses_zone, res);
- }
- return err;
-}
-
-static void
-crypto_remove(struct cryptocap *cap)
-{
-
- mtx_assert(&crypto_drivers_mtx, MA_OWNED);
- if (cap->cc_sessions == 0 && cap->cc_koperations == 0)
- bzero(cap, sizeof(*cap));
+ return (0);
}
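For reference, a consumer-side sketch of the reworked KPI: build an on-stack
parameter block and hand it to crypto_newsession(). The 32-byte 'key' buffer is
an assumption for AES-256-GCM:

	struct crypto_session_params csp;
	crypto_session_t cses;
	int error;

	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_AEAD;
	csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
	csp.csp_cipher_key = key;
	csp.csp_cipher_klen = 32;
	csp.csp_ivlen = 12;
	error = crypto_newsession(&cses, &csp,
	    CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE);

Unlike the old cryptoini chain, the parameter block itself is copied into the
session ('res->csp = *csp' above), though key pointers are retained as-is.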
/*
@@ -622,101 +992,81 @@
crypto_freesession(crypto_session_t cses)
{
struct cryptocap *cap;
- void *ses;
- size_t ses_size;
- u_int32_t hid;
if (cses == NULL)
return;
- CRYPTO_DRIVER_LOCK();
-
- hid = crypto_ses2hid(cses);
- KASSERT(hid < crypto_drivers_num,
- ("bogus crypto_session %p hid %u", cses, hid));
- cap = &crypto_drivers[hid];
-
- ses = cses->softc;
- ses_size = cap->cc_session_size;
-
- if (cap->cc_sessions)
- cap->cc_sessions--;
+ cap = cses->cap;
/* Call the driver cleanup routine, if available. */
CRYPTODEV_FREESESSION(cap->cc_dev, cses);
- explicit_bzero(ses, ses_size);
- free(ses, M_CRYPTO_DATA);
- uma_zfree(cryptoses_zone, cses);
-
- if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
- crypto_remove(cap);
-
- CRYPTO_DRIVER_UNLOCK();
+ crypto_deletesession(cses);
}
/*
- * Return an unused driver id. Used by drivers prior to registering
- * support for the algorithms they handle.
+ * Return a new driver id. Registers a driver with the system so that
+ * it can be probed by subsequent sessions.
*/
int32_t
crypto_get_driverid(device_t dev, size_t sessionsize, int flags)
{
- struct cryptocap *newdrv;
+ struct cryptocap *cap, **newdrv;
int i;
if ((flags & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
- printf("%s: no flags specified when registering driver\n",
- device_get_nameunit(dev));
+ device_printf(dev,
+ "no flags specified when registering driver\n");
return -1;
}
+ cap = malloc(sizeof(*cap), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
+ cap->cc_dev = dev;
+ cap->cc_session_size = sessionsize;
+ cap->cc_flags = flags;
+ refcount_init(&cap->cc_refs, 1);
+
CRYPTO_DRIVER_LOCK();
-
- for (i = 0; i < crypto_drivers_num; i++) {
- if (crypto_drivers[i].cc_dev == NULL &&
- (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) == 0) {
- break;
+ for (;;) {
+ for (i = 0; i < crypto_drivers_size; i++) {
+ if (crypto_drivers[i] == NULL)
+ break;
}
- }
- /* Out of entries, allocate some more. */
- if (i == crypto_drivers_num) {
- /* Be careful about wrap-around. */
- if (2 * crypto_drivers_num <= crypto_drivers_num) {
+ if (i < crypto_drivers_size)
+ break;
+
+ /* Out of entries, allocate some more. */
+
+ if (2 * crypto_drivers_size <= crypto_drivers_size) {
CRYPTO_DRIVER_UNLOCK();
printf("crypto: driver count wraparound!\n");
- return -1;
+ cap_rele(cap);
+ return (-1);
}
+ CRYPTO_DRIVER_UNLOCK();
- newdrv = malloc(2 * crypto_drivers_num *
- sizeof(struct cryptocap), M_CRYPTO_DATA, M_NOWAIT|M_ZERO);
- if (newdrv == NULL) {
- CRYPTO_DRIVER_UNLOCK();
- printf("crypto: no space to expand driver table!\n");
- return -1;
- }
+ newdrv = malloc(2 * crypto_drivers_size *
+ sizeof(*crypto_drivers), M_CRYPTO_DATA, M_WAITOK | M_ZERO);
- bcopy(crypto_drivers, newdrv,
- crypto_drivers_num * sizeof(struct cryptocap));
+ CRYPTO_DRIVER_LOCK();
+ memcpy(newdrv, crypto_drivers,
+ crypto_drivers_size * sizeof(*crypto_drivers));
- crypto_drivers_num *= 2;
+ crypto_drivers_size *= 2;
free(crypto_drivers, M_CRYPTO_DATA);
crypto_drivers = newdrv;
}
- /* NB: state is zero'd on free */
- crypto_drivers[i].cc_sessions = 1; /* Mark */
- crypto_drivers[i].cc_dev = dev;
- crypto_drivers[i].cc_flags = flags;
- crypto_drivers[i].cc_session_size = sessionsize;
+ cap->cc_hid = i;
+ crypto_drivers[i] = cap;
+ CRYPTO_DRIVER_UNLOCK();
+
if (bootverbose)
printf("crypto: assign %s driver id %u, flags 0x%x\n",
device_get_nameunit(dev), i, flags);
- CRYPTO_DRIVER_UNLOCK();
-
return i;
}
@@ -729,20 +1079,22 @@
int
crypto_find_driver(const char *match)
{
+ struct cryptocap *cap;
int i, len = strlen(match);
CRYPTO_DRIVER_LOCK();
- for (i = 0; i < crypto_drivers_num; i++) {
- device_t dev = crypto_drivers[i].cc_dev;
- if (dev == NULL ||
- (crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP))
+ for (i = 0; i < crypto_drivers_size; i++) {
+ if (crypto_drivers[i] == NULL)
continue;
- if (strncmp(match, device_get_nameunit(dev), len) == 0 ||
- strncmp(match, device_get_name(dev), len) == 0)
- break;
+ cap = crypto_drivers[i];
+ if (strncmp(match, device_get_nameunit(cap->cc_dev), len) == 0 ||
+ strncmp(match, device_get_name(cap->cc_dev), len) == 0) {
+ CRYPTO_DRIVER_UNLOCK();
+ return (i);
+ }
}
CRYPTO_DRIVER_UNLOCK();
- return i < crypto_drivers_num ? i : -1;
+ return (-1);
}
/*
@@ -752,8 +1104,16 @@
device_t
crypto_find_device_byhid(int hid)
{
- struct cryptocap *cap = crypto_checkdriver(hid);
- return cap != NULL ? cap->cc_dev : NULL;
+ struct cryptocap *cap;
+ device_t dev;
+
+ dev = NULL;
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_checkdriver(hid);
+ if (cap != NULL)
+ dev = cap->cc_dev;
+ CRYPTO_DRIVER_UNLOCK();
+ return (dev);
}
/*
@@ -762,8 +1122,16 @@
int
crypto_getcaps(int hid)
{
- struct cryptocap *cap = crypto_checkdriver(hid);
- return cap != NULL ? cap->cc_flags : 0;
+ struct cryptocap *cap;
+ int flags;
+
+ flags = 0;
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_checkdriver(hid);
+ if (cap != NULL)
+ flags = cap->cc_flags;
+ CRYPTO_DRIVER_UNLOCK();
+ return (flags);
}
/*
@@ -802,103 +1170,6 @@
return err;
}
-/*
- * Register support for a non-key-related algorithm. This routine
- * is called once for each such algorithm supported by a driver.
- */
-int
-crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
- u_int32_t flags)
-{
- struct cryptocap *cap;
- int err;
-
- CRYPTO_DRIVER_LOCK();
-
- cap = crypto_checkdriver(driverid);
- /* NB: algorithms are in the range [1..max] */
- if (cap != NULL &&
- (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX)) {
- /*
- * XXX Do some performance testing to determine placing.
- * XXX We probably need an auxiliary data structure that
- * XXX describes relative performances.
- */
-
- cap->cc_alg[alg] = flags | CRYPTO_ALG_FLAG_SUPPORTED;
- cap->cc_max_op_len[alg] = maxoplen;
- if (bootverbose)
- printf("crypto: %s registers alg %u flags %u maxoplen %u\n"
- , device_get_nameunit(cap->cc_dev)
- , alg
- , flags
- , maxoplen
- );
- cap->cc_sessions = 0; /* Unmark */
- err = 0;
- } else
- err = EINVAL;
-
- CRYPTO_DRIVER_UNLOCK();
- return err;
-}
-
-static void
-driver_finis(struct cryptocap *cap)
-{
- u_int32_t ses, kops;
-
- CRYPTO_DRIVER_ASSERT();
-
- ses = cap->cc_sessions;
- kops = cap->cc_koperations;
- bzero(cap, sizeof(*cap));
- if (ses != 0 || kops != 0) {
- /*
- * If there are pending sessions,
- * just mark as invalid.
- */
- cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
- cap->cc_sessions = ses;
- cap->cc_koperations = kops;
- }
-}
-
-/*
- * Unregister a crypto driver. If there are pending sessions using it,
- * leave enough information around so that subsequent calls using those
- * sessions will correctly detect the driver has been unregistered and
- * reroute requests.
- */
-int
-crypto_unregister(u_int32_t driverid, int alg)
-{
- struct cryptocap *cap;
- int i, err;
-
- CRYPTO_DRIVER_LOCK();
- cap = crypto_checkdriver(driverid);
- if (cap != NULL &&
- (CRYPTO_ALGORITHM_MIN <= alg && alg <= CRYPTO_ALGORITHM_MAX) &&
- cap->cc_alg[alg] != 0) {
- cap->cc_alg[alg] = 0;
- cap->cc_max_op_len[alg] = 0;
-
- /* Was this the last algorithm ? */
- for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
- if (cap->cc_alg[i] != 0)
- break;
-
- if (i == CRYPTO_ALGORITHM_MAX + 1)
- driver_finis(cap);
- err = 0;
- } else
- err = EINVAL;
- CRYPTO_DRIVER_UNLOCK();
-
- return err;
-}
-
/*
* Unregister all algorithms associated with a crypto driver.
* If there are pending sessions using it, leave enough information
@@ -910,18 +1181,27 @@
crypto_unregister_all(u_int32_t driverid)
{
struct cryptocap *cap;
- int err;
CRYPTO_DRIVER_LOCK();
cap = crypto_checkdriver(driverid);
- if (cap != NULL) {
- driver_finis(cap);
- err = 0;
- } else
- err = EINVAL;
+ if (cap == NULL) {
+ CRYPTO_DRIVER_UNLOCK();
+ return (EINVAL);
+ }
+
+ cap->cc_flags |= CRYPTOCAP_F_CLEANUP;
+ crypto_drivers[driverid] = NULL;
+
+ /*
+ * XXX: This doesn't do anything to kick sessions that
+ * have no pending operations.
+ */
+ while (cap->cc_sessions != 0 || cap->cc_koperations != 0)
+ mtx_sleep(cap, &crypto_drivers_mtx, 0, "cryunreg", 0);
CRYPTO_DRIVER_UNLOCK();
+ cap_rele(cap);
- return err;
+ return (0);
}
/*
@@ -951,6 +1231,125 @@
return err;
}
+#ifdef INVARIANTS
+/* Various sanity checks on crypto requests. */
+static void
+crp_sanity(struct cryptop *crp)
+{
+ struct crypto_session_params *csp;
+
+ KASSERT(crp->crp_session != NULL, ("incoming crp without a session"));
+ KASSERT(crp->crp_ilen >= 0, ("incoming crp with negative input length"));
+ KASSERT(crp->crp_etype == 0, ("incoming crp with error"));
+ KASSERT(!(crp->crp_flags & CRYPTO_F_DONE),
+ ("incoming crp already done"));
+
+ csp = &crp->crp_session->csp;
+ switch (csp->csp_mode) {
+ case CSP_MODE_COMPRESS:
+ KASSERT(crp->crp_op == CRYPTO_OP_COMPRESS ||
+ crp->crp_op == CRYPTO_OP_DECOMPRESS,
+ ("invalid compression op %x", crp->crp_op));
+ break;
+ case CSP_MODE_CIPHER:
+ KASSERT(crp->crp_op == CRYPTO_OP_ENCRYPT ||
+ crp->crp_op == CRYPTO_OP_DECRYPT,
+ ("invalid cipher op %x", crp->crp_op));
+ break;
+ case CSP_MODE_DIGEST:
+ KASSERT(crp->crp_op == CRYPTO_OP_COMPUTE_DIGEST ||
+ crp->crp_op == CRYPTO_OP_VERIFY_DIGEST,
+ ("invalid digest op %x", crp->crp_op));
+ break;
+ case CSP_MODE_AEAD:
+ KASSERT(crp->crp_op ==
+ (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
+ crp->crp_op ==
+ (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
+ ("invalid AEAD op %x", crp->crp_op));
+ if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16)
+ KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
+ ("GCM without a separate IV"));
+ if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16)
+ KASSERT(crp->crp_flags & CRYPTO_F_IV_SEPARATE,
+ ("CCM without a separate IV"));
+ break;
+ case CSP_MODE_ETA:
+ KASSERT(crp->crp_op ==
+ (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST) ||
+ crp->crp_op ==
+ (CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST),
+ ("invalid ETA op %x", crp->crp_op));
+ break;
+ }
+ KASSERT((crp->crp_flags & CRYPTO_F_IV_GENERATE) == 0 ||
+ crp->crp_op == CRYPTO_OP_ENCRYPT ||
+ crp->crp_op == (CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST),
+ ("IV_GENERATE set for non-encryption operation %x", crp->crp_op));
+ KASSERT((crp->crp_flags &
+ (CRYPTO_F_IV_SEPARATE | CRYPTO_F_IV_GENERATE)) !=
+ (CRYPTO_F_IV_SEPARATE | CRYPTO_F_IV_GENERATE),
+ ("crp with both IV_SEPARATE and IV_GENERATE set"));
+ KASSERT(crp->crp_buf_type >= CRYPTO_BUF_CONTIG &&
+ crp->crp_buf_type <= CRYPTO_BUF_MBUF,
+ ("invalid crp buffer type %d", crp->crp_buf_type));
+ if (csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
+ KASSERT(crp->crp_aad_start == 0 ||
+ crp->crp_aad_start < crp->crp_ilen,
+ ("invalid AAD start"));
+ KASSERT(crp->crp_aad_length != 0 || crp->crp_aad_start == 0,
+ ("AAD with zero length and non-zero start"));
+ KASSERT(crp->crp_aad_length == 0 ||
+ crp->crp_aad_start + crp->crp_aad_length <= crp->crp_ilen,
+ ("AAD outside input length"));
+ } else {
+ KASSERT(crp->crp_aad_start == 0 && crp->crp_aad_length == 0,
+ ("AAD region in request not supporting AAD"));
+ }
+ if (csp->csp_ivlen == 0) {
+ KASSERT((crp->crp_flags &
+ (CRYPTO_F_IV_SEPARATE | CRYPTO_F_IV_GENERATE)) == 0,
+ ("IV_GENERATE or IV_SEPARATE set when IV isn't used"));
+ KASSERT(crp->crp_iv_start == 0,
+ ("crp_iv_start set when IV isn't used"));
+ } else if (crp->crp_flags & CRYPTO_F_IV_SEPARATE) {
+ KASSERT(crp->crp_iv_start == 0,
+ ("IV_SEPARATE used with non-zero IV start"));
+ } else {
+ KASSERT(crp->crp_iv_start < crp->crp_ilen,
+ ("invalid IV start"));
+ KASSERT(crp->crp_iv_start + csp->csp_ivlen <= crp->crp_ilen,
+ ("IV outside input length"));
+ }
+ KASSERT(crp->crp_payload_start == 0 ||
+ crp->crp_payload_start < crp->crp_ilen,
+ ("invalid payload start"));
+ KASSERT(crp->crp_payload_start + crp->crp_payload_length <=
+ crp->crp_ilen, ("payload outside input length"));
+ if (csp->csp_mode == CSP_MODE_DIGEST ||
+ csp->csp_mode == CSP_MODE_AEAD || csp->csp_mode == CSP_MODE_ETA) {
+ KASSERT(crp->crp_digest_start == 0 ||
+ crp->crp_digest_start < crp->crp_ilen,
+ ("invalid digest start"));
+ /* XXX: For the mlen == 0 case this check isn't perfect. */
+ KASSERT(crp->crp_digest_start + csp->csp_auth_mlen <=
+ crp->crp_ilen,
+ ("digest outside input length"));
+ } else {
+ KASSERT(crp->crp_digest_start == 0,
+ ("non-zero digest start for request without a digest"));
+ }
+ if (csp->csp_cipher_klen != 0)
+ KASSERT(csp->csp_cipher_key != NULL ||
+ crp->crp_cipher_key != NULL,
+ ("cipher request without a key"));
+ if (csp->csp_auth_klen != 0)
+ KASSERT(csp->csp_auth_key != NULL || crp->crp_auth_key != NULL,
+ ("auth request without a key"));
+ KASSERT(crp->crp_callback != NULL, ("incoming crp without callback"));
+}
+#endif
+
/*
* Add a crypto request to a queue, to be processed by the kernel thread.
*/
@@ -958,9 +1357,14 @@
crypto_dispatch(struct cryptop *crp)
{
struct cryptocap *cap;
- u_int32_t hid;
int result;
+#ifdef INVARIANTS
+ crp_sanity(crp);
+#endif
+
+ /* TODO: Handle CRYPTO_F_IV_GENERATE so drivers don't have to. */
+
cryptostats.cs_ops++;
#ifdef CRYPTO_TIMING
@@ -987,16 +1391,12 @@
}
if ((crp->crp_flags & CRYPTO_F_BATCH) == 0) {
- hid = crypto_ses2hid(crp->crp_session);
-
/*
* Caller marked the request to be processed
* immediately; dispatch it directly to the
* driver unless the driver is currently blocked.
*/
- cap = crypto_checkdriver(hid);
- /* Driver cannot disappeared when there is an active session. */
- KASSERT(cap != NULL, ("%s: Driver disappeared.", __func__));
+ cap = crp->crp_session->cap;
if (!cap->cc_qblocked) {
result = crypto_invoke(cap, crp, 0);
if (result != ERESTART)
@@ -1033,7 +1433,8 @@
cryptostats.cs_kops++;
- error = crypto_kinvoke(krp, krp->krp_crid);
+ krp->krp_cap = NULL;
+ error = crypto_kinvoke(krp);
if (error == ERESTART) {
CRYPTO_Q_LOCK();
TAILQ_INSERT_TAIL(&crp_kq, krp, krp_next);
@@ -1081,15 +1482,14 @@
match = CRYPTOCAP_F_SOFTWARE;
best = NULL;
again:
- for (hid = 0; hid < crypto_drivers_num; hid++) {
- cap = &crypto_drivers[hid];
+ for (hid = 0; hid < crypto_drivers_size; hid++) {
/*
- * If it's not initialized, is in the process of
- * going away, or is not appropriate (hardware
- * or software based on match), then skip.
+ * If there is no driver for this slot, or the driver
+ * is not appropriate (hardware or software based on
+ * match), then skip.
*/
+ cap = crypto_drivers[hid];
- if (cap->cc_dev == NULL ||
+ if (cap == NULL ||
- (cap->cc_flags & CRYPTOCAP_F_CLEANUP) ||
(cap->cc_flags & match) == 0)
continue;
@@ -1111,30 +1511,33 @@
}
/*
- * Dispatch an asymmetric crypto request.
+ * Choose a driver for an asymmetric crypto request.
*/
-static int
-crypto_kinvoke(struct cryptkop *krp, int crid)
+static struct cryptocap *
+crypto_lookup_kdriver(struct cryptkop *krp)
{
- struct cryptocap *cap = NULL;
- int error;
+ struct cryptocap *cap;
+ uint32_t crid;
- KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
- KASSERT(krp->krp_callback != NULL,
- ("%s: krp->crp_callback == NULL", __func__));
+ /* If this request is requeued, it might already have a driver. */
+ cap = krp->krp_cap;
+ if (cap != NULL)
+ return (cap);
- CRYPTO_DRIVER_LOCK();
+ /* Use krp_crid to choose a driver. */
+ crid = krp->krp_crid;
if ((crid & (CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE)) == 0) {
cap = crypto_checkdriver(crid);
if (cap != NULL) {
/*
- * Driver present, it must support the necessary
- * algorithm and, if s/w drivers are excluded,
- * it must be registered as hardware-backed.
+ * Driver present, it must support the
+ * necessary algorithm and, if s/w drivers are
+ * excluded, it must be registered as
+ * hardware-backed.
*/
if (!kdriver_suitable(cap, krp) ||
(!crypto_devallowsoft &&
- (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
+ (cap->cc_flags & CRYPTOCAP_F_HARDWARE) == 0))
cap = NULL;
}
} else {
@@ -1145,32 +1548,61 @@
crid &= ~CRYPTOCAP_F_SOFTWARE;
cap = crypto_select_kdriver(krp, crid);
}
- if (cap != NULL && !cap->cc_kqblocked) {
- krp->krp_hid = cap - crypto_drivers;
- cap->cc_koperations++;
- CRYPTO_DRIVER_UNLOCK();
- error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
- CRYPTO_DRIVER_LOCK();
- if (error == ERESTART) {
- cap->cc_koperations--;
- CRYPTO_DRIVER_UNLOCK();
- return (error);
- }
- } else {
- /*
- * NB: cap is !NULL if device is blocked; in
- * that case return ERESTART so the operation
- * is resubmitted if possible.
- */
- error = (cap == NULL) ? ENODEV : ERESTART;
+
+ if (cap != NULL) {
+ krp->krp_cap = cap_ref(cap);
+ krp->krp_hid = cap->cc_hid;
}
- CRYPTO_DRIVER_UNLOCK();
+ return (cap);
+}
- if (error) {
- krp->krp_status = error;
+/*
+ * Dispatch an asymmetric crypto request.
+ */
+static int
+crypto_kinvoke(struct cryptkop *krp)
+{
+ struct cryptocap *cap = NULL;
+ int error;
+
+ KASSERT(krp != NULL, ("%s: krp == NULL", __func__));
+ KASSERT(krp->krp_callback != NULL,
+ ("%s: krp->crp_callback == NULL", __func__));
+
+ CRYPTO_DRIVER_LOCK();
+ cap = crypto_lookup_kdriver(krp);
+ if (cap == NULL) {
+ CRYPTO_DRIVER_UNLOCK();
+ krp->krp_status = ENODEV;
crypto_kdone(krp);
+ return (0);
}
- return 0;
+
+ /*
+ * If the device is blocked, return ERESTART to requeue it.
+ */
+ if (cap->cc_kqblocked) {
+ /*
+ * XXX: Previously this set krp_status to ERESTART and
+ * invoked crypto_kdone but the caller would still
+ * requeue it.
+ */
+ CRYPTO_DRIVER_UNLOCK();
+ return (ERESTART);
+ }
+
+ cap->cc_koperations++;
+ CRYPTO_DRIVER_UNLOCK();
+ error = CRYPTODEV_KPROCESS(cap->cc_dev, krp, 0);
+ if (error == ERESTART) {
+ CRYPTO_DRIVER_LOCK();
+ cap->cc_koperations--;
+ CRYPTO_DRIVER_UNLOCK();
+ return (error);
+ }
+
+ KASSERT(error == 0, ("error %d returned from crypto_kprocess", error));
+ return (0);
}
#ifdef CRYPTO_TIMING
@@ -1204,13 +1636,10 @@
{
struct cryptocap *cap;
struct cryptop *crp;
- int hid, result;
+ int result;
crp = (struct cryptop *)ctx;
-
- hid = crypto_ses2hid(crp->crp_session);
- cap = crypto_checkdriver(hid);
-
+ cap = crp->crp_session->cap;
result = crypto_invoke(cap, crp, 0);
if (result == ERESTART)
crypto_batch_enqueue(crp);
@@ -1226,14 +1655,15 @@
KASSERT(crp != NULL, ("%s: crp == NULL", __func__));
KASSERT(crp->crp_callback != NULL,
("%s: crp->crp_callback == NULL", __func__));
- KASSERT(crp->crp_desc != NULL, ("%s: crp->crp_desc == NULL", __func__));
+ KASSERT(crp->crp_session != NULL,
+ ("%s: crp->crp_session == NULL", __func__));
#ifdef CRYPTO_TIMING
if (crypto_timing)
crypto_tstat(&cryptostats.cs_invoke, &crp->crp_tstamp);
#endif
if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
- struct cryptodesc *crd;
+ struct crypto_session_params csp;
crypto_session_t nses;
/*
@@ -1242,14 +1672,32 @@
*
* XXX: What if there are more already queued requests for this
* session?
+ *
+ * XXX: Real solution is to make sessions refcounted
+ * and force callers to hold a reference when
+ * assigning to crp_session. Could maybe change
+ * crypto_getreq to accept a session pointer to make
+ * that work. Alternatively, we could abandon the
+ * notion of rewriting crp_session in requests forcing
+ * the caller to deal with allocating a new session.
+ * Perhaps provide a method to allow a crp's session to
+ * be swapped that callers could use.
*/
+ csp = crp->crp_session->csp;
crypto_freesession(crp->crp_session);
- for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
- crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);
-
- /* XXX propagate flags from initial session? */
- if (crypto_newsession(&nses, &(crp->crp_desc->CRD_INI),
+ /*
+ * XXX: Key pointers may no longer be valid. If we
+ * really want to support this we need to define the
+ * KPI such that 'csp' is required to be valid for the
+ * duration of a session by the caller perhaps.
+ *
+ * XXX: If the keys have been changed this will reuse
+ * the old keys. This probably suggests making
+ * rekeying more explicit and updating the key
+ * pointers in 'csp' when the keys change.
+ */
+ if (crypto_newsession(&nses, &csp,
CRYPTOCAP_F_HARDWARE | CRYPTOCAP_F_SOFTWARE) == 0)
crp->crp_session = nses;
@@ -1264,13 +1712,9 @@
}
}
-/*
- * Release a set of crypto descriptors.
- */
void
crypto_freereq(struct cryptop *crp)
{
- struct cryptodesc *crd;
if (crp == NULL)
return;
@@ -1300,36 +1744,18 @@
}
#endif
- while ((crd = crp->crp_desc) != NULL) {
- crp->crp_desc = crd->crd_next;
- uma_zfree(cryptodesc_zone, crd);
- }
uma_zfree(cryptop_zone, crp);
}
-/*
- * Acquire a set of crypto descriptors.
- */
struct cryptop *
-crypto_getreq(int num)
+crypto_getreq(crypto_session_t cses, int how)
{
- struct cryptodesc *crd;
struct cryptop *crp;
- crp = uma_zalloc(cryptop_zone, M_NOWAIT|M_ZERO);
- if (crp != NULL) {
- while (num--) {
- crd = uma_zalloc(cryptodesc_zone, M_NOWAIT|M_ZERO);
- if (crd == NULL) {
- crypto_freereq(crp);
- return NULL;
- }
-
- crd->crd_next = crp->crp_desc;
- crp->crp_desc = crd;
- }
- }
- return crp;
+ MPASS(how == M_WAITOK || how == M_NOWAIT);
+ crp = uma_zalloc(cryptop_zone, how | M_ZERO);
+ if (crp != NULL)
+ crp->crp_session = cses;
+ return (crp);
}
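A request-side sketch to go with the AEAD session example above; 'm', 'skip',
'iv', 'drv_done', and the 16-byte tag size are assumptions, not part of this
diff:

	struct cryptop *crp;

	crp = crypto_getreq(cses, M_WAITOK);
	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
	crp->crp_flags = CRYPTO_F_IV_SEPARATE;
	crp->crp_mbuf = m;
	crp->crp_buf_type = CRYPTO_BUF_MBUF;
	crp->crp_ilen = m->m_pkthdr.len;
	crp->crp_payload_start = skip;
	crp->crp_payload_length = m->m_pkthdr.len - skip - 16;
	crp->crp_digest_start = m->m_pkthdr.len - 16;
	memcpy(crp->crp_iv, iv, 12);
	crp->crp_callback = drv_done;
	error = crypto_dispatch(crp);

All layout is expressed as offsets into the single data buffer; crp_sanity()
above cross-checks the offsets against the session parameters under INVARIANTS.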
/*
@@ -1432,15 +1858,14 @@
if (krp->krp_status != 0)
cryptostats.cs_kerrs++;
CRYPTO_DRIVER_LOCK();
- /* XXX: What if driver is loaded in the meantime? */
- if (krp->krp_hid < crypto_drivers_num) {
- cap = &crypto_drivers[krp->krp_hid];
- KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
- cap->cc_koperations--;
- if (cap->cc_flags & CRYPTOCAP_F_CLEANUP)
- crypto_remove(cap);
- }
+ cap = krp->krp_cap;
+ KASSERT(cap->cc_koperations > 0, ("cc_koperations == 0"));
+ cap->cc_koperations--;
+ if (cap->cc_koperations == 0 && cap->cc_flags & CRYPTOCAP_F_CLEANUP)
+ wakeup(cap);
CRYPTO_DRIVER_UNLOCK();
+ krp->krp_cap = NULL;
+ cap_rele(cap);
ret_worker = CRYPTO_RETW(0);
@@ -1457,11 +1882,12 @@
int hid, kalg, feat = 0;
CRYPTO_DRIVER_LOCK();
- for (hid = 0; hid < crypto_drivers_num; hid++) {
- const struct cryptocap *cap = &crypto_drivers[hid];
+ for (hid = 0; hid < crypto_drivers_size; hid++) {
+ const struct cryptocap *cap = crypto_drivers[hid];
- if ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
- !crypto_devallowsoft) {
+ if (cap == NULL ||
+ ((cap->cc_flags & CRYPTOCAP_F_SOFTWARE) &&
+ !crypto_devallowsoft)) {
continue;
}
for (kalg = 0; kalg < CRK_ALGORITHM_MAX; kalg++)
@@ -1500,7 +1926,6 @@
struct cryptop *crp, *submit;
struct cryptkop *krp;
struct cryptocap *cap;
- u_int32_t hid;
int result, hint;
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
@@ -1517,15 +1942,14 @@
submit = NULL;
hint = 0;
TAILQ_FOREACH(crp, &crp_q, crp_next) {
- hid = crypto_ses2hid(crp->crp_session);
- cap = crypto_checkdriver(hid);
+ cap = crp->crp_session->cap;
/*
 * Driver cannot disappear when there is an active
* session.
*/
KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
__func__, __LINE__));
- if (cap == NULL || cap->cc_dev == NULL) {
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
/* Op needs to be migrated, process it. */
if (submit == NULL)
submit = crp;
@@ -1541,7 +1965,7 @@
* better to just use a per-driver
* queue instead.
*/
- if (crypto_ses2hid(submit->crp_session) == hid)
+ if (submit->crp_session->cap == cap)
hint = CRYPTO_HINT_MORE;
break;
} else {
@@ -1554,11 +1978,12 @@
}
if (submit != NULL) {
TAILQ_REMOVE(&crp_q, submit, crp_next);
- hid = crypto_ses2hid(submit->crp_session);
- cap = crypto_checkdriver(hid);
+ cap = submit->crp_session->cap;
KASSERT(cap != NULL, ("%s:%u Driver disappeared.",
__func__, __LINE__));
+ CRYPTO_Q_UNLOCK();
result = crypto_invoke(cap, submit, hint);
+ CRYPTO_Q_LOCK();
if (result == ERESTART) {
/*
* The driver ran out of resources, mark the
@@ -1569,8 +1994,7 @@
* at the front. This should be ok; putting
* it at the end does not work.
*/
- /* XXX validate sid again? */
- crypto_drivers[crypto_ses2hid(submit->crp_session)].cc_qblocked = 1;
+ cap->cc_qblocked = 1;
TAILQ_INSERT_HEAD(&crp_q, submit, crp_next);
cryptostats.cs_blocks++;
}
@@ -1578,19 +2002,15 @@
/* As above, but for key ops */
TAILQ_FOREACH(krp, &crp_kq, krp_next) {
- cap = crypto_checkdriver(krp->krp_hid);
- if (cap == NULL || cap->cc_dev == NULL) {
+ cap = krp->krp_cap;
+ if (cap->cc_flags & CRYPTOCAP_F_CLEANUP) {
/*
- * Operation needs to be migrated, invalidate
- * the assigned device so it will reselect a
- * new one below. Propagate the original
- * crid selection flags if supplied.
+ * Operation needs to be migrated,
+ * clear krp_cap so a new driver is
+ * selected.
*/
- krp->krp_hid = krp->krp_crid &
- (CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE);
- if (krp->krp_hid == 0)
- krp->krp_hid =
- CRYPTOCAP_F_SOFTWARE|CRYPTOCAP_F_HARDWARE;
+ krp->krp_cap = NULL;
+ cap_rele(cap);
break;
}
if (!cap->cc_kqblocked)
@@ -1598,7 +2018,9 @@
}
if (krp != NULL) {
TAILQ_REMOVE(&crp_kq, krp, krp_next);
- result = crypto_kinvoke(krp, krp->krp_hid);
+ CRYPTO_Q_UNLOCK();
+ result = crypto_kinvoke(krp);
+ CRYPTO_Q_LOCK();
if (result == ERESTART) {
/*
* The driver ran out of resources, mark the
@@ -1609,8 +2031,7 @@
* at the front. This should be ok; putting
* it at the end does not work.
*/
- /* XXX validate sid again? */
- crypto_drivers[krp->krp_hid].cc_kqblocked = 1;
+ krp->krp_cap->cc_kqblocked = 1;
TAILQ_INSERT_HEAD(&crp_kq, krp, krp_next);
cryptostats.cs_kblocks++;
}
@@ -1731,9 +2152,9 @@
, "QB"
, "KB"
);
- for (hid = 0; hid < crypto_drivers_num; hid++) {
- const struct cryptocap *cap = &crypto_drivers[hid];
- if (cap->cc_dev == NULL)
+ for (hid = 0; hid < crypto_drivers_size; hid++) {
+ const struct cryptocap *cap = crypto_drivers[hid];
+ if (cap == NULL)
continue;
db_printf("%-12s %4u %4u %08x %2u %2u\n"
, device_get_nameunit(cap->cc_dev)
@@ -1756,15 +2177,15 @@
db_printf("%4s %8s %4s %4s %4s %4s %8s %8s\n",
"HID", "Caps", "Ilen", "Olen", "Etype", "Flags",
- "Desc", "Callback");
+ "Device", "Callback");
TAILQ_FOREACH(crp, &crp_q, crp_next) {
db_printf("%4u %08x %4u %4u %4u %04x %8p %8p\n"
- , (int) crypto_ses2hid(crp->crp_session)
+ , crp->crp_session->cap->cc_hid
, (int) crypto_ses2caps(crp->crp_session)
, crp->crp_ilen, crp->crp_olen
, crp->crp_etype
, crp->crp_flags
- , crp->crp_desc
+ , device_get_nameunit(crp->crp_session->cap->cc_dev)
, crp->crp_callback
);
}
@@ -1775,7 +2196,7 @@
TAILQ_FOREACH(crp, &ret_worker->crp_ret_q, crp_next) {
db_printf("%8td %4u %4u %04x %8p\n"
, CRYPTO_RETW_ID(ret_worker)
- , (int) crypto_ses2hid(crp->crp_session)
+ , crp->crp_session->cap->cc_hid
, crp->crp_etype
, crp->crp_flags
, crp->crp_callback
Index: sys/opencrypto/cryptodev.h
===================================================================
--- sys/opencrypto/cryptodev.h
+++ sys/opencrypto/cryptodev.h
@@ -71,7 +71,6 @@
/* Some initial values */
#define CRYPTO_DRIVERS_INITIAL 4
-#define CRYPTO_SW_SESSIONS 32
/* Hash values */
#define NULL_HASH_LEN 16
@@ -189,11 +188,13 @@
#define CRYPTO_CAMELLIA_CBC 21
#define CRYPTO_AES_XTS 22
#define CRYPTO_AES_ICM 23 /* commonly known as CTR mode */
-#define CRYPTO_AES_NIST_GMAC 24 /* cipher side */
+#define CRYPTO_AES_NIST_GMAC 24 /* GMAC only */
#define CRYPTO_AES_NIST_GCM_16 25 /* 16 byte ICV */
+#ifdef _KERNEL
#define CRYPTO_AES_128_NIST_GMAC 26 /* auth side */
#define CRYPTO_AES_192_NIST_GMAC 27 /* auth side */
#define CRYPTO_AES_256_NIST_GMAC 28 /* auth side */
+#endif
#define CRYPTO_BLAKE2B 29 /* Blake2b hash */
#define CRYPTO_BLAKE2S 30 /* Blake2s hash */
#define CRYPTO_CHACHA20 31 /* Chacha20 stream cipher */
@@ -378,6 +379,13 @@
#ifdef _KERNEL
+/*
+ * Return values for cryptodev_probesession methods.
+ */
+#define CRYPTODEV_PROBE_HARDWARE (-100)
+#define CRYPTODEV_PROBE_ACCEL_SOFTWARE (-200)
+#define CRYPTODEV_PROBE_SOFTWARE (-500)
+
#if 0
#define CRYPTDEB(s, ...) do { \
printf("%s:%d: " s "\n", __FILE__, __LINE__, ## __VA_ARGS__); \
@@ -386,40 +394,29 @@
#define CRYPTDEB(...) do { } while (0)
#endif
-/* Standard initialization structure beginning */
-struct cryptoini {
- int cri_alg; /* Algorithm to use */
- int cri_klen; /* Key length, in bits */
- int cri_mlen; /* Number of bytes we want from the
- entire hash. 0 means all. */
- caddr_t cri_key; /* key to use */
- u_int8_t cri_iv[EALG_MAX_BLOCK_LEN]; /* IV to use */
- struct cryptoini *cri_next;
-};
+struct crypto_session_params {
+ int csp_mode; /* Type of operations to perform. */
-/* Describe boundaries of a single crypto operation */
-struct cryptodesc {
- int crd_skip; /* How many bytes to ignore from start */
- int crd_len; /* How many bytes to process */
- int crd_inject; /* Where to inject results, if applicable */
- int crd_flags;
+#define CSP_MODE_NONE 0
+#define CSP_MODE_COMPRESS 1 /* Compression/decompression. */
+#define CSP_MODE_CIPHER 2 /* Encrypt/decrypt. */
+#define CSP_MODE_DIGEST 3 /* Compute/verify digest. */
+#define CSP_MODE_AEAD 4 /* Combined auth/encryption. */
+#define CSP_MODE_ETA 5 /* IPsec style encrypt-then-auth */
-#define CRD_F_ENCRYPT 0x01 /* Set when doing encryption */
-#define CRD_F_IV_PRESENT 0x02 /* When encrypting, IV is already in
- place, so don't copy. */
-#define CRD_F_IV_EXPLICIT 0x04 /* IV explicitly provided */
-#define CRD_F_DSA_SHA_NEEDED 0x08 /* Compute SHA-1 of buffer for DSA */
-#define CRD_F_COMP 0x0f /* Set when doing compression */
-#define CRD_F_KEY_EXPLICIT 0x10 /* Key explicitly provided */
+ int csp_flags;
- struct cryptoini CRD_INI; /* Initialization/context data */
-#define crd_esn CRD_INI.cri_esn
-#define crd_iv CRD_INI.cri_iv
-#define crd_key CRD_INI.cri_key
-#define crd_alg CRD_INI.cri_alg
-#define crd_klen CRD_INI.cri_klen
+ int csp_ivlen; /* IV length in bytes. */
- struct cryptodesc *crd_next;
+ int csp_cipher_alg;
+ int csp_cipher_klen; /* Key length in bytes. */
+ const void *csp_cipher_key;
+
+ int csp_auth_alg;
+ int csp_auth_klen; /* Key length in bytes. */
+ const void *csp_auth_key;
+ int csp_auth_mlen; /* Number of digest bytes to use.
+ 0 means all. */
};
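As a second example, to contrast with the AEAD sketch in crypto.c, an
IPsec-style encrypt-then-auth session fills both the cipher and auth halves
(algorithm and length choices are illustrative only):

	csp.csp_mode = CSP_MODE_ETA;
	csp.csp_cipher_alg = CRYPTO_AES_CBC;
	csp.csp_cipher_klen = 16;
	csp.csp_ivlen = 16;
	csp.csp_auth_alg = CRYPTO_SHA1_HMAC;
	csp.csp_auth_klen = 20;
	csp.csp_auth_mlen = 12;		/* HMAC-SHA1-96 truncation */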
/* Structure describing complete operation */
@@ -444,8 +441,6 @@
*/
int crp_flags;
-#define CRYPTO_F_IMBUF 0x0001 /* Input/output are mbuf chains */
-#define CRYPTO_F_IOV 0x0002 /* Input/output are uio */
#define CRYPTO_F_BATCH 0x0008 /* Batch op if possible */
#define CRYPTO_F_CBIMM 0x0010 /* Do callback immediately */
#define CRYPTO_F_DONE 0x0020 /* Operation completed */
@@ -458,14 +453,35 @@
* order there are submitted. Applied only
* if CRYPTO_F_ASYNC flags is set
*/
+#define CRYPTO_F_IV_SEPARATE 0x0200 /* Use crp_iv[] as IV. */
+#define CRYPTO_F_IV_GENERATE 0x0400 /* Generate a random IV and store. */
+
+ int crp_op;
union {
caddr_t crp_buf; /* Data to be processed */
struct mbuf *crp_mbuf;
struct uio *crp_uio;
};
- void * crp_opaque; /* Opaque pointer, passed along */
- struct cryptodesc *crp_desc; /* Linked list of processing descriptors */
+ int crp_buf_type; /* Which union member describes data. */
+
+ int crp_aad_start; /* Location of AAD. */
+ int crp_aad_length; /* 0 => no AAD. */
+ int crp_iv_start; /* Location of IV. IV length is from
+ * the session.
+ */
+ int crp_payload_start; /* Location of ciphertext. */
+ int crp_payload_length;
+ int crp_digest_start; /* Location of MAC/tag. Length is
+ * from the session.
+ */
+
+ uint8_t crp_iv[EALG_MAX_BLOCK_LEN]; /* IV if IV_SEPARATE. */
+
+ const void *crp_cipher_key; /* New cipher key if non-NULL. */
+ const void *crp_auth_key; /* New auth key if non-NULL. */
+
+ void *crp_opaque; /* Opaque pointer, passed along */
int (*crp_callback)(struct cryptop *); /* Callback function */
@@ -485,11 +501,18 @@
(crp)->crp_flags & CRYPTO_F_ASYNC_KEEPORDER)
#define CRYPTO_BUF_CONTIG 0x0
-#define CRYPTO_BUF_IOV 0x1
+#define CRYPTO_BUF_UIO 0x1
#define CRYPTO_BUF_MBUF 0x2
-#define CRYPTO_OP_DECRYPT 0x0
-#define CRYPTO_OP_ENCRYPT 0x1
+/* Flags in crp_op. */
+#define CRYPTO_OP_DECRYPT 0x0
+#define CRYPTO_OP_ENCRYPT 0x1
+#define CRYPTO_OP_IS_ENCRYPT(op) ((op) & CRYPTO_OP_ENCRYPT)
+#define CRYPTO_OP_COMPUTE_DIGEST 0x0
+#define CRYPTO_OP_VERIFY_DIGEST 0x2
+#define CRYPTO_OP_DECOMPRESS CRYPTO_OP_DECRYPT
+#define CRYPTO_OP_COMPRESS CRYPTO_OP_ENCRYPT
+#define CRYPTO_OP_IS_COMPRESS(op) ((op) & CRYPTO_OP_COMPRESS)
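The ops compose by OR-ing a cipher direction with a digest action, e.g. for an
ETA or AEAD session:

	crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;	/* outbound */
	crp->crp_op = CRYPTO_OP_DECRYPT | CRYPTO_OP_VERIFY_DIGEST;	/* inbound */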
/*
* Hints passed to process methods.
@@ -504,18 +527,24 @@
u_short krp_iparams; /* # of input parameters */
u_short krp_oparams; /* # of output parameters */
u_int krp_crid; /* desired device, etc. */
- u_int32_t krp_hid;
+ uint32_t krp_hid; /* device used */
struct crparam krp_param[CRK_MAXPARAM]; /* kvm */
- int (*krp_callback)(struct cryptkop *);
+ void (*krp_callback)(struct cryptkop *);
+ struct cryptocap *krp_cap;
};
uint32_t crypto_ses2hid(crypto_session_t crypto_session);
uint32_t crypto_ses2caps(crypto_session_t crypto_session);
void *crypto_get_driver_session(crypto_session_t crypto_session);
+const struct crypto_session_params *crypto_get_params(
+ crypto_session_t crypto_session);
+struct auth_hash *crypto_auth_hash(const struct crypto_session_params *csp);
+struct enc_xform *crypto_cipher(const struct crypto_session_params *csp);
MALLOC_DECLARE(M_CRYPTO_DATA);
-extern int crypto_newsession(crypto_session_t *cses, struct cryptoini *cri, int hard);
+extern int crypto_newsession(crypto_session_t *cses,
+ const struct crypto_session_params *params, int hard);
extern void crypto_freesession(crypto_session_t cses);
#define CRYPTOCAP_F_HARDWARE CRYPTO_FLAG_HARDWARE
#define CRYPTOCAP_F_SOFTWARE CRYPTO_FLAG_SOFTWARE
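As a sketch of the new signature, creating a cipher-only session might look
like this; the algorithm, key length, and 'key' buffer are arbitrary examples
rather than values taken from this change:

	struct crypto_session_params csp;
	crypto_session_t cses;
	int error;

	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_CIPHER;
	csp.csp_cipher_alg = CRYPTO_AES_CBC;
	csp.csp_cipher_klen = 32;		/* key length in bytes */
	csp.csp_cipher_key = key;
	csp.csp_ivlen = 16;			/* AES block size */
	error = crypto_newsession(&cses, &csp, CRYPTOCAP_F_HARDWARE);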
@@ -525,10 +554,7 @@
extern int crypto_find_driver(const char *);
extern device_t crypto_find_device_byhid(int hid);
extern int crypto_getcaps(int hid);
-extern int crypto_register(u_int32_t driverid, int alg, u_int16_t maxoplen,
- u_int32_t flags);
extern int crypto_kregister(u_int32_t, int, u_int32_t);
-extern int crypto_unregister(u_int32_t driverid, int alg);
extern int crypto_unregister_all(u_int32_t driverid);
extern int crypto_dispatch(struct cryptop *crp);
extern int crypto_kdispatch(struct cryptkop *);
@@ -540,17 +566,30 @@
extern int crypto_getfeat(int *);
extern void crypto_freereq(struct cryptop *crp);
-extern struct cryptop *crypto_getreq(int num);
+extern struct cryptop *crypto_getreq(crypto_session_t cses, int how);
extern int crypto_usercrypto; /* userland may do crypto requests */
extern int crypto_userasymcrypto; /* userland may do asym crypto reqs */
extern int crypto_devallowsoft; /* only use hardware crypto */
+/* Helper routines for drivers to initialize auth contexts for HMAC. */
+struct auth_hash;
+
+void hmac_init_ipad(struct auth_hash *axf, const char *key, int klen,
+ void *auth_ctx);
+void hmac_init_opad(struct auth_hash *axf, const char *key, int klen,
+ void *auth_ctx);
+
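These mirror how cryptosoft's swcr_authprepare() below consumes them; a
driver would typically call them at session setup, assuming 'axf' came from
crypto_auth_hash(csp) and 'ictx'/'octx' are per-session buffers of
axf->ctxsize bytes:

	hmac_init_ipad(axf, csp->csp_auth_key, csp->csp_auth_klen, ictx);
	hmac_init_opad(axf, csp->csp_auth_key, csp->csp_auth_klen, octx);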
/*
* Crypto-related utility routines used mainly by drivers.
*
* XXX these don't really belong here; but for now they're
* kept apart from the rest of the system.
+ *
+ * Similar to m_copyback/m_copydata, *_copyback copies data from the
+ * 'src' buffer into the crypto request's data buffer while
+ * *_copydata copies data from the crypto request's data buffer into
+ * the 'dst' buffer.
*/
struct uio;
extern void cuio_copydata(struct uio* uio, int off, int len, caddr_t cp);
@@ -564,14 +603,13 @@
extern int crypto_mbuftoiov(struct mbuf *mbuf, struct iovec **iovptr,
int *cnt, int *allocated);
-extern void crypto_copyback(int flags, caddr_t buf, int off, int size,
- c_caddr_t in);
-extern void crypto_copydata(int flags, caddr_t buf, int off, int size,
- caddr_t out);
-extern int crypto_apply(int flags, caddr_t buf, int off, int len,
+void crypto_copyback(struct cryptop *crp, int off, int size,
+ const void *src);
+void crypto_copydata(struct cryptop *crp, int off, int size, void *dst);
+int crypto_apply(struct cryptop *crp, int off, int len,
int (*f)(void *, void *, u_int), void *arg);
-
-extern void *crypto_contiguous_subsegment(int, void *, size_t, size_t);
+void *crypto_contiguous_subsegment(struct cryptop *crp, size_t skip,
+ size_t len);
#endif /* _KERNEL */
#endif /* _CRYPTO_CRYPTO_H_ */
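To make the request-centric helpers concrete, here is a hedged sketch of a
driver verifying a digest with them; 'computed' and 'mlen' are hypothetical
(the driver's computed MAC and the session's digest length):

	char expected[HASH_MAX_LEN];

	crypto_copydata(crp, crp->crp_digest_start, mlen, expected);
	if (timingsafe_bcmp(computed, expected, mlen) != 0)
		crp->crp_etype = EBADMSG;
	crypto_done(crp);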
Index: sys/opencrypto/cryptodev.c
===================================================================
--- sys/opencrypto/cryptodev.c
+++ sys/opencrypto/cryptodev.c
@@ -51,7 +51,6 @@
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/errno.h>
-#include <sys/uio.h>
#include <sys/random.h>
#include <sys/conf.h>
#include <sys/kernel.h>
@@ -270,23 +269,19 @@
u_int32_t ses;
struct mtx lock; /* for op submission */
- u_int32_t cipher;
struct enc_xform *txform;
- u_int32_t mac;
- struct auth_hash *thash;
+ int hashsize;
+ int ivsize;
+ int mode;
- caddr_t key;
- int keylen;
-
- caddr_t mackey;
- int mackeylen;
+ void *key;
+ void *mackey;
};
struct cryptop_data {
struct csession *cse;
- struct iovec iovec[1];
- struct uio uio;
+ char *buf;
bool done;
};
@@ -326,9 +321,9 @@
static struct csession *csefind(struct fcrypt *, u_int);
static bool csedelete(struct fcrypt *, u_int);
-static struct csession *csecreate(struct fcrypt *, crypto_session_t, caddr_t,
- u_int64_t, caddr_t, u_int64_t, u_int32_t, u_int32_t, struct enc_xform *,
- struct auth_hash *);
+static struct csession *csecreate(struct fcrypt *, crypto_session_t,
+ struct crypto_session_params *, struct enc_xform *, void *,
+ struct auth_hash *, void *);
static void csefree(struct csession *);
static int cryptodev_op(struct csession *, struct crypt_op *,
@@ -375,7 +370,7 @@
struct thread *td)
{
#define SES2(p) ((struct session2_op *)p)
- struct cryptoini cria, crie;
+ struct crypto_session_params csp;
struct fcrypt *fcr = fp->f_data;
struct csession *cse;
struct session_op *sop;
@@ -383,6 +378,8 @@
struct crypt_aead *caead;
struct enc_xform *txform = NULL;
struct auth_hash *thash = NULL;
+ void *key = NULL;
+ void *mackey = NULL;
struct crypt_kop *kop;
crypto_session_t cses;
u_int32_t ses;
@@ -487,18 +484,37 @@
case CRYPTO_RIPEMD160_HMAC:
thash = &auth_hash_hmac_ripemd_160;
break;
+#ifdef COMPAT_FREEBSD12
case CRYPTO_AES_128_NIST_GMAC:
- thash = &auth_hash_nist_gmac_aes_128;
- break;
case CRYPTO_AES_192_NIST_GMAC:
- thash = &auth_hash_nist_gmac_aes_192;
- break;
case CRYPTO_AES_256_NIST_GMAC:
- thash = &auth_hash_nist_gmac_aes_256;
+ /* Should always be paired with GCM. */
+ if (sop->cipher != CRYPTO_AES_NIST_GCM_16) {
+ CRYPTDEB("GMAC without GCM");
+ return (EINVAL);
+ }
+ break;
+#endif
+ case CRYPTO_AES_NIST_GMAC:
+ switch (sop->mackeylen * 8) {
+ case 128:
+ thash = &auth_hash_nist_gmac_aes_128;
+ break;
+ case 192:
+ thash = &auth_hash_nist_gmac_aes_192;
+ break;
+ case 256:
+ thash = &auth_hash_nist_gmac_aes_256;
+ break;
+ default:
+ CRYPTDEB("invalid GMAC key length");
+ SDT_PROBE1(opencrypto, dev, ioctl, error,
+ __LINE__);
+ return (EINVAL);
+ }
break;
-
case CRYPTO_AES_CCM_CBC_MAC:
- switch (sop->keylen) {
+ switch (sop->mackeylen) {
case 16:
thash = &auth_hash_ccm_cbc_mac_128;
break;
@@ -554,12 +570,52 @@
return (EINVAL);
}
- bzero(&crie, sizeof(crie));
- bzero(&cria, sizeof(cria));
+ if (txform == NULL && thash == NULL)
+ return (EINVAL);
+
+ memset(&csp, 0, sizeof(csp));
+
+ if (sop->cipher == CRYPTO_AES_NIST_GCM_16) {
+ switch (sop->mac) {
+#ifdef COMPAT_FREEBSD12
+ case CRYPTO_AES_128_NIST_GMAC:
+ case CRYPTO_AES_192_NIST_GMAC:
+ case CRYPTO_AES_256_NIST_GMAC:
+ if (sop->keylen != sop->mackeylen)
+ return (EINVAL);
+ break;
+#endif
+ case 0:
+ break;
+ default:
+ return (EINVAL);
+ }
+ csp.csp_mode = CSP_MODE_AEAD;
+ } else if (sop->cipher == CRYPTO_AES_CCM_16) {
+ switch (sop->mac) {
+#ifdef COMPAT_FREEBSD12
+ case CRYPTO_AES_CCM_CBC_MAC:
+ if (sop->keylen != sop->mackeylen)
+ return (EINVAL);
+ thash = NULL;
+ break;
+#endif
+ case 0:
+ break;
+ default:
+ return (EINVAL);
+ }
+ csp.csp_mode = CSP_MODE_AEAD;
+ } else if (txform && thash)
+ csp.csp_mode = CSP_MODE_ETA;
+ else if (txform)
+ csp.csp_mode = CSP_MODE_CIPHER;
+ else
+ csp.csp_mode = CSP_MODE_DIGEST;
if (txform) {
- crie.cri_alg = txform->type;
- crie.cri_klen = sop->keylen * 8;
+ csp.csp_cipher_alg = txform->type;
+ csp.csp_cipher_klen = sop->keylen;
if (sop->keylen > txform->maxkey ||
sop->keylen < txform->minkey) {
CRYPTDEB("invalid cipher parameters");
@@ -569,22 +625,21 @@
goto bail;
}
- crie.cri_key = malloc(crie.cri_klen / 8,
- M_XDATA, M_WAITOK);
- if ((error = copyin(sop->key, crie.cri_key,
- crie.cri_klen / 8))) {
+ key = malloc(csp.csp_cipher_klen, M_XDATA, M_WAITOK);
+ error = copyin(sop->key, key, csp.csp_cipher_klen);
+ if (error) {
CRYPTDEB("invalid key");
SDT_PROBE1(opencrypto, dev, ioctl, error,
__LINE__);
goto bail;
}
- if (thash)
- crie.cri_next = &cria;
+ csp.csp_cipher_key = key;
+ csp.csp_ivlen = txform->ivsize;
}
if (thash) {
- cria.cri_alg = thash->type;
- cria.cri_klen = sop->mackeylen * 8;
+ csp.csp_auth_alg = thash->type;
+ csp.csp_auth_klen = sop->mackeylen;
if (sop->mackeylen > thash->keysize ||
sop->mackeylen < 0) {
CRYPTDEB("invalid mac key length");
@@ -594,17 +649,24 @@
goto bail;
}
- if (cria.cri_klen) {
- cria.cri_key = malloc(cria.cri_klen / 8,
- M_XDATA, M_WAITOK);
- if ((error = copyin(sop->mackey, cria.cri_key,
- cria.cri_klen / 8))) {
+ if (csp.csp_auth_klen) {
+ mackey = malloc(csp.csp_auth_klen, M_XDATA,
+ M_WAITOK);
+ error = copyin(sop->mackey, mackey,
+ csp.csp_auth_klen);
+ if (error) {
CRYPTDEB("invalid mac key");
SDT_PROBE1(opencrypto, dev, ioctl,
error, __LINE__);
goto bail;
}
+ csp.csp_auth_key = mackey;
}
+
+ if (csp.csp_auth_alg == CRYPTO_AES_NIST_GMAC)
+ csp.csp_ivlen = AES_GCM_IV_LEN;
+ if (csp.csp_auth_alg == CRYPTO_AES_CCM_CBC_MAC)
+ csp.csp_ivlen = AES_CCM_IV_LEN;
}
/* NB: CIOCGSESSION2 has the crid */
@@ -623,16 +685,14 @@
}
} else
crid = CRYPTOCAP_F_HARDWARE;
- error = crypto_newsession(&cses, (txform ? &crie : &cria), crid);
+ error = crypto_newsession(&cses, &csp, crid);
if (error) {
CRYPTDEB("crypto_newsession");
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
- cse = csecreate(fcr, cses, crie.cri_key, crie.cri_klen,
- cria.cri_key, cria.cri_klen, sop->cipher, sop->mac, txform,
- thash);
+ cse = csecreate(fcr, cses, &csp, txform, key, thash, mackey);
if (cse == NULL) {
crypto_freesession(cses);
@@ -652,10 +712,8 @@
}
bail:
if (error) {
- if (crie.cri_key)
- free(crie.cri_key, M_XDATA);
- if (cria.cri_key)
- free(cria.cri_key, M_XDATA);
+ free(key, M_XDATA);
+ free(mackey, M_XDATA);
}
#ifdef COMPAT_FREEBSD32
else {
@@ -773,20 +831,11 @@
cod_alloc(struct csession *cse, size_t len, struct thread *td)
{
struct cryptop_data *cod;
- struct uio *uio;
cod = malloc(sizeof(struct cryptop_data), M_XDATA, M_WAITOK | M_ZERO);
cod->cse = cse;
- uio = &cod->uio;
- uio->uio_iov = cod->iovec;
- uio->uio_iovcnt = 1;
- uio->uio_resid = len;
- uio->uio_segflg = UIO_SYSSPACE;
- uio->uio_rw = UIO_WRITE;
- uio->uio_td = td;
- uio->uio_iov[0].iov_len = len;
- uio->uio_iov[0].iov_base = malloc(len, M_XDATA, M_WAITOK);
+ cod->buf = malloc(len, M_XDATA, M_WAITOK);
return (cod);
}
@@ -794,7 +843,7 @@
cod_free(struct cryptop_data *cod)
{
- free(cod->uio.uio_iov[0].iov_base, M_XDATA);
+ free(cod->buf, M_XDATA);
free(cod, M_XDATA);
}
@@ -803,8 +852,10 @@
{
static struct timeval arc4warn, blfwarn, castwarn, deswarn, md5warn;
static struct timeval skipwarn, tdeswarn;
+ const struct crypto_session_params *csp;
- switch (cse->cipher) {
+ csp = crypto_get_params(cse->cses);
+ switch (csp->csp_cipher_alg) {
case CRYPTO_DES_CBC:
if (ratecheck(&deswarn, &warninterval))
gone_in(13, "DES cipher via /dev/crypto");
@@ -831,7 +882,7 @@
break;
}
- switch (cse->mac) {
+ switch (csp->csp_auth_alg) {
case CRYPTO_MD5_HMAC:
if (ratecheck(&md5warn, &warninterval))
gone_in(13, "MD5-HMAC authenticator via /dev/crypto");
@@ -848,7 +899,6 @@
{
struct cryptop_data *cod = NULL;
struct cryptop *crp = NULL;
- struct cryptodesc *crde = NULL, *crda = NULL;
int error;
if (cop->len > 256*1024-4) {
@@ -863,106 +913,135 @@
}
}
- if (cse->thash)
- cod = cod_alloc(cse, cop->len + cse->thash->hashsize, td);
- else
- cod = cod_alloc(cse, cop->len, td);
-
- crp = crypto_getreq((cse->txform != NULL) + (cse->thash != NULL));
- if (crp == NULL) {
+ if (cop->mac && cse->hashsize == 0) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- error = ENOMEM;
+ error = EINVAL;
goto bail;
}
- if (cse->thash && cse->txform) {
- if (cop->flags & COP_F_CIPHER_FIRST) {
- crde = crp->crp_desc;
- crda = crde->crd_next;
- } else {
- crda = crp->crp_desc;
- crde = crda->crd_next;
+ /*
+ * The COP_F_CIPHER_FIRST flag predates explicit session
+	 * modes, but the only way it was used was for EtA, so allow it
+ * as long as it is consistent with EtA.
+ */
+ if (cop->flags & COP_F_CIPHER_FIRST) {
+ if (cop->op != COP_ENCRYPT) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ return (EINVAL);
}
- } else if (cse->thash) {
- crda = crp->crp_desc;
- } else if (cse->txform) {
- crde = crp->crp_desc;
- } else {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- error = EINVAL;
- goto bail;
}
- if ((error = copyin(cop->src, cod->uio.uio_iov[0].iov_base,
- cop->len))) {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- goto bail;
- }
+ cod = cod_alloc(cse, cop->len + cse->hashsize, td);
- if (crda) {
- crda->crd_skip = 0;
- crda->crd_len = cop->len;
- crda->crd_inject = cop->len;
+ crp = crypto_getreq(cse->cses, M_WAITOK);
- crda->crd_alg = cse->mac;
- crda->crd_key = cse->mackey;
- crda->crd_klen = cse->mackeylen * 8;
+ error = copyin(cop->src, cod->buf, cop->len);
+ if (error) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ goto bail;
}
+ crp->crp_payload_start = 0;
+ crp->crp_payload_length = cop->len;
+ if (cse->hashsize)
+ crp->crp_digest_start = cop->len;
- if (crde) {
- if (cop->op == COP_ENCRYPT)
- crde->crd_flags |= CRD_F_ENCRYPT;
- else
- crde->crd_flags &= ~CRD_F_ENCRYPT;
- crde->crd_len = cop->len;
- crde->crd_inject = 0;
-
- crde->crd_alg = cse->cipher;
- crde->crd_key = cse->key;
- crde->crd_klen = cse->keylen * 8;
+ switch (cse->mode) {
+ case CSP_MODE_COMPRESS:
+ switch (cop->op) {
+ case COP_ENCRYPT:
+ crp->crp_op = CRYPTO_OP_COMPRESS;
+ break;
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_DECOMPRESS;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ case CSP_MODE_CIPHER:
+ switch (cop->op) {
+ case COP_ENCRYPT:
+ crp->crp_op = CRYPTO_OP_ENCRYPT;
+ break;
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_DECRYPT;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ case CSP_MODE_DIGEST:
+ switch (cop->op) {
+ case 0:
+ case COP_ENCRYPT:
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_COMPUTE_DIGEST;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ case CSP_MODE_ETA:
+ switch (cop->op) {
+ case COP_ENCRYPT:
+ crp->crp_op = CRYPTO_OP_ENCRYPT |
+ CRYPTO_OP_COMPUTE_DIGEST;
+ break;
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_DECRYPT |
+ CRYPTO_OP_VERIFY_DIGEST;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
}
- crp->crp_ilen = cop->len;
- crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
- | (cop->flags & COP_F_BATCH);
- crp->crp_uio = &cod->uio;
+ crp->crp_ilen = cop->len + cse->hashsize;
+ crp->crp_flags = CRYPTO_F_CBIMM | (cop->flags & COP_F_BATCH);
+ crp->crp_buf = cod->buf;
+ crp->crp_buf_type = CRYPTO_BUF_CONTIG;
crp->crp_callback = cryptodev_cb;
- crp->crp_session = cse->cses;
crp->crp_opaque = cod;
if (cop->iv) {
- if (crde == NULL) {
+ if (cse->ivsize == 0) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
error = EINVAL;
goto bail;
}
- if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- error = EINVAL;
- goto bail;
- }
- if ((error = copyin(cop->iv, crde->crd_iv,
- cse->txform->ivsize))) {
+ error = copyin(cop->iv, crp->crp_iv, cse->ivsize);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
- crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
- crde->crd_skip = 0;
- } else if (cse->cipher == CRYPTO_ARC4) { /* XXX use flag? */
- crde->crd_skip = 0;
- } else if (crde) {
- crde->crd_flags |= CRD_F_IV_PRESENT;
- crde->crd_skip = cse->txform->ivsize;
- crde->crd_len -= cse->txform->ivsize;
+ crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
+ } else if (cse->ivsize != 0) {
+ crp->crp_iv_start = 0;
+ crp->crp_payload_start += cse->ivsize;
+ crp->crp_payload_length -= cse->ivsize;
}
- if (cop->mac && crda == NULL) {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- error = EINVAL;
- goto bail;
+ if (cop->mac != NULL) {
+ error = copyin(cop->mac, cod->buf + cop->len, cse->hashsize);
+ if (error) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ goto bail;
+ }
}
cryptodev_warn(cse);
-
again:
/*
* Let the dispatch run unlocked, then, interlock against the
@@ -995,18 +1074,20 @@
goto bail;
}
- if (cop->dst &&
- (error = copyout(cod->uio.uio_iov[0].iov_base, cop->dst,
- cop->len))) {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- goto bail;
+ if (cop->dst != NULL) {
+ error = copyout(cod->buf, cop->dst, cop->len);
+ if (error) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ goto bail;
+ }
}
- if (cop->mac &&
- (error = copyout((caddr_t)cod->uio.uio_iov[0].iov_base + cop->len,
- cop->mac, cse->thash->hashsize))) {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- goto bail;
+ if (cop->mac != NULL) {
+ error = copyout(cod->buf + cop->len, cop->mac, cse->hashsize);
+ if (error) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ goto bail;
+ }
}
bail:
@@ -1027,7 +1108,6 @@
{
struct cryptop_data *cod = NULL;
struct cryptop *crp = NULL;
- struct cryptodesc *crde = NULL, *crda = NULL;
int error;
if (caead->len > 256*1024-4 || caead->aadlen > 256*1024-4) {
@@ -1035,99 +1115,122 @@
return (E2BIG);
}
- if (cse->txform == NULL || cse->thash == NULL || caead->tag == NULL ||
+ if (cse->txform == NULL || cse->hashsize == 0 || caead->tag == NULL ||
(caead->len % cse->txform->blocksize) != 0) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
return (EINVAL);
}
- cod = cod_alloc(cse, caead->aadlen + caead->len + cse->thash->hashsize,
- td);
-
- crp = crypto_getreq(2);
- if (crp == NULL) {
- error = ENOMEM;
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- goto bail;
- }
-
+ /*
+ * The COP_F_CIPHER_FIRST flag predates explicit session
+	 * modes, but the only way it was used was for EtA, so allow it
+ * as long as it is consistent with EtA.
+ */
if (caead->flags & COP_F_CIPHER_FIRST) {
- crde = crp->crp_desc;
- crda = crde->crd_next;
- } else {
- crda = crp->crp_desc;
- crde = crda->crd_next;
+ if (caead->op != COP_ENCRYPT) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ return (EINVAL);
+ }
}
- if ((error = copyin(caead->aad, cod->uio.uio_iov[0].iov_base,
- caead->aadlen))) {
+ cod = cod_alloc(cse, caead->aadlen + caead->len + cse->hashsize, td);
+
+ crp = crypto_getreq(cse->cses, M_WAITOK);
+
+ error = copyin(caead->aad, cod->buf, caead->aadlen);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
+ crp->crp_aad_start = 0;
+ crp->crp_aad_length = caead->aadlen;
- if ((error = copyin(caead->src, (char *)cod->uio.uio_iov[0].iov_base +
- caead->aadlen, caead->len))) {
+ error = copyin(caead->src, cod->buf + caead->aadlen, caead->len);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
+ crp->crp_payload_start = caead->aadlen;
+ crp->crp_payload_length = caead->len;
+ crp->crp_digest_start = caead->aadlen + caead->len;
- /*
- * For GCM/CCM, crd_len covers only the AAD. For other ciphers
- * chained with an HMAC, crd_len covers both the AAD and the
- * cipher text.
- */
- crda->crd_skip = 0;
- if (cse->cipher == CRYPTO_AES_NIST_GCM_16 ||
- cse->cipher == CRYPTO_AES_CCM_16)
- crda->crd_len = caead->aadlen;
- else
- crda->crd_len = caead->aadlen + caead->len;
- crda->crd_inject = caead->aadlen + caead->len;
-
- crda->crd_alg = cse->mac;
- crda->crd_key = cse->mackey;
- crda->crd_klen = cse->mackeylen * 8;
-
- if (caead->op == COP_ENCRYPT)
- crde->crd_flags |= CRD_F_ENCRYPT;
- else
- crde->crd_flags &= ~CRD_F_ENCRYPT;
- crde->crd_skip = caead->aadlen;
- crde->crd_len = caead->len;
- crde->crd_inject = caead->aadlen;
-
- crde->crd_alg = cse->cipher;
- crde->crd_key = cse->key;
- crde->crd_klen = cse->keylen * 8;
+ switch (cse->mode) {
+ case CSP_MODE_AEAD:
+ switch (caead->op) {
+ case COP_ENCRYPT:
+ crp->crp_op = CRYPTO_OP_ENCRYPT |
+ CRYPTO_OP_COMPUTE_DIGEST;
+ break;
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_DECRYPT |
+ CRYPTO_OP_VERIFY_DIGEST;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ case CSP_MODE_ETA:
+ switch (caead->op) {
+ case COP_ENCRYPT:
+ crp->crp_op = CRYPTO_OP_ENCRYPT |
+ CRYPTO_OP_COMPUTE_DIGEST;
+ break;
+ case COP_DECRYPT:
+ crp->crp_op = CRYPTO_OP_DECRYPT |
+ CRYPTO_OP_VERIFY_DIGEST;
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
+ break;
+ default:
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ error = EINVAL;
+ goto bail;
+ }
- crp->crp_ilen = caead->aadlen + caead->len;
- crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM
- | (caead->flags & COP_F_BATCH);
- crp->crp_uio = &cod->uio;
+ crp->crp_ilen = caead->aadlen + caead->len + cse->hashsize;
+ crp->crp_flags = CRYPTO_F_CBIMM | (caead->flags & COP_F_BATCH);
+ crp->crp_buf = cod->buf;
+ crp->crp_buf_type = CRYPTO_BUF_CONTIG;
crp->crp_callback = cryptodev_cb;
- crp->crp_session = cse->cses;
crp->crp_opaque = cod;
if (caead->iv) {
- if (caead->ivlen > sizeof(crde->crd_iv)) {
+ /*
+ * Permit a 16-byte IV for AES-XTS, but only use the
+ * first 8 bytes as a block number.
+ */
+ if (cse->mode == CSP_MODE_ETA &&
+ caead->ivlen == AES_BLOCK_LEN &&
+ cse->ivsize == AES_XTS_IV_LEN)
+ caead->ivlen = AES_XTS_IV_LEN;
+
+ if (caead->ivlen != cse->ivsize) {
error = EINVAL;
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
- if ((error = copyin(caead->iv, crde->crd_iv, caead->ivlen))) {
+ error = copyin(caead->iv, crp->crp_iv, cse->ivsize);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
- crde->crd_flags |= CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
+ crp->crp_flags |= CRYPTO_F_IV_SEPARATE;
} else {
- crde->crd_flags |= CRD_F_IV_PRESENT;
- crde->crd_skip += cse->txform->ivsize;
- crde->crd_len -= cse->txform->ivsize;
+ crp->crp_iv_start = crp->crp_payload_start;
+ crp->crp_payload_start += cse->ivsize;
+ crp->crp_payload_length -= cse->ivsize;
}
- if ((error = copyin(caead->tag, (caddr_t)cod->uio.uio_iov[0].iov_base +
- caead->len + caead->aadlen, cse->thash->hashsize))) {
+ error = copyin(caead->tag, cod->buf + caead->len + caead->aadlen,
+ cse->hashsize);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
@@ -1164,15 +1267,18 @@
goto bail;
}
- if (caead->dst && (error = copyout(
- (caddr_t)cod->uio.uio_iov[0].iov_base + caead->aadlen, caead->dst,
- caead->len))) {
- SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
- goto bail;
+ if (caead->dst != NULL) {
+ error = copyout(cod->buf + caead->aadlen, caead->dst,
+ caead->len);
+ if (error) {
+ SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
+ goto bail;
+ }
}
- if ((error = copyout((caddr_t)cod->uio.uio_iov[0].iov_base +
- caead->aadlen + caead->len, caead->tag, cse->thash->hashsize))) {
+ error = copyout(cod->buf + caead->aadlen + caead->len, caead->tag,
+ cse->hashsize);
+ if (error) {
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
goto bail;
}
@@ -1202,13 +1308,11 @@
return (0);
}
-static int
-cryptodevkey_cb(void *op)
+static void
+cryptodevkey_cb(struct cryptkop *krp)
{
- struct cryptkop *krp = (struct cryptkop *) op;
wakeup_one(krp);
- return (0);
}
static int
@@ -1267,7 +1371,7 @@
krp->krp_oparams = kop->crk_oparams;
krp->krp_crid = kop->crk_crid;
krp->krp_status = 0;
- krp->krp_callback = (int (*) (struct cryptkop *)) cryptodevkey_cb;
+ krp->krp_callback = cryptodevkey_cb;
for (i = 0; i < CRK_MAXPARAM; i++) {
if (kop->crk_param[i].crp_nbits > 65536) {
@@ -1303,7 +1407,7 @@
goto fail;
}
- kop->crk_crid = krp->krp_crid; /* device that did the work */
+ kop->crk_crid = krp->krp_hid; /* device that did the work */
if (krp->krp_status != 0) {
error = krp->krp_status;
SDT_PROBE1(opencrypto, dev, ioctl, error, __LINE__);
@@ -1429,9 +1533,9 @@
}
struct csession *
-csecreate(struct fcrypt *fcr, crypto_session_t cses, caddr_t key, u_int64_t keylen,
- caddr_t mackey, u_int64_t mackeylen, u_int32_t cipher, u_int32_t mac,
- struct enc_xform *txform, struct auth_hash *thash)
+csecreate(struct fcrypt *fcr, crypto_session_t cses,
+ struct crypto_session_params *csp, struct enc_xform *txform,
+ void *key, struct auth_hash *thash, void *mackey)
{
struct csession *cse;
@@ -1441,14 +1545,17 @@
mtx_init(&cse->lock, "cryptodev", "crypto session lock", MTX_DEF);
refcount_init(&cse->refs, 1);
cse->key = key;
- cse->keylen = keylen/8;
cse->mackey = mackey;
- cse->mackeylen = mackeylen/8;
+ cse->mode = csp->csp_mode;
cse->cses = cses;
- cse->cipher = cipher;
- cse->mac = mac;
cse->txform = txform;
- cse->thash = thash;
+ if (thash != NULL)
+ cse->hashsize = thash->hashsize;
+ else if (csp->csp_cipher_alg == CRYPTO_AES_NIST_GCM_16)
+ cse->hashsize = AES_GMAC_HASH_LEN;
+ else if (csp->csp_cipher_alg == CRYPTO_AES_CCM_16)
+ cse->hashsize = AES_CBC_MAC_HASH_LEN;
+ cse->ivsize = csp->csp_ivlen;
mtx_lock(&fcr->lock);
TAILQ_INSERT_TAIL(&fcr->csessions, cse, next);
cse->ses = fcr->sesn++;
Index: sys/opencrypto/cryptodev_if.m
===================================================================
--- sys/opencrypto/cryptodev_if.m
+++ sys/opencrypto/cryptodev_if.m
@@ -40,32 +40,138 @@
};
/**
- * Crypto driver method to initialize a new session object with the given
- * initialization parameters (cryptoini). The driver's session memory object
- * is already allocated and zeroed, like driver softcs. It is accessed with
+ * @brief Probe to see if a crypto driver supports a session.
+ *
+ * The crypto framework invokes this method on each crypto driver when
+ * creating a session for symmetric crypto operations to determine if
+ * the driver supports the algorithms and mode requested by the
+ * session.
+ *
+ * If the driver does not support a session with the requested
+ * parameters, this function should fail with an error.
+ *
+ * If the driver does support a session with the requested parameters,
+ * this function should return a negative value indicating the
+ * priority of this driver. These negative values should be derived
+ * from one of the CRYPTODEV_PROBE_* constants in
+ * <opencrypto/cryptodev.h>.
+ *
+ * This function's return value is similar to that used by
+ * DEVICE_PROBE(9). However, a return value of zero is not supported
+ * and should not be used.
+ *
+ * @param dev the crypto driver device
+ * @param csp crypto session parameters
+ *
+ * @retval negative if the driver supports this session - the
+ * least negative value is used to select the
+ * driver for the session
+ * @retval EINVAL if the driver does not support the session
+ * @retval positive if some other error occurs
+ */
+METHOD int probesession {
+ device_t dev;
+ const struct crypto_session_params *csp;
+};
+
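For example, a driver that only accelerates AES-GCM might implement this
method as in the following sketch (the driver name is invented; the priority
is one of the CRYPTODEV_PROBE_* constants mentioned above):

	static int
	mydrv_probesession(device_t dev,
	    const struct crypto_session_params *csp)
	{

		if (csp->csp_mode != CSP_MODE_AEAD ||
		    csp->csp_cipher_alg != CRYPTO_AES_NIST_GCM_16)
			return (EINVAL);
		return (CRYPTODEV_PROBE_HARDWARE);
	}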
+/**
+ * @brief Initialize a new crypto session object
+ *
+ * Invoked by the crypto framework to initialize driver-specific data
+ * for a crypto session. The framework allocates and zeroes the
+ * driver's per-session memory object prior to invoking this method.
+ * The driver can access its per-session memory object via
* crypto_get_driver_session().
+ *
+ * @param dev the crypto driver device
+ * @param crypto_session session being initialized
+ * @param csp crypto session parameters
+ *
+ * @retval 0 success
+ * @retval non-zero if some kind of error occurred
*/
METHOD int newsession {
device_t dev;
crypto_session_t crypto_session;
- struct cryptoini *cri;
+ const struct crypto_session_params *csp;
};
/**
- * Optional crypto driver method to release any additional allocations. OCF
- * owns session memory itself; it is zeroed before release.
+ * @brief Destroy a crypto session object
+ *
+ * The crypto framework invokes this method when tearing down a crypto
+ * session. After this callback returns, the framework will explicitly
+ * zero and free the driver's per-session memory object. If the
+ * driver requires additional actions to destroy a session, it should
+ * perform those in this method. If the driver does not require
+ * additional actions, it does not need to provide an implementation of
+ * this method.
+ *
+ * @param dev the crypto driver device
+ * @param crypto_session session being destroyed
*/
METHOD void freesession {
device_t dev;
crypto_session_t crypto_session;
} DEFAULT null_freesession;
+/**
+ * @brief Perform a symmetric crypto operation
+ *
+ * The crypto framework invokes this method for each symmetric crypto
+ * operation performed on a session. A reference to the containing
+ * session is stored as a member of 'struct cryptop'. This routine
+ * should not block, but queue the operation if necessary.
+ *
+ * This method may return ERESTART to indicate that any internal
+ * queues are full so the operation should be queued in the crypto
+ * framework and retried in the future.
+ *
+ * To report errors with a crypto operation, 'crp_etype' should be set
+ * and the operation completed by calling 'crypto_done'. This method
+ * should then return zero.
+ *
+ * @param dev the crypto driver device
+ * @param op crypto operation to perform
+ * @param flags set to CRYPTO_HINT_MORE if additional symmetric
+ * crypto operations are queued for this driver;
+ * otherwise set to zero.
+ *
+ * @retval 0 success
+ * @retval ERESTART internal queue is full
+ */
METHOD int process {
device_t dev;
struct cryptop *op;
int flags;
};
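A skeleton following the conventions above; mydrv_queue_full() and
mydrv_submit() are hypothetical driver routines, with the latter expected to
call crypto_done() from its completion path:

	static int
	mydrv_process(device_t dev, struct cryptop *crp, int hint)
	{
		int error;

		if (mydrv_queue_full(dev))
			return (ERESTART);	/* framework queues and retries */
		error = mydrv_submit(dev, crp);
		if (error != 0) {
			/* Report errors via the request, not the return value. */
			crp->crp_etype = error;
			crypto_done(crp);
		}
		return (0);
	}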
+/**
+ * @brief Perform an asymmetric crypto operation
+ *
+ * The crypto framework invokes this method for each asymmetric crypto
+ * operation. Each asymmetric crypto operation should be
+ * self-contained and is not associated with any persistent session.
+ * This routine should not block, but queue the operation if
+ * necessary.
+ *
+ * This method may return ERESTART to indicate that any internal
+ * queues are full so the operation should be queued in the crypto
+ * framework and retried in the future.
+ *
+ * To report errors with a crypto operation, 'krp_status' should be set
+ * and the operation completed by calling 'crypto_kdone'. This method
+ * should then return zero.
+ *
+ * @param dev the crypto driver device
+ * @param op crypto operation to perform
+ * @param flags set to CRYPTO_HINT_MORE if additional asymmetric
+ * crypto operations are queued for this driver;
+ * otherwise set to zero.
+ *
+ * @retval 0 success
+ * @retval ERESTART internal queue is full
+ */
METHOD int kprocess {
device_t dev;
	struct cryptkop *op;
	int flags;
};
Index: sys/opencrypto/cryptosoft.h
===================================================================
--- sys/opencrypto/cryptosoft.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* $FreeBSD$ */
-/* $OpenBSD: cryptosoft.h,v 1.10 2002/04/22 23:10:09 deraadt Exp $ */
-
-/*-
- * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
- *
- * This code was written by Angelos D. Keromytis in Athens, Greece, in
- * February 2000. Network Security Technologies Inc. (NSTI) kindly
- * supported the development of this code.
- *
- * Copyright (c) 2000 Angelos D. Keromytis
- *
- * Permission to use, copy, and modify this software with or without fee
- * is hereby granted, provided that this entire notice is included in
- * all source code copies of any software which is or includes a copy or
- * modification of this software.
- *
- * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
- * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
- * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
- * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
- * PURPOSE.
- */
-
-#ifndef _CRYPTO_CRYPTOSOFT_H_
-#define _CRYPTO_CRYPTOSOFT_H_
-
-/* Software session entry */
-struct swcr_data {
- int sw_alg; /* Algorithm */
- union {
- struct {
- u_int8_t *SW_ictx;
- u_int8_t *SW_octx;
- u_int16_t SW_klen;
- u_int16_t SW_mlen;
- struct auth_hash *SW_axf;
- } SWCR_AUTH;
- struct {
- u_int8_t *SW_kschedule;
- struct enc_xform *SW_exf;
- } SWCR_ENC;
- struct {
- u_int32_t SW_size;
- struct comp_algo *SW_cxf;
- } SWCR_COMP;
- } SWCR_UN;
-
-#define sw_ictx SWCR_UN.SWCR_AUTH.SW_ictx
-#define sw_octx SWCR_UN.SWCR_AUTH.SW_octx
-#define sw_klen SWCR_UN.SWCR_AUTH.SW_klen
-#define sw_mlen SWCR_UN.SWCR_AUTH.SW_mlen
-#define sw_axf SWCR_UN.SWCR_AUTH.SW_axf
-#define sw_kschedule SWCR_UN.SWCR_ENC.SW_kschedule
-#define sw_exf SWCR_UN.SWCR_ENC.SW_exf
-#define sw_size SWCR_UN.SWCR_COMP.SW_size
-#define sw_cxf SWCR_UN.SWCR_COMP.SW_cxf
-};
-
-struct swcr_session {
- struct mtx swcr_lock;
- struct swcr_data swcr_algorithms[2];
- unsigned swcr_nalgs;
-};
-
-#ifdef _KERNEL
-extern u_int8_t hmac_ipad_buffer[];
-extern u_int8_t hmac_opad_buffer[];
-#endif /* _KERNEL */
-
-#endif /* _CRYPTO_CRYPTO_H_ */
Index: sys/opencrypto/cryptosoft.c
===================================================================
--- sys/opencrypto/cryptosoft.c
+++ sys/opencrypto/cryptosoft.c
@@ -55,36 +55,60 @@
#include <sys/md5.h>
#include <opencrypto/cryptodev.h>
-#include <opencrypto/cryptosoft.h>
#include <opencrypto/xform.h>
#include <sys/kobj.h>
#include <sys/bus.h>
#include "cryptodev_if.h"
-_Static_assert(AES_CCM_IV_LEN == AES_GCM_IV_LEN,
- "AES_GCM_IV_LEN must currently be the same as AES_CCM_IV_LEN");
+struct swcr_auth {
+ void *sw_ictx;
+ void *sw_octx;
+ struct auth_hash *sw_axf;
+ uint16_t sw_mlen;
+ uint16_t sw_octx_len;
+};
+
+struct swcr_encdec {
+ uint8_t *sw_kschedule;
+ struct enc_xform *sw_exf;
+};
+
+struct swcr_compdec {
+ struct comp_algo *sw_cxf;
+};
+
+struct swcr_session {
+ struct mtx swcr_lock;
+ int (*swcr_process)(struct swcr_session *, struct cryptop *);
+
+ struct swcr_auth swcr_auth;
+ struct swcr_encdec swcr_encdec;
+ struct swcr_compdec swcr_compdec;
+};
static int32_t swcr_id;
-u_int8_t hmac_ipad_buffer[HMAC_MAX_BLOCK_LEN];
-u_int8_t hmac_opad_buffer[HMAC_MAX_BLOCK_LEN];
-
-static int swcr_encdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
-static int swcr_authcompute(struct cryptodesc *, struct swcr_data *, caddr_t, int);
-static int swcr_authenc(struct cryptop *crp);
-static int swcr_compdec(struct cryptodesc *, struct swcr_data *, caddr_t, int);
static void swcr_freesession(device_t dev, crypto_session_t cses);
+/* Used for CRYPTO_NULL_CBC. */
+static int
+swcr_null(struct swcr_session *ses, struct cryptop *crp)
+{
+
+ return (0);
+}
+
/*
* Apply a symmetric encryption/decryption algorithm.
*/
static int
-swcr_encdec(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
- int flags)
+swcr_encdec(struct swcr_session *ses, struct cryptop *crp)
{
unsigned char iv[EALG_MAX_BLOCK_LEN], blk[EALG_MAX_BLOCK_LEN];
unsigned char *ivp, *nivp, iv2[EALG_MAX_BLOCK_LEN];
+ const struct crypto_session_params *csp;
+ struct swcr_encdec *sw;
struct enc_xform *exf;
int i, j, k, blks, ind, count, ivlen;
struct uio *uio, uiolcl;
@@ -92,51 +116,39 @@
struct iovec *iov;
int iovcnt, iovalloc;
int error;
+ bool encrypting;
error = 0;
+ sw = &ses->swcr_encdec;
exf = sw->sw_exf;
blks = exf->blocksize;
ivlen = exf->ivsize;
/* Check for non-padded data */
- if (crd->crd_len % blks)
+ if ((crp->crp_payload_length % blks) != 0)
return EINVAL;
- if (crd->crd_alg == CRYPTO_AES_ICM &&
- (crd->crd_flags & CRD_F_IV_EXPLICIT) == 0)
+ if (exf == &enc_xform_aes_icm &&
+ (crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
return (EINVAL);
- /* Initialize the IV */
- if (crd->crd_flags & CRD_F_ENCRYPT) {
- /* IV explicitly provided ? */
- if (crd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(crd->crd_iv, iv, ivlen);
- else
- arc4rand(iv, ivlen, 0);
-
- /* Do we need to write the IV */
- if (!(crd->crd_flags & CRD_F_IV_PRESENT))
- crypto_copyback(flags, buf, crd->crd_inject, ivlen, iv);
-
- } else { /* Decryption */
- /* IV explicitly provided ? */
- if (crd->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(crd->crd_iv, iv, ivlen);
- else {
- /* Get IV off buf */
- crypto_copydata(flags, buf, crd->crd_inject, ivlen, iv);
- }
- }
-
- if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
- int error;
-
+ /* IV explicitly provided ? */
+ if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ bcopy(crp->crp_iv, iv, ivlen);
+ else if (crp->crp_flags & CRYPTO_F_IV_GENERATE) {
+ arc4rand(iv, ivlen, 0);
+ crypto_copyback(crp, crp->crp_iv_start, ivlen, iv);
+ } else
+ crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);
+
+ if (crp->crp_cipher_key != NULL) {
if (sw->sw_kschedule)
exf->zerokey(&(sw->sw_kschedule));
+ csp = crypto_get_params(crp->crp_session);
error = exf->setkey(&sw->sw_kschedule,
- crd->crd_key, crd->crd_klen / 8);
+ crp->crp_cipher_key, csp->csp_cipher_klen);
if (error)
return (error);
}
@@ -145,20 +157,24 @@
iovcnt = nitems(iovlcl);
iovalloc = 0;
uio = &uiolcl;
- if ((flags & CRYPTO_F_IMBUF) != 0) {
- error = crypto_mbuftoiov((struct mbuf *)buf, &iov, &iovcnt,
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ error = crypto_mbuftoiov(crp->crp_mbuf, &iov, &iovcnt,
&iovalloc);
if (error)
return (error);
uio->uio_iov = iov;
uio->uio_iovcnt = iovcnt;
- } else if ((flags & CRYPTO_F_IOV) != 0)
- uio = (struct uio *)buf;
- else {
- iov[0].iov_base = buf;
- iov[0].iov_len = crd->crd_skip + crd->crd_len;
+ break;
+ case CRYPTO_BUF_UIO:
+ uio = crp->crp_uio;
+ break;
+ case CRYPTO_BUF_CONTIG:
+ iov[0].iov_base = crp->crp_buf;
+ iov[0].iov_len = crp->crp_ilen;
uio->uio_iov = iov;
uio->uio_iovcnt = 1;
+ break;
}
ivp = iv;
@@ -171,14 +187,15 @@
exf->reinit(sw->sw_kschedule, iv);
}
- count = crd->crd_skip;
+ count = crp->crp_payload_start;
ind = cuio_getptr(uio, count, &k);
if (ind == -1) {
error = EINVAL;
goto out;
}
- i = crd->crd_len;
+ i = crp->crp_payload_length;
+ encrypting = CRYPTO_OP_IS_ENCRYPT(crp->crp_op);
while (i > 0) {
/*
@@ -191,14 +208,14 @@
/* Actual encryption/decryption */
if (exf->reinit) {
- if (crd->crd_flags & CRD_F_ENCRYPT) {
+ if (encrypting) {
exf->encrypt(sw->sw_kschedule,
blk);
} else {
exf->decrypt(sw->sw_kschedule,
blk);
}
- } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ } else if (encrypting) {
/* XOR with previous block */
for (j = 0; j < blks; j++)
blk[j] ^= ivp[j];
@@ -257,11 +274,10 @@
idat = (uint8_t *)uio->uio_iov[ind].iov_base + k;
if (exf->reinit) {
- if ((crd->crd_flags & CRD_F_ENCRYPT) != 0 &&
- exf->encrypt_multi == NULL)
+ if (encrypting && exf->encrypt_multi == NULL)
exf->encrypt(sw->sw_kschedule,
idat);
- else if ((crd->crd_flags & CRD_F_ENCRYPT) != 0) {
+ else if (encrypting) {
nb = rounddown(rem, blks);
exf->encrypt_multi(sw->sw_kschedule,
idat, nb);
@@ -273,7 +289,7 @@
exf->decrypt_multi(sw->sw_kschedule,
idat, nb);
}
- } else if (crd->crd_flags & CRD_F_ENCRYPT) {
+ } else if (encrypting) {
/* XOR with previous block/IV */
for (j = 0; j < blks; j++)
idat[j] ^= ivp[j];
@@ -325,13 +341,10 @@
return (error);
}
-static int __result_use_check
-swcr_authprepare(struct auth_hash *axf, struct swcr_data *sw, u_char *key,
- int klen)
+static void
+swcr_authprepare(struct auth_hash *axf, struct swcr_auth *sw,
+ const uint8_t *key, int klen)
{
- int k;
-
- klen /= 8;
switch (axf->type) {
case CRYPTO_MD5_HMAC:
@@ -342,22 +355,8 @@
case CRYPTO_SHA2_512_HMAC:
case CRYPTO_NULL_HMAC:
case CRYPTO_RIPEMD160_HMAC:
- for (k = 0; k < klen; k++)
- key[k] ^= HMAC_IPAD_VAL;
-
- axf->Init(sw->sw_ictx);
- axf->Update(sw->sw_ictx, key, klen);
- axf->Update(sw->sw_ictx, hmac_ipad_buffer, axf->blocksize - klen);
-
- for (k = 0; k < klen; k++)
- key[k] ^= (HMAC_IPAD_VAL ^ HMAC_OPAD_VAL);
-
- axf->Init(sw->sw_octx);
- axf->Update(sw->sw_octx, key, klen);
- axf->Update(sw->sw_octx, hmac_opad_buffer, axf->blocksize - klen);
-
- for (k = 0; k < klen; k++)
- key[k] ^= HMAC_OPAD_VAL;
+ hmac_init_ipad(axf, key, klen, sw->sw_ictx);
+ hmac_init_opad(axf, key, klen, sw->sw_octx);
break;
case CRYPTO_MD5_KPDK:
case CRYPTO_SHA1_KPDK:
@@ -374,7 +373,6 @@
*/
u_char buf[SHA1_RESULTLEN];
- sw->sw_klen = klen;
bcopy(key, sw->sw_octx, klen);
axf->Init(sw->sw_ictx);
axf->Update(sw->sw_ictx, key, klen);
@@ -382,55 +380,53 @@
break;
}
case CRYPTO_POLY1305:
- if (klen != POLY1305_KEY_LEN) {
- CRYPTDEB("bad poly1305 key size %d", klen);
- return EINVAL;
- }
- /* FALLTHROUGH */
case CRYPTO_BLAKE2B:
case CRYPTO_BLAKE2S:
axf->Setkey(sw->sw_ictx, key, klen);
axf->Init(sw->sw_ictx);
break;
default:
- printf("%s: CRD_F_KEY_EXPLICIT flag given, but algorithm %d "
- "doesn't use keys.\n", __func__, axf->type);
- return EINVAL;
+ panic("%s: algorithm %d doesn't use keys", __func__, axf->type);
}
- return 0;
}
/*
- * Compute keyed-hash authenticator.
+ * Compute or verify hash.
*/
static int
-swcr_authcompute(struct cryptodesc *crd, struct swcr_data *sw, caddr_t buf,
- int flags)
+swcr_authcompute(struct swcr_session *ses, struct cryptop *crp)
{
- unsigned char aalg[HASH_MAX_LEN];
+ u_char aalg[HASH_MAX_LEN];
+ u_char uaalg[HASH_MAX_LEN];
+ const struct crypto_session_params *csp;
+ struct swcr_auth *sw;
struct auth_hash *axf;
union authctx ctx;
int err;
- if (sw->sw_ictx == 0)
- return EINVAL;
+ sw = &ses->swcr_auth;
axf = sw->sw_axf;
- if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
- err = swcr_authprepare(axf, sw, crd->crd_key, crd->crd_klen);
- if (err != 0)
- return err;
+ if (crp->crp_auth_key != NULL) {
+ csp = crypto_get_params(crp->crp_session);
+ swcr_authprepare(axf, sw, crp->crp_auth_key,
+ csp->csp_auth_klen);
}
bcopy(sw->sw_ictx, &ctx, axf->ctxsize);
- err = crypto_apply(flags, buf, crd->crd_skip, crd->crd_len,
- (int (*)(void *, void *, unsigned int))axf->Update, (caddr_t)&ctx);
+ err = crypto_apply(crp, crp->crp_aad_start, crp->crp_aad_length,
+ (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
if (err)
return err;
- switch (sw->sw_alg) {
+ err = crypto_apply(crp, crp->crp_payload_start, crp->crp_payload_length,
+ (int (*)(void *, void *, unsigned int))axf->Update, &ctx);
+ if (err)
+ return err;
+
+ switch (axf->type) {
case CRYPTO_SHA1:
case CRYPTO_SHA2_224:
case CRYPTO_SHA2_256:
@@ -468,7 +464,7 @@
* and let Final() do the proper, natural "algofill"
* padding.
*/
- axf->Update(&ctx, sw->sw_octx, sw->sw_klen);
+ axf->Update(&ctx, sw->sw_octx, sw->sw_octx_len);
axf->Final(aalg, &ctx);
break;
@@ -480,20 +476,22 @@
break;
}
- /* Inject the authentication data */
- crypto_copyback(flags, buf, crd->crd_inject,
- sw->sw_mlen == 0 ? axf->hashsize : sw->sw_mlen, aalg);
- return 0;
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, sw->sw_mlen, uaalg);
+ if (timingsafe_bcmp(aalg, uaalg, sw->sw_mlen) != 0)
+ return (EBADMSG);
+ } else {
+ /* Inject the authentication data */
+ crypto_copyback(crp, crp->crp_digest_start, sw->sw_mlen, aalg);
+ }
+ return (0);
}
CTASSERT(INT_MAX <= (1ll<<39) - 256); /* GCM: plain text < 2^39-256 */
CTASSERT(INT_MAX <= (uint64_t)-1); /* GCM: associated data <= 2^64-1 */
-/*
- * Apply a combined encryption-authentication transformation
- */
static int
-swcr_authenc(struct cryptop *crp)
+swcr_gmac(struct swcr_session *ses, struct cryptop *crp)
{
uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
u_char *blk = (u_char *)blkbuf;
@@ -501,288 +499,403 @@
u_char uaalg[AALG_MAX_RESULT_LEN];
u_char iv[EALG_MAX_BLOCK_LEN];
union authctx ctx;
- struct swcr_session *ses;
- struct cryptodesc *crd, *crda = NULL, *crde = NULL;
- struct swcr_data *sw, *swa, *swe = NULL;
- struct auth_hash *axf = NULL;
- struct enc_xform *exf = NULL;
- caddr_t buf = (caddr_t)crp->crp_buf;
+ struct swcr_auth *swa;
+ struct auth_hash *axf;
uint32_t *blkp;
- int aadlen, blksz, i, ivlen, len, iskip, oskip, r;
- int isccm = 0;
-
- ivlen = blksz = iskip = oskip = 0;
-
- ses = crypto_get_driver_session(crp->crp_session);
-
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- for (i = 0; i < nitems(ses->swcr_algorithms) &&
- ses->swcr_algorithms[i].sw_alg != crd->crd_alg; i++)
- ;
- if (i == nitems(ses->swcr_algorithms))
- return (EINVAL);
-
- sw = &ses->swcr_algorithms[i];
- switch (sw->sw_alg) {
- case CRYPTO_AES_CCM_16:
- case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_NIST_GMAC:
- swe = sw;
- crde = crd;
- exf = swe->sw_exf;
- /* AES_CCM_IV_LEN and AES_GCM_IV_LEN are both 12 */
- ivlen = AES_CCM_IV_LEN;
- break;
- case CRYPTO_AES_CCM_CBC_MAC:
- isccm = 1;
- /* FALLTHROUGH */
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- swa = sw;
- crda = crd;
- axf = swa->sw_axf;
- if (swa->sw_ictx == 0)
- return (EINVAL);
- bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
- blksz = axf->blocksize;
- break;
- default:
- return (EINVAL);
- }
+ int blksz, i, ivlen, len;
+
+ swa = &ses->swcr_auth;
+ axf = swa->sw_axf;
+
+ bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
+ blksz = axf->blocksize;
+
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE)
+ return (EINVAL);
+
+ /* Initialize the IV */
+ ivlen = AES_GCM_IV_LEN;
+ if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ bcopy(crp->crp_iv, iv, ivlen);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);
+
+ axf->Reinit(&ctx, iv, ivlen);
+ for (i = 0; i < crp->crp_payload_length; i += blksz) {
+ len = MIN(crp->crp_payload_length - i, blksz);
+ crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+ bzero(blk + len, blksz - len);
+ axf->Update(&ctx, blk, blksz);
+ }
+
+ /* length block */
+ bzero(blk, blksz);
+ blkp = (uint32_t *)blk + 1;
+ *blkp = htobe32(crp->crp_payload_length * 8);
+ axf->Update(&ctx, blk, blksz);
+
+ /* Finalize MAC */
+ axf->Final(aalg, &ctx);
+
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
+ uaalg);
+ if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
+ return (EBADMSG);
+ } else {
+ /* Inject the authentication data */
+ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
}
- if (crde == NULL || crda == NULL)
+ return (0);
+}
+
+static int
+swcr_gcm(struct swcr_session *ses, struct cryptop *crp)
+{
+ uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
+ u_char *blk = (u_char *)blkbuf;
+ u_char aalg[AALG_MAX_RESULT_LEN];
+ u_char uaalg[AALG_MAX_RESULT_LEN];
+ u_char iv[EALG_MAX_BLOCK_LEN];
+ union authctx ctx;
+ struct swcr_auth *swa;
+ struct swcr_encdec *swe;
+ struct auth_hash *axf;
+ struct enc_xform *exf;
+ uint32_t *blkp;
+ int blksz, i, ivlen, len, r;
+
+ swa = &ses->swcr_auth;
+ axf = swa->sw_axf;
+
+ bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
+ blksz = axf->blocksize;
+
+ swe = &ses->swcr_encdec;
+ exf = swe->sw_exf;
+
+ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
return (EINVAL);
- /*
- * We need to make sure that the auth algorithm matches the
- * encr algorithm. Specifically, for AES-GCM must go with
- * AES NIST GMAC, and AES-CCM must go with CBC-MAC.
- */
- if (crde->crd_alg == CRYPTO_AES_NIST_GCM_16) {
- switch (crda->crd_alg) {
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- break; /* Good! */
- default:
- return (EINVAL); /* Not good! */
+
+ /* Initialize the IV */
+ ivlen = AES_GCM_IV_LEN;
+ bcopy(crp->crp_iv, iv, ivlen);
+
+ /* Supply MAC with IV */
+ axf->Reinit(&ctx, iv, ivlen);
+
+ /* Supply MAC with AAD */
+ for (i = 0; i < crp->crp_aad_length; i += blksz) {
+ len = MIN(crp->crp_aad_length - i, blksz);
+ crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
+ bzero(blk + len, blksz - len);
+ axf->Update(&ctx, blk, blksz);
+ }
+
+ exf->reinit(swe->sw_kschedule, iv);
+
+ /* Do encryption with MAC */
+ for (i = 0; i < crp->crp_payload_length; i += len) {
+ len = MIN(crp->crp_payload_length - i, blksz);
+ if (len < blksz)
+ bzero(blk, blksz);
+ crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ exf->encrypt(swe->sw_kschedule, blk);
+ axf->Update(&ctx, blk, len);
+ crypto_copyback(crp, crp->crp_payload_start + i, len,
+ blk);
+ } else {
+ axf->Update(&ctx, blk, len);
}
- } else if (crde->crd_alg == CRYPTO_AES_CCM_16 &&
- crda->crd_alg != CRYPTO_AES_CCM_CBC_MAC)
- return (EINVAL);
+ }
- if ((crde->crd_alg == CRYPTO_AES_NIST_GCM_16 ||
- crde->crd_alg == CRYPTO_AES_CCM_16) &&
- (crde->crd_flags & CRD_F_IV_EXPLICIT) == 0)
- return (EINVAL);
+ /* length block */
+ bzero(blk, blksz);
+ blkp = (uint32_t *)blk + 1;
+ *blkp = htobe32(crp->crp_aad_length * 8);
+ blkp = (uint32_t *)blk + 3;
+ *blkp = htobe32(crp->crp_payload_length * 8);
+ axf->Update(&ctx, blk, blksz);
+
+ /* Finalize MAC */
+ axf->Final(aalg, &ctx);
+
+ /* Validate tag */
+ if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
+ uaalg);
- if (crde->crd_klen != crda->crd_klen)
+ r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
+ if (r != 0)
+ return (EBADMSG);
+
+ /* tag matches, decrypt data */
+ for (i = 0; i < crp->crp_payload_length; i += blksz) {
+ len = MIN(crp->crp_payload_length - i, blksz);
+ if (len < blksz)
+ bzero(blk, blksz);
+ crypto_copydata(crp, crp->crp_payload_start + i, len,
+ blk);
+ exf->decrypt(swe->sw_kschedule, blk);
+ crypto_copyback(crp, crp->crp_payload_start + i, len,
+ blk);
+ }
+ } else {
+ /* Inject the authentication data */
+ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
+ aalg);
+ }
+
+ return (0);
+}
+
+static int
+swcr_ccm_cbc_mac(struct swcr_session *ses, struct cryptop *crp)
+{
+ uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
+ u_char *blk = (u_char *)blkbuf;
+ u_char aalg[AALG_MAX_RESULT_LEN];
+ u_char uaalg[AALG_MAX_RESULT_LEN];
+ u_char iv[EALG_MAX_BLOCK_LEN];
+ union authctx ctx;
+ struct swcr_auth *swa;
+ struct auth_hash *axf;
+ int blksz, i, ivlen, len;
+
+ swa = &ses->swcr_auth;
+ axf = swa->sw_axf;
+
+ bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
+ blksz = axf->blocksize;
+
+ if (crp->crp_flags & CRYPTO_F_IV_GENERATE)
return (EINVAL);
/* Initialize the IV */
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- /* IV explicitly provided ? */
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(crde->crd_iv, iv, ivlen);
- else
- arc4rand(iv, ivlen, 0);
-
- /* Do we need to write the IV */
- if (!(crde->crd_flags & CRD_F_IV_PRESENT))
- crypto_copyback(crp->crp_flags, buf, crde->crd_inject,
- ivlen, iv);
-
- } else { /* Decryption */
- /* IV explicitly provided ? */
- if (crde->crd_flags & CRD_F_IV_EXPLICIT)
- bcopy(crde->crd_iv, iv, ivlen);
- else {
- /* Get IV off buf */
- crypto_copydata(crp->crp_flags, buf, crde->crd_inject,
- ivlen, iv);
- }
+ ivlen = AES_CCM_IV_LEN;
+ if (crp->crp_flags & CRYPTO_F_IV_SEPARATE)
+ bcopy(crp->crp_iv, iv, ivlen);
+ else
+ crypto_copydata(crp, crp->crp_iv_start, ivlen, iv);
+
+ /*
+ * AES CCM-CBC-MAC needs to know the length of both the auth
+ * data and payload data before doing the auth computation.
+ */
+ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_payload_length;
+ ctx.aes_cbc_mac_ctx.cryptDataLength = 0;
+
+ axf->Reinit(&ctx, iv, ivlen);
+ for (i = 0; i < crp->crp_payload_length; i += blksz) {
+ len = MIN(crp->crp_payload_length - i, blksz);
+ crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+ bzero(blk + len, blksz - len);
+ axf->Update(&ctx, blk, blksz);
}
- if (swa->sw_alg == CRYPTO_AES_CCM_CBC_MAC) {
- /*
- * AES CCM-CBC needs to know the length of
- * both the auth data, and payload data, before
- * doing the auth computation.
- */
- ctx.aes_cbc_mac_ctx.authDataLength = crda->crd_len;
- ctx.aes_cbc_mac_ctx.cryptDataLength = crde->crd_len;
+ /* Finalize MAC */
+ axf->Final(aalg, &ctx);
+
+ if (crp->crp_op & CRYPTO_OP_VERIFY_DIGEST) {
+ crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
+ uaalg);
+ if (timingsafe_bcmp(aalg, uaalg, swa->sw_mlen) != 0)
+ return (EBADMSG);
+ } else {
+ /* Inject the authentication data */
+ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen, aalg);
}
+ return (0);
+}
+
+static int
+swcr_ccm(struct swcr_session *ses, struct cryptop *crp)
+{
+ uint32_t blkbuf[howmany(EALG_MAX_BLOCK_LEN, sizeof(uint32_t))];
+ u_char *blk = (u_char *)blkbuf;
+ u_char aalg[AALG_MAX_RESULT_LEN];
+ u_char uaalg[AALG_MAX_RESULT_LEN];
+ u_char iv[EALG_MAX_BLOCK_LEN];
+ union authctx ctx;
+ struct swcr_auth *swa;
+ struct swcr_encdec *swe;
+ struct auth_hash *axf;
+ struct enc_xform *exf;
+ int blksz, i, ivlen, len, r;
+
+ swa = &ses->swcr_auth;
+ axf = swa->sw_axf;
+
+ bcopy(swa->sw_ictx, &ctx, axf->ctxsize);
+ blksz = axf->blocksize;
+
+ swe = &ses->swcr_encdec;
+ exf = swe->sw_exf;
+
+ if ((crp->crp_flags & CRYPTO_F_IV_SEPARATE) == 0)
+ return (EINVAL);
+
+ /* Initialize the IV */
+ ivlen = AES_CCM_IV_LEN;
+ bcopy(crp->crp_iv, iv, ivlen);
+
+ /*
+ * AES CCM-CBC-MAC needs to know the length of both the auth
+ * data and payload data before doing the auth computation.
+ */
+ ctx.aes_cbc_mac_ctx.authDataLength = crp->crp_aad_length;
+ ctx.aes_cbc_mac_ctx.cryptDataLength = crp->crp_payload_length;
+
/* Supply MAC with IV */
- if (axf->Reinit)
- axf->Reinit(&ctx, iv, ivlen);
+ axf->Reinit(&ctx, iv, ivlen);
/* Supply MAC with AAD */
- aadlen = crda->crd_len;
-
- for (i = iskip; i < crda->crd_len; i += blksz) {
- len = MIN(crda->crd_len - i, blksz - oskip);
- crypto_copydata(crp->crp_flags, buf, crda->crd_skip + i, len,
- blk + oskip);
- bzero(blk + len + oskip, blksz - len - oskip);
+ for (i = 0; i < crp->crp_aad_length; i += blksz) {
+ len = MIN(crp->crp_aad_length - i, blksz);
+ crypto_copydata(crp, crp->crp_aad_start + i, len, blk);
+ bzero(blk + len, blksz - len);
axf->Update(&ctx, blk, blksz);
- oskip = 0; /* reset initial output offset */
}
- if (exf->reinit)
- exf->reinit(swe->sw_kschedule, iv);
+ exf->reinit(swe->sw_kschedule, iv);
/* Do encryption/decryption with MAC */
- for (i = 0; i < crde->crd_len; i += len) {
- if (exf->encrypt_multi != NULL) {
- len = rounddown(crde->crd_len - i, blksz);
- if (len == 0)
- len = blksz;
- else
- len = MIN(len, sizeof(blkbuf));
- } else
- len = blksz;
- len = MIN(crde->crd_len - i, len);
+ for (i = 0; i < crp->crp_payload_length; i += len) {
+ len = MIN(crp->crp_payload_length - i, blksz);
if (len < blksz)
bzero(blk, blksz);
- crypto_copydata(crp->crp_flags, buf, crde->crd_skip + i, len,
- blk);
- /*
- * One of the problems with CCM+CBC is that the authentication
- * is done on the unecncrypted data. As a result, we have
- * to do the authentication update at different times,
- * depending on whether it's CCM or not.
- */
- if (crde->crd_flags & CRD_F_ENCRYPT) {
- if (isccm)
- axf->Update(&ctx, blk, len);
- if (exf->encrypt_multi != NULL)
- exf->encrypt_multi(swe->sw_kschedule, blk,
- len);
- else
- exf->encrypt(swe->sw_kschedule, blk);
- if (!isccm)
- axf->Update(&ctx, blk, len);
- crypto_copyback(crp->crp_flags, buf,
- crde->crd_skip + i, len, blk);
+ crypto_copydata(crp, crp->crp_payload_start + i, len, blk);
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ axf->Update(&ctx, blk, len);
+ exf->encrypt(swe->sw_kschedule, blk);
+ crypto_copyback(crp, crp->crp_payload_start + i, len,
+ blk);
} else {
- if (isccm) {
- KASSERT(exf->encrypt_multi == NULL,
- ("assume CCM is single-block only"));
- exf->decrypt(swe->sw_kschedule, blk);
- }
+ /*
+ * One of the problems with CCM+CBC is that
+ * the authentication is done on the
+			 * unencrypted data. As a result, we have to
+ * decrypt the data twice: once to generate
+ * the tag and a second time after the tag is
+ * verified.
+ */
+ exf->decrypt(swe->sw_kschedule, blk);
axf->Update(&ctx, blk, len);
}
}
- /* Do any required special finalization */
- switch (crda->crd_alg) {
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- /* length block */
- bzero(blk, blksz);
- blkp = (uint32_t *)blk + 1;
- *blkp = htobe32(aadlen * 8);
- blkp = (uint32_t *)blk + 3;
- *blkp = htobe32(crde->crd_len * 8);
- axf->Update(&ctx, blk, blksz);
- break;
- }
-
/* Finalize MAC */
axf->Final(aalg, &ctx);
/* Validate tag */
- if (!(crde->crd_flags & CRD_F_ENCRYPT)) {
- crypto_copydata(crp->crp_flags, buf, crda->crd_inject,
- axf->hashsize, uaalg);
+ if (!CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ crypto_copydata(crp, crp->crp_digest_start, swa->sw_mlen,
+ uaalg);
- r = timingsafe_bcmp(aalg, uaalg, axf->hashsize);
- if (r == 0) {
- /* tag matches, decrypt data */
- if (isccm) {
- KASSERT(exf->reinit != NULL,
- ("AES-CCM reinit function must be set"));
- exf->reinit(swe->sw_kschedule, iv);
- }
- for (i = 0; i < crde->crd_len; i += blksz) {
- len = MIN(crde->crd_len - i, blksz);
- if (len < blksz)
- bzero(blk, blksz);
- crypto_copydata(crp->crp_flags, buf,
- crde->crd_skip + i, len, blk);
- exf->decrypt(swe->sw_kschedule, blk);
- crypto_copyback(crp->crp_flags, buf,
- crde->crd_skip + i, len, blk);
- }
- } else
+ r = timingsafe_bcmp(aalg, uaalg, swa->sw_mlen);
+ if (r != 0)
return (EBADMSG);
+
+ /* tag matches, decrypt data */
+ exf->reinit(swe->sw_kschedule, iv);
+ for (i = 0; i < crp->crp_payload_length; i += blksz) {
+ len = MIN(crp->crp_payload_length - i, blksz);
+ if (len < blksz)
+ bzero(blk, blksz);
+ crypto_copydata(crp, crp->crp_payload_start + i, len,
+ blk);
+ exf->decrypt(swe->sw_kschedule, blk);
+ crypto_copyback(crp, crp->crp_payload_start + i, len,
+ blk);
+ }
} else {
/* Inject the authentication data */
- crypto_copyback(crp->crp_flags, buf, crda->crd_inject,
- axf->hashsize, aalg);
+ crypto_copyback(crp, crp->crp_digest_start, swa->sw_mlen,
+ aalg);
}
return (0);
}
+/*
+ * Apply a cipher and a digest to perform EtA.
+ */
+static int
+swcr_eta(struct swcr_session *ses, struct cryptop *crp)
+{
+ int error;
+
+ if (CRYPTO_OP_IS_ENCRYPT(crp->crp_op)) {
+ error = swcr_encdec(ses, crp);
+ if (error == 0)
+ error = swcr_authcompute(ses, crp);
+ } else {
+ error = swcr_authcompute(ses, crp);
+ if (error == 0)
+ error = swcr_encdec(ses, crp);
+ }
+ return (error);
+}
+
/*
* Apply a compression/decompression algorithm
*/
static int
-swcr_compdec(struct cryptodesc *crd, struct swcr_data *sw,
- caddr_t buf, int flags)
+swcr_compdec(struct swcr_session *ses, struct cryptop *crp)
{
u_int8_t *data, *out;
struct comp_algo *cxf;
int adj;
u_int32_t result;
- cxf = sw->sw_cxf;
+ cxf = ses->swcr_compdec.sw_cxf;
/* We must handle the whole buffer of data in one time
* then if there is not all the data in the mbuf, we must
* copy in a buffer.
*/
- data = malloc(crd->crd_len, M_CRYPTO_DATA, M_NOWAIT);
+ data = malloc(crp->crp_payload_length, M_CRYPTO_DATA, M_NOWAIT);
if (data == NULL)
return (EINVAL);
- crypto_copydata(flags, buf, crd->crd_skip, crd->crd_len, data);
+ crypto_copydata(crp, crp->crp_payload_start, crp->crp_payload_length,
+ data);
- if (crd->crd_flags & CRD_F_COMP)
- result = cxf->compress(data, crd->crd_len, &out);
+ if (CRYPTO_OP_IS_COMPRESS(crp->crp_op))
+ result = cxf->compress(data, crp->crp_payload_length, &out);
else
- result = cxf->decompress(data, crd->crd_len, &out);
+ result = cxf->decompress(data, crp->crp_payload_length, &out);
free(data, M_CRYPTO_DATA);
if (result == 0)
- return EINVAL;
+ return (EINVAL);
+ crp->crp_olen = result;
- /* Copy back the (de)compressed data. m_copyback is
- * extending the mbuf as necessary.
- */
- sw->sw_size = result;
/* Check the compressed size when doing compression */
- if (crd->crd_flags & CRD_F_COMP) {
- if (result >= crd->crd_len) {
+ if (CRYPTO_OP_IS_COMPRESS(crp->crp_op)) {
+ if (result >= crp->crp_payload_length) {
/* Compression was useless, we lost time */
free(out, M_CRYPTO_DATA);
- return 0;
+ return (0);
}
}
- crypto_copyback(flags, buf, crd->crd_skip, result, out);
- if (result < crd->crd_len) {
- adj = result - crd->crd_len;
- if (flags & CRYPTO_F_IMBUF) {
- adj = result - crd->crd_len;
- m_adj((struct mbuf *)buf, adj);
- } else if (flags & CRYPTO_F_IOV) {
- struct uio *uio = (struct uio *)buf;
+	/* Copy back the (de)compressed data. m_copyback extends the
+	 * mbuf as necessary.
+	 */
+ crypto_copyback(crp, crp->crp_payload_start, result, out);
+ if (result < crp->crp_payload_length) {
+ switch (crp->crp_buf_type) {
+ case CRYPTO_BUF_MBUF:
+ adj = result - crp->crp_payload_length;
+ m_adj(crp->crp_mbuf, adj);
+ break;
+ case CRYPTO_BUF_UIO: {
+ struct uio *uio = crp->crp_uio;
int ind;
- adj = crd->crd_len - result;
+ adj = crp->crp_payload_length - result;
ind = uio->uio_iovcnt - 1;
while (adj > 0 && ind >= 0) {
@@ -796,391 +909,522 @@
ind--;
uio->uio_iovcnt--;
}
+ }
+ break;
}
}
free(out, M_CRYPTO_DATA);
return 0;
}
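
A hedged sketch of driving this path from a consumer, assuming
CRYPTO_OP_COMPRESS is the flag tested by CRYPTO_OP_IS_COMPRESS above and
deflate_compress is a hypothetical helper. The driver reports the output size
in crp_olen, and a result no smaller than the input means the payload was
deliberately left untouched:

static int
deflate_compress(crypto_session_t sid, struct mbuf *m,
    int (*done)(struct cryptop *))
{
	struct cryptop *crp;

	crp = crypto_getreq(sid, M_WAITOK);
	crp->crp_op = CRYPTO_OP_COMPRESS;
	crp->crp_flags = CRYPTO_F_CBIMM;
	crp->crp_buf_type = CRYPTO_BUF_MBUF;
	crp->crp_mbuf = m;
	crp->crp_payload_start = 0;
	crp->crp_payload_length = m->m_pkthdr.len;
	crp->crp_callback = done;
	return (crypto_dispatch(crp));
}

The callback would compare crp_olen against the submitted length to decide
whether the compressed framing is worth emitting.
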
+static int
+swcr_setup_encdec(struct swcr_session *ses,
+ const struct crypto_session_params *csp)
+{
+ struct swcr_encdec *swe;
+ struct enc_xform *txf;
+ int error;
+
+ swe = &ses->swcr_encdec;
+ txf = crypto_cipher(csp);
+ MPASS(txf->ivsize == csp->csp_ivlen);
+ if (csp->csp_cipher_key != NULL) {
+ error = txf->setkey(&swe->sw_kschedule,
+ csp->csp_cipher_key, csp->csp_cipher_klen);
+ if (error)
+ return (error);
+ }
+ swe->sw_exf = txf;
+ return (0);
+}
+
+static int
+swcr_setup_auth(struct swcr_session *ses,
+ const struct crypto_session_params *csp)
+{
+ struct swcr_auth *swa;
+ struct auth_hash *axf;
+
+ swa = &ses->swcr_auth;
+
+ axf = crypto_auth_hash(csp);
+ swa->sw_axf = axf;
+ if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
+ return (EINVAL);
+ if (csp->csp_auth_mlen == 0)
+ swa->sw_mlen = axf->hashsize;
+ else
+ swa->sw_mlen = csp->csp_auth_mlen;
+ swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
+ if (swa->sw_ictx == NULL)
+ return (ENOBUFS);
+
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_224_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_NULL_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ swa->sw_octx_len = axf->ctxsize;
+ swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if (swa->sw_octx == NULL)
+ return (ENOBUFS);
+
+ if (csp->csp_auth_key != NULL) {
+ swcr_authprepare(axf, swa, csp->csp_auth_key,
+ csp->csp_auth_klen);
+ }
+
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_authcompute;
+ break;
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ swa->sw_octx_len = csp->csp_auth_klen;
+ swa->sw_octx = malloc(swa->sw_octx_len, M_CRYPTO_DATA,
+ M_NOWAIT);
+ if (swa->sw_octx == NULL)
+ return (ENOBUFS);
+
+ /* Store the key so we can "append" it to the payload */
+ if (csp->csp_auth_key != NULL) {
+ swcr_authprepare(axf, swa, csp->csp_auth_key,
+ csp->csp_auth_klen);
+ }
+
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_authcompute;
+ break;
+#ifdef notdef
+ case CRYPTO_MD5:
+#endif
+ case CRYPTO_SHA1:
+ case CRYPTO_SHA2_224:
+ case CRYPTO_SHA2_256:
+ case CRYPTO_SHA2_384:
+ case CRYPTO_SHA2_512:
+ axf->Init(swa->sw_ictx);
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_authcompute;
+ break;
+ case CRYPTO_AES_NIST_GMAC:
+ axf->Init(swa->sw_ictx);
+ axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
+ csp->csp_auth_klen);
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_gmac;
+ break;
+ case CRYPTO_POLY1305:
+ case CRYPTO_BLAKE2B:
+ case CRYPTO_BLAKE2S:
+ /*
+ * Blake2b and Blake2s support an optional key but do
+ * not require one.
+ */
+ if (csp->csp_auth_klen == 0 || csp->csp_auth_key != NULL)
+ axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
+ csp->csp_auth_klen);
+ axf->Init(swa->sw_ictx);
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_authcompute;
+ break;
+ case CRYPTO_AES_CCM_CBC_MAC:
+ axf->Init(swa->sw_ictx);
+ axf->Setkey(swa->sw_ictx, csp->csp_auth_key,
+ csp->csp_auth_klen);
+ if (csp->csp_mode == CSP_MODE_DIGEST)
+ ses->swcr_process = swcr_ccm_cbc_mac;
+ break;
+ }
+
+ return (0);
+}
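
The csp_auth_mlen handling above is what enables truncated MACs: zero selects
the transform's full hash size, while any smaller value truncates the stored
and compared tag to sw_mlen bytes. A sketch of a digest-only session using the
96-bit truncation IPsec applies to HMAC-SHA1 (new_sha1_96_session and its
parameters are hypothetical):

static int
new_sha1_96_session(crypto_session_t *sidp, const void *key, int klen)
{
	struct crypto_session_params csp;

	memset(&csp, 0, sizeof(csp));
	csp.csp_mode = CSP_MODE_DIGEST;
	csp.csp_auth_alg = CRYPTO_SHA1_HMAC;
	csp.csp_auth_key = key;
	csp.csp_auth_klen = klen;
	csp.csp_auth_mlen = 12;		/* HMAC-SHA1-96 */
	return (crypto_newsession(sidp, &csp, CRYPTO_FLAG_SOFTWARE));
}
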
+
+static int
+swcr_setup_gcm(struct swcr_session *ses,
+ const struct crypto_session_params *csp)
+{
+ struct swcr_encdec *swe;
+ struct swcr_auth *swa;
+ struct enc_xform *txf;
+ struct auth_hash *axf;
+ int error;
+
+ if (csp->csp_ivlen != AES_GCM_IV_LEN)
+ return (EINVAL);
+
+ /* First, setup the auth side. */
+ swa = &ses->swcr_auth;
+ switch (csp->csp_cipher_klen * 8) {
+ case 128:
+ axf = &auth_hash_nist_gmac_aes_128;
+ break;
+ case 192:
+ axf = &auth_hash_nist_gmac_aes_192;
+ break;
+ case 256:
+ axf = &auth_hash_nist_gmac_aes_256;
+ break;
+ default:
+ return (EINVAL);
+ }
+ swa->sw_axf = axf;
+ if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
+ return (EINVAL);
+ if (csp->csp_auth_mlen == 0)
+ swa->sw_mlen = axf->hashsize;
+ else
+ swa->sw_mlen = csp->csp_auth_mlen;
+ swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
+ if (swa->sw_ictx == NULL)
+ return (ENOBUFS);
+ axf->Init(swa->sw_ictx);
+ if (csp->csp_cipher_key != NULL)
+ axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
+ csp->csp_cipher_klen);
+
+ /* Second, setup the cipher side. */
+ swe = &ses->swcr_encdec;
+ txf = &enc_xform_aes_nist_gcm;
+ if (csp->csp_cipher_key != NULL) {
+ error = txf->setkey(&swe->sw_kschedule,
+ csp->csp_cipher_key, csp->csp_cipher_klen);
+ if (error)
+ return (error);
+ }
+ swe->sw_exf = txf;
+
+ return (0);
+}
+
+static int
+swcr_setup_ccm(struct swcr_session *ses,
+ const struct crypto_session_params *csp)
+{
+ struct swcr_encdec *swe;
+ struct swcr_auth *swa;
+ struct enc_xform *txf;
+ struct auth_hash *axf;
+ int error;
+
+ if (csp->csp_ivlen != AES_CCM_IV_LEN)
+ return (EINVAL);
+
+ /* First, setup the auth side. */
+ swa = &ses->swcr_auth;
+ switch (csp->csp_cipher_klen * 8) {
+ case 128:
+ axf = &auth_hash_ccm_cbc_mac_128;
+ break;
+ case 192:
+ axf = &auth_hash_ccm_cbc_mac_192;
+ break;
+ case 256:
+ axf = &auth_hash_ccm_cbc_mac_256;
+ break;
+ default:
+ return (EINVAL);
+ }
+ swa->sw_axf = axf;
+ if (csp->csp_auth_mlen < 0 || csp->csp_auth_mlen > axf->hashsize)
+ return (EINVAL);
+ if (csp->csp_auth_mlen == 0)
+ swa->sw_mlen = axf->hashsize;
+ else
+ swa->sw_mlen = csp->csp_auth_mlen;
+ swa->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA, M_NOWAIT);
+ if (swa->sw_ictx == NULL)
+ return (ENOBUFS);
+ axf->Init(swa->sw_ictx);
+ if (csp->csp_cipher_key != NULL)
+ axf->Setkey(swa->sw_ictx, csp->csp_cipher_key,
+ csp->csp_cipher_klen);
+
+ /* Second, setup the cipher side. */
+ swe = &ses->swcr_encdec;
+ txf = &enc_xform_ccm;
+ if (csp->csp_cipher_key != NULL) {
+ error = txf->setkey(&swe->sw_kschedule,
+ csp->csp_cipher_key, csp->csp_cipher_klen);
+ if (error)
+ return (error);
+ }
+ swe->sw_exf = txf;
+
+ return (0);
+}
+
+static bool
+swcr_auth_supported(const struct crypto_session_params *csp)
+{
+ struct auth_hash *axf;
+
+ axf = crypto_auth_hash(csp);
+ if (axf == NULL)
+ return (false);
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_MD5_HMAC:
+ case CRYPTO_SHA1_HMAC:
+ case CRYPTO_SHA2_224_HMAC:
+ case CRYPTO_SHA2_256_HMAC:
+ case CRYPTO_SHA2_384_HMAC:
+ case CRYPTO_SHA2_512_HMAC:
+ case CRYPTO_NULL_HMAC:
+ case CRYPTO_RIPEMD160_HMAC:
+ case CRYPTO_MD5_KPDK:
+ case CRYPTO_SHA1_KPDK:
+ break;
+ case CRYPTO_AES_NIST_GMAC:
+ switch (csp->csp_auth_klen * 8) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ return (false);
+ }
+ if (csp->csp_auth_key == NULL)
+ return (false);
+ if (csp->csp_ivlen != AES_GCM_IV_LEN)
+ return (false);
+ break;
+ case CRYPTO_POLY1305:
+ if (csp->csp_auth_klen != POLY1305_KEY_LEN)
+ return (false);
+ break;
+ case CRYPTO_AES_CCM_CBC_MAC:
+ switch (csp->csp_auth_klen * 8) {
+ case 128:
+ case 192:
+ case 256:
+ break;
+ default:
+ return (false);
+ }
+ if (csp->csp_auth_key == NULL)
+ return (false);
+ if (csp->csp_ivlen != AES_CCM_IV_LEN)
+ return (false);
+ break;
+ }
+ return (true);
+}
+
+static bool
+swcr_cipher_supported(const struct crypto_session_params *csp)
+{
+ struct enc_xform *txf;
+
+ txf = crypto_cipher(csp);
+ if (txf == NULL)
+ return (false);
+ if (csp->csp_cipher_alg != CRYPTO_NULL_CBC &&
+ txf->ivsize != csp->csp_ivlen)
+ return (false);
+ return (true);
+}
+
+static int
+swcr_probesession(device_t dev, const struct crypto_session_params *csp)
+{
+
+ if (csp->csp_flags != 0)
+ return (EINVAL);
+ switch (csp->csp_mode) {
+ case CSP_MODE_COMPRESS:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_DEFLATE_COMP:
+ break;
+ default:
+ return (EINVAL);
+ }
+ break;
+ case CSP_MODE_CIPHER:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ return (EINVAL);
+ default:
+ if (!swcr_cipher_supported(csp))
+ return (EINVAL);
+ break;
+ }
+ break;
+ case CSP_MODE_DIGEST:
+ if (!swcr_auth_supported(csp))
+ return (EINVAL);
+ break;
+ case CSP_MODE_AEAD:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ break;
+ default:
+ return (EINVAL);
+ }
+ break;
+ case CSP_MODE_ETA:
+ /* AEAD algorithms cannot be used for EtA. */
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ return (EINVAL);
+ }
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_AES_NIST_GMAC:
+ case CRYPTO_AES_CCM_CBC_MAC:
+ return (EINVAL);
+ }
+
+ if (!swcr_cipher_supported(csp) ||
+ !swcr_auth_supported(csp))
+ return (EINVAL);
+ break;
+ default:
+ return (EINVAL);
+ }
+
+ return (CRYPTODEV_PROBE_SOFTWARE);
+}
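
probesession only vets the session parameters and reports a priority;
per-session state is not allocated until the framework has picked a winning
driver and calls its newsession method. A sketch of the hardware-driver
counterpart, assuming CRYPTODEV_PROBE_HARDWARE is the hardware analogue of the
CRYPTODEV_PROBE_SOFTWARE value returned above and mydrv_probesession is a
hypothetical driver method:

static int
mydrv_probesession(device_t dev, const struct crypto_session_params *csp)
{

	if (csp->csp_flags != 0)
		return (EINVAL);
	if (csp->csp_mode != CSP_MODE_CIPHER ||
	    csp->csp_cipher_alg != CRYPTO_AES_XTS)
		return (EINVAL);
	return (CRYPTODEV_PROBE_HARDWARE);
}
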
+
/*
* Generate a new software session.
*/
static int
-swcr_newsession(device_t dev, crypto_session_t cses, struct cryptoini *cri)
+swcr_newsession(device_t dev, crypto_session_t cses,
+ const struct crypto_session_params *csp)
{
struct swcr_session *ses;
- struct swcr_data *swd;
- struct auth_hash *axf;
- struct enc_xform *txf;
+ struct swcr_encdec *swe;
+ struct swcr_auth *swa;
struct comp_algo *cxf;
- size_t i;
- int len;
int error;
- if (cses == NULL || cri == NULL)
- return EINVAL;
-
ses = crypto_get_driver_session(cses);
mtx_init(&ses->swcr_lock, "swcr session lock", NULL, MTX_DEF);
- for (i = 0; cri != NULL && i < nitems(ses->swcr_algorithms); i++) {
- swd = &ses->swcr_algorithms[i];
-
- switch (cri->cri_alg) {
- case CRYPTO_DES_CBC:
- txf = &enc_xform_des;
- goto enccommon;
- case CRYPTO_3DES_CBC:
- txf = &enc_xform_3des;
- goto enccommon;
- case CRYPTO_BLF_CBC:
- txf = &enc_xform_blf;
- goto enccommon;
- case CRYPTO_CAST_CBC:
- txf = &enc_xform_cast5;
- goto enccommon;
- case CRYPTO_SKIPJACK_CBC:
- txf = &enc_xform_skipjack;
- goto enccommon;
- case CRYPTO_RIJNDAEL128_CBC:
- txf = &enc_xform_rijndael128;
- goto enccommon;
- case CRYPTO_AES_XTS:
- txf = &enc_xform_aes_xts;
- goto enccommon;
- case CRYPTO_AES_ICM:
- txf = &enc_xform_aes_icm;
- goto enccommon;
- case CRYPTO_AES_NIST_GCM_16:
- txf = &enc_xform_aes_nist_gcm;
- goto enccommon;
- case CRYPTO_AES_CCM_16:
- txf = &enc_xform_ccm;
- goto enccommon;
- case CRYPTO_AES_NIST_GMAC:
- txf = &enc_xform_aes_nist_gmac;
- swd->sw_exf = txf;
- break;
- case CRYPTO_CAMELLIA_CBC:
- txf = &enc_xform_camellia;
- goto enccommon;
- case CRYPTO_NULL_CBC:
- txf = &enc_xform_null;
- goto enccommon;
- case CRYPTO_CHACHA20:
- txf = &enc_xform_chacha20;
- goto enccommon;
- enccommon:
- if (cri->cri_key != NULL) {
- error = txf->setkey(&swd->sw_kschedule,
- cri->cri_key, cri->cri_klen / 8);
- if (error) {
- swcr_freesession(dev, cses);
- return error;
- }
- }
- swd->sw_exf = txf;
- break;
-
- case CRYPTO_MD5_HMAC:
- axf = &auth_hash_hmac_md5;
- goto authcommon;
- case CRYPTO_SHA1_HMAC:
- axf = &auth_hash_hmac_sha1;
- goto authcommon;
- case CRYPTO_SHA2_224_HMAC:
- axf = &auth_hash_hmac_sha2_224;
- goto authcommon;
- case CRYPTO_SHA2_256_HMAC:
- axf = &auth_hash_hmac_sha2_256;
- goto authcommon;
- case CRYPTO_SHA2_384_HMAC:
- axf = &auth_hash_hmac_sha2_384;
- goto authcommon;
- case CRYPTO_SHA2_512_HMAC:
- axf = &auth_hash_hmac_sha2_512;
- goto authcommon;
- case CRYPTO_NULL_HMAC:
- axf = &auth_hash_null;
- goto authcommon;
- case CRYPTO_RIPEMD160_HMAC:
- axf = &auth_hash_hmac_ripemd_160;
- authcommon:
- swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_ictx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
-
- swd->sw_octx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_octx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
-
- if (cri->cri_key != NULL) {
- error = swcr_authprepare(axf, swd,
- cri->cri_key, cri->cri_klen);
- if (error != 0) {
- swcr_freesession(dev, cses);
- return error;
- }
- }
-
- swd->sw_mlen = cri->cri_mlen;
- swd->sw_axf = axf;
- break;
-
- case CRYPTO_MD5_KPDK:
- axf = &auth_hash_key_md5;
- goto auth2common;
-
- case CRYPTO_SHA1_KPDK:
- axf = &auth_hash_key_sha1;
- auth2common:
- swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_ictx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
-
- swd->sw_octx = malloc(cri->cri_klen / 8,
- M_CRYPTO_DATA, M_NOWAIT);
- if (swd->sw_octx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
-
- /* Store the key so we can "append" it to the payload */
- if (cri->cri_key != NULL) {
- error = swcr_authprepare(axf, swd,
- cri->cri_key, cri->cri_klen);
- if (error != 0) {
- swcr_freesession(dev, cses);
- return error;
- }
- }
-
- swd->sw_mlen = cri->cri_mlen;
- swd->sw_axf = axf;
- break;
-#ifdef notdef
- case CRYPTO_MD5:
- axf = &auth_hash_md5;
- goto auth3common;
-#endif
-
- case CRYPTO_SHA1:
- axf = &auth_hash_sha1;
- goto auth3common;
- case CRYPTO_SHA2_224:
- axf = &auth_hash_sha2_224;
- goto auth3common;
- case CRYPTO_SHA2_256:
- axf = &auth_hash_sha2_256;
- goto auth3common;
- case CRYPTO_SHA2_384:
- axf = &auth_hash_sha2_384;
- goto auth3common;
- case CRYPTO_SHA2_512:
- axf = &auth_hash_sha2_512;
-
- auth3common:
- swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_ictx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
-
- axf->Init(swd->sw_ictx);
- swd->sw_mlen = cri->cri_mlen;
- swd->sw_axf = axf;
- break;
-
- case CRYPTO_AES_CCM_CBC_MAC:
- switch (cri->cri_klen) {
- case 128:
- axf = &auth_hash_ccm_cbc_mac_128;
- break;
- case 192:
- axf = &auth_hash_ccm_cbc_mac_192;
- break;
- case 256:
- axf = &auth_hash_ccm_cbc_mac_256;
- break;
- default:
- swcr_freesession(dev, cses);
- return EINVAL;
- }
- goto auth4common;
- case CRYPTO_AES_128_NIST_GMAC:
- axf = &auth_hash_nist_gmac_aes_128;
- goto auth4common;
-
- case CRYPTO_AES_192_NIST_GMAC:
- axf = &auth_hash_nist_gmac_aes_192;
- goto auth4common;
-
- case CRYPTO_AES_256_NIST_GMAC:
- axf = &auth_hash_nist_gmac_aes_256;
- auth4common:
- len = cri->cri_klen / 8;
- if (len != 16 && len != 24 && len != 32) {
- swcr_freesession(dev, cses);
- return EINVAL;
- }
-
- swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_ictx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
- axf->Init(swd->sw_ictx);
- axf->Setkey(swd->sw_ictx, cri->cri_key, len);
- swd->sw_axf = axf;
- break;
-
- case CRYPTO_BLAKE2B:
- axf = &auth_hash_blake2b;
- goto auth5common;
- case CRYPTO_BLAKE2S:
- axf = &auth_hash_blake2s;
- goto auth5common;
- case CRYPTO_POLY1305:
- axf = &auth_hash_poly1305;
- auth5common:
- swd->sw_ictx = malloc(axf->ctxsize, M_CRYPTO_DATA,
- M_NOWAIT);
- if (swd->sw_ictx == NULL) {
- swcr_freesession(dev, cses);
- return ENOBUFS;
- }
- axf->Setkey(swd->sw_ictx, cri->cri_key,
- cri->cri_klen / 8);
- axf->Init(swd->sw_ictx);
- swd->sw_axf = axf;
- break;
-
+ error = 0;
+ swe = &ses->swcr_encdec;
+ swa = &ses->swcr_auth;
+ switch (csp->csp_mode) {
+ case CSP_MODE_COMPRESS:
+ switch (csp->csp_cipher_alg) {
case CRYPTO_DEFLATE_COMP:
cxf = &comp_algo_deflate;
- swd->sw_cxf = cxf;
break;
+#ifdef INVARIANTS
default:
- swcr_freesession(dev, cses);
- return EINVAL;
+ panic("bad compression algo");
+#endif
}
-
- swd->sw_alg = cri->cri_alg;
- cri = cri->cri_next;
- ses->swcr_nalgs++;
- }
+ ses->swcr_compdec.sw_cxf = cxf;
+ ses->swcr_process = swcr_compdec;
+ break;
+ case CSP_MODE_CIPHER:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_NULL_CBC:
+ ses->swcr_process = swcr_null;
+ break;
+#ifdef INVARIANTS
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ panic("bad cipher algo");
+#endif
+ default:
+ error = swcr_setup_encdec(ses, csp);
+ if (error == 0)
+ ses->swcr_process = swcr_encdec;
+ }
+ break;
+ case CSP_MODE_DIGEST:
+ error = swcr_setup_auth(ses, csp);
+ break;
+ case CSP_MODE_AEAD:
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ error = swcr_setup_gcm(ses, csp);
+ if (error == 0)
+ ses->swcr_process = swcr_gcm;
+ break;
+ case CRYPTO_AES_CCM_16:
+ error = swcr_setup_ccm(ses, csp);
+ if (error == 0)
+ ses->swcr_process = swcr_ccm;
+ break;
+#ifdef INVARIANTS
+ default:
+ panic("bad aead algo");
+#endif
+ }
+ break;
+ case CSP_MODE_ETA:
+#ifdef INVARIANTS
+ switch (csp->csp_cipher_alg) {
+ case CRYPTO_AES_NIST_GCM_16:
+ case CRYPTO_AES_CCM_16:
+ panic("bad eta cipher algo");
+ }
+ switch (csp->csp_auth_alg) {
+ case CRYPTO_AES_NIST_GMAC:
+ case CRYPTO_AES_CCM_CBC_MAC:
+ panic("bad eta auth algo");
+ }
+#endif
- if (cri != NULL) {
- CRYPTDEB("Bogus session request for three or more algorithms");
- return EINVAL;
+ error = swcr_setup_auth(ses, csp);
+ if (error)
+ break;
+ if (csp->csp_cipher_alg == CRYPTO_NULL_CBC) {
+ /* Effectively degrade to digest mode. */
+ ses->swcr_process = swcr_authcompute;
+ break;
+ }
+
+ error = swcr_setup_encdec(ses, csp);
+ if (error == 0)
+ ses->swcr_process = swcr_eta;
+ break;
+ default:
+ error = EINVAL;
}
- return 0;
+
+ if (error)
+ swcr_freesession(dev, cses);
+ return (error);
}
static void
swcr_freesession(device_t dev, crypto_session_t cses)
{
struct swcr_session *ses;
- struct swcr_data *swd;
+ struct swcr_auth *swa;
struct enc_xform *txf;
struct auth_hash *axf;
- size_t i;
ses = crypto_get_driver_session(cses);
mtx_destroy(&ses->swcr_lock);
- for (i = 0; i < nitems(ses->swcr_algorithms); i++) {
- swd = &ses->swcr_algorithms[i];
- switch (swd->sw_alg) {
- case CRYPTO_DES_CBC:
- case CRYPTO_3DES_CBC:
- case CRYPTO_BLF_CBC:
- case CRYPTO_CAST_CBC:
- case CRYPTO_SKIPJACK_CBC:
- case CRYPTO_RIJNDAEL128_CBC:
- case CRYPTO_AES_XTS:
- case CRYPTO_AES_ICM:
- case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_NIST_GMAC:
- case CRYPTO_CAMELLIA_CBC:
- case CRYPTO_NULL_CBC:
- case CRYPTO_CHACHA20:
- case CRYPTO_AES_CCM_16:
- txf = swd->sw_exf;
-
- if (swd->sw_kschedule)
- txf->zerokey(&(swd->sw_kschedule));
- break;
-
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_224_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- case CRYPTO_RIPEMD160_HMAC:
- case CRYPTO_NULL_HMAC:
- case CRYPTO_AES_CCM_CBC_MAC:
- axf = swd->sw_axf;
-
- if (swd->sw_ictx) {
- bzero(swd->sw_ictx, axf->ctxsize);
- free(swd->sw_ictx, M_CRYPTO_DATA);
- }
- if (swd->sw_octx) {
- bzero(swd->sw_octx, axf->ctxsize);
- free(swd->sw_octx, M_CRYPTO_DATA);
- }
- break;
-
- case CRYPTO_MD5_KPDK:
- case CRYPTO_SHA1_KPDK:
- axf = swd->sw_axf;
-
- if (swd->sw_ictx) {
- bzero(swd->sw_ictx, axf->ctxsize);
- free(swd->sw_ictx, M_CRYPTO_DATA);
- }
- if (swd->sw_octx) {
- bzero(swd->sw_octx, swd->sw_klen);
- free(swd->sw_octx, M_CRYPTO_DATA);
- }
- break;
-
- case CRYPTO_BLAKE2B:
- case CRYPTO_BLAKE2S:
- case CRYPTO_MD5:
- case CRYPTO_POLY1305:
- case CRYPTO_SHA1:
- case CRYPTO_SHA2_224:
- case CRYPTO_SHA2_256:
- case CRYPTO_SHA2_384:
- case CRYPTO_SHA2_512:
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- axf = swd->sw_axf;
-
- if (swd->sw_ictx) {
- explicit_bzero(swd->sw_ictx, axf->ctxsize);
- free(swd->sw_ictx, M_CRYPTO_DATA);
- }
- break;
-
- case CRYPTO_DEFLATE_COMP:
- /* Nothing to do */
- break;
+ txf = ses->swcr_encdec.sw_exf;
+ if (txf != NULL) {
+ if (ses->swcr_encdec.sw_kschedule != NULL)
+ txf->zerokey(&(ses->swcr_encdec.sw_kschedule));
+ }
+
+ axf = ses->swcr_auth.sw_axf;
+ if (axf != NULL) {
+ swa = &ses->swcr_auth;
+ if (swa->sw_ictx != NULL) {
+ explicit_bzero(swa->sw_ictx, axf->ctxsize);
+ free(swa->sw_ictx, M_CRYPTO_DATA);
+ }
+ if (swa->sw_octx != NULL) {
+ explicit_bzero(swa->sw_octx, swa->sw_octx_len);
+ free(swa->sw_octx, M_CRYPTO_DATA);
}
}
}
@@ -1191,117 +1435,16 @@
static int
swcr_process(device_t dev, struct cryptop *crp, int hint)
{
- struct swcr_session *ses = NULL;
- struct cryptodesc *crd;
- struct swcr_data *sw;
- size_t i;
-
- /* Sanity check */
- if (crp == NULL)
- return EINVAL;
-
- if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
- crp->crp_etype = EINVAL;
- goto done;
- }
+ struct swcr_session *ses;
ses = crypto_get_driver_session(crp->crp_session);
mtx_lock(&ses->swcr_lock);
- /* Go through crypto descriptors, processing as we go */
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- /*
- * Find the crypto context.
- *
- * XXX Note that the logic here prevents us from having
- * XXX the same algorithm multiple times in a session
- * XXX (or rather, we can but it won't give us the right
- * XXX results). To do that, we'd need some way of differentiating
- * XXX between the various instances of an algorithm (so we can
- * XXX locate the correct crypto context).
- */
- for (i = 0; i < nitems(ses->swcr_algorithms) &&
- ses->swcr_algorithms[i].sw_alg != crd->crd_alg; i++)
- ;
+ crp->crp_etype = ses->swcr_process(ses, crp);
- /* No such context ? */
- if (i == nitems(ses->swcr_algorithms)) {
- crp->crp_etype = EINVAL;
- goto done;
- }
- sw = &ses->swcr_algorithms[i];
- switch (sw->sw_alg) {
- case CRYPTO_DES_CBC:
- case CRYPTO_3DES_CBC:
- case CRYPTO_BLF_CBC:
- case CRYPTO_CAST_CBC:
- case CRYPTO_SKIPJACK_CBC:
- case CRYPTO_RIJNDAEL128_CBC:
- case CRYPTO_AES_XTS:
- case CRYPTO_AES_ICM:
- case CRYPTO_CAMELLIA_CBC:
- case CRYPTO_CHACHA20:
- if ((crp->crp_etype = swcr_encdec(crd, sw,
- crp->crp_buf, crp->crp_flags)) != 0)
- goto done;
- break;
- case CRYPTO_NULL_CBC:
- crp->crp_etype = 0;
- break;
- case CRYPTO_MD5_HMAC:
- case CRYPTO_SHA1_HMAC:
- case CRYPTO_SHA2_224_HMAC:
- case CRYPTO_SHA2_256_HMAC:
- case CRYPTO_SHA2_384_HMAC:
- case CRYPTO_SHA2_512_HMAC:
- case CRYPTO_RIPEMD160_HMAC:
- case CRYPTO_NULL_HMAC:
- case CRYPTO_MD5_KPDK:
- case CRYPTO_SHA1_KPDK:
- case CRYPTO_MD5:
- case CRYPTO_SHA1:
- case CRYPTO_SHA2_224:
- case CRYPTO_SHA2_256:
- case CRYPTO_SHA2_384:
- case CRYPTO_SHA2_512:
- case CRYPTO_BLAKE2B:
- case CRYPTO_BLAKE2S:
- case CRYPTO_POLY1305:
- if ((crp->crp_etype = swcr_authcompute(crd, sw,
- crp->crp_buf, crp->crp_flags)) != 0)
- goto done;
- break;
-
- case CRYPTO_AES_NIST_GCM_16:
- case CRYPTO_AES_NIST_GMAC:
- case CRYPTO_AES_128_NIST_GMAC:
- case CRYPTO_AES_192_NIST_GMAC:
- case CRYPTO_AES_256_NIST_GMAC:
- case CRYPTO_AES_CCM_16:
- case CRYPTO_AES_CCM_CBC_MAC:
- crp->crp_etype = swcr_authenc(crp);
- goto done;
-
- case CRYPTO_DEFLATE_COMP:
- if ((crp->crp_etype = swcr_compdec(crd, sw,
- crp->crp_buf, crp->crp_flags)) != 0)
- goto done;
- else
- crp->crp_olen = (int)sw->sw_size;
- break;
-
- default:
- /* Unknown/unsupported algorithm */
- crp->crp_etype = EINVAL;
- goto done;
- }
- }
-
-done:
- if (ses)
- mtx_unlock(&ses->swcr_lock);
+ mtx_unlock(&ses->swcr_lock);
crypto_done(crp);
- return 0;
+ return (0);
}
static void
@@ -1323,58 +1466,15 @@
static int
swcr_attach(device_t dev)
{
- memset(hmac_ipad_buffer, HMAC_IPAD_VAL, HMAC_MAX_BLOCK_LEN);
- memset(hmac_opad_buffer, HMAC_OPAD_VAL, HMAC_MAX_BLOCK_LEN);
swcr_id = crypto_get_driverid(dev, sizeof(struct swcr_session),
CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
if (swcr_id < 0) {
		device_printf(dev, "cannot initialize!\n");
- return ENOMEM;
+ return (ENXIO);
}
-#define REGISTER(alg) \
- crypto_register(swcr_id, alg, 0,0)
- REGISTER(CRYPTO_DES_CBC);
- REGISTER(CRYPTO_3DES_CBC);
- REGISTER(CRYPTO_BLF_CBC);
- REGISTER(CRYPTO_CAST_CBC);
- REGISTER(CRYPTO_SKIPJACK_CBC);
- REGISTER(CRYPTO_NULL_CBC);
- REGISTER(CRYPTO_MD5_HMAC);
- REGISTER(CRYPTO_SHA1_HMAC);
- REGISTER(CRYPTO_SHA2_224_HMAC);
- REGISTER(CRYPTO_SHA2_256_HMAC);
- REGISTER(CRYPTO_SHA2_384_HMAC);
- REGISTER(CRYPTO_SHA2_512_HMAC);
- REGISTER(CRYPTO_RIPEMD160_HMAC);
- REGISTER(CRYPTO_NULL_HMAC);
- REGISTER(CRYPTO_MD5_KPDK);
- REGISTER(CRYPTO_SHA1_KPDK);
- REGISTER(CRYPTO_MD5);
- REGISTER(CRYPTO_SHA1);
- REGISTER(CRYPTO_SHA2_224);
- REGISTER(CRYPTO_SHA2_256);
- REGISTER(CRYPTO_SHA2_384);
- REGISTER(CRYPTO_SHA2_512);
- REGISTER(CRYPTO_RIJNDAEL128_CBC);
- REGISTER(CRYPTO_AES_XTS);
- REGISTER(CRYPTO_AES_ICM);
- REGISTER(CRYPTO_AES_NIST_GCM_16);
- REGISTER(CRYPTO_AES_NIST_GMAC);
- REGISTER(CRYPTO_AES_128_NIST_GMAC);
- REGISTER(CRYPTO_AES_192_NIST_GMAC);
- REGISTER(CRYPTO_AES_256_NIST_GMAC);
- REGISTER(CRYPTO_CAMELLIA_CBC);
- REGISTER(CRYPTO_DEFLATE_COMP);
- REGISTER(CRYPTO_BLAKE2B);
- REGISTER(CRYPTO_BLAKE2S);
- REGISTER(CRYPTO_CHACHA20);
- REGISTER(CRYPTO_AES_CCM_16);
- REGISTER(CRYPTO_AES_CCM_CBC_MAC);
- REGISTER(CRYPTO_POLY1305);
-#undef REGISTER
- return 0;
+ return (0);
}
static int
@@ -1390,6 +1490,7 @@
DEVMETHOD(device_attach, swcr_attach),
DEVMETHOD(device_detach, swcr_detach),
+ DEVMETHOD(cryptodev_probesession, swcr_probesession),
DEVMETHOD(cryptodev_newsession, swcr_newsession),
DEVMETHOD(cryptodev_freesession,swcr_freesession),
DEVMETHOD(cryptodev_process, swcr_process),
Index: sys/opencrypto/ktls_ocf.c
===================================================================
--- sys/opencrypto/ktls_ocf.c
+++ sys/opencrypto/ktls_ocf.c
@@ -45,7 +45,6 @@
struct ocf_session {
crypto_session_t sid;
- int crda_alg;
struct mtx lock;
};
@@ -100,8 +99,6 @@
{
struct uio uio;
struct tls_aead_data ad;
- struct tls_nonce_data nd;
- struct cryptodesc *crde, *crda;
struct cryptop *crp;
struct ocf_session *os;
struct ocf_operation *oo;
@@ -116,19 +113,15 @@
oo->os = os;
iov = oo->iov;
- crp = crypto_getreq(2);
- if (crp == NULL) {
- free(oo, M_KTLS_OCF);
- return (ENOMEM);
- }
+ crp = crypto_getreq(os->sid, M_WAITOK);
/* Setup the IV. */
- memcpy(nd.fixed, tls->params.iv, TLS_AEAD_GCM_LEN);
- memcpy(&nd.seq, hdr + 1, sizeof(nd.seq));
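+	/*
+	 * The TLS 1.2 GCM nonce is the 4-byte implicit salt from the
+	 * key block followed by the record's 8-byte explicit nonce.
+	 */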
+ memcpy(crp->crp_iv, tls->params.iv, TLS_AEAD_GCM_LEN);
+ memcpy(crp->crp_iv + TLS_AEAD_GCM_LEN, hdr + 1, sizeof(uint64_t));
/* Setup the AAD. */
tls_comp_len = ntohs(hdr->tls_length) -
- (AES_GMAC_HASH_LEN + sizeof(nd.seq));
+ (AES_GMAC_HASH_LEN + sizeof(uint64_t));
ad.seq = htobe64(seqno);
ad.type = hdr->tls_type;
ad.tls_vmajor = hdr->tls_vmajor;
@@ -160,26 +153,20 @@
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_td = curthread;
- crp->crp_session = os->sid;
- crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM;
+ crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
+ crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
+ crp->crp_buf_type = CRYPTO_BUF_UIO;
crp->crp_uio = &uio;
crp->crp_ilen = uio.uio_resid;
crp->crp_opaque = oo;
crp->crp_callback = ktls_ocf_callback;
- crde = crp->crp_desc;
- crda = crde->crd_next;
-
- crda->crd_alg = os->crda_alg;
- crda->crd_skip = 0;
- crda->crd_len = sizeof(ad);
- crda->crd_inject = crp->crp_ilen - AES_GMAC_HASH_LEN;
-
- crde->crd_alg = CRYPTO_AES_NIST_GCM_16;
- crde->crd_skip = sizeof(ad);
- crde->crd_len = crp->crp_ilen - (sizeof(ad) + AES_GMAC_HASH_LEN);
- crde->crd_flags = CRD_F_ENCRYPT | CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
- memcpy(crde->crd_iv, &nd, sizeof(nd));
+ crp->crp_aad_start = 0;
+ crp->crp_aad_length = sizeof(ad);
+ crp->crp_payload_start = sizeof(ad);
+ crp->crp_payload_length = crp->crp_ilen -
+ (sizeof(ad) + AES_GMAC_HASH_LEN);
+ crp->crp_digest_start = crp->crp_ilen - AES_GMAC_HASH_LEN;
counter_u64_add(ocf_tls12_gcm_crypts, 1);
for (;;) {
@@ -216,7 +203,6 @@
struct uio uio;
struct tls_aead_data_13 ad;
char nonce[12];
- struct cryptodesc *crde, *crda;
struct cryptop *crp;
struct ocf_session *os;
struct ocf_operation *oo;
@@ -230,11 +216,7 @@
oo->os = os;
iov = oo->iov;
- crp = crypto_getreq(2);
- if (crp == NULL) {
- free(oo, M_KTLS_OCF);
- return (ENOMEM);
- }
+ crp = crypto_getreq(os->sid, M_WAITOK);
/* Setup the nonce. */
memcpy(nonce, tls->params.iv, tls->params.iv_len);
@@ -272,26 +254,21 @@
uio.uio_segflg = UIO_SYSSPACE;
uio.uio_td = curthread;
- crp->crp_session = os->sid;
- crp->crp_flags = CRYPTO_F_IOV | CRYPTO_F_CBIMM;
+ crp->crp_op = CRYPTO_OP_ENCRYPT | CRYPTO_OP_COMPUTE_DIGEST;
+ crp->crp_flags = CRYPTO_F_CBIMM | CRYPTO_F_IV_SEPARATE;
+ crp->crp_buf_type = CRYPTO_BUF_UIO;
crp->crp_uio = &uio;
crp->crp_ilen = uio.uio_resid;
crp->crp_opaque = oo;
crp->crp_callback = ktls_ocf_callback;
- crde = crp->crp_desc;
- crda = crde->crd_next;
-
- crda->crd_alg = os->crda_alg;
- crda->crd_skip = 0;
- crda->crd_len = sizeof(ad);
- crda->crd_inject = crp->crp_ilen - AES_GMAC_HASH_LEN;
-
- crde->crd_alg = CRYPTO_AES_NIST_GCM_16;
- crde->crd_skip = sizeof(ad);
- crde->crd_len = crp->crp_ilen - (sizeof(ad) + AES_GMAC_HASH_LEN);
- crde->crd_flags = CRD_F_ENCRYPT | CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
- memcpy(crde->crd_iv, nonce, sizeof(nonce));
+ crp->crp_aad_start = 0;
+ crp->crp_aad_length = sizeof(ad);
+ crp->crp_payload_start = sizeof(ad);
+ crp->crp_payload_length = crp->crp_ilen -
+ (sizeof(ad) + AES_GMAC_HASH_LEN);
+ crp->crp_digest_start = crp->crp_ilen - AES_GMAC_HASH_LEN;
+ memcpy(crp->crp_iv, nonce, sizeof(nonce));
counter_u64_add(ocf_tls13_gcm_crypts, 1);
for (;;) {
@@ -326,6 +303,7 @@
struct ocf_session *os;
os = tls->cipher;
+ crypto_freesession(os->sid);
mtx_destroy(&os->lock);
explicit_bzero(os, sizeof(*os));
free(os, M_KTLS_OCF);
@@ -334,27 +312,26 @@
static int
ktls_ocf_try(struct socket *so, struct ktls_session *tls)
{
- struct cryptoini cria, crie;
+ struct crypto_session_params csp;
struct ocf_session *os;
int error;
- memset(&cria, 0, sizeof(cria));
- memset(&crie, 0, sizeof(crie));
+ memset(&csp, 0, sizeof(csp));
switch (tls->params.cipher_algorithm) {
case CRYPTO_AES_NIST_GCM_16:
switch (tls->params.cipher_key_len) {
case 128 / 8:
- cria.cri_alg = CRYPTO_AES_128_NIST_GMAC;
- break;
case 256 / 8:
- cria.cri_alg = CRYPTO_AES_256_NIST_GMAC;
break;
default:
return (EINVAL);
}
- cria.cri_key = tls->params.cipher_key;
- cria.cri_klen = tls->params.cipher_key_len * 8;
+ csp.csp_mode = CSP_MODE_AEAD;
+ csp.csp_cipher_alg = CRYPTO_AES_NIST_GCM_16;
+ csp.csp_cipher_key = tls->params.cipher_key;
+ csp.csp_cipher_klen = tls->params.cipher_key_len;
+ csp.csp_ivlen = AES_GCM_IV_LEN;
break;
default:
return (EPROTONOSUPPORT);
@@ -370,19 +347,13 @@
if (os == NULL)
return (ENOMEM);
- crie.cri_alg = tls->params.cipher_algorithm;
- crie.cri_key = tls->params.cipher_key;
- crie.cri_klen = tls->params.cipher_key_len * 8;
-
- crie.cri_next = &cria;
- error = crypto_newsession(&os->sid, &crie,
+ error = crypto_newsession(&os->sid, &csp,
CRYPTO_FLAG_HARDWARE | CRYPTO_FLAG_SOFTWARE);
if (error) {
free(os, M_KTLS_OCF);
return (error);
}
- os->crda_alg = cria.cri_alg;
mtx_init(&os->lock, "ktls_ocf", NULL, MTX_DEF);
tls->cipher = os;
if (tls->params.tls_vminor == TLS_MINOR_VER_THREE)
Index: sys/opencrypto/xform_gmac.c
===================================================================
--- sys/opencrypto/xform_gmac.c
+++ sys/opencrypto/xform_gmac.c
@@ -66,7 +66,7 @@
/* Authentication instances */
struct auth_hash auth_hash_nist_gmac_aes_128 = {
- CRYPTO_AES_128_NIST_GMAC, "GMAC-AES-128",
+ CRYPTO_AES_NIST_GMAC, "GMAC-AES-128",
AES_128_GMAC_KEY_LEN, AES_GMAC_HASH_LEN, sizeof(struct aes_gmac_ctx),
GMAC_BLOCK_LEN,
(void (*)(void *)) AES_GMAC_Init,
@@ -77,7 +77,7 @@
};
struct auth_hash auth_hash_nist_gmac_aes_192 = {
- CRYPTO_AES_192_NIST_GMAC, "GMAC-AES-192",
+ CRYPTO_AES_NIST_GMAC, "GMAC-AES-192",
AES_192_GMAC_KEY_LEN, AES_GMAC_HASH_LEN, sizeof(struct aes_gmac_ctx),
GMAC_BLOCK_LEN,
(void (*)(void *)) AES_GMAC_Init,
@@ -88,7 +88,7 @@
};
struct auth_hash auth_hash_nist_gmac_aes_256 = {
- CRYPTO_AES_256_NIST_GMAC, "GMAC-AES-256",
+ CRYPTO_AES_NIST_GMAC, "GMAC-AES-256",
AES_256_GMAC_KEY_LEN, AES_GMAC_HASH_LEN, sizeof(struct aes_gmac_ctx),
GMAC_BLOCK_LEN,
(void (*)(void *)) AES_GMAC_Init,
Index: sys/sys/bus_dma.h
===================================================================
--- sys/sys/bus_dma.h
+++ sys/sys/bus_dma.h
@@ -111,6 +111,7 @@
/* Forwards needed by prototypes below. */
union ccb;
struct bio;
+struct cryptop;
struct mbuf;
struct memdesc;
struct pmap;
@@ -263,6 +264,13 @@
bus_dmamap_callback_t *callback, void *callback_arg,
int flags);
+/*
+ * Like bus_dmamap_load but for crypto ops.
+ */
+int bus_dmamap_load_crp(bus_dma_tag_t dmat, bus_dmamap_t map,
+ struct cryptop *crp, bus_dmamap_callback_t *callback,
+ void *callback_arg, int flags);
+
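
A sketch of the intended use from a driver's processing path (the softc
fields, map, and callback names are hypothetical); the point of the new loader
is that drivers no longer have to switch on crp_buf_type before mapping:

	error = bus_dmamap_load_crp(sc->sc_dmat, s->s_dmamap, crp,
	    mydrv_dma_cb, s, BUS_DMA_NOWAIT);
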
/*
* Loads any memory descriptor.
*/
Index: tests/sys/opencrypto/cryptodev.py
===================================================================
--- tests/sys/opencrypto/cryptodev.py
+++ tests/sys/opencrypto/cryptodev.py
@@ -643,8 +643,7 @@
tag = _spdechex('0032a1dc85f1c9786925a2e71d8272dd')
tag = _spdechex('8d11a0929cb3fbe1fef01a4a38d5f8ea')
- c = Crypto(CRYPTO_AES_NIST_GCM_16, key,
- mac=CRYPTO_AES_128_NIST_GMAC, mackey=key)
+ c = Crypto(CRYPTO_AES_NIST_GCM_16, key)
enc, enctag = c.encrypt(pt, iv, aad=aad)
@@ -680,7 +679,7 @@
ct = _spdechex('93fe7d9e9bfd10348a5606e5cafa7354')
tag = _spdechex('0032a1dc85f1c9786925a2e71d8272dd')
- c = Crypto(CRYPTO_AES_GCM_16, key, mac=CRYPTO_AES_128_GMAC, mackey=key)
+ c = Crypto(CRYPTO_AES_GCM_16, key)
enc, enctag = c.encrypt(pt, iv, aad=aad)
Index: tests/sys/opencrypto/cryptodevh.py
===================================================================
--- tests/sys/opencrypto/cryptodevh.py
+++ tests/sys/opencrypto/cryptodevh.py
@@ -194,9 +194,6 @@
CRYPTO_AES_ICM = 23
CRYPTO_AES_NIST_GMAC = 24
CRYPTO_AES_NIST_GCM_16 = 25
-CRYPTO_AES_128_NIST_GMAC = 26
-CRYPTO_AES_192_NIST_GMAC = 27
-CRYPTO_AES_256_NIST_GMAC = 28
CRYPTO_BLAKE2B = 29
CRYPTO_BLAKE2S = 30
CRYPTO_CHACHA20 = 31
Index: tests/sys/opencrypto/cryptotest.py
===================================================================
--- tests/sys/opencrypto/cryptotest.py
+++ tests/sys/opencrypto/cryptotest.py
@@ -90,10 +90,6 @@
for i in katg('gcmtestvectors', 'gcmDecrypt*'):
self.runGCM(i, 'DECRYPT')
- _gmacsizes = { 32: cryptodev.CRYPTO_AES_256_NIST_GMAC,
- 24: cryptodev.CRYPTO_AES_192_NIST_GMAC,
- 16: cryptodev.CRYPTO_AES_128_NIST_GMAC,
- }
def runGCM(self, fname, mode):
curfun = None
if mode == 'ENCRYPT':
@@ -127,9 +123,7 @@
try:
c = Crypto(cryptodev.CRYPTO_AES_NIST_GCM_16,
- cipherkey,
- mac=self._gmacsizes[len(cipherkey)],
- mackey=cipherkey, crid=crid,
+ cipherkey, crid=crid,
maclen=16)
except EnvironmentError as e:
# Can't test algorithms the driver does not support.
Index: tools/tools/crypto/cryptocheck.c
===================================================================
--- tools/tools/crypto/cryptocheck.c
+++ tools/tools/crypto/cryptocheck.c
@@ -95,6 +95,9 @@
* sha256hmac 256-bit SHA-2 HMAC
* sha384hmac 384-bit SHA-2 HMAC
* sha512hmac 512-bit SHA-2 HMAC
+ * gmac 128-bit GMAC
+ * gmac192 192-bit GMAC
+ * gmac256 256-bit GMAC
*
* Ciphers:
* aes-cbc 128-bit AES-CBC
@@ -145,7 +148,7 @@
const char *name;
int cipher;
int mac;
- enum { T_HASH, T_HMAC, T_CIPHER, T_ETA, T_AEAD } type;
+ enum { T_HASH, T_HMAC, T_GMAC, T_CIPHER, T_ETA, T_AEAD } type;
const EVP_CIPHER *(*evp_cipher)(void);
const EVP_MD *(*evp_md)(void);
} algs[] = {
@@ -173,6 +176,12 @@
.evp_md = EVP_blake2b512 },
{ .name = "blake2s", .mac = CRYPTO_BLAKE2S, .type = T_HASH,
.evp_md = EVP_blake2s256 },
+ { .name = "gmac", .mac = CRYPTO_AES_NIST_GMAC, .type = T_GMAC,
+ .evp_cipher = EVP_aes_128_gcm },
+ { .name = "gmac192", .mac = CRYPTO_AES_NIST_GMAC, .type = T_GMAC,
+ .evp_cipher = EVP_aes_192_gcm },
+ { .name = "gmac256", .mac = CRYPTO_AES_NIST_GMAC, .type = T_GMAC,
+ .evp_cipher = EVP_aes_256_gcm },
{ .name = "aes-cbc", .cipher = CRYPTO_AES_CBC, .type = T_CIPHER,
.evp_cipher = EVP_aes_128_cbc },
{ .name = "aes-cbc192", .cipher = CRYPTO_AES_CBC, .type = T_CIPHER,
@@ -191,23 +200,17 @@
.evp_cipher = EVP_aes_256_xts },
{ .name = "chacha20", .cipher = CRYPTO_CHACHA20, .type = T_CIPHER,
.evp_cipher = EVP_chacha20 },
- { .name = "aes-gcm", .cipher = CRYPTO_AES_NIST_GCM_16,
- .mac = CRYPTO_AES_128_NIST_GMAC, .type = T_AEAD,
+ { .name = "aes-gcm", .cipher = CRYPTO_AES_NIST_GCM_16, .type = T_AEAD,
.evp_cipher = EVP_aes_128_gcm },
{ .name = "aes-gcm192", .cipher = CRYPTO_AES_NIST_GCM_16,
- .mac = CRYPTO_AES_192_NIST_GMAC, .type = T_AEAD,
- .evp_cipher = EVP_aes_192_gcm },
+ .type = T_AEAD, .evp_cipher = EVP_aes_192_gcm },
{ .name = "aes-gcm256", .cipher = CRYPTO_AES_NIST_GCM_16,
- .mac = CRYPTO_AES_256_NIST_GMAC, .type = T_AEAD,
- .evp_cipher = EVP_aes_256_gcm },
- { .name = "aes-ccm", .cipher = CRYPTO_AES_CCM_16,
- .mac = CRYPTO_AES_CCM_CBC_MAC, .type = T_AEAD,
+ .type = T_AEAD, .evp_cipher = EVP_aes_256_gcm },
+ { .name = "aes-ccm", .cipher = CRYPTO_AES_CCM_16, .type = T_AEAD,
.evp_cipher = EVP_aes_128_ccm },
- { .name = "aes-ccm192", .cipher = CRYPTO_AES_CCM_16,
- .mac = CRYPTO_AES_CCM_CBC_MAC, .type = T_AEAD,
+ { .name = "aes-ccm192", .cipher = CRYPTO_AES_CCM_16, .type = T_AEAD,
.evp_cipher = EVP_aes_192_ccm },
- { .name = "aes-ccm256", .cipher = CRYPTO_AES_CCM_16,
- .mac = CRYPTO_AES_CCM_CBC_MAC, .type = T_AEAD,
+ { .name = "aes-ccm256", .cipher = CRYPTO_AES_CCM_16, .type = T_AEAD,
.evp_cipher = EVP_aes_256_ccm },
};
@@ -832,7 +835,7 @@
return (ocf_init_session(&sop, "ETA", alg->name, ses));
}
-static bool
+static int
ocf_eta(const struct ocf_session *ses, const struct alg *alg, const char *iv,
size_t iv_len, const char *aad, size_t aad_len, const char *input,
char *output, size_t size, char *digest, int op)
@@ -844,7 +847,6 @@
ocf_init_caead(ses, &caead);
caead.op = op;
- caead.flags = op == COP_ENCRYPT ? COP_F_CIPHER_FIRST : 0;
caead.len = size;
caead.aadlen = aad_len;
caead.ivlen = iv_len;
@@ -860,7 +862,6 @@
ocf_init_cop(ses, &cop);
cop.op = op;
- cop.flags = op == COP_ENCRYPT ? COP_F_CIPHER_FIRST : 0;
cop.len = size;
cop.src = (char *)input;
cop.dst = output;
@@ -870,13 +871,9 @@
ret = ioctl(ses->fd, CIOCCRYPT, &cop);
}
- if (ret < 0) {
- warn("cryptodev %s (%zu) ETA failed for device %s",
- alg->name, size, crfind(crid));
- return (false);
- }
-
- return (true);
+ if (ret < 0)
+ return (errno);
+ return (0);
}
static void
@@ -887,7 +884,8 @@
const EVP_MD *md;
char *aad, *buffer, *cleartext, *ciphertext;
char *iv, *auth_key, *cipher_key;
- u_int i, iv_len, auth_key_len, cipher_key_len, digest_len;
+ u_int iv_len, auth_key_len, cipher_key_len, digest_len;
+ int error;
char control_digest[EVP_MAX_MD_SIZE], test_digest[EVP_MAX_MD_SIZE];
cipher = alg->evp_cipher();
@@ -935,10 +933,14 @@
goto out;
/* OCF encrypt + HMAC. */
- if (!ocf_eta(&ses, alg, iv, iv_len,
+ error = ocf_eta(&ses, alg, iv, iv_len,
aad_len != 0 ? cleartext : NULL, aad_len, cleartext + aad_len,
- buffer + aad_len, size, test_digest, COP_ENCRYPT))
+ buffer + aad_len, size, test_digest, COP_ENCRYPT);
+ if (error != 0) {
+ warnc(error, "cryptodev %s (%zu) ETA failed for device %s",
+ alg->name, size, crfind(ses.crid));
goto out;
+ }
if (memcmp(ciphertext + aad_len, buffer + aad_len, size) != 0) {
printf("%s (%zu) encryption mismatch:\n", alg->name, size);
printf("control:\n");
@@ -962,10 +964,14 @@
}
/* OCF HMAC + decrypt. */
- if (!ocf_eta(&ses, alg, iv, iv_len,
+ error = ocf_eta(&ses, alg, iv, iv_len,
aad_len != 0 ? ciphertext : NULL, aad_len, ciphertext + aad_len,
- buffer + aad_len, size, test_digest, COP_DECRYPT))
+ buffer + aad_len, size, test_digest, COP_DECRYPT);
+ if (error != 0) {
+ warnc(error, "cryptodev %s (%zu) ETA failed for device %s",
+ alg->name, size, crfind(ses.crid));
goto out;
+ }
if (memcmp(cleartext + aad_len, buffer + aad_len, size) != 0) {
printf("%s (%zu) decryption mismatch:\n", alg->name, size);
printf("control:\n");
@@ -975,6 +981,23 @@
goto out;
}
+ /* Verify OCF HMAC + decrypt fails with busted MAC. */
+ test_digest[0] ^= 0x1;
+ error = ocf_eta(&ses, alg, iv, iv_len,
+ aad_len != 0 ? ciphertext : NULL, aad_len, ciphertext + aad_len,
+ buffer + aad_len, size, test_digest, COP_DECRYPT);
+ if (error != EBADMSG) {
+ if (error != 0)
+ warnc(error,
+ "cryptodev %s (%zu) corrupt tag failed for device %s",
+ alg->name, size, crfind(ses.crid));
+ else
+ warnx(
+ "cryptodev %s (%zu) corrupt tag didn't fail for device %s",
+ alg->name, size, crfind(ses.crid));
+ goto out;
+ }
+
if (verbose)
printf("%s (%zu) matched (cryptodev device %s)\n",
alg->name, size, crfind(ses.crid));
@@ -989,6 +1012,115 @@
free(cipher_key);
}
+static void
+openssl_gmac(const struct alg *alg, const EVP_CIPHER *cipher, const char *key,
+ const char *iv, const char *input, size_t size, char *tag)
+{
+ EVP_CIPHER_CTX *ctx;
+ int outl;
+
+ ctx = EVP_CIPHER_CTX_new();
+ if (ctx == NULL)
+ errx(1, "OpenSSL %s (%zu) ctx new failed: %s", alg->name,
+ size, ERR_error_string(ERR_get_error(), NULL));
+ if (EVP_EncryptInit_ex(ctx, cipher, NULL, (const u_char *)key,
+ (const u_char *)iv) != 1)
+ errx(1, "OpenSSL %s (%zu) ctx init failed: %s", alg->name,
+ size, ERR_error_string(ERR_get_error(), NULL));
+ EVP_CIPHER_CTX_set_padding(ctx, 0);
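+	/* Passing a NULL output buffer marks the input as AAD; GMAC
+	 * authenticates AAD only, producing a tag and no ciphertext. */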
+ if (EVP_EncryptUpdate(ctx, NULL, &outl, (const u_char *)input,
+ size) != 1)
+ errx(1, "OpenSSL %s (%zu) update failed: %s",
+ alg->name, size, ERR_error_string(ERR_get_error(), NULL));
+ if (EVP_EncryptFinal_ex(ctx, NULL, &outl) != 1)
+ errx(1, "OpenSSL %s (%zu) final failed: %s", alg->name,
+ size, ERR_error_string(ERR_get_error(), NULL));
+ if (EVP_CIPHER_CTX_ctrl(ctx, EVP_CTRL_GCM_GET_TAG, AES_GMAC_HASH_LEN,
+ tag) != 1)
+ errx(1, "OpenSSL %s (%zu) get tag failed: %s", alg->name,
+ size, ERR_error_string(ERR_get_error(), NULL));
+ EVP_CIPHER_CTX_free(ctx);
+}
+
+static bool
+ocf_gmac(const struct alg *alg, const char *input, size_t size, const char *key,
+ size_t key_len, const char *iv, char *tag, int *cridp)
+{
+ struct ocf_session ses;
+ struct session2_op sop;
+ struct crypt_op cop;
+
+ ocf_init_sop(&sop);
+ sop.mackeylen = key_len;
+ sop.mackey = (char *)key;
+ sop.mac = alg->mac;
+ if (!ocf_init_session(&sop, "GMAC", alg->name, &ses))
+ return (false);
+
+ ocf_init_cop(&ses, &cop);
+ cop.op = 0;
+ cop.len = size;
+ cop.src = (char *)input;
+ cop.mac = tag;
+ cop.iv = iv;
+
+ if (ioctl(ses.fd, CIOCCRYPT, &cop) < 0) {
+ warn("cryptodev %s (%zu) failed for device %s", alg->name,
+ size, crfind(crid));
+ ocf_destroy_session(&ses);
+ return (false);
+ }
+
+ *cridp = ses.crid;
+ ocf_destroy_session(&ses);
+ return (true);
+}
+
+static void
+run_gmac_test(const struct alg *alg, size_t size)
+{
+ const EVP_CIPHER *cipher;
+ char *iv, *key, *buffer;
+ u_int iv_len, key_len, digest_len;
+ int crid;
+ char control_tag[AES_GMAC_HASH_LEN], test_tag[AES_GMAC_HASH_LEN];
+
+ cipher = alg->evp_cipher();
+
+ memset(control_tag, 0x3c, sizeof(control_tag));
+ memset(test_tag, 0x3c, sizeof(test_tag));
+
+ key_len = EVP_CIPHER_key_length(cipher);
+ iv_len = EVP_CIPHER_iv_length(cipher);
+
+ key = alloc_buffer(key_len);
+ iv = generate_iv(iv_len, alg);
+ buffer = alloc_buffer(size);
+
+ /* OpenSSL GMAC. */
+ openssl_gmac(alg, cipher, key, iv, buffer, size, control_tag);
+
+ /* OCF GMAC. */
+ if (!ocf_gmac(alg, buffer, size, key, key_len, iv, test_tag, &crid))
+ goto out;
+ if (memcmp(control_tag, test_tag, sizeof(control_tag)) != 0) {
+ printf("%s (%zu) mismatch:\n", alg->name, size);
+ printf("control:\n");
+ hexdump(control_tag, sizeof(control_tag), NULL, 0);
+ printf("test (cryptodev device %s):\n", crfind(crid));
+ hexdump(test_tag, sizeof(test_tag), NULL, 0);
+ goto out;
+ }
+
+ if (verbose)
+ printf("%s (%zu) matched (cryptodev device %s)\n",
+ alg->name, size, crfind(crid));
+
+out:
+ free(buffer);
+ free(key);
+}
+
static void
openssl_gcm_encrypt(const struct alg *alg, const EVP_CIPHER *cipher,
const char *key, const char *iv, const char *aad, size_t aad_len,
@@ -1142,9 +1274,6 @@
sop.keylen = key_len;
sop.key = (char *)key;
sop.cipher = alg->cipher;
- sop.mackeylen = key_len;
- sop.mackey = (char *)key;
- sop.mac = alg->mac;
return (ocf_init_session(&sop, "AEAD", alg->name, ses));
}
@@ -1316,6 +1445,9 @@
case T_HMAC:
run_hmac_test(alg, size);
break;
+ case T_GMAC:
+ run_gmac_test(alg, size);
+ break;
case T_CIPHER:
run_cipher_test(alg, size);
break;
@@ -1353,7 +1485,7 @@
u_int i;
for (i = 0; i < nitems(algs); i++)
- if (algs[i].type == T_HMAC)
+ if (algs[i].type == T_HMAC || algs[i].type == T_GMAC)
run_test_sizes(&algs[i], sizes, nsizes);
}
