author    Jo-Philipp Wich <jow@openwrt.org>  2010-05-04 20:59:19 +0000
committer Jo-Philipp Wich <jow@openwrt.org>  2010-05-04 20:59:19 +0000
commit    32e7340bf26d1d59aefb14bf1af4c2e3cb9a9642
tree      e04b357c36a5042b6f844d01e89633949600fdb3
parent    d30f29165acba8532b117bd7c61f71be4cd6619c
update OCF framework to version 20100325
SVN-Revision: 21356
Diffstat (limited to 'target/linux/generic-2.6/files/crypto/ocf/cryptosoft.c')
-rw-r--r--  target/linux/generic-2.6/files/crypto/ocf/cryptosoft.c  1188
1 file changed, 750 insertions(+), 438 deletions(-)
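
The substance of this update is a move from the synchronous-only crypto_alloc_blkcipher()/crypto_alloc_hash() calls to an async-first strategy: cryptosoft now tries the kernel's ablkcipher (2.6.22+) and ahash (2.6.32+) interfaces and falls back to the synchronous ones when they are unavailable or disabled through the new swcr_no_ablk/swcr_no_ahash module parameters. A minimal sketch of that allocation pattern, condensed from swcr_newsession() in the diff below — swcr_alloc_cipher is a hypothetical helper, error handling is simplified, and a production caller would check IS_ERR() since the real allocators return error pointers:

#include <linux/crypto.h>

/* Sketch only: async-first cipher allocation with synchronous fallback,
 * mirroring the pattern the updated swcr_newsession() uses.  SW_TYPE_ASYNC
 * is the flag introduced by this patch. */
static struct crypto_tfm *swcr_alloc_cipher(const char *algo, int no_ablk,
					    int *sw_type)
{
	struct crypto_tfm *tfm;

	/* try the async block-cipher interface first, unless it was
	 * disabled via the swcr_no_ablk module parameter */
	tfm = no_ablk ? NULL :
		crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
	if (tfm) {
		*sw_type |= SW_TYPE_ASYNC;	/* completion via callback */
		return tfm;
	}
	/* fall back to the synchronous interface; CRYPTO_ALG_ASYNC in the
	 * mask excludes implementations that can only run asynchronously */
	return crypto_blkcipher_tfm(
		crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
}
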
diff --git a/target/linux/generic-2.6/files/crypto/ocf/cryptosoft.c b/target/linux/generic-2.6/files/crypto/ocf/cryptosoft.c
index 1486889..52e1570 100644
--- a/target/linux/generic-2.6/files/crypto/ocf/cryptosoft.c
+++ b/target/linux/generic-2.6/files/crypto/ocf/cryptosoft.c
@@ -3,8 +3,8 @@
* original cryptosoft for BSD by Angelos D. Keromytis (angelos@cis.upenn.edu)
* but is mostly unrecognisable,
*
- * Written by David McCullough <david_mccullough@securecomputing.com>
- * Copyright (C) 2004-2007 David McCullough
+ * Written by David McCullough <david_mccullough@mcafee.com>
+ * Copyright (C) 2004-2010 David McCullough
* Copyright (C) 2004-2005 Intel Corporation.
*
* LICENSE TERMS
@@ -47,7 +47,13 @@
#include <linux/mm.h>
#include <linux/skbuff.h>
#include <linux/random.h>
-#include <asm/scatterlist.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
+#include <linux/scatterlist.h>
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29)
+#include <crypto/hash.h>
+#endif
#include <cryptodev.h>
#include <uio.h>
@@ -58,14 +64,24 @@ struct {
#define offset_in_page(p) ((unsigned long)(p) & ~PAGE_MASK)
-/* Software session entry */
+#define SW_TYPE_CIPHER 0x01
+#define SW_TYPE_HMAC 0x02
+#define SW_TYPE_HASH 0x04
+#define SW_TYPE_COMP 0x08
+#define SW_TYPE_BLKCIPHER 0x10
+#define SW_TYPE_ALG_MASK 0x1f
+
+#define SW_TYPE_ASYNC 0x8000
+
+/* We change some of the above if we have an async interface */
+
+#define SW_TYPE_ALG_AMASK (SW_TYPE_ALG_MASK | SW_TYPE_ASYNC)
-#define SW_TYPE_CIPHER 0
-#define SW_TYPE_HMAC 1
-#define SW_TYPE_AUTH2 2
-#define SW_TYPE_HASH 3
-#define SW_TYPE_COMP 4
-#define SW_TYPE_BLKCIPHER 5
+#define SW_TYPE_ABLKCIPHER (SW_TYPE_BLKCIPHER | SW_TYPE_ASYNC)
+#define SW_TYPE_AHASH (SW_TYPE_HASH | SW_TYPE_ASYNC)
+#define SW_TYPE_AHMAC (SW_TYPE_HMAC | SW_TYPE_ASYNC)
+
+#define SCATTERLIST_MAX 16
struct swcr_data {
int sw_type;
@@ -82,6 +98,23 @@ struct swcr_data {
struct swcr_data *sw_next;
};
+struct swcr_req {
+ struct swcr_data *sw_head;
+ struct swcr_data *sw;
+ struct cryptop *crp;
+ struct cryptodesc *crd;
+ struct scatterlist sg[SCATTERLIST_MAX];
+ unsigned char iv[EALG_MAX_BLOCK_LEN];
+ char result[HASH_MAX_LEN];
+ void *crypto_req;
+};
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
+static kmem_cache_t *swcr_req_cache;
+#else
+static struct kmem_cache *swcr_req_cache;
+#endif
+
#ifndef CRYPTO_TFM_MODE_CBC
/*
* As of linux-2.6.21 this is no longer defined, and presumably no longer
@@ -103,8 +136,8 @@ struct swcr_data {
struct crypto_tfm *tfm;
void *info;
};
- #define ecb(X) #X
- #define cbc(X) #X
+ #define ecb(X) #X , CRYPTO_TFM_MODE_ECB
+ #define cbc(X) #X , CRYPTO_TFM_MODE_CBC
#define crypto_has_blkcipher(X, Y, Z) crypto_alg_available(X, 0)
#define crypto_blkcipher_cast(X) X
#define crypto_blkcipher_tfm(X) X
@@ -116,13 +149,14 @@ struct swcr_data {
crypto_cipher_encrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
#define crypto_blkcipher_decrypt_iv(W, X, Y, Z) \
crypto_cipher_decrypt_iv((W)->tfm, X, Y, Z, (u8 *)((W)->info))
+ #define crypto_blkcipher_set_flags(x, y) /* nop */
/* Hash/HMAC/Digest */
struct hash_desc
{
struct crypto_tfm *tfm;
};
- #define hmac(X) #X
+ #define hmac(X) #X , 0
#define crypto_has_hash(X, Y, Z) crypto_alg_available(X, 0)
#define crypto_hash_cast(X) X
#define crypto_hash_tfm(X) X
@@ -139,53 +173,66 @@ struct swcr_data {
#define crypto_comp_tfm(X) X
#define crypto_comp_cast(X) X
#define crypto_alloc_comp(X, Y, Z) crypto_alloc_tfm(X, mode)
+ #define plain(X) #X , 0
#else
- #define ecb(X) "ecb(" #X ")"
- #define cbc(X) "cbc(" #X ")"
- #define hmac(X) "hmac(" #X ")"
+ #define ecb(X) "ecb(" #X ")" , 0
+ #define cbc(X) "cbc(" #X ")" , 0
+ #define hmac(X) "hmac(" #X ")" , 0
+ #define plain(X) #X , 0
#endif /* if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
-struct crypto_details
-{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
+/* no ablkcipher in older kernels */
+#define crypto_alloc_ablkcipher(a,b,c) (NULL)
+#define crypto_ablkcipher_tfm(x) ((struct crypto_tfm *)(x))
+#define crypto_ablkcipher_set_flags(a, b) /* nop */
+#define crypto_ablkcipher_setkey(x, y, z) (-EINVAL)
+#define crypto_has_ablkcipher(a,b,c) (0)
+#else
+#define HAVE_ABLKCIPHER
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
+/* no ahash in older kernels */
+#define crypto_ahash_tfm(x) ((struct crypto_tfm *)(x))
+#define crypto_alloc_ahash(a,b,c) (NULL)
+#define crypto_ahash_digestsize(x) 0
+#else
+#define HAVE_AHASH
+#endif
+
+struct crypto_details {
char *alg_name;
int mode;
int sw_type;
};
-/*
- * This needs to be kept updated with CRYPTO_xxx list (cryptodev.h).
- * If the Algorithm is not supported, then insert a {NULL, 0, 0} entry.
- *
- * IMPORTANT: The index to the array IS CRYPTO_xxx.
- */
-static struct crypto_details crypto_details[CRYPTO_ALGORITHM_MAX + 1] = {
- { NULL, 0, 0 },
- /* CRYPTO_xxx index starts at 1 */
- { cbc(des), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
- { cbc(des3_ede), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
- { cbc(blowfish), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
- { cbc(cast5), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
- { cbc(skipjack), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
- { hmac(md5), 0, SW_TYPE_HMAC },
- { hmac(sha1), 0, SW_TYPE_HMAC },
- { hmac(ripemd160), 0, SW_TYPE_HMAC },
- { "md5-kpdk??", 0, SW_TYPE_HASH },
- { "sha1-kpdk??", 0, SW_TYPE_HASH },
- { cbc(aes), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
- { ecb(arc4), CRYPTO_TFM_MODE_ECB, SW_TYPE_BLKCIPHER },
- { "md5", 0, SW_TYPE_HASH },
- { "sha1", 0, SW_TYPE_HASH },
- { hmac(digest_null), 0, SW_TYPE_HMAC },
- { cbc(cipher_null), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
- { "deflate", 0, SW_TYPE_COMP },
- { hmac(sha256), 0, SW_TYPE_HMAC },
- { hmac(sha384), 0, SW_TYPE_HMAC },
- { hmac(sha512), 0, SW_TYPE_HMAC },
- { cbc(camellia), CRYPTO_TFM_MODE_CBC, SW_TYPE_BLKCIPHER },
- { "sha256", 0, SW_TYPE_HASH },
- { "sha384", 0, SW_TYPE_HASH },
- { "sha512", 0, SW_TYPE_HASH },
- { "ripemd160", 0, SW_TYPE_HASH },
+static struct crypto_details crypto_details[] = {
+ [CRYPTO_DES_CBC] = { cbc(des), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_3DES_CBC] = { cbc(des3_ede), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_BLF_CBC] = { cbc(blowfish), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_CAST_CBC] = { cbc(cast5), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_SKIPJACK_CBC] = { cbc(skipjack), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_MD5_HMAC] = { hmac(md5), SW_TYPE_HMAC, },
+ [CRYPTO_SHA1_HMAC] = { hmac(sha1), SW_TYPE_HMAC, },
+ [CRYPTO_RIPEMD160_HMAC] = { hmac(ripemd160), SW_TYPE_HMAC, },
+ [CRYPTO_MD5_KPDK] = { plain(md5-kpdk), SW_TYPE_HASH, },
+ [CRYPTO_SHA1_KPDK] = { plain(sha1-kpdk), SW_TYPE_HASH, },
+ [CRYPTO_AES_CBC] = { cbc(aes), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_ARC4] = { ecb(arc4), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_MD5] = { plain(md5), SW_TYPE_HASH, },
+ [CRYPTO_SHA1] = { plain(sha1), SW_TYPE_HASH, },
+ [CRYPTO_NULL_HMAC] = { hmac(digest_null), SW_TYPE_HMAC, },
+ [CRYPTO_NULL_CBC] = { cbc(cipher_null), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_DEFLATE_COMP] = { plain(deflate), SW_TYPE_COMP, },
+ [CRYPTO_SHA2_256_HMAC] = { hmac(sha256), SW_TYPE_HMAC, },
+ [CRYPTO_SHA2_384_HMAC] = { hmac(sha384), SW_TYPE_HMAC, },
+ [CRYPTO_SHA2_512_HMAC] = { hmac(sha512), SW_TYPE_HMAC, },
+ [CRYPTO_CAMELLIA_CBC] = { cbc(camellia), SW_TYPE_BLKCIPHER, },
+ [CRYPTO_SHA2_256] = { plain(sha256), SW_TYPE_HASH, },
+ [CRYPTO_SHA2_384] = { plain(sha384), SW_TYPE_HASH, },
+ [CRYPTO_SHA2_512] = { plain(sha512), SW_TYPE_HASH, },
+ [CRYPTO_RIPEMD160] = { plain(ripemd160), SW_TYPE_HASH, },
};
int32_t swcr_id = -1;
@@ -197,6 +244,16 @@ module_param(swcr_fail_if_compression_grows, int, 0644);
MODULE_PARM_DESC(swcr_fail_if_compression_grows,
"Treat compression that results in more data as a failure");
+int swcr_no_ahash = 0;
+module_param(swcr_no_ahash, int, 0644);
+MODULE_PARM_DESC(swcr_no_ahash,
+ "Do not use async hash/hmac even if available");
+
+int swcr_no_ablk = 0;
+module_param(swcr_no_ablk, int, 0644);
+MODULE_PARM_DESC(swcr_no_ablk,
+ "Do not use async blk ciphers even if available");
+
static struct swcr_data **swcr_sessions = NULL;
static u_int32_t swcr_sesnum = 0;
@@ -216,6 +273,8 @@ int swcr_debug = 0;
module_param(swcr_debug, int, 0644);
MODULE_PARM_DESC(swcr_debug, "Enable debug");
+static void swcr_process_req(struct swcr_req *req);
+
/*
* Generate a new software session.
*/
@@ -226,7 +285,7 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
u_int32_t i;
int error;
char *algo;
- int mode, sw_type;
+ int mode;
dprintk("%s()\n", __FUNCTION__);
if (sid == NULL || cri == NULL) {
@@ -283,7 +342,8 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
}
memset(*swd, 0, sizeof(struct swcr_data));
- if (cri->cri_alg > CRYPTO_ALGORITHM_MAX) {
+ if (cri->cri_alg < 0 ||
+ cri->cri_alg>=sizeof(crypto_details)/sizeof(crypto_details[0])){
printk("cryptosoft: Unknown algorithm 0x%x\n", cri->cri_alg);
swcr_freesession(NULL, i);
return EINVAL;
@@ -297,7 +357,8 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
}
mode = crypto_details[cri->cri_alg].mode;
- sw_type = crypto_details[cri->cri_alg].sw_type;
+ (*swd)->sw_type = crypto_details[cri->cri_alg].sw_type;
+ (*swd)->sw_alg = cri->cri_alg;
/* Algorithm specific configuration */
switch (cri->cri_alg) {
@@ -308,15 +369,23 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
break;
}
- if (sw_type == SW_TYPE_BLKCIPHER) {
- dprintk("%s crypto_alloc_blkcipher(%s, 0x%x)\n", __FUNCTION__,
+ if ((*swd)->sw_type & SW_TYPE_BLKCIPHER) {
+ dprintk("%s crypto_alloc_*blkcipher(%s, 0x%x)\n", __FUNCTION__,
algo, mode);
- (*swd)->sw_tfm = crypto_blkcipher_tfm(
- crypto_alloc_blkcipher(algo, 0,
- CRYPTO_ALG_ASYNC));
+ /* try async first */
+ (*swd)->sw_tfm = swcr_no_ablk ? NULL :
+ crypto_ablkcipher_tfm(crypto_alloc_ablkcipher(algo, 0, 0));
+ if ((*swd)->sw_tfm) {
+ dprintk("%s %s cipher is async\n", __FUNCTION__, algo);
+ (*swd)->sw_type |= SW_TYPE_ASYNC;
+ } else {
+ dprintk("%s %s cipher is sync\n", __FUNCTION__, algo);
+ (*swd)->sw_tfm = crypto_blkcipher_tfm(
+ crypto_alloc_blkcipher(algo, 0, CRYPTO_ALG_ASYNC));
+ }
if (!(*swd)->sw_tfm) {
- dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s,0x%x)\n",
+ dprintk("cryptosoft: crypto_alloc_blkcipher failed(%s, 0x%x)\n",
algo,mode);
swcr_freesession(NULL, i);
return EINVAL;
@@ -324,28 +393,50 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
if (debug) {
dprintk("%s key:cri->cri_klen=%d,(cri->cri_klen + 7)/8=%d",
- __FUNCTION__,cri->cri_klen,(cri->cri_klen + 7)/8);
+ __FUNCTION__, cri->cri_klen, (cri->cri_klen + 7) / 8);
for (i = 0; i < (cri->cri_klen + 7) / 8; i++)
- {
- dprintk("%s0x%x", (i % 8) ? " " : "\n ",cri->cri_key[i]);
- }
+ dprintk("%s0x%x", (i % 8) ? " " : "\n ",
+ cri->cri_key[i] & 0xff);
dprintk("\n");
}
- error = crypto_blkcipher_setkey(
- crypto_blkcipher_cast((*swd)->sw_tfm), cri->cri_key,
- (cri->cri_klen + 7) / 8);
+ if ((*swd)->sw_type & SW_TYPE_ASYNC) {
+ /* OCF doesn't enforce keys */
+ crypto_ablkcipher_set_flags(
+ __crypto_ablkcipher_cast((*swd)->sw_tfm),
+ CRYPTO_TFM_REQ_WEAK_KEY);
+ error = crypto_ablkcipher_setkey(
+ __crypto_ablkcipher_cast((*swd)->sw_tfm),
+ cri->cri_key, (cri->cri_klen + 7) / 8);
+ } else {
+ /* OCF doesn't enforce keys */
+ crypto_blkcipher_set_flags(
+ crypto_blkcipher_cast((*swd)->sw_tfm),
+ CRYPTO_TFM_REQ_WEAK_KEY);
+ error = crypto_blkcipher_setkey(
+ crypto_blkcipher_cast((*swd)->sw_tfm),
+ cri->cri_key, (cri->cri_klen + 7) / 8);
+ }
if (error) {
printk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n", error,
(*swd)->sw_tfm->crt_flags);
swcr_freesession(NULL, i);
return error;
}
- } else if (sw_type == SW_TYPE_HMAC || sw_type == SW_TYPE_HASH) {
- dprintk("%s crypto_alloc_hash(%s, 0x%x)\n", __FUNCTION__,
+ } else if ((*swd)->sw_type & (SW_TYPE_HMAC | SW_TYPE_HASH)) {
+ dprintk("%s crypto_alloc_*hash(%s, 0x%x)\n", __FUNCTION__,
algo, mode);
- (*swd)->sw_tfm = crypto_hash_tfm(
- crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
+ /* try async first */
+ (*swd)->sw_tfm = swcr_no_ahash ? NULL :
+ crypto_ahash_tfm(crypto_alloc_ahash(algo, 0, 0));
+ if ((*swd)->sw_tfm) {
+ dprintk("%s %s hash is async\n", __FUNCTION__, algo);
+ (*swd)->sw_type |= SW_TYPE_ASYNC;
+ } else {
+ dprintk("%s %s hash is sync\n", __FUNCTION__, algo);
+ (*swd)->sw_tfm = crypto_hash_tfm(
+ crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC));
+ }
if (!(*swd)->sw_tfm) {
dprintk("cryptosoft: crypto_alloc_hash failed(%s,0x%x)\n",
@@ -356,7 +447,7 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
(*swd)->u.hmac.sw_klen = (cri->cri_klen + 7) / 8;
(*swd)->u.hmac.sw_key = (char *)kmalloc((*swd)->u.hmac.sw_klen,
- SLAB_ATOMIC);
+ SLAB_ATOMIC);
if ((*swd)->u.hmac.sw_key == NULL) {
swcr_freesession(NULL, i);
dprintk("%s,%d: ENOBUFS\n", __FILE__, __LINE__);
@@ -365,12 +456,14 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
memcpy((*swd)->u.hmac.sw_key, cri->cri_key, (*swd)->u.hmac.sw_klen);
if (cri->cri_mlen) {
(*swd)->u.hmac.sw_mlen = cri->cri_mlen;
- } else {
- (*swd)->u.hmac.sw_mlen =
- crypto_hash_digestsize(
- crypto_hash_cast((*swd)->sw_tfm));
+ } else if ((*swd)->sw_type & SW_TYPE_ASYNC) {
+ (*swd)->u.hmac.sw_mlen = crypto_ahash_digestsize(
+ __crypto_ahash_cast((*swd)->sw_tfm));
+ } else {
+ (*swd)->u.hmac.sw_mlen = crypto_hash_digestsize(
+ crypto_hash_cast((*swd)->sw_tfm));
}
- } else if (sw_type == SW_TYPE_COMP) {
+ } else if ((*swd)->sw_type & SW_TYPE_COMP) {
(*swd)->sw_tfm = crypto_comp_tfm(
crypto_alloc_comp(algo, 0, CRYPTO_ALG_ASYNC));
if (!(*swd)->sw_tfm) {
@@ -386,14 +479,11 @@ swcr_newsession(device_t dev, u_int32_t *sid, struct cryptoini *cri)
return ENOBUFS;
}
} else {
- printk("cryptosoft: Unhandled sw_type %d\n", sw_type);
+ printk("cryptosoft: Unhandled sw_type %d\n", (*swd)->sw_type);
swcr_freesession(NULL, i);
return EINVAL;
}
- (*swd)->sw_alg = cri->cri_alg;
- (*swd)->sw_type = sw_type;
-
cri = cri->cri_next;
swd = &((*swd)->sw_next);
}
@@ -422,9 +512,35 @@ swcr_freesession(device_t dev, u_int64_t tid)
while ((swd = swcr_sessions[sid]) != NULL) {
swcr_sessions[sid] = swd->sw_next;
- if (swd->sw_tfm)
- crypto_free_tfm(swd->sw_tfm);
- if (swd->sw_type == SW_TYPE_COMP) {
+ if (swd->sw_tfm) {
+ switch (swd->sw_type & SW_TYPE_ALG_AMASK) {
+#ifdef HAVE_AHASH
+ case SW_TYPE_AHMAC:
+ case SW_TYPE_AHASH:
+ crypto_free_ahash(__crypto_ahash_cast(swd->sw_tfm));
+ break;
+#endif
+#ifdef HAVE_ABLKCIPHER
+ case SW_TYPE_ABLKCIPHER:
+ crypto_free_ablkcipher(__crypto_ablkcipher_cast(swd->sw_tfm));
+ break;
+#endif
+ case SW_TYPE_BLKCIPHER:
+ crypto_free_blkcipher(crypto_blkcipher_cast(swd->sw_tfm));
+ break;
+ case SW_TYPE_HMAC:
+ case SW_TYPE_HASH:
+ crypto_free_hash(crypto_hash_cast(swd->sw_tfm));
+ break;
+ case SW_TYPE_COMP:
+ crypto_free_comp(crypto_comp_cast(swd->sw_tfm));
+ default:
+ crypto_free_tfm(swd->sw_tfm);
+ break;
+ }
+ swd->sw_tfm = NULL;
+ }
+ if (swd->sw_type & SW_TYPE_COMP) {
if (swd->u.sw_comp_buf)
kfree(swd->u.sw_comp_buf);
} else {
@@ -436,351 +552,574 @@ swcr_freesession(device_t dev, u_int64_t tid)
return 0;
}
-/*
- * Process a software request.
- */
-static int
-swcr_process(device_t dev, struct cryptop *crp, int hint)
+#if defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH)
+/* older kernels had no async interface */
+
+static void swcr_process_callback(struct crypto_async_request *creq, int err)
+{
+ struct swcr_req *req = creq->data;
+
+ dprintk("%s()\n", __FUNCTION__);
+ if (err) {
+ if (err == -EINPROGRESS)
+ return;
+ dprintk("%s() fail %d\n", __FUNCTION__, -err);
+ req->crp->crp_etype = -err;
+ goto done;
+ }
+
+ switch (req->sw->sw_type & SW_TYPE_ALG_AMASK) {
+ case SW_TYPE_AHMAC:
+ case SW_TYPE_AHASH:
+ crypto_copyback(req->crp->crp_flags, req->crp->crp_buf,
+ req->crd->crd_inject, req->sw->u.hmac.sw_mlen, req->result);
+ ahash_request_free(req->crypto_req);
+ break;
+ case SW_TYPE_ABLKCIPHER:
+ ablkcipher_request_free(req->crypto_req);
+ break;
+ default:
+ req->crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ req->crd = req->crd->crd_next;
+ if (req->crd) {
+ swcr_process_req(req);
+ return;
+ }
+
+done:
+ dprintk("%s crypto_done %p\n", __FUNCTION__, req);
+ crypto_done(req->crp);
+ kmem_cache_free(swcr_req_cache, req);
+}
+#endif /* defined(HAVE_ABLKCIPHER) || defined(HAVE_AHASH) */
+
+
+static void swcr_process_req(struct swcr_req *req)
{
- struct cryptodesc *crd;
struct swcr_data *sw;
- u_int32_t lid;
-#define SCATTERLIST_MAX 16
- struct scatterlist sg[SCATTERLIST_MAX];
+ struct cryptop *crp = req->crp;
+ struct cryptodesc *crd = req->crd;
+ struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
+ struct uio *uiop = (struct uio *) crp->crp_buf;
int sg_num, sg_len, skip;
- struct sk_buff *skb = NULL;
- struct uio *uiop = NULL;
dprintk("%s()\n", __FUNCTION__);
- /* Sanity check */
- if (crp == NULL) {
- dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
- return EINVAL;
- }
- crp->crp_etype = 0;
+ /*
+ * Find the crypto context.
+ *
+ * XXX Note that the logic here prevents us from having
+ * XXX the same algorithm multiple times in a session
+ * XXX (or rather, we can but it won't give us the right
+ * XXX results). To do that, we'd need some way of differentiating
+ * XXX between the various instances of an algorithm (so we can
+ * XXX locate the correct crypto context).
+ */
+ for (sw = req->sw_head; sw && sw->sw_alg != crd->crd_alg; sw = sw->sw_next)
+ ;
- if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
- dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ /* No such context ? */
+ if (sw == NULL) {
crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
goto done;
}
- lid = crp->crp_sid & 0xffffffff;
- if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
- swcr_sessions[lid] == NULL) {
- crp->crp_etype = ENOENT;
- dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
- goto done;
- }
+ req->sw = sw;
+ skip = crd->crd_skip;
/*
- * do some error checking outside of the loop for SKB and IOV processing
- * this leaves us with valid skb or uiop pointers for later
+ * setup the SG list skip from the start of the buffer
*/
+ memset(req->sg, 0, sizeof(req->sg));
+ sg_init_table(req->sg, SCATTERLIST_MAX);
if (crp->crp_flags & CRYPTO_F_SKBUF) {
- skb = (struct sk_buff *) crp->crp_buf;
- if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
- printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
- skb_shinfo(skb)->nr_frags);
- goto done;
+ int i, len;
+
+ sg_num = 0;
+ sg_len = 0;
+
+ if (skip < skb_headlen(skb)) {
+ len = skb_headlen(skb) - skip;
+ if (len + sg_len > crd->crd_len)
+ len = crd->crd_len - sg_len;
+ sg_set_page(&req->sg[sg_num],
+ virt_to_page(skb->data + skip), len,
+ offset_in_page(skb->data + skip));
+ sg_len += len;
+ sg_num++;
+ skip = 0;
+ } else
+ skip -= skb_headlen(skb);
+
+ for (i = 0; sg_len < crd->crd_len &&
+ i < skb_shinfo(skb)->nr_frags &&
+ sg_num < SCATTERLIST_MAX; i++) {
+ if (skip < skb_shinfo(skb)->frags[i].size) {
+ len = skb_shinfo(skb)->frags[i].size - skip;
+ if (len + sg_len > crd->crd_len)
+ len = crd->crd_len - sg_len;
+ sg_set_page(&req->sg[sg_num],
+ skb_shinfo(skb)->frags[i].page,
+ len,
+ skb_shinfo(skb)->frags[i].page_offset + skip);
+ sg_len += len;
+ sg_num++;
+ skip = 0;
+ } else
+ skip -= skb_shinfo(skb)->frags[i].size;
}
} else if (crp->crp_flags & CRYPTO_F_IOV) {
- uiop = (struct uio *) crp->crp_buf;
- if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
- printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
- uiop->uio_iovcnt);
- goto done;
+ int len;
+
+ sg_len = 0;
+ for (sg_num = 0; sg_len < crd->crd_len &&
+ sg_num < uiop->uio_iovcnt &&
+ sg_num < SCATTERLIST_MAX; sg_num++) {
+ if (skip <= uiop->uio_iov[sg_num].iov_len) {
+ len = uiop->uio_iov[sg_num].iov_len - skip;
+ if (len + sg_len > crd->crd_len)
+ len = crd->crd_len - sg_len;
+ sg_set_page(&req->sg[sg_num],
+ virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
+ len,
+ offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
+ sg_len += len;
+ skip = 0;
+ } else
+ skip -= uiop->uio_iov[sg_num].iov_len;
}
+ } else {
+ sg_len = (crp->crp_ilen - skip);
+ if (sg_len > crd->crd_len)
+ sg_len = crd->crd_len;
+ sg_set_page(&req->sg[0], virt_to_page(crp->crp_buf + skip),
+ sg_len, offset_in_page(crp->crp_buf + skip));
+ sg_num = 1;
}
- /* Go through crypto descriptors, processing as we go */
- for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
- /*
- * Find the crypto context.
- *
- * XXX Note that the logic here prevents us from having
- * XXX the same algorithm multiple times in a session
- * XXX (or rather, we can but it won't give us the right
- * XXX results). To do that, we'd need some way of differentiating
- * XXX between the various instances of an algorithm (so we can
- * XXX locate the correct crypto context).
- */
- for (sw = swcr_sessions[lid]; sw && sw->sw_alg != crd->crd_alg;
- sw = sw->sw_next)
- ;
+ switch (sw->sw_type & SW_TYPE_ALG_AMASK) {
+
+#ifdef HAVE_AHASH
+ case SW_TYPE_AHMAC:
+ case SW_TYPE_AHASH:
+ {
+ int ret;
- /* No such context ? */
- if (sw == NULL) {
+ /* check we have room for the result */
+ if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
+ dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
+ "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
+ crd->crd_inject, sw->u.hmac.sw_mlen);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ req->crypto_req =
+ ahash_request_alloc(__crypto_ahash_cast(sw->sw_tfm),GFP_KERNEL);
+ if (!req->crypto_req) {
+ crp->crp_etype = ENOMEM;
+ dprintk("%s,%d: ENOMEM ahash_request_alloc", __FILE__, __LINE__);
+ goto done;
+ }
+
+ ahash_request_set_callback(req->crypto_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
+
+ memset(req->result, 0, sizeof(req->result));
+
+ if (sw->sw_type & SW_TYPE_AHMAC)
+ crypto_ahash_setkey(__crypto_ahash_cast(sw->sw_tfm),
+ sw->u.hmac.sw_key, sw->u.hmac.sw_klen);
+ ahash_request_set_crypt(req->crypto_req, req->sg, req->result, sg_len);
+ ret = crypto_ahash_digest(req->crypto_req);
+ switch (ret) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ return;
+ default:
+ case 0:
+ dprintk("hash OP %s %d\n", ret ? "failed" : "success", ret);
+ crp->crp_etype = ret;
+ ahash_request_free(req->crypto_req);
+ goto done;
+ }
+ } break;
+#endif /* HAVE_AHASH */
+
+#ifdef HAVE_ABLKCIPHER
+ case SW_TYPE_ABLKCIPHER: {
+ int ret;
+ unsigned char *ivp = req->iv;
+ int ivsize =
+ crypto_ablkcipher_ivsize(__crypto_ablkcipher_cast(sw->sw_tfm));
+
+ if (sg_len < crypto_ablkcipher_blocksize(
+ __crypto_ablkcipher_cast(sw->sw_tfm))) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
+ sg_len, crypto_ablkcipher_blocksize(
+ __crypto_ablkcipher_cast(sw->sw_tfm)));
+ goto done;
+ }
+
+ if (ivsize > sizeof(req->iv)) {
crp->crp_etype = EINVAL;
dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
goto done;
}
- skip = crd->crd_skip;
+ req->crypto_req = ablkcipher_request_alloc(
+ __crypto_ablkcipher_cast(sw->sw_tfm), GFP_KERNEL);
+ if (!req->crypto_req) {
+ crp->crp_etype = ENOMEM;
+ dprintk("%s,%d: ENOMEM ablkcipher_request_alloc",
+ __FILE__, __LINE__);
+ goto done;
+ }
- /*
- * setup the SG list skip from the start of the buffer
- */
- memset(sg, 0, sizeof(sg));
- if (crp->crp_flags & CRYPTO_F_SKBUF) {
- int i, len;
+ ablkcipher_request_set_callback(req->crypto_req,
+ CRYPTO_TFM_REQ_MAY_BACKLOG, swcr_process_callback, req);
- sg_num = 0;
- sg_len = 0;
+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ int i, error;
- if (skip < skb_headlen(skb)) {
- len = skb_headlen(skb) - skip;
- if (len + sg_len > crd->crd_len)
- len = crd->crd_len - sg_len;
- sg_set_page(&sg[sg_num],
- virt_to_page(skb->data + skip), len,
- offset_in_page(skb->data + skip));
- sg_len += len;
- sg_num++;
- skip = 0;
- } else
- skip -= skb_headlen(skb);
-
- for (i = 0; sg_len < crd->crd_len &&
- i < skb_shinfo(skb)->nr_frags &&
- sg_num < SCATTERLIST_MAX; i++) {
- if (skip < skb_shinfo(skb)->frags[i].size) {
- len = skb_shinfo(skb)->frags[i].size - skip;
- if (len + sg_len > crd->crd_len)
- len = crd->crd_len - sg_len;
- sg_set_page(&sg[sg_num],
- skb_shinfo(skb)->frags[i].page,
- len,
- skb_shinfo(skb)->frags[i].page_offset + skip);
- sg_len += len;
- sg_num++;
- skip = 0;
- } else
- skip -= skb_shinfo(skb)->frags[i].size;
- }
- } else if (crp->crp_flags & CRYPTO_F_IOV) {
- int len;
-
- sg_len = 0;
- for (sg_num = 0; sg_len <= crd->crd_len &&
- sg_num < uiop->uio_iovcnt &&
- sg_num < SCATTERLIST_MAX; sg_num++) {
- if (skip <= uiop->uio_iov[sg_num].iov_len) {
- len = uiop->uio_iov[sg_num].iov_len - skip;
- if (len + sg_len > crd->crd_len)
- len = crd->crd_len - sg_len;
- sg_set_page(&sg[sg_num],
- virt_to_page(uiop->uio_iov[sg_num].iov_base+skip),
- len,
- offset_in_page(uiop->uio_iov[sg_num].iov_base+skip));
- sg_len += len;
- skip = 0;
- } else
- skip -= uiop->uio_iov[sg_num].iov_len;
+ if (debug) {
+ dprintk("%s key:", __FUNCTION__);
+ for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
+ dprintk("%s0x%x", (i % 8) ? " " : "\n ",
+ crd->crd_key[i] & 0xff);
+ dprintk("\n");
}
- } else {
- sg_len = (crp->crp_ilen - skip);
- if (sg_len > crd->crd_len)
- sg_len = crd->crd_len;
- sg_set_page(&sg[0], virt_to_page(crp->crp_buf + skip),
- sg_len, offset_in_page(crp->crp_buf + skip));
- sg_num = 1;
- }
-
-
- switch (sw->sw_type) {
- case SW_TYPE_BLKCIPHER: {
- unsigned char iv[EALG_MAX_BLOCK_LEN];
- unsigned char *ivp = iv;
- int ivsize =
- crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
- struct blkcipher_desc desc;
-
- if (sg_len < crypto_blkcipher_blocksize(
- crypto_blkcipher_cast(sw->sw_tfm))) {
- crp->crp_etype = EINVAL;
- dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
- sg_len, crypto_blkcipher_blocksize(
- crypto_blkcipher_cast(sw->sw_tfm)));
- goto done;
+ /* OCF doesn't enforce keys */
+ crypto_ablkcipher_set_flags(__crypto_ablkcipher_cast(sw->sw_tfm),
+ CRYPTO_TFM_REQ_WEAK_KEY);
+ error = crypto_ablkcipher_setkey(
+ __crypto_ablkcipher_cast(sw->sw_tfm), crd->crd_key,
+ (crd->crd_klen + 7) / 8);
+ if (error) {
+ dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
+ error, sw->sw_tfm->crt_flags);
+ crp->crp_etype = -error;
}
+ }
+
+ if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
- if (ivsize > sizeof(iv)) {
- crp->crp_etype = EINVAL;
- dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
- goto done;
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+ ivp = crd->crd_iv;
+ else
+ get_random_bytes(ivp, ivsize);
+ /*
+ * do we have to copy the IV back to the buffer ?
+ */
+ if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, ivsize, (caddr_t)ivp);
}
+ ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
+ sg_len, ivp);
+ ret = crypto_ablkcipher_encrypt(req->crypto_req);
- if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
- int i, error;
+ } else { /*decrypt */
- if (debug) {
- dprintk("%s key:", __FUNCTION__);
- for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
- dprintk("%s0x%x", (i % 8) ? " " : "\n ",
- crd->crd_key[i]);
- dprintk("\n");
- }
- error = crypto_blkcipher_setkey(
- crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
- (crd->crd_klen + 7) / 8);
- if (error) {
- dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
- error, sw->sw_tfm->crt_flags);
- crp->crp_etype = -error;
- }
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT)
+ ivp = crd->crd_iv;
+ else
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, ivsize, (caddr_t)ivp);
+ ablkcipher_request_set_crypt(req->crypto_req, req->sg, req->sg,
+ sg_len, ivp);
+ ret = crypto_ablkcipher_decrypt(req->crypto_req);
+ }
+
+ switch (ret) {
+ case -EINPROGRESS:
+ case -EBUSY:
+ return;
+ default:
+ case 0:
+ dprintk("crypto OP %s %d\n", ret ? "failed" : "success", ret);
+ crp->crp_etype = ret;
+ goto done;
+ }
+ } break;
+#endif /* HAVE_ABLKCIPHER */
+
+ case SW_TYPE_BLKCIPHER: {
+ unsigned char iv[EALG_MAX_BLOCK_LEN];
+ unsigned char *ivp = iv;
+ struct blkcipher_desc desc;
+ int ivsize = crypto_blkcipher_ivsize(crypto_blkcipher_cast(sw->sw_tfm));
+
+ if (sg_len < crypto_blkcipher_blocksize(
+ crypto_blkcipher_cast(sw->sw_tfm))) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL len %d < %d\n", __FILE__, __LINE__,
+ sg_len, crypto_blkcipher_blocksize(
+ crypto_blkcipher_cast(sw->sw_tfm)));
+ goto done;
+ }
+
+ if (ivsize > sizeof(iv)) {
+ crp->crp_etype = EINVAL;
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ goto done;
+ }
+
+ if (crd->crd_flags & CRD_F_KEY_EXPLICIT) {
+ int i, error;
+
+ if (debug) {
+ dprintk("%s key:", __FUNCTION__);
+ for (i = 0; i < (crd->crd_klen + 7) / 8; i++)
+ dprintk("%s0x%x", (i % 8) ? " " : "\n ",
+ crd->crd_key[i] & 0xff);
+ dprintk("\n");
+ }
+ /* OCF doesn't enforce keys */
+ crypto_blkcipher_set_flags(crypto_blkcipher_cast(sw->sw_tfm),
+ CRYPTO_TFM_REQ_WEAK_KEY);
+ error = crypto_blkcipher_setkey(
+ crypto_blkcipher_cast(sw->sw_tfm), crd->crd_key,
+ (crd->crd_klen + 7) / 8);
+ if (error) {
+ dprintk("cryptosoft: setkey failed %d (crt_flags=0x%x)\n",
+ error, sw->sw_tfm->crt_flags);
+ crp->crp_etype = -error;
}
+ }
+
+ memset(&desc, 0, sizeof(desc));
+ desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
+
+ if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
- memset(&desc, 0, sizeof(desc));
- desc.tfm = crypto_blkcipher_cast(sw->sw_tfm);
-
- if (crd->crd_flags & CRD_F_ENCRYPT) { /* encrypt */
-
- if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
- ivp = crd->crd_iv;
- } else {
- get_random_bytes(ivp, ivsize);
- }
- /*
- * do we have to copy the IV back to the buffer ?
- */
- if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, ivsize, (caddr_t)ivp);
- }
- desc.info = ivp;
- crypto_blkcipher_encrypt_iv(&desc, sg, sg, sg_len);
-
- } else { /*decrypt */
-
- if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
- ivp = crd->crd_iv;
- } else {
- crypto_copydata(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, ivsize, (caddr_t)ivp);
- }
- desc.info = ivp;
- crypto_blkcipher_decrypt_iv(&desc, sg, sg, sg_len);
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+ ivp = crd->crd_iv;
+ } else {
+ get_random_bytes(ivp, ivsize);
}
- } break;
- case SW_TYPE_HMAC:
- case SW_TYPE_HASH:
- {
- char result[HASH_MAX_LEN];
- struct hash_desc desc;
-
- /* check we have room for the result */
- if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
- dprintk(
- "cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d digestsize=%d\n",
- crp->crp_ilen, crd->crd_skip + sg_len, crd->crd_inject,
- sw->u.hmac.sw_mlen);
- crp->crp_etype = EINVAL;
- goto done;
+ /*
+ * do we have to copy the IV back to the buffer ?
+ */
+ if ((crd->crd_flags & CRD_F_IV_PRESENT) == 0) {
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, ivsize, (caddr_t)ivp);
+ }
+ desc.info = ivp;
+ crypto_blkcipher_encrypt_iv(&desc, req->sg, req->sg, sg_len);
+
+ } else { /*decrypt */
+
+ if (crd->crd_flags & CRD_F_IV_EXPLICIT) {
+ ivp = crd->crd_iv;
+ } else {
+ crypto_copydata(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, ivsize, (caddr_t)ivp);
}
+ desc.info = ivp;
+ crypto_blkcipher_decrypt_iv(&desc, req->sg, req->sg, sg_len);
+ }
+ } break;
- memset(&desc, 0, sizeof(desc));
- desc.tfm = crypto_hash_cast(sw->sw_tfm);
+ case SW_TYPE_HMAC:
+ case SW_TYPE_HASH:
+ {
+ char result[HASH_MAX_LEN];
+ struct hash_desc desc;
+
+ /* check we have room for the result */
+ if (crp->crp_ilen - crd->crd_inject < sw->u.hmac.sw_mlen) {
+ dprintk("cryptosoft: EINVAL crp_ilen=%d, len=%d, inject=%d "
+ "digestsize=%d\n", crp->crp_ilen, crd->crd_skip + sg_len,
+ crd->crd_inject, sw->u.hmac.sw_mlen);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
- memset(result, 0, sizeof(result));
+ memset(&desc, 0, sizeof(desc));
+ desc.tfm = crypto_hash_cast(sw->sw_tfm);
- if (sw->sw_type == SW_TYPE_HMAC) {
+ memset(result, 0, sizeof(result));
+
+ if (sw->sw_type & SW_TYPE_HMAC) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
- crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
- sg, sg_num, result);
+ crypto_hmac(sw->sw_tfm, sw->u.hmac.sw_key, &sw->u.hmac.sw_klen,
+ req->sg, sg_num, result);
#else
- crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
- sw->u.hmac.sw_klen);
- crypto_hash_digest(&desc, sg, sg_len, result);
-#endif /* #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
-
- } else { /* SW_TYPE_HASH */
- crypto_hash_digest(&desc, sg, sg_len, result);
+ crypto_hash_setkey(desc.tfm, sw->u.hmac.sw_key,
+ sw->u.hmac.sw_klen);
+ crypto_hash_digest(&desc, req->sg, sg_len, result);
+#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) */
+
+ } else { /* SW_TYPE_HASH */
+ crypto_hash_digest(&desc, req->sg, sg_len, result);
+ }
+
+ crypto_copyback(crp->crp_flags, crp->crp_buf,
+ crd->crd_inject, sw->u.hmac.sw_mlen, result);
+ }
+ break;
+
+ case SW_TYPE_COMP: {
+ void *ibuf = NULL;
+ void *obuf = sw->u.sw_comp_buf;
+ int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
+ int ret = 0;
+
+ /*
+ * we need to use an additional copy if there is more than one
+ * input chunk since the kernel comp routines do not handle
+ * SG yet. Otherwise we just use the input buffer as is.
+ * Rather than allocate another buffer we just split the tmp
+ * buffer we already have.
+ * Perhaps we should just use zlib directly ?
+ */
+ if (sg_num > 1) {
+ int blk;
+
+ ibuf = obuf;
+ for (blk = 0; blk < sg_num; blk++) {
+ memcpy(obuf, sg_virt(&req->sg[blk]),
+ req->sg[blk].length);
+ obuf += req->sg[blk].length;
+ }
+ olen -= sg_len;
+ } else
+ ibuf = sg_virt(&req->sg[0]);
+
+ if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
+ ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
+ ibuf, ilen, obuf, &olen);
+ if (!ret && olen > crd->crd_len) {
+ dprintk("cryptosoft: ERANGE compress %d into %d\n",
+ crd->crd_len, olen);
+ if (swcr_fail_if_compression_grows)
+ ret = ERANGE;
}
+ } else { /* decompress */
+ ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
+ ibuf, ilen, obuf, &olen);
+ if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
+ dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
+					"space for %d, at offset %d\n",
+ crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
+ ret = ETOOSMALL;
+ }
+ }
+ if (ret)
+ dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
+ /*
+ * on success copy result back,
+	 * the Linux crypto API returns -errno, we need to fix that
+ */
+ crp->crp_etype = ret < 0 ? -ret : ret;
+ if (ret == 0) {
+			/* copy back the result and return its size */
crypto_copyback(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, sw->u.hmac.sw_mlen, result);
- }
- break;
+ crd->crd_inject, olen, obuf);
+ crp->crp_olen = olen;
+ }
- case SW_TYPE_COMP: {
- void *ibuf = NULL;
- void *obuf = sw->u.sw_comp_buf;
- int ilen = sg_len, olen = CRYPTO_MAX_DATA_LEN;
- int ret = 0;
- /*
- * we need to use an additional copy if there is more than one
- * input chunk since the kernel comp routines do not handle
- * SG yet. Otherwise we just use the input buffer as is.
- * Rather than allocate another buffer we just split the tmp
- * buffer we already have.
- * Perhaps we should just use zlib directly ?
- */
- if (sg_num > 1) {
- int blk;
-
- ibuf = obuf;
- for (blk = 0; blk < sg_num; blk++) {
- memcpy(obuf, sg_virt(&sg[blk]),
- sg[blk].length);
- obuf += sg[blk].length;
- }
- olen -= sg_len;
- } else
- ibuf = sg_virt(&sg[0]);
-
- if (crd->crd_flags & CRD_F_ENCRYPT) { /* compress */
- ret = crypto_comp_compress(crypto_comp_cast(sw->sw_tfm),
- ibuf, ilen, obuf, &olen);
- if (!ret && olen > crd->crd_len) {
- dprintk("cryptosoft: ERANGE compress %d into %d\n",
- crd->crd_len, olen);
- if (swcr_fail_if_compression_grows)
- ret = ERANGE;
- }
- } else { /* decompress */
- ret = crypto_comp_decompress(crypto_comp_cast(sw->sw_tfm),
- ibuf, ilen, obuf, &olen);
- if (!ret && (olen + crd->crd_inject) > crp->crp_olen) {
- dprintk("cryptosoft: ETOOSMALL decompress %d into %d, "
-						"space for %d, at offset %d\n",
- crd->crd_len, olen, crp->crp_olen, crd->crd_inject);
- ret = ETOOSMALL;
- }
- }
- if (ret)
- dprintk("%s,%d: ret = %d\n", __FILE__, __LINE__, ret);
+ } break;
- /*
- * on success copy result back,
-		 * the Linux crypto API returns -errno, we need to fix that
- */
- crp->crp_etype = ret < 0 ? -ret : ret;
- if (ret == 0) {
-				/* copy back the result and return its size */
- crypto_copyback(crp->crp_flags, crp->crp_buf,
- crd->crd_inject, olen, obuf);
- crp->crp_olen = olen;
- }
+ default:
+ /* Unknown/unsupported algorithm */
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+done:
+ crypto_done(crp);
+ kmem_cache_free(swcr_req_cache, req);
+}
- } break;
- default:
- /* Unknown/unsupported algorithm */
- dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
- crp->crp_etype = EINVAL;
+/*
+ * Process a crypto request.
+ */
+static int
+swcr_process(device_t dev, struct cryptop *crp, int hint)
+{
+ struct swcr_req *req = NULL;
+ u_int32_t lid;
+
+ dprintk("%s()\n", __FUNCTION__);
+ /* Sanity check */
+ if (crp == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ return EINVAL;
+ }
+
+ crp->crp_etype = 0;
+
+ if (crp->crp_desc == NULL || crp->crp_buf == NULL) {
+ dprintk("%s,%d: EINVAL\n", __FILE__, __LINE__);
+ crp->crp_etype = EINVAL;
+ goto done;
+ }
+
+ lid = crp->crp_sid & 0xffffffff;
+ if (lid >= swcr_sesnum || lid == 0 || swcr_sessions == NULL ||
+ swcr_sessions[lid] == NULL) {
+ crp->crp_etype = ENOENT;
+ dprintk("%s,%d: ENOENT\n", __FILE__, __LINE__);
+ goto done;
+ }
+
+ /*
+ * do some error checking outside of the loop for SKB and IOV processing
+ * this leaves us with valid skb or uiop pointers for later
+ */
+ if (crp->crp_flags & CRYPTO_F_SKBUF) {
+ struct sk_buff *skb = (struct sk_buff *) crp->crp_buf;
+ if (skb_shinfo(skb)->nr_frags >= SCATTERLIST_MAX) {
+ printk("%s,%d: %d nr_frags > SCATTERLIST_MAX", __FILE__, __LINE__,
+ skb_shinfo(skb)->nr_frags);
+ goto done;
+ }
+ } else if (crp->crp_flags & CRYPTO_F_IOV) {
+ struct uio *uiop = (struct uio *) crp->crp_buf;
+ if (uiop->uio_iovcnt > SCATTERLIST_MAX) {
+ printk("%s,%d: %d uio_iovcnt > SCATTERLIST_MAX", __FILE__, __LINE__,
+ uiop->uio_iovcnt);
goto done;
}
}
+ /*
+ * setup a new request ready for queuing
+ */
+ req = kmem_cache_alloc(swcr_req_cache, SLAB_ATOMIC);
+ if (req == NULL) {
+ dprintk("%s,%d: ENOMEM\n", __FILE__, __LINE__);
+ crp->crp_etype = ENOMEM;
+ goto done;
+ }
+ memset(req, 0, sizeof(*req));
+
+ req->sw_head = swcr_sessions[lid];
+ req->crp = crp;
+ req->crd = crp->crp_desc;
+
+ swcr_process_req(req);
+ return 0;
+
done:
crypto_done(crp);
+ if (req)
+ kmem_cache_free(swcr_req_cache, req);
return 0;
}
+
static int
cryptosoft_init(void)
{
@@ -789,24 +1128,34 @@ cryptosoft_init(void)
dprintk("%s(%p)\n", __FUNCTION__, cryptosoft_init);
+ swcr_req_cache = kmem_cache_create("cryptosoft_req",
+ sizeof(struct swcr_req), 0, SLAB_HWCACHE_ALIGN, NULL
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)
+ , NULL
+#endif
+ );
+ if (!swcr_req_cache) {
+ printk("cryptosoft: failed to create request cache\n");
+ return -ENOENT;
+ }
+
softc_device_init(&swcr_softc, "cryptosoft", 0, swcr_methods);
swcr_id = crypto_get_driverid(softc_get_device(&swcr_softc),
CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_SYNC);
if (swcr_id < 0) {
- printk("Software crypto device cannot initialize!");
+ printk("cryptosoft: Software crypto device cannot initialize!");
return -ENODEV;
}
#define REGISTER(alg) \
- crypto_register(swcr_id, alg, 0,0);
+ crypto_register(swcr_id, alg, 0,0)
- for (i = CRYPTO_ALGORITHM_MIN; i <= CRYPTO_ALGORITHM_MAX; ++i)
- {
+ for (i = 0; i < sizeof(crypto_details)/sizeof(crypto_details[0]); i++) {
+ int found;
algo = crypto_details[i].alg_name;
- if (!algo || !*algo)
- {
+ if (!algo || !*algo) {
dprintk("%s:Algorithm %d not supported\n", __FUNCTION__, i);
continue;
}
@@ -814,72 +1163,34 @@ cryptosoft_init(void)
mode = crypto_details[i].mode;
sw_type = crypto_details[i].sw_type;
- switch (sw_type)
- {
- case SW_TYPE_CIPHER:
- if (crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC))
- {
- REGISTER(i);
- }
- else
- {
- dprintk("%s:CIPHER algorithm %d:'%s' not supported\n",
- __FUNCTION__, i, algo);
- }
- break;
- case SW_TYPE_HMAC:
- if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
- {
- REGISTER(i);
- }
- else
- {
- dprintk("%s:HMAC algorithm %d:'%s' not supported\n",
- __FUNCTION__, i, algo);
- }
- break;
- case SW_TYPE_HASH:
- if (crypto_has_hash(algo, 0, CRYPTO_ALG_ASYNC))
- {
- REGISTER(i);
- }
- else
- {
- dprintk("%s:HASH algorithm %d:'%s' not supported\n",
- __FUNCTION__, i, algo);
- }
- break;
- case SW_TYPE_COMP:
- if (crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC))
- {
- REGISTER(i);
- }
- else
- {
- dprintk("%s:COMP algorithm %d:'%s' not supported\n",
- __FUNCTION__, i, algo);
- }
- break;
- case SW_TYPE_BLKCIPHER:
- if (crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC))
- {
- REGISTER(i);
- }
- else
- {
- dprintk("%s:BLKCIPHER algorithm %d:'%s' not supported\n",
- __FUNCTION__, i, algo);
- }
- break;
- default:
- dprintk(
- "%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
+ found = 0;
+ switch (sw_type & SW_TYPE_ALG_MASK) {
+ case SW_TYPE_CIPHER:
+ found = crypto_has_cipher(algo, 0, CRYPTO_ALG_ASYNC);
+ break;
+ case SW_TYPE_HMAC:
+ found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
+ break;
+ case SW_TYPE_HASH:
+ found = crypto_has_hash(algo, 0, swcr_no_ahash?CRYPTO_ALG_ASYNC:0);
+ break;
+ case SW_TYPE_COMP:
+ found = crypto_has_comp(algo, 0, CRYPTO_ALG_ASYNC);
+ break;
+ case SW_TYPE_BLKCIPHER:
+ found = crypto_has_blkcipher(algo, 0, CRYPTO_ALG_ASYNC);
+ if (!found && !swcr_no_ablk)
+ found = crypto_has_ablkcipher(algo, 0, 0);
+ break;
+ }
+ if (found) {
+ REGISTER(i);
+ } else {
+ dprintk("%s:Algorithm Type %d not supported (algorithm %d:'%s')\n",
__FUNCTION__, sw_type, i, algo);
- break;
}
}
-
- return(0);
+ return 0;
}
static void
@@ -888,9 +1199,10 @@ cryptosoft_exit(void)
dprintk("%s()\n", __FUNCTION__);
crypto_unregister_all(swcr_id);
swcr_id = -1;
+ kmem_cache_destroy(swcr_req_cache);
}
-module_init(cryptosoft_init);
+late_initcall(cryptosoft_init);
module_exit(cryptosoft_exit);
MODULE_LICENSE("Dual BSD/GPL");
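
For reference, the completion contract the new swcr_process_req()/swcr_process_callback() pair depends on: a submit that returns -EINPROGRESS or -EBUSY will complete later through the callback registered with ahash_request_set_callback()/ablkcipher_request_set_callback(), while any other return value completes synchronously. The sketch below — sketch_ahash_digest and sketch_done are hypothetical helpers, not part of this patch — demonstrates the same contract with a blocking wait; cryptosoft instead returns to its caller and chains the next crypto descriptor from inside the callback:

#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/scatterlist.h>

static void sketch_done(struct crypto_async_request *areq, int err)
{
	if (err == -EINPROGRESS)
		return;		/* request left the backlog; still pending */
	complete(areq->data);	/* signal the waiting submitter */
}

/* Digest `len' bytes described by `sg' into `out', blocking until the
 * async operation finishes. */
static int sketch_ahash_digest(struct crypto_ahash *tfm,
			       struct scatterlist *sg,
			       unsigned int len, u8 *out)
{
	struct ahash_request *req;
	DECLARE_COMPLETION_ONSTACK(done);
	int ret;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   sketch_done, &done);
	ahash_request_set_crypt(req, sg, out, len);
	ret = crypto_ahash_digest(req);
	if (ret == -EINPROGRESS || ret == -EBUSY) {
		wait_for_completion(&done);
		ret = 0;	/* real code would record the callback's err */
	}
	ahash_request_free(req);
	return ret;
}
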