Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/amcc/crypto4xx_sa.c | 23
-rw-r--r--  drivers/crypto/atmel-aes.c | 2
-rw-r--r--  drivers/crypto/atmel-sha.c | 50
-rw-r--r--  drivers/crypto/atmel-tdes.c | 2
-rw-r--r--  drivers/crypto/bfin_crc.c | 4
-rw-r--r--  drivers/crypto/caam/caamalg.c | 14
-rw-r--r--  drivers/crypto/caam/ctrl.c | 6
-rw-r--r--  drivers/crypto/caam/error.c | 13
-rw-r--r--  drivers/crypto/caam/jr.c | 37
-rw-r--r--  drivers/crypto/caam/sg_sw_sec4.h | 8
-rw-r--r--  drivers/crypto/ccp/ccp-dev.c | 1
-rw-r--r--  drivers/crypto/ixp4xx_crypto.c | 4
-rw-r--r--  drivers/crypto/nx/nx.c | 6
-rw-r--r--  drivers/crypto/omap-aes.c | 4
-rw-r--r--  drivers/crypto/omap-des.c | 8
-rw-r--r--  drivers/crypto/qat/qat_common/adf_accel_devices.h | 6
-rw-r--r--  drivers/crypto/qat/qat_common/adf_aer.c | 24
-rw-r--r--  drivers/crypto/qat/qat_common/adf_cfg.c | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_common_drv.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/adf_ctl_drv.c | 7
-rw-r--r--  drivers/crypto/qat/qat_common/adf_init.c | 98
-rw-r--r--  drivers/crypto/qat/qat_common/adf_transport_internal.h | 1
-rw-r--r--  drivers/crypto/qat/qat_common/icp_qat_hw.h | 2
-rw-r--r--  drivers/crypto/qat/qat_common/qat_algs.c | 642
-rw-r--r--  drivers/crypto/qat/qat_common/qat_crypto.h | 16
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c | 19
-rw-r--r--  drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 42
-rw-r--r--  drivers/crypto/qce/dma.c | 6
-rw-r--r--  drivers/crypto/qce/sha.c | 2
-rw-r--r--  drivers/crypto/sahara.c | 2
-rw-r--r--  drivers/crypto/talitos.c | 8
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c | 10
32 files changed, 708 insertions, 363 deletions
diff --git a/drivers/crypto/amcc/crypto4xx_sa.c b/drivers/crypto/amcc/crypto4xx_sa.c
index de8a7a4..69182e2 100644
--- a/drivers/crypto/amcc/crypto4xx_sa.c
+++ b/drivers/crypto/amcc/crypto4xx_sa.c
@@ -34,29 +34,6 @@
#include "crypto4xx_sa.h"
#include "crypto4xx_core.h"
-u32 get_dynamic_sa_offset_iv_field(struct crypto4xx_ctx *ctx)
-{
- u32 offset;
- union dynamic_sa_contents cts;
-
- if (ctx->direction == DIR_INBOUND)
- cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_in))->sa_contents;
- else
- cts.w = ((struct dynamic_sa_ctl *)(ctx->sa_out))->sa_contents;
- offset = cts.bf.key_size
- + cts.bf.inner_size
- + cts.bf.outer_size
- + cts.bf.spi
- + cts.bf.seq_num0
- + cts.bf.seq_num1
- + cts.bf.seq_num_mask0
- + cts.bf.seq_num_mask1
- + cts.bf.seq_num_mask2
- + cts.bf.seq_num_mask3;
-
- return sizeof(struct dynamic_sa_ctl) + offset * 4;
-}
-
u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx)
{
u32 offset;
diff --git a/drivers/crypto/atmel-aes.c b/drivers/crypto/atmel-aes.c
index 53d1c33..6597aac 100644
--- a/drivers/crypto/atmel-aes.c
+++ b/drivers/crypto/atmel-aes.c
@@ -673,9 +673,9 @@ err_map_out:
dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
DMA_TO_DEVICE);
err_map_in:
+err_alloc:
free_page((unsigned long)dd->buf_out);
free_page((unsigned long)dd->buf_in);
-err_alloc:
if (err)
pr_err("error: %d\n", err);
return err;
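
Moving the err_alloc label above the free_page() calls means a partial allocation failure (buf_in obtained, buf_out not) no longer leaks the first page. free_page() treats a zero address as a no-op, which is what makes a single shared unwind safe. A minimal sketch of the pattern, with hypothetical names:

	/* Sketch: one NULL-safe unwind shared by the allocation and
	 * DMA-mapping failure paths; free_page() ignores a zero
	 * address, so no NULL checks are needed. */
	static int example_buf_init(struct example_dev *dd)
	{
		int err = -ENOMEM;

		dd->buf_in = (void *)__get_free_page(GFP_KERNEL);
		dd->buf_out = (void *)__get_free_page(GFP_KERNEL);
		if (!dd->buf_in || !dd->buf_out)
			goto err_alloc;

		err = example_map_buffers(dd);	/* hypothetical helper */
		if (err)
			goto err_map;
		return 0;

	err_map:
	err_alloc:
		free_page((unsigned long)dd->buf_out);
		free_page((unsigned long)dd->buf_in);
		return err;
	}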
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index d94f07c..34db04a 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -102,10 +102,6 @@ struct atmel_sha_ctx {
struct atmel_sha_dev *dd;
unsigned long flags;
-
- /* fallback stuff */
- struct crypto_shash *fallback;
-
};
#define ATMEL_SHA_QUEUE_LENGTH 50
@@ -974,19 +970,8 @@ static int atmel_sha_digest(struct ahash_request *req)
return atmel_sha_init(req) ?: atmel_sha_finup(req);
}
-static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
+static int atmel_sha_cra_init(struct crypto_tfm *tfm)
{
- struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
- const char *alg_name = crypto_tfm_alg_name(tfm);
-
- /* Allocate a fallback and abort if it failed. */
- tctx->fallback = crypto_alloc_shash(alg_name, 0,
- CRYPTO_ALG_NEED_FALLBACK);
- if (IS_ERR(tctx->fallback)) {
- pr_err("atmel-sha: fallback driver '%s' could not be loaded.\n",
- alg_name);
- return PTR_ERR(tctx->fallback);
- }
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
sizeof(struct atmel_sha_reqctx) +
SHA_BUFFER_LEN + SHA512_BLOCK_SIZE);
@@ -994,19 +979,6 @@ static int atmel_sha_cra_init_alg(struct crypto_tfm *tfm, const char *alg_base)
return 0;
}
-static int atmel_sha_cra_init(struct crypto_tfm *tfm)
-{
- return atmel_sha_cra_init_alg(tfm, NULL);
-}
-
-static void atmel_sha_cra_exit(struct crypto_tfm *tfm)
-{
- struct atmel_sha_ctx *tctx = crypto_tfm_ctx(tfm);
-
- crypto_free_shash(tctx->fallback);
- tctx->fallback = NULL;
-}
-
static struct ahash_alg sha_1_256_algs[] = {
{
.init = atmel_sha_init,
@@ -1020,14 +992,12 @@ static struct ahash_alg sha_1_256_algs[] = {
.cra_name = "sha1",
.cra_driver_name = "atmel-sha1",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA1_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = atmel_sha_cra_init,
- .cra_exit = atmel_sha_cra_exit,
}
}
},
@@ -1043,14 +1013,12 @@ static struct ahash_alg sha_1_256_algs[] = {
.cra_name = "sha256",
.cra_driver_name = "atmel-sha256",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA256_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = atmel_sha_cra_init,
- .cra_exit = atmel_sha_cra_exit,
}
}
},
@@ -1068,14 +1036,12 @@ static struct ahash_alg sha_224_alg = {
.cra_name = "sha224",
.cra_driver_name = "atmel-sha224",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA224_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_sha_ctx),
.cra_alignmask = 0,
.cra_module = THIS_MODULE,
.cra_init = atmel_sha_cra_init,
- .cra_exit = atmel_sha_cra_exit,
}
}
};
@@ -1093,14 +1059,12 @@ static struct ahash_alg sha_384_512_algs[] = {
.cra_name = "sha384",
.cra_driver_name = "atmel-sha384",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA384_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_sha_ctx),
.cra_alignmask = 0x3,
.cra_module = THIS_MODULE,
.cra_init = atmel_sha_cra_init,
- .cra_exit = atmel_sha_cra_exit,
}
}
},
@@ -1116,14 +1080,12 @@ static struct ahash_alg sha_384_512_algs[] = {
.cra_name = "sha512",
.cra_driver_name = "atmel-sha512",
.cra_priority = 100,
- .cra_flags = CRYPTO_ALG_ASYNC |
- CRYPTO_ALG_NEED_FALLBACK,
+ .cra_flags = CRYPTO_ALG_ASYNC,
.cra_blocksize = SHA512_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct atmel_sha_ctx),
.cra_alignmask = 0x3,
.cra_module = THIS_MODULE,
.cra_init = atmel_sha_cra_init,
- .cra_exit = atmel_sha_cra_exit,
}
}
},
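
The driver allocated a shash fallback in cra_init but never called it anywhere, so the hunks above drop the allocation, the CRYPTO_ALG_NEED_FALLBACK flag and the now-empty cra_exit handler. For contrast, a driver that genuinely needs a software fallback pairs the pieces roughly like this (a sketch with a hypothetical context type, not this driver's code):

	#include <crypto/hash.h>

	static int example_cra_init(struct crypto_tfm *tfm)
	{
		struct example_ctx *ctx = crypto_tfm_ctx(tfm);

		/* Passing CRYPTO_ALG_NEED_FALLBACK as the mask excludes
		 * implementations that themselves require a fallback. */
		ctx->fallback = crypto_alloc_shash(crypto_tfm_alg_name(tfm), 0,
						   CRYPTO_ALG_NEED_FALLBACK);
		if (IS_ERR(ctx->fallback))
			return PTR_ERR(ctx->fallback);
		return 0;
	}

	static void example_cra_exit(struct crypto_tfm *tfm)
	{
		struct example_ctx *ctx = crypto_tfm_ctx(tfm);

		crypto_free_shash(ctx->fallback);
	}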
diff --git a/drivers/crypto/atmel-tdes.c b/drivers/crypto/atmel-tdes.c
index 5e7c896..258772d 100644
--- a/drivers/crypto/atmel-tdes.c
+++ b/drivers/crypto/atmel-tdes.c
@@ -376,9 +376,9 @@ err_map_out:
dma_unmap_single(dd->dev, dd->dma_addr_in, dd->buflen,
DMA_TO_DEVICE);
err_map_in:
+err_alloc:
free_page((unsigned long)dd->buf_out);
free_page((unsigned long)dd->buf_in);
-err_alloc:
if (err)
pr_err("error: %d\n", err);
return err;
diff --git a/drivers/crypto/bfin_crc.c b/drivers/crypto/bfin_crc.c
index 9ae149bd..d9af940 100644
--- a/drivers/crypto/bfin_crc.c
+++ b/drivers/crypto/bfin_crc.c
@@ -110,7 +110,7 @@ static int sg_count(struct scatterlist *sg_list)
while (!sg_is_last(sg)) {
sg_nents++;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return sg_nents;
@@ -744,7 +744,7 @@ static int __init bfin_crypto_crc_mod_init(void)
ret = platform_driver_register(&bfin_crypto_crc_driver);
if (ret) {
- pr_info(KERN_ERR "unable to register driver\n");
+ pr_err("unable to register driver\n");
return ret;
}
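
Two independent fixes: scatterwalk_sg_next() is dropped in favor of the generic sg_next(), which follows chained scatterlists by itself, and pr_info(KERN_ERR ...) embedded a second log level inside the format string, so pr_err() is the correct helper. The resulting walk looks like this (sg_nents() in <linux/scatterlist.h> implements the same count):

	#include <linux/scatterlist.h>

	/* Sketch: counting entries by walking with sg_next(), which
	 * transparently hops across chain links between sg arrays. */
	static int example_sg_count(struct scatterlist *sg_list)
	{
		struct scatterlist *sg = sg_list;
		int nents = 1;

		while (!sg_is_last(sg)) {
			nents++;
			sg = sg_next(sg);
		}
		return nents;
	}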
diff --git a/drivers/crypto/caam/caamalg.c b/drivers/crypto/caam/caamalg.c
index 3187400..29071a1 100644
--- a/drivers/crypto/caam/caamalg.c
+++ b/drivers/crypto/caam/caamalg.c
@@ -2532,7 +2532,7 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
in_options = 0;
} else {
src_dma = edesc->sec4_sg_dma;
- sec4_sg_index += (iv_contig ? 0 : 1) + edesc->src_nents;
+ sec4_sg_index += edesc->src_nents + 1;
in_options = LDST_SGF;
}
append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
@@ -2714,10 +2714,10 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (!all_contig) {
if (!is_gcm) {
sg_to_sec4_sg(req->assoc,
- (assoc_nents ? : 1),
+ assoc_nents,
edesc->sec4_sg +
sec4_sg_index, 0);
- sec4_sg_index += assoc_nents ? : 1;
+ sec4_sg_index += assoc_nents;
}
dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
@@ -2726,17 +2726,17 @@ static struct aead_edesc *aead_edesc_alloc(struct aead_request *req,
if (is_gcm) {
sg_to_sec4_sg(req->assoc,
- (assoc_nents ? : 1),
+ assoc_nents,
edesc->sec4_sg +
sec4_sg_index, 0);
- sec4_sg_index += assoc_nents ? : 1;
+ sec4_sg_index += assoc_nents;
}
sg_to_sec4_sg_last(req->src,
- (src_nents ? : 1),
+ src_nents,
edesc->sec4_sg +
sec4_sg_index, 0);
- sec4_sg_index += src_nents ? : 1;
+ sec4_sg_index += src_nents;
}
if (dst_nents) {
sg_to_sec4_sg_last(req->dst, dst_nents,
diff --git a/drivers/crypto/caam/ctrl.c b/drivers/crypto/caam/ctrl.c
index 70f1e6f..efba4cc 100644
--- a/drivers/crypto/caam/ctrl.c
+++ b/drivers/crypto/caam/ctrl.c
@@ -175,13 +175,10 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
{
struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
struct caam_ctrl __iomem *ctrl;
- struct rng4tst __iomem *r4tst;
u32 *desc, status, rdsta_val;
int ret = 0, sh_idx;
ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
- r4tst = &ctrl->r4tst[0];
-
desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
if (!desc)
return -ENOMEM;
@@ -209,8 +206,7 @@ static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
* without any error (HW optimizations for later
* CAAM eras), then try again.
*/
- rdsta_val =
- rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
+ rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_IFMASK;
if (status || !(rdsta_val & (1 << sh_idx)))
ret = -EAGAIN;
if (ret)
diff --git a/drivers/crypto/caam/error.c b/drivers/crypto/caam/error.c
index 66d73bf..33e41ea 100644
--- a/drivers/crypto/caam/error.c
+++ b/drivers/crypto/caam/error.c
@@ -151,10 +151,15 @@ static void report_ccb_status(struct device *jrdev, const u32 status,
else
snprintf(err_err_code, sizeof(err_err_code), "%02x", err_id);
- dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
- status, error, idx_str, idx,
- cha_str, cha_err_code,
- err_str, err_err_code);
+ /*
+ * CCB ICV check failures are part of normal operation life;
+ * we leave the upper layers to do what they want with them.
+ */
+ if (err_id != JRSTA_CCBERR_ERRID_ICVCHK)
+ dev_err(jrdev, "%08x: %s: %s %d: %s%s: %s%s\n",
+ status, error, idx_str, idx,
+ cha_str, cha_err_code,
+ err_str, err_err_code);
}
static void report_jump_status(struct device *jrdev, const u32 status,
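
An ICV (integrity check value) mismatch is the expected outcome whenever authenticated data fails verification, so logging every one at dev_err level floods the console under normal operation; the hunk filters that error ID out and lets the upper layers act on the returned status instead. The general shape of the pattern, with a hypothetical constant:

	/* Sketch: log only unexpected hardware errors; expected
	 * conditions (here an authentication-tag mismatch) are
	 * propagated silently through the status code. */
	static void example_report_status(struct device *dev, u32 status, u8 err_id)
	{
		if (err_id == EXAMPLE_ERRID_ICVCHK)	/* hypothetical */
			return;
		dev_err(dev, "unexpected status %08x (err %02x)\n", status, err_id);
	}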
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 9b3ef1bc..b8b5d47 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -384,30 +384,28 @@ static int caam_jr_init(struct device *dev)
if (error) {
dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
jrp->ridx, jrp->irq);
- irq_dispose_mapping(jrp->irq);
- jrp->irq = 0;
- return -EINVAL;
+ goto out_kill_deq;
}
error = caam_reset_hw_jr(dev);
if (error)
- return error;
+ goto out_free_irq;
+ error = -ENOMEM;
jrp->inpring = dma_alloc_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
&inpbusaddr, GFP_KERNEL);
+ if (!jrp->inpring)
+ goto out_free_irq;
jrp->outring = dma_alloc_coherent(dev, sizeof(struct jr_outentry) *
JOBR_DEPTH, &outbusaddr, GFP_KERNEL);
+ if (!jrp->outring)
+ goto out_free_inpring;
jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
GFP_KERNEL);
-
- if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
- (jrp->entinfo == NULL)) {
- dev_err(dev, "can't allocate job rings for %d\n",
- jrp->ridx);
- return -ENOMEM;
- }
+ if (!jrp->entinfo)
+ goto out_free_outring;
for (i = 0; i < JOBR_DEPTH; i++)
jrp->entinfo[i].desc_addr_dma = !0;
@@ -434,6 +432,19 @@ static int caam_jr_init(struct device *dev)
(JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
return 0;
+
+out_free_outring:
+ dma_free_coherent(dev, sizeof(struct jr_outentry) * JOBR_DEPTH,
+ jrp->outring, outbusaddr);
+out_free_inpring:
+ dma_free_coherent(dev, sizeof(dma_addr_t) * JOBR_DEPTH,
+ jrp->inpring, inpbusaddr);
+ dev_err(dev, "can't allocate job rings for %d\n", jrp->ridx);
+out_free_irq:
+ free_irq(jrp->irq, dev);
+out_kill_deq:
+ tasklet_kill(&jrp->irqtask);
+ return error;
}
@@ -484,8 +495,10 @@ static int caam_jr_probe(struct platform_device *pdev)
/* Now do the platform independent part */
error = caam_jr_init(jrdev); /* now turn on hardware */
- if (error)
+ if (error) {
+ irq_dispose_mapping(jrpriv->irq);
return error;
+ }
jrpriv->dev = jrdev;
spin_lock(&driver_data.jr_alloc_lock);
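
caam_jr_init() used to return on the first failure without undoing earlier steps, leaking the IRQ, the tasklet and any rings already allocated, and it only checked the three ring allocations after attempting all of them. The rewrite uses the conventional goto ladder: every label releases exactly what succeeded before the jump, in reverse order, and the probe path now disposes of the IRQ mapping itself when init fails. A self-contained sketch of the ladder, with hypothetical names:

	#include <linux/slab.h>

	static int example_init(struct example_priv *p)
	{
		int err = -ENOMEM;

		p->ring_a = kzalloc(EXAMPLE_RING_SZ, GFP_KERNEL);
		if (!p->ring_a)
			goto out;
		p->ring_b = kzalloc(EXAMPLE_RING_SZ, GFP_KERNEL);
		if (!p->ring_b)
			goto out_free_a;
		p->info = kzalloc(EXAMPLE_INFO_SZ, GFP_KERNEL);
		if (!p->info)
			goto out_free_b;
		return 0;

	out_free_b:
		kfree(p->ring_b);
	out_free_a:
		kfree(p->ring_a);
	out:
		return err;
	}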
diff --git a/drivers/crypto/caam/sg_sw_sec4.h b/drivers/crypto/caam/sg_sw_sec4.h
index ce28a56..3b91821 100644
--- a/drivers/crypto/caam/sg_sw_sec4.h
+++ b/drivers/crypto/caam/sg_sw_sec4.h
@@ -37,7 +37,7 @@ sg_to_sec4_sg(struct scatterlist *sg, int sg_count,
dma_to_sec4_sg_one(sec4_sg_ptr, sg_dma_address(sg),
sg_dma_len(sg), offset);
sec4_sg_ptr++;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
sg_count--;
}
return sec4_sg_ptr - 1;
@@ -67,7 +67,7 @@ static inline int __sg_count(struct scatterlist *sg_list, int nbytes,
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0)
*chained = true;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return sg_nents;
@@ -93,7 +93,7 @@ static int dma_map_sg_chained(struct device *dev, struct scatterlist *sg,
int i;
for (i = 0; i < nents; i++) {
dma_map_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
} else {
dma_map_sg(dev, sg, nents, dir);
@@ -109,7 +109,7 @@ static int dma_unmap_sg_chained(struct device *dev, struct scatterlist *sg,
int i;
for (i = 0; i < nents; i++) {
dma_unmap_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
} else {
dma_unmap_sg(dev, sg, nents, dir);
diff --git a/drivers/crypto/ccp/ccp-dev.c b/drivers/crypto/ccp/ccp-dev.c
index c6e6171..ca29c12 100644
--- a/drivers/crypto/ccp/ccp-dev.c
+++ b/drivers/crypto/ccp/ccp-dev.c
@@ -583,6 +583,7 @@ bool ccp_queues_suspended(struct ccp_device *ccp)
#ifdef CONFIG_X86
static const struct x86_cpu_id ccp_support[] = {
{ X86_VENDOR_AMD, 22, },
+ { },
};
#endif
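
x86_match_cpu() scans the table until it reaches an all-zero sentinel entry; without the added { } terminator the walk runs off the end of the array into whatever memory follows it. The complete pattern:

	#include <asm/cpu_device_id.h>

	/* Sketch: cpu-id match tables must end with an empty sentinel. */
	static const struct x86_cpu_id example_ids[] = {
		{ X86_VENDOR_AMD, 22, },	/* family 16h */
		{ },				/* terminator */
	};

	static int example_check_cpu(void)
	{
		return x86_match_cpu(example_ids) ? 0 : -ENODEV;
	}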
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index f757a0f..48f4535 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -784,7 +784,7 @@ static struct buffer_desc *chainup_buffers(struct device *dev,
struct buffer_desc *buf, gfp_t flags,
enum dma_data_direction dir)
{
- for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
+ for (; nbytes > 0; sg = sg_next(sg)) {
unsigned len = min(nbytes, sg->length);
struct buffer_desc *next_buf;
u32 next_buf_phys;
@@ -982,7 +982,7 @@ static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
break;
offset += sg->length;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return (start + nbytes > offset + sg->length);
}
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index a392465..1da6dc5 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -177,7 +177,7 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
break;
offset += sg_src->length;
- sg_src = scatterwalk_sg_next(sg_src);
+ sg_src = sg_next(sg_src);
}
/* start - offset is the number of bytes to advance in the scatterlist
@@ -187,9 +187,9 @@ struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst,
while (len && (nx_sg - nx_dst) < sglen) {
n = scatterwalk_clamp(&walk, len);
if (!n) {
- /* In cases where we have scatterlist chain scatterwalk_sg_next
+ /* In cases where we have scatterlist chain sg_next
 * handles it properly */
- scatterwalk_start(&walk, scatterwalk_sg_next(walk.sg));
+ scatterwalk_start(&walk, sg_next(walk.sg));
n = scatterwalk_clamp(&walk, len);
}
dst = scatterwalk_map(&walk);
diff --git a/drivers/crypto/omap-aes.c b/drivers/crypto/omap-aes.c
index f79dd41..42f95a4 100644
--- a/drivers/crypto/omap-aes.c
+++ b/drivers/crypto/omap-aes.c
@@ -994,7 +994,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
scatterwalk_advance(&dd->in_walk, 4);
if (dd->in_sg->length == _calc_walked(in)) {
- dd->in_sg = scatterwalk_sg_next(dd->in_sg);
+ dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
scatterwalk_start(&dd->in_walk,
dd->in_sg);
@@ -1026,7 +1026,7 @@ static irqreturn_t omap_aes_irq(int irq, void *dev_id)
*dst = omap_aes_read(dd, AES_REG_DATA_N(dd, i));
scatterwalk_advance(&dd->out_walk, 4);
if (dd->out_sg->length == _calc_walked(out)) {
- dd->out_sg = scatterwalk_sg_next(dd->out_sg);
+ dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
scatterwalk_start(&dd->out_walk,
dd->out_sg);
diff --git a/drivers/crypto/omap-des.c b/drivers/crypto/omap-des.c
index e350f5b..4630709 100644
--- a/drivers/crypto/omap-des.c
+++ b/drivers/crypto/omap-des.c
@@ -921,7 +921,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
scatterwalk_advance(&dd->in_walk, 4);
if (dd->in_sg->length == _calc_walked(in)) {
- dd->in_sg = scatterwalk_sg_next(dd->in_sg);
+ dd->in_sg = sg_next(dd->in_sg);
if (dd->in_sg) {
scatterwalk_start(&dd->in_walk,
dd->in_sg);
@@ -953,7 +953,7 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
*dst = omap_des_read(dd, DES_REG_DATA_N(dd, i));
scatterwalk_advance(&dd->out_walk, 4);
if (dd->out_sg->length == _calc_walked(out)) {
- dd->out_sg = scatterwalk_sg_next(dd->out_sg);
+ dd->out_sg = sg_next(dd->out_sg);
if (dd->out_sg) {
scatterwalk_start(&dd->out_walk,
dd->out_sg);
@@ -965,9 +965,9 @@ static irqreturn_t omap_des_irq(int irq, void *dev_id)
}
}
- dd->total -= DES_BLOCK_SIZE;
+ BUG_ON(dd->total < DES_BLOCK_SIZE);
- BUG_ON(dd->total < 0);
+ dd->total -= DES_BLOCK_SIZE;
/* Clear IRQ status */
status &= ~DES_REG_IRQ_DATA_OUT;
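
dd->total is unsigned, so the old order subtracted first and then tested dd->total < 0, a comparison that can never be true; an underflow would silently wrap to a huge value instead. Testing the invariant before mutating catches exactly the case the BUG_ON was meant to catch:

	/* Sketch: with an unsigned counter, validate before subtracting;
	 * "total < 0" after the fact is dead code. */
	BUG_ON(total < DES_BLOCK_SIZE);
	total -= DES_BLOCK_SIZE;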
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 2ed4256..19c0efa 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -47,7 +47,6 @@
#ifndef ADF_ACCEL_DEVICES_H_
#define ADF_ACCEL_DEVICES_H_
#include <linux/module.h>
-#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/io.h>
@@ -148,6 +147,11 @@ struct adf_hw_device_data {
int (*alloc_irq)(struct adf_accel_dev *accel_dev);
void (*free_irq)(struct adf_accel_dev *accel_dev);
void (*enable_error_correction)(struct adf_accel_dev *accel_dev);
+ int (*init_admin_comms)(struct adf_accel_dev *accel_dev);
+ void (*exit_admin_comms)(struct adf_accel_dev *accel_dev);
+ int (*init_arb)(struct adf_accel_dev *accel_dev);
+ void (*exit_arb)(struct adf_accel_dev *accel_dev);
+ void (*enable_ints)(struct adf_accel_dev *accel_dev);
const char *fw_name;
uint32_t pci_dev_id;
uint32_t fuses;
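
The five new members extend the per-device ops table so the common init code can drive hardware-specific setup without knowing the device generation. The init/exit pairs are optional hooks while enable_ints is mandatory, which is exactly how the adf_init.c hunks later in this diff call them:

	/* Sketch of the call pattern: a NULL hook means "not needed on
	 * this hardware", so optional hooks are guarded before the call. */
	if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev))
		return -EFAULT;

	if (hw_data->init_arb && hw_data->init_arb(accel_dev))
		return -EFAULT;

	hw_data->enable_ints(accel_dev);	/* mandatory on every device */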
diff --git a/drivers/crypto/qat/qat_common/adf_aer.c b/drivers/crypto/qat/qat_common/adf_aer.c
index 10ce4a2..fa1fef8 100644
--- a/drivers/crypto/qat/qat_common/adf_aer.c
+++ b/drivers/crypto/qat/qat_common/adf_aer.c
@@ -82,28 +82,15 @@ struct adf_reset_dev_data {
struct work_struct reset_work;
};
-#define PPDSTAT_OFFSET 0x7E
static void adf_dev_restore(struct adf_accel_dev *accel_dev)
{
struct pci_dev *pdev = accel_to_pci_dev(accel_dev);
struct pci_dev *parent = pdev->bus->self;
- uint16_t ppdstat = 0, bridge_ctl = 0;
- int pending = 0;
+ uint16_t bridge_ctl = 0;
pr_info("QAT: Resetting device qat_dev%d\n", accel_dev->accel_id);
- pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
- pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
- if (pending) {
- int ctr = 0;
-
- do {
- msleep(100);
- pci_read_config_word(pdev, PPDSTAT_OFFSET, &ppdstat);
- pending = ppdstat & PCI_EXP_DEVSTA_TRPND;
- } while (pending && ctr++ < 10);
- }
- if (pending)
+ if (!pci_wait_for_pending_transaction(pdev))
pr_info("QAT: Transaction still in progress. Proceeding\n");
pci_read_config_word(parent, PCI_BRIDGE_CONTROL, &bridge_ctl);
@@ -125,8 +112,9 @@ static void adf_device_reset_worker(struct work_struct *work)
adf_dev_restarting_notify(accel_dev);
adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
adf_dev_restore(accel_dev);
- if (adf_dev_start(accel_dev)) {
+ if (adf_dev_init(accel_dev) || adf_dev_start(accel_dev)) {
/* The device hanged and we can't restart it so stop here */
dev_err(&GET_DEV(accel_dev), "Restart device failed\n");
kfree(reset_data);
@@ -148,8 +136,8 @@ static int adf_dev_aer_schedule_reset(struct adf_accel_dev *accel_dev,
{
struct adf_reset_dev_data *reset_data;
- if (adf_dev_started(accel_dev) &&
- !test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
+ if (!adf_dev_started(accel_dev) ||
+ test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
return 0;
set_bit(ADF_STATUS_RESTARTING, &accel_dev->status);
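
Three fixes in this file: the open-coded poll of a device-specific config offset is replaced by the PCI core helper pci_wait_for_pending_transaction(), which polls PCI_EXP_DEVSTA_TRPND through the standard express capability and returns nonzero once no transactions remain (zero on timeout); the reset worker now runs the full stop/shutdown and init/start sequence around the restore; and the guard in adf_dev_aer_schedule_reset() is corrected to bail out when the device is not started or is already restarting, rather than the inverse. Helper usage in brief:

	#include <linux/pci.h>

	/* Sketch: drain outstanding PCIe transactions before a reset. */
	if (!pci_wait_for_pending_transaction(pdev))
		dev_warn(&pdev->dev,
			 "transactions still pending, resetting anyway\n");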
diff --git a/drivers/crypto/qat/qat_common/adf_cfg.c b/drivers/crypto/qat/qat_common/adf_cfg.c
index aba7f1d..de16da9 100644
--- a/drivers/crypto/qat/qat_common/adf_cfg.c
+++ b/drivers/crypto/qat/qat_common/adf_cfg.c
@@ -50,6 +50,7 @@
#include <linux/seq_file.h>
#include "adf_accel_devices.h"
#include "adf_cfg.h"
+#include "adf_common_drv.h"
static DEFINE_MUTEX(qat_cfg_read_lock);
@@ -159,6 +160,7 @@ void adf_cfg_del_all(struct adf_accel_dev *accel_dev)
down_write(&dev_cfg_data->lock);
adf_cfg_section_del_all(&dev_cfg_data->sec_list);
up_write(&dev_cfg_data->lock);
+ clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
}
/**
diff --git a/drivers/crypto/qat/qat_common/adf_common_drv.h b/drivers/crypto/qat/qat_common/adf_common_drv.h
index 5e8f9d4..a62e485 100644
--- a/drivers/crypto/qat/qat_common/adf_common_drv.h
+++ b/drivers/crypto/qat/qat_common/adf_common_drv.h
@@ -93,7 +93,7 @@ int adf_service_unregister(struct service_hndl *service);
int adf_dev_init(struct adf_accel_dev *accel_dev);
int adf_dev_start(struct adf_accel_dev *accel_dev);
int adf_dev_stop(struct adf_accel_dev *accel_dev);
-int adf_dev_shutdown(struct adf_accel_dev *accel_dev);
+void adf_dev_shutdown(struct adf_accel_dev *accel_dev);
int adf_ctl_dev_register(void);
void adf_ctl_dev_unregister(void);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 7ee93f8..74207a6 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -282,6 +282,8 @@ static int adf_ctl_stop_devices(uint32_t id)
if (adf_dev_stop(accel_dev)) {
pr_err("QAT: Failed to stop qat_dev%d\n", id);
ret = -EFAULT;
+ } else {
+ adf_dev_shutdown(accel_dev);
}
}
}
@@ -343,7 +345,9 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
if (!adf_dev_started(accel_dev)) {
pr_info("QAT: Starting acceleration device qat_dev%d.\n",
ctl_data->device_id);
- ret = adf_dev_start(accel_dev);
+ ret = adf_dev_init(accel_dev);
+ if (!ret)
+ ret = adf_dev_start(accel_dev);
} else {
pr_info("QAT: Acceleration device qat_dev%d already started.\n",
ctl_data->device_id);
@@ -351,6 +355,7 @@ static int adf_ctl_ioctl_dev_start(struct file *fp, unsigned int cmd,
if (ret) {
pr_err("QAT: Failed to start qat_dev%d\n", ctl_data->device_id);
adf_dev_stop(accel_dev);
+ adf_dev_shutdown(accel_dev);
}
out:
kfree(ctl_data);
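
With initialization split out of adf_dev_start() and teardown split out of adf_dev_stop() (see the adf_init.c diff below), every control path must keep the calls paired: init then start to bring a device up, stop then shutdown to take it down, and a failed bring-up rolled back with both teardown calls:

	/* Sketch: the paired lifecycle after the split. */
	ret = adf_dev_init(accel_dev);
	if (!ret)
		ret = adf_dev_start(accel_dev);
	if (ret) {
		adf_dev_stop(accel_dev);
		adf_dev_shutdown(accel_dev);
	}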
diff --git a/drivers/crypto/qat/qat_common/adf_init.c b/drivers/crypto/qat/qat_common/adf_init.c
index 5c0e47a..8f0ca49 100644
--- a/drivers/crypto/qat/qat_common/adf_init.c
+++ b/drivers/crypto/qat/qat_common/adf_init.c
@@ -108,26 +108,47 @@ int adf_service_unregister(struct service_hndl *service)
EXPORT_SYMBOL_GPL(adf_service_unregister);
/**
- * adf_dev_start() - Start acceleration service for the given accel device
- * @accel_dev: Pointer to acceleration device.
+ * adf_dev_init() - Init data structures and services for the given accel device
+ * @accel_dev: Pointer to acceleration device.
*
- * Function notifies all the registered services that the acceleration device
- * is ready to be used.
- * To be used by QAT device specific drivers.
+ * Initialize the ring data structures and the admin comms and arbitration
+ * services.
*
 * Return: 0 on success, error code otherwise.
*/
-int adf_dev_start(struct adf_accel_dev *accel_dev)
+int adf_dev_init(struct adf_accel_dev *accel_dev)
{
struct service_hndl *service;
struct list_head *list_itr;
struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ if (!hw_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to init device - hw_data not set\n");
+ return -EFAULT;
+ }
+
if (!test_bit(ADF_STATUS_CONFIGURED, &accel_dev->status)) {
pr_info("QAT: Device not configured\n");
return -EFAULT;
}
- set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
+ if (adf_init_etr_data(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed initialize etr\n");
+ return -EFAULT;
+ }
+
+ if (hw_data->init_admin_comms && hw_data->init_admin_comms(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed initialize admin comms\n");
+ return -EFAULT;
+ }
+
+ if (hw_data->init_arb && hw_data->init_arb(accel_dev)) {
+ dev_err(&GET_DEV(accel_dev), "Failed initialize hw arbiter\n");
+ return -EFAULT;
+ }
+
+ hw_data->enable_ints(accel_dev);
if (adf_ae_init(accel_dev)) {
pr_err("QAT: Failed to initialise Acceleration Engine\n");
@@ -178,6 +199,27 @@ int adf_dev_start(struct adf_accel_dev *accel_dev)
hw_data->enable_error_correction(accel_dev);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_init);
+
+/**
+ * adf_dev_start() - Start acceleration service for the given accel device
+ * @accel_dev: Pointer to acceleration device.
+ *
+ * Function notifies all the registered services that the acceleration device
+ * is ready to be used.
+ * To be used by QAT device specific drivers.
+ *
+ * Return: 0 on success, error code otherwise.
+ */
+int adf_dev_start(struct adf_accel_dev *accel_dev)
+{
+ struct service_hndl *service;
+ struct list_head *list_itr;
+
+ set_bit(ADF_STATUS_STARTING, &accel_dev->status);
+
if (adf_ae_start(accel_dev)) {
pr_err("QAT: AE Start Failed\n");
return -EFAULT;
@@ -232,16 +274,15 @@ EXPORT_SYMBOL_GPL(adf_dev_start);
*/
int adf_dev_stop(struct adf_accel_dev *accel_dev)
{
- struct adf_hw_device_data *hw_data = accel_dev->hw_device;
struct service_hndl *service;
struct list_head *list_itr;
- int ret, wait = 0;
+ bool wait = false;
+ int ret;
if (!adf_dev_started(accel_dev) &&
!test_bit(ADF_STATUS_STARTING, &accel_dev->status)) {
return 0;
}
- clear_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
clear_bit(ADF_STATUS_STARTING, &accel_dev->status);
clear_bit(ADF_STATUS_STARTED, &accel_dev->status);
@@ -258,7 +299,7 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
if (!ret) {
clear_bit(accel_dev->accel_id, &service->start_status);
} else if (ret == -EAGAIN) {
- wait = 1;
+ wait = true;
clear_bit(accel_dev->accel_id, &service->start_status);
}
}
@@ -278,13 +319,36 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
if (wait)
msleep(100);
- if (adf_dev_started(accel_dev)) {
+ if (test_bit(ADF_STATUS_AE_STARTED, &accel_dev->status)) {
if (adf_ae_stop(accel_dev))
pr_err("QAT: failed to stop AE\n");
else
clear_bit(ADF_STATUS_AE_STARTED, &accel_dev->status);
}
+ return 0;
+}
+EXPORT_SYMBOL_GPL(adf_dev_stop);
+
+/**
+ * adf_dev_shutdown() - shutdown acceleration services and data structures
+ * @accel_dev: Pointer to acceleration device
+ *
+ * Cleanup the ring data structures and the admin comms and arbitration
+ * services.
+ */
+void adf_dev_shutdown(struct adf_accel_dev *accel_dev)
+{
+ struct adf_hw_device_data *hw_data = accel_dev->hw_device;
+ struct service_hndl *service;
+ struct list_head *list_itr;
+
+ if (!hw_data) {
+ dev_err(&GET_DEV(accel_dev),
+ "QAT: Failed to shutdown device - hw_data not set\n");
+ return;
+ }
+
if (test_bit(ADF_STATUS_AE_UCODE_LOADED, &accel_dev->status)) {
if (adf_ae_fw_release(accel_dev))
pr_err("QAT: Failed to release the ucode\n");
@@ -335,9 +399,15 @@ int adf_dev_stop(struct adf_accel_dev *accel_dev)
if (!test_bit(ADF_STATUS_RESTARTING, &accel_dev->status))
adf_cfg_del_all(accel_dev);
- return 0;
+ if (hw_data->exit_arb)
+ hw_data->exit_arb(accel_dev);
+
+ if (hw_data->exit_admin_comms)
+ hw_data->exit_admin_comms(accel_dev);
+
+ adf_cleanup_etr_data(accel_dev);
}
-EXPORT_SYMBOL_GPL(adf_dev_stop);
+EXPORT_SYMBOL_GPL(adf_dev_shutdown);
int adf_dev_restarting_notify(struct adf_accel_dev *accel_dev)
{
diff --git a/drivers/crypto/qat/qat_common/adf_transport_internal.h b/drivers/crypto/qat/qat_common/adf_transport_internal.h
index c405460..a486962 100644
--- a/drivers/crypto/qat/qat_common/adf_transport_internal.h
+++ b/drivers/crypto/qat/qat_common/adf_transport_internal.h
@@ -48,7 +48,6 @@
#define ADF_TRANSPORT_INTRN_H
#include <linux/interrupt.h>
-#include <linux/atomic.h>
#include <linux/spinlock_types.h>
#include "adf_transport.h"
diff --git a/drivers/crypto/qat/qat_common/icp_qat_hw.h b/drivers/crypto/qat/qat_common/icp_qat_hw.h
index 5031f8c..68f191b 100644
--- a/drivers/crypto/qat/qat_common/icp_qat_hw.h
+++ b/drivers/crypto/qat/qat_common/icp_qat_hw.h
@@ -301,5 +301,5 @@ struct icp_qat_hw_cipher_aes256_f8 {
struct icp_qat_hw_cipher_algo_blk {
struct icp_qat_hw_cipher_aes256_f8 aes;
-};
+} __aligned(64);
#endif
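
The cipher content descriptor is handed to the accelerator by DMA address, and qat_algs.c already marks struct qat_alg_cd __aligned(64), so the raw cipher block gets the same treatment to keep every descriptor on a 64-byte boundary. The attribute raises the type's minimum alignment and pads its size to a multiple of it:

	/* Sketch: static and on-stack instances of the type now start on
	 * a 64-byte boundary and sizeof() is padded to a multiple of 64;
	 * heap allocations still depend on the allocator's alignment. */
	struct example_desc {
		u8 bytes[40];
	} __aligned(64);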
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 19eea1c..1dc5b0a 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -63,15 +63,15 @@
#include "icp_qat_fw.h"
#include "icp_qat_fw_la.h"
-#define QAT_AES_HW_CONFIG_ENC(alg) \
+#define QAT_AES_HW_CONFIG_CBC_ENC(alg) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_NO_CONVERT, \
- ICP_QAT_HW_CIPHER_ENCRYPT)
+ ICP_QAT_HW_CIPHER_NO_CONVERT, \
+ ICP_QAT_HW_CIPHER_ENCRYPT)
-#define QAT_AES_HW_CONFIG_DEC(alg) \
+#define QAT_AES_HW_CONFIG_CBC_DEC(alg) \
ICP_QAT_HW_CIPHER_CONFIG_BUILD(ICP_QAT_HW_CIPHER_CBC_MODE, alg, \
- ICP_QAT_HW_CIPHER_KEY_CONVERT, \
- ICP_QAT_HW_CIPHER_DECRYPT)
+ ICP_QAT_HW_CIPHER_KEY_CONVERT, \
+ ICP_QAT_HW_CIPHER_DECRYPT)
static atomic_t active_dev;
@@ -102,25 +102,31 @@ struct qat_alg_cd {
};
} __aligned(64);
-#define MAX_AUTH_STATE_SIZE sizeof(struct icp_qat_hw_auth_algo_blk)
-
-struct qat_auth_state {
- uint8_t data[MAX_AUTH_STATE_SIZE + 64];
-} __aligned(64);
-
-struct qat_alg_session_ctx {
+struct qat_alg_aead_ctx {
struct qat_alg_cd *enc_cd;
- dma_addr_t enc_cd_paddr;
struct qat_alg_cd *dec_cd;
+ dma_addr_t enc_cd_paddr;
dma_addr_t dec_cd_paddr;
- struct icp_qat_fw_la_bulk_req enc_fw_req_tmpl;
- struct icp_qat_fw_la_bulk_req dec_fw_req_tmpl;
- struct qat_crypto_instance *inst;
- struct crypto_tfm *tfm;
+ struct icp_qat_fw_la_bulk_req enc_fw_req;
+ struct icp_qat_fw_la_bulk_req dec_fw_req;
struct crypto_shash *hash_tfm;
enum icp_qat_hw_auth_algo qat_hash_alg;
+ struct qat_crypto_instance *inst;
+ struct crypto_tfm *tfm;
uint8_t salt[AES_BLOCK_SIZE];
- spinlock_t lock; /* protects qat_alg_session_ctx struct */
+ spinlock_t lock; /* protects qat_alg_aead_ctx struct */
+};
+
+struct qat_alg_ablkcipher_ctx {
+ struct icp_qat_hw_cipher_algo_blk *enc_cd;
+ struct icp_qat_hw_cipher_algo_blk *dec_cd;
+ dma_addr_t enc_cd_paddr;
+ dma_addr_t dec_cd_paddr;
+ struct icp_qat_fw_la_bulk_req enc_fw_req;
+ struct icp_qat_fw_la_bulk_req dec_fw_req;
+ struct qat_crypto_instance *inst;
+ struct crypto_tfm *tfm;
+ spinlock_t lock; /* protects qat_alg_ablkcipher_ctx struct */
};
static int get_current_node(void)
@@ -144,43 +150,37 @@ static int qat_get_inter_state_size(enum icp_qat_hw_auth_algo qat_hash_alg)
}
static int qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash,
- struct qat_alg_session_ctx *ctx,
+ struct qat_alg_aead_ctx *ctx,
const uint8_t *auth_key,
unsigned int auth_keylen)
{
- struct qat_auth_state auth_state;
SHASH_DESC_ON_STACK(shash, ctx->hash_tfm);
struct sha1_state sha1;
struct sha256_state sha256;
struct sha512_state sha512;
int block_size = crypto_shash_blocksize(ctx->hash_tfm);
int digest_size = crypto_shash_digestsize(ctx->hash_tfm);
- uint8_t *ipad = auth_state.data;
- uint8_t *opad = ipad + block_size;
+ char ipad[block_size];
+ char opad[block_size];
__be32 *hash_state_out;
__be64 *hash512_state_out;
int i, offset;
- memzero_explicit(auth_state.data, MAX_AUTH_STATE_SIZE + 64);
+ memset(ipad, 0, block_size);
+ memset(opad, 0, block_size);
shash->tfm = ctx->hash_tfm;
shash->flags = 0x0;
if (auth_keylen > block_size) {
- char buff[SHA512_BLOCK_SIZE];
int ret = crypto_shash_digest(shash, auth_key,
- auth_keylen, buff);
+ auth_keylen, ipad);
if (ret)
return ret;
- memcpy(ipad, buff, digest_size);
- memcpy(opad, buff, digest_size);
- memzero_explicit(ipad + digest_size, block_size - digest_size);
- memzero_explicit(opad + digest_size, block_size - digest_size);
+ memcpy(opad, ipad, digest_size);
} else {
memcpy(ipad, auth_key, auth_keylen);
memcpy(opad, auth_key, auth_keylen);
- memzero_explicit(ipad + auth_keylen, block_size - auth_keylen);
- memzero_explicit(opad + auth_keylen, block_size - auth_keylen);
}
for (i = 0; i < block_size; i++) {
@@ -267,8 +267,6 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
header->comn_req_flags =
ICP_QAT_FW_COMN_FLAGS_BUILD(QAT_COMN_CD_FLD_TYPE_64BIT_ADR,
QAT_COMN_PTR_TYPE_SGL);
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
- ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
ICP_QAT_FW_LA_PARTIAL_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_PARTIAL_NONE);
ICP_QAT_FW_LA_CIPH_IV_FLD_FLAG_SET(header->serv_specif_flags,
@@ -279,8 +277,9 @@ static void qat_alg_init_common_hdr(struct icp_qat_fw_comn_req_hdr *header)
ICP_QAT_FW_LA_NO_UPDATE_STATE);
}
-static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
- int alg, struct crypto_authenc_keys *keys)
+static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx,
+ int alg,
+ struct crypto_authenc_keys *keys)
{
struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
@@ -289,7 +288,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
struct icp_qat_hw_auth_algo_blk *hash =
(struct icp_qat_hw_auth_algo_blk *)((char *)enc_ctx +
sizeof(struct icp_qat_hw_auth_setup) + keys->enckeylen);
- struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req_tmpl;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
void *ptr = &req_tmpl->cd_ctrl;
@@ -297,7 +296,7 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
struct icp_qat_fw_auth_cd_ctrl_hdr *hash_cd_ctrl = ptr;
/* CD setup */
- cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_ENC(alg);
+ cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
hash->sha.inner_setup.auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
@@ -311,6 +310,8 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
/* Request setup */
qat_alg_init_common_hdr(header);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER_HASH;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
@@ -356,8 +357,9 @@ static int qat_alg_init_enc_session(struct qat_alg_session_ctx *ctx,
return 0;
}
-static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
- int alg, struct crypto_authenc_keys *keys)
+static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx,
+ int alg,
+ struct crypto_authenc_keys *keys)
{
struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm);
unsigned int digestsize = crypto_aead_crt(aead_tfm)->authsize;
@@ -367,7 +369,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
(struct icp_qat_hw_cipher_algo_blk *)((char *)dec_ctx +
sizeof(struct icp_qat_hw_auth_setup) +
roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2);
- struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req_tmpl;
+ struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req;
struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req_tmpl->cd_pars;
struct icp_qat_fw_comn_req_hdr *header = &req_tmpl->comn_hdr;
void *ptr = &req_tmpl->cd_ctrl;
@@ -379,7 +381,7 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
sizeof(struct icp_qat_fw_la_cipher_req_params));
/* CD setup */
- cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_DEC(alg);
+ cipher->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
memcpy(cipher->aes.key, keys->enckey, keys->enckeylen);
hash->sha.inner_setup.auth_config.config =
ICP_QAT_HW_AUTH_CONFIG_BUILD(ICP_QAT_HW_AUTH_MODE1,
@@ -394,6 +396,8 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
/* Request setup */
qat_alg_init_common_hdr(header);
header->service_cmd_id = ICP_QAT_FW_LA_CMD_HASH_CIPHER;
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER_SET(header->serv_specif_flags,
+ ICP_QAT_FW_LA_DIGEST_IN_BUFFER);
ICP_QAT_FW_LA_RET_AUTH_SET(header->serv_specif_flags,
ICP_QAT_FW_LA_NO_RET_AUTH_RES);
ICP_QAT_FW_LA_CMP_AUTH_SET(header->serv_specif_flags,
@@ -444,36 +448,91 @@ static int qat_alg_init_dec_session(struct qat_alg_session_ctx *ctx,
return 0;
}
-static int qat_alg_init_sessions(struct qat_alg_session_ctx *ctx,
- const uint8_t *key, unsigned int keylen)
+static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx,
+ struct icp_qat_fw_la_bulk_req *req,
+ struct icp_qat_hw_cipher_algo_blk *cd,
+ const uint8_t *key, unsigned int keylen)
{
- struct crypto_authenc_keys keys;
- int alg;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+ struct icp_qat_fw_comn_req_hdr *header = &req->comn_hdr;
+ struct icp_qat_fw_cipher_cd_ctrl_hdr *cd_ctrl = (void *)&req->cd_ctrl;
- if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
- return -EFAULT;
+ memcpy(cd->aes.key, key, keylen);
+ qat_alg_init_common_hdr(header);
+ header->service_cmd_id = ICP_QAT_FW_LA_CMD_CIPHER;
+ cd_pars->u.s.content_desc_params_sz =
+ sizeof(struct icp_qat_hw_cipher_algo_blk) >> 3;
+ /* Cipher CD config setup */
+ cd_ctrl->cipher_key_sz = keylen >> 3;
+ cd_ctrl->cipher_state_sz = AES_BLOCK_SIZE >> 3;
+ cd_ctrl->cipher_cfg_offset = 0;
+ ICP_QAT_FW_COMN_CURR_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_CIPHER);
+ ICP_QAT_FW_COMN_NEXT_ID_SET(cd_ctrl, ICP_QAT_FW_SLICE_DRAM_WR);
+}
- if (crypto_authenc_extractkeys(&keys, key, keylen))
- goto bad_key;
+static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd;
+ struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+ qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen);
+ cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr;
+ enc_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_ENC(alg);
+}
- switch (keys.enckeylen) {
+static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx,
+ int alg, const uint8_t *key,
+ unsigned int keylen)
+{
+ struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd;
+ struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req;
+ struct icp_qat_fw_comn_req_hdr_cd_pars *cd_pars = &req->cd_pars;
+
+ qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen);
+ cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr;
+ dec_cd->aes.cipher_config.val = QAT_AES_HW_CONFIG_CBC_DEC(alg);
+}
+
+static int qat_alg_validate_key(int key_len, int *alg)
+{
+ switch (key_len) {
case AES_KEYSIZE_128:
- alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES128;
break;
case AES_KEYSIZE_192:
- alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES192;
break;
case AES_KEYSIZE_256:
- alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
+ *alg = ICP_QAT_HW_CIPHER_ALGO_AES256;
break;
default:
- goto bad_key;
+ return -EINVAL;
}
+ return 0;
+}
- if (qat_alg_init_enc_session(ctx, alg, &keys))
+static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx,
+ const uint8_t *key, unsigned int keylen)
+{
+ struct crypto_authenc_keys keys;
+ int alg;
+
+ if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE))
+ return -EFAULT;
+
+ if (crypto_authenc_extractkeys(&keys, key, keylen))
+ goto bad_key;
+
+ if (qat_alg_validate_key(keys.enckeylen, &alg))
+ goto bad_key;
+
+ if (qat_alg_aead_init_enc_session(ctx, alg, &keys))
goto error;
- if (qat_alg_init_dec_session(ctx, alg, &keys))
+ if (qat_alg_aead_init_dec_session(ctx, alg, &keys))
goto error;
return 0;
@@ -484,22 +543,37 @@ error:
return -EFAULT;
}
-static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
- unsigned int keylen)
+static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx,
+ const uint8_t *key,
+ unsigned int keylen)
+{
+ int alg;
+
+ if (qat_alg_validate_key(keylen, &alg))
+ goto bad_key;
+
+ qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen);
+ qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen);
+ return 0;
+bad_key:
+ crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ return -EINVAL;
+}
+
+static int qat_alg_aead_setkey(struct crypto_aead *tfm, const uint8_t *key,
+ unsigned int keylen)
{
- struct qat_alg_session_ctx *ctx = crypto_aead_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm);
struct device *dev;
spin_lock(&ctx->lock);
if (ctx->enc_cd) {
/* rekeying */
dev = &GET_DEV(ctx->inst->accel_dev);
- memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
- memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
- memzero_explicit(&ctx->enc_fw_req_tmpl,
- sizeof(struct icp_qat_fw_la_bulk_req));
- memzero_explicit(&ctx->dec_fw_req_tmpl,
- sizeof(struct icp_qat_fw_la_bulk_req));
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
} else {
/* new key */
int node = get_current_node();
@@ -512,16 +586,14 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
dev = &GET_DEV(inst->accel_dev);
ctx->inst = inst;
- ctx->enc_cd = dma_zalloc_coherent(dev,
- sizeof(struct qat_alg_cd),
+ ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
&ctx->enc_cd_paddr,
GFP_ATOMIC);
if (!ctx->enc_cd) {
spin_unlock(&ctx->lock);
return -ENOMEM;
}
- ctx->dec_cd = dma_zalloc_coherent(dev,
- sizeof(struct qat_alg_cd),
+ ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
&ctx->dec_cd_paddr,
GFP_ATOMIC);
if (!ctx->dec_cd) {
@@ -530,18 +602,18 @@ static int qat_alg_setkey(struct crypto_aead *tfm, const uint8_t *key,
}
}
spin_unlock(&ctx->lock);
- if (qat_alg_init_sessions(ctx, key, keylen))
+ if (qat_alg_aead_init_sessions(ctx, key, keylen))
goto out_free_all;
return 0;
out_free_all:
- memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
+ memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
ctx->dec_cd = NULL;
out_free_enc:
- memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
+ memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
ctx->enc_cd = NULL;
@@ -557,7 +629,8 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
dma_addr_t blp = qat_req->buf.blp;
dma_addr_t blpout = qat_req->buf.bloutp;
size_t sz = qat_req->buf.sz;
- int i, bufs = bl->num_bufs;
+ size_t sz_out = qat_req->buf.sz_out;
+ int i;
for (i = 0; i < bl->num_bufs; i++)
dma_unmap_single(dev, bl->bufers[i].addr,
@@ -567,14 +640,14 @@ static void qat_alg_free_bufl(struct qat_crypto_instance *inst,
kfree(bl);
if (blp != blpout) {
/* If out of place operation dma unmap only data */
- int bufless = bufs - blout->num_mapped_bufs;
+ int bufless = blout->num_bufs - blout->num_mapped_bufs;
- for (i = bufless; i < bufs; i++) {
+ for (i = bufless; i < blout->num_bufs; i++) {
dma_unmap_single(dev, blout->bufers[i].addr,
blout->bufers[i].len,
DMA_BIDIRECTIONAL);
}
- dma_unmap_single(dev, blpout, sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, blpout, sz_out, DMA_TO_DEVICE);
kfree(blout);
}
}
@@ -587,19 +660,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
struct qat_crypto_request *qat_req)
{
struct device *dev = &GET_DEV(inst->accel_dev);
- int i, bufs = 0, n = sg_nents(sgl), assoc_n = sg_nents(assoc);
+ int i, bufs = 0, sg_nctr = 0;
+ int n = sg_nents(sgl), assoc_n = sg_nents(assoc);
struct qat_alg_buf_list *bufl;
struct qat_alg_buf_list *buflout = NULL;
dma_addr_t blp;
dma_addr_t bloutp = 0;
struct scatterlist *sg;
- size_t sz = sizeof(struct qat_alg_buf_list) +
+ size_t sz_out, sz = sizeof(struct qat_alg_buf_list) +
((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
if (unlikely(!n))
return -EINVAL;
- bufl = kmalloc_node(sz, GFP_ATOMIC,
+ bufl = kzalloc_node(sz, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!bufl))
return -ENOMEM;
@@ -620,15 +694,20 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
goto err;
bufs++;
}
- bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
- DMA_BIDIRECTIONAL);
- bufl->bufers[bufs].len = ivlen;
- if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
- goto err;
- bufs++;
+ if (ivlen) {
+ bufl->bufers[bufs].addr = dma_map_single(dev, iv, ivlen,
+ DMA_BIDIRECTIONAL);
+ bufl->bufers[bufs].len = ivlen;
+ if (unlikely(dma_mapping_error(dev, bufl->bufers[bufs].addr)))
+ goto err;
+ bufs++;
+ }
for_each_sg(sgl, sg, n, i) {
- int y = i + bufs;
+ int y = sg_nctr + bufs;
+
+ if (!sg->length)
+ continue;
bufl->bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
@@ -636,8 +715,9 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufl->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufl->bufers[y].addr)))
goto err;
+ sg_nctr++;
}
- bufl->num_bufs = n + bufs;
+ bufl->num_bufs = sg_nctr + bufs;
qat_req->buf.bl = bufl;
qat_req->buf.blp = blp;
qat_req->buf.sz = sz;
@@ -645,11 +725,15 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
if (sgl != sglout) {
struct qat_alg_buf *bufers;
- buflout = kmalloc_node(sz, GFP_ATOMIC,
+ n = sg_nents(sglout);
+ sz_out = sizeof(struct qat_alg_buf_list) +
+ ((1 + n + assoc_n) * sizeof(struct qat_alg_buf));
+ sg_nctr = 0;
+ buflout = kzalloc_node(sz_out, GFP_ATOMIC,
dev_to_node(&GET_DEV(inst->accel_dev)));
if (unlikely(!buflout))
goto err;
- bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
+ bloutp = dma_map_single(dev, buflout, sz_out, DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, bloutp)))
goto err;
bufers = buflout->bufers;
@@ -660,60 +744,62 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
bufers[i].addr = bufl->bufers[i].addr;
}
for_each_sg(sglout, sg, n, i) {
- int y = i + bufs;
+ int y = sg_nctr + bufs;
+
+ if (!sg->length)
+ continue;
bufers[y].addr = dma_map_single(dev, sg_virt(sg),
sg->length,
DMA_BIDIRECTIONAL);
- buflout->bufers[y].len = sg->length;
if (unlikely(dma_mapping_error(dev, bufers[y].addr)))
goto err;
+ bufers[y].len = sg->length;
+ sg_nctr++;
}
- buflout->num_bufs = n + bufs;
- buflout->num_mapped_bufs = n;
+ buflout->num_bufs = sg_nctr + bufs;
+ buflout->num_mapped_bufs = sg_nctr;
qat_req->buf.blout = buflout;
qat_req->buf.bloutp = bloutp;
+ qat_req->buf.sz_out = sz_out;
} else {
/* Otherwise set the src and dst to the same address */
qat_req->buf.bloutp = qat_req->buf.blp;
+ qat_req->buf.sz_out = 0;
}
return 0;
err:
dev_err(dev, "Failed to map buf for dma\n");
- for_each_sg(sgl, sg, n + bufs, i) {
- if (!dma_mapping_error(dev, bufl->bufers[i].addr)) {
+ sg_nctr = 0;
+ for (i = 0; i < n + bufs; i++)
+ if (!dma_mapping_error(dev, bufl->bufers[i].addr))
dma_unmap_single(dev, bufl->bufers[i].addr,
bufl->bufers[i].len,
DMA_BIDIRECTIONAL);
- }
- }
+
if (!dma_mapping_error(dev, blp))
dma_unmap_single(dev, blp, sz, DMA_TO_DEVICE);
kfree(bufl);
if (sgl != sglout && buflout) {
- for_each_sg(sglout, sg, n, i) {
- int y = i + bufs;
-
- if (!dma_mapping_error(dev, buflout->bufers[y].addr))
- dma_unmap_single(dev, buflout->bufers[y].addr,
- buflout->bufers[y].len,
+ n = sg_nents(sglout);
+ for (i = bufs; i < n + bufs; i++)
+ if (!dma_mapping_error(dev, buflout->bufers[i].addr))
+ dma_unmap_single(dev, buflout->bufers[i].addr,
+ buflout->bufers[i].len,
DMA_BIDIRECTIONAL);
- }
if (!dma_mapping_error(dev, bloutp))
- dma_unmap_single(dev, bloutp, sz, DMA_TO_DEVICE);
+ dma_unmap_single(dev, bloutp, sz_out, DMA_TO_DEVICE);
kfree(buflout);
}
return -ENOMEM;
}
-void qat_alg_callback(void *resp)
+static void qat_aead_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
{
- struct icp_qat_fw_la_resp *qat_resp = resp;
- struct qat_crypto_request *qat_req =
- (void *)(__force long)qat_resp->opaque_data;
- struct qat_alg_session_ctx *ctx = qat_req->ctx;
+ struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx;
struct qat_crypto_instance *inst = ctx->inst;
- struct aead_request *areq = qat_req->areq;
+ struct aead_request *areq = qat_req->aead_req;
uint8_t stat_filed = qat_resp->comn_resp.comn_status;
int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
@@ -723,11 +809,35 @@ void qat_alg_callback(void *resp)
areq->base.complete(&areq->base, res);
}
-static int qat_alg_dec(struct aead_request *areq)
+static void qat_ablkcipher_alg_callback(struct icp_qat_fw_la_resp *qat_resp,
+ struct qat_crypto_request *qat_req)
+{
+ struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx;
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct ablkcipher_request *areq = qat_req->ablkcipher_req;
+ uint8_t stat_filed = qat_resp->comn_resp.comn_status;
+ int res = 0, qat_res = ICP_QAT_FW_COMN_RESP_CRYPTO_STAT_GET(stat_filed);
+
+ qat_alg_free_bufl(inst, qat_req);
+ if (unlikely(qat_res != ICP_QAT_FW_COMN_STATUS_FLAG_OK))
+ res = -EINVAL;
+ areq->base.complete(&areq->base, res);
+}
+
+void qat_alg_callback(void *resp)
+{
+ struct icp_qat_fw_la_resp *qat_resp = resp;
+ struct qat_crypto_request *qat_req =
+ (void *)(__force long)qat_resp->opaque_data;
+
+ qat_req->cb(qat_resp, qat_req);
+}
+
+static int qat_alg_aead_dec(struct aead_request *areq)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
- struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = aead_request_ctx(areq);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
@@ -741,9 +851,10 @@ static int qat_alg_dec(struct aead_request *areq)
return ret;
msg = &qat_req->req;
- *msg = ctx->dec_fw_req_tmpl;
- qat_req->ctx = ctx;
- qat_req->areq = areq;
+ *msg = ctx->dec_fw_req;
+ qat_req->aead_ctx = ctx;
+ qat_req->aead_req = areq;
+ qat_req->cb = qat_aead_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
@@ -766,12 +877,12 @@ static int qat_alg_dec(struct aead_request *areq)
return -EINPROGRESS;
}
-static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
- int enc_iv)
+static int qat_alg_aead_enc_internal(struct aead_request *areq, uint8_t *iv,
+ int enc_iv)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
- struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_request *qat_req = aead_request_ctx(areq);
struct icp_qat_fw_la_cipher_req_params *cipher_param;
struct icp_qat_fw_la_auth_req_params *auth_param;
@@ -784,9 +895,10 @@ static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
return ret;
msg = &qat_req->req;
- *msg = ctx->enc_fw_req_tmpl;
- qat_req->ctx = ctx;
- qat_req->areq = areq;
+ *msg = ctx->enc_fw_req;
+ qat_req->aead_ctx = ctx;
+ qat_req->aead_req = areq;
+ qat_req->cb = qat_aead_alg_callback;
qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
@@ -815,31 +927,168 @@ static int qat_alg_enc_internal(struct aead_request *areq, uint8_t *iv,
return -EINPROGRESS;
}
-static int qat_alg_enc(struct aead_request *areq)
+static int qat_alg_aead_enc(struct aead_request *areq)
{
- return qat_alg_enc_internal(areq, areq->iv, 0);
+ return qat_alg_aead_enc_internal(areq, areq->iv, 0);
}
-static int qat_alg_genivenc(struct aead_givcrypt_request *req)
+static int qat_alg_aead_genivenc(struct aead_givcrypt_request *req)
{
struct crypto_aead *aead_tfm = crypto_aead_reqtfm(&req->areq);
struct crypto_tfm *tfm = crypto_aead_tfm(aead_tfm);
- struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
__be64 seq;
memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE);
seq = cpu_to_be64(req->seq);
memcpy(req->giv + AES_BLOCK_SIZE - sizeof(uint64_t),
&seq, sizeof(uint64_t));
- return qat_alg_enc_internal(&req->areq, req->giv, 1);
+ return qat_alg_aead_enc_internal(&req->areq, req->giv, 1);
}
-static int qat_alg_init(struct crypto_tfm *tfm,
- enum icp_qat_hw_auth_algo hash, const char *hash_name)
+static int qat_alg_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
+ const uint8_t *key,
+ unsigned int keylen)
{
- struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm);
+ struct device *dev;
+
+ spin_lock(&ctx->lock);
+ if (ctx->enc_cd) {
+ /* rekeying */
+ dev = &GET_DEV(ctx->inst->accel_dev);
+ memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd));
+ memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd));
+ memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req));
+ memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req));
+ } else {
+ /* new key */
+ int node = get_current_node();
+ struct qat_crypto_instance *inst =
+ qat_crypto_get_instance_node(node);
+ if (!inst) {
+ spin_unlock(&ctx->lock);
+ return -EINVAL;
+ }
+
+ dev = &GET_DEV(inst->accel_dev);
+ ctx->inst = inst;
+ ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd),
+ &ctx->enc_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->enc_cd) {
+ spin_unlock(&ctx->lock);
+ return -ENOMEM;
+ }
+ ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd),
+ &ctx->dec_cd_paddr,
+ GFP_ATOMIC);
+ if (!ctx->dec_cd) {
+ spin_unlock(&ctx->lock);
+ goto out_free_enc;
+ }
+ }
+ spin_unlock(&ctx->lock);
+ if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen))
+ goto out_free_all;
+
+ return 0;
+
+out_free_all:
+ memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd));
+ dma_free_coherent(dev, sizeof(*ctx->enc_cd),
+ ctx->dec_cd, ctx->dec_cd_paddr);
+ ctx->dec_cd = NULL;
+out_free_enc:
+ memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd));
+ dma_free_coherent(dev, sizeof(*ctx->dec_cd),
+ ctx->enc_cd, ctx->enc_cd_paddr);
+ ctx->enc_cd = NULL;
+ return -ENOMEM;
+}
+
+static int qat_alg_ablkcipher_encrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ int ret, ctr = 0;
+
+ ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
+ NULL, 0, qat_req);
+ if (unlikely(ret))
+ return ret;
+
+ msg = &qat_req->req;
+ *msg = ctx->enc_fw_req;
+ qat_req->ablkcipher_ctx = ctx;
+ qat_req->ablkcipher_req = req;
+ qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_offset = 0;
+ memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+ do {
+ ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ } while (ret == -EAGAIN && ctr++ < 10);
+
+ if (ret == -EAGAIN) {
+ qat_alg_free_bufl(ctx->inst, qat_req);
+ return -EBUSY;
+ }
+ return -EINPROGRESS;
+}
+
+static int qat_alg_ablkcipher_decrypt(struct ablkcipher_request *req)
+{
+ struct crypto_ablkcipher *atfm = crypto_ablkcipher_reqtfm(req);
+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(atfm);
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_request *qat_req = ablkcipher_request_ctx(req);
+ struct icp_qat_fw_la_cipher_req_params *cipher_param;
+ struct icp_qat_fw_la_bulk_req *msg;
+ int ret, ctr = 0;
+
+ ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst,
+ NULL, 0, qat_req);
+ if (unlikely(ret))
+ return ret;
+
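+ /* Mirror of the encrypt path, seeded from the decrypt template */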
+ msg = &qat_req->req;
+ *msg = ctx->dec_fw_req;
+ qat_req->ablkcipher_ctx = ctx;
+ qat_req->ablkcipher_req = req;
+ qat_req->cb = qat_ablkcipher_alg_callback;
+ qat_req->req.comn_mid.opaque_data = (uint64_t)(__force long)qat_req;
+ qat_req->req.comn_mid.src_data_addr = qat_req->buf.blp;
+ qat_req->req.comn_mid.dest_data_addr = qat_req->buf.bloutp;
+ cipher_param = (void *)&qat_req->req.serv_specif_rqpars;
+ cipher_param->cipher_length = req->nbytes;
+ cipher_param->cipher_offset = 0;
+ memcpy(cipher_param->u.cipher_IV_array, req->info, AES_BLOCK_SIZE);
+ do {
+ ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg);
+ } while (ret == -EAGAIN && ctr++ < 10);
+
+ if (ret == -EAGAIN) {
+ qat_alg_free_bufl(ctx->inst, qat_req);
+ return -EBUSY;
+ }
+ return -EINPROGRESS;
+}
+
+static int qat_alg_aead_init(struct crypto_tfm *tfm,
+ enum icp_qat_hw_auth_algo hash,
+ const char *hash_name)
+{
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
- memzero_explicit(ctx, sizeof(*ctx));
ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0);
if (IS_ERR(ctx->hash_tfm))
return -EFAULT;
@@ -851,24 +1100,24 @@ static int qat_alg_init(struct crypto_tfm *tfm,
return 0;
}
-static int qat_alg_sha1_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha1_init(struct crypto_tfm *tfm)
{
- return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA1, "sha1");
}
-static int qat_alg_sha256_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha256_init(struct crypto_tfm *tfm)
{
- return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA256, "sha256");
}
-static int qat_alg_sha512_init(struct crypto_tfm *tfm)
+static int qat_alg_aead_sha512_init(struct crypto_tfm *tfm)
{
- return qat_alg_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
+ return qat_alg_aead_init(tfm, ICP_QAT_HW_AUTH_ALGO_SHA512, "sha512");
}
-static void qat_alg_exit(struct crypto_tfm *tfm)
+static void qat_alg_aead_exit(struct crypto_tfm *tfm)
{
- struct qat_alg_session_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm);
struct qat_crypto_instance *inst = ctx->inst;
struct device *dev;
@@ -880,36 +1129,74 @@ static void qat_alg_exit(struct crypto_tfm *tfm)
dev = &GET_DEV(inst->accel_dev);
if (ctx->enc_cd) {
- memzero_explicit(ctx->enc_cd, sizeof(struct qat_alg_cd));
+ memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->enc_cd, ctx->enc_cd_paddr);
}
if (ctx->dec_cd) {
- memzero_explicit(ctx->dec_cd, sizeof(struct qat_alg_cd));
+ memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd));
dma_free_coherent(dev, sizeof(struct qat_alg_cd),
ctx->dec_cd, ctx->dec_cd_paddr);
}
qat_crypto_put_instance(inst);
}
+static int qat_alg_ablkcipher_init(struct crypto_tfm *tfm)
+{
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+
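+ /* Reserve per-request context space for the firmware request bookkeeping */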
+ spin_lock_init(&ctx->lock);
+ tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
+ sizeof(struct qat_crypto_request);
+ ctx->tfm = tfm;
+ return 0;
+}
+
+static void qat_alg_ablkcipher_exit(struct crypto_tfm *tfm)
+{
+ struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
+ struct qat_crypto_instance *inst = ctx->inst;
+ struct device *dev;
+
+ if (!inst)
+ return;
+
+ dev = &GET_DEV(inst->accel_dev);
+ if (ctx->enc_cd) {
+ memset(ctx->enc_cd, 0,
+ sizeof(struct icp_qat_hw_cipher_algo_blk));
+ dma_free_coherent(dev,
+ sizeof(struct icp_qat_hw_cipher_algo_blk),
+ ctx->enc_cd, ctx->enc_cd_paddr);
+ }
+ if (ctx->dec_cd) {
+ memset(ctx->dec_cd, 0,
+ sizeof(struct icp_qat_hw_cipher_algo_blk));
+ dma_free_coherent(dev,
+ sizeof(struct icp_qat_hw_cipher_algo_blk),
+ ctx->dec_cd, ctx->dec_cd_paddr);
+ }
+ qat_crypto_put_instance(inst);
+}
+
static struct crypto_alg qat_algs[] = { {
.cra_name = "authenc(hmac(sha1),cbc(aes))",
.cra_driver_name = "qat_aes_cbc_hmac_sha1",
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = qat_alg_sha1_init,
- .cra_exit = qat_alg_exit,
+ .cra_init = qat_alg_aead_sha1_init,
+ .cra_exit = qat_alg_aead_exit,
.cra_u = {
.aead = {
- .setkey = qat_alg_setkey,
- .decrypt = qat_alg_dec,
- .encrypt = qat_alg_enc,
- .givencrypt = qat_alg_genivenc,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .givencrypt = qat_alg_aead_genivenc,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA1_DIGEST_SIZE,
},
@@ -920,18 +1207,18 @@ static struct crypto_alg qat_algs[] = { {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = qat_alg_sha256_init,
- .cra_exit = qat_alg_exit,
+ .cra_init = qat_alg_aead_sha256_init,
+ .cra_exit = qat_alg_aead_exit,
.cra_u = {
.aead = {
- .setkey = qat_alg_setkey,
- .decrypt = qat_alg_dec,
- .encrypt = qat_alg_enc,
- .givencrypt = qat_alg_genivenc,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .givencrypt = qat_alg_aead_genivenc,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA256_DIGEST_SIZE,
},
@@ -942,22 +1229,44 @@ static struct crypto_alg qat_algs[] = { {
.cra_priority = 4001,
.cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct qat_alg_session_ctx),
+ .cra_ctxsize = sizeof(struct qat_alg_aead_ctx),
.cra_alignmask = 0,
.cra_type = &crypto_aead_type,
.cra_module = THIS_MODULE,
- .cra_init = qat_alg_sha512_init,
- .cra_exit = qat_alg_exit,
+ .cra_init = qat_alg_aead_sha512_init,
+ .cra_exit = qat_alg_aead_exit,
.cra_u = {
.aead = {
- .setkey = qat_alg_setkey,
- .decrypt = qat_alg_dec,
- .encrypt = qat_alg_enc,
- .givencrypt = qat_alg_genivenc,
+ .setkey = qat_alg_aead_setkey,
+ .decrypt = qat_alg_aead_dec,
+ .encrypt = qat_alg_aead_enc,
+ .givencrypt = qat_alg_aead_genivenc,
.ivsize = AES_BLOCK_SIZE,
.maxauthsize = SHA512_DIGEST_SIZE,
},
},
+}, {
+ .cra_name = "cbc(aes)",
+ .cra_driver_name = "qat_aes_cbc",
+ .cra_priority = 4001,
+ .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC,
+ .cra_blocksize = AES_BLOCK_SIZE,
+ .cra_ctxsize = sizeof(struct qat_alg_ablkcipher_ctx),
+ .cra_alignmask = 0,
+ .cra_type = &crypto_ablkcipher_type,
+ .cra_module = THIS_MODULE,
+ .cra_init = qat_alg_ablkcipher_init,
+ .cra_exit = qat_alg_ablkcipher_exit,
+ .cra_u = {
+ .ablkcipher = {
+ .setkey = qat_alg_ablkcipher_setkey,
+ .decrypt = qat_alg_ablkcipher_decrypt,
+ .encrypt = qat_alg_ablkcipher_encrypt,
+ .min_keysize = AES_MIN_KEY_SIZE,
+ .max_keysize = AES_MAX_KEY_SIZE,
+ .ivsize = AES_BLOCK_SIZE,
+ },
+ },
} };
int qat_algs_register(void)
@@ -966,8 +1275,11 @@ int qat_algs_register(void)
int i;
for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
- qat_algs[i].cra_flags = CRYPTO_ALG_TYPE_AEAD |
- CRYPTO_ALG_ASYNC;
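+ /* AEAD and ablkcipher entries need different CRYPTO_ALG_TYPE flags */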
+ qat_algs[i].cra_flags =
+ (qat_algs[i].cra_type == &crypto_aead_type) ?
+ CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
+ CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
+
return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
}
return 0;
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.h b/drivers/crypto/qat/qat_common/qat_crypto.h
index ab8468d..d503007 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.h
+++ b/drivers/crypto/qat/qat_common/qat_crypto.h
@@ -72,12 +72,24 @@ struct qat_crypto_request_buffs {
struct qat_alg_buf_list *blout;
dma_addr_t bloutp;
size_t sz;
+ size_t sz_out;
};
+struct qat_crypto_request;
+
struct qat_crypto_request {
struct icp_qat_fw_la_bulk_req req;
- struct qat_alg_session_ctx *ctx;
- struct aead_request *areq;
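+ /* A request belongs to either an AEAD or an ablkcipher tfm, so the
+  * context and request pointers can share storage; the cb hook routes
+  * the firmware response to the matching completion handler.
+  */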
+ union {
+ struct qat_alg_aead_ctx *aead_ctx;
+ struct qat_alg_ablkcipher_ctx *ablkcipher_ctx;
+ };
+ union {
+ struct aead_request *aead_req;
+ struct ablkcipher_request *ablkcipher_req;
+ };
struct qat_crypto_request_buffs buf;
+ void (*cb)(struct icp_qat_fw_la_resp *resp,
+ struct qat_crypto_request *req);
};
+
#endif
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
index ef05825..6a735d5 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_dh895xcc_hw_data.c
@@ -46,6 +46,7 @@
*/
#include <adf_accel_devices.h>
#include "adf_dh895xcc_hw_data.h"
+#include "adf_common_drv.h"
#include "adf_drv.h"
/* Worker thread to service arbiter mappings based on dev SKUs */
@@ -182,6 +183,19 @@ static void adf_enable_error_correction(struct adf_accel_dev *accel_dev)
}
}
+static void adf_enable_ints(struct adf_accel_dev *accel_dev)
+{
+ void __iomem *addr;
+
+ addr = (&GET_BARS(accel_dev)[ADF_DH895XCC_PMISC_BAR])->virt_addr;
+
+ /* Enable bundle and misc interrupts */
+ ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
+ ADF_DH895XCC_SMIA0_MASK);
+ ADF_CSR_WR(addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
+ ADF_DH895XCC_SMIA1_MASK);
+}
+
void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
{
hw_data->dev_class = &dh895xcc_class;
@@ -206,6 +220,11 @@ void adf_init_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
hw_data->get_misc_bar_id = get_misc_bar_id;
hw_data->get_sku = get_sku;
hw_data->fw_name = ADF_DH895XCC_FW;
+ hw_data->init_admin_comms = adf_init_admin_comms;
+ hw_data->exit_admin_comms = adf_exit_admin_comms;
+ hw_data->init_arb = adf_init_arb;
+ hw_data->exit_arb = adf_exit_arb;
+ hw_data->enable_ints = adf_enable_ints;
}
void adf_clean_hw_data_dh895xcc(struct adf_hw_device_data *hw_data)
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 948f66b..8ffdb95 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -90,9 +90,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
struct adf_accel_pci *accel_pci_dev = &accel_dev->accel_pci_dev;
int i;
- adf_exit_admin_comms(accel_dev);
- adf_exit_arb(accel_dev);
- adf_cleanup_etr_data(accel_dev);
+ adf_dev_shutdown(accel_dev);
for (i = 0; i < ADF_PCI_MAX_BARS; i++) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i];
@@ -119,7 +117,7 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
kfree(accel_dev);
}
-static int qat_dev_start(struct adf_accel_dev *accel_dev)
+static int adf_dev_configure(struct adf_accel_dev *accel_dev)
{
int cpus = num_online_cpus();
int banks = GET_MAX_BANKS(accel_dev);
@@ -206,7 +204,7 @@ static int qat_dev_start(struct adf_accel_dev *accel_dev)
goto err;
set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
- return adf_dev_start(accel_dev);
+ return 0;
err:
dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
return -EINVAL;
@@ -217,7 +215,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
struct adf_accel_dev *accel_dev;
struct adf_accel_pci *accel_pci_dev;
struct adf_hw_device_data *hw_data;
- void __iomem *pmisc_bar_addr = NULL;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
int ret;
@@ -347,8 +344,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ret = -EFAULT;
goto out_err;
}
- if (i == ADF_DH895XCC_PMISC_BAR)
- pmisc_bar_addr = bar->virt_addr;
}
pci_set_master(pdev);
@@ -358,36 +353,21 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto out_err;
}
- if (adf_init_etr_data(accel_dev)) {
- dev_err(&pdev->dev, "Failed initialize etr\n");
- ret = -EFAULT;
- goto out_err;
- }
-
- if (adf_init_admin_comms(accel_dev)) {
- dev_err(&pdev->dev, "Failed initialize admin comms\n");
- ret = -EFAULT;
- goto out_err;
- }
-
- if (adf_init_arb(accel_dev)) {
- dev_err(&pdev->dev, "Failed initialize hw arbiter\n");
- ret = -EFAULT;
- goto out_err;
- }
if (pci_save_state(pdev)) {
dev_err(&pdev->dev, "Failed to save pci state\n");
ret = -ENOMEM;
goto out_err;
}
- /* Enable bundle and misc interrupts */
- ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF0_MASK_OFFSET,
- ADF_DH895XCC_SMIA0_MASK);
- ADF_CSR_WR(pmisc_bar_addr, ADF_DH895XCC_SMIAPF1_MASK_OFFSET,
- ADF_DH895XCC_SMIA1_MASK);
+ ret = adf_dev_configure(accel_dev);
+ if (ret)
+ goto out_err;
+
+ ret = adf_dev_init(accel_dev);
+ if (ret)
+ goto out_err;
- ret = qat_dev_start(accel_dev);
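+ /* Device is configured and initialized; now start rings and services */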
+ ret = adf_dev_start(accel_dev);
if (ret) {
adf_dev_stop(accel_dev);
goto out_err;
diff --git a/drivers/crypto/qce/dma.c b/drivers/crypto/qce/dma.c
index 0fb21e1..378cb76 100644
--- a/drivers/crypto/qce/dma.c
+++ b/drivers/crypto/qce/dma.c
@@ -64,7 +64,7 @@ int qce_mapsg(struct device *dev, struct scatterlist *sg, int nents,
err = dma_map_sg(dev, sg, 1, dir);
if (!err)
return -EFAULT;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
} else {
err = dma_map_sg(dev, sg, nents, dir);
@@ -81,7 +81,7 @@ void qce_unmapsg(struct device *dev, struct scatterlist *sg, int nents,
if (chained)
while (sg) {
dma_unmap_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
else
dma_unmap_sg(dev, sg, nents, dir);
@@ -100,7 +100,7 @@ int qce_countsg(struct scatterlist *sglist, int nbytes, bool *chained)
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0 && chained)
*chained = true;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return nents;
diff --git a/drivers/crypto/qce/sha.c b/drivers/crypto/qce/sha.c
index f338593..5c5df1d 100644
--- a/drivers/crypto/qce/sha.c
+++ b/drivers/crypto/qce/sha.c
@@ -285,7 +285,7 @@ static int qce_ahash_update(struct ahash_request *req)
break;
len += sg_dma_len(sg);
sg_last = sg;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
if (!sg_last)
diff --git a/drivers/crypto/sahara.c b/drivers/crypto/sahara.c
index 220b92f..290a7f0 100644
--- a/drivers/crypto/sahara.c
+++ b/drivers/crypto/sahara.c
@@ -940,7 +940,7 @@ static int sahara_walk_and_recalc(struct scatterlist *sg, unsigned int nbytes)
break;
}
nbytes -= sg->length;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return nbytes;
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 067ec21..ebbae8d 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -743,7 +743,7 @@ static int talitos_map_sg(struct device *dev, struct scatterlist *sg,
if (unlikely(chained))
while (sg) {
dma_map_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
else
dma_map_sg(dev, sg, nents, dir);
@@ -755,7 +755,7 @@ static void talitos_unmap_sg_chain(struct device *dev, struct scatterlist *sg,
{
while (sg) {
dma_unmap_sg(dev, sg, 1, dir);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
}
@@ -915,7 +915,7 @@ static int sg_to_link_tbl(struct scatterlist *sg, int sg_count,
link_tbl_ptr->j_extent = 0;
link_tbl_ptr++;
cryptlen -= sg_dma_len(sg);
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
/* adjust (decrease) last one (or two) entry's len to cryptlen */
@@ -1102,7 +1102,7 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
nbytes -= sg->length;
if (!sg_is_last(sg) && (sg + 1)->length == 0)
*chained = true;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
}
return sg_nents;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index f831bb9..d594ae9 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -479,13 +479,13 @@ static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
.dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
.dst_maxburst = 4,
- };
+ };
struct dma_slave_config cryp2mem = {
.direction = DMA_DEV_TO_MEM,
.src_addr = device_data->phybase + CRYP_DMA_RX_FIFO,
.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
.src_maxburst = 4,
- };
+ };
dma_cap_zero(device_data->dma.mask);
dma_cap_set(DMA_SLAVE, device_data->dma.mask);
@@ -814,7 +814,7 @@ static int get_nents(struct scatterlist *sg, int nbytes)
while (nbytes > 0) {
nbytes -= sg->length;
- sg = scatterwalk_sg_next(sg);
+ sg = sg_next(sg);
nents++;
}
@@ -1774,8 +1774,8 @@ static int ux500_cryp_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
static const struct of_device_id ux500_cryp_match[] = {
- { .compatible = "stericsson,ux500-cryp" },
- { },
+ { .compatible = "stericsson,ux500-cryp" },
+ { },
};
static struct platform_driver cryp_driver = {
