Diffstat (limited to 'drivers/crypto')
-rw-r--r--  drivers/crypto/nx/nx-aes-cbc.c         |  1
-rw-r--r--  drivers/crypto/nx/nx-aes-ecb.c         |  1
-rw-r--r--  drivers/crypto/nx/nx-aes-gcm.c         |  2
-rw-r--r--  drivers/crypto/nx/nx-sha256.c          |  8
-rw-r--r--  drivers/crypto/nx/nx-sha512.c          |  7
-rw-r--r--  drivers/crypto/nx/nx.c                 | 38
-rw-r--r--  drivers/crypto/talitos.c               | 60
-rw-r--r--  drivers/crypto/ux500/cryp/cryp.c       |  4
-rw-r--r--  drivers/crypto/ux500/cryp/cryp.h       |  7
-rw-r--r--  drivers/crypto/ux500/cryp/cryp_core.c  | 63
-rw-r--r--  drivers/crypto/ux500/hash/hash_alg.h   |  5
-rw-r--r--  drivers/crypto/ux500/hash/hash_core.c  | 63
12 files changed, 125 insertions(+), 134 deletions(-)
diff --git a/drivers/crypto/nx/nx-aes-cbc.c b/drivers/crypto/nx/nx-aes-cbc.c
index a76d4c4f29f5..35d483f8db66 100644
--- a/drivers/crypto/nx/nx-aes-cbc.c
+++ b/drivers/crypto/nx/nx-aes-cbc.c
@@ -126,6 +126,7 @@ struct crypto_alg nx_cbc_aes_alg = {
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
+ .cra_alignmask = 0xf,
.cra_module = THIS_MODULE,
.cra_init = nx_crypto_ctx_aes_cbc_init,
.cra_exit = nx_crypto_ctx_exit,
diff --git a/drivers/crypto/nx/nx-aes-ecb.c b/drivers/crypto/nx/nx-aes-ecb.c
index ba5f1611336f..7bbc9a81da21 100644
--- a/drivers/crypto/nx/nx-aes-ecb.c
+++ b/drivers/crypto/nx/nx-aes-ecb.c
@@ -123,6 +123,7 @@ struct crypto_alg nx_ecb_aes_alg = {
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
+ .cra_alignmask = 0xf,
.cra_ctxsize = sizeof(struct nx_crypto_ctx),
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
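The two hunks above set cra_alignmask = 0xf on the NX CBC and ECB algorithms, telling the crypto layer that the coprocessor wants buffers aligned to AES_BLOCK_SIZE. A minimal sketch of what the mask means under the generic cra_alignmask contract (the helper below is illustrative, not driver code):

#include <stdbool.h>
#include <stdint.h>

/* With cra_alignmask = 0xf, the API guarantees the cipher only ever
 * sees buffer addresses passing this check, bouncing unaligned data
 * through an aligned copy when necessary. */
static bool nx_addr_ok(const void *buf, uintptr_t alignmask)
{
	return ((uintptr_t)buf & alignmask) == 0;   /* 0xf => 16-byte aligned */
}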
diff --git a/drivers/crypto/nx/nx-aes-gcm.c b/drivers/crypto/nx/nx-aes-gcm.c
index c8109edc5cfb..6cca6c392b00 100644
--- a/drivers/crypto/nx/nx-aes-gcm.c
+++ b/drivers/crypto/nx/nx-aes-gcm.c
@@ -219,7 +219,7 @@ static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
if (enc)
NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
else
- nbytes -= AES_BLOCK_SIZE;
+ nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));
csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;
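On decryption, req->cryptlen covers the ciphertext plus the authentication tag, and GCM tags can be truncated (4 to 16 bytes, chosen via setauthsize()). Subtracting a hardcoded AES_BLOCK_SIZE therefore miscounts the payload whenever the tag is shorter than 16 bytes; the hunk above subtracts the real authsize instead. A minimal sketch of the corrected accounting, with illustrative names:

/* Payload length for the GCM operation; on decrypt the tag is not data. */
static unsigned int gcm_data_len(unsigned int cryptlen,
				 unsigned int authsize, int enc)
{
	return enc ? cryptlen : cryptlen - authsize;
}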
diff --git a/drivers/crypto/nx/nx-sha256.c b/drivers/crypto/nx/nx-sha256.c
index 9767315f8c0b..67024f2f0b78 100644
--- a/drivers/crypto/nx/nx-sha256.c
+++ b/drivers/crypto/nx/nx-sha256.c
@@ -69,7 +69,7 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
* 1: <= SHA256_BLOCK_SIZE: copy into state, return 0
* 2: > SHA256_BLOCK_SIZE: process X blocks, copy in leftover
*/
- if (len + sctx->count <= SHA256_BLOCK_SIZE) {
+ if (len + sctx->count < SHA256_BLOCK_SIZE) {
memcpy(sctx->buf + sctx->count, data, len);
sctx->count += len;
goto out;
@@ -110,7 +110,8 @@ static int nx_sha256_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha256_ops));
/* copy the leftover back into the state struct */
- memcpy(sctx->buf, data + len - leftover, leftover);
+ if (leftover)
+ memcpy(sctx->buf, data + len - leftover, leftover);
sctx->count = leftover;
csbcpb->cpb.sha256.message_bit_length += (u64)
@@ -130,6 +131,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
struct nx_sg *in_sg, *out_sg;
int rc;
+
if (NX_CPB_FDM(csbcpb) & NX_FDM_CONTINUATION) {
/* we've hit the nx chip previously, now we're finalizing,
* so copy over the partial digest */
@@ -162,7 +164,7 @@ static int nx_sha256_final(struct shash_desc *desc, u8 *out)
atomic_inc(&(nx_ctx->stats->sha256_ops));
- atomic64_add(csbcpb->cpb.sha256.message_bit_length,
+ atomic64_add(csbcpb->cpb.sha256.message_bit_length / 8,
&(nx_ctx->stats->sha256_bytes));
memcpy(out, csbcpb->cpb.sha256.message_digest, SHA256_DIGEST_SIZE);
out:
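Three fixes land in nx-sha256.c: the buffering test becomes strictly less-than, so a total of exactly SHA256_BLOCK_SIZE is sent to the coprocessor rather than parked in sctx->buf; the leftover copy-back is guarded so a zero-length tail is never copied; and the sha256_bytes statistic is fed bytes instead of bits (message_bit_length / 8). A minimal sketch of the resulting update() bookkeeping, with illustrative names:

#include <string.h>

#define BLOCK 64	/* SHA256_BLOCK_SIZE */

/* Returns the new count of buffered bytes; whole blocks are assumed
 * to be handed to the hardware where the comment indicates. */
static size_t sha_update_tail(unsigned char *buf, size_t count,
			      const unsigned char *data, size_t len)
{
	size_t leftover;

	if (count + len < BLOCK) {	/* strictly '<': a full block is processed */
		memcpy(buf + count, data, len);
		return count + len;
	}

	/* ... hand (count + len) / BLOCK whole blocks to the NX unit ... */

	leftover = (count + len) % BLOCK;
	if (leftover)			/* skip the pointless zero-length memcpy */
		memcpy(buf, data + len - leftover, leftover);
	return leftover;
}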
diff --git a/drivers/crypto/nx/nx-sha512.c b/drivers/crypto/nx/nx-sha512.c
index 3177b8c3d5f1..08eee1122349 100644
--- a/drivers/crypto/nx/nx-sha512.c
+++ b/drivers/crypto/nx/nx-sha512.c
@@ -69,7 +69,7 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
* 1: <= SHA512_BLOCK_SIZE: copy into state, return 0
* 2: > SHA512_BLOCK_SIZE: process X blocks, copy in leftover
*/
- if ((u64)len + sctx->count[0] <= SHA512_BLOCK_SIZE) {
+ if ((u64)len + sctx->count[0] < SHA512_BLOCK_SIZE) {
memcpy(sctx->buf + sctx->count[0], data, len);
sctx->count[0] += len;
goto out;
@@ -110,7 +110,8 @@ static int nx_sha512_update(struct shash_desc *desc, const u8 *data,
atomic_inc(&(nx_ctx->stats->sha512_ops));
/* copy the leftover back into the state struct */
- memcpy(sctx->buf, data + len - leftover, leftover);
+ if (leftover)
+ memcpy(sctx->buf, data + len - leftover, leftover);
sctx->count[0] = leftover;
spbc_bits = csbcpb->cpb.sha512.spbc * 8;
@@ -168,7 +169,7 @@ static int nx_sha512_final(struct shash_desc *desc, u8 *out)
goto out;
atomic_inc(&(nx_ctx->stats->sha512_ops));
- atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo,
+ atomic64_add(csbcpb->cpb.sha512.message_bit_length_lo / 8,
&(nx_ctx->stats->sha512_bytes));
memcpy(out, csbcpb->cpb.sha512.message_digest, SHA512_DIGEST_SIZE);
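The nx-sha512.c hunks are the same three fixes transposed to the 512-bit variant: the strict less-than test (against SHA512_BLOCK_SIZE, with a 64-bit running count in sctx->count[0]), the guarded leftover copy, and the bits-to-bytes conversion before updating the sha512_bytes counter.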
diff --git a/drivers/crypto/nx/nx.c b/drivers/crypto/nx/nx.c
index c767f232e693..bbdab6e5ccf0 100644
--- a/drivers/crypto/nx/nx.c
+++ b/drivers/crypto/nx/nx.c
@@ -211,44 +211,20 @@ int nx_build_sg_lists(struct nx_crypto_ctx *nx_ctx,
{
struct nx_sg *nx_insg = nx_ctx->in_sg;
struct nx_sg *nx_outsg = nx_ctx->out_sg;
- struct blkcipher_walk walk;
- int rc;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- rc = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
- if (rc)
- goto out;
if (iv)
- memcpy(iv, walk.iv, AES_BLOCK_SIZE);
+ memcpy(iv, desc->info, AES_BLOCK_SIZE);
- while (walk.nbytes) {
- nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
- walk.nbytes, nx_ctx->ap->sglen);
- nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
- walk.nbytes, nx_ctx->ap->sglen);
-
- rc = blkcipher_walk_done(desc, &walk, 0);
- if (rc)
- break;
- }
-
- if (walk.nbytes) {
- nx_insg = nx_build_sg_list(nx_insg, walk.src.virt.addr,
- walk.nbytes, nx_ctx->ap->sglen);
- nx_outsg = nx_build_sg_list(nx_outsg, walk.dst.virt.addr,
- walk.nbytes, nx_ctx->ap->sglen);
-
- rc = 0;
- }
+ nx_insg = nx_walk_and_build(nx_insg, nx_ctx->ap->sglen, src, 0, nbytes);
+ nx_outsg = nx_walk_and_build(nx_outsg, nx_ctx->ap->sglen, dst, 0, nbytes);
/* these lengths should be negative, which will indicate to phyp that
* the input and output parameters are scatterlists, not linear
* buffers */
nx_ctx->op.inlen = (nx_ctx->in_sg - nx_insg) * sizeof(struct nx_sg);
nx_ctx->op.outlen = (nx_ctx->out_sg - nx_outsg) * sizeof(struct nx_sg);
-out:
- return rc;
+
+ return 0;
}
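This rewrite drops the blkcipher_walk machinery: instead of stepping through the request block by block (and building each scatter/gather list a second time on the final partial step), nx_build_sg_lists() now hands both scatterlists straight to nx_walk_and_build() and can no longer fail, hence the unconditional return 0. The IV now comes from desc->info, which is where the blkcipher walk took it from anyway. Based on how it is called here, the helper's assumed contract is:

/* Assumed prototype (defined elsewhere in nx.c): walk @sg starting
 * @start bytes in, emit at most @sglen nx_sg entries covering @nbytes
 * bytes, and return the next free slot in the nx_sg array. */
struct nx_sg *nx_walk_and_build(struct nx_sg *nx_dst, unsigned int sglen,
				struct scatterlist *sg, unsigned int start,
				unsigned int nbytes);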
/**
@@ -454,6 +430,8 @@ static int nx_register_algs(void)
if (rc)
goto out;
+ nx_driver.of.status = NX_OKAY;
+
rc = crypto_register_alg(&nx_ecb_aes_alg);
if (rc)
goto out;
@@ -498,8 +476,6 @@ static int nx_register_algs(void)
if (rc)
goto out_unreg_s512;
- nx_driver.of.status = NX_OKAY;
-
goto out;
out_unreg_s512:
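Moving nx_driver.of.status = NX_OKAY ahead of the first crypto_register_alg() call matters because registration triggers the crypto self-tests, which exercise the driver's init paths; those presumably refuse to run while the driver still reports itself not ready, so setting the flag only after all registrations meant the algorithms could never pass their own tests.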
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index 5b2b5e61e4f9..661dc3eb1d66 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -1112,64 +1112,6 @@ static int sg_count(struct scatterlist *sg_list, int nbytes, bool *chained)
return sg_nents;
}
-/**
- * sg_copy_end_to_buffer - Copy end data from SG list to a linear buffer
- * @sgl: The SG list
- * @nents: Number of SG entries
- * @buf: Where to copy to
- * @buflen: The number of bytes to copy
- * @skip: The number of bytes to skip before copying.
- * Note: skip + buflen should equal SG total size.
- *
- * Returns the number of copied bytes.
- *
- **/
-static size_t sg_copy_end_to_buffer(struct scatterlist *sgl, unsigned int nents,
- void *buf, size_t buflen, unsigned int skip)
-{
- unsigned int offset = 0;
- unsigned int boffset = 0;
- struct sg_mapping_iter miter;
- unsigned long flags;
- unsigned int sg_flags = SG_MITER_ATOMIC;
- size_t total_buffer = buflen + skip;
-
- sg_flags |= SG_MITER_FROM_SG;
-
- sg_miter_start(&miter, sgl, nents, sg_flags);
-
- local_irq_save(flags);
-
- while (sg_miter_next(&miter) && offset < total_buffer) {
- unsigned int len;
- unsigned int ignore;
-
- if ((offset + miter.length) > skip) {
- if (offset < skip) {
- /* Copy part of this segment */
- ignore = skip - offset;
- len = miter.length - ignore;
- if (boffset + len > buflen)
- len = buflen - boffset;
- memcpy(buf + boffset, miter.addr + ignore, len);
- } else {
- /* Copy all of this segment (up to buflen) */
- len = miter.length;
- if (boffset + len > buflen)
- len = buflen - boffset;
- memcpy(buf + boffset, miter.addr, len);
- }
- boffset += len;
- }
- offset += miter.length;
- }
-
- sg_miter_stop(&miter);
-
- local_irq_restore(flags);
- return boffset;
-}
-
/*
* allocate and map the extended descriptor
*/
@@ -1800,7 +1742,7 @@ static int ahash_process_req(struct ahash_request *areq, unsigned int nbytes)
if (to_hash_later) {
int nents = sg_count(areq->src, nbytes, &chained);
- sg_copy_end_to_buffer(areq->src, nents,
+ sg_pcopy_to_buffer(areq->src, nents,
req_ctx->bufnext,
to_hash_later,
nbytes - to_hash_later);
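The 50-odd lines of sg_copy_end_to_buffer() reimplemented, complete with a private sg_miter loop and IRQ save/restore, what lib/scatterlist.c now exports, so talitos switches to the library routine. Its signature at the time (worth confirming against the tree in use):

/* From <linux/scatterlist.h>: copy @buflen bytes out of the list after
 * skipping the first @skip bytes -- exactly the tail copy the removed
 * helper performed for ahash_process_req(). */
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip);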
diff --git a/drivers/crypto/ux500/cryp/cryp.c b/drivers/crypto/ux500/cryp/cryp.c
index 3eafa903ebcd..43a0c8a26ab0 100644
--- a/drivers/crypto/ux500/cryp/cryp.c
+++ b/drivers/crypto/ux500/cryp/cryp.c
@@ -291,7 +291,7 @@ void cryp_save_device_context(struct cryp_device_data *device_data,
int cryp_mode)
{
enum cryp_algo_mode algomode;
- struct cryp_register *src_reg = device_data->base;
+ struct cryp_register __iomem *src_reg = device_data->base;
struct cryp_config *config =
(struct cryp_config *)device_data->current_ctx;
@@ -349,7 +349,7 @@ void cryp_save_device_context(struct cryp_device_data *device_data,
void cryp_restore_device_context(struct cryp_device_data *device_data,
struct cryp_device_context *ctx)
{
- struct cryp_register *reg = device_data->base;
+ struct cryp_register __iomem *reg = device_data->base;
struct cryp_config *config =
(struct cryp_config *)device_data->current_ctx;
diff --git a/drivers/crypto/ux500/cryp/cryp.h b/drivers/crypto/ux500/cryp/cryp.h
index 14cfd05b777a..d1d6606fe56c 100644
--- a/drivers/crypto/ux500/cryp/cryp.h
+++ b/drivers/crypto/ux500/cryp/cryp.h
@@ -114,6 +114,9 @@ enum cryp_status_id {
};
/* Cryp DMA interface */
+#define CRYP_DMA_TX_FIFO 0x08
+#define CRYP_DMA_RX_FIFO 0x10
+
enum cryp_dma_req_type {
CRYP_DMA_DISABLE_BOTH,
CRYP_DMA_ENABLE_IN_DATA,
@@ -217,7 +220,8 @@ struct cryp_dma {
/**
* struct cryp_device_data - structure for a cryp device.
- * @base: Pointer to the hardware base address.
+ * @base: Pointer to virtual base address of the cryp device.
+ * @phybase: Pointer to physical memory location of the cryp device.
* @dev: Pointer to the devices dev structure.
* @clk: Pointer to the device's clock control.
* @pwr_regulator: Pointer to the device's power control.
@@ -232,6 +236,7 @@ struct cryp_dma {
*/
struct cryp_device_data {
struct cryp_register __iomem *base;
+ phys_addr_t phybase;
struct device *dev;
struct clk *clk;
struct regulator *pwr_regulator;
diff --git a/drivers/crypto/ux500/cryp/cryp_core.c b/drivers/crypto/ux500/cryp/cryp_core.c
index 3f1f9825fadb..a999f537228f 100644
--- a/drivers/crypto/ux500/cryp/cryp_core.c
+++ b/drivers/crypto/ux500/cryp/cryp_core.c
@@ -475,6 +475,19 @@ static int cryp_get_device_data(struct cryp_ctx *ctx,
static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
struct device *dev)
{
+ struct dma_slave_config mem2cryp = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = device_data->phybase + CRYP_DMA_TX_FIFO,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .dst_maxburst = 4,
+ };
+ struct dma_slave_config cryp2mem = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = device_data->phybase + CRYP_DMA_RX_FIFO,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .src_maxburst = 4,
+ };
+
dma_cap_zero(device_data->dma.mask);
dma_cap_set(DMA_SLAVE, device_data->dma.mask);
@@ -490,6 +503,9 @@ static void cryp_dma_setup_channel(struct cryp_device_data *device_data,
stedma40_filter,
device_data->dma.cfg_cryp2mem);
+ dmaengine_slave_config(device_data->dma.chan_mem2cryp, &mem2cryp);
+ dmaengine_slave_config(device_data->dma.chan_cryp2mem, &cryp2mem);
+
init_completion(&device_data->dma.cryp_dma_complete);
}
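The on-stack dma_slave_config structures carry the channel parameters explicitly: each channel is configured through the generic dmaengine_slave_config() call, pointing the DMA controller at the cryp FIFO registers via the newly recorded phybase plus the CRYP_DMA_TX_FIFO/CRYP_DMA_RX_FIFO offsets defined in cryp.h above. This is why the probe hunk further down stores res->start in device_data->phybase: the DMA engine needs the physical FIFO address, while base remains the ioremap()ed virtual mapping.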
@@ -537,10 +553,10 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
"(TO_DEVICE)", __func__);
- desc = channel->device->device_prep_slave_sg(channel,
- ctx->device->dma.sg_src,
- ctx->device->dma.sg_src_len,
- direction, DMA_CTRL_ACK, NULL);
+ desc = dmaengine_prep_slave_sg(channel,
+ ctx->device->dma.sg_src,
+ ctx->device->dma.sg_src_len,
+ direction, DMA_CTRL_ACK);
break;
case DMA_FROM_DEVICE:
@@ -561,12 +577,12 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
"(FROM_DEVICE)", __func__);
- desc = channel->device->device_prep_slave_sg(channel,
- ctx->device->dma.sg_dst,
- ctx->device->dma.sg_dst_len,
- direction,
- DMA_CTRL_ACK |
- DMA_PREP_INTERRUPT, NULL);
+ desc = dmaengine_prep_slave_sg(channel,
+ ctx->device->dma.sg_dst,
+ ctx->device->dma.sg_dst_len,
+ direction,
+ DMA_CTRL_ACK |
+ DMA_PREP_INTERRUPT);
desc->callback = cryp_dma_out_callback;
desc->callback_param = ctx;
@@ -578,7 +594,7 @@ static int cryp_set_dma_transfer(struct cryp_ctx *ctx,
return -EFAULT;
}
- cookie = desc->tx_submit(desc);
+ cookie = dmaengine_submit(desc);
dma_async_issue_pending(channel);
return 0;
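The conversions from channel->device->device_prep_slave_sg(..., NULL) to dmaengine_prep_slave_sg(...) are behavior-preserving: the latter is the inline wrapper from include/linux/dmaengine.h, paraphrased below (the exact body should be checked against the kernel version in use), and dmaengine_submit() similarly wraps desc->tx_submit(desc).

/* Paraphrased from include/linux/dmaengine.h of this era: */
static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction dir, unsigned long flags)
{
	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
						  dir, flags, NULL);
}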
@@ -591,12 +607,12 @@ static void cryp_dma_done(struct cryp_ctx *ctx)
dev_dbg(ctx->device->dev, "[%s]: ", __func__);
chan = ctx->device->dma.chan_mem2cryp;
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src,
ctx->device->dma.sg_src_len, DMA_TO_DEVICE);
chan = ctx->device->dma.chan_cryp2mem;
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst,
ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE);
}
@@ -1431,6 +1447,7 @@ static int ux500_cryp_probe(struct platform_device *pdev)
goto out_kfree;
}
+ device_data->phybase = res->start;
device_data->base = ioremap(res->start, resource_size(res));
if (!device_data->base) {
dev_err(dev, "[%s]: ioremap failed!", __func__);
@@ -1458,11 +1475,17 @@ static int ux500_cryp_probe(struct platform_device *pdev)
goto out_regulator;
}
+ ret = clk_prepare(device_data->clk);
+ if (ret) {
+ dev_err(dev, "[%s]: clk_prepare() failed!", __func__);
+ goto out_clk;
+ }
+
/* Enable device power (and clock) */
ret = cryp_enable_power(device_data->dev, device_data, false);
if (ret) {
dev_err(dev, "[%s]: cryp_enable_power() failed!", __func__);
- goto out_clk;
+ goto out_clk_unprepare;
}
cryp_error = cryp_check(device_data);
@@ -1518,11 +1541,16 @@ static int ux500_cryp_probe(struct platform_device *pdev)
goto out_power;
}
+ dev_info(dev, "successfully registered\n");
+
return 0;
out_power:
cryp_disable_power(device_data->dev, device_data, false);
+out_clk_unprepare:
+ clk_unprepare(device_data->clk);
+
out_clk:
clk_put(device_data->clk);
@@ -1593,6 +1621,7 @@ static int ux500_cryp_remove(struct platform_device *pdev)
dev_err(&pdev->dev, "[%s]: cryp_disable_power() failed",
__func__);
+ clk_unprepare(device_data->clk);
clk_put(device_data->clk);
regulator_put(device_data->pwr_regulator);
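With the common clock framework, clk_enable() is only valid on a prepared clock, so probe now calls clk_prepare() before powering the device, and both the new out_clk_unprepare error label and the remove path call clk_unprepare() in the reverse order. A minimal sketch of the canonical pairing these hunks adopt (illustrative function, not driver code):

static int example_clk_on(struct clk *clk)
{
	int ret;

	ret = clk_prepare(clk);		/* may sleep; never from atomic context */
	if (ret)
		return ret;

	ret = clk_enable(clk);		/* the atomic-safe half */
	if (ret)
		clk_unprepare(clk);	/* unwind in reverse order */

	return ret;
}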
@@ -1743,6 +1772,11 @@ static int ux500_cryp_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ux500_cryp_pm, ux500_cryp_suspend, ux500_cryp_resume);
+static const struct of_device_id ux500_cryp_match[] = {
+ { .compatible = "stericsson,ux500-cryp" },
+ { },
+};
+
static struct platform_driver cryp_driver = {
.probe = ux500_cryp_probe,
.remove = ux500_cryp_remove,
@@ -1750,6 +1784,7 @@ static struct platform_driver cryp_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "cryp1",
+ .of_match_table = ux500_cryp_match,
.pm = &ux500_cryp_pm,
}
};
diff --git a/drivers/crypto/ux500/hash/hash_alg.h b/drivers/crypto/ux500/hash/hash_alg.h
index cd9351cb24df..be6eb54da40f 100644
--- a/drivers/crypto/ux500/hash/hash_alg.h
+++ b/drivers/crypto/ux500/hash/hash_alg.h
@@ -11,6 +11,7 @@
#include <linux/bitops.h>
#define HASH_BLOCK_SIZE 64
+#define HASH_DMA_FIFO 4
#define HASH_DMA_ALIGN_SIZE 4
#define HASH_DMA_PERFORMANCE_MIN_SIZE 1024
#define HASH_BYTES_PER_WORD 4
@@ -347,7 +348,8 @@ struct hash_req_ctx {
/**
* struct hash_device_data - structure for a hash device.
- * @base: Pointer to the hardware base address.
+ * @base: Pointer to virtual base address of the hash device.
+ * @phybase: Pointer to physical memory location of the hash device.
* @list_node: For inclusion in klist.
* @dev: Pointer to the device dev structure.
* @ctx_lock: Spinlock for current_ctx.
@@ -361,6 +363,7 @@ struct hash_req_ctx {
*/
struct hash_device_data {
struct hash_register __iomem *base;
+ phys_addr_t phybase;
struct klist_node list_node;
struct device *dev;
struct spinlock ctx_lock;
diff --git a/drivers/crypto/ux500/hash/hash_core.c b/drivers/crypto/ux500/hash/hash_core.c
index cf5508967539..496ae6aae316 100644
--- a/drivers/crypto/ux500/hash/hash_core.c
+++ b/drivers/crypto/ux500/hash/hash_core.c
@@ -122,6 +122,13 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data,
struct device *dev)
{
struct hash_platform_data *platform_data = dev->platform_data;
+ struct dma_slave_config conf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = device_data->phybase + HASH_DMA_FIFO,
+ .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
+ .dst_maxburst = 16,
+ };
+
dma_cap_zero(device_data->dma.mask);
dma_cap_set(DMA_SLAVE, device_data->dma.mask);
@@ -131,6 +138,8 @@ static void hash_dma_setup_channel(struct hash_device_data *device_data,
platform_data->dma_filter,
device_data->dma.cfg_mem2hash);
+ dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);
+
init_completion(&device_data->dma.complete);
}
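hash_dma_setup_channel() gets the same treatment as the cryp driver above, with a single mem-to-device channel: the slave config points the burst engine at the hash input FIFO (phybase + HASH_DMA_FIFO, the new constant in hash_alg.h) and is applied through dmaengine_slave_config().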
@@ -171,9 +180,9 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer "
"(TO_DEVICE)", __func__);
- desc = channel->device->device_prep_slave_sg(channel,
+ desc = dmaengine_prep_slave_sg(channel,
ctx->device->dma.sg, ctx->device->dma.sg_len,
- direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT, NULL);
+ direction, DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
if (!desc) {
dev_err(ctx->device->dev,
"[%s]: device_prep_slave_sg() failed!", __func__);
@@ -183,7 +192,7 @@ static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
desc->callback = hash_dma_callback;
desc->callback_param = ctx;
- cookie = desc->tx_submit(desc);
+ cookie = dmaengine_submit(desc);
dma_async_issue_pending(channel);
return 0;
@@ -194,7 +203,7 @@ static void hash_dma_done(struct hash_ctx *ctx)
struct dma_chan *chan;
chan = ctx->device->dma.chan_mem2hash;
- chan->device->device_control(chan, DMA_TERMINATE_ALL, 0);
+ dmaengine_device_control(chan, DMA_TERMINATE_ALL, 0);
dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
ctx->device->dma.sg_len, DMA_TO_DEVICE);
@@ -464,12 +473,12 @@ static void hash_hw_write_key(struct hash_device_data *device_data,
HASH_SET_DIN(&word, nwords);
}
- while (device_data->base->str & HASH_STR_DCAL_MASK)
+ while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
HASH_SET_DCAL;
- while (device_data->base->str & HASH_STR_DCAL_MASK)
+ while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
}
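All the while (device_data->base->str & ...) spins dereferenced an __iomem pointer as if it were ordinary memory; these hunks route every such poll through readl(), so sparse can check the address-space annotation and the access gets proper MMIO semantics. A minimal illustration, assuming the driver's hash_register layout:

/* Poll the STR register through the accessor, not a plain dereference. */
static u32 hash_read_str(struct hash_register __iomem *base)
{
	return readl(&base->str);	/* was: base->str */
}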
@@ -652,7 +661,7 @@ static void hash_messagepad(struct hash_device_data *device_data,
if (index_bytes)
HASH_SET_DIN(message, nwords);
- while (device_data->base->str & HASH_STR_DCAL_MASK)
+ while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
/* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
@@ -667,7 +676,7 @@ static void hash_messagepad(struct hash_device_data *device_data,
(int)(readl_relaxed(&device_data->base->str) &
HASH_STR_NBLW_MASK));
- while (device_data->base->str & HASH_STR_DCAL_MASK)
+ while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
}
@@ -767,7 +776,7 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
/* HW and SW initializations */
/* Note: there is no need to initialize buffer and digest members */
- while (device_data->base->str & HASH_STR_DCAL_MASK)
+ while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
/*
@@ -783,8 +792,7 @@ void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}
-int hash_process_data(
- struct hash_device_data *device_data,
+static int hash_process_data(struct hash_device_data *device_data,
struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
int msg_length, u8 *data_buffer, u8 *buffer, u8 *index)
{
@@ -953,7 +961,7 @@ static int hash_dma_final(struct ahash_request *req)
wait_for_completion(&ctx->device->dma.complete);
hash_dma_done(ctx);
- while (device_data->base->str & HASH_STR_DCAL_MASK)
+ while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
@@ -983,7 +991,7 @@ out:
* hash_hw_final - The final hash calculation function
* @req: The hash request for the job.
*/
-int hash_hw_final(struct ahash_request *req)
+static int hash_hw_final(struct ahash_request *req)
{
int ret = 0;
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -1051,7 +1059,7 @@ int hash_hw_final(struct ahash_request *req)
req_ctx->state.index);
} else {
HASH_SET_DCAL;
- while (device_data->base->str & HASH_STR_DCAL_MASK)
+ while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
}
@@ -1180,7 +1188,7 @@ int hash_resume_state(struct hash_device_data *device_data,
temp_cr = device_state->temp_cr;
writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);
- if (device_data->base->cr & HASH_CR_MODE_MASK)
+ if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
hash_mode = HASH_OPER_MODE_HMAC;
else
hash_mode = HASH_OPER_MODE_HASH;
@@ -1224,7 +1232,7 @@ int hash_save_state(struct hash_device_data *device_data,
* actually makes sure that there isn't any ongoing calculation in the
* hardware.
*/
- while (device_data->base->str & HASH_STR_DCAL_MASK)
+ while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
cpu_relax();
temp_cr = readl_relaxed(&device_data->base->cr);
@@ -1233,7 +1241,7 @@ int hash_save_state(struct hash_device_data *device_data,
device_state->din_reg = readl_relaxed(&device_data->base->din);
- if (device_data->base->cr & HASH_CR_MODE_MASK)
+ if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
hash_mode = HASH_OPER_MODE_HMAC;
else
hash_mode = HASH_OPER_MODE_HASH;
@@ -1699,6 +1707,7 @@ static int ux500_hash_probe(struct platform_device *pdev)
goto out_kfree;
}
+ device_data->phybase = res->start;
device_data->base = ioremap(res->start, resource_size(res));
if (!device_data->base) {
dev_err(dev, "[%s] ioremap() failed!",
@@ -1726,11 +1735,17 @@ static int ux500_hash_probe(struct platform_device *pdev)
goto out_regulator;
}
+ ret = clk_prepare(device_data->clk);
+ if (ret) {
+ dev_err(dev, "[%s] clk_prepare() failed!", __func__);
+ goto out_clk;
+ }
+
/* Enable device power (and clock) */
ret = hash_enable_power(device_data, false);
if (ret) {
dev_err(dev, "[%s]: hash_enable_power() failed!", __func__);
- goto out_clk;
+ goto out_clk_unprepare;
}
ret = hash_check_hw(device_data);
@@ -1756,12 +1771,15 @@ static int ux500_hash_probe(struct platform_device *pdev)
goto out_power;
}
- dev_info(dev, "[%s] successfully probed\n", __func__);
+ dev_info(dev, "successfully registered\n");
return 0;
out_power:
hash_disable_power(device_data, false);
+out_clk_unprepare:
+ clk_unprepare(device_data->clk);
+
out_clk:
clk_put(device_data->clk);
@@ -1826,6 +1844,7 @@ static int ux500_hash_remove(struct platform_device *pdev)
dev_err(dev, "[%s]: hash_disable_power() failed",
__func__);
+ clk_unprepare(device_data->clk);
clk_put(device_data->clk);
regulator_put(device_data->regulator);
@@ -1961,6 +1980,11 @@ static int ux500_hash_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);
+static const struct of_device_id ux500_hash_match[] = {
+ { .compatible = "stericsson,ux500-hash" },
+ { },
+};
+
static struct platform_driver hash_driver = {
.probe = ux500_hash_probe,
.remove = ux500_hash_remove,
@@ -1968,6 +1992,7 @@ static struct platform_driver hash_driver = {
.driver = {
.owner = THIS_MODULE,
.name = "hash1",
+ .of_match_table = ux500_hash_match,
.pm = &ux500_hash_pm,
}
};