crypto:msm: Fix crypto 5 aes(ccm) issue.

This patch properly handles the operation completion status
when a crypto operation completes.
The crypto device status register is cleared at the beginning of
each operation.

Change-Id: I54e38160c939c17fe58ebdce44f8080b3837e3e7
Signed-off-by: Hariprasad Dhalinarasimha <hnamgund@codeaurora.org>
This commit is contained in:
Hariprasad Dhalinarasimha 2013-05-15 11:29:16 -07:00 committed by Stephen Boyd
parent a5c7e3a618
commit d42142435a
2 changed files with 160 additions and 33 deletions

View File

@ -717,6 +717,9 @@ static int _ce_setup_hash_direct(struct qce_device *pce_dev,
bool sha1 = false;
uint32_t auth_cfg = 0;
/* clear status */
writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG);
writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
CRYPTO_CONFIG_REG));
/*
@ -896,6 +899,9 @@ static int _ce_setup_cipher_direct(struct qce_device *pce_dev,
uint32_t ivsize = creq->ivsize;
int i;
/* clear status */
writel_relaxed(0, pce_dev->iobase + CRYPTO_STATUS_REG);
writel_relaxed(pce_dev->reg.crypto_cfg_be, (pce_dev->iobase +
CRYPTO_CONFIG_REG));
/*
@ -1213,6 +1219,7 @@ static int _aead_complete(struct qce_device *pce_dev)
{
struct aead_request *areq;
unsigned char mac[SHA256_DIGEST_SIZE];
uint32_t status;
areq = (struct aead_request *) pce_dev->areq;
if (areq->src != areq->dst) {
@ -1227,16 +1234,55 @@ static int _aead_complete(struct qce_device *pce_dev)
/* check MAC */
memcpy(mac, (char *)(&pce_dev->ce_sps.result->auth_iv[0]),
SHA256_DIGEST_SIZE);
/* read status before unlock */
status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
if (_qce_unlock_other_pipes(pce_dev))
return -EINVAL;
if (pce_dev->mode == QCE_MODE_CCM) {
uint32_t result_status;
int32_t result_status;
/*
* Don't use result dump status. The operation may not
* be complete.
* Instead, use the status we just read of device.
* In case, we need to use result_status from result
* dump the result_status needs to be byte swapped,
* since we set the device to little endian.
*/
result_status = pce_dev->ce_sps.result->status;
result_status &= (1 << CRYPTO_MAC_FAILED);
result_status |= (pce_dev->ce_sps.consumer_status |
pce_dev->ce_sps.producer_status);
pce_dev->ce_sps.result->status = 0;
if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
| (1 << CRYPTO_HSD_ERR))) {
pr_err("aead operation error. Status %x\n",
status);
result_status = -ENXIO;
} else if (pce_dev->ce_sps.consumer_status |
pce_dev->ce_sps.producer_status) {
pr_err("aead sps operation error. sps status %x %x\n",
pce_dev->ce_sps.consumer_status,
pce_dev->ce_sps.producer_status);
result_status = -ENXIO;
} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
pr_err("aead operation not done? Status %x, sps status %x %x\n",
status,
pce_dev->ce_sps.consumer_status,
pce_dev->ce_sps.producer_status);
result_status = -ENXIO;
} else if (status & (1 << CRYPTO_MAC_FAILED)) {
result_status = -EBADMSG;
} else {
result_status = 0;
}
pce_dev->qce_cb(areq, mac, NULL, result_status);
} else {
uint32_t ivsize = 0;
struct crypto_aead *aead;
@ -1260,6 +1306,8 @@ static int _sha_complete(struct qce_device *pce_dev)
struct ahash_request *areq;
unsigned char digest[SHA256_DIGEST_SIZE];
uint32_t bytecount32[2];
int32_t result_status = pce_dev->ce_sps.result->status;
uint32_t status;
areq = (struct ahash_request *) pce_dev->areq;
qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
@ -1269,10 +1317,39 @@ static int _sha_complete(struct qce_device *pce_dev)
_byte_stream_to_net_words(bytecount32,
(unsigned char *)pce_dev->ce_sps.result->auth_byte_count,
2 * CRYPTO_REG_SIZE);
/* read status before unlock */
status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
if (_qce_unlock_other_pipes(pce_dev))
return -EINVAL;
/*
* Don't use result dump status. The operation may not be complete.
* Instead, use the status we just read of device.
* In case, we need to use result_status from result
* dump the result_status needs to be byte swapped,
* since we set the device to little endian.
*/
if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
| (1 << CRYPTO_HSD_ERR))) {
pr_err("sha operation error. Status %x\n", status);
result_status = -ENXIO;
} else if (pce_dev->ce_sps.consumer_status) {
pr_err("sha sps operation error. sps status %x\n",
pce_dev->ce_sps.consumer_status);
result_status = -ENXIO;
} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
pr_err("sha operation not done? Status %x, sps status %x\n",
status, pce_dev->ce_sps.consumer_status);
result_status = -ENXIO;
} else {
result_status = 0;
}
pce_dev->qce_cb(areq, digest, (char *)bytecount32,
pce_dev->ce_sps.consumer_status);
result_status);
return 0;
};
@ -1280,6 +1357,8 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev)
{
struct ablkcipher_request *areq;
unsigned char iv[NUM_OF_CRYPTO_CNTR_IV_REG * CRYPTO_REG_SIZE];
uint32_t status;
int32_t result_status;
areq = (struct ablkcipher_request *) pce_dev->areq;
@ -1290,13 +1369,46 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev)
qce_dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL :
DMA_TO_DEVICE);
/* read status before unlock */
status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
if (_qce_unlock_other_pipes(pce_dev))
return -EINVAL;
/*
* Don't use result dump status. The operation may not be complete.
* Instead, use the status we just read of device.
* In case, we need to use result_status from result
* dump the result_status needs to be byte swapped,
* since we set the device to little endian.
*/
if (status & ((1 << CRYPTO_SW_ERR) | (1 << CRYPTO_AXI_ERR)
| (1 << CRYPTO_HSD_ERR))) {
pr_err("ablk_cipher operation error. Status %x\n",
status);
result_status = -ENXIO;
} else if (pce_dev->ce_sps.consumer_status |
pce_dev->ce_sps.producer_status) {
pr_err("ablk_cipher sps operation error. sps status %x %x\n",
pce_dev->ce_sps.consumer_status,
pce_dev->ce_sps.producer_status);
result_status = -ENXIO;
} else if ((status & (1 << CRYPTO_OPERATION_DONE)) == 0) {
pr_err("ablk_cipher operation not done? Status %x, sps status %x %x\n",
status,
pce_dev->ce_sps.consumer_status,
pce_dev->ce_sps.producer_status);
result_status = -ENXIO;
} else {
result_status = 0;
}
if (pce_dev->mode == QCE_MODE_ECB) {
pce_dev->qce_cb(areq, NULL, NULL,
pce_dev->ce_sps.consumer_status |
pce_dev->ce_sps.producer_status);
result_status);
} else {
if (pce_dev->ce_sps.minor_version == 0) {
if (pce_dev->mode == QCE_MODE_CBC) {
@ -1342,9 +1454,7 @@ static int _ablk_cipher_complete(struct qce_device *pce_dev)
(char *)(pce_dev->ce_sps.result->encr_cntr_iv),
sizeof(iv));
}
pce_dev->qce_cb(areq, NULL, iv,
pce_dev->ce_sps.consumer_status |
pce_dev->ce_sps.producer_status);
pce_dev->qce_cb(areq, NULL, iv, result_status);
}
return 0;
};
@ -1993,6 +2103,9 @@ static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev,
break;
}
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
@ -2037,13 +2150,9 @@ static int _setup_cipher_aes_cmdlistptrs(struct qce_device *pdev,
if (mode == QCE_MODE_XTS) {
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
0, &pcl_info->auth_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
0, &pcl_info->auth_seg_size);
} else {
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
0, &pcl_info->auth_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
0, &pcl_info->auth_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_START_REG,
0, &pcl_info->auth_seg_size);
}
@ -2130,6 +2239,9 @@ static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev,
break;
}
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
@ -2156,13 +2268,7 @@ static int _setup_cipher_des_cmdlistptrs(struct qce_device *pdev,
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CNTR1_IV1_REG, 0,
NULL);
}
/* Add dummy to align size to burst-size multiple */
if (!mode_cbc) {
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_SIZE_REG,
0, &pcl_info->auth_seg_size);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
0, &pcl_info->auth_seg_size);
}
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_le, NULL);
@ -2206,10 +2312,13 @@ static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
auth_cfg = pdev->reg.auth_cfg_sha1;
iv_reg = 5;
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
0, NULL);
break;
case QCE_HASH_SHA256:
@ -2218,13 +2327,16 @@ static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
auth_cfg = pdev->reg.auth_cfg_sha256;
iv_reg = 8;
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
/* 1 dummy write */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
0, NULL);
break;
case QCE_HASH_SHA1_HMAC:
cmdlistptr->auth_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
@ -2233,10 +2345,13 @@ static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
auth_cfg = pdev->reg.auth_cfg_hmac_sha1;
key_reg = 16;
iv_reg = 5;
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
0, NULL);
break;
case QCE_AEAD_SHA1_HMAC:
cmdlistptr->aead_sha1_hmac.cmdlist = (uint32_t)ce_vaddr;
@ -2245,13 +2360,16 @@ static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
auth_cfg = pdev->reg.auth_cfg_aead_sha1_hmac;
key_reg = 16;
iv_reg = 5;
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
/* 1 dummy write */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
0, NULL);
break;
case QCE_HASH_SHA256_HMAC:
cmdlistptr->auth_sha256_hmac.cmdlist = (uint32_t)ce_vaddr;
@ -2261,13 +2379,15 @@ static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
key_reg = 16;
iv_reg = 8;
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
/* 1 dummy write */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
0, NULL);
break;
case QCE_HASH_AES_CMAC:
if (key_128 == true) {
@ -2285,13 +2405,16 @@ static int _setup_auth_cmdlistptrs(struct qce_device *pdev,
auth_cfg = pdev->reg.auth_cfg_cmac_256;
key_reg = 8;
}
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0,
NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
/* 1 dummy write */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG,
0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_AUTH_SEG_CFG_REG,
0, NULL);
break;
default:
pr_err("Unknown algorithms %d received, exiting now\n", alg);
@ -2395,10 +2518,13 @@ static int _setup_aead_cmdlistptrs(struct qce_device *pdev,
key_reg = 8;
}
/* clear status register */
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_STATUS_REG, 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_CONFIG_REG,
pdev->reg.crypto_cfg_be, &pcl_info->crypto_cfg);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_SIZE_REG, 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_CFG_REG, 0, NULL);
qce_add_cmd_element(pdev, &ce_vaddr, CRYPTO_ENCR_SEG_START_REG, 0,
NULL);

View File

@ -297,6 +297,7 @@
#define CRYPTO_DOUT_SIZE_AVAIL_MASK (0x1F << CRYPTO_DOUT_SIZE_AVAIL)
#define CRYPTO_DIN_SIZE_AVAIL 21 /* bit 21-25 */
#define CRYPTO_DIN_SIZE_AVAIL_MASK (0x1F << CRYPTO_DIN_SIZE_AVAIL)
#define CRYPTO_HSD_ERR 20
#define CRYPTO_ACCESS_VIOL 19
#define CRYPTO_PIPE_ACTIVE_ERR 18
#define CRYPTO_CFG_CHNG_ERR 17