From ffda655125dc2495259ecec8627c847fbf82e0e9 Mon Sep 17 00:00:00 2001 From: Stephen Boyd Date: Fri, 11 Jan 2013 10:09:31 -0800 Subject: [PATCH] crypto: Add MSM crypto drivers Signed-off-by: Stephen Boyd --- Documentation/crypto/msm/qce.txt | 228 ++ Documentation/crypto/msm/qce40.txt | 241 ++ Documentation/crypto/msm/qcedev.txt | 232 ++ Documentation/crypto/msm/qcrypto.txt | 144 + drivers/crypto/Kconfig | 43 + drivers/crypto/Makefile | 1 + drivers/crypto/msm/Makefile | 8 + drivers/crypto/msm/ota_crypto.c | 731 ++++ drivers/crypto/msm/qce.c | 2709 +++++++++++++ drivers/crypto/msm/qce.h | 160 + drivers/crypto/msm/qce40.c | 2609 +++++++++++++ drivers/crypto/msm/qce40.h | 240 ++ drivers/crypto/msm/qce_ota.h | 30 + drivers/crypto/msm/qcedev.c | 2228 +++++++++++ drivers/crypto/msm/qcrypto.c | 3367 +++++++++++++++++ drivers/crypto/msm/qcryptohw_30.h | 308 ++ drivers/crypto/msm/qcryptohw_40.h | 316 ++ include/linux/Kbuild | 1 + .../linux/platform_data/qcom_crypto_device.h | 24 + include/linux/qcedev.h | 241 ++ include/linux/qcota.h | 165 + 21 files changed, 14026 insertions(+) create mode 100644 Documentation/crypto/msm/qce.txt create mode 100644 Documentation/crypto/msm/qce40.txt create mode 100644 Documentation/crypto/msm/qcedev.txt create mode 100644 Documentation/crypto/msm/qcrypto.txt create mode 100644 drivers/crypto/msm/Makefile create mode 100644 drivers/crypto/msm/ota_crypto.c create mode 100644 drivers/crypto/msm/qce.c create mode 100644 drivers/crypto/msm/qce.h create mode 100644 drivers/crypto/msm/qce40.c create mode 100644 drivers/crypto/msm/qce40.h create mode 100644 drivers/crypto/msm/qce_ota.h create mode 100644 drivers/crypto/msm/qcedev.c create mode 100644 drivers/crypto/msm/qcrypto.c create mode 100644 drivers/crypto/msm/qcryptohw_30.h create mode 100644 drivers/crypto/msm/qcryptohw_40.h create mode 100644 include/linux/platform_data/qcom_crypto_device.h create mode 100644 include/linux/qcedev.h create mode 100644 include/linux/qcota.h diff --git a/Documentation/crypto/msm/qce.txt b/Documentation/crypto/msm/qce.txt new file mode 100644 index 000000000000..18435d170e19 --- /dev/null +++ b/Documentation/crypto/msm/qce.txt @@ -0,0 +1,228 @@ +Introduction: +============= + +The Qualcomm crypto engine (qce) driver is a module that +provides common services for accessing the Qualcomm crypto device. +Currently, the two main clients of qce are +-qcrypto driver (module provided for accessing CE HW by kernel space apps) +-qcedev driver (module provided for accessing CE HW by user space apps) + + +The crypto engine (qce) driver is a client to the DMA driver for the Qualcomm +DMA device - Application Data Mover (ADM). ADM is used to provide the DMA +transfer capability between Qualcomm crypto device hardware and DDR memory +for crypto operations. + + Figure 1. + --------- + + Linux kernel + (ex:IPSec)<--*Qualcomm crypto driver----+ + (qcrypto) | + (for kernel space app) | + | + +-->| + | + | *qce <----> Qualcomm + | driver ADM driver <---> ADM HW + +-->| | | + | | | + | | | + | | | + Linux kernel | | | + misc device <--- *QCEDEV Driver-------+ | | + interface (qcedev) (Reg interface) (DMA interface) + (for user space app) \ / + \ / + \ / + \ / + \ / + \ / + \ / + Qualcomm crypto CE3 HW + + + The entities marked with (*) in the Figure 1, are the software components of + the Linux Qualcomm crypto modules. + +=============== +IMPORTANT NOTE: +=============== +(1) The CE hardware can be accessed either from user space OR kernel space, + at one time. 
Both user space and kernel space clients cannot access the
+    qce driver (and the CE hardware) at the same time.
+    - If your device has user space apps that need to access the crypto
+      hardware, make sure to have the qcrypto module disabled/unloaded.
+      The kernel space apps will then use the registered software
+      implementation of the crypto algorithms.
+    - If your device has kernel space apps that need to access the
+      crypto hardware, make sure to have the qcedev module disabled/unloaded
+      and implement your user space application to use the software
+      implementation (ex: openssl/crypto) of the crypto algorithms.
+
+(2) If your device has a Playready (Windows Media DRM) application enabled
+    and uses the qcedev module to access the crypto hardware accelerator,
+    please be informed that for performance reasons, the CE hardware will
+    need to be dedicated to the Playready application. Any other user space
+    application should be implemented to use the software implementation
+    (ex: openssl/crypto) of the crypto algorithms.
+
+
+Hardware description:
+=====================
+
+The Qualcomm Crypto HW device family provides a series of algorithms
+implemented in the device hardware.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES,
+AES algorithms, and concurrent operations of hashing and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 HW provides
+fast AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+the HMAC-SHA1 hashing algorithm, and Over The Air (OTA) f8/f9 algorithms as
+defined by the 3GPP forum.
+
+
+Software description
+====================
+
+The crypto device is defined as a platform device. The driver is
+independent of the platform. The driver supports multiple instances of
+crypto HW.
+All the platform specific parameters are defined in the board init
+file, eg. arch/arm/mach-msm/board-msm7x30.c for MSM7x30.
+
+The qce driver provides the common services of HW crypto
+access to the two drivers listed above (qcedev, qcrypto). It sets up
+the crypto HW device for the operation, then it requests the ADM driver
+to perform the DMA transfers of the crypto operation.
+
+Two ADM channels and two command lists (one command list for each
+channel) are involved in an operation.
+
+The setting up of the command lists and the procedure of the operation
+of the crypto device are described in the following sections.
+
+The command list for the first DMA channel is set up as follows:
+
+  The 1st command of the list is for the DMA transfer from DDR memory to
+  the crypto device, to input data to the crypto device. The dst crci of
+  the command is set to crci-in of this crypto device.
+
+  The 2nd command is for the DMA transfer from the crypto device to DDR
+  memory, for the authentication result. The src crci is set to
+  crci-hash-done of the crypto device. If authentication is not required
+  in the operation, the 2nd command is not used.
+
+The command list for the second DMA channel is set up as follows:
+
+  One command to DMA data from the crypto device to DDR memory, for the
+  encryption or decryption output of the crypto device.
+
+To accomplish concurrent ciphering and authentication operations, the driver
+performs the following steps:
+  (a). set up the HW crypto device,
+  (b). hit the crypto go register,
+  (c). issue the DMA command of the first channel to the ADM driver,
+  (d). issue the DMA command of the 2nd channel to the ADM driver.
+
+SHA1/SHA256 is an authentication/integrity hash algorithm.
To accomplish
+a hash operation (or any authentication-only algorithm), the 2nd DMA channel
+is not required. Only steps (a) to (c) are performed.
+
+At the completion of the DMA operation (for (c) and (d)), the ADM driver
+invokes the callback registered to the DMA driver. This signifies the end of
+the DMA operation(s). The driver reads the status and other information from
+the CE hardware registers and then invokes the callback to the qce driver
+client. This signals the completion and the results of the DMA, along with
+the status of the CE hardware, to the qce driver client. This completes a
+crypto operation.
+
+In the qce driver initialization, memory for the two command lists and the
+descriptor lists for each crypto device is allocated out of coherent memory,
+using the Linux DMA API. The driver pre-configures most of the two ADM
+command lists in the initialization. During each crypto operation, minimal
+set up is required. The src_dscr and/or dst_dscr descriptor lists of the
+ADM command are populated from the information obtained from the
+corresponding data structure. eg: for an AEAD request, the following data
+structure provides the information:
+
+ struct aead_request *req
+ ......
+ req->assoc
+ req->src
+ req->dst
+
+The DMA address of a scatter list will be retrieved and set up in the
+descriptor list of an ADM command.
+
+Power Management
+================
+ none
+
+
+Interface:
+==========
+
+The interface is defined in drivers/crypto/msm/qce.h
+
+The qcrypto and qcedev drivers are the clients using
+the interfaces.
+
+The following services are provided by the qce driver -
+
+     qce_open(), qce_close(), qce_ablk_cipher_req(),
+     qce_hw_support(), qce_process_sha_req()
+
+ qce_open() is the first request from the client, ex. the Qualcomm crypto
+ driver (qcedev, qcrypto), to open a crypto engine. It is normally
+ called at the probe function of the client for a device. During the
+ probe,
+ - the ADM command list structure will be set up,
+ - the crypto device will be initialized,
+ - the resources associated with the crypto engine are retrieved by doing
+   platform_get_resource() or platform_get_resource_byname().
+
+ The resources for a device are
+ - crci-in, crci-out, crci-hash-done,
+ - two DMA channel IDs, one for encryption and decryption input, one for
+   output,
+ - the base address of the HW crypto device.
+
+ qce_close() is the last request from the client. Normally, it is
+ called from the remove function of the client.
+
+ qce_hw_support() allows the client to query what is supported
+ by the crypto engine hardware.
+
+ qce_ablk_cipher_req() provides the ciphering service to the client.
+ qce_process_sha_req() provides the hashing service to the client.
+ qce_aead_req() provides the AEAD service to the client.
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file.
+-CE hardware base register address
+-Data mover channel used for transfer to/from CE hardware
+These parameters differ on each platform.
+
+
+Dependencies:
+=============
+
+Existing DMA driver.
+The transfers are DMA'ed between the crypto hardware and DDR memory via the
+data mover, ADM. The data transfers are set up to use the existing DMA
+driver.
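+
+Example:
+========
+
+The sketch below shows the expected call flow for a hypothetical client
+driver (the probe/remove names are illustrative, not part of this patch).
+It is built only from the qce calls described above and mirrors how the
+qcedev and ota_crypto clients in this patch use them:
+
+    #include <linux/platform_device.h>
+    #include "qce.h"
+
+    static void *qce_handle;
+
+    static int my_client_probe(struct platform_device *pdev)
+    {
+            struct ce_hw_support ce_support;
+            int rc = 0;
+
+            /* First request to the engine: open it. The resources (CRCIs,
+             * DMA channels, base address) come from the platform device. */
+            qce_handle = qce_open(pdev, &rc);
+            if (qce_handle == NULL)
+                    return rc;
+
+            /* Query the hardware capabilities before relying on them. */
+            rc = qce_hw_support(qce_handle, &ce_support);
+            if (rc < 0) {
+                    qce_close(qce_handle);
+                    qce_handle = NULL;
+                    return rc;
+            }
+            return 0;
+    }
+
+    static int my_client_remove(struct platform_device *pdev)
+    {
+            /* Last request to the engine: close it. */
+            if (qce_handle)
+                    qce_close(qce_handle);
+            return 0;
+    }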
+
+User space utilities:
+=====================
+ n/a
+
+Known issues:
+=============
+ n/a
+
+To do:
+======
+ n/a
diff --git a/Documentation/crypto/msm/qce40.txt b/Documentation/crypto/msm/qce40.txt
new file mode 100644
index 000000000000..e99f7d7ef6cf
--- /dev/null
+++ b/Documentation/crypto/msm/qce40.txt
@@ -0,0 +1,241 @@
+Introduction:
+=============
+
+The Qualcomm crypto engine (qce40) driver is a module that
+provides common services for accessing the Qualcomm crypto device.
+Currently, the two main clients of qce40 are
+-qcrypto driver (module provided for accessing CE HW by kernel space apps)
+-qcedev driver (module provided for accessing CE HW by user space apps)
+This module provides the same interface to the clients as qce.c does, and
+is based on qce.c. The following are the updates from qce.c:
+- Add support for AES XTS mode
+- Add support for CMAC mode
+- Add support for AES CCM mode
+- Add support for SHA1/SHA256 HMAC
+- Read HASH/MAC information directly from CE hardware registers instead of
+  using the data mover.
+
+The crypto engine (qce40) module is a client to the DMA driver for the
+Qualcomm DMA device - Application Data Mover (ADM). ADM is used to provide
+the DMA transfer capability between the Qualcomm crypto device hardware and
+DDR memory for crypto operations.
+
+                        Figure 1.
+                        ---------
+
+    Linux kernel
+    (ex:IPSec)<--*Qualcomm crypto driver----+
+                      (qcrypto)             |
+                 (for kernel space app)     |
+                                            |
+                                        +-->|
+                                            |
+                                            |  *qce40  <---->  Qualcomm
+                                            |  driver        ADM driver <---> ADM HW
+                                        +-->|                    |              |
+                                        |                        |              |
+                                        |                        |              |
+                                        |                        |              |
+    Linux kernel                        |                        |              |
+    misc device  <--- *QCEDEV Driver----+                        |              |
+    interface          (qcedev)            (Reg interface)  (DMA interface)
+    (for user space app)                        \                /
+                                                 \              /
+                                                  \            /
+                                                   \          /
+                                                    \        /
+                                                     \      /
+                                                      \    /
+                                            Qualcomm crypto CE3 HW
+
+
+  The entities marked with (*) in Figure 1 are the software components of
+  the Linux Qualcomm crypto modules.
+
+===============
+IMPORTANT NOTE:
+===============
+(1) The CE hardware can be accessed either from user space OR kernel space,
+    at one time. Both user space and kernel space clients cannot access the
+    qce driver (and the CE hardware) at the same time.
+    - If your device has user space apps that need to access the crypto
+      hardware, make sure to have the qcrypto module disabled/unloaded.
+      The kernel space apps will then use the registered software
+      implementation of the crypto algorithms.
+    - If your device has kernel space apps that need to access the
+      crypto hardware, make sure to have the qcedev module disabled/unloaded
+      and implement your user space application to use the software
+      implementation (ex: openssl/crypto) of the crypto algorithms.
+
+(2) If your device has a Playready (Windows Media DRM) application enabled
+    and uses the qcedev module to access the crypto hardware accelerator,
+    please be informed that for performance reasons, the CE hardware will
+    need to be dedicated to the Playready application. Any other user space
+    application should be implemented to use the software implementation
+    (ex: openssl/crypto) of the crypto algorithms.
+
+
+Hardware description:
+=====================
+
+The Qualcomm Crypto HW device family provides a series of algorithms
+implemented in the device hardware.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES,
+AES algorithms, and concurrent operations of hashing and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 HW provides
+fast AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+the HMAC-SHA1 hashing algorithm, and Over The Air (OTA) f8/f9 algorithms as
+defined by the 3GPP forum.
+
+
+Software description
+====================
+
+The crypto device is defined as a platform device. The driver is
+independent of the platform. The driver supports multiple instances of
+crypto HW.
+All the platform specific parameters are defined in the board init
+file, eg. arch/arm/mach-msm/board-msm8960.c for MSM8960.
+
+The qce40 driver provides the common services of HW crypto
+access to the two drivers listed above (qcedev, qcrypto). It sets up
+the crypto HW device for the operation, then it requests the ADM driver
+to perform the DMA transfers of the crypto operation.
+
+Two ADM channels and two command lists (one command list for each
+channel) are involved in an operation.
+
+The setting up of the command lists and the procedure of the operation
+of the crypto device are described in the following sections.
+
+Each command list contains a single command. For the first DMA channel the
+command list is set up as follows:
+
+  The command is for the DMA transfer from DDR memory to the crypto device,
+  to input data to the crypto device. The dst crci of the command is set to
+  crci-in of this crypto device.
+
+The command list for the second DMA channel is set up as follows:
+
+  One command to DMA data from the crypto device to DDR memory, for the
+  encryption or decryption output of the crypto device.
+
+To accomplish concurrent ciphering and authentication operations, the driver
+performs the following steps:
+  (a). set up the HW crypto device,
+  (b). hit the crypto go register,
+  (c). issue the DMA command of the first channel to the ADM driver,
+  (d). issue the DMA command of the 2nd channel to the ADM driver.
+
+SHA1/SHA256 is an authentication/integrity hash algorithm. To accomplish
+a hash operation (or any authentication-only algorithm), the 2nd DMA channel
+is not required. Only steps (a) to (c) are performed.
+
+At the completion of the DMA operation (for (c) and (d)), the ADM driver
+invokes the callback registered to the DMA driver. This signifies the end of
+the DMA operation(s). The driver reads the status and other information from
+the CE hardware registers. For HASH functions (SHA1/SHA256, HMAC, CMAC and
+CCM), the MAC/HASH information is read directly off the hardware registers.
+
+[ NOTE: This is different from what is done in the qce module that supports
+CE3.x hardware. In CE4.0 there is no CRCI_HASH, and hence we cannot rely
+on the data mover to populate the HMAC/SHA information. This information
+is acquired from the hardware by reading directly from the registers that
+hold this information. ]
+
+The driver then invokes the callback to the qce40 driver client.
+This signals the completion and the results of the DMA, along with the
+status of the CE hardware, to the qce40 driver client. This completes a
+crypto operation.
+
+In the qce40 driver initialization, memory for the two command lists and the
+descriptor lists for each crypto device is allocated out of coherent memory,
+using the Linux DMA API. The driver pre-configures most of the two ADM
+command lists in the initialization. During each crypto operation, minimal
+set up is required. The src_dscr and/or dst_dscr descriptor lists of the
+ADM command are populated from the information obtained from the
+corresponding data structure. eg: for an AEAD request, the following data
+structure provides the information:
+
+ struct aead_request *req
+ ......
+ req->assoc + req->src + req->dst + +The DMA address of a scatter list will be retrieved and set up in the +descriptor list of an ADM command. + +Power Management +================ + none + + +Interface: +========== + +The interface is defined in kernel/drivers/crypto/msm/inc/qce.h + +The clients qcrypto, qcedev drivers are the clients using +the interfaces. + +The following services are provided by the qce driver - + + qce_open(), qce_close(), qce_ablk_cipher_req(), + qce_hw_support(), qce_process_sha_req() + + qce_open() is the first request from the client, ex. Qualcomm crypto + driver (qcedev, qcrypto), to open a crypto engine. It is normally + called at the probe function of the client for a device. During the + probe, + - ADM command list structure will be set up + - Crypto device will be initialized. + - Resource associated with the crypto engine is retrieved by doing + platform_get_resource() or platform_get_resource_byname(). + + The resources for a device are + - crci-in, crci-out, crci-hash-done + - two DMA channel IDs, one for encryption and decryption input, one for + output. + - base address of the HW crypto device. + + qce_close() is the last request from the client. Normally, it is + called from the remove function of the client. + + qce_hw_support() allows the client to query what is supported + by the crypto engine hardware. + + qce_ablk_cipher_req() provides ciphering service to the client. + qce_process_sha_req() provides hashing service to the client. + qce_aead_req() provides aead service to the client. + + +Module parameters: +================== + +The following module parameters are defined in the board init file. +-CE hardware base register address +-Data mover channel used for transfer to/from CE hardware +These parameters differ in each platform. + + +Dependencies: +============= + +Existing DMA driver. +The transfers are DMA'ed between the crypto hardware and DDR memory via the +data mover, ADM. The data transfers are set up to use the existing dma driver. + +User space utilities: +===================== + n/a + +Known issues: +============= + n/a + +To do: +====== + n/a diff --git a/Documentation/crypto/msm/qcedev.txt b/Documentation/crypto/msm/qcedev.txt new file mode 100644 index 000000000000..fde69bbed7c0 --- /dev/null +++ b/Documentation/crypto/msm/qcedev.txt @@ -0,0 +1,232 @@ +Introduction: +============= + +This driver provides IOCTLS for user space application to access crypto +engine hardware for the qcedev crypto services. The driver supports the +following crypto algorithms +- AES-128, AES-256 (ECB, CBC and CTR mode) +- AES-192, (ECB, CBC and CTR mode) + (support exists on platform supporting CE 3.x hardware) +- SHA1/SHA256 +- AES-128, AES-256 (XTS), AES CMAC, SHA1/SHA256 HMAC + (support exists on platform supporting CE 4.x hardware) + +Hardware description: +===================== +Crypto 3E provides cipher and hash algorithms as defined in the +3GPP forum specifications. + + +Software description +==================== + +The driver is a Linux platform device driver. For an msm target, +there can be multiple crypto devices assigned for QCEDEV. + +The driver is a misc device driver as well. +The following operations are registered in the driver, +-qcedev_ioctl() +-qcedev_open() +-qcedev_release() + +The following IOCTLS are available to the user space application(s)- + + Cipher IOCTLs: + -------------- + QCEDEV_IOCTL_ENC_REQ is for encrypting data. + QCEDEV_IOCTL_DEC_REQ is for decrypting data. 
+
+  Hashing/HMAC IOCTLs
+  -------------------
+
+  QCEDEV_IOCTL_SHA_INIT_REQ is for initializing a hash/hmac request.
+  QCEDEV_IOCTL_SHA_UPDATE_REQ is for updating hash/hmac.
+  QCEDEV_IOCTL_SHA_FINAL_REQ is for ending the hash/mac request.
+  QCEDEV_IOCTL_GET_SHA_REQ is for retrieving the hash/hmac for a data
+  packet of known size.
+  QCEDEV_IOCTL_GET_CMAC_REQ is for retrieving the MAC (using the AES CMAC
+  algorithm) for a data packet of known size.
+
+The requests are synchronous. The driver will put the process to
+sleep, waiting for the completion of the requests using
+wait_for_completion().
+
+Since the requests come from a user space application, before giving
+the requests to the low level qce driver, the ioctl requests and the
+associated input/output buffers have to be validated and copied
+to/from kernel space.
+
+The extra copying of requests/buffers can affect the performance. The issue
+with copying the data buffer is resolved by having the client use PMEM
+allocated buffers.
+
+NOTE: Using memory allocated via PMEM is supported only for in-place
+      operations where the source and destination buffers point to the same
+      location. Different source and destination buffers are not supported
+      currently.
+      Furthermore, when using PMEM in AES CTR mode, a non-zero byteoffset
+      is not supported when issuing an encryption or decryption request.
+
+The design of the driver is to allow multiple opens, and multiple requests
+to be issued from application(s). Therefore, the driver will internally
+queue the requests, and serialize the requests to the low level qce (or
+qce40) driver.
+
+On an IOCTL request from an application, if there is no outstanding
+request, the driver will issue a "qce" request; otherwise,
+the request is queued in the driver queue. The process is suspended
+waiting for completion.
+
+On completion of a request by the low level qce driver, the internal
+tasklet (done_tasklet) is scheduled. The sole purpose of done_tasklet is
+to call the completion of the current active request (complete()), and
+issue more requests to the qce, if any.
+When the process wakes up from wait_for_completion(), it will collect the
+return code, and return from the ioctl.
+
+A spin lock is used to protect the critical section of the internal queue,
+which can be accessed from multiple tasks, SMP, and the completion callback
+from qce.
+
+The driver maintains a set of statistics using debugfs. The files are
+in /debug/qcedev/stats1, /debug/qcedev/stats2, /debug/qcedev/stats3;
+one for each instance of device. Reading the file associated with
+a device will retrieve the driver statistics for that device.
+Any write to the file will clear the statistics.
+
+
+Power Management
+================
+n/a
+
+
+Interface:
+==========
+
+Linux user space applications will need to open a handle
+(file descriptor) to the qcedev device. This is achieved by doing
+the following to retrieve a file descriptor to the device.
+
+     fd = open("/dev/qce", O_RDWR);
+     ..
+     ioctl(fd, ...);
+
+Once a valid fd is retrieved, the user can call the following ioctls with
+the fd as the first parameter and a pointer to an appropriate data
+structure, qcedev_cipher_op_req or qcedev_sha_op_req (depending on
+cipher/hash functionality), as the second parameter.
+
+The following IOCTLs are available to the user space application(s) -
+
+  Cipher IOCTLs:
+  --------------
+
+  QCEDEV_IOCTL_ENC_REQ is for encrypting data.
+  QCEDEV_IOCTL_DEC_REQ is for decrypting data.
+
+  The caller of the IOCTL passes a pointer to the structure shown
+  below as the second parameter.
+
+  struct qcedev_cipher_op_req {
+	int use_pmem;
+	union {
+		struct qcedev_pmem_info pmem;
+		struct qcedev_vbuf_info vbuf;
+	};
+	uint32_t entries;
+	uint32_t data_len;
+	uint8_t in_place_op;
+	uint8_t enckey[QCEDEV_MAX_KEY_SIZE];
+	uint32_t encklen;
+	uint8_t iv[QCEDEV_MAX_IV_SIZE];
+	uint32_t ivlen;
+	uint32_t byteoffset;
+	enum qcedev_cipher_alg_enum alg;
+	enum qcedev_cipher_mode_enum mode;
+	enum qcedev_oper_enum op;
+  };
+
+  Hashing/HMAC IOCTLs
+  -------------------
+
+  QCEDEV_IOCTL_SHA_INIT_REQ is for initializing a hash/hmac request.
+  QCEDEV_IOCTL_SHA_UPDATE_REQ is for updating hash/hmac.
+  QCEDEV_IOCTL_SHA_FINAL_REQ is for ending the hash/mac request.
+  QCEDEV_IOCTL_GET_SHA_REQ is for retrieving the hash/hmac for a data
+  packet of known size.
+  QCEDEV_IOCTL_GET_CMAC_REQ is for retrieving the MAC (using the AES CMAC
+  algorithm) for a data packet of known size.
+
+  The caller of the IOCTL passes a pointer to the structure shown
+  below as the second parameter.
+
+  struct qcedev_sha_op_req {
+	struct buf_info data[QCEDEV_MAX_BUFFERS];
+	uint32_t entries;
+	uint32_t data_len;
+	uint8_t digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t diglen;
+	uint8_t *authkey;
+	uint32_t authklen;
+	enum qcedev_sha_alg_enum alg;
+	struct qcedev_sha_ctxt ctxt;
+  };
+
+The IOCTLs and associated request data structures are defined in
+  include/linux/qcedev.h.
+
+
+Module parameters:
+==================
+
+The following module parameters are defined in the board init file.
+-CE hardware base register address
+-Data mover channel used for transfer to/from CE hardware
+These parameters differ on each platform.
+
+
+
+Dependencies:
+=============
+qce driver. Please see Documentation/crypto/msm/qce.txt.
+
+
+User space utilities:
+=====================
+
+none
+
+Known issues:
+=============
+
+none.
+
+
+To do:
+======
+  Enhance cipher functionality:
+  (1) Add support for handling > 32KB for ciphering functionality when
+      - the operation is not an "in place" operation (source != destination)
+        (when using PMEM allocated memory).
+
+Limitations:
+============
+  (1) In the case of cipher functionality, the driver does not support
+      a combination of different memory sources for source/destination.
+      In other words, the memory pointed to by src and dst
+      must BOTH (src/dst) be "pmem" or BOTH (src/dst) be "vbuf".
+
+  (2) In the case of hash functionality, the driver does not support
+      handling data buffers allocated via PMEM.
+
+  (3) Do not load this driver if your device already has kernel space apps
+      that need to access the crypto hardware.
+      Make sure to have the qcedev module disabled/unloaded and implement
+      your user space application to use the software implementation
+      (ex: openssl/crypto) of the crypto algorithms.
+      (NOTE: Please refer to details on the limitations listed in qce.txt)
+
+  (4) If your device has a Playready (Windows Media DRM) application enabled
+      and uses the qcedev module to access the crypto hardware accelerator,
+      please be informed that for performance reasons, the CE hardware will
+      need to be dedicated to the Playready application. Any other user
+      space application should be implemented to use the software
+      implementation (ex: openssl/crypto) of the crypto algorithms.
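+
+Example:
+========
+
+The sketch below shows how a user space application might drive the cipher
+IOCTL described above, for an in-place AES-128-CBC encryption over virtual
+buffers. The enum constant names (QCEDEV_ALG_AES, QCEDEV_AES_MODE_CBC,
+QCEDEV_OPER_ENC) and the src/dst buf_info layout of struct qcedev_vbuf_info
+are assumed to match include/linux/qcedev.h from this patch and should be
+verified against that header:
+
+    #include <fcntl.h>
+    #include <stdint.h>
+    #include <string.h>
+    #include <unistd.h>
+    #include <sys/ioctl.h>
+    #include <linux/qcedev.h>
+
+    int encrypt_buffer(uint8_t *buf, uint32_t len,
+                       uint8_t key[16], uint8_t iv[16])
+    {
+            struct qcedev_cipher_op_req req;
+            int fd, rc;
+
+            fd = open("/dev/qce", O_RDWR);
+            if (fd < 0)
+                    return fd;
+
+            memset(&req, 0, sizeof(req));
+            req.use_pmem = 0;               /* virtual buffers, not PMEM */
+            req.in_place_op = 1;            /* source == destination */
+            req.entries = 1;                /* one src/dst buffer pair */
+            req.data_len = len;             /* total data, <= 32KB CE limit */
+            req.vbuf.src[0].vaddr = buf;    /* assumed vbuf layout */
+            req.vbuf.src[0].len = len;
+            req.vbuf.dst[0].vaddr = buf;
+            req.vbuf.dst[0].len = len;
+            memcpy(req.enckey, key, 16);    /* AES-128 */
+            req.encklen = 16;
+            memcpy(req.iv, iv, 16);
+            req.ivlen = 16;
+            req.byteoffset = 0;             /* only meaningful for CTR */
+            req.alg = QCEDEV_ALG_AES;       /* assumed constant names */
+            req.mode = QCEDEV_AES_MODE_CBC;
+            req.op = QCEDEV_OPER_ENC;
+
+            rc = ioctl(fd, QCEDEV_IOCTL_ENC_REQ, &req);
+            close(fd);
+            return rc;
+    }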
diff --git a/Documentation/crypto/msm/qcrypto.txt b/Documentation/crypto/msm/qcrypto.txt
new file mode 100644
index 000000000000..81aa1941e157
--- /dev/null
+++ b/Documentation/crypto/msm/qcrypto.txt
@@ -0,0 +1,144 @@
+Introduction:
+=============
+
+The Qualcomm Crypto (qcrypto) driver is a Linux crypto driver which
+interfaces with the Linux kernel crypto API layer to provide the HW crypto
+functions. This driver is accessed by kernel space apps via the kernel
+crypto API layer. At present there is no means for user space apps to
+access this module.
+
+Hardware description:
+=====================
+
+The Qualcomm Crypto HW device family provides a series of algorithms
+implemented in the device.
+
+Crypto 2 hardware provides hashing - SHA-1, SHA-256, ciphering - DES, 3DES,
+AES algorithms, and concurrent operations of hashing and ciphering.
+
+In addition to those functions provided by Crypto 2 HW, Crypto 3 provides
+fast AES algorithms.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 3E provides
+the HMAC-SHA1 hashing algorithm.
+
+In addition to those functions provided by Crypto 3 HW, Crypto 4.0 provides
+HMAC-SHA1/SHA256, AES CBC-MAC hashing algorithms and AES XTS/CCM cipher
+algorithms.
+
+
+Software description
+====================
+
+The module init function (_qcrypto_init()) does a platform_register()
+to register the driver. As a result, the driver probe function,
+_qcrypto_probe(), will be invoked for each registered device.
+
+In the probe function, the driver opens the low level CE (qce_open), and
+registers the supported algorithms to the kernel crypto API layer.
+Currently, qcrypto supports the following algorithms.
+
+      ablkcipher -
+          cbc(aes), ecb(aes), ctr(aes)
+      ahash -
+          sha1, sha256
+      aead -
+          authenc(hmac(sha1),cbc(aes))
+
+      The hmac(sha1), hmac(sha256), authenc(hmac(sha1),cbc(aes)), ccm(aes)
+      and xts(aes) algorithms are registered on platforms that support
+      these in the CE hardware.
+
+The HW device can support various algorithms. However, the most important
+algorithms for gaining performance using a HW crypto accelerator are
+AEAD and ABLKCIPHER.
+
+AEAD stands for "authenticated encryption with associated data".
+ABLKCIPHER stands for "asynchronous block cipher".
+
+The AEAD structure is described in the following header file
+    LINUX/opensource/kernel/include/crypto/aead.h
+
+The design of the driver is to allow multiple requests to be
+issued from kernel client SW (eg IPSec).
+Therefore, the driver has to internally queue the requests, and
+serialize the requests to the low level qce driver.
+
+When a request is received from the client, if there is no outstanding
+request, a qce (or qce40) request is issued; otherwise, the request is
+queued in the driver queue.
+
+On completion of a request, the qce (or qce40) invokes the registered
+callback from the qcrypto. The internal tasklet (done_tasklet) is scheduled
+in this callback function. The sole purpose of done_tasklet is
+to call the completion of the current active request, and
+issue more requests to the qce (or qce40), if any exist.
+
+A spin lock is used to protect the critical section of the internal queue,
+which can be accessed from multiple tasks, SMP, and the completion callback
+from qce.
+
+The driver maintains a set of statistics using debugfs. The files are
+in /debug/qcrypto/stats1, /debug/qcrypto/stats2, /debug/qcrypto/stats3;
+one for each instance of device. Reading the file associated with
+a device will retrieve the driver statistics for that device.
+Any write to the file will clear the statistics.
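+
+Because qcrypto plugs into the kernel crypto API, a kernel client never
+calls qcrypto directly; it allocates a transform by algorithm name, and the
+crypto API picks the highest priority registered implementation of that
+name (qcrypto's, when loaded). The following is a minimal sketch, not taken
+from this patch (the function names are illustrative), of driving one of
+the registered algorithms, cbc(aes), through the ablkcipher interface of
+this kernel generation:
+
+    #include <linux/crypto.h>
+    #include <linux/completion.h>
+    #include <linux/scatterlist.h>
+
+    static void my_cipher_done(struct crypto_async_request *req, int err)
+    {
+            complete(req->data);            /* wake up the submitter */
+    }
+
+    /* buf must be DMA-able (e.g. kmalloc'ed) and len a multiple of the
+     * AES block size; both are the caller's responsibility here. */
+    static int my_encrypt(u8 *key, u8 *iv, u8 *buf, unsigned int len)
+    {
+            struct crypto_ablkcipher *tfm;
+            struct ablkcipher_request *req;
+            struct scatterlist sg;
+            DECLARE_COMPLETION_ONSTACK(done);
+            int rc;
+
+            tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
+            if (IS_ERR(tfm))
+                    return PTR_ERR(tfm);
+
+            rc = crypto_ablkcipher_setkey(tfm, key, 16);
+            if (rc)
+                    goto out;
+
+            req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
+            if (!req) {
+                    rc = -ENOMEM;
+                    goto out;
+            }
+            ablkcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                            my_cipher_done, &done);
+            sg_init_one(&sg, buf, len);
+            ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);
+
+            rc = crypto_ablkcipher_encrypt(req);
+            if (rc == -EINPROGRESS || rc == -EBUSY) {
+                    /* qcrypto completes asynchronously via done_tasklet */
+                    wait_for_completion(&done);
+                    rc = 0;
+            }
+            ablkcipher_request_free(req);
+    out:
+            crypto_free_ablkcipher(tfm);
+            return rc;
+    }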
+
+Test vectors for the authenc(hmac(sha1),cbc(aes)) algorithm were
+developed offline, and imported to crypto/testmgr.c and crypto/testmgr.h.
+
+
+Power Management
+================
+ none
+
+
+Interface:
+==========
+The kernel interface is defined in
+    LINUX/opensource/kernel/include/linux/crypto.h.
+
+
+Module parameters:
+==================
+
+All the platform specific parameters are defined in the board init
+file, eg. arch/arm/mach-msm/board-msm7x30.c for MSM7x30.
+
+Dependencies:
+=============
+qce driver.
+
+
+User space utilities:
+=====================
+ n/a
+
+Known issues:
+=============
+ n/a
+
+To do:
+======
+ Add hashing algorithms.
+
+
+Limitations:
+===============
+(1) Each packet transfer size (for cipher and hash) is limited to a maximum
+    of 32KB. This is a limitation in the crypto engine hardware. Clients
+    will have to break packets larger than 32KB into multiple requests of
+    smaller size data packets.
+
+(2) Do not load this driver if your device has user space apps that need to
+    access the crypto hardware. Please make sure to have the qcrypto module
+    disabled/unloaded.
+    Not having the driver loaded will result in the kernel space apps using
+    the registered software implementation of the crypto algorithms.
+
+(3) If your device has a Playready application enabled and uses the qcedev
+    module to access the crypto hardware accelerator, please be informed
+    that for performance reasons, the CE hardware will need to be dedicated
+    to the Playready application. Any other user space or kernel application
+    should be implemented to use the software implementation of the crypto
+    algorithms.
+
+    (NOTE: Please refer to details on the limitations listed in qce.txt and
+    qce40.txt)
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index dd414d9350ef..fb1ffd0fa463 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -285,6 +285,49 @@ config CRYPTO_DEV_S5P
 	  Select this to offload Samsung S5PV210 or S5PC110 from AES
 	  algorithms execution.
 
+config CRYPTO_DEV_QCE40
+	bool
+
+config CRYPTO_DEV_QCRYPTO
+	tristate "Qualcomm Crypto accelerator"
+	select CRYPTO_DES
+	select CRYPTO_ALGAPI
+	select CRYPTO_AUTHENC
+	select CRYPTO_BLKCIPHER
+	default n
+	help
+	  This driver supports Qualcomm crypto acceleration.
+	  To compile this driver as a module, choose M here: the
+	  module will be called qcrypto.
+
+config CRYPTO_DEV_QCE
+	tristate "Qualcomm Crypto Engine (QCE) module"
+	select CRYPTO_DEV_QCE40 if ARCH_MSM8960 || ARCH_MSM9615
+	default n
+	help
+	  This driver supports the Qualcomm Crypto Engine in MSM7x30,
+	  MSM8660, MSM8x55, MSM8960 and MSM9615.
+	  To compile this driver as a module, choose M here.
+	  For MSM7x30, MSM8660 and MSM8x55 the module is called qce.
+	  For MSM8960 and MSM9615 the module is called qce40.
+
+config CRYPTO_DEV_QCEDEV
+	tristate "QCEDEV Interface to CE module"
+	default n
+	help
+	  This driver supports Qualcomm QCEDEV Crypto in MSM7x30, MSM8660,
+	  MSM8960 and MSM9615. It exposes the interface to the QCE hardware
+	  accelerator via IOCTLs.
+	  To compile this driver as a module, choose M here: the
+	  module will be called qcedev.
+
+config CRYPTO_DEV_OTA_CRYPTO
+	tristate "OTA Crypto module"
+	help
+	  This driver supports Qualcomm OTA Crypto in the FSM9xxx.
+	  To compile this driver as a module, choose M here: the
+	  module will be called ota_crypto.
+ config CRYPTO_DEV_TEGRA_AES tristate "Support for TEGRA AES hw engine" depends on ARCH_TEGRA diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index f3e64eadd7af..780620c61f3c 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -9,6 +9,7 @@ obj-$(CONFIG_CRYPTO_DEV_TALITOS) += talitos.o obj-$(CONFIG_CRYPTO_DEV_FSL_CAAM) += caam/ obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o obj-$(CONFIG_CRYPTO_DEV_PPC4XX) += amcc/ +obj-$(CONFIG_CRYPTO_DEV_QCE) += msm/ obj-$(CONFIG_CRYPTO_DEV_OMAP_SHAM) += omap-sham.o obj-$(CONFIG_CRYPTO_DEV_OMAP_AES) += omap-aes.o obj-$(CONFIG_CRYPTO_DEV_PICOXCELL) += picoxcell_crypto.o diff --git a/drivers/crypto/msm/Makefile b/drivers/crypto/msm/Makefile new file mode 100644 index 000000000000..61406b9531c3 --- /dev/null +++ b/drivers/crypto/msm/Makefile @@ -0,0 +1,8 @@ +obj-$(CONFIG_CRYPTO_DEV_QCEDEV) += qcedev.o +ifeq ($(CONFIG_CRYPTO_DEV_QCE40), y) + obj-$(CONFIG_CRYPTO_DEV_QCE) += qce40.o +else + obj-$(CONFIG_CRYPTO_DEV_QCE) += qce.o +endif +obj-$(CONFIG_CRYPTO_DEV_QCRYPTO) += qcrypto.o +obj-$(CONFIG_CRYPTO_DEV_OTA_CRYPTO) += ota_crypto.o diff --git a/drivers/crypto/msm/ota_crypto.c b/drivers/crypto/msm/ota_crypto.c new file mode 100644 index 000000000000..b129c052714e --- /dev/null +++ b/drivers/crypto/msm/ota_crypto.c @@ -0,0 +1,731 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +/* Qualcomm Over the Air (OTA) Crypto driver */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#include +#include "qce.h" +#include "qce_ota.h" + +enum qce_ota_oper_enum { + QCE_OTA_F8_OPER = 0, + QCE_OTA_MPKT_F8_OPER = 1, + QCE_OTA_F9_OPER = 2, + QCE_OTA_OPER_LAST +}; + +struct ota_dev_control; + +struct ota_async_req { + struct list_head list; + struct completion complete; + int err; + enum qce_ota_oper_enum op; + union { + struct qce_f9_req f9_req; + struct qce_f8_req f8_req; + struct qce_f8_multi_pkt_req f8_mp_req; + } req; + + struct ota_dev_control *podev; +}; + +/* + * Register ourselves as a misc device to be able to access the ota + * from userspace. 
+ */ + + +#define QCOTA_DEV "qcota" + + +struct ota_dev_control { + + /* misc device */ + struct miscdevice miscdevice; + + /* qce handle */ + void *qce; + + /* platform device */ + struct platform_device *pdev; + + unsigned magic; + + struct list_head ready_commands; + struct ota_async_req *active_command; + spinlock_t lock; + struct tasklet_struct done_tasklet; +}; + +#define OTA_MAGIC 0x4f544143 + +static long qcota_ioctl(struct file *file, + unsigned cmd, unsigned long arg); +static int qcota_open(struct inode *inode, struct file *file); +static int qcota_release(struct inode *inode, struct file *file); +static int start_req(struct ota_dev_control *podev); + +static const struct file_operations qcota_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = qcota_ioctl, + .open = qcota_open, + .release = qcota_release, +}; + +static struct ota_dev_control qcota_dev[] = { + { + .miscdevice = { + .minor = MISC_DYNAMIC_MINOR, + .name = "qcota0", + .fops = &qcota_fops, + }, + .magic = OTA_MAGIC, + }, + { + .miscdevice = { + .minor = MISC_DYNAMIC_MINOR, + .name = "qcota1", + .fops = &qcota_fops, + }, + .magic = OTA_MAGIC, + }, + { + .miscdevice = { + .minor = MISC_DYNAMIC_MINOR, + .name = "qcota2", + .fops = &qcota_fops, + }, + .magic = OTA_MAGIC, + } +}; + +#define MAX_OTA_DEVICE ARRAY_SIZE(qcota_dev) + +#define DEBUG_MAX_FNAME 16 +#define DEBUG_MAX_RW_BUF 1024 + +struct qcota_stat { + u32 f8_req; + u32 f8_mp_req; + u32 f9_req; + u32 f8_op_success; + u32 f8_op_fail; + u32 f8_mp_op_success; + u32 f8_mp_op_fail; + u32 f9_op_success; + u32 f9_op_fail; +}; +static struct qcota_stat _qcota_stat[MAX_OTA_DEVICE]; +static struct dentry *_debug_dent; +static char _debug_read_buf[DEBUG_MAX_RW_BUF]; +static int _debug_qcota[MAX_OTA_DEVICE]; + +static struct ota_dev_control *qcota_minor_to_control(unsigned n) +{ + int i; + + for (i = 0; i < MAX_OTA_DEVICE; i++) { + if (qcota_dev[i].miscdevice.minor == n) + return &qcota_dev[i]; + } + return NULL; +} + +static int qcota_open(struct inode *inode, struct file *file) +{ + struct ota_dev_control *podev; + + podev = qcota_minor_to_control(MINOR(inode->i_rdev)); + if (podev == NULL) { + pr_err("%s: no such device %d\n", __func__, + MINOR(inode->i_rdev)); + return -ENOENT; + } + + file->private_data = podev; + + return 0; +} + +static int qcota_release(struct inode *inode, struct file *file) +{ + struct ota_dev_control *podev; + + podev = file->private_data; + + if (podev != NULL && podev->magic != OTA_MAGIC) { + pr_err("%s: invalid handle %p\n", + __func__, podev); + } + + file->private_data = NULL; + + return 0; +} + +static void req_done(unsigned long data) +{ + struct ota_dev_control *podev = (struct ota_dev_control *)data; + struct ota_async_req *areq; + unsigned long flags; + struct ota_async_req *new_req = NULL; + int ret = 0; + + spin_lock_irqsave(&podev->lock, flags); + areq = podev->active_command; + podev->active_command = NULL; + +again: + if (!list_empty(&podev->ready_commands)) { + new_req = container_of(podev->ready_commands.next, + struct ota_async_req, list); + list_del(&new_req->list); + podev->active_command = new_req; + new_req->err = 0; + ret = start_req(podev); + } + + spin_unlock_irqrestore(&podev->lock, flags); + + if (areq) + complete(&areq->complete); + + if (new_req && ret) { + complete(&new_req->complete); + spin_lock_irqsave(&podev->lock, flags); + podev->active_command = NULL; + areq = NULL; + ret = 0; + new_req = NULL; + goto again; + } + + return; +} + +static void f9_cb(void *cookie, unsigned char *icv, unsigned char *iv, + int 
ret) +{ + struct ota_async_req *areq = (struct ota_async_req *) cookie; + struct ota_dev_control *podev; + struct qcota_stat *pstat; + + podev = areq->podev; + pstat = &_qcota_stat[podev->pdev->id]; + areq->req.f9_req.mac_i = (uint32_t) icv; + + if (ret) + areq->err = -ENXIO; + else + areq->err = 0; + + tasklet_schedule(&podev->done_tasklet); +}; + +static void f8_cb(void *cookie, unsigned char *icv, unsigned char *iv, + int ret) +{ + struct ota_async_req *areq = (struct ota_async_req *) cookie; + struct ota_dev_control *podev; + struct qcota_stat *pstat; + + podev = areq->podev; + pstat = &_qcota_stat[podev->pdev->id]; + + if (ret) + areq->err = -ENXIO; + else + areq->err = 0; + + tasklet_schedule(&podev->done_tasklet); +}; + +static int start_req(struct ota_dev_control *podev) +{ + struct ota_async_req *areq; + struct qce_f9_req *pf9; + struct qce_f8_multi_pkt_req *p_mp_f8; + struct qce_f8_req *pf8; + int ret = 0; + + /* start the command on the podev->active_command */ + areq = podev->active_command; + areq->podev = podev; + + switch (areq->op) { + case QCE_OTA_F8_OPER: + pf8 = &areq->req.f8_req; + ret = qce_f8_req(podev->qce, pf8, areq, f8_cb); + break; + case QCE_OTA_MPKT_F8_OPER: + p_mp_f8 = &areq->req.f8_mp_req; + ret = qce_f8_multi_pkt_req(podev->qce, p_mp_f8, areq, f8_cb); + break; + + case QCE_OTA_F9_OPER: + pf9 = &areq->req.f9_req; + ret = qce_f9_req(podev->qce, pf9, areq, f9_cb); + break; + + default: + ret = -ENOTSUPP; + break; + }; + areq->err = ret; + return ret; +}; + +static int submit_req(struct ota_async_req *areq, struct ota_dev_control *podev) +{ + unsigned long flags; + int ret = 0; + struct qcota_stat *pstat; + + areq->err = 0; + spin_lock_irqsave(&podev->lock, flags); + if (podev->active_command == NULL) { + podev->active_command = areq; + ret = start_req(podev); + } else { + list_add_tail(&areq->list, &podev->ready_commands); + } + + if (ret != 0) + podev->active_command = NULL; + spin_unlock_irqrestore(&podev->lock, flags); + + if (ret == 0) + wait_for_completion(&areq->complete); + + pstat = &_qcota_stat[podev->pdev->id]; + switch (areq->op) { + case QCE_OTA_F8_OPER: + if (areq->err) + pstat->f8_op_fail++; + else + pstat->f8_op_success++; + break; + + case QCE_OTA_MPKT_F8_OPER: + + if (areq->err) + pstat->f8_mp_op_fail++; + else + pstat->f8_mp_op_success++; + break; + + case QCE_OTA_F9_OPER: + default: + if (areq->err) + pstat->f9_op_fail++; + else + pstat->f9_op_success++; + break; + }; + + return areq->err; +}; + +static long qcota_ioctl(struct file *file, + unsigned cmd, unsigned long arg) +{ + int err = 0; + struct ota_dev_control *podev; + uint8_t *user_src; + uint8_t *user_dst; + uint8_t *k_buf = NULL; + struct ota_async_req areq; + uint32_t total; + struct qcota_stat *pstat; + + podev = file->private_data; + if (podev == NULL || podev->magic != OTA_MAGIC) { + pr_err("%s: invalid handle %p\n", + __func__, podev); + return -ENOENT; + } + + /* Verify user arguments. 
*/ + if (_IOC_TYPE(cmd) != QCOTA_IOC_MAGIC) + return -ENOTTY; + + init_completion(&areq.complete); + + pstat = &_qcota_stat[podev->pdev->id]; + + switch (cmd) { + case QCOTA_F9_REQ: + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qce_f9_req))) + return -EFAULT; + if (__copy_from_user(&areq.req.f9_req, (void __user *)arg, + sizeof(struct qce_f9_req))) + return -EFAULT; + + user_src = areq.req.f9_req.message; + if (!access_ok(VERIFY_READ, (void __user *)user_src, + areq.req.f9_req.msize)) + return -EFAULT; + + k_buf = kmalloc(areq.req.f9_req.msize, GFP_KERNEL); + if (k_buf == NULL) + return -ENOMEM; + + if (__copy_from_user(k_buf, (void __user *)user_src, + areq.req.f9_req.msize)) { + kfree(k_buf); + return -EFAULT; + } + + areq.req.f9_req.message = k_buf; + areq.op = QCE_OTA_F9_OPER; + + pstat->f9_req++; + err = submit_req(&areq, podev); + + areq.req.f9_req.message = user_src; + if (err == 0 && __copy_to_user((void __user *)arg, + &areq.req.f9_req, sizeof(struct qce_f9_req))) { + err = -EFAULT; + } + kfree(k_buf); + break; + + case QCOTA_F8_REQ: + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qce_f8_req))) + return -EFAULT; + if (__copy_from_user(&areq.req.f8_req, (void __user *)arg, + sizeof(struct qce_f8_req))) + return -EFAULT; + total = areq.req.f8_req.data_len; + user_src = areq.req.f8_req.data_in; + if (user_src != NULL) { + if (!access_ok(VERIFY_READ, (void __user *) + user_src, total)) + return -EFAULT; + + }; + + user_dst = areq.req.f8_req.data_out; + if (!access_ok(VERIFY_WRITE, (void __user *) + user_dst, total)) + return -EFAULT; + + k_buf = kmalloc(total, GFP_KERNEL); + if (k_buf == NULL) + return -ENOMEM; + + /* k_buf returned from kmalloc should be cache line aligned */ + if (user_src && __copy_from_user(k_buf, + (void __user *)user_src, total)) { + kfree(k_buf); + return -EFAULT; + } + + if (user_src) + areq.req.f8_req.data_in = k_buf; + else + areq.req.f8_req.data_in = NULL; + areq.req.f8_req.data_out = k_buf; + + areq.op = QCE_OTA_F8_OPER; + + pstat->f8_req++; + err = submit_req(&areq, podev); + + if (err == 0 && __copy_to_user(user_dst, k_buf, total)) + err = -EFAULT; + kfree(k_buf); + + break; + + case QCOTA_F8_MPKT_REQ: + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qce_f8_multi_pkt_req))) + return -EFAULT; + if (__copy_from_user(&areq.req.f8_mp_req, (void __user *)arg, + sizeof(struct qce_f8_multi_pkt_req))) + return -EFAULT; + + total = areq.req.f8_mp_req.num_pkt * + areq.req.f8_mp_req.qce_f8_req.data_len; + + user_src = areq.req.f8_mp_req.qce_f8_req.data_in; + if (!access_ok(VERIFY_READ, (void __user *) + user_src, total)) + return -EFAULT; + + user_dst = areq.req.f8_mp_req.qce_f8_req.data_out; + if (!access_ok(VERIFY_WRITE, (void __user *) + user_dst, total)) + return -EFAULT; + + k_buf = kmalloc(total, GFP_KERNEL); + if (k_buf == NULL) + return -ENOMEM; + /* k_buf returned from kmalloc should be cache line aligned */ + if (__copy_from_user(k_buf, (void __user *)user_src, total)) { + kfree(k_buf); + + return -EFAULT; + } + + areq.req.f8_mp_req.qce_f8_req.data_out = k_buf; + areq.req.f8_mp_req.qce_f8_req.data_in = k_buf; + + areq.op = QCE_OTA_MPKT_F8_OPER; + + pstat->f8_mp_req++; + err = submit_req(&areq, podev); + + if (err == 0 && __copy_to_user(user_dst, k_buf, total)) + err = -EFAULT; + kfree(k_buf); + break; + + default: + return -ENOTTY; + } + + return err; +} + +static int qcota_probe(struct platform_device *pdev) +{ + void *handle = NULL; + int rc = 0; + struct ota_dev_control *podev; + struct 
ce_hw_support ce_support; + + if (pdev->id >= MAX_OTA_DEVICE) { + pr_err("%s: device id %d exceeds allowed %d\n", + __func__, pdev->id, MAX_OTA_DEVICE); + return -ENOENT; + } + + podev = &qcota_dev[pdev->id]; + + INIT_LIST_HEAD(&podev->ready_commands); + podev->active_command = NULL; + spin_lock_init(&podev->lock); + tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev); + + /* open qce */ + handle = qce_open(pdev, &rc); + if (handle == NULL) { + pr_err("%s: device id %d, can not open qce\n", + __func__, pdev->id); + platform_set_drvdata(pdev, NULL); + return rc; + } + if (qce_hw_support(handle, &ce_support) < 0 || + ce_support.ota == false) { + pr_err("%s: device id %d, qce does not support ota capability\n", + __func__, pdev->id); + rc = -ENODEV; + goto err; + } + podev->qce = handle; + podev->pdev = pdev; + platform_set_drvdata(pdev, podev); + + rc = misc_register(&podev->miscdevice); + if (rc < 0) + goto err; + + return 0; +err: + if (handle) + qce_close(handle); + platform_set_drvdata(pdev, NULL); + podev->qce = NULL; + podev->pdev = NULL; + return rc; +}; + +static int qcota_remove(struct platform_device *pdev) +{ + struct ota_dev_control *podev; + + podev = platform_get_drvdata(pdev); + if (!podev) + return 0; + if (podev->qce) + qce_close(podev->qce); + + if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR) + misc_deregister(&podev->miscdevice); + tasklet_kill(&podev->done_tasklet); + return 0; +}; + +static struct platform_driver qcota_plat_driver = { + .probe = qcota_probe, + .remove = qcota_remove, + .driver = { + .name = "qcota", + .owner = THIS_MODULE, + }, +}; + +static int _disp_stats(int id) +{ + struct qcota_stat *pstat; + int len = 0; + + pstat = &_qcota_stat[id]; + len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1, + "\nQualcomm OTA crypto accelerator %d Statistics:\n", + id + 1); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 request : %d\n", + pstat->f8_req); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 operation success : %d\n", + pstat->f8_op_success); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 operation fail : %d\n", + pstat->f8_op_fail); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 MP request : %d\n", + pstat->f8_mp_req); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 MP operation success: %d\n", + pstat->f8_mp_op_success); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F8 MP operation fail : %d\n", + pstat->f8_mp_op_fail); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F9 request : %d\n", + pstat->f9_req); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F9 operation success : %d\n", + pstat->f9_op_success); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " F9 operation fail : %d\n", + pstat->f9_op_fail); + + return len; +} + +static int _debug_stats_open(struct inode *inode, struct file *file) +{ + file->private_data = inode->i_private; + return 0; +} + +static ssize_t _debug_stats_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + int rc = -EINVAL; + int qcota = *((int *) file->private_data); + int len; + + len = _disp_stats(qcota); + + rc = simple_read_from_buffer((void __user *) buf, len, + ppos, (void *) _debug_read_buf, len); + + return rc; +} + +static ssize_t _debug_stats_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + + int 
qcota = *((int *) file->private_data); + + memset((char *)&_qcota_stat[qcota], 0, sizeof(struct qcota_stat)); + return count; +}; + +static const struct file_operations _debug_stats_ops = { + .open = _debug_stats_open, + .read = _debug_stats_read, + .write = _debug_stats_write, +}; + +static int _qcota_debug_init(void) +{ + int rc; + char name[DEBUG_MAX_FNAME]; + int i; + struct dentry *dent; + + _debug_dent = debugfs_create_dir("qcota", NULL); + if (IS_ERR(_debug_dent)) { + pr_err("qcota debugfs_create_dir fail, error %ld\n", + PTR_ERR(_debug_dent)); + return PTR_ERR(_debug_dent); + } + + for (i = 0; i < MAX_OTA_DEVICE; i++) { + snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1); + _debug_qcota[i] = i; + dent = debugfs_create_file(name, 0644, _debug_dent, + &_debug_qcota[i], &_debug_stats_ops); + if (dent == NULL) { + pr_err("qcota debugfs_create_file fail, error %ld\n", + PTR_ERR(dent)); + rc = PTR_ERR(dent); + goto err; + } + } + return 0; +err: + debugfs_remove_recursive(_debug_dent); + return rc; +} + +static int __init qcota_init(void) +{ + int rc; + + rc = _qcota_debug_init(); + if (rc) + return rc; + return platform_driver_register(&qcota_plat_driver); +} +static void __exit qcota_exit(void) +{ + debugfs_remove_recursive(_debug_dent); + platform_driver_unregister(&qcota_plat_driver); +} + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Rohit Vaswani "); +MODULE_DESCRIPTION("Qualcomm Ota Crypto driver"); +MODULE_VERSION("1.01"); + +module_init(qcota_init); +module_exit(qcota_exit); diff --git a/drivers/crypto/msm/qce.c b/drivers/crypto/msm/qce.c new file mode 100644 index 000000000000..55cf651f5689 --- /dev/null +++ b/drivers/crypto/msm/qce.c @@ -0,0 +1,2709 @@ +/* Qualcomm Crypto Engine driver. + * + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "qce.h" +#include "qcryptohw_30.h" +#include "qce_ota.h" + +/* ADM definitions */ +#define LI_SG_CMD (1 << 31) /* last index in the scatter gather cmd */ +#define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16) +#define DST_INDEX_SG_CMD(index) (index & 0x3fff) +#define ADM_DESC_LAST (1 << 31) + +/* Data xfer between DM and CE in blocks of 16 bytes */ +#define ADM_CE_BLOCK_SIZE 16 + +#define QCE_FIFO_SIZE 0x8000 + +/* Data xfer between DM and CE in blocks of 64 bytes */ +#define ADM_SHA_BLOCK_SIZE 64 + +#define ADM_DESC_LENGTH_MASK 0xffff +#define ADM_DESC_LENGTH(x) (x & ADM_DESC_LENGTH_MASK) + +struct dmov_desc { + uint32_t addr; + uint32_t len; +}; + +#define ADM_STATUS_OK 0x80000002 + +/* Misc definitions */ + +/* QCE max number of descriptor in a descriptor list */ +#define QCE_MAX_NUM_DESC 128 + +/* State of DM channel */ +enum qce_chan_st_enum { + QCE_CHAN_STATE_IDLE = 0, + QCE_CHAN_STATE_IN_PROG = 1, + QCE_CHAN_STATE_COMP = 2, + QCE_CHAN_STATE_LAST +}; + +/* + * CE HW device structure. + * Each engine has an instance of the structure. 
+ * Each engine can only handle one crypto operation at one time. It is up to + * the sw above to ensure single threading of operation on an engine. + */ +struct qce_device { + struct device *pdev; /* Handle to platform_device structure */ + unsigned char *coh_vmem; /* Allocated coherent virtual memory */ + dma_addr_t coh_pmem; /* Allocated coherent physical memory */ + void __iomem *iobase; /* Virtual io base of CE HW */ + unsigned int phy_iobase; /* Physical io base of CE HW */ + struct clk *ce_clk; /* Handle to CE clk */ + unsigned int crci_in; /* CRCI for CE DM IN Channel */ + unsigned int crci_out; /* CRCI for CE DM OUT Channel */ + unsigned int crci_hash; /* CRCI for CE HASH */ + unsigned int chan_ce_in; /* ADM channel used for CE input + * and auth result if authentication + * only operation. */ + unsigned int chan_ce_out; /* ADM channel used for CE output, + and icv for esp */ + + + unsigned int *cmd_pointer_list_ce_in; + dma_addr_t phy_cmd_pointer_list_ce_in; + + unsigned int *cmd_pointer_list_ce_out; + dma_addr_t phy_cmd_pointer_list_ce_out; + + unsigned char *cmd_list_ce_in; + dma_addr_t phy_cmd_list_ce_in; + + unsigned char *cmd_list_ce_out; + dma_addr_t phy_cmd_list_ce_out; + + struct dmov_desc *ce_out_src_desc; + dma_addr_t phy_ce_out_src_desc; + + struct dmov_desc *ce_out_dst_desc; + dma_addr_t phy_ce_out_dst_desc; + + struct dmov_desc *ce_in_src_desc; + dma_addr_t phy_ce_in_src_desc; + + struct dmov_desc *ce_in_dst_desc; + dma_addr_t phy_ce_in_dst_desc; + + unsigned char *ce_out_ignore; + dma_addr_t phy_ce_out_ignore; + + unsigned char *ce_pad; + dma_addr_t phy_ce_pad; + + struct msm_dmov_cmd *chan_ce_in_cmd; + struct msm_dmov_cmd *chan_ce_out_cmd; + + uint32_t ce_out_ignore_size; + + int ce_out_dst_desc_index; + int ce_in_dst_desc_index; + + int ce_out_src_desc_index; + int ce_in_src_desc_index; + + enum qce_chan_st_enum chan_ce_in_state; /* chan ce_in state */ + enum qce_chan_st_enum chan_ce_out_state; /* chan ce_out state */ + + int chan_ce_in_status; /* chan ce_in status */ + int chan_ce_out_status; /* chan ce_out status */ + + + unsigned char *dig_result; + dma_addr_t phy_dig_result; + + /* cached aes key */ + uint32_t aeskey[AES256_KEY_SIZE/sizeof(uint32_t)]; + + uint32_t aes_key_size; /* cached aes key size in bytes */ + int fastaes; /* ce supports fast aes */ + int hmac; /* ce support hmac-sha1 */ + bool ota; /* ce support ota */ + + qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */ + + int assoc_nents; + int src_nents; + int dst_nents; + + void *areq; + enum qce_cipher_mode_enum mode; + + dma_addr_t phy_iv_in; + dma_addr_t phy_ota_src; + dma_addr_t phy_ota_dst; + unsigned int ota_size; + int err; +}; + +/* Standard initialization vector for SHA-1, source: FIPS 180-2 */ +static uint32_t _std_init_vector_sha1[] = { + 0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0 +}; +/* Standard initialization vector for SHA-256, source: FIPS 180-2 */ +static uint32_t _std_init_vector_sha256[] = { + 0x6A09E667, 0xBB67AE85, 0x3C6EF372, 0xA54FF53A, + 0x510E527F, 0x9B05688C, 0x1F83D9AB, 0x5BE0CD19 +}; + +/* Source: FIPS 197, Figure 7. 
S-box: substitution values for the byte xy */ +static const uint32_t _s_box[256] = { + 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, + 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76, + + 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, + 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0, + + 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, + 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15, + + 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, + 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75, + + 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, + 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84, + + 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, + 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf, + + 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, + 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8, + + 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, + 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2, + + 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, + 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73, + + 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, + 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb, + + 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, + 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79, + + 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, + 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08, + + 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, + 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a, + + 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, + 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e, + + 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, + 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf, + + 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, + 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16 }; + + +/* + * Source: FIPS 197, Sec 5.2 Key Expansion, Figure 11. Pseudo Code for Key + * Expansion. 
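+ *
+ * For AES-128 (Nk = 4, Nr = 10) the recurrence implemented below gives,
+ * for example, w[4] = w[0] ^ (SubWord(RotWord(w[3])) ^ Rcon[1]) with
+ * Rcon[1] = 0x01000000 -- the values the Rcon switch statements encode.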
+ */ +static void _aes_expand_key_schedule(uint32_t keysize, uint32_t *AES_KEY, + uint32_t *AES_RND_KEY) +{ + uint32_t i; + uint32_t Nk; + uint32_t Nr, rot_data; + uint32_t Rcon = 0x01000000; + uint32_t temp; + uint32_t data_in; + uint32_t MSB_store; + uint32_t byte_for_sub; + uint32_t word_sub[4]; + + switch (keysize) { + case 192: + Nk = 6; + Nr = 12; + break; + + case 256: + Nk = 8; + Nr = 14; + break; + + case 128: + default: /* default to AES128 */ + Nk = 4; + Nr = 10; + break; + } + + /* key expansion */ + i = 0; + while (i < Nk) { + AES_RND_KEY[i] = AES_KEY[i]; + i = i + 1; + } + + i = Nk; + while (i < (4 * (Nr + 1))) { + temp = AES_RND_KEY[i-1]; + if (Nr == 14) { + switch (i) { + case 8: + Rcon = 0x01000000; + break; + + case 16: + Rcon = 0x02000000; + break; + + case 24: + Rcon = 0x04000000; + break; + + case 32: + Rcon = 0x08000000; + break; + + case 40: + Rcon = 0x10000000; + break; + + case 48: + Rcon = 0x20000000; + break; + + case 56: + Rcon = 0x40000000; + break; + } + } else if (Nr == 12) { + switch (i) { + case 6: + Rcon = 0x01000000; + break; + + case 12: + Rcon = 0x02000000; + break; + + case 18: + Rcon = 0x04000000; + break; + + case 24: + Rcon = 0x08000000; + break; + + case 30: + Rcon = 0x10000000; + break; + + case 36: + Rcon = 0x20000000; + break; + + case 42: + Rcon = 0x40000000; + break; + + case 48: + Rcon = 0x80000000; + break; + } + } else if (Nr == 10) { + switch (i) { + case 4: + Rcon = 0x01000000; + break; + + case 8: + Rcon = 0x02000000; + break; + + case 12: + Rcon = 0x04000000; + break; + + case 16: + Rcon = 0x08000000; + break; + + case 20: + Rcon = 0x10000000; + break; + + case 24: + Rcon = 0x20000000; + break; + + case 28: + Rcon = 0x40000000; + break; + + case 32: + Rcon = 0x80000000; + break; + + case 36: + Rcon = 0x1b000000; + break; + + case 40: + Rcon = 0x36000000; + break; + } + } + + if ((i % Nk) == 0) { + data_in = temp; + MSB_store = (data_in >> 24 & 0xff); + rot_data = (data_in << 8) | MSB_store; + byte_for_sub = rot_data; + word_sub[0] = _s_box[(byte_for_sub & 0xff)]; + word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)] + << 8); + word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)] + << 16); + word_sub[3] = (_s_box[((byte_for_sub & 0xff000000) + >> 24)] << 24); + word_sub[0] = word_sub[0] | word_sub[1] | word_sub[2] | + word_sub[3]; + temp = word_sub[0] ^ Rcon; + } else if ((Nk > 6) && ((i % Nk) == 4)) { + byte_for_sub = temp; + word_sub[0] = _s_box[(byte_for_sub & 0xff)]; + word_sub[1] = (_s_box[((byte_for_sub & 0xff00) >> 8)] + << 8); + word_sub[2] = (_s_box[((byte_for_sub & 0xff0000) >> 16)] + << 16); + word_sub[3] = (_s_box[((byte_for_sub & 0xff000000) >> + 24)] << 24); + word_sub[0] = word_sub[0] | word_sub[1] | word_sub[2] | + word_sub[3]; + temp = word_sub[0]; + } + + AES_RND_KEY[i] = AES_RND_KEY[i-Nk]^temp; + i = i+1; + } +} + +static void _byte_stream_to_net_words(uint32_t *iv, unsigned char *b, + unsigned int len) +{ + unsigned n; + + n = len / sizeof(uint32_t) ; + for (; n > 0; n--) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000) | + (((*(b+2)) << 8) & 0xff00) | + (*(b+3) & 0xff); + b += sizeof(uint32_t); + iv++; + } + + n = len % sizeof(uint32_t); + if (n == 3) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000) | + (((*(b+2)) << 8) & 0xff00) ; + } else if (n == 2) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000) ; + } else if (n == 1) { + *iv = ((*b << 24) & 0xff000000) ; + } +} + +static void _net_words_to_byte_stream(uint32_t *iv, unsigned char *b, + unsigned 
int len) +{ + unsigned n = len / sizeof(uint32_t); + + for (; n > 0; n--) { + *b++ = (unsigned char) ((*iv >> 24) & 0xff); + *b++ = (unsigned char) ((*iv >> 16) & 0xff); + *b++ = (unsigned char) ((*iv >> 8) & 0xff); + *b++ = (unsigned char) (*iv & 0xff); + iv++; + } + n = len % sizeof(uint32_t); + if (n == 3) { + *b++ = (unsigned char) ((*iv >> 24) & 0xff); + *b++ = (unsigned char) ((*iv >> 16) & 0xff); + *b = (unsigned char) ((*iv >> 8) & 0xff); + } else if (n == 2) { + *b++ = (unsigned char) ((*iv >> 24) & 0xff); + *b = (unsigned char) ((*iv >> 16) & 0xff); + } else if (n == 1) { + *b = (unsigned char) ((*iv >> 24) & 0xff); + } +} + +static int count_sg(struct scatterlist *sg, int nbytes) +{ + int i; + + for (i = 0; nbytes > 0; i++, sg = sg_next(sg)) + nbytes -= sg->length; + return i; +} + +static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries, + struct scatterlist *sg) +{ + int i = 0; + for (i = 0; i < entries; i++) { + + sg->dma_address = (dma_addr_t)pmem->offset; + sg++; + pmem++; + } + return 0; +} + +static int _probe_ce_engine(struct qce_device *pce_dev) +{ + unsigned int val; + unsigned int rev; + unsigned int eng_availability; /* engine available functions */ + + val = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); + if ((val & 0xfffffff) != 0x0200004) { + dev_err(pce_dev->pdev, + "unknown Qualcomm crypto device at 0x%x 0x%x\n", + pce_dev->phy_iobase, val); + return -EIO; + }; + rev = (val & CRYPTO_CORE_REV_MASK) >> CRYPTO_CORE_REV; + if (rev == 0x2) { + dev_info(pce_dev->pdev, + "Qualcomm Crypto 3e device found at 0x%x\n", + pce_dev->phy_iobase); + } else if (rev == 0x1) { + dev_info(pce_dev->pdev, + "Qualcomm Crypto 3 device found at 0x%x\n", + pce_dev->phy_iobase); + } else if (rev == 0x0) { + dev_info(pce_dev->pdev, + "Qualcomm Crypto 2 device found at 0x%x\n", + pce_dev->phy_iobase); + } else { + dev_err(pce_dev->pdev, + "unknown Qualcomm crypto device at 0x%x\n", + pce_dev->phy_iobase); + return -EIO; + } + + eng_availability = readl_relaxed(pce_dev->iobase + + CRYPTO_ENGINES_AVAIL); + + if (((eng_availability & CRYPTO_AES_SEL_MASK) >> CRYPTO_AES_SEL) + == CRYPTO_AES_SEL_FAST) + pce_dev->fastaes = 1; + else + pce_dev->fastaes = 0; + + if (eng_availability & (1 << CRYPTO_HMAC_SEL)) + pce_dev->hmac = 1; + else + pce_dev->hmac = 0; + + if ((eng_availability & (1 << CRYPTO_F9_SEL)) && + (eng_availability & (1 << CRYPTO_F8_SEL))) + pce_dev->ota = true; + else + pce_dev->ota = false; + + pce_dev->aes_key_size = 0; + + return 0; +}; + +static int _init_ce_engine(struct qce_device *pce_dev) +{ + unsigned int val; + + /* reset qce */ + writel_relaxed(1 << CRYPTO_SW_RST, pce_dev->iobase + CRYPTO_CONFIG_REG); + + /* Ensure previous instruction (write to reset bit) + * was completed. 
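+	 * writel_relaxed() carries no ordering guarantee of its own, so an
+	 * explicit mb() is required before the CE is reconfigured below.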
+ */ + mb(); + /* configure ce */ + val = (1 << CRYPTO_MASK_DOUT_INTR) | (1 << CRYPTO_MASK_DIN_INTR) | + (1 << CRYPTO_MASK_AUTH_DONE_INTR) | + (1 << CRYPTO_MASK_ERR_INTR); + writel_relaxed(val, pce_dev->iobase + CRYPTO_CONFIG_REG); + + if (_probe_ce_engine(pce_dev) < 0) + return -EIO; + if (readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG) != val) { + dev_err(pce_dev->pdev, + "unknown Qualcomm crypto device at 0x%x\n", + pce_dev->phy_iobase); + return -EIO; + }; + return 0; +}; + +static int _sha_ce_setup(struct qce_device *pce_dev, struct qce_sha_req *sreq) +{ + uint32_t auth32[SHA256_DIGEST_SIZE / sizeof(uint32_t)]; + uint32_t diglen; + int rc; + int i; + uint32_t cfg = 0; + + /* if not the last, the size has to be on the block boundary */ + if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE)) + return -EIO; + + switch (sreq->alg) { + case QCE_HASH_SHA1: + diglen = SHA1_DIGEST_SIZE; + break; + case QCE_HASH_SHA256: + diglen = SHA256_DIGEST_SIZE; + break; + default: + return -EINVAL; + } + /* + * write 20/32 bytes, 5/8 words into auth_iv + * for SHA1/SHA256 + */ + + if (sreq->first_blk) { + if (sreq->alg == QCE_HASH_SHA1) { + for (i = 0; i < 5; i++) + auth32[i] = _std_init_vector_sha1[i]; + } else { + for (i = 0; i < 8; i++) + auth32[i] = _std_init_vector_sha256[i]; + } + } else + _byte_stream_to_net_words(auth32, sreq->digest, diglen); + + rc = clk_enable(pce_dev->ce_clk); + if (rc) + return rc; + + writel_relaxed(auth32[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG); + writel_relaxed(auth32[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG); + writel_relaxed(auth32[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG); + writel_relaxed(auth32[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG); + writel_relaxed(auth32[4], pce_dev->iobase + CRYPTO_AUTH_IV4_REG); + + if (sreq->alg == QCE_HASH_SHA256) { + writel_relaxed(auth32[5], pce_dev->iobase + + CRYPTO_AUTH_IV5_REG); + writel_relaxed(auth32[6], pce_dev->iobase + + CRYPTO_AUTH_IV6_REG); + writel_relaxed(auth32[7], pce_dev->iobase + + CRYPTO_AUTH_IV7_REG); + } + /* write auth_bytecnt 0/1, start with 0 */ + writel_relaxed(sreq->auth_data[0], pce_dev->iobase + + CRYPTO_AUTH_BYTECNT0_REG); + writel_relaxed(sreq->auth_data[1], pce_dev->iobase + + CRYPTO_AUTH_BYTECNT1_REG); + + /* write auth_seg_cfg */ + writel_relaxed(sreq->size << CRYPTO_AUTH_SEG_SIZE, + pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); + + /* + * write seg_cfg + */ + + if (sreq->alg == QCE_HASH_SHA1) + cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE); + else + cfg = (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE); + + if (sreq->first_blk) + cfg |= 1 << CRYPTO_FIRST; + if (sreq->last_blk) + cfg |= 1 << CRYPTO_LAST; + cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG; + writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG); + + /* write seg_size */ + writel_relaxed(sreq->size, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); + + /* issue go to crypto */ + writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG); + /* Ensure previous instructions (setting the GO register) + * was completed before issuing a DMA transfer request + */ + mb(); + + return 0; +} + +static int _ce_setup(struct qce_device *pce_dev, struct qce_req *q_req, + uint32_t totallen, uint32_t coffset) +{ + uint32_t hmackey[HMAC_KEY_SIZE/sizeof(uint32_t)] = { + 0, 0, 0, 0, 0}; + uint32_t enckey32[MAX_CIPHER_KEY_SIZE/sizeof(uint32_t)] = { + 0, 0, 0, 0, 0, 0, 0, 0}; + uint32_t enciv32[MAX_IV_LENGTH / sizeof(uint32_t)] = { + 0, 0, 0, 0}; + uint32_t enck_size_in_word = q_req->encklen / sizeof(uint32_t); + int aes_key_chg; + int i, rc; + 
uint32_t aes_round_key[CRYPTO_AES_RNDKEYS]; + uint32_t cfg; + uint32_t ivsize = q_req->ivsize; + + rc = clk_enable(pce_dev->ce_clk); + if (rc) + return rc; + + cfg = (1 << CRYPTO_FIRST) | (1 << CRYPTO_LAST); + if (q_req->op == QCE_REQ_AEAD) { + + /* do authentication setup */ + + cfg |= (CRYPTO_AUTH_SIZE_HMAC_SHA1 << CRYPTO_AUTH_SIZE)| + (CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG); + + /* write sha1 init vector */ + writel_relaxed(_std_init_vector_sha1[0], + pce_dev->iobase + CRYPTO_AUTH_IV0_REG); + writel_relaxed(_std_init_vector_sha1[1], + pce_dev->iobase + CRYPTO_AUTH_IV1_REG); + writel_relaxed(_std_init_vector_sha1[2], + pce_dev->iobase + CRYPTO_AUTH_IV2_REG); + writel_relaxed(_std_init_vector_sha1[3], + pce_dev->iobase + CRYPTO_AUTH_IV3_REG); + writel_relaxed(_std_init_vector_sha1[4], + pce_dev->iobase + CRYPTO_AUTH_IV4_REG); + /* write hmac key */ + _byte_stream_to_net_words(hmackey, q_req->authkey, + q_req->authklen); + writel_relaxed(hmackey[0], pce_dev->iobase + + CRYPTO_AUTH_IV5_REG); + writel_relaxed(hmackey[1], pce_dev->iobase + + CRYPTO_AUTH_IV6_REG); + writel_relaxed(hmackey[2], pce_dev->iobase + + CRYPTO_AUTH_IV7_REG); + writel_relaxed(hmackey[3], pce_dev->iobase + + CRYPTO_AUTH_IV8_REG); + writel_relaxed(hmackey[4], pce_dev->iobase + + CRYPTO_AUTH_IV9_REG); + writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG); + writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_BYTECNT1_REG); + + /* write auth_seg_cfg */ + writel_relaxed((totallen << CRYPTO_AUTH_SEG_SIZE) & 0xffff0000, + pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); + + } + + _byte_stream_to_net_words(enckey32, q_req->enckey, q_req->encklen); + + switch (q_req->mode) { + case QCE_MODE_ECB: + cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE); + break; + + case QCE_MODE_CBC: + cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE); + break; + + case QCE_MODE_CTR: + default: + cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE); + break; + } + pce_dev->mode = q_req->mode; + + switch (q_req->alg) { + case CIPHER_ALG_DES: + if (q_req->mode != QCE_MODE_ECB) { + _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); + writel_relaxed(enciv32[0], pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG); + writel_relaxed(enciv32[1], pce_dev->iobase + + CRYPTO_CNTR1_IV1_REG); + } + writel_relaxed(enckey32[0], pce_dev->iobase + + CRYPTO_DES_KEY0_REG); + writel_relaxed(enckey32[1], pce_dev->iobase + + CRYPTO_DES_KEY1_REG); + cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG)); + break; + + case CIPHER_ALG_3DES: + if (q_req->mode != QCE_MODE_ECB) { + _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); + writel_relaxed(enciv32[0], pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG); + writel_relaxed(enciv32[1], pce_dev->iobase + + CRYPTO_CNTR1_IV1_REG); + } + writel_relaxed(enckey32[0], pce_dev->iobase + + CRYPTO_DES_KEY0_REG); + writel_relaxed(enckey32[1], pce_dev->iobase + + CRYPTO_DES_KEY1_REG); + writel_relaxed(enckey32[2], pce_dev->iobase + + CRYPTO_DES_KEY2_REG); + writel_relaxed(enckey32[3], pce_dev->iobase + + CRYPTO_DES_KEY3_REG); + writel_relaxed(enckey32[4], pce_dev->iobase + + CRYPTO_DES_KEY4_REG); + writel_relaxed(enckey32[5], pce_dev->iobase + + CRYPTO_DES_KEY5_REG); + cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG)); + break; + + case CIPHER_ALG_AES: + default: + if (q_req->mode != QCE_MODE_ECB) { + _byte_stream_to_net_words(enciv32, q_req->iv, ivsize); + writel_relaxed(enciv32[0], pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG); + 
writel_relaxed(enciv32[1], pce_dev->iobase + + CRYPTO_CNTR1_IV1_REG); + writel_relaxed(enciv32[2], pce_dev->iobase + + CRYPTO_CNTR2_IV2_REG); + writel_relaxed(enciv32[3], pce_dev->iobase + + CRYPTO_CNTR3_IV3_REG); + } + /* set number of counter bits */ + writel_relaxed(0xffff, pce_dev->iobase + CRYPTO_CNTR_MASK_REG); + + if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) { + cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 << + CRYPTO_ENCR_KEY_SZ); + cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG; + } else { + switch (q_req->encklen) { + case AES128_KEY_SIZE: + cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 << + CRYPTO_ENCR_KEY_SZ); + break; + case AES192_KEY_SIZE: + cfg |= (CRYPTO_ENCR_KEY_SZ_AES192 << + CRYPTO_ENCR_KEY_SZ); + break; + case AES256_KEY_SIZE: + default: + cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 << + CRYPTO_ENCR_KEY_SZ); + + /* check for null key. If null, use hw key*/ + for (i = 0; i < enck_size_in_word; i++) { + if (enckey32[i] != 0) + break; + } + if (i == enck_size_in_word) + cfg |= 1 << CRYPTO_USE_HW_KEY; + break; + } /* end of switch (q_req->encklen) */ + + cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG; + if (pce_dev->aes_key_size != q_req->encklen) + aes_key_chg = 1; + else { + for (i = 0; i < enck_size_in_word; i++) { + if (enckey32[i] != pce_dev->aeskey[i]) + break; + } + aes_key_chg = (i == enck_size_in_word) ? 0 : 1; + } + + if (aes_key_chg) { + if (pce_dev->fastaes) { + for (i = 0; i < enck_size_in_word; + i++) { + writel_relaxed(enckey32[i], + pce_dev->iobase + + CRYPTO_AES_RNDKEY0 + + (i * sizeof(uint32_t))); + } + } else { + /* size in bit */ + _aes_expand_key_schedule( + q_req->encklen * 8, + enckey32, aes_round_key); + + for (i = 0; i < CRYPTO_AES_RNDKEYS; + i++) { + writel_relaxed(aes_round_key[i], + pce_dev->iobase + + CRYPTO_AES_RNDKEY0 + + (i * sizeof(uint32_t))); + } + } + + pce_dev->aes_key_size = q_req->encklen; + for (i = 0; i < enck_size_in_word; i++) + pce_dev->aeskey[i] = enckey32[i]; + } /*if (aes_key_chg) { */ + } /* else of if (q_req->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */ + break; + } /* end of switch (q_req->mode) */ + + if (q_req->dir == QCE_ENCRYPT) + cfg |= (1 << CRYPTO_AUTH_POS); + cfg |= ((q_req->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE; + + /* write encr seg cfg */ + writel_relaxed((q_req->cryptlen << CRYPTO_ENCR_SEG_SIZE) | + (coffset & 0xffff), /* cipher offset */ + pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); + + /* write seg cfg and size */ + writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG); + writel_relaxed(totallen, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); + + /* issue go to crypto */ + writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG); + /* Ensure previous instructions (setting the GO register) + * was completed before issuing a DMA transfer request + */ + mb(); + return 0; +}; + +static int _aead_complete(struct qce_device *pce_dev) +{ + struct aead_request *areq; + struct crypto_aead *aead; + uint32_t ivsize; + uint32_t iv_out[4]; + unsigned char iv[4 * sizeof(uint32_t)]; + uint32_t status; + + areq = (struct aead_request *) pce_dev->areq; + aead = crypto_aead_reqtfm(areq); + ivsize = crypto_aead_ivsize(aead); + + if (areq->src != areq->dst) { + dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, + DMA_FROM_DEVICE); + } + dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in, + ivsize, DMA_TO_DEVICE); + dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, + DMA_TO_DEVICE); + + /* check ce error status */ + status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); + if (status & (1 << CRYPTO_SW_ERR)) { + pce_dev->err++; + dev_err(pce_dev->pdev, + "Qualcomm Crypto Error at 0x%x, status%x\n", + pce_dev->phy_iobase, status); + _init_ce_engine(pce_dev); + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO); + return 0; + }; + + /* get iv out */ + if (pce_dev->mode == QCE_MODE_ECB) { + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, + pce_dev->chan_ce_in_status | + pce_dev->chan_ce_out_status); + } else { + + iv_out[0] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG); + iv_out[1] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR1_IV1_REG); + iv_out[2] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR2_IV2_REG); + iv_out[3] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR3_IV3_REG); + + _net_words_to_byte_stream(iv_out, iv, sizeof(iv)); + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, pce_dev->dig_result, iv, + pce_dev->chan_ce_in_status | + pce_dev->chan_ce_out_status); + }; + return 0; +}; + +static void _sha_complete(struct qce_device *pce_dev) +{ + + struct ahash_request *areq; + uint32_t auth_data[2]; + uint32_t status; + + areq = (struct ahash_request *) pce_dev->areq; + dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + DMA_TO_DEVICE); + + /* check ce error status */ + status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); + if (status & (1 << CRYPTO_SW_ERR)) { + pce_dev->err++; + dev_err(pce_dev->pdev, + "Qualcomm Crypto Error at 0x%x, status%x\n", + pce_dev->phy_iobase, status); + _init_ce_engine(pce_dev); + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, pce_dev->dig_result, NULL, -ENXIO); + return; + }; + + auth_data[0] = readl_relaxed(pce_dev->iobase + + CRYPTO_AUTH_BYTECNT0_REG); + auth_data[1] = readl_relaxed(pce_dev->iobase + + CRYPTO_AUTH_BYTECNT1_REG); + /* Ensure previous instruction (retriving byte count information) + * was completed before disabling the clk. + */ + mb(); + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, pce_dev->dig_result, (unsigned char *)auth_data, + pce_dev->chan_ce_in_status); +}; + +static int _ablk_cipher_complete(struct qce_device *pce_dev) +{ + struct ablkcipher_request *areq; + uint32_t iv_out[4]; + unsigned char iv[4 * sizeof(uint32_t)]; + uint32_t status; + + areq = (struct ablkcipher_request *) pce_dev->areq; + + if (areq->src != areq->dst) { + dma_unmap_sg(pce_dev->pdev, areq->dst, + pce_dev->dst_nents, DMA_FROM_DEVICE); + } + dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + + /* check ce error status */ + status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); + if (status & (1 << CRYPTO_SW_ERR)) { + pce_dev->err++; + dev_err(pce_dev->pdev, + "Qualcomm Crypto Error at 0x%x, status%x\n", + pce_dev->phy_iobase, status); + _init_ce_engine(pce_dev); + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, NULL, NULL, -ENXIO); + return 0; + }; + + /* get iv out */ + if (pce_dev->mode == QCE_MODE_ECB) { + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status | + pce_dev->chan_ce_out_status); + } else { + iv_out[0] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG); + iv_out[1] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR1_IV1_REG); + iv_out[2] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR2_IV2_REG); + iv_out[3] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR3_IV3_REG); + + _net_words_to_byte_stream(iv_out, iv, sizeof(iv)); + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status | + pce_dev->chan_ce_out_status); + } + + return 0; +}; + +static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev) +{ + struct ablkcipher_request *areq; + uint32_t iv_out[4]; + unsigned char iv[4 * sizeof(uint32_t)]; + uint32_t status; + + areq = (struct ablkcipher_request *) pce_dev->areq; + + /* check ce error status */ + status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); + if (status & (1 << CRYPTO_SW_ERR)) { + pce_dev->err++; + dev_err(pce_dev->pdev, + "Qualcomm Crypto Error at 0x%x, status%x\n", + pce_dev->phy_iobase, status); + _init_ce_engine(pce_dev); + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, NULL, NULL, -ENXIO); + return 0; + }; + + /* get iv out */ + if (pce_dev->mode == QCE_MODE_ECB) { + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, NULL, NULL, pce_dev->chan_ce_in_status | + pce_dev->chan_ce_out_status); + } else { + iv_out[0] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR0_IV0_REG); + iv_out[1] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR1_IV1_REG); + iv_out[2] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR2_IV2_REG); + iv_out[3] = readl_relaxed(pce_dev->iobase + + CRYPTO_CNTR3_IV3_REG); + + _net_words_to_byte_stream(iv_out, iv, sizeof(iv)); + clk_disable(pce_dev->ce_clk); + pce_dev->qce_cb(areq, NULL, iv, pce_dev->chan_ce_in_status | + pce_dev->chan_ce_out_status); + } + + return 0; +}; + +static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc, + unsigned int plen, unsigned int paddr, int *index) +{ + while (plen > QCE_FIFO_SIZE) { + pdesc->len = QCE_FIFO_SIZE; + if (paddr > 0) { + pdesc->addr = paddr; + paddr += QCE_FIFO_SIZE; + } + plen -= pdesc->len; + if (plen > 0) { + *index = (*index) + 1; + if ((*index) >= QCE_MAX_NUM_DESC) + return -ENOMEM; + pdesc++; + } + } + if ((plen > 0) && (plen <= QCE_FIFO_SIZE)) { + pdesc->len = plen; + if (paddr > 0) + pdesc->addr = paddr; + } + + return 0; +} + +static int _chain_sg_buffer_in(struct qce_device *pce_dev, + struct scatterlist *sg, unsigned int nbytes) +{ + unsigned int len; + unsigned int dlen; + struct dmov_desc *pdesc; + + pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index; + /* + * Two consective chunks may be handled by the old + * buffer descriptor. 
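+	 * For example, when the previous descriptor covers [addr, addr + dlen)
+	 * and the next sg entry starts at addr + dlen, the entry is merged into
+	 * that descriptor rather than taking a new one; any descriptor that
+	 * grows past QCE_FIFO_SIZE is then split again.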
+ */ + while (nbytes > 0) { + len = min(nbytes, sg_dma_len(sg)); + dlen = pdesc->len & ADM_DESC_LENGTH_MASK; + nbytes -= len; + if (dlen == 0) { + pdesc->addr = sg_dma_address(sg); + pdesc->len = len; + if (pdesc->len > QCE_FIFO_SIZE) { + if (qce_split_and_insert_dm_desc(pdesc, + pdesc->len, sg_dma_address(sg), + &pce_dev->ce_in_src_desc_index)) + return -EIO; + } + } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) { + pdesc->len = dlen + len; + if (pdesc->len > QCE_FIFO_SIZE) { + if (qce_split_and_insert_dm_desc(pdesc, + pdesc->len, pdesc->addr, + &pce_dev->ce_in_src_desc_index)) + return -EIO; + } + } else { + pce_dev->ce_in_src_desc_index++; + if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC) + return -ENOMEM; + pdesc++; + pdesc->len = len; + pdesc->addr = sg_dma_address(sg); + if (pdesc->len > QCE_FIFO_SIZE) { + if (qce_split_and_insert_dm_desc(pdesc, + pdesc->len, sg_dma_address(sg), + &pce_dev->ce_in_src_desc_index)) + return -EIO; + } + } + if (nbytes > 0) + sg = sg_next(sg); + } + return 0; +} + +static int _chain_pm_buffer_in(struct qce_device *pce_dev, + unsigned int pmem, unsigned int nbytes) +{ + unsigned int dlen; + struct dmov_desc *pdesc; + + pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index; + dlen = pdesc->len & ADM_DESC_LENGTH_MASK; + if (dlen == 0) { + pdesc->addr = pmem; + pdesc->len = nbytes; + } else if (pmem == (pdesc->addr + dlen)) { + pdesc->len = dlen + nbytes; + } else { + pce_dev->ce_in_src_desc_index++; + if (pce_dev->ce_in_src_desc_index >= QCE_MAX_NUM_DESC) + return -ENOMEM; + pdesc++; + pdesc->len = nbytes; + pdesc->addr = pmem; + } + return 0; +} + +static void _chain_buffer_in_init(struct qce_device *pce_dev) +{ + struct dmov_desc *pdesc; + + pce_dev->ce_in_src_desc_index = 0; + pce_dev->ce_in_dst_desc_index = 0; + pdesc = pce_dev->ce_in_src_desc; + pdesc->len = 0; +} + +static void _ce_in_final(struct qce_device *pce_dev, int ncmd, unsigned total) +{ + struct dmov_desc *pdesc; + dmov_sg *pcmd; + + pdesc = pce_dev->ce_in_src_desc + pce_dev->ce_in_src_desc_index; + pdesc->len |= ADM_DESC_LAST; + + pdesc = pce_dev->ce_in_dst_desc; + if (total > QCE_FIFO_SIZE) { + qce_split_and_insert_dm_desc(pdesc, total, 0, + &pce_dev->ce_in_dst_desc_index); + pdesc = pce_dev->ce_in_dst_desc + pce_dev->ce_in_dst_desc_index; + pdesc->len |= ADM_DESC_LAST; + } else + pdesc->len = ADM_DESC_LAST | total; + + pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in; + if (ncmd == 1) + pcmd->cmd |= CMD_LC; + else { + dmov_s *pscmd; + + pcmd->cmd &= ~CMD_LC; + pcmd++; + pscmd = (dmov_s *)pcmd; + pscmd->cmd |= CMD_LC; + } + +#ifdef QCE_DEBUG + dev_info(pce_dev->pdev, "_ce_in_final %d\n", + pce_dev->ce_in_src_desc_index); +#endif +} + +#ifdef QCE_DEBUG +static void _ce_in_dump(struct qce_device *pce_dev) +{ + int i; + struct dmov_desc *pdesc; + + dev_info(pce_dev->pdev, "_ce_in_dump: src\n"); + for (i = 0; i <= pce_dev->ce_in_src_desc_index; i++) { + pdesc = pce_dev->ce_in_src_desc + i; + dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr, + pdesc->len); + } + dev_info(pce_dev->pdev, "_ce_in_dump: dst\n"); + for (i = 0; i <= pce_dev->ce_in_dst_desc_index; i++) { + pdesc = pce_dev->ce_in_dst_desc + i; + dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr, + pdesc->len); + } +}; + +static void _ce_out_dump(struct qce_device *pce_dev) +{ + int i; + struct dmov_desc *pdesc; + + dev_info(pce_dev->pdev, "_ce_out_dump: src\n"); + for (i = 0; i <= pce_dev->ce_out_src_desc_index; i++) { + pdesc = pce_dev->ce_out_src_desc + i; + dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr, + 
pdesc->len); + } + + dev_info(pce_dev->pdev, "_ce_out_dump: dst\n"); + for (i = 0; i <= pce_dev->ce_out_dst_desc_index; i++) { + pdesc = pce_dev->ce_out_dst_desc + i; + dev_info(pce_dev->pdev, "%x , %x\n", pdesc->addr, + pdesc->len); + } +}; +#endif + +static int _chain_sg_buffer_out(struct qce_device *pce_dev, + struct scatterlist *sg, unsigned int nbytes) +{ + unsigned int len; + unsigned int dlen; + struct dmov_desc *pdesc; + + pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index; + /* + * Two consective chunks may be handled by the old + * buffer descriptor. + */ + while (nbytes > 0) { + len = min(nbytes, sg_dma_len(sg)); + dlen = pdesc->len & ADM_DESC_LENGTH_MASK; + nbytes -= len; + if (dlen == 0) { + pdesc->addr = sg_dma_address(sg); + pdesc->len = len; + if (pdesc->len > QCE_FIFO_SIZE) { + if (qce_split_and_insert_dm_desc(pdesc, + pdesc->len, sg_dma_address(sg), + &pce_dev->ce_out_dst_desc_index)) + return -EIO; + } + } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) { + pdesc->len = dlen + len; + if (pdesc->len > QCE_FIFO_SIZE) { + if (qce_split_and_insert_dm_desc(pdesc, + pdesc->len, pdesc->addr, + &pce_dev->ce_out_dst_desc_index)) + return -EIO; + } + } else { + pce_dev->ce_out_dst_desc_index++; + if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC) + return -EIO; + pdesc++; + pdesc->len = len; + pdesc->addr = sg_dma_address(sg); + if (pdesc->len > QCE_FIFO_SIZE) { + if (qce_split_and_insert_dm_desc(pdesc, + pdesc->len, sg_dma_address(sg), + &pce_dev->ce_out_dst_desc_index)) + return -EIO; + } + } + if (nbytes > 0) + sg = sg_next(sg); + } + return 0; +} + +static int _chain_pm_buffer_out(struct qce_device *pce_dev, + unsigned int pmem, unsigned int nbytes) +{ + unsigned int dlen; + struct dmov_desc *pdesc; + + pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index; + dlen = pdesc->len & ADM_DESC_LENGTH_MASK; + + if (dlen == 0) { + pdesc->addr = pmem; + pdesc->len = nbytes; + } else if (pmem == (pdesc->addr + dlen)) { + pdesc->len = dlen + nbytes; + } else { + pce_dev->ce_out_dst_desc_index++; + if (pce_dev->ce_out_dst_desc_index >= QCE_MAX_NUM_DESC) + return -EIO; + pdesc++; + pdesc->len = nbytes; + pdesc->addr = pmem; + } + return 0; +}; + +static void _chain_buffer_out_init(struct qce_device *pce_dev) +{ + struct dmov_desc *pdesc; + + pce_dev->ce_out_dst_desc_index = 0; + pce_dev->ce_out_src_desc_index = 0; + pdesc = pce_dev->ce_out_dst_desc; + pdesc->len = 0; +}; + +static void _ce_out_final(struct qce_device *pce_dev, int ncmd, unsigned total) +{ + struct dmov_desc *pdesc; + dmov_sg *pcmd; + + pdesc = pce_dev->ce_out_dst_desc + pce_dev->ce_out_dst_desc_index; + pdesc->len |= ADM_DESC_LAST; + + pdesc = pce_dev->ce_out_src_desc; + if (total > QCE_FIFO_SIZE) { + qce_split_and_insert_dm_desc(pdesc, total, 0, + &pce_dev->ce_out_src_desc_index); + pdesc = pce_dev->ce_out_src_desc + + pce_dev->ce_out_src_desc_index; + pdesc->len |= ADM_DESC_LAST; + } else + pdesc->len = ADM_DESC_LAST | total; + + pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out; + if (ncmd == 1) + pcmd->cmd |= CMD_LC; + else { + dmov_s *pscmd; + + pcmd->cmd &= ~CMD_LC; + pcmd++; + pscmd = (dmov_s *)pcmd; + pscmd->cmd |= CMD_LC; + } +#ifdef QCE_DEBUG + dev_info(pce_dev->pdev, "_ce_out_final %d\n", + pce_dev->ce_out_dst_desc_index); +#endif + +}; + +static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + 
dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->chan_ce_in_status = -1; + } else + pce_dev->chan_ce_in_status = 0; + + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP; + if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) { + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _aead_complete(pce_dev); + } +}; + +static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->chan_ce_out_status = -1; + } else { + pce_dev->chan_ce_out_status = 0; + }; + + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP; + if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) { + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _aead_complete(pce_dev); + } + +}; + +static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->chan_ce_in_status = -1; + } else + pce_dev->chan_ce_in_status = 0; + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + _sha_complete(pce_dev); +}; + +static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->chan_ce_in_status = -1; + } else + pce_dev->chan_ce_in_status = 0; + + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP; + if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) { + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _ablk_cipher_complete(pce_dev); + } +}; + +static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->chan_ce_out_status = -1; + } else { + pce_dev->chan_ce_out_status = 0; + }; + + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP; + if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) { + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _ablk_cipher_complete(pce_dev); + } +}; + + +static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->chan_ce_in_status = -1; + } else + pce_dev->chan_ce_in_status = 0; + + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP; + if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) { + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _ablk_cipher_use_pmem_complete(pce_dev); + } 
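+	/* otherwise the ce_out completion callback will observe both
+	 * channels complete and finish the request.
+	 */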
+}; + +static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->chan_ce_out_status = -1; + } else { + pce_dev->chan_ce_out_status = 0; + }; + + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP; + if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) { + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _ablk_cipher_use_pmem_complete(pce_dev); + } +}; + +static int _setup_cmd_template(struct qce_device *pce_dev) +{ + dmov_sg *pcmd; + dmov_s *pscmd; + struct dmov_desc *pdesc; + unsigned char *vaddr; + int i = 0; + + /* Divide up the 4K coherent memory */ + /* 1. ce_in channel 1st command src descriptors, 128 entries */ + vaddr = pce_dev->coh_vmem; + vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); + pce_dev->ce_in_src_desc = (struct dmov_desc *) vaddr; + pce_dev->phy_ce_in_src_desc = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); + + /* 2. ce_in channel 1st command dst descriptor, 1 entry */ + vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); + pce_dev->ce_in_dst_desc = (struct dmov_desc *) vaddr; + pce_dev->phy_ce_in_dst_desc = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); + + /* + * 3. ce_in channel command list of one scatter gather command + * and one simple command. + */ + pce_dev->cmd_list_ce_in = vaddr; + pce_dev->phy_cmd_list_ce_in = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg); + + /* 4. authentication result. */ + pce_dev->dig_result = vaddr; + pce_dev->phy_dig_result = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + vaddr = vaddr + SHA256_DIGESTSIZE; + + /* + * 5. ce_out channel command list of one scatter gather command + * and one simple command. + */ + pce_dev->cmd_list_ce_out = vaddr; + pce_dev->phy_cmd_list_ce_out = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + vaddr = vaddr + sizeof(dmov_s) + sizeof(dmov_sg); + + /* 6. ce_out channel command src descriptors, 1 entry */ + vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); + pce_dev->ce_out_src_desc = (struct dmov_desc *) vaddr; + pce_dev->phy_ce_out_src_desc = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); + + /* 7. ce_out channel command dst descriptors, 128 entries. */ + vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); + pce_dev->ce_out_dst_desc = (struct dmov_desc *) vaddr; + pce_dev->phy_ce_out_dst_desc = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); + + /* 8. pad area. */ + pce_dev->ce_pad = vaddr; + pce_dev->phy_ce_pad = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + vaddr = vaddr + ADM_CE_BLOCK_SIZE; + + /* 9. ce_in channel command pointer list. */ + vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16); + pce_dev->cmd_pointer_list_ce_in = (unsigned int *) vaddr; + pce_dev->phy_cmd_pointer_list_ce_in = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + vaddr = vaddr + sizeof(unsigned char *); + + /* 10. ce_ou channel command pointer list. 
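+	 * (the ce_out channel; a single entry, mirroring item 9 above)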
*/ + vaddr = (unsigned char *) ALIGN(((unsigned int) vaddr), 16); + pce_dev->cmd_pointer_list_ce_out = (unsigned int *) vaddr; + pce_dev->phy_cmd_pointer_list_ce_out = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + vaddr = vaddr + sizeof(unsigned char *); + + /* 11. throw away area to store by-pass data from ce_out. */ + pce_dev->ce_out_ignore = (unsigned char *) vaddr; + pce_dev->phy_ce_out_ignore = pce_dev->coh_pmem + + (vaddr - pce_dev->coh_vmem); + pce_dev->ce_out_ignore_size = (2 * PAGE_SIZE) - (vaddr - + pce_dev->coh_vmem); /* at least 1.5 K of space */ + /* + * The first command of command list ce_in is for the input of + * concurrent operation of encrypt/decrypt or for the input + * of authentication. + */ + pcmd = (dmov_sg *) pce_dev->cmd_list_ce_in; + /* swap byte and half word , dst crci , scatter gather */ + pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS | + CMD_DST_CRCI(pce_dev->crci_in) | CMD_MODE_SG; + pdesc = pce_dev->ce_in_src_desc; + pdesc->addr = 0; /* to be filled in each operation */ + pdesc->len = 0; /* to be filled in each operation */ + pcmd->src_dscr = (unsigned) pce_dev->phy_ce_in_src_desc; + + pdesc = pce_dev->ce_in_dst_desc; + for (i = 0; i < QCE_MAX_NUM_DESC; i++) { + pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase); + pdesc->len = 0; /* to be filled in each operation */ + pdesc++; + } + pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_in_dst_desc; + pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) | + DST_INDEX_SG_CMD(0); + pcmd++; + /* + * The second command is for the digested data of + * hashing operation only. For others, this command is not used. + */ + pscmd = (dmov_s *) pcmd; + /* last command, swap byte, half word, src crci, single */ + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS | + CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE; + pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase); + pscmd->len = SHA256_DIGESTSIZE; /* to be filled. */ + pscmd->dst = (unsigned) pce_dev->phy_dig_result; + /* setup command pointer list */ + *(pce_dev->cmd_pointer_list_ce_in) = (CMD_PTR_LP | DMOV_CMD_LIST | + DMOV_CMD_ADDR((unsigned int) + pce_dev->phy_cmd_list_ce_in)); + pce_dev->chan_ce_in_cmd->user = (void *) pce_dev; + pce_dev->chan_ce_in_cmd->exec_func = NULL; + pce_dev->chan_ce_in_cmd->cmdptr = DMOV_CMD_ADDR( + (unsigned int) pce_dev->phy_cmd_pointer_list_ce_in); + /* + * The first command in the command list ce_out. + * It is for encry/decryp output. + * If hashing only, ce_out is not used. + */ + pcmd = (dmov_sg *) pce_dev->cmd_list_ce_out; + /* swap byte, half word, source crci, scatter gather */ + pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS | + CMD_SRC_CRCI(pce_dev->crci_out) | CMD_MODE_SG; + + pdesc = pce_dev->ce_out_src_desc; + for (i = 0; i < QCE_MAX_NUM_DESC; i++) { + pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase); + pdesc->len = 0; /* to be filled in each operation */ + pdesc++; + } + pcmd->src_dscr = (unsigned) pce_dev->phy_ce_out_src_desc; + + pdesc = pce_dev->ce_out_dst_desc; + pdesc->addr = 0; /* to be filled in each operation */ + pdesc->len = 0; /* to be filled in each operation */ + pcmd->dst_dscr = (unsigned) pce_dev->phy_ce_out_dst_desc; + pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) | + DST_INDEX_SG_CMD(0); + + pcmd++; + /* + * The second command is for digested data of esp operation. + * For ciphering, this command is not used. 
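+	 * When it is used, it copies the HMAC-SHA1 result out of the
+	 * CRYPTO_AUTH_IV registers into the dig_result buffer.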
+ */ + pscmd = (dmov_s *) pcmd; + /* last command, swap byte, half word, src crci, single */ + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS | + CMD_SRC_CRCI(pce_dev->crci_hash) | CMD_MODE_SINGLE; + pscmd->src = (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase); + pscmd->len = SHA1_DIGESTSIZE; /* we only support hmac(sha1) */ + pscmd->dst = (unsigned) pce_dev->phy_dig_result; + /* setup command pointer list */ + *(pce_dev->cmd_pointer_list_ce_out) = (CMD_PTR_LP | DMOV_CMD_LIST | + DMOV_CMD_ADDR((unsigned int)pce_dev-> + phy_cmd_list_ce_out)); + + pce_dev->chan_ce_out_cmd->user = pce_dev; + pce_dev->chan_ce_out_cmd->exec_func = NULL; + pce_dev->chan_ce_out_cmd->cmdptr = DMOV_CMD_ADDR( + (unsigned int) pce_dev->phy_cmd_pointer_list_ce_out); + + + return 0; +}; + +static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out) +{ + + if (ce_in) + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IN_PROG; + else + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP; + + if (ce_out) + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IN_PROG; + else + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP; + + if (ce_in) + msm_dmov_enqueue_cmd(pce_dev->chan_ce_in, + pce_dev->chan_ce_in_cmd); + if (ce_out) + msm_dmov_enqueue_cmd(pce_dev->chan_ce_out, + pce_dev->chan_ce_out_cmd); + + return 0; +}; + +static void _f9_complete(struct qce_device *pce_dev) +{ + uint32_t mac_i; + uint32_t status; + + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, + pce_dev->ota_size, DMA_TO_DEVICE); + + /* check ce error status */ + status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); + if (status & (1 << CRYPTO_SW_ERR)) { + pce_dev->err++; + dev_err(pce_dev->pdev, + "Qualcomm Crypto Error at 0x%x, status%x\n", + pce_dev->phy_iobase, status); + _init_ce_engine(pce_dev); + pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO); + return; + }; + + mac_i = readl_relaxed(pce_dev->iobase + CRYPTO_AUTH_IV0_REG); + pce_dev->qce_cb(pce_dev->areq, (void *) mac_i, NULL, + pce_dev->chan_ce_in_status); +}; + +static void _f8_complete(struct qce_device *pce_dev) +{ + uint32_t status; + + if (pce_dev->phy_ota_dst != 0) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, + pce_dev->ota_size, DMA_FROM_DEVICE); + if (pce_dev->phy_ota_src != 0) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, + pce_dev->ota_size, (pce_dev->phy_ota_dst) ? 
+ DMA_TO_DEVICE : DMA_BIDIRECTIONAL); + + /* check ce error status */ + status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG); + if (status & (1 << CRYPTO_SW_ERR)) { + pce_dev->err++; + dev_err(pce_dev->pdev, + "Qualcomm Crypto Error at 0x%x, status%x\n", + pce_dev->phy_iobase, status); + _init_ce_engine(pce_dev); + pce_dev->qce_cb(pce_dev->areq, NULL, NULL, -ENXIO); + return; + }; + + pce_dev->qce_cb(pce_dev->areq, NULL, NULL, + pce_dev->chan_ce_in_status | + pce_dev->chan_ce_out_status); +}; + + +static void _f9_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->chan_ce_in_status = -1; + } else + pce_dev->chan_ce_in_status = 0; + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + _f9_complete(pce_dev); +}; + +static void _f8_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->chan_ce_in_status = -1; + } else + pce_dev->chan_ce_in_status = 0; + + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_COMP; + if (pce_dev->chan_ce_out_state == QCE_CHAN_STATE_COMP) { + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _f8_complete(pce_dev); + } +}; + +static void _f8_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->chan_ce_out_status = -1; + } else { + pce_dev->chan_ce_out_status = 0; + }; + + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_COMP; + if (pce_dev->chan_ce_in_state == QCE_CHAN_STATE_COMP) { + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _f8_complete(pce_dev); + } +}; + +static int _ce_f9_setup(struct qce_device *pce_dev, struct qce_f9_req * req) +{ + uint32_t cfg; + uint32_t ikey[OTA_KEY_SIZE/sizeof(uint32_t)]; + + _byte_stream_to_net_words(ikey, &req->ikey[0], OTA_KEY_SIZE); + writel_relaxed(ikey[0], pce_dev->iobase + CRYPTO_AUTH_IV0_REG); + writel_relaxed(ikey[1], pce_dev->iobase + CRYPTO_AUTH_IV1_REG); + writel_relaxed(ikey[2], pce_dev->iobase + CRYPTO_AUTH_IV2_REG); + writel_relaxed(ikey[3], pce_dev->iobase + CRYPTO_AUTH_IV3_REG); + writel_relaxed(req->last_bits, pce_dev->iobase + CRYPTO_AUTH_IV4_REG); + + writel_relaxed(req->fresh, pce_dev->iobase + CRYPTO_AUTH_BYTECNT0_REG); + writel_relaxed(req->count_i, pce_dev->iobase + + CRYPTO_AUTH_BYTECNT1_REG); + + /* write auth_seg_cfg */ + writel_relaxed((uint32_t)req->msize << CRYPTO_AUTH_SEG_SIZE, + pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); + + /* write seg_cfg */ + cfg = (CRYPTO_AUTH_ALG_F9 << CRYPTO_AUTH_ALG) | (1 << CRYPTO_FIRST) | + (1 << CRYPTO_LAST); + + if (req->algorithm == QCE_OTA_ALGO_KASUMI) + cfg |= (CRYPTO_AUTH_SIZE_UIA1 << CRYPTO_AUTH_SIZE); + else + cfg |= (CRYPTO_AUTH_SIZE_UIA2 << CRYPTO_AUTH_SIZE) ; + + if (req->direction == QCE_OTA_DIR_DOWNLINK) + cfg |= 1 << CRYPTO_F9_DIRECTION; + + writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG); + + /* write 
seg_size */ + writel_relaxed(req->msize, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); + + /* issue go to crypto */ + writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG); + + /* + * barrier to ensure previous instructions + * (including GO) to CE finish before issue DMA transfer + * request. + */ + mb(); + return 0; +}; + +static int _ce_f8_setup(struct qce_device *pce_dev, struct qce_f8_req *req, + bool key_stream_mode, uint16_t npkts, uint16_t cipher_offset, + uint16_t cipher_size) +{ + uint32_t cfg; + uint32_t ckey[OTA_KEY_SIZE/sizeof(uint32_t)]; + + if ((key_stream_mode && (req->data_len & 0xf || npkts > 1)) || + (req->bearer >= QCE_OTA_MAX_BEARER)) + return -EINVAL; + + /* write seg_cfg */ + cfg = (CRYPTO_ENCR_ALG_F8 << CRYPTO_ENCR_ALG) | (1 << CRYPTO_FIRST) | + (1 << CRYPTO_LAST); + if (req->algorithm == QCE_OTA_ALGO_KASUMI) + cfg |= (CRYPTO_ENCR_KEY_SZ_UEA1 << CRYPTO_ENCR_KEY_SZ); + else + cfg |= (CRYPTO_ENCR_KEY_SZ_UEA2 << CRYPTO_ENCR_KEY_SZ) ; + if (key_stream_mode) + cfg |= 1 << CRYPTO_F8_KEYSTREAM_ENABLE; + if (req->direction == QCE_OTA_DIR_DOWNLINK) + cfg |= 1 << CRYPTO_F8_DIRECTION; + writel_relaxed(cfg, pce_dev->iobase + CRYPTO_SEG_CFG_REG); + + /* write seg_size */ + writel_relaxed(req->data_len, pce_dev->iobase + CRYPTO_SEG_SIZE_REG); + + /* write 0 to auth_size, auth_offset */ + writel_relaxed(0, pce_dev->iobase + CRYPTO_AUTH_SEG_CFG_REG); + + /* write encr_seg_cfg seg_size, seg_offset */ + writel_relaxed((((uint32_t) cipher_size) << CRYPTO_ENCR_SEG_SIZE) | + (cipher_offset & 0xffff), + pce_dev->iobase + CRYPTO_ENCR_SEG_CFG_REG); + + /* write keys */ + _byte_stream_to_net_words(ckey, &req->ckey[0], OTA_KEY_SIZE); + writel_relaxed(ckey[0], pce_dev->iobase + CRYPTO_DES_KEY0_REG); + writel_relaxed(ckey[1], pce_dev->iobase + CRYPTO_DES_KEY1_REG); + writel_relaxed(ckey[2], pce_dev->iobase + CRYPTO_DES_KEY2_REG); + writel_relaxed(ckey[3], pce_dev->iobase + CRYPTO_DES_KEY3_REG); + + /* write cntr0_iv0 for countC */ + writel_relaxed(req->count_c, pce_dev->iobase + CRYPTO_CNTR0_IV0_REG); + + /* write cntr1_iv1 for nPkts, and bearer */ + if (npkts == 1) + npkts = 0; + writel_relaxed(req->bearer << CRYPTO_CNTR1_IV1_REG_F8_BEARER | + npkts << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT, + pce_dev->iobase + CRYPTO_CNTR1_IV1_REG); + + /* issue go to crypto */ + writel_relaxed(1 << CRYPTO_GO, pce_dev->iobase + CRYPTO_GOPROC_REG); + + /* + * barrier to ensure previous instructions + * (including GO) to CE finish before issue DMA transfer + * request. 
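+	 * (writel_relaxed() is unordered; without the mb() the ADM request
+	 * could be queued before the GO bit reaches the CE.)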
+ */ + mb(); + return 0; +}; + +int qce_aead_req(void *handle, struct qce_req *q_req) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + struct aead_request *areq = (struct aead_request *) q_req->areq; + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + uint32_t ivsize = crypto_aead_ivsize(aead); + uint32_t totallen; + uint32_t pad_len; + uint32_t authsize = crypto_aead_authsize(aead); + int rc = 0; + + q_req->ivsize = ivsize; + if (q_req->dir == QCE_ENCRYPT) + q_req->cryptlen = areq->cryptlen; + else + q_req->cryptlen = areq->cryptlen - authsize; + + totallen = q_req->cryptlen + ivsize + areq->assoclen; + pad_len = ALIGN(totallen, ADM_CE_BLOCK_SIZE) - totallen; + + _chain_buffer_in_init(pce_dev); + _chain_buffer_out_init(pce_dev); + + pce_dev->assoc_nents = 0; + pce_dev->phy_iv_in = 0; + pce_dev->src_nents = 0; + pce_dev->dst_nents = 0; + + pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen); + dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, + DMA_TO_DEVICE); + if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) { + rc = -ENOMEM; + goto bad; + } + + /* cipher iv for input */ + pce_dev->phy_iv_in = dma_map_single(pce_dev->pdev, q_req->iv, + ivsize, DMA_TO_DEVICE); + if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_iv_in, ivsize) < 0) { + rc = -ENOMEM; + goto bad; + } + + /* for output, ignore associated data and cipher iv */ + if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_out_ignore, + ivsize + areq->assoclen) < 0) { + rc = -ENOMEM; + goto bad; + } + + /* cipher input */ + pce_dev->src_nents = count_sg(areq->src, q_req->cryptlen); + dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + if (_chain_sg_buffer_in(pce_dev, areq->src, q_req->cryptlen) < 0) { + rc = -ENOMEM; + goto bad; + } + + /* cipher output */ + if (areq->src != areq->dst) { + pce_dev->dst_nents = count_sg(areq->dst, q_req->cryptlen); + dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, + DMA_FROM_DEVICE); + }; + if (_chain_sg_buffer_out(pce_dev, areq->dst, q_req->cryptlen) < 0) { + rc = -ENOMEM; + goto bad; + } + + /* pad data */ + if (pad_len) { + if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + } + + /* finalize the ce_in and ce_out channels command lists */ + _ce_in_final(pce_dev, 1, ALIGN(totallen, ADM_CE_BLOCK_SIZE)); + _ce_out_final(pce_dev, 2, ALIGN(totallen, ADM_CE_BLOCK_SIZE)); + + /* set up crypto device */ + rc = _ce_setup(pce_dev, q_req, totallen, ivsize + areq->assoclen); + if (rc < 0) + goto bad; + + /* setup for callback, and issue command to adm */ + pce_dev->areq = q_req->areq; + pce_dev->qce_cb = q_req->qce_cb; + + pce_dev->chan_ce_in_cmd->complete_func = _aead_ce_in_call_back; + pce_dev->chan_ce_out_cmd->complete_func = _aead_ce_out_call_back; + + rc = _qce_start_dma(pce_dev, true, true); + if (rc == 0) + return 0; +bad: + if (pce_dev->assoc_nents) { + dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, + DMA_TO_DEVICE); + } + if (pce_dev->phy_iv_in) { + dma_unmap_single(pce_dev->pdev, pce_dev->phy_iv_in, + ivsize, DMA_TO_DEVICE); + } + if (pce_dev->src_nents) { + dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + } + if (pce_dev->dst_nents) { + dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, + DMA_FROM_DEVICE); + } + return rc; +} +EXPORT_SYMBOL(qce_aead_req); + +int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) +{ + int rc = 0; + struct qce_device *pce_dev = (struct qce_device *) handle; + struct ablkcipher_request *areq = (struct ablkcipher_request *) + c_req->areq; + + uint32_t pad_len = ALIGN(areq->nbytes, ADM_CE_BLOCK_SIZE) + - areq->nbytes; + + _chain_buffer_in_init(pce_dev); + _chain_buffer_out_init(pce_dev); + + pce_dev->src_nents = 0; + pce_dev->dst_nents = 0; + /* cipher input */ + pce_dev->src_nents = count_sg(areq->src, areq->nbytes); + + if (c_req->use_pmem != 1) + dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + else + dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents, + areq->src); + + if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) { + rc = -ENOMEM; + goto bad; + } + + /* cipher output */ + if (areq->src != areq->dst) { + pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes); + if (c_req->use_pmem != 1) + dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, + DMA_FROM_DEVICE); + else + dma_map_pmem_sg(&c_req->pmem->dst[0], + pce_dev->dst_nents, areq->dst); + }; + if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) { + rc = -ENOMEM; + goto bad; + } + + /* pad data */ + if (pad_len) { + if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + } + + /* finalize the ce_in and ce_out channels command lists */ + _ce_in_final(pce_dev, 1, areq->nbytes + pad_len); + _ce_out_final(pce_dev, 1, areq->nbytes + pad_len); + +#ifdef QCE_DEBUG + _ce_in_dump(pce_dev); + _ce_out_dump(pce_dev); +#endif + /* set up crypto device */ + rc = _ce_setup(pce_dev, c_req, areq->nbytes, 0); + if (rc < 0) + goto bad; + + /* setup for callback, and issue command to adm */ + pce_dev->areq = areq; + pce_dev->qce_cb = c_req->qce_cb; + if (c_req->use_pmem == 1) { + pce_dev->chan_ce_in_cmd->complete_func = + _ablk_cipher_ce_in_call_back_pmem; + pce_dev->chan_ce_out_cmd->complete_func = + _ablk_cipher_ce_out_call_back_pmem; + } else { + pce_dev->chan_ce_in_cmd->complete_func = + _ablk_cipher_ce_in_call_back; + pce_dev->chan_ce_out_cmd->complete_func = + _ablk_cipher_ce_out_call_back; + } + rc = _qce_start_dma(pce_dev, true, true); + + if (rc == 0) + return 0; +bad: + if (c_req->use_pmem != 1) { + if (pce_dev->dst_nents) { + dma_unmap_sg(pce_dev->pdev, areq->dst, + pce_dev->dst_nents, DMA_FROM_DEVICE); + } + if (pce_dev->src_nents) { + dma_unmap_sg(pce_dev->pdev, areq->src, + pce_dev->src_nents, + (areq->src == areq->dst) ? 
+ DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + } + } + return rc; +} +EXPORT_SYMBOL(qce_ablk_cipher_req); + +int qce_process_sha_req(void *handle, struct qce_sha_req *sreq) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + int rc; + uint32_t pad_len = ALIGN(sreq->size, ADM_CE_BLOCK_SIZE) - sreq->size; + struct ahash_request *areq = (struct ahash_request *)sreq->areq; + + _chain_buffer_in_init(pce_dev); + pce_dev->src_nents = count_sg(sreq->src, sreq->size); + dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents, + DMA_TO_DEVICE); + + if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) { + rc = -ENOMEM; + goto bad; + } + + if (pad_len) { + if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + } + _ce_in_final(pce_dev, 2, sreq->size + pad_len); + +#ifdef QCE_DEBUG + _ce_in_dump(pce_dev); +#endif + + rc = _sha_ce_setup(pce_dev, sreq); + + if (rc < 0) + goto bad; + + pce_dev->areq = areq; + pce_dev->qce_cb = sreq->qce_cb; + pce_dev->chan_ce_in_cmd->complete_func = _sha_ce_in_call_back; + + rc = _qce_start_dma(pce_dev, true, false); + + if (rc == 0) + return 0; +bad: + if (pce_dev->src_nents) { + dma_unmap_sg(pce_dev->pdev, sreq->src, + pce_dev->src_nents, DMA_TO_DEVICE); + } + + return rc; +} +EXPORT_SYMBOL(qce_process_sha_req); + +/* + * crypto engine open function. + */ +void *qce_open(struct platform_device *pdev, int *rc) +{ + struct qce_device *pce_dev; + struct resource *resource; + struct clk *ce_clk; + + pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL); + if (!pce_dev) { + *rc = -ENOMEM; + dev_err(&pdev->dev, "Can not allocate memory\n"); + return NULL; + } + pce_dev->pdev = &pdev->dev; + ce_clk = clk_get(pce_dev->pdev, "core_clk"); + if (IS_ERR(ce_clk)) { + kfree(pce_dev); + *rc = PTR_ERR(ce_clk); + return NULL; + } + pce_dev->ce_clk = ce_clk; + *rc = clk_enable(pce_dev->ce_clk); + if (*rc) { + kfree(pce_dev); + return NULL; + } + + resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!resource) { + *rc = -ENXIO; + dev_err(pce_dev->pdev, "Missing MEM resource\n"); + goto err; + }; + pce_dev->phy_iobase = resource->start; + pce_dev->iobase = ioremap_nocache(resource->start, + resource->end - resource->start + 1); + if (!pce_dev->iobase) { + *rc = -ENOMEM; + dev_err(pce_dev->pdev, "Can not map io memory\n"); + goto err; + } + + pce_dev->chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd), + GFP_KERNEL); + pce_dev->chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd), + GFP_KERNEL); + if (pce_dev->chan_ce_in_cmd == NULL || + pce_dev->chan_ce_out_cmd == NULL) { + dev_err(pce_dev->pdev, "Can not allocate memory\n"); + *rc = -ENOMEM; + goto err; + } + + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "crypto_channels"); + if (!resource) { + *rc = -ENXIO; + dev_err(pce_dev->pdev, "Missing DMA channel resource\n"); + goto err; + }; + pce_dev->chan_ce_in = resource->start; + pce_dev->chan_ce_out = resource->end; + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "crypto_crci_in"); + if (!resource) { + *rc = -ENXIO; + dev_err(pce_dev->pdev, "Missing DMA crci in resource\n"); + goto err; + }; + pce_dev->crci_in = resource->start; + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "crypto_crci_out"); + if (!resource) { + *rc = -ENXIO; + dev_err(pce_dev->pdev, "Missing DMA crci out resource\n"); + goto err; + }; + pce_dev->crci_out = resource->start; + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "crypto_crci_hash"); + if (!resource) { + 
*rc = -ENXIO; + dev_err(pce_dev->pdev, "Missing DMA crci hash resource\n"); + goto err; + }; + pce_dev->crci_hash = resource->start; + pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev, + 2*PAGE_SIZE, &pce_dev->coh_pmem, GFP_KERNEL); + + if (pce_dev->coh_vmem == NULL) { + *rc = -ENOMEM; + dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n"); + goto err; + } + _setup_cmd_template(pce_dev); + + pce_dev->chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + if (_init_ce_engine(pce_dev)) { + *rc = -ENXIO; + clk_disable(pce_dev->ce_clk); + goto err; + } + *rc = 0; + clk_disable(pce_dev->ce_clk); + + pce_dev->err = 0; + + return pce_dev; +err: + if (pce_dev) + qce_close(pce_dev); + return NULL; +} +EXPORT_SYMBOL(qce_open); + +/* + * crypto engine close function. + */ +int qce_close(void *handle) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + + if (handle == NULL) + return -ENODEV; + if (pce_dev->iobase) + iounmap(pce_dev->iobase); + + if (pce_dev->coh_vmem) + dma_free_coherent(pce_dev->pdev, 2*PAGE_SIZE, pce_dev->coh_vmem, + pce_dev->coh_pmem); + kfree(pce_dev->chan_ce_in_cmd); + kfree(pce_dev->chan_ce_out_cmd); + + clk_put(pce_dev->ce_clk); + kfree(handle); + return 0; +} +EXPORT_SYMBOL(qce_close); + +int qce_hw_support(void *handle, struct ce_hw_support *ce_support) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + + if (ce_support == NULL) + return -EINVAL; + + if (pce_dev->hmac == 1) + ce_support->sha1_hmac_20 = true; + else + ce_support->sha1_hmac_20 = false; + ce_support->sha1_hmac = false; + ce_support->sha256_hmac = false; + ce_support->sha_hmac = false; + ce_support->cmac = false; + ce_support->aes_key_192 = true; + ce_support->aes_xts = false; + ce_support->aes_ccm = false; + ce_support->ota = pce_dev->ota; + return 0; +} +EXPORT_SYMBOL(qce_hw_support); + +int qce_f8_req(void *handle, struct qce_f8_req *req, + void *cookie, qce_comp_func_ptr_t qce_cb) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + bool key_stream_mode; + dma_addr_t dst; + int rc; + uint32_t pad_len = ALIGN(req->data_len, ADM_CE_BLOCK_SIZE) - + req->data_len; + + _chain_buffer_in_init(pce_dev); + _chain_buffer_out_init(pce_dev); + + key_stream_mode = (req->data_in == NULL); + + /* F8 cipher input */ + if (key_stream_mode) + pce_dev->phy_ota_src = 0; + else { + pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, + req->data_in, req->data_len, + (req->data_in == req->data_out) ? 
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, + req->data_len) < 0) { + pce_dev->phy_ota_dst = 0; + rc = -ENOMEM; + goto bad; + } + } + + /* F8 cipher output */ + if (req->data_in != req->data_out) { + dst = dma_map_single(pce_dev->pdev, req->data_out, + req->data_len, DMA_FROM_DEVICE); + pce_dev->phy_ota_dst = dst; + } else { + dst = pce_dev->phy_ota_src; + pce_dev->phy_ota_dst = 0; + } + if (_chain_pm_buffer_out(pce_dev, dst, req->data_len) < 0) { + rc = -ENOMEM; + goto bad; + } + + pce_dev->ota_size = req->data_len; + + /* pad data */ + if (pad_len) { + if (!key_stream_mode && _chain_pm_buffer_in(pce_dev, + pce_dev->phy_ce_pad, pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + } + + /* finalize the ce_in and ce_out channels command lists */ + if (!key_stream_mode) + _ce_in_final(pce_dev, 1, req->data_len + pad_len); + _ce_out_final(pce_dev, 1, req->data_len + pad_len); + + /* set up crypto device */ + rc = _ce_f8_setup(pce_dev, req, key_stream_mode, 1, 0, req->data_len); + if (rc < 0) + goto bad; + + /* setup for callback, and issue command to adm */ + pce_dev->areq = cookie; + pce_dev->qce_cb = qce_cb; + + if (!key_stream_mode) + pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back; + + pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back; + + rc = _qce_start_dma(pce_dev, !(key_stream_mode), true); + if (rc == 0) + return 0; +bad: + if (pce_dev->phy_ota_dst != 0) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, + req->data_len, DMA_FROM_DEVICE); + if (pce_dev->phy_ota_src != 0) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, + req->data_len, + (req->data_in == req->data_out) ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + return rc; +} +EXPORT_SYMBOL(qce_f8_req); + +int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *mreq, + void *cookie, qce_comp_func_ptr_t qce_cb) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + uint16_t num_pkt = mreq->num_pkt; + uint16_t cipher_start = mreq->cipher_start; + uint16_t cipher_size = mreq->cipher_size; + struct qce_f8_req *req = &mreq->qce_f8_req; + uint32_t total; + uint32_t pad_len; + dma_addr_t dst = 0; + int rc = 0; + + total = num_pkt * req->data_len; + pad_len = ALIGN(total, ADM_CE_BLOCK_SIZE) - total; + + _chain_buffer_in_init(pce_dev); + _chain_buffer_out_init(pce_dev); + + /* F8 cipher input */ + pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, + req->data_in, total, + (req->data_in == req->data_out) ? 
+ DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, + total) < 0) { + pce_dev->phy_ota_dst = 0; + rc = -ENOMEM; + goto bad; + } + /* F8 cipher output */ + if (req->data_in != req->data_out) { + dst = dma_map_single(pce_dev->pdev, req->data_out, total, + DMA_FROM_DEVICE); + pce_dev->phy_ota_dst = dst; + } else { + dst = pce_dev->phy_ota_src; + pce_dev->phy_ota_dst = 0; + } + if (_chain_pm_buffer_out(pce_dev, dst, total) < 0) { + rc = -ENOMEM; + goto bad; + } + + pce_dev->ota_size = total; + + /* pad data */ + if (pad_len) { + if (_chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + if (_chain_pm_buffer_out(pce_dev, pce_dev->phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + } + + /* finalize the ce_in and ce_out channels command lists */ + _ce_in_final(pce_dev, 1, total + pad_len); + _ce_out_final(pce_dev, 1, total + pad_len); + + + /* set up crypto device */ + rc = _ce_f8_setup(pce_dev, req, false, num_pkt, cipher_start, + cipher_size); + if (rc) + goto bad ; + + /* setup for callback, and issue command to adm */ + pce_dev->areq = cookie; + pce_dev->qce_cb = qce_cb; + + pce_dev->chan_ce_in_cmd->complete_func = _f8_ce_in_call_back; + pce_dev->chan_ce_out_cmd->complete_func = _f8_ce_out_call_back; + + rc = _qce_start_dma(pce_dev, true, true); + if (rc == 0) + return 0; +bad: + if (pce_dev->phy_ota_dst) + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_dst, total, + DMA_FROM_DEVICE); + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, total, + (req->data_in == req->data_out) ? + DMA_BIDIRECTIONAL : DMA_TO_DEVICE); + return rc; +} +EXPORT_SYMBOL(qce_f8_multi_pkt_req); + +int qce_f9_req(void *handle, struct qce_f9_req *req, void *cookie, + qce_comp_func_ptr_t qce_cb) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + int rc; + uint32_t pad_len = ALIGN(req->msize, ADM_CE_BLOCK_SIZE) - req->msize; + + pce_dev->phy_ota_src = dma_map_single(pce_dev->pdev, req->message, + req->msize, DMA_TO_DEVICE); + + _chain_buffer_in_init(pce_dev); + rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ota_src, req->msize); + if (rc < 0) { + rc = -ENOMEM; + goto bad; + } + + pce_dev->ota_size = req->msize; + if (pad_len) { + rc = _chain_pm_buffer_in(pce_dev, pce_dev->phy_ce_pad, + pad_len); + if (rc < 0) { + rc = -ENOMEM; + goto bad; + } + } + _ce_in_final(pce_dev, 2, req->msize + pad_len); + rc = _ce_f9_setup(pce_dev, req); + if (rc < 0) + goto bad; + + /* setup for callback, and issue command to adm */ + pce_dev->areq = cookie; + pce_dev->qce_cb = qce_cb; + + pce_dev->chan_ce_in_cmd->complete_func = _f9_ce_in_call_back; + + rc = _qce_start_dma(pce_dev, true, false); + if (rc == 0) + return 0; +bad: + dma_unmap_single(pce_dev->pdev, pce_dev->phy_ota_src, + req->msize, DMA_TO_DEVICE); + return rc; +} +EXPORT_SYMBOL(qce_f9_req); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Mona Hossain "); +MODULE_DESCRIPTION("Crypto Engine driver"); +MODULE_VERSION("1.15"); + diff --git a/drivers/crypto/msm/qce.h b/drivers/crypto/msm/qce.h new file mode 100644 index 000000000000..edd2089b4558 --- /dev/null +++ b/drivers/crypto/msm/qce.h @@ -0,0 +1,160 @@ +/* Qualcomm Crypto Engine driver API + * + * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + + +#ifndef __CRYPTO_MSM_QCE_H +#define __CRYPTO_MSM_QCE_H + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +/* SHA digest size in bytes */ +#define SHA256_DIGESTSIZE 32 +#define SHA1_DIGESTSIZE 20 + +/* key size in bytes */ +#define HMAC_KEY_SIZE (SHA1_DIGESTSIZE) /* hmac-sha1 */ +#define SHA_HMAC_KEY_SIZE 64 +#define DES_KEY_SIZE 8 +#define TRIPLE_DES_KEY_SIZE 24 +#define AES128_KEY_SIZE 16 +#define AES192_KEY_SIZE 24 +#define AES256_KEY_SIZE 32 +#define MAX_CIPHER_KEY_SIZE AES256_KEY_SIZE + +/* iv length in bytes */ +#define AES_IV_LENGTH 16 +#define DES_IV_LENGTH 8 +#define MAX_IV_LENGTH AES_IV_LENGTH + +/* Maximum number of bytes per transfer */ +#define QCE_MAX_OPER_DATA 0xFF00 + +/* Maximum Nonce bytes */ +#define MAX_NONCE 16 + +typedef void (*qce_comp_func_ptr_t)(void *areq, + unsigned char *icv, unsigned char *iv, int ret); + +/* Cipher algorithms supported */ +enum qce_cipher_alg_enum { + CIPHER_ALG_DES = 0, + CIPHER_ALG_3DES = 1, + CIPHER_ALG_AES = 2, + CIPHER_ALG_LAST +}; + +/* Hash and hmac algorithms supported */ +enum qce_hash_alg_enum { + QCE_HASH_SHA1 = 0, + QCE_HASH_SHA256 = 1, + QCE_HASH_SHA1_HMAC = 2, + QCE_HASH_SHA256_HMAC = 3, + QCE_HASH_AES_CMAC = 4, + QCE_HASH_LAST +}; + +/* Cipher encryption/decryption operations */ +enum qce_cipher_dir_enum { + QCE_ENCRYPT = 0, + QCE_DECRYPT = 1, + QCE_CIPHER_DIR_LAST +}; + +/* Cipher algorithms modes */ +enum qce_cipher_mode_enum { + QCE_MODE_CBC = 0, + QCE_MODE_ECB = 1, + QCE_MODE_CTR = 2, + QCE_MODE_XTS = 3, + QCE_MODE_CCM = 4, + QCE_CIPHER_MODE_LAST +}; + +/* Cipher operation type */ +enum qce_req_op_enum { + QCE_REQ_ABLK_CIPHER = 0, + QCE_REQ_ABLK_CIPHER_NO_KEY = 1, + QCE_REQ_AEAD = 2, + QCE_REQ_LAST +}; + +/* Algorithms/features supported in CE HW engine */ +struct ce_hw_support { + bool sha1_hmac_20; /* Supports 20 bytes of HMAC key*/ + bool sha1_hmac; /* supports max HMAC key of 64 bytes*/ + bool sha256_hmac; /* supports max HMAC key of 64 bytes*/ + bool sha_hmac; /* supports SHA1 and SHA256 MAX HMAC key of 64 bytes*/ + bool cmac; + bool aes_key_192; + bool aes_xts; + bool aes_ccm; + bool ota; +}; + +/* Sha operation parameters */ +struct qce_sha_req { + qce_comp_func_ptr_t qce_cb; /* call back */ + enum qce_hash_alg_enum alg; /* sha algorithm */ + unsigned char *digest; /* sha digest */ + struct scatterlist *src; /* pointer to scatter list entry */ + uint32_t auth_data[4]; /* byte count */ + unsigned char *authkey; /* auth key */ + unsigned int authklen; /* auth key length */ + bool first_blk; /* first block indicator */ + bool last_blk; /* last block indicator */ + unsigned int size; /* data length in bytes */ + void *areq; +}; + +struct qce_req { + enum qce_req_op_enum op; /* operation type */ + qce_comp_func_ptr_t qce_cb; /* call back */ + void *areq; + enum qce_cipher_alg_enum alg; /* cipher algorithms*/ + enum qce_cipher_dir_enum dir; /* encryption? decryption? 
 */
+	enum qce_cipher_mode_enum mode;	/* algorithm mode */
+	unsigned char *authkey;		/* authentication key */
+	unsigned int authklen;		/* authentication key length */
+	unsigned int authsize;		/* authentication tag (ICV) length */
+	unsigned char nonce[MAX_NONCE];/* nonce for ccm mode */
+	unsigned char *assoc;		/* Ptr to formatted associated data */
+	unsigned int assoclen;		/* Formatted associated data length */
+	struct scatterlist *asg;	/* Formatted associated data sg */
+	unsigned char *enckey;		/* cipher key */
+	unsigned int encklen;		/* cipher key length */
+	unsigned char *iv;		/* initialization vector */
+	unsigned int ivsize;		/* initialization vector size */
+	unsigned int cryptlen;		/* data length */
+	unsigned int use_pmem;		/* is source of data PMEM allocated? */
+	struct qcedev_pmem_info *pmem;	/* pointer to pmem_info structure */
+};
+
+void *qce_open(struct platform_device *pdev, int *rc);
+int qce_close(void *handle);
+int qce_aead_req(void *handle, struct qce_req *req);
+int qce_ablk_cipher_req(void *handle, struct qce_req *req);
+int qce_hw_support(void *handle, struct ce_hw_support *support);
+int qce_process_sha_req(void *handle, struct qce_sha_req *s_req);
+
+#endif /* __CRYPTO_MSM_QCE_H */
diff --git a/drivers/crypto/msm/qce40.c b/drivers/crypto/msm/qce40.c
new file mode 100644
index 000000000000..c203fc515de3
--- /dev/null
+++ b/drivers/crypto/msm/qce40.c
@@ -0,0 +1,2609 @@
+/* Qualcomm Crypto Engine driver.
+ *
+ * Copyright (c) 2011 - 2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "qce.h"
+#include "qce40.h"
+#include "qcryptohw_40.h"
+
+/* ADM definitions */
+#define LI_SG_CMD  (1 << 31)    /* last index in the scatter gather cmd */
+#define SRC_INDEX_SG_CMD(index) ((index & 0x3fff) << 16)
+#define DST_INDEX_SG_CMD(index) (index & 0x3fff)
+#define ADM_DESC_LAST  (1 << 31)
+#define QCE_FIFO_SIZE  0x8000
+
+/*
+ * CE HW device structure.
+ * Each engine has an instance of the structure.
+ * Each engine can only handle one crypto operation at one time. It is up to
+ * the software above to ensure single threading of operation on an engine.
+ */ +struct qce_device { + struct device *pdev; /* Handle to platform_device structure */ + + unsigned char *coh_vmem; /* Allocated coherent virtual memory */ + dma_addr_t coh_pmem; /* Allocated coherent physical memory */ + int memsize; /* Memory allocated */ + + void __iomem *iobase; /* Virtual io base of CE HW */ + unsigned int phy_iobase; /* Physical io base of CE HW */ + + struct clk *ce_core_src_clk; /* Handle to CE src clk*/ + struct clk *ce_core_clk; /* Handle to CE clk */ + struct clk *ce_clk; /* Handle to CE clk */ + + qce_comp_func_ptr_t qce_cb; /* qce callback function pointer */ + + int assoc_nents; + int ivsize; + int authsize; + int src_nents; + int dst_nents; + + void *areq; + enum qce_cipher_mode_enum mode; + struct ce_dm_data ce_dm; +}; + +/* Standard initialization vector for SHA-1, source: FIPS 180-2 */ +static uint8_t _std_init_vector_sha1_uint8[] = { + 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, + 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76, + 0xC3, 0xD2, 0xE1, 0xF0 +}; + +/* Standard initialization vector for SHA-256, source: FIPS 180-2 */ +static uint8_t _std_init_vector_sha256_uint8[] = { + 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85, + 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A, + 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C, + 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19 +}; + +static void _byte_stream_swap_to_net_words(uint32_t *iv, unsigned char *b, + unsigned int len) +{ + unsigned i, j; + unsigned char swap_iv[AES_IV_LENGTH]; + + memset(swap_iv, 0, AES_IV_LENGTH); + for (i = (AES_IV_LENGTH-len), j = len-1; i < AES_IV_LENGTH; i++, j--) + swap_iv[i] = b[j]; + memcpy(iv, swap_iv, AES_IV_LENGTH); +} + +static int count_sg(struct scatterlist *sg, int nbytes) +{ + int i; + + for (i = 0; nbytes > 0; i++, sg = sg_next(sg)) + nbytes -= sg->length; + return i; +} + +static int dma_map_pmem_sg(struct buf_info *pmem, unsigned entries, + struct scatterlist *sg) +{ + int i; + for (i = 0; i < entries; i++) { + + sg->dma_address = (dma_addr_t)pmem->offset; + sg++; + pmem++; + } + return 0; +} + +static int _probe_ce_engine(struct qce_device *pce_dev) +{ + unsigned int val; + unsigned int rev; + unsigned int ret; + + val = (uint32_t)(*((uint32_t *)pce_dev->ce_dm.buffer.version)); + if (((val & 0xfffffff) != 0x0000043) && + ((val & 0xfffffff) != 0x0000042) && + ((val & 0xfffffff) != 0x0000040)) { + dev_err(pce_dev->pdev, + "Unknown Qualcomm crypto device at 0x%x 0x%x\n", + pce_dev->phy_iobase, val); + return -EIO; + }; + rev = (val & CRYPTO_CORE_REV_MASK); + if (rev >= 0x42) { + dev_info(pce_dev->pdev, + "Qualcomm Crypto 4.2 device found at 0x%x\n", + pce_dev->phy_iobase); + pce_dev->ce_dm.ce_block_size = 64; + + /* Configure the crypto register to support 64byte CRCI if it + * is not XPU protected and the HW version of device is greater + * than 0x42. + * Crypto config register returns a 0 when it is XPU protected. 
+		 */
+
+		ret = readl_relaxed(pce_dev->iobase + CRYPTO_CONFIG_REG);
+		if (ret) {
+			val = BIT(CRYPTO_MASK_DOUT_INTR) |
+				BIT(CRYPTO_MASK_DIN_INTR) |
+				BIT(CRYPTO_MASK_OP_DONE_INTR) |
+				BIT(CRYPTO_MASK_ERR_INTR) |
+				(CRYPTO_REQ_SIZE_ENUM_64_BYTES <<
+						CRYPTO_REQ_SIZE) |
+				(CRYPTO_FIFO_ENUM_64_BYTES <<
+						CRYPTO_FIFO_THRESHOLD);
+
+			writel_relaxed(val, pce_dev->iobase +
+						CRYPTO_CONFIG_REG);
+		} /* end of if (ret) */
+	} else {
+		if (rev == 0x40) {
+			dev_info(pce_dev->pdev,
+				"Qualcomm Crypto 4.0 device found at 0x%x\n",
+				pce_dev->phy_iobase);
+			pce_dev->ce_dm.ce_block_size = 16;
+		}
+	}
+
+	dev_info(pce_dev->pdev,
+			"IO base 0x%x, ce_in channel %d, "
+			"ce_out channel %d, "
+			"crci_in %d, crci_out %d\n",
+			(unsigned int) pce_dev->iobase,
+			pce_dev->ce_dm.chan_ce_in, pce_dev->ce_dm.chan_ce_out,
+			pce_dev->ce_dm.crci_in, pce_dev->ce_dm.crci_out);
+
+	return 0;
+};
+
+
+static void _check_probe_done_call_back(struct msm_dmov_cmd *cmd_ptr,
+		unsigned int result, struct msm_dmov_errdata *err)
+{
+	struct qce_device *pce_dev;
+	pce_dev = (struct qce_device *) cmd_ptr->user;
+
+	if (result != ADM_STATUS_OK) {
+		dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n",
+							result);
+		pce_dev->ce_dm.chan_ce_in_status = -1;
+	} else {
+		_probe_ce_engine(pce_dev);
+		pce_dev->ce_dm.chan_ce_in_status = 0;
+	}
+	pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IDLE;
+};
+
+static int _init_ce_engine(struct qce_device *pce_dev)
+{
+	int status;
+	/* Reset ce */
+	clk_reset(pce_dev->ce_core_clk, CLK_RESET_ASSERT);
+	clk_reset(pce_dev->ce_core_clk, CLK_RESET_DEASSERT);
+
+	/*
+	 * Ensure the previous instructions (the writes to the CLK registers
+	 * that toggle the CLK reset lines) have completed before configuring
+	 * the ce engine. The ce engine configuration settings should not be
+	 * lost because of the clk reset.
+	 */
+	mb();
+
+	/*
+	 * Clear ACCESS_VIOL bit in CRYPTO_STATUS REGISTER
+	 */
+	status = readl_relaxed(pce_dev->iobase + CRYPTO_STATUS_REG);
+	*((uint32_t *)(pce_dev->ce_dm.buffer.status)) = status & (~0x40000);
+	/*
+	 * Ensure ce configuration is completed.
+ */ + mb(); + + pce_dev->ce_dm.chan_ce_in_cmd->complete_func = + _check_probe_done_call_back; + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + pce_dev->ce_dm.cmdptrlist.probe_ce_hw; + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IN_PROG; + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_COMP; + msm_dmov_enqueue_cmd(pce_dev->ce_dm.chan_ce_in, + pce_dev->ce_dm.chan_ce_in_cmd); + + return 0; +}; + +static int _ce_setup_hash_cmdrptrlist(struct qce_device *pce_dev, + struct qce_sha_req *sreq) +{ + struct ce_cmdptrlists_ops *cmdptrlist = &pce_dev->ce_dm.cmdptrlist; + + switch (sreq->alg) { + case QCE_HASH_SHA1: + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = cmdptrlist->auth_sha1; + break; + + case QCE_HASH_SHA256: + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = cmdptrlist->auth_sha256; + break; + case QCE_HASH_SHA1_HMAC: + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->auth_sha1_hmac; + break; + + case QCE_HASH_SHA256_HMAC: + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->auth_sha256_hmac; + break; + case QCE_HASH_AES_CMAC: + if (sreq->authklen == AES128_KEY_SIZE) + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->auth_aes_128_cmac; + else + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->auth_aes_256_cmac; + break; + + default: + break; + } + + return 0; +} + +static int _ce_setup_hash(struct qce_device *pce_dev, struct qce_sha_req *sreq) +{ + uint32_t diglen; + int i; + uint32_t auth_cfg = 0; + bool sha1 = false; + + if (sreq->alg == QCE_HASH_AES_CMAC) { + + memcpy(pce_dev->ce_dm.buffer.auth_key, sreq->authkey, + sreq->authklen); + auth_cfg |= (1 << CRYPTO_LAST); + auth_cfg |= (CRYPTO_AUTH_MODE_CMAC << CRYPTO_AUTH_MODE); + auth_cfg |= (CRYPTO_AUTH_SIZE_ENUM_16_BYTES << + CRYPTO_AUTH_SIZE); + auth_cfg |= CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG; + + switch (sreq->authklen) { + case AES128_KEY_SIZE: + auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 << + CRYPTO_AUTH_KEY_SIZE); + break; + case AES256_KEY_SIZE: + auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 << + CRYPTO_AUTH_KEY_SIZE); + break; + default: + break; + } + + goto go_proc; + } + + /* if not the last, the size has to be on the block boundary */ + if (sreq->last_blk == 0 && (sreq->size % SHA256_BLOCK_SIZE)) + return -EIO; + + switch (sreq->alg) { + case QCE_HASH_SHA1: + case QCE_HASH_SHA1_HMAC: + diglen = SHA1_DIGEST_SIZE; + sha1 = true; + break; + case QCE_HASH_SHA256: + case QCE_HASH_SHA256_HMAC: + diglen = SHA256_DIGEST_SIZE; + break; + default: + return -EINVAL; + } + + if ((sreq->alg == QCE_HASH_SHA1_HMAC) || + (sreq->alg == QCE_HASH_SHA256_HMAC)) { + + memcpy(pce_dev->ce_dm.buffer.auth_key, sreq->authkey, + sreq->authklen); + auth_cfg |= (CRYPTO_AUTH_MODE_HMAC << CRYPTO_AUTH_MODE); + } else { + auth_cfg |= (CRYPTO_AUTH_MODE_HASH << CRYPTO_AUTH_MODE); + } + + /* write 20/32 bytes, 5/8 words into auth_iv for SHA1/SHA256 */ + if (sreq->first_blk) { + if (sha1) + memcpy(pce_dev->ce_dm.buffer.auth_iv, + _std_init_vector_sha1_uint8, diglen); + else + memcpy(pce_dev->ce_dm.buffer.auth_iv, + _std_init_vector_sha256_uint8, diglen); + } else { + memcpy(pce_dev->ce_dm.buffer.auth_iv, sreq->digest, + diglen); + } + + /* write auth_bytecnt 0/1/2/3, start with 0 */ + for (i = 0; i < 4; i++) + *(((uint32_t *)(pce_dev->ce_dm.buffer.auth_byte_count) + i)) = + sreq->auth_data[i]; + + /* write seg_cfg */ + if (sha1) + auth_cfg |= (CRYPTO_AUTH_SIZE_SHA1 << CRYPTO_AUTH_SIZE); + else + auth_cfg |= (CRYPTO_AUTH_SIZE_SHA256 << CRYPTO_AUTH_SIZE); + + if (sreq->last_blk) + auth_cfg |= 1 << CRYPTO_LAST; + + auth_cfg |= CRYPTO_AUTH_ALG_SHA << CRYPTO_AUTH_ALG; + 
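+	/*
+	 * Common tail for both the CMAC path (which jumps here, since CMAC
+	 * programs neither an intermediate digest nor a byte count) and the
+	 * SHA/HMAC path: set the authentication data position, then fill in
+	 * the auth segment cfg/size/start words of the data mover buffer.
+	 */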
+go_proc: + auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); + + /* write auth seg cfg */ + *((uint32_t *)(pce_dev->ce_dm.buffer.auth_seg_cfg_size_start)) = + auth_cfg; + /* write auth seg size */ + *((uint32_t *)(pce_dev->ce_dm.buffer.auth_seg_cfg_size_start) + 1) = + sreq->size; + + /* write auth seg size start*/ + *((uint32_t *)(pce_dev->ce_dm.buffer.auth_seg_cfg_size_start)+2) = 0; + + /* write seg size */ + *((uint32_t *)(pce_dev->ce_dm.buffer.seg_size)) = sreq->size; + + _ce_setup_hash_cmdrptrlist(pce_dev, sreq); + + return 0; +} + +static int _ce_setup_cipher_cmdrptrlist(struct qce_device *pce_dev, + struct qce_req *creq) +{ + struct ce_cmdptrlists_ops *cmdptrlist = + &pce_dev->ce_dm.cmdptrlist; + + if (creq->alg != CIPHER_ALG_AES) { + switch (creq->alg) { + case CIPHER_ALG_DES: + if (creq->mode == QCE_MODE_ECB) { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_des_ecb; + } else { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_des_cbc; + } + break; + + case CIPHER_ALG_3DES: + if (creq->mode == QCE_MODE_ECB) { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_3des_ecb; + } else { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_3des_cbc; + } + break; + default: + break; + } + } else { + switch (creq->mode) { + case QCE_MODE_ECB: + if (creq->encklen == AES128_KEY_SIZE) { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_aes_128_ecb; + } else { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_aes_256_ecb; + } + break; + + case QCE_MODE_CBC: + if (creq->encklen == AES128_KEY_SIZE) { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_aes_128_cbc_ctr; + } else { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_aes_256_cbc_ctr; + } + break; + + case QCE_MODE_CTR: + if (creq->encklen == AES128_KEY_SIZE) { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_aes_128_cbc_ctr; + } else { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_aes_256_cbc_ctr; + } + break; + + case QCE_MODE_XTS: + if (creq->encklen == AES128_KEY_SIZE) { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_aes_128_xts; + } else { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->cipher_aes_256_xts; + } + break; + case QCE_MODE_CCM: + if (creq->encklen == AES128_KEY_SIZE) { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->aead_aes_128_ccm; + } else { + pce_dev->ce_dm.chan_ce_in_cmd->cmdptr = + cmdptrlist->aead_aes_256_ccm; + } + break; + default: + break; + } + } + + switch (creq->mode) { + case QCE_MODE_CCM: + pce_dev->ce_dm.chan_ce_out_cmd->cmdptr = + cmdptrlist->aead_ce_out; + break; + case QCE_MODE_ECB: + pce_dev->ce_dm.chan_ce_out_cmd->cmdptr = + cmdptrlist->cipher_ce_out; + break; + default: + pce_dev->ce_dm.chan_ce_out_cmd->cmdptr = + cmdptrlist->cipher_ce_out_get_iv; + break; + } + + return 0; +} + +static int _ce_setup_cipher(struct qce_device *pce_dev, struct qce_req *creq, + uint32_t totallen_in, uint32_t coffset) +{ + uint32_t enck_size_in_word = creq->encklen / sizeof(uint32_t); + uint32_t encr_cfg = 0; + uint32_t ivsize = creq->ivsize; + struct ce_reg_buffer_addr *buffer = &pce_dev->ce_dm.buffer; + + if (creq->mode == QCE_MODE_XTS) + memcpy(buffer->encr_key, creq->enckey, + creq->encklen/2); + else + memcpy(buffer->encr_key, creq->enckey, creq->encklen); + + if ((creq->op == QCE_REQ_AEAD) && (creq->mode == QCE_MODE_CCM)) { + uint32_t noncelen32 = MAX_NONCE/sizeof(uint32_t); + uint32_t auth_cfg = 0; + + /* write nonce */ + 
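/*
+		 * For CCM, the cipher key also serves as the CBC-MAC key,
+		 * and the tag length is encoded as (authsize / 2 - 2), the
+		 * M' encoding of RFC 3610. AUTH_POS makes the MAC cover the
+		 * plaintext: before ciphering on encrypt, after on decrypt.
+		 */
+		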
memcpy(buffer->auth_nonce_info, creq->nonce, MAX_NONCE); + memcpy(buffer->auth_key, creq->enckey, creq->encklen); + + auth_cfg |= (noncelen32 << CRYPTO_AUTH_NONCE_NUM_WORDS); + auth_cfg &= ~(1 << CRYPTO_USE_HW_KEY_AUTH); + auth_cfg |= (1 << CRYPTO_LAST); + if (creq->dir == QCE_ENCRYPT) + auth_cfg |= (CRYPTO_AUTH_POS_BEFORE << CRYPTO_AUTH_POS); + else + auth_cfg |= (CRYPTO_AUTH_POS_AFTER << CRYPTO_AUTH_POS); + auth_cfg |= (((creq->authsize >> 1) - 2) << CRYPTO_AUTH_SIZE); + auth_cfg |= (CRYPTO_AUTH_MODE_CCM << CRYPTO_AUTH_MODE); + if (creq->authklen == AES128_KEY_SIZE) + auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES128 << + CRYPTO_AUTH_KEY_SIZE); + else { + if (creq->authklen == AES256_KEY_SIZE) + auth_cfg |= (CRYPTO_AUTH_KEY_SZ_AES256 << + CRYPTO_AUTH_KEY_SIZE); + } + auth_cfg |= (CRYPTO_AUTH_ALG_AES << CRYPTO_AUTH_ALG); + *((uint32_t *)(buffer->auth_seg_cfg_size_start)) = auth_cfg; + + if (creq->dir == QCE_ENCRYPT) + *((uint32_t *)(buffer->auth_seg_cfg_size_start) + 1) = + totallen_in; + else + *((uint32_t *)(buffer->auth_seg_cfg_size_start) + 1) = + (totallen_in - creq->authsize); + *((uint32_t *)(buffer->auth_seg_cfg_size_start) + 2) = 0; + } + + *((uint32_t *)(buffer->auth_seg_cfg_size_start) + 2) = 0; + + switch (creq->mode) { + case QCE_MODE_ECB: + encr_cfg |= (CRYPTO_ENCR_MODE_ECB << CRYPTO_ENCR_MODE); + break; + + case QCE_MODE_CBC: + encr_cfg |= (CRYPTO_ENCR_MODE_CBC << CRYPTO_ENCR_MODE); + break; + + case QCE_MODE_XTS: + encr_cfg |= (CRYPTO_ENCR_MODE_XTS << CRYPTO_ENCR_MODE); + break; + + case QCE_MODE_CCM: + encr_cfg |= (CRYPTO_ENCR_MODE_CCM << CRYPTO_ENCR_MODE); + break; + + case QCE_MODE_CTR: + default: + encr_cfg |= (CRYPTO_ENCR_MODE_CTR << CRYPTO_ENCR_MODE); + break; + } + pce_dev->mode = creq->mode; + + switch (creq->alg) { + case CIPHER_ALG_DES: + if (creq->mode != QCE_MODE_ECB) + memcpy(buffer->encr_cntr_iv, creq->iv, ivsize); + + encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_DES << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG)); + break; + + case CIPHER_ALG_3DES: + if (creq->mode != QCE_MODE_ECB) + memcpy(buffer->encr_cntr_iv, creq->iv, ivsize); + + encr_cfg |= ((CRYPTO_ENCR_KEY_SZ_3DES << CRYPTO_ENCR_KEY_SZ) | + (CRYPTO_ENCR_ALG_DES << CRYPTO_ENCR_ALG)); + break; + + case CIPHER_ALG_AES: + default: + if (creq->mode == QCE_MODE_XTS) { + memcpy(buffer->encr_xts_key, (creq->enckey + + creq->encklen/2), creq->encklen/2); + *((uint32_t *)(buffer->encr_xts_du_size)) = + creq->cryptlen; + + } + if (creq->mode != QCE_MODE_ECB) { + if (creq->mode == QCE_MODE_XTS) + _byte_stream_swap_to_net_words( + (uint32_t *)(buffer->encr_cntr_iv), + creq->iv, ivsize); + else + memcpy(buffer->encr_cntr_iv, creq->iv, + ivsize); + } + /* set number of counter bits */ + *((uint32_t *)(buffer->encr_mask)) = (uint32_t)0xffffffff; + + if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) { + encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 << + CRYPTO_ENCR_KEY_SZ); + encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG; + } else { + uint32_t key_size; + + if (creq->mode == QCE_MODE_XTS) { + key_size = creq->encklen/2; + enck_size_in_word = key_size/sizeof(uint32_t); + } else { + key_size = creq->encklen; + } + + switch (key_size) { + case AES128_KEY_SIZE: + encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES128 << + CRYPTO_ENCR_KEY_SZ); + break; + case AES256_KEY_SIZE: + default: + encr_cfg |= (CRYPTO_ENCR_KEY_SZ_AES256 << + CRYPTO_ENCR_KEY_SZ); + break; + } /* end of switch (creq->encklen) */ + + encr_cfg |= CRYPTO_ENCR_ALG_AES << CRYPTO_ENCR_ALG; + } /* else of if (creq->op == QCE_REQ_ABLK_CIPHER_NO_KEY) */ + break; + } /* end of switch 
(creq->mode) */ + + /* write encr seg cfg */ + encr_cfg |= ((creq->dir == QCE_ENCRYPT) ? 1 : 0) << CRYPTO_ENCODE; + + /* write encr seg cfg */ + *((uint32_t *)(buffer->encr_seg_cfg_size_start)) = encr_cfg; + /* write encr seg size */ + if ((creq->mode == QCE_MODE_CCM) && (creq->dir == QCE_DECRYPT)) + *((uint32_t *)(buffer->encr_seg_cfg_size_start) + 1) = + (creq->cryptlen + creq->authsize); + else + *((uint32_t *)(buffer->encr_seg_cfg_size_start) + 1) = + creq->cryptlen; + + + *((uint32_t *)(buffer->encr_seg_cfg_size_start) + 2) = + (coffset & 0xffff); + + *((uint32_t *)(buffer->seg_size)) = totallen_in; + + _ce_setup_cipher_cmdrptrlist(pce_dev, creq); + return 0; +}; + +static int _aead_complete(struct qce_device *pce_dev) +{ + struct aead_request *areq; + + areq = (struct aead_request *) pce_dev->areq; + + if (areq->src != areq->dst) { + dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, + DMA_FROM_DEVICE); + } + dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + + dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, + DMA_TO_DEVICE); + + /* check MAC */ + if (pce_dev->mode == QCE_MODE_CCM) { + uint32_t result; + + result = + (uint32_t)(*((uint32_t *)pce_dev->ce_dm.buffer.status)); + result &= (1 << CRYPTO_MAC_FAILED); + result |= (pce_dev->ce_dm.chan_ce_in_status | + pce_dev->ce_dm.chan_ce_out_status); + pce_dev->qce_cb(areq, pce_dev->ce_dm.buffer.auth_result, NULL, + result); + } + return 0; +}; + +static void _sha_complete(struct qce_device *pce_dev) +{ + struct ahash_request *areq; + + areq = (struct ahash_request *) pce_dev->areq; + dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + DMA_TO_DEVICE); + + pce_dev->qce_cb(areq, pce_dev->ce_dm.buffer.auth_result, + pce_dev->ce_dm.buffer.auth_byte_count, + pce_dev->ce_dm.chan_ce_in_status); + +}; + +static int _ablk_cipher_complete(struct qce_device *pce_dev) +{ + struct ablkcipher_request *areq; + + areq = (struct ablkcipher_request *) pce_dev->areq; + + if (areq->src != areq->dst) { + dma_unmap_sg(pce_dev->pdev, areq->dst, + pce_dev->dst_nents, DMA_FROM_DEVICE); + } + dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL :
+						DMA_TO_DEVICE);
+
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		pce_dev->qce_cb(areq, NULL, NULL,
+				pce_dev->ce_dm.chan_ce_in_status |
+				pce_dev->ce_dm.chan_ce_out_status);
+	} else {
+
+		pce_dev->qce_cb(areq, NULL, pce_dev->ce_dm.buffer.encr_cntr_iv,
+				pce_dev->ce_dm.chan_ce_in_status |
+				pce_dev->ce_dm.chan_ce_out_status);
+	}
+
+	return 0;
+};
+
+static int _ablk_cipher_use_pmem_complete(struct qce_device *pce_dev)
+{
+	struct ablkcipher_request *areq;
+
+	areq = (struct ablkcipher_request *) pce_dev->areq;
+
+	if (pce_dev->mode == QCE_MODE_ECB) {
+		pce_dev->qce_cb(areq, NULL, NULL,
+				pce_dev->ce_dm.chan_ce_in_status |
+				pce_dev->ce_dm.chan_ce_out_status);
+	} else {
+		pce_dev->qce_cb(areq, NULL, pce_dev->ce_dm.buffer.encr_cntr_iv,
+				pce_dev->ce_dm.chan_ce_in_status |
+				pce_dev->ce_dm.chan_ce_out_status);
+	}
+
+	return 0;
+};
+
+static int qce_split_and_insert_dm_desc(struct dmov_desc *pdesc,
+			unsigned int plen, unsigned int paddr, int *index)
+{
+	while (plen > QCE_FIFO_SIZE) {
+		pdesc->len = QCE_FIFO_SIZE;
+		if (paddr > 0) {
+			pdesc->addr = paddr;
+			paddr += QCE_FIFO_SIZE;
+		}
+		plen -= pdesc->len;
+		if (plen > 0) {
+			*index = (*index) + 1;
+			if ((*index) >= QCE_MAX_NUM_DESC)
+				return -ENOMEM;
+			pdesc++;
+		}
+	}
+	if ((plen > 0) && (plen <= QCE_FIFO_SIZE)) {
+		pdesc->len = plen;
+		if (paddr > 0)
+			pdesc->addr = paddr;
+	}
+
+	return 0;
+}
+
+static int _chain_sg_buffer_in(struct qce_device *pce_dev,
+		struct scatterlist *sg, unsigned int nbytes)
+{
+	unsigned int len;
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_dm.ce_in_src_desc +
+			pce_dev->ce_dm.ce_in_src_desc_index;
+	/*
+	 * Two consecutive chunks may be handled by the same
+	 * buffer descriptor.
+	 */
+	while (nbytes > 0) {
+		len = min(nbytes, sg_dma_len(sg));
+		dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+		nbytes -= len;
+		if (dlen == 0) {
+			pdesc->addr = sg_dma_address(sg);
+			pdesc->len = len;
+			if (pdesc->len > QCE_FIFO_SIZE)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+					sg_dma_address(sg),
+					&pce_dev->ce_dm.ce_in_src_desc_index);
+		} else if (sg_dma_address(sg) == (pdesc->addr + dlen)) {
+			pdesc->len = dlen + len;
+			if (pdesc->len > QCE_FIFO_SIZE)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+					pdesc->addr,
+					&pce_dev->ce_dm.ce_in_src_desc_index);
+		} else {
+			pce_dev->ce_dm.ce_in_src_desc_index++;
+			if (pce_dev->ce_dm.ce_in_src_desc_index >=
+							QCE_MAX_NUM_DESC)
+				return -ENOMEM;
+			pdesc++;
+			pdesc->len = len;
+			pdesc->addr = sg_dma_address(sg);
+			if (pdesc->len > QCE_FIFO_SIZE)
+				qce_split_and_insert_dm_desc(pdesc, pdesc->len,
+					sg_dma_address(sg),
+					&pce_dev->ce_dm.ce_in_src_desc_index);
+		}
+		if (nbytes > 0)
+			sg = sg_next(sg);
+	}
+	return 0;
+}
+
+static int _chain_pm_buffer_in(struct qce_device *pce_dev,
+		unsigned int pmem, unsigned int nbytes)
+{
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_dm.ce_in_src_desc +
+			pce_dev->ce_dm.ce_in_src_desc_index;
+	dlen = pdesc->len & ADM_DESC_LENGTH_MASK;
+	if (dlen == 0) {
+		pdesc->addr = pmem;
+		pdesc->len = nbytes;
+	} else if (pmem == (pdesc->addr + dlen)) {
+		pdesc->len = dlen + nbytes;
+	} else {
+		pce_dev->ce_dm.ce_in_src_desc_index++;
+		if (pce_dev->ce_dm.ce_in_src_desc_index >=
+						QCE_MAX_NUM_DESC)
+			return -ENOMEM;
+		pdesc++;
+		pdesc->len = nbytes;
+		pdesc->addr = pmem;
+	}
+	return 0;
+}
+
+static void _chain_buffer_in_init(struct qce_device *pce_dev)
+{
+	struct dmov_desc *pdesc;
+
+	pce_dev->ce_dm.ce_in_src_desc_index = 0;
+	pce_dev->ce_dm.ce_in_dst_desc_index = 0;
+	pdesc = pce_dev->ce_dm.ce_in_src_desc;
+	pdesc->len = 0;
+}
+
+static void _ce_in_final(struct qce_device *pce_dev, unsigned total)
+{
+	struct dmov_desc *pdesc;
+	dmov_sg *pcmd;
+
+	pdesc = pce_dev->ce_dm.ce_in_src_desc +
+			pce_dev->ce_dm.ce_in_src_desc_index;
+	pdesc->len |= ADM_DESC_LAST;
+
+	pdesc = pce_dev->ce_dm.ce_in_dst_desc;
+	if (total > QCE_FIFO_SIZE) {
+		qce_split_and_insert_dm_desc(pdesc, total, 0,
+				&pce_dev->ce_dm.ce_in_dst_desc_index);
+		pdesc = pce_dev->ce_dm.ce_in_dst_desc +
+				pce_dev->ce_dm.ce_in_dst_desc_index;
+		pdesc->len |= ADM_DESC_LAST;
+	} else
+		pdesc->len = ADM_DESC_LAST | total;
+
+	pcmd = (dmov_sg *) pce_dev->ce_dm.cmdlist.ce_data_in;
+	pcmd->cmd |= CMD_LC;
+
+}
+
+#ifdef QCE_DEBUG
+static void _ce_in_dump(struct qce_device *pce_dev)
+{
+	int i;
+	struct dmov_desc *pdesc;
+
+	dev_info(pce_dev->pdev, "_ce_in_dump: src\n");
+	for (i = 0; i <= pce_dev->ce_dm.ce_in_src_desc_index; i++) {
+		pdesc = pce_dev->ce_dm.ce_in_src_desc + i;
+		dev_info(pce_dev->pdev, "%x, %x\n", pdesc->addr,
+				pdesc->len);
+	}
+	dev_info(pce_dev->pdev, "_ce_in_dump: dst\n");
+	for (i = 0; i <= pce_dev->ce_dm.ce_in_dst_desc_index; i++) {
+		pdesc = pce_dev->ce_dm.ce_in_dst_desc + i;
+		dev_info(pce_dev->pdev, "%x, %x\n", pdesc->addr,
+				pdesc->len);
+	}
+};
+
+static void _ce_out_dump(struct qce_device *pce_dev)
+{
+	int i;
+	struct dmov_desc *pdesc;
+
+	dev_info(pce_dev->pdev, "_ce_out_dump: src\n");
+	for (i = 0; i <= pce_dev->ce_dm.ce_out_src_desc_index; i++) {
+		pdesc = pce_dev->ce_dm.ce_out_src_desc + i;
+		dev_info(pce_dev->pdev, "%x, %x\n", pdesc->addr,
+				pdesc->len);
+	}
+
+	dev_info(pce_dev->pdev, "_ce_out_dump: dst\n");
+	for (i = 0; i <= pce_dev->ce_dm.ce_out_dst_desc_index; i++) {
+		pdesc = pce_dev->ce_dm.ce_out_dst_desc + i;
+		dev_info(pce_dev->pdev, "%x, %x\n", pdesc->addr,
+				pdesc->len);
+	}
+};
+
+#else
+
+static void _ce_in_dump(struct qce_device *pce_dev)
+{
+};
+
+static void _ce_out_dump(struct qce_device *pce_dev)
+{
+};
+
+#endif
+
+static int _chain_sg_buffer_out(struct qce_device *pce_dev,
+		struct scatterlist *sg, unsigned int nbytes)
+{
+	unsigned int len;
+	unsigned int dlen;
+	struct dmov_desc *pdesc;
+
+	pdesc = pce_dev->ce_dm.ce_out_dst_desc +
+			pce_dev->ce_dm.ce_out_dst_desc_index;
+	/*
+	 * Two consecutive chunks may be handled by the same
+	 * buffer descriptor.
+ */ + while (nbytes > 0) { + len = min(nbytes, sg_dma_len(sg)); + dlen = pdesc->len & ADM_DESC_LENGTH_MASK; + nbytes -= len; + if (dlen == 0) { + pdesc->addr = sg_dma_address(sg); + pdesc->len = len; + if (pdesc->len > QCE_FIFO_SIZE) + qce_split_and_insert_dm_desc(pdesc, pdesc->len, + sg_dma_address(sg), + &pce_dev->ce_dm.ce_out_dst_desc_index); + } else if (sg_dma_address(sg) == (pdesc->addr + dlen)) { + pdesc->len = dlen + len; + if (pdesc->len > QCE_FIFO_SIZE) + qce_split_and_insert_dm_desc(pdesc, pdesc->len, + pdesc->addr, + &pce_dev->ce_dm.ce_out_dst_desc_index); + + } else { + pce_dev->ce_dm.ce_out_dst_desc_index++; + if (pce_dev->ce_dm.ce_out_dst_desc_index >= + QCE_MAX_NUM_DESC) + return -EIO; + pdesc++; + pdesc->len = len; + pdesc->addr = sg_dma_address(sg); + if (pdesc->len > QCE_FIFO_SIZE) + qce_split_and_insert_dm_desc(pdesc, pdesc->len, + sg_dma_address(sg), + &pce_dev->ce_dm.ce_out_dst_desc_index); + + } + if (nbytes > 0) + sg = sg_next(sg); + } + return 0; +} + +static int _chain_pm_buffer_out(struct qce_device *pce_dev, + unsigned int pmem, unsigned int nbytes) +{ + unsigned int dlen; + struct dmov_desc *pdesc; + + pdesc = pce_dev->ce_dm.ce_out_dst_desc + + pce_dev->ce_dm.ce_out_dst_desc_index; + dlen = pdesc->len & ADM_DESC_LENGTH_MASK; + + if (dlen == 0) { + pdesc->addr = pmem; + pdesc->len = nbytes; + } else if (pmem == (pdesc->addr + dlen)) { + pdesc->len = dlen + nbytes; + } else { + pce_dev->ce_dm.ce_out_dst_desc_index++; + if (pce_dev->ce_dm.ce_out_dst_desc_index >= QCE_MAX_NUM_DESC) + return -EIO; + pdesc++; + pdesc->len = nbytes; + pdesc->addr = pmem; + } + return 0; +}; + +static void _chain_buffer_out_init(struct qce_device *pce_dev) +{ + struct dmov_desc *pdesc; + + pce_dev->ce_dm.ce_out_dst_desc_index = 0; + pce_dev->ce_dm.ce_out_src_desc_index = 0; + pdesc = pce_dev->ce_dm.ce_out_dst_desc; + pdesc->len = 0; +}; + +static void _ce_out_final(struct qce_device *pce_dev, unsigned total) +{ + struct dmov_desc *pdesc; + dmov_sg *pcmd; + + pdesc = pce_dev->ce_dm.ce_out_dst_desc + + pce_dev->ce_dm.ce_out_dst_desc_index; + pdesc->len |= ADM_DESC_LAST; + + pdesc = pce_dev->ce_dm.ce_out_src_desc + + pce_dev->ce_dm.ce_out_src_desc_index; + if (total > QCE_FIFO_SIZE) { + qce_split_and_insert_dm_desc(pdesc, total, 0, + &pce_dev->ce_dm.ce_out_src_desc_index); + pdesc = pce_dev->ce_dm.ce_out_src_desc + + pce_dev->ce_dm.ce_out_src_desc_index; + pdesc->len |= ADM_DESC_LAST; + } else + pdesc->len = ADM_DESC_LAST | total; + + pcmd = (dmov_sg *) pce_dev->ce_dm.cmdlist.ce_data_out; + pcmd->cmd |= CMD_LC; +}; + +static void _aead_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->ce_dm.chan_ce_in_status = -1; + } else { + pce_dev->ce_dm.chan_ce_in_status = 0; + } + + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_COMP; + if (pce_dev->ce_dm.chan_ce_out_state == QCE_CHAN_STATE_COMP) { + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _aead_complete(pce_dev); + } +}; + +static void _aead_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + 
result); + pce_dev->ce_dm.chan_ce_out_status = -1; + } else { + pce_dev->ce_dm.chan_ce_out_status = 0; + }; + + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_COMP; + if (pce_dev->ce_dm.chan_ce_in_state == QCE_CHAN_STATE_COMP) { + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _aead_complete(pce_dev); + } + +}; + +static void _sha_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->ce_dm.chan_ce_in_status = -1; + } else { + pce_dev->ce_dm.chan_ce_in_status = 0; + } + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IDLE; + _sha_complete(pce_dev); +}; + +static void _ablk_cipher_ce_in_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->ce_dm.chan_ce_in_status = -1; + } else { + pce_dev->ce_dm.chan_ce_in_status = 0; + } + + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_COMP; + if (pce_dev->ce_dm.chan_ce_out_state == QCE_CHAN_STATE_COMP) { + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _ablk_cipher_complete(pce_dev); + } +}; + +static void _ablk_cipher_ce_out_call_back(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->ce_dm.chan_ce_out_status = -1; + } else { + pce_dev->ce_dm.chan_ce_out_status = 0; + }; + + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_COMP; + if (pce_dev->ce_dm.chan_ce_in_state == QCE_CHAN_STATE_COMP) { + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _ablk_cipher_complete(pce_dev); + } +}; + + +static void _ablk_cipher_ce_in_call_back_pmem(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->ce_dm.chan_ce_in_status = -1; + } else { + pce_dev->ce_dm.chan_ce_in_status = 0; + } + + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_COMP; + if (pce_dev->ce_dm.chan_ce_out_state == QCE_CHAN_STATE_COMP) { + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _ablk_cipher_use_pmem_complete(pce_dev); + } +}; + +static void _ablk_cipher_ce_out_call_back_pmem(struct msm_dmov_cmd *cmd_ptr, + unsigned int result, struct msm_dmov_errdata *err) +{ + struct qce_device *pce_dev; + + pce_dev = (struct qce_device *) cmd_ptr->user; + if (result != ADM_STATUS_OK) { + dev_err(pce_dev->pdev, "Qualcomm ADM status error %x\n", + result); + pce_dev->ce_dm.chan_ce_out_status = -1; + } else { + pce_dev->ce_dm.chan_ce_out_status = 0; + }; + + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_COMP; + if (pce_dev->ce_dm.chan_ce_in_state == 
QCE_CHAN_STATE_COMP) { + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_IDLE; + + /* done */ + _ablk_cipher_use_pmem_complete(pce_dev); + } +}; + +static int qce_setup_cmd_buffers(struct qce_device *pce_dev, + unsigned char **pvaddr) +{ + struct ce_reg_buffers *addr = (struct ce_reg_buffers *)(*pvaddr); + struct ce_reg_buffer_addr *buffer = &pce_dev->ce_dm.buffer; + + /* + * Designate chunks of the allocated memory to various + * buffer pointers + */ + buffer->reset_buf_64 = addr->reset_buf_64; + buffer->version = addr->version; + buffer->encr_seg_cfg_size_start = addr->encr_seg_cfg_size_start; + buffer->encr_key = addr->encr_key; + buffer->encr_xts_key = addr->encr_xts_key; + buffer->encr_xts_du_size = addr->encr_xts_du_size; + buffer->encr_cntr_iv = addr->encr_cntr_iv; + buffer->encr_mask = addr->encr_mask; + buffer->auth_seg_cfg_size_start = addr->auth_seg_cfg_size_start; + buffer->auth_key = addr->auth_key; + buffer->auth_iv = addr->auth_iv; + buffer->auth_result = addr->auth_result; + buffer->auth_nonce_info = addr->auth_nonce_info; + buffer->auth_byte_count = addr->auth_byte_count; + buffer->seg_size = addr->seg_size; + buffer->go_proc = addr->go_proc; + buffer->status = addr->status; + buffer->pad = addr->pad; + + memset(buffer->reset_buf_64, 0, 64); + *((uint32_t *)buffer->encr_mask) = (uint32_t)(0xffffffff); + *((uint32_t *)buffer->go_proc) = (uint32_t)(1 << CRYPTO_GO); + + *pvaddr += sizeof(struct ce_reg_buffers); + + return 0; + +} + +static int _setup_cipher_cmdlists(struct qce_device *pce_dev, + unsigned char **pvaddr) +{ + dmov_s *pscmd = (dmov_s *)(*pvaddr); + + /* + * Designate chunks of the allocated memory to various + * command list pointers related to cipher operation + */ + pce_dev->ce_dm.cmdlist.set_cipher_cfg = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_ENCR_SEG_CFG_REG + + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 3; + pscmd->src = + GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_seg_cfg_size_start); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_cipher_aes_128_key = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_ENCR_KEY0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 4; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_key); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_cipher_aes_256_key = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_ENCR_KEY0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 8; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_key); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_cipher_des_key = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_ENCR_KEY0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 2; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_key); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_cipher_3des_key = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_ENCR_KEY0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 6; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_key); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_cipher_aes_128_xts_key = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) 
(CRYPTO_ENCR_XTS_KEY0_REG + + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 4; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_xts_key); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_cipher_aes_256_xts_key = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_ENCR_XTS_KEY0_REG + + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 8; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_xts_key); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_cipher_xts_du_size = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_ENCR_XTS_DU_SIZE_REG + + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 4; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_xts_du_size); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_cipher_aes_iv = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_CNTR0_IV0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 4; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_cntr_iv); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_cipher_des_iv = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_CNTR0_IV0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 2; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_cntr_iv); + pscmd++; + + pce_dev->ce_dm.cmdlist.get_cipher_iv = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->src = (unsigned) (CRYPTO_CNTR0_IV0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 4; + pscmd->dst = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_cntr_iv); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_cipher_mask = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_CNTR_MASK_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.encr_mask); + pscmd++; + + /* RESET CIPHER AND AUTH REGISTERS COMMAND LISTS*/ + + pce_dev->ce_dm.cmdlist.reset_cipher_key = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_ENCR_KEY0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 8; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.reset_buf_64); + pscmd++; + + pce_dev->ce_dm.cmdlist.reset_cipher_xts_key = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_ENCR_XTS_KEY0_REG + + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 8; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.reset_buf_64); + pscmd++; + + pce_dev->ce_dm.cmdlist.reset_cipher_iv = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_CNTR0_IV0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 4; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.reset_buf_64); + pscmd++; + + pce_dev->ce_dm.cmdlist.reset_cipher_cfg = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_ENCR_SEG_CFG_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.reset_buf_64); + pscmd++; + + *pvaddr = (unsigned char *) pscmd; + + return 0; +} + +static int _setup_auth_cmdlists(struct qce_device *pce_dev, + unsigned char **pvaddr) +{ + dmov_s *pscmd = (dmov_s *)(*pvaddr); + + /* + * Designate chunks of the allocated memory to various + * command list pointers related to authentication operation + */ + 
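/*
+	 * Each dmov_s entry below is a single-mode ADM command that copies a
+	 * register image between the coherent memory buffer (GET_PHYS_ADDR of
+	 * a ce_dm.buffer field) and the CE auth registers at phy_iobase.
+	 * CMD_SRC_SWAP_BYTES/CMD_SRC_SWAP_SHORTS byte-swap key, IV and result
+	 * material; lengths are multiples of CRYPTO_REG_SIZE.
+	 */
+	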
pce_dev->ce_dm.cmdlist.set_auth_cfg = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_AUTH_SEG_CFG_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 3; + pscmd->src = + GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_seg_cfg_size_start); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_auth_key_128 = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_AUTH_KEY0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 4; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_key); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_auth_key_256 = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_AUTH_KEY0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 8; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_key); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_auth_key_512 = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_AUTH_KEY0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 16; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_key); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_auth_iv_16 = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 4; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_iv); + pscmd++; + + pce_dev->ce_dm.cmdlist.get_auth_result_16 = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 4; + pscmd->dst = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_result); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_auth_iv_20 = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 5; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_iv); + pscmd++; + + pce_dev->ce_dm.cmdlist.get_auth_result_20 = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 5; + pscmd->dst = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_result); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_auth_iv_32 = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 8; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_iv); + pscmd++; + + + pce_dev->ce_dm.cmdlist.get_auth_result_32 = pscmd; + pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES | + CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE; + pscmd->src = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 8; + pscmd->dst = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_result); + pscmd++; + + pce_dev->ce_dm.cmdlist.set_auth_byte_count = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_AUTH_BYTECNT0_REG + + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE * 4; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_byte_count); + pscmd++; + + pce_dev->ce_dm.cmdlist.get_auth_byte_count = pscmd; + pscmd->cmd = CMD_LC | 
CMD_MODE_SINGLE;
+ pscmd->src = (unsigned) (CRYPTO_AUTH_BYTECNT0_REG +
+ pce_dev->phy_iobase);
+ pscmd->len = CRYPTO_REG_SIZE * 4;
+ pscmd->dst = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_byte_count);
+ pscmd++;
+
+ pce_dev->ce_dm.cmdlist.set_auth_nonce_info = pscmd;
+ pscmd->cmd = CMD_LC | CMD_SRC_SWAP_BYTES |
+ CMD_SRC_SWAP_SHORTS | CMD_MODE_SINGLE;
+ pscmd->dst = (unsigned) (CRYPTO_AUTH_INFO_NONCE0_REG +
+ pce_dev->phy_iobase);
+ pscmd->len = CRYPTO_REG_SIZE * 4;
+ pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.auth_nonce_info);
+ pscmd++;
+
+ /* RESET CIPHER AND AUTH REGISTERS COMMAND LISTS*/
+
+ pce_dev->ce_dm.cmdlist.reset_auth_key = pscmd;
+ pscmd->cmd = CMD_LC | CMD_MODE_SINGLE;
+ pscmd->dst = (unsigned) (CRYPTO_AUTH_KEY0_REG + pce_dev->phy_iobase);
+ pscmd->len = CRYPTO_REG_SIZE * 16;
+ pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.reset_buf_64);
+ pscmd++;
+
+ pce_dev->ce_dm.cmdlist.reset_auth_iv = pscmd;
+ pscmd->cmd = CMD_LC | CMD_MODE_SINGLE;
+ pscmd->dst = (unsigned) (CRYPTO_AUTH_IV0_REG + pce_dev->phy_iobase);
+ pscmd->len = CRYPTO_REG_SIZE * 16;
+ pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.reset_buf_64);
+ pscmd++;
+
+ pce_dev->ce_dm.cmdlist.reset_auth_cfg = pscmd;
+ pscmd->cmd = CMD_LC | CMD_MODE_SINGLE;
+ pscmd->dst = (unsigned) (CRYPTO_AUTH_SEG_CFG_REG + pce_dev->phy_iobase);
+ pscmd->len = CRYPTO_REG_SIZE;
+ pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.reset_buf_64);
+ pscmd++;
+
+
+ pce_dev->ce_dm.cmdlist.reset_auth_byte_count = pscmd;
+ pscmd->cmd = CMD_LC | CMD_MODE_SINGLE;
+ pscmd->dst = (unsigned) (CRYPTO_AUTH_BYTECNT0_REG +
+ pce_dev->phy_iobase);
+ pscmd->len = CRYPTO_REG_SIZE * 4;
+ pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.reset_buf_64);
+ pscmd++;
+
+ /* WAIT UNTIL MAC OP IS DONE*/
+
+ pce_dev->ce_dm.cmdlist.get_status_wait = pscmd;
+ pscmd->cmd = CMD_LC | CMD_MODE_SINGLE;
+ pscmd->src = (unsigned) (CRYPTO_STATUS_REG + pce_dev->phy_iobase);
+ pscmd->len = CRYPTO_REG_SIZE;
+ pscmd->dst = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.status);
+ pscmd++;
+
+ *pvaddr = (unsigned char *) pscmd;
+
+ return 0;
+}
+
+static int qce_setup_cmdlists(struct qce_device *pce_dev,
+ unsigned char **pvaddr)
+{
+ dmov_sg *pcmd;
+ dmov_s *pscmd;
+ unsigned char *vaddr = *pvaddr;
+ struct dmov_desc *pdesc;
+ int i = 0;
+
+ /*
+ * Designate chunks of the allocated memory to various
+ * command list pointers related to the operations defined
+ * in the ce_cmdlists structure.
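+ * Besides the per-register commands built by the cipher and auth
+ * helpers, this function also creates the HW version probe, status,
+ * go_proc and scatter-gather ce_data_in/ce_data_out commands that
+ * stream payload between memory and the CRYPTO_DATA_SHADOW registers.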
+ */ + vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); + *pvaddr = (unsigned char *) vaddr; + + _setup_cipher_cmdlists(pce_dev, pvaddr); + _setup_auth_cmdlists(pce_dev, pvaddr); + + pscmd = (dmov_s *)(*pvaddr); + + /* GET HW VERSION COMMAND LIST */ + pce_dev->ce_dm.cmdlist.get_hw_version = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE | CMD_OCB; + pscmd->src = (unsigned) (CRYPTO_VERSION_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE; + pscmd->dst = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.version); + pscmd++; + + + /* SET SEG SIZE REGISTER and OCB COMMAND LIST */ + pce_dev->ce_dm.cmdlist.set_seg_size_ocb = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE | CMD_OCB; + pscmd->dst = (unsigned) (CRYPTO_SEG_SIZE_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.seg_size); + pscmd++; + + + /* OCU COMMAND LIST */ + pce_dev->ce_dm.cmdlist.get_status_ocu = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE | CMD_OCU; + pscmd->src = (unsigned) (CRYPTO_STATUS_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE; + pscmd->dst = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.status); + pscmd++; + + /* CLEAR STATUS COMMAND LIST */ + pce_dev->ce_dm.cmdlist.clear_status = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE | CMD_OCU; + pscmd->dst = (unsigned) (CRYPTO_STATUS_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.status); + pscmd++; + + /* SET GO_PROC REGISTERS COMMAND LIST */ + pce_dev->ce_dm.cmdlist.set_go_proc = pscmd; + pscmd->cmd = CMD_LC | CMD_MODE_SINGLE; + pscmd->dst = (unsigned) (CRYPTO_GOPROC_REG + pce_dev->phy_iobase); + pscmd->len = CRYPTO_REG_SIZE; + pscmd->src = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.go_proc); + pscmd++; + + pcmd = (dmov_sg *)pscmd; + pce_dev->ce_dm.cmdlist.ce_data_in = pcmd; + /* swap byte and half word , dst crci , scatter gather */ + pcmd->cmd = CMD_DST_SWAP_BYTES | CMD_DST_SWAP_SHORTS | + CMD_DST_CRCI(pce_dev->ce_dm.crci_in) | CMD_MODE_SG; + + pdesc = pce_dev->ce_dm.ce_in_src_desc; + pdesc->addr = 0; /* to be filled in each operation */ + pdesc->len = 0; /* to be filled in each operation */ + + pdesc = pce_dev->ce_dm.ce_in_dst_desc; + for (i = 0; i < QCE_MAX_NUM_DESC; i++) { + pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase); + pdesc->len = 0; /* to be filled in each operation */ + pdesc++; + } + pcmd->src_dscr = GET_PHYS_ADDR(pce_dev->ce_dm.ce_in_src_desc); + pcmd->dst_dscr = GET_PHYS_ADDR(pce_dev->ce_dm.ce_in_dst_desc); + pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) | + DST_INDEX_SG_CMD(0); + + + pcmd++; + pce_dev->ce_dm.cmdlist.ce_data_out = pcmd; + /* swap byte, half word, source crci, scatter gather */ + pcmd->cmd = CMD_SRC_SWAP_BYTES | CMD_SRC_SWAP_SHORTS | + CMD_SRC_CRCI(pce_dev->ce_dm.crci_out) | CMD_MODE_SG; + + pdesc = pce_dev->ce_dm.ce_out_src_desc; + for (i = 0; i < QCE_MAX_NUM_DESC; i++) { + pdesc->addr = (CRYPTO_DATA_SHADOW0 + pce_dev->phy_iobase); + pdesc->len = 0; /* to be filled in each operation */ + pdesc++; + } + + pdesc = pce_dev->ce_dm.ce_out_dst_desc; + pdesc->addr = 0; /* to be filled in each operation */ + pdesc->len = 0; /* to be filled in each operation */ + + pcmd->src_dscr = GET_PHYS_ADDR(pce_dev->ce_dm.ce_out_src_desc); + pcmd->dst_dscr = GET_PHYS_ADDR(pce_dev->ce_dm.ce_out_dst_desc); + pcmd->_reserved = LI_SG_CMD | SRC_INDEX_SG_CMD(0) | + DST_INDEX_SG_CMD(0); + pcmd++; + + *pvaddr = (unsigned char *) pcmd; + + return 0; +} + +static int _setup_cipher_cmdptrlists(struct qce_device *pce_dev, + 
unsigned char **pvaddr) +{ + uint32_t * cmd_ptr_vaddr = (uint32_t *)(*pvaddr); + struct ce_cmdlists *cmdlist = &pce_dev->ce_dm.cmdlist; + struct ce_cmdptrlists_ops *cmdptrlist = &pce_dev->ce_dm.cmdptrlist; + + /* + * Designate chunks of the allocated memory to various + * command list pointers related to cipher operations defined + * in ce_cmdptrlists_ops structure. + */ + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_aes_128_cbc_ctr = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_128_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_mask); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_aes_256_cbc_ctr = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_256_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_mask); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_aes_128_ecb = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_128_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_mask); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *)ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_aes_256_ecb = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_256_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_mask); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *)ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_aes_128_xts = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_128_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_128_xts_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_xts_du_size); + *cmd_ptr_vaddr++ = 
QCE_SET_CMD_PTR(cmdlist->set_cipher_mask); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_aes_256_xts = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_256_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_256_xts_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_xts_du_size); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_mask); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *)ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_des_cbc = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_des_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_des_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *)ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_des_ecb = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_des_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_3des_cbc = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_3des_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_des_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_3des_ecb = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_3des_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->cipher_ce_out = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->ce_data_out); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->get_status_ocu); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + 
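+ /*
+ * Same as cipher_ce_out, with an extra step that reads the updated
+ * counter/IV back from CRYPTO_CNTR0_IV0_REG before collecting status.
+ */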
cmdptrlist->cipher_ce_out_get_iv = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->ce_data_out); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_cipher_iv); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->get_status_ocu); + + *pvaddr = (unsigned char *) cmd_ptr_vaddr; + + return 0; +} + +static int _setup_auth_cmdptrlists(struct qce_device *pce_dev, + unsigned char **pvaddr) +{ + uint32_t * cmd_ptr_vaddr = (uint32_t *)(*pvaddr); + struct ce_cmdlists *cmdlist = &pce_dev->ce_dm.cmdlist; + struct ce_cmdptrlists_ops *cmdptrlist = &pce_dev->ce_dm.cmdptrlist; + + /* + * Designate chunks of the allocated memory to various + * command list pointers related to authentication operations + * defined in ce_cmdptrlists_ops structure. + */ + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->auth_sha1 = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_iv_20); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->ce_data_in); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_result_20); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->get_status_ocu); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->auth_sha256 = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_iv_32); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->ce_data_in); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_result_32); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->get_status_ocu); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->auth_sha1_hmac = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_key_512); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_iv_20); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->ce_data_in); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_result_20); + *cmd_ptr_vaddr++ = 
QCE_SET_LAST_CMD_PTR(cmdlist->get_status_ocu); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->auth_sha256_hmac = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_key_512); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_iv_32); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->ce_data_in); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_result_32); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->get_status_ocu); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->auth_aes_128_cmac = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_key_128); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->ce_data_in); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_result_16); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->get_status_ocu); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->auth_aes_256_cmac = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_key_256); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->ce_data_in); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_auth_result_16); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->get_status_ocu); + + *pvaddr = (unsigned char *) cmd_ptr_vaddr; + + return 0; +} + +static int _setup_aead_cmdptrlists(struct qce_device *pce_dev, + unsigned char **pvaddr) +{ + uint32_t * cmd_ptr_vaddr = (uint32_t *)(*pvaddr); + struct ce_cmdlists *cmdlist = &pce_dev->ce_dm.cmdlist; + struct ce_cmdptrlists_ops *cmdptrlist = &pce_dev->ce_dm.cmdptrlist; + + /* + * Designate chunks of the allocated memory to various + * command list pointers related to 
aead operations + * defined in ce_cmdptrlists_ops structure. + */ + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->aead_aes_128_ccm = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_key_128); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_nonce_info); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_128_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_mask); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->aead_aes_256_ccm = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_seg_size_ocb); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->reset_auth_byte_count); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_key_256); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_nonce_info); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_auth_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_cfg); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_256_key); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_aes_iv); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_cipher_mask); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->set_go_proc); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->ce_data_in); + + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->aead_ce_out = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->ce_data_out); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_status_wait); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->get_status_ocu); + + *pvaddr = (unsigned char *) cmd_ptr_vaddr; + + return 0; +} + +static int qce_setup_cmdptrlists(struct qce_device *pce_dev, + unsigned char **pvaddr) +{ + uint32_t * cmd_ptr_vaddr = (uint32_t *)(*pvaddr); + struct ce_cmdlists *cmdlist = &pce_dev->ce_dm.cmdlist; + struct ce_cmdptrlists_ops *cmdptrlist = &pce_dev->ce_dm.cmdptrlist; + /* + * Designate chunks of the allocated memory to various + * command list pointers related to operations defined + * in ce_cmdptrlists_ops structure. + */ + cmd_ptr_vaddr = (uint32_t *) ALIGN(((unsigned int) cmd_ptr_vaddr), 16); + cmdptrlist->probe_ce_hw = QCE_SET_CMD_PTR(cmd_ptr_vaddr); + + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->get_hw_version); + *cmd_ptr_vaddr++ = QCE_SET_CMD_PTR(cmdlist->clear_status); + *cmd_ptr_vaddr++ = QCE_SET_LAST_CMD_PTR(cmdlist->get_status_ocu); + + *pvaddr = (unsigned char *) cmd_ptr_vaddr; + + _setup_cipher_cmdptrlists(pce_dev, pvaddr); + _setup_auth_cmdptrlists(pce_dev, pvaddr); + _setup_aead_cmdptrlists(pce_dev, pvaddr); + + return 0; +} + + +static int qce_setup_ce_dm_data(struct qce_device *pce_dev) +{ + unsigned char *vaddr; + + /* 1. 
ce_in channel data xfer command src descriptors, 128 entries */ + vaddr = pce_dev->coh_vmem; + vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); + pce_dev->ce_dm.ce_in_src_desc = (struct dmov_desc *) vaddr; + vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); + + /* 2. ce_in channel data xfer command dst descriptors, 128 entries */ + vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); + pce_dev->ce_dm.ce_in_dst_desc = (struct dmov_desc *) vaddr; + vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); + + + /* 3. ce_out channel data xfer command src descriptors, 128 entries */ + vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); + pce_dev->ce_dm.ce_out_src_desc = (struct dmov_desc *) vaddr; + vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); + + /* 4. ce_out channel data xfer command dst descriptors, 128 entries. */ + vaddr = (unsigned char *) ALIGN(((unsigned int)vaddr), 16); + pce_dev->ce_dm.ce_out_dst_desc = (struct dmov_desc *) vaddr; + vaddr = vaddr + (sizeof(struct dmov_desc) * QCE_MAX_NUM_DESC); + + qce_setup_cmd_buffers(pce_dev, &vaddr); + qce_setup_cmdlists(pce_dev, &vaddr); + qce_setup_cmdptrlists(pce_dev, &vaddr); + + pce_dev->ce_dm.buffer.ignore_data = vaddr; + + pce_dev->ce_dm.phy_ce_pad = GET_PHYS_ADDR(pce_dev->ce_dm.buffer.pad); + pce_dev->ce_dm.phy_ce_out_ignore = + GET_PHYS_ADDR(pce_dev->ce_dm.buffer.ignore_data); + + pce_dev->ce_dm.chan_ce_in_cmd->user = (void *) pce_dev; + pce_dev->ce_dm.chan_ce_in_cmd->exec_func = NULL; + + pce_dev->ce_dm.chan_ce_out_cmd->user = (void *) pce_dev; + pce_dev->ce_dm.chan_ce_out_cmd->exec_func = NULL; + + return 0; +} + +static int _qce_start_dma(struct qce_device *pce_dev, bool ce_in, bool ce_out) +{ + + if (ce_in) + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IN_PROG; + else + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_COMP; + + if (ce_out) + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_IN_PROG; + else + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_COMP; + + if (ce_in) + msm_dmov_enqueue_cmd(pce_dev->ce_dm.chan_ce_in, + pce_dev->ce_dm.chan_ce_in_cmd); + if (ce_out) + msm_dmov_enqueue_cmd(pce_dev->ce_dm.chan_ce_out, + pce_dev->ce_dm.chan_ce_out_cmd); + + return 0; +}; + +int qce_aead_req(void *handle, struct qce_req *q_req) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + struct aead_request *areq = (struct aead_request *) q_req->areq; + uint32_t authsize = q_req->authsize; + uint32_t totallen_in, totallen_out, out_len; + uint32_t pad_len_in, pad_len_out; + int rc = 0; + int ce_block_size; + + ce_block_size = pce_dev->ce_dm.ce_block_size; + if (q_req->dir == QCE_ENCRYPT) { + uint32_t pad_mac_len_out; + + q_req->cryptlen = areq->cryptlen; + totallen_in = q_req->cryptlen + areq->assoclen; + pad_len_in = ALIGN(totallen_in, ce_block_size) - totallen_in; + + out_len = areq->cryptlen + authsize; + totallen_out = q_req->cryptlen + authsize + areq->assoclen; + pad_mac_len_out = ALIGN(authsize, ce_block_size) - authsize; + totallen_out += pad_mac_len_out; + pad_len_out = ALIGN(totallen_out, ce_block_size) - + totallen_out + pad_mac_len_out; + + } else { + q_req->cryptlen = areq->cryptlen - authsize; + totallen_in = areq->cryptlen + areq->assoclen; + pad_len_in = ALIGN(totallen_in, ce_block_size) - totallen_in; + + out_len = q_req->cryptlen; + totallen_out = totallen_in; + pad_len_out = ALIGN(totallen_out, ce_block_size) - totallen_out; + pad_len_out += authsize; + } + + _chain_buffer_in_init(pce_dev); + _chain_buffer_out_init(pce_dev); + + 
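+ /*
+ * Assemble the two ADM chains: the input chain carries the assoc
+ * data, the cipher payload, then pad bytes up to the CE block size;
+ * the output chain drops the assoc bytes into the ignore buffer and
+ * lands the cipher output (and, on encryption, the MAC) in dst.
+ */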
pce_dev->assoc_nents = 0; + pce_dev->src_nents = 0; + pce_dev->dst_nents = 0; + pce_dev->ivsize = q_req->ivsize; + pce_dev->authsize = q_req->authsize; + + /* associated data input */ + pce_dev->assoc_nents = count_sg(areq->assoc, areq->assoclen); + dma_map_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, + DMA_TO_DEVICE); + if (_chain_sg_buffer_in(pce_dev, areq->assoc, areq->assoclen) < 0) { + rc = -ENOMEM; + goto bad; + } + /* cipher input */ + pce_dev->src_nents = count_sg(areq->src, areq->cryptlen); + dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + if (_chain_sg_buffer_in(pce_dev, areq->src, areq->cryptlen) < 0) { + rc = -ENOMEM; + goto bad; + } + /* pad data in */ + if (pad_len_in) { + if (_chain_pm_buffer_in(pce_dev, pce_dev->ce_dm.phy_ce_pad, + pad_len_in) < 0) { + rc = -ENOMEM; + goto bad; + } + } + + /* ignore associated data */ + if (_chain_pm_buffer_out(pce_dev, pce_dev->ce_dm.phy_ce_out_ignore, + areq->assoclen) < 0) { + rc = -ENOMEM; + goto bad; + } + /* cipher + mac output for encryption */ + if (areq->src != areq->dst) { + pce_dev->dst_nents = count_sg(areq->dst, out_len); + dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, + DMA_FROM_DEVICE); + }; + if (_chain_sg_buffer_out(pce_dev, areq->dst, out_len) < 0) { + rc = -ENOMEM; + goto bad; + } + /* pad data out */ + if (pad_len_out) { + if (_chain_pm_buffer_out(pce_dev, pce_dev->ce_dm.phy_ce_pad, + pad_len_out) < 0) { + rc = -ENOMEM; + goto bad; + } + } + + /* finalize the ce_in and ce_out channels command lists */ + _ce_in_final(pce_dev, ALIGN(totallen_in, ce_block_size)); + _ce_out_final(pce_dev, ALIGN(totallen_out, ce_block_size)); + + /* set up crypto device */ + rc = _ce_setup_cipher(pce_dev, q_req, totallen_in, areq->assoclen); + if (rc < 0) + goto bad; + + /* setup for callback, and issue command to adm */ + pce_dev->areq = q_req->areq; + pce_dev->qce_cb = q_req->qce_cb; + + pce_dev->ce_dm.chan_ce_in_cmd->complete_func = _aead_ce_in_call_back; + pce_dev->ce_dm.chan_ce_out_cmd->complete_func = _aead_ce_out_call_back; + + _ce_in_dump(pce_dev); + _ce_out_dump(pce_dev); + + rc = _qce_start_dma(pce_dev, true, true); + if (rc == 0) + return 0; +bad: + if (pce_dev->assoc_nents) { + dma_unmap_sg(pce_dev->pdev, areq->assoc, pce_dev->assoc_nents, + DMA_TO_DEVICE); + } + + if (pce_dev->src_nents) { + dma_unmap_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + (areq->src == areq->dst) ? DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + } + if (pce_dev->dst_nents) { + dma_unmap_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, + DMA_FROM_DEVICE); + } + return rc; +} +EXPORT_SYMBOL(qce_aead_req); + +int qce_ablk_cipher_req(void *handle, struct qce_req *c_req) +{ + int rc = 0; + struct qce_device *pce_dev = (struct qce_device *) handle; + struct ablkcipher_request *areq = (struct ablkcipher_request *) + c_req->areq; + + uint32_t pad_len = ALIGN(areq->nbytes, pce_dev->ce_dm.ce_block_size) + - areq->nbytes; + + _chain_buffer_in_init(pce_dev); + _chain_buffer_out_init(pce_dev); + + pce_dev->src_nents = 0; + pce_dev->dst_nents = 0; + + /* cipher input */ + pce_dev->src_nents = count_sg(areq->src, areq->nbytes); + + if (c_req->use_pmem != 1) + dma_map_sg(pce_dev->pdev, areq->src, pce_dev->src_nents, + (areq->src == areq->dst) ? 
DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + else + dma_map_pmem_sg(&c_req->pmem->src[0], pce_dev->src_nents, + areq->src); + + if (_chain_sg_buffer_in(pce_dev, areq->src, areq->nbytes) < 0) { + rc = -ENOMEM; + goto bad; + } + + /* cipher output */ + if (areq->src != areq->dst) { + pce_dev->dst_nents = count_sg(areq->dst, areq->nbytes); + if (c_req->use_pmem != 1) + dma_map_sg(pce_dev->pdev, areq->dst, pce_dev->dst_nents, + DMA_FROM_DEVICE); + else + dma_map_pmem_sg(&c_req->pmem->dst[0], + pce_dev->dst_nents, areq->dst); + }; + if (_chain_sg_buffer_out(pce_dev, areq->dst, areq->nbytes) < 0) { + rc = -ENOMEM; + goto bad; + } + + /* pad data */ + if (pad_len) { + if (_chain_pm_buffer_in(pce_dev, pce_dev->ce_dm.phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + if (_chain_pm_buffer_out(pce_dev, pce_dev->ce_dm.phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + } + + /* finalize the ce_in and ce_out channels command lists */ + _ce_in_final(pce_dev, areq->nbytes + pad_len); + _ce_out_final(pce_dev, areq->nbytes + pad_len); + + _ce_in_dump(pce_dev); + _ce_out_dump(pce_dev); + + /* set up crypto device */ + rc = _ce_setup_cipher(pce_dev, c_req, areq->nbytes, 0); + if (rc < 0) + goto bad; + + /* setup for callback, and issue command to adm */ + pce_dev->areq = areq; + pce_dev->qce_cb = c_req->qce_cb; + if (c_req->use_pmem == 1) { + pce_dev->ce_dm.chan_ce_in_cmd->complete_func = + _ablk_cipher_ce_in_call_back_pmem; + pce_dev->ce_dm.chan_ce_out_cmd->complete_func = + _ablk_cipher_ce_out_call_back_pmem; + } else { + pce_dev->ce_dm.chan_ce_in_cmd->complete_func = + _ablk_cipher_ce_in_call_back; + pce_dev->ce_dm.chan_ce_out_cmd->complete_func = + _ablk_cipher_ce_out_call_back; + } + rc = _qce_start_dma(pce_dev, true, true); + + if (rc == 0) + return 0; +bad: + if (c_req->use_pmem != 1) { + if (pce_dev->dst_nents) { + dma_unmap_sg(pce_dev->pdev, areq->dst, + pce_dev->dst_nents, DMA_FROM_DEVICE); + } + if (pce_dev->src_nents) { + dma_unmap_sg(pce_dev->pdev, areq->src, + pce_dev->src_nents, + (areq->src == areq->dst) ? + DMA_BIDIRECTIONAL : + DMA_TO_DEVICE); + } + } + return rc; +} +EXPORT_SYMBOL(qce_ablk_cipher_req); + +int qce_process_sha_req(void *handle, struct qce_sha_req *sreq) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + int rc; + uint32_t pad_len = ALIGN(sreq->size, pce_dev->ce_dm.ce_block_size) - + sreq->size; + struct ahash_request *areq = (struct ahash_request *)sreq->areq; + + _chain_buffer_in_init(pce_dev); + pce_dev->src_nents = count_sg(sreq->src, sreq->size); + dma_map_sg(pce_dev->pdev, sreq->src, pce_dev->src_nents, + DMA_TO_DEVICE); + + if (_chain_sg_buffer_in(pce_dev, sreq->src, sreq->size) < 0) { + rc = -ENOMEM; + goto bad; + } + + if (pad_len) { + if (_chain_pm_buffer_in(pce_dev, pce_dev->ce_dm.phy_ce_pad, + pad_len) < 0) { + rc = -ENOMEM; + goto bad; + } + } + _ce_in_final(pce_dev, sreq->size + pad_len); + + _ce_in_dump(pce_dev); + + rc = _ce_setup_hash(pce_dev, sreq); + + if (rc < 0) + goto bad; + + pce_dev->areq = areq; + pce_dev->qce_cb = sreq->qce_cb; + pce_dev->ce_dm.chan_ce_in_cmd->complete_func = _sha_ce_in_call_back; + + rc = _qce_start_dma(pce_dev, true, false); + + if (rc == 0) + return 0; +bad: + if (pce_dev->src_nents) { + dma_unmap_sg(pce_dev->pdev, sreq->src, + pce_dev->src_nents, DMA_TO_DEVICE); + } + + return rc; +} +EXPORT_SYMBOL(qce_process_sha_req); + +/* crypto engine open function. 
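+ *
+ * A minimal client sketch (error handling elided; "pdev" here stands
+ * for the platform device this engine was probed with):
+ *
+ *	struct ce_hw_support sup;
+ *	void *handle;
+ *	int rc;
+ *
+ *	handle = qce_open(pdev, &rc);
+ *	if (handle) {
+ *		qce_hw_support(handle, &sup);
+ *		... queue qce_ablk_cipher_req()/qce_process_sha_req() ...
+ *		qce_close(handle);
+ *	}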
*/ +void *qce_open(struct platform_device *pdev, int *rc) +{ + struct qce_device *pce_dev; + struct resource *resource; + struct clk *ce_core_clk; + struct clk *ce_clk; + struct clk *ce_core_src_clk; + int ret = 0; + + pce_dev = kzalloc(sizeof(struct qce_device), GFP_KERNEL); + if (!pce_dev) { + *rc = -ENOMEM; + dev_err(&pdev->dev, "Can not allocate memory\n"); + return NULL; + } + pce_dev->pdev = &pdev->dev; + + resource = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!resource) { + *rc = -ENXIO; + dev_err(pce_dev->pdev, "Missing MEM resource\n"); + goto err_pce_dev; + }; + pce_dev->phy_iobase = resource->start; + pce_dev->iobase = ioremap_nocache(resource->start, + resource->end - resource->start + 1); + if (!pce_dev->iobase) { + *rc = -ENOMEM; + dev_err(pce_dev->pdev, "Can not map io memory\n"); + goto err_pce_dev; + } + + pce_dev->ce_dm.chan_ce_in_cmd = kzalloc(sizeof(struct msm_dmov_cmd), + GFP_KERNEL); + pce_dev->ce_dm.chan_ce_out_cmd = kzalloc(sizeof(struct msm_dmov_cmd), + GFP_KERNEL); + if (pce_dev->ce_dm.chan_ce_in_cmd == NULL || + pce_dev->ce_dm.chan_ce_out_cmd == NULL) { + dev_err(pce_dev->pdev, "Can not allocate memory\n"); + *rc = -ENOMEM; + goto err_dm_chan_cmd; + } + + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "crypto_channels"); + if (!resource) { + *rc = -ENXIO; + dev_err(pce_dev->pdev, "Missing DMA channel resource\n"); + goto err_dm_chan_cmd; + }; + pce_dev->ce_dm.chan_ce_in = resource->start; + pce_dev->ce_dm.chan_ce_out = resource->end; + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "crypto_crci_in"); + if (!resource) { + *rc = -ENXIO; + dev_err(pce_dev->pdev, "Missing DMA crci in resource\n"); + goto err_dm_chan_cmd; + }; + pce_dev->ce_dm.crci_in = resource->start; + resource = platform_get_resource_byname(pdev, IORESOURCE_DMA, + "crypto_crci_out"); + if (!resource) { + *rc = -ENXIO; + dev_err(pce_dev->pdev, "Missing DMA crci out resource\n"); + goto err_dm_chan_cmd; + }; + pce_dev->ce_dm.crci_out = resource->start; + pce_dev->memsize = 2 * PAGE_SIZE; + pce_dev->coh_vmem = dma_alloc_coherent(pce_dev->pdev, + pce_dev->memsize, &pce_dev->coh_pmem, GFP_KERNEL); + + if (pce_dev->coh_vmem == NULL) { + *rc = -ENOMEM; + dev_err(pce_dev->pdev, "Can not allocate coherent memory.\n"); + goto err; + } + + /* Get CE3 src core clk. 
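+ * The CE3 source clock is optional: on targets that do not provide
+ * it, clk_get() fails and the 100 MHz rate request is skipped.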
*/ + ce_core_src_clk = clk_get(pce_dev->pdev, "ce3_core_src_clk"); + if (!IS_ERR(ce_core_src_clk)) { + pce_dev->ce_core_src_clk = ce_core_src_clk; + + /* Set the core src clk @100Mhz */ + ret = clk_set_rate(pce_dev->ce_core_src_clk, 100000000); + if (ret) { + clk_put(pce_dev->ce_core_src_clk); + goto err; + } + } else + pce_dev->ce_core_src_clk = NULL; + + /* Get CE core clk */ + ce_core_clk = clk_get(pce_dev->pdev, "core_clk"); + if (IS_ERR(ce_core_clk)) { + *rc = PTR_ERR(ce_core_clk); + if (pce_dev->ce_core_src_clk != NULL) + clk_put(pce_dev->ce_core_src_clk); + goto err; + } + pce_dev->ce_core_clk = ce_core_clk; + /* Get CE clk */ + ce_clk = clk_get(pce_dev->pdev, "iface_clk"); + if (IS_ERR(ce_clk)) { + *rc = PTR_ERR(ce_clk); + if (pce_dev->ce_core_src_clk != NULL) + clk_put(pce_dev->ce_core_src_clk); + clk_put(pce_dev->ce_core_clk); + goto err; + } + pce_dev->ce_clk = ce_clk; + + /* Enable CE core clk */ + *rc = clk_prepare_enable(pce_dev->ce_core_clk); + if (*rc) { + if (pce_dev->ce_core_src_clk != NULL) + clk_put(pce_dev->ce_core_src_clk); + clk_put(pce_dev->ce_core_clk); + clk_put(pce_dev->ce_clk); + goto err; + } else { + /* Enable CE clk */ + *rc = clk_prepare_enable(pce_dev->ce_clk); + if (*rc) { + clk_disable_unprepare(pce_dev->ce_core_clk); + if (pce_dev->ce_core_src_clk != NULL) + clk_put(pce_dev->ce_core_src_clk); + clk_put(pce_dev->ce_core_clk); + clk_put(pce_dev->ce_clk); + goto err; + + } + } + qce_setup_ce_dm_data(pce_dev); + + pce_dev->ce_dm.chan_ce_in_state = QCE_CHAN_STATE_IDLE; + pce_dev->ce_dm.chan_ce_out_state = QCE_CHAN_STATE_IDLE; + if (_init_ce_engine(pce_dev)) { + *rc = -ENXIO; + goto err; + } + *rc = 0; + return pce_dev; + +err: + if (pce_dev->coh_vmem) + dma_free_coherent(pce_dev->pdev, pce_dev->memsize, + pce_dev->coh_vmem, pce_dev->coh_pmem); +err_dm_chan_cmd: + kfree(pce_dev->ce_dm.chan_ce_in_cmd); + kfree(pce_dev->ce_dm.chan_ce_out_cmd); + if (pce_dev->iobase) + iounmap(pce_dev->iobase); + +err_pce_dev: + + kfree(pce_dev); + + return NULL; +} +EXPORT_SYMBOL(qce_open); + +/* crypto engine close function. 
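+ * Releases everything qce_open() acquired: the register mapping, the
+ * coherent command memory, the clocks and the ADM command structures.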
*/ +int qce_close(void *handle) +{ + struct qce_device *pce_dev = (struct qce_device *) handle; + + if (handle == NULL) + return -ENODEV; + if (pce_dev->iobase) + iounmap(pce_dev->iobase); + + if (pce_dev->coh_vmem) + dma_free_coherent(pce_dev->pdev, pce_dev->memsize, + pce_dev->coh_vmem, pce_dev->coh_pmem); + clk_disable_unprepare(pce_dev->ce_clk); + clk_disable_unprepare(pce_dev->ce_core_clk); + + if (pce_dev->ce_core_src_clk != NULL) + clk_put(pce_dev->ce_core_src_clk); + + clk_put(pce_dev->ce_clk); + clk_put(pce_dev->ce_core_clk); + + kfree(pce_dev->ce_dm.chan_ce_in_cmd); + kfree(pce_dev->ce_dm.chan_ce_out_cmd); + kfree(handle); + + return 0; +} +EXPORT_SYMBOL(qce_close); + +int qce_hw_support(void *handle, struct ce_hw_support *ce_support) +{ + if (ce_support == NULL) + return -EINVAL; + + ce_support->sha1_hmac_20 = false; + ce_support->sha1_hmac = false; + ce_support->sha256_hmac = false; + ce_support->sha_hmac = false; + ce_support->cmac = true; + ce_support->aes_key_192 = false; + ce_support->aes_xts = true; + ce_support->aes_ccm = true; + ce_support->ota = false; + return 0; +} +EXPORT_SYMBOL(qce_hw_support); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Mona Hossain "); +MODULE_DESCRIPTION("Crypto Engine driver"); +MODULE_VERSION("2.17"); diff --git a/drivers/crypto/msm/qce40.h b/drivers/crypto/msm/qce40.h new file mode 100644 index 000000000000..809ba7f869f2 --- /dev/null +++ b/drivers/crypto/msm/qce40.h @@ -0,0 +1,240 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */
+
+#ifndef _DRIVERS_CRYPTO_MSM_QCE40_H_
+#define _DRIVERS_CRYPTO_MSM_QCE40_H_
+
+
+#define GET_VIRT_ADDR(x) \
+ ((uint32_t)pce_dev->coh_vmem + \
+ ((uint32_t)x - pce_dev->coh_pmem))
+#define GET_PHYS_ADDR(x) \
+ (pce_dev->coh_pmem + ((unsigned char *)x - \
+ pce_dev->coh_vmem))
+
+/* Sets the address of a command list in command pointer list */
+#define QCE_SET_CMD_PTR(x) \
+ (uint32_t)(DMOV_CMD_ADDR(GET_PHYS_ADDR((unsigned char *)x)))
+
+/* Sets the address of the last command list in command pointer list */
+#define SET_LAST_CMD_PTR(x) \
+ ((DMOV_CMD_ADDR(x)) | CMD_PTR_LP)
+
+/* Gets the address of the last command list in command pointer list */
+#define QCE_SET_LAST_CMD_PTR(x) \
+ SET_LAST_CMD_PTR((GET_PHYS_ADDR((unsigned char *)x)))
+
+
+/* MAX Data xfer block size between DM and CE */
+#define MAX_ADM_CE_BLOCK_SIZE 64
+#define ADM_DESC_LENGTH_MASK 0xffff
+#define ADM_DESC_LENGTH(x) (x & ADM_DESC_LENGTH_MASK)
+
+#define ADM_STATUS_OK 0x80000002
+
+/* QCE max number of descriptors in a descriptor list */
+#define QCE_MAX_NUM_DESC 128
+
+#define CRYPTO_REG_SIZE 0x4
+
+struct dmov_desc {
+ uint32_t addr;
+ uint32_t len;
+};
+
+/* State of DM channel */
+enum qce_chan_st_enum {
+ QCE_CHAN_STATE_IDLE = 0,
+ QCE_CHAN_STATE_IN_PROG = 1,
+ QCE_CHAN_STATE_COMP = 2,
+ QCE_CHAN_STATE_LAST
+};
+
+/* CE buffer addresses */
+struct ce_reg_buffer_addr {
+
+ unsigned char *reset_buf_64;
+ unsigned char *version;
+
+ unsigned char *encr_seg_cfg_size_start;
+ unsigned char *encr_key;
+ unsigned char *encr_xts_key;
+ unsigned char *encr_cntr_iv;
+ unsigned char *encr_mask;
+ unsigned char *encr_xts_du_size;
+
+ unsigned char *auth_seg_cfg_size_start;
+ unsigned char *auth_key;
+ unsigned char *auth_iv;
+ unsigned char *auth_result;
+ unsigned char *auth_nonce_info;
+ unsigned char *auth_byte_count;
+
+ unsigned char *seg_size;
+ unsigned char *go_proc;
+ unsigned char *status;
+
+ unsigned char *pad;
+ unsigned char *ignore_data;
+};
+
+/* CE buffers */
+struct ce_reg_buffers {
+
+ unsigned char reset_buf_64[64];
+ unsigned char version[CRYPTO_REG_SIZE];
+
+ unsigned char encr_seg_cfg_size_start[3 * CRYPTO_REG_SIZE];
+ unsigned char encr_key[8 * CRYPTO_REG_SIZE];
+ unsigned char encr_xts_key[8 * CRYPTO_REG_SIZE];
+ unsigned char encr_cntr_iv[4 * CRYPTO_REG_SIZE];
+ unsigned char encr_mask[CRYPTO_REG_SIZE];
+ unsigned char encr_xts_du_size[CRYPTO_REG_SIZE];
+
+ unsigned char auth_seg_cfg_size_start[3 * CRYPTO_REG_SIZE];
+ unsigned char auth_key[16 * CRYPTO_REG_SIZE];
+ unsigned char auth_iv[16 * CRYPTO_REG_SIZE];
+ unsigned char auth_result[16 * CRYPTO_REG_SIZE];
+ unsigned char auth_nonce_info[4 * CRYPTO_REG_SIZE];
+ unsigned char auth_byte_count[4 * CRYPTO_REG_SIZE];
+
+ unsigned char seg_size[CRYPTO_REG_SIZE];
+ unsigned char go_proc[CRYPTO_REG_SIZE];
+ unsigned char status[CRYPTO_REG_SIZE];
+
+ unsigned char pad[2 * MAX_ADM_CE_BLOCK_SIZE];
+};
+
+/* CE Command lists */
+struct ce_cmdlists {
+ dmov_s *get_hw_version;
+ dmov_s *clear_status;
+ dmov_s *get_status_ocu;
+
+ dmov_s *set_cipher_cfg;
+
+ dmov_s *set_cipher_aes_128_key;
+ dmov_s *set_cipher_aes_256_key;
+ dmov_s *set_cipher_des_key;
+ dmov_s *set_cipher_3des_key;
+
+ dmov_s *set_cipher_aes_128_xts_key;
+ dmov_s *set_cipher_aes_256_xts_key;
+ dmov_s *set_cipher_xts_du_size;
+
+ dmov_s *set_cipher_aes_iv;
+ dmov_s *set_cipher_aes_xts_iv;
+ dmov_s *set_cipher_des_iv;
+ dmov_s *get_cipher_iv;
+
+ dmov_s *set_cipher_mask;
+
+ dmov_s *set_auth_cfg;
+ dmov_s *set_auth_key_128;
+ dmov_s *set_auth_key_256;
+ dmov_s *set_auth_key_512;
+ dmov_s
*set_auth_iv_16; + dmov_s *get_auth_result_16; + dmov_s *set_auth_iv_20; + dmov_s *get_auth_result_20; + dmov_s *set_auth_iv_32; + dmov_s *get_auth_result_32; + dmov_s *set_auth_byte_count; + dmov_s *get_auth_byte_count; + + dmov_s *set_auth_nonce_info; + + dmov_s *reset_cipher_key; + dmov_s *reset_cipher_xts_key; + dmov_s *reset_cipher_iv; + dmov_s *reset_cipher_cfg; + dmov_s *reset_auth_key; + dmov_s *reset_auth_iv; + dmov_s *reset_auth_cfg; + dmov_s *reset_auth_byte_count; + + dmov_s *set_seg_size_ocb; + dmov_s *get_status_wait; + dmov_s *set_go_proc; + + dmov_sg *ce_data_in; + dmov_sg *ce_data_out; +}; + +/* Command pointer lists */ +struct ce_cmdptrlists_ops { + + uint32_t probe_ce_hw; + uint32_t cipher_aes_128_cbc_ctr; + uint32_t cipher_aes_256_cbc_ctr; + uint32_t cipher_aes_128_ecb; + uint32_t cipher_aes_256_ecb; + uint32_t cipher_aes_128_xts; + uint32_t cipher_aes_256_xts; + uint32_t cipher_des_cbc; + uint32_t cipher_des_ecb; + uint32_t cipher_3des_cbc; + uint32_t cipher_3des_ecb; + uint32_t auth_sha1; + uint32_t auth_sha256; + uint32_t auth_sha1_hmac; + uint32_t auth_sha256_hmac; + uint32_t auth_aes_128_cmac; + uint32_t auth_aes_256_cmac; + uint32_t aead_aes_128_ccm; + uint32_t aead_aes_256_ccm; + + uint32_t cipher_ce_out; + uint32_t cipher_ce_out_get_iv; + uint32_t aead_ce_out; +}; + +/* DM data structure with buffers, commandlists & commmand pointer lists */ +struct ce_dm_data { + unsigned int chan_ce_in; /* ADM channel used for CE input + * and auth result if authentication + * only operation. */ + unsigned int chan_ce_out; /* ADM channel used for CE output, + * and icv for esp */ + + unsigned int crci_in; /* CRCI for CE DM IN Channel */ + unsigned int crci_out; /* CRCI for CE DM OUT Channel */ + + enum qce_chan_st_enum chan_ce_in_state; /* chan ce_in state */ + enum qce_chan_st_enum chan_ce_out_state; /* chan ce_out state */ + + int chan_ce_in_status; /* chan ce_in status */ + int chan_ce_out_status; /* chan ce_out status */ + + struct dmov_desc *ce_out_src_desc; + struct dmov_desc *ce_out_dst_desc; + struct dmov_desc *ce_in_src_desc; + struct dmov_desc *ce_in_dst_desc; + + int ce_out_src_desc_index; + int ce_out_dst_desc_index; + int ce_in_src_desc_index; + int ce_in_dst_desc_index; + + int ce_block_size; + + dma_addr_t phy_ce_out_ignore; + dma_addr_t phy_ce_pad; + + struct ce_reg_buffer_addr buffer; + struct ce_cmdlists cmdlist; + struct ce_cmdptrlists_ops cmdptrlist; + + struct msm_dmov_cmd *chan_ce_in_cmd; + struct msm_dmov_cmd *chan_ce_out_cmd; +}; +#endif /* _DRIVERS_CRYPTO_MSM_QCE40_H */ diff --git a/drivers/crypto/msm/qce_ota.h b/drivers/crypto/msm/qce_ota.h new file mode 100644 index 000000000000..72af58575258 --- /dev/null +++ b/drivers/crypto/msm/qce_ota.h @@ -0,0 +1,30 @@ +/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +/* Qualcomm Crypto Engine driver OTA APIi */ + +#ifndef __CRYPTO_MSM_QCE_OTA_H +#define __CRYPTO_MSM_QCE_OTA_H + +#include +#include + + +int qce_f8_req(void *handle, struct qce_f8_req *req, + void *cookie, qce_comp_func_ptr_t qce_cb); +int qce_f8_multi_pkt_req(void *handle, struct qce_f8_multi_pkt_req *req, + void *cookie, qce_comp_func_ptr_t qce_cb); +int qce_f9_req(void *handle, struct qce_f9_req *req, + void *cookie, qce_comp_func_ptr_t qce_cb); + +#endif /* __CRYPTO_MSM_QCE_OTA_H */ diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c new file mode 100644 index 000000000000..2a191d54202d --- /dev/null +++ b/drivers/crypto/msm/qcedev.c @@ -0,0 +1,2228 @@ +/* Qualcomm CE device driver. + * + * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "qce.h" + + +#define CACHE_LINE_SIZE 32 +#define CE_SHA_BLOCK_SIZE SHA256_BLOCK_SIZE + +static uint8_t _std_init_vector_sha1_uint8[] = { + 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, + 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76, + 0xC3, 0xD2, 0xE1, 0xF0 +}; +/* standard initialization vector for SHA-256, source: FIPS 180-2 */ +static uint8_t _std_init_vector_sha256_uint8[] = { + 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85, + 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A, + 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C, + 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19 +}; + +enum qcedev_crypto_oper_type { + QCEDEV_CRYPTO_OPER_CIPHER = 0, + QCEDEV_CRYPTO_OPER_SHA = 1, + QCEDEV_CRYPTO_OPER_LAST +}; + +struct qcedev_handle; + +struct qcedev_cipher_req { + struct ablkcipher_request creq; + void *cookie; +}; + +struct qcedev_sha_req { + struct ahash_request sreq; + void *cookie; +}; + +struct qcedev_sha_ctxt { + uint32_t auth_data[4]; + uint8_t digest[QCEDEV_MAX_SHA_DIGEST]; + uint32_t diglen; + uint8_t trailing_buf[64]; + uint32_t trailing_buf_len; + uint8_t first_blk; + uint8_t last_blk; + uint8_t authkey[QCEDEV_MAX_SHA_BLOCK_SIZE]; +}; + +struct qcedev_async_req { + struct list_head list; + struct completion complete; + enum qcedev_crypto_oper_type op_type; + union { + struct qcedev_cipher_op_req cipher_op_req; + struct qcedev_sha_op_req sha_op_req; + }; + union{ + struct qcedev_cipher_req cipher_req; + struct qcedev_sha_req sha_req; + }; + struct qcedev_handle *handle; + int err; +}; + +static DEFINE_MUTEX(send_cmd_lock); +static DEFINE_MUTEX(sent_bw_req); +/********************************************************************** + * Register ourselves as a misc device to be able to access the dev driver + * from userspace. 
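+ * A single dynamic-minor node ("qce") is registered; each open()
+ * allocates its own qcedev_handle, so the SHA context is private to
+ * the file descriptor.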
*/ + + +#define QCEDEV_DEV "qcedev" + +struct qcedev_control{ + + /* CE features supported by platform */ + struct msm_ce_hw_support platform_support; + + uint32_t ce_lock_count; + uint32_t high_bw_req_count; + + /* CE features/algorithms supported by HW engine*/ + struct ce_hw_support ce_support; + + uint32_t bus_scale_handle; + + /* misc device */ + struct miscdevice miscdevice; + + /* qce handle */ + void *qce; + + /* platform device */ + struct platform_device *pdev; + + unsigned magic; + + struct list_head ready_commands; + struct qcedev_async_req *active_command; + spinlock_t lock; + struct tasklet_struct done_tasklet; +}; + +struct qcedev_handle { + /* qcedev control handle */ + struct qcedev_control *cntl; + /* qce internal sha context*/ + struct qcedev_sha_ctxt sha_ctxt; +}; + +/*------------------------------------------------------------------------- +* Resource Locking Service +* ------------------------------------------------------------------------*/ +#define QCEDEV_CMD_ID 1 +#define QCEDEV_CE_LOCK_CMD 1 +#define QCEDEV_CE_UNLOCK_CMD 0 +#define NUM_RETRY 1000 +#define CE_BUSY 55 + +static int qcedev_scm_cmd(int resource, int cmd, int *response) +{ +#ifdef CONFIG_MSM_SCM + + struct { + int resource; + int cmd; + } cmd_buf; + + cmd_buf.resource = resource; + cmd_buf.cmd = cmd; + + return scm_call(SCM_SVC_TZ, QCEDEV_CMD_ID, &cmd_buf, + sizeof(cmd_buf), response, sizeof(*response)); + +#else + return 0; +#endif +} + +static void qcedev_ce_high_bw_req(struct qcedev_control *podev, + bool high_bw_req) +{ + int ret = 0; + + mutex_lock(&sent_bw_req); + if (high_bw_req) { + if (podev->high_bw_req_count == 0) + ret = msm_bus_scale_client_update_request( + podev->bus_scale_handle, 1); + if (ret) + pr_err("%s Unable to set to high bandwidth\n", + __func__); + podev->high_bw_req_count++; + } else { + if (podev->high_bw_req_count == 1) + ret = msm_bus_scale_client_update_request( + podev->bus_scale_handle, 0); + if (ret) + pr_err("%s Unable to set to low bandwidth\n", + __func__); + podev->high_bw_req_count--; + } + mutex_unlock(&sent_bw_req); +} + + +static int qcedev_unlock_ce(struct qcedev_control *podev) +{ + int ret = 0; + + mutex_lock(&send_cmd_lock); + if (podev->ce_lock_count == 1) { + int response = 0; + + if (qcedev_scm_cmd(podev->platform_support.shared_ce_resource, + QCEDEV_CE_UNLOCK_CMD, &response)) { + pr_err("Failed to release CE lock\n"); + ret = -EIO; + } + } + if (ret == 0) { + if (podev->ce_lock_count) + podev->ce_lock_count--; + else { + /* We should never be here */ + ret = -EIO; + pr_err("CE hardware is already unlocked\n"); + } + } + mutex_unlock(&send_cmd_lock); + + return ret; +} + +static int qcedev_lock_ce(struct qcedev_control *podev) +{ + int ret = 0; + + mutex_lock(&send_cmd_lock); + if (podev->ce_lock_count == 0) { + int response = -CE_BUSY; + int i = 0; + + do { + if (qcedev_scm_cmd( + podev->platform_support.shared_ce_resource, + QCEDEV_CE_LOCK_CMD, &response)) { + response = -EINVAL; + break; + } + } while ((response == -CE_BUSY) && (i++ < NUM_RETRY)); + + if ((response == -CE_BUSY) && (i >= NUM_RETRY)) { + ret = -EUSERS; + } else { + if (response < 0) + ret = -EINVAL; + } + } + if (ret == 0) + podev->ce_lock_count++; + mutex_unlock(&send_cmd_lock); + return ret; +} + +#define QCEDEV_MAGIC 0x56434544 /* "qced" */ + +static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg); +static int qcedev_open(struct inode *inode, struct file *file); +static int qcedev_release(struct inode *inode, struct file *file); +static int 
start_cipher_req(struct qcedev_control *podev); +static int start_sha_req(struct qcedev_control *podev); + +static const struct file_operations qcedev_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = qcedev_ioctl, + .open = qcedev_open, + .release = qcedev_release, +}; + +static struct qcedev_control qce_dev[] = { + { + .miscdevice = { + .minor = MISC_DYNAMIC_MINOR, + .name = "qce", + .fops = &qcedev_fops, + }, + .magic = QCEDEV_MAGIC, + }, +}; + +#define MAX_QCE_DEVICE ARRAY_SIZE(qce_dev) +#define DEBUG_MAX_FNAME 16 +#define DEBUG_MAX_RW_BUF 1024 + +struct qcedev_stat { + u32 qcedev_dec_success; + u32 qcedev_dec_fail; + u32 qcedev_enc_success; + u32 qcedev_enc_fail; + u32 qcedev_sha_success; + u32 qcedev_sha_fail; +}; + +static struct qcedev_stat _qcedev_stat[MAX_QCE_DEVICE]; +static struct dentry *_debug_dent; +static char _debug_read_buf[DEBUG_MAX_RW_BUF]; +static int _debug_qcedev[MAX_QCE_DEVICE]; + +static struct qcedev_control *qcedev_minor_to_control(unsigned n) +{ + int i; + + for (i = 0; i < MAX_QCE_DEVICE; i++) { + if (qce_dev[i].miscdevice.minor == n) + return &qce_dev[i]; + } + return NULL; +} + +static int qcedev_open(struct inode *inode, struct file *file) +{ + struct qcedev_handle *handle; + struct qcedev_control *podev; + + podev = qcedev_minor_to_control(MINOR(inode->i_rdev)); + if (podev == NULL) { + pr_err("%s: no such device %d\n", __func__, + MINOR(inode->i_rdev)); + return -ENOENT; + } + + handle = kzalloc(sizeof(struct qcedev_handle), GFP_KERNEL); + if (handle == NULL) { + pr_err("Failed to allocate memory %ld\n", + PTR_ERR(handle)); + return -ENOMEM; + } + + handle->cntl = podev; + file->private_data = handle; + if (podev->platform_support.bus_scale_table != NULL) + qcedev_ce_high_bw_req(podev, true); + return 0; +} + +static int qcedev_release(struct inode *inode, struct file *file) +{ + struct qcedev_control *podev; + struct qcedev_handle *handle; + + handle = file->private_data; + podev = handle->cntl; + if (podev != NULL && podev->magic != QCEDEV_MAGIC) { + pr_err("%s: invalid handle %p\n", + __func__, podev); + } + kzfree(handle); + file->private_data = NULL; + if (podev != NULL && podev->platform_support.bus_scale_table != NULL) + qcedev_ce_high_bw_req(podev, false); + return 0; +} + +static void req_done(unsigned long data) +{ + struct qcedev_control *podev = (struct qcedev_control *)data; + struct qcedev_async_req *areq; + unsigned long flags = 0; + struct qcedev_async_req *new_req = NULL; + int ret = 0; + + spin_lock_irqsave(&podev->lock, flags); + areq = podev->active_command; + podev->active_command = NULL; + +again: + if (!list_empty(&podev->ready_commands)) { + new_req = container_of(podev->ready_commands.next, + struct qcedev_async_req, list); + list_del(&new_req->list); + podev->active_command = new_req; + new_req->err = 0; + if (new_req->op_type == QCEDEV_CRYPTO_OPER_CIPHER) + ret = start_cipher_req(podev); + else + ret = start_sha_req(podev); + } + + spin_unlock_irqrestore(&podev->lock, flags); + + if (areq) + complete(&areq->complete); + + if (new_req && ret) { + complete(&new_req->complete); + spin_lock_irqsave(&podev->lock, flags); + podev->active_command = NULL; + areq = NULL; + ret = 0; + new_req = NULL; + goto again; + } + + return; +} + +static void qcedev_sha_req_cb(void *cookie, unsigned char *digest, + unsigned char *authdata, int ret) +{ + struct qcedev_sha_req *areq; + struct qcedev_control *pdev; + struct qcedev_handle *handle; + + uint32_t *auth32 = (uint32_t *)authdata; + + areq = (struct qcedev_sha_req *) cookie; + handle = 
(struct qcedev_handle *) areq->cookie; + pdev = handle->cntl; + + if (digest) + memcpy(&handle->sha_ctxt.digest[0], digest, 32); + + if (authdata) { + handle->sha_ctxt.auth_data[0] = auth32[0]; + handle->sha_ctxt.auth_data[1] = auth32[1]; + handle->sha_ctxt.auth_data[2] = auth32[2]; + handle->sha_ctxt.auth_data[3] = auth32[3]; + } + + tasklet_schedule(&pdev->done_tasklet); +}; + + +static void qcedev_cipher_req_cb(void *cookie, unsigned char *icv, + unsigned char *iv, int ret) +{ + struct qcedev_cipher_req *areq; + struct qcedev_handle *handle; + struct qcedev_control *podev; + struct qcedev_async_req *qcedev_areq; + + areq = (struct qcedev_cipher_req *) cookie; + handle = (struct qcedev_handle *) areq->cookie; + podev = handle->cntl; + qcedev_areq = podev->active_command; + + if (iv) + memcpy(&qcedev_areq->cipher_op_req.iv[0], iv, + qcedev_areq->cipher_op_req.ivlen); + tasklet_schedule(&podev->done_tasklet); +}; + +static int start_cipher_req(struct qcedev_control *podev) +{ + struct qcedev_async_req *qcedev_areq; + struct qce_req creq; + int ret = 0; + + /* start the command on the podev->active_command */ + qcedev_areq = podev->active_command; + + qcedev_areq->cipher_req.cookie = qcedev_areq->handle; + creq.use_pmem = qcedev_areq->cipher_op_req.use_pmem; + if (qcedev_areq->cipher_op_req.use_pmem == QCEDEV_USE_PMEM) + creq.pmem = &qcedev_areq->cipher_op_req.pmem; + else + creq.pmem = NULL; + + switch (qcedev_areq->cipher_op_req.alg) { + case QCEDEV_ALG_DES: + creq.alg = CIPHER_ALG_DES; + break; + case QCEDEV_ALG_3DES: + creq.alg = CIPHER_ALG_3DES; + break; + case QCEDEV_ALG_AES: + creq.alg = CIPHER_ALG_AES; + break; + default: + return -EINVAL; + }; + + switch (qcedev_areq->cipher_op_req.mode) { + case QCEDEV_AES_MODE_CBC: + case QCEDEV_DES_MODE_CBC: + creq.mode = QCE_MODE_CBC; + break; + case QCEDEV_AES_MODE_ECB: + case QCEDEV_DES_MODE_ECB: + creq.mode = QCE_MODE_ECB; + break; + case QCEDEV_AES_MODE_CTR: + creq.mode = QCE_MODE_CTR; + break; + case QCEDEV_AES_MODE_XTS: + creq.mode = QCE_MODE_XTS; + break; + default: + return -EINVAL; + }; + + if ((creq.alg == CIPHER_ALG_AES) && + (creq.mode == QCE_MODE_CTR)) { + creq.dir = QCE_ENCRYPT; + } else { + if (QCEDEV_OPER_ENC == qcedev_areq->cipher_op_req.op) + creq.dir = QCE_ENCRYPT; + else + creq.dir = QCE_DECRYPT; + } + + creq.iv = &qcedev_areq->cipher_op_req.iv[0]; + creq.ivsize = qcedev_areq->cipher_op_req.ivlen; + + creq.enckey = &qcedev_areq->cipher_op_req.enckey[0]; + creq.encklen = qcedev_areq->cipher_op_req.encklen; + + creq.cryptlen = qcedev_areq->cipher_op_req.data_len; + + if (qcedev_areq->cipher_op_req.encklen == 0) { + if ((qcedev_areq->cipher_op_req.op == QCEDEV_OPER_ENC_NO_KEY) + || (qcedev_areq->cipher_op_req.op == + QCEDEV_OPER_DEC_NO_KEY)) + creq.op = QCE_REQ_ABLK_CIPHER_NO_KEY; + else { + int i; + + for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++) { + if (qcedev_areq->cipher_op_req.enckey[i] != 0) + break; + } + + if ((podev->platform_support.hw_key_support == 1) && + (i == QCEDEV_MAX_KEY_SIZE)) + creq.op = QCE_REQ_ABLK_CIPHER; + else { + ret = -EINVAL; + goto unsupported; + } + } + } else { + creq.op = QCE_REQ_ABLK_CIPHER; + } + + creq.qce_cb = qcedev_cipher_req_cb; + creq.areq = (void *)&qcedev_areq->cipher_req; + + ret = qce_ablk_cipher_req(podev->qce, &creq); +unsupported: + if (ret) + qcedev_areq->err = -ENXIO; + else + qcedev_areq->err = 0; + return ret; +}; + +static int start_sha_req(struct qcedev_control *podev) +{ + struct qcedev_async_req *qcedev_areq; + struct qce_sha_req sreq; + int ret = 0; + struct qcedev_handle 
*handle; + + /* start the command on the podev->active_command */ + qcedev_areq = podev->active_command; + handle = qcedev_areq->handle; + + switch (qcedev_areq->sha_op_req.alg) { + case QCEDEV_ALG_SHA1: + sreq.alg = QCE_HASH_SHA1; + break; + case QCEDEV_ALG_SHA256: + sreq.alg = QCE_HASH_SHA256; + break; + case QCEDEV_ALG_SHA1_HMAC: + if (podev->ce_support.sha_hmac) { + sreq.alg = QCE_HASH_SHA1_HMAC; + sreq.authkey = &handle->sha_ctxt.authkey[0]; + + } else { + sreq.alg = QCE_HASH_SHA1; + sreq.authkey = NULL; + } + break; + case QCEDEV_ALG_SHA256_HMAC: + if (podev->ce_support.sha_hmac) { + sreq.alg = QCE_HASH_SHA256_HMAC; + sreq.authkey = &handle->sha_ctxt.authkey[0]; + + } else { + sreq.alg = QCE_HASH_SHA256; + sreq.authkey = NULL; + } + break; + case QCEDEV_ALG_AES_CMAC: + sreq.alg = QCE_HASH_AES_CMAC; + sreq.authkey = &handle->sha_ctxt.authkey[0]; + sreq.authklen = qcedev_areq->sha_op_req.authklen; + break; + default: + break; + }; + + qcedev_areq->sha_req.cookie = handle; + + sreq.qce_cb = qcedev_sha_req_cb; + if (qcedev_areq->sha_op_req.alg != QCEDEV_ALG_AES_CMAC) { + sreq.auth_data[0] = handle->sha_ctxt.auth_data[0]; + sreq.auth_data[1] = handle->sha_ctxt.auth_data[1]; + sreq.auth_data[2] = handle->sha_ctxt.auth_data[2]; + sreq.auth_data[3] = handle->sha_ctxt.auth_data[3]; + sreq.digest = &handle->sha_ctxt.digest[0]; + sreq.first_blk = handle->sha_ctxt.first_blk; + sreq.last_blk = handle->sha_ctxt.last_blk; + } + sreq.size = qcedev_areq->sha_req.sreq.nbytes; + sreq.src = qcedev_areq->sha_req.sreq.src; + sreq.areq = (void *)&qcedev_areq->sha_req; + + ret = qce_process_sha_req(podev->qce, &sreq); + + if (ret) + qcedev_areq->err = -ENXIO; + else + qcedev_areq->err = 0; + return ret; +}; + +static int submit_req(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle) +{ + struct qcedev_control *podev; + unsigned long flags = 0; + int ret = 0; + struct qcedev_stat *pstat; + + qcedev_areq->err = 0; + podev = handle->cntl; + + if (podev->platform_support.ce_shared) { + ret = qcedev_lock_ce(podev); + if (ret) + return ret; + } + + spin_lock_irqsave(&podev->lock, flags); + + if (podev->active_command == NULL) { + podev->active_command = qcedev_areq; + if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) + ret = start_cipher_req(podev); + else + ret = start_sha_req(podev); + } else { + list_add_tail(&qcedev_areq->list, &podev->ready_commands); + } + + if (ret != 0) + podev->active_command = NULL; + + spin_unlock_irqrestore(&podev->lock, flags); + + if (ret == 0) + wait_for_completion(&qcedev_areq->complete); + + if (podev->platform_support.ce_shared) + ret = qcedev_unlock_ce(podev); + + if (ret) + qcedev_areq->err = -EIO; + + pstat = &_qcedev_stat[podev->pdev->id]; + if (qcedev_areq->op_type == QCEDEV_CRYPTO_OPER_CIPHER) { + switch (qcedev_areq->cipher_op_req.op) { + case QCEDEV_OPER_DEC: + if (qcedev_areq->err) + pstat->qcedev_dec_fail++; + else + pstat->qcedev_dec_success++; + break; + case QCEDEV_OPER_ENC: + if (qcedev_areq->err) + pstat->qcedev_enc_fail++; + else + pstat->qcedev_enc_success++; + break; + default: + break; + }; + } else { + if (qcedev_areq->err) + pstat->qcedev_sha_fail++; + else + pstat->qcedev_sha_success++; + } + + return qcedev_areq->err; +} + +static int qcedev_sha_init(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + struct qcedev_sha_ctxt *sha_ctxt = &handle->sha_ctxt; + + memset(sha_ctxt, 0, sizeof(struct qcedev_sha_ctxt)); + sha_ctxt->first_blk = 1; + + if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) || + (areq->sha_op_req.alg == 
QCEDEV_ALG_SHA1_HMAC)) { + memcpy(&sha_ctxt->digest[0], + &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE); + sha_ctxt->diglen = SHA1_DIGEST_SIZE; + } else { + if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA256) || + (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC)) { + memcpy(&sha_ctxt->digest[0], + &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctxt->diglen = SHA256_DIGEST_SIZE; + } + } + return 0; +} + + +static int qcedev_sha_update_max_xfer(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle) +{ + int err = 0; + int i = 0; + struct scatterlist sg_src[2]; + uint32_t total; + + uint8_t *user_src = NULL; + uint8_t *k_src = NULL; + uint8_t *k_buf_src = NULL; + uint8_t *k_align_src = NULL; + + uint32_t sha_pad_len = 0; + uint32_t trailing_buf_len = 0; + uint32_t t_buf = handle->sha_ctxt.trailing_buf_len; + uint32_t sha_block_size; + + total = qcedev_areq->sha_op_req.data_len + t_buf; + + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1) + sha_block_size = SHA1_BLOCK_SIZE; + else + sha_block_size = SHA256_BLOCK_SIZE; + + if (total <= sha_block_size) { + uint32_t len = qcedev_areq->sha_op_req.data_len; + + i = 0; + + k_src = &handle->sha_ctxt.trailing_buf[t_buf]; + + /* Copy data from user src(s) */ + while (len > 0) { + user_src = + (void __user *)qcedev_areq->sha_op_req.data[i].vaddr; + if (user_src && __copy_from_user(k_src, + (void __user *)user_src, + qcedev_areq->sha_op_req.data[i].len)) + return -EFAULT; + + len -= qcedev_areq->sha_op_req.data[i].len; + k_src += qcedev_areq->sha_op_req.data[i].len; + i++; + } + handle->sha_ctxt.trailing_buf_len = total; + + return 0; + } + + + k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2, + GFP_KERNEL); + if (k_buf_src == NULL) { + pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n", + __func__, (uint32_t)k_buf_src); + return -ENOMEM; + } + + k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src), + CACHE_LINE_SIZE); + k_src = k_align_src; + + /* check for trailing buffer from previous updates and append it */ + if (t_buf > 0) { + memcpy(k_src, &handle->sha_ctxt.trailing_buf[0], + t_buf); + k_src += t_buf; + } + + /* Copy data from user src(s) */ + user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr; + if (user_src && __copy_from_user(k_src, + (void __user *)user_src, + qcedev_areq->sha_op_req.data[0].len)) { + kfree(k_buf_src); + return -EFAULT; + } + k_src += qcedev_areq->sha_op_req.data[0].len; + for (i = 1; i < qcedev_areq->sha_op_req.entries; i++) { + user_src = (void __user *)qcedev_areq->sha_op_req.data[i].vaddr; + if (user_src && __copy_from_user(k_src, + (void __user *)user_src, + qcedev_areq->sha_op_req.data[i].len)) { + kfree(k_buf_src); + return -EFAULT; + } + k_src += qcedev_areq->sha_op_req.data[i].len; + } + + /* get new trailing buffer */ + sha_pad_len = ALIGN(total, CE_SHA_BLOCK_SIZE) - total; + trailing_buf_len = CE_SHA_BLOCK_SIZE - sha_pad_len; + + qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0]; + sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, + total-trailing_buf_len); + sg_mark_end(qcedev_areq->sha_req.sreq.src); + + qcedev_areq->sha_req.sreq.nbytes = total - trailing_buf_len; + + /* update sha_ctxt trailing buf content to new trailing buf */ + if (trailing_buf_len > 0) { + memset(&handle->sha_ctxt.trailing_buf[0], 0, 64); + memcpy(&handle->sha_ctxt.trailing_buf[0], + (k_src - trailing_buf_len), + trailing_buf_len); + } + handle->sha_ctxt.trailing_buf_len = trailing_buf_len; + + err = submit_req(qcedev_areq, handle); + + handle->sha_ctxt.last_blk = 0; 
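+	/*
+	 * The request has been submitted; from here on any further data
+	 * for this hash context is an intermediate block, so both block
+	 * flags are cleared (last_blk above, first_blk below).
+	 */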
+ handle->sha_ctxt.first_blk = 0; + + kfree(k_buf_src); + return err; +} + +static int qcedev_sha_update(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle) +{ + int err = 0; + int i = 0; + int j = 0; + int k = 0; + int num_entries = 0; + uint32_t total = 0; + + /* verify address src(s) */ + for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) + if (!access_ok(VERIFY_READ, + (void __user *)qcedev_areq->sha_op_req.data[i].vaddr, + qcedev_areq->sha_op_req.data[i].len)) + return -EFAULT; + + if (qcedev_areq->sha_op_req.data_len > QCE_MAX_OPER_DATA) { + + struct qcedev_sha_op_req *saved_req; + struct qcedev_sha_op_req req; + struct qcedev_sha_op_req *sreq = &qcedev_areq->sha_op_req; + + /* save the original req structure */ + saved_req = + kmalloc(sizeof(struct qcedev_sha_op_req), GFP_KERNEL); + if (saved_req == NULL) { + pr_err("%s:Can't Allocate mem:saved_req 0x%x\n", + __func__, (uint32_t)saved_req); + return -ENOMEM; + } + memcpy(&req, sreq, sizeof(struct qcedev_sha_op_req)); + memcpy(saved_req, sreq, sizeof(struct qcedev_sha_op_req)); + + i = 0; + /* Address 32 KB at a time */ + while ((i < req.entries) && (err == 0)) { + if (sreq->data[i].len > QCE_MAX_OPER_DATA) { + sreq->data[0].len = QCE_MAX_OPER_DATA; + if (i > 0) { + sreq->data[0].vaddr = + sreq->data[i].vaddr; + } + + sreq->data_len = QCE_MAX_OPER_DATA; + sreq->entries = 1; + + err = qcedev_sha_update_max_xfer(qcedev_areq, + handle); + + sreq->data[i].len = req.data[i].len - + QCE_MAX_OPER_DATA; + sreq->data[i].vaddr = req.data[i].vaddr + + QCE_MAX_OPER_DATA; + req.data[i].vaddr = sreq->data[i].vaddr; + req.data[i].len = sreq->data[i].len; + } else { + total = 0; + for (j = i; j < req.entries; j++) { + num_entries++; + if ((total + sreq->data[j].len) >= + QCE_MAX_OPER_DATA) { + sreq->data[j].len = + (QCE_MAX_OPER_DATA - total); + total = QCE_MAX_OPER_DATA; + break; + } + total += sreq->data[j].len; + } + + sreq->data_len = total; + if (i > 0) + for (k = 0; k < num_entries; k++) { + sreq->data[k].len = + sreq->data[i+k].len; + sreq->data[k].vaddr = + sreq->data[i+k].vaddr; + } + sreq->entries = num_entries; + + i = j; + err = qcedev_sha_update_max_xfer(qcedev_areq, + handle); + num_entries = 0; + + sreq->data[i].vaddr = req.data[i].vaddr + + sreq->data[i].len; + sreq->data[i].len = req.data[i].len - + sreq->data[i].len; + req.data[i].vaddr = sreq->data[i].vaddr; + req.data[i].len = sreq->data[i].len; + + if (sreq->data[i].len == 0) + i++; + } + } /* end of while ((i < req.entries) && (err == 0)) */ + + /* Restore the original req structure */ + for (i = 0; i < saved_req->entries; i++) { + sreq->data[i].len = saved_req->data[i].len; + sreq->data[i].vaddr = saved_req->data[i].vaddr; + } + sreq->entries = saved_req->entries; + sreq->data_len = saved_req->data_len; + kfree(saved_req); + } else + err = qcedev_sha_update_max_xfer(qcedev_areq, handle); + + return err; +} + +static int qcedev_sha_final(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle) +{ + int err = 0; + struct scatterlist sg_src; + uint32_t total; + + uint8_t *k_buf_src = NULL; + uint8_t *k_align_src = NULL; + + handle->sha_ctxt.first_blk = 0; + handle->sha_ctxt.last_blk = 1; + + total = handle->sha_ctxt.trailing_buf_len; + + if (total) { + k_buf_src = kmalloc(total + CACHE_LINE_SIZE * 2, + GFP_KERNEL); + if (k_buf_src == NULL) { + pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n", + __func__, (uint32_t)k_buf_src); + return -ENOMEM; + } + + k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src), + CACHE_LINE_SIZE); + 
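+		/*
+		 * Stage the residual bytes held over from previous updates
+		 * into the cache-line-aligned bounce buffer; they become the
+		 * final (possibly partial) block fed to the engine.
+		 */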
memcpy(k_align_src, &handle->sha_ctxt.trailing_buf[0], total); + } + handle->sha_ctxt.last_blk = 1; + handle->sha_ctxt.first_blk = 0; + + qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src; + sg_set_buf(qcedev_areq->sha_req.sreq.src, k_align_src, total); + sg_mark_end(qcedev_areq->sha_req.sreq.src); + + qcedev_areq->sha_req.sreq.nbytes = total; + + err = submit_req(qcedev_areq, handle); + + handle->sha_ctxt.first_blk = 0; + handle->sha_ctxt.last_blk = 0; + handle->sha_ctxt.auth_data[0] = 0; + handle->sha_ctxt.auth_data[1] = 0; + handle->sha_ctxt.trailing_buf_len = 0; + memset(&handle->sha_ctxt.trailing_buf[0], 0, 64); + + kfree(k_buf_src); + return err; +} + +static int qcedev_hash_cmac(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle) +{ + int err = 0; + int i = 0; + struct scatterlist sg_src[2]; + uint32_t total; + + uint8_t *user_src = NULL; + uint8_t *k_src = NULL; + uint8_t *k_buf_src = NULL; + + total = qcedev_areq->sha_op_req.data_len; + + /* verify address src(s) */ + for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) + if (!access_ok(VERIFY_READ, + (void __user *)qcedev_areq->sha_op_req.data[i].vaddr, + qcedev_areq->sha_op_req.data[i].len)) + return -EFAULT; + + /* Verify Source Address */ + if (!access_ok(VERIFY_READ, + (void __user *)qcedev_areq->sha_op_req.authkey, + qcedev_areq->sha_op_req.authklen)) + return -EFAULT; + if (__copy_from_user(&handle->sha_ctxt.authkey[0], + (void __user *)qcedev_areq->sha_op_req.authkey, + qcedev_areq->sha_op_req.authklen)) + return -EFAULT; + + + k_buf_src = kmalloc(total, GFP_KERNEL); + if (k_buf_src == NULL) { + pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n", + __func__, (uint32_t)k_buf_src); + return -ENOMEM; + } + + k_src = k_buf_src; + + /* Copy data from user src(s) */ + user_src = (void __user *)qcedev_areq->sha_op_req.data[0].vaddr; + for (i = 0; i < qcedev_areq->sha_op_req.entries; i++) { + user_src = + (void __user *)qcedev_areq->sha_op_req.data[i].vaddr; + if (user_src && __copy_from_user(k_src, (void __user *)user_src, + qcedev_areq->sha_op_req.data[i].len)) { + kfree(k_buf_src); + return -EFAULT; + } + k_src += qcedev_areq->sha_op_req.data[i].len; + } + + qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src[0]; + sg_set_buf(qcedev_areq->sha_req.sreq.src, k_buf_src, total); + sg_mark_end(qcedev_areq->sha_req.sreq.src); + + qcedev_areq->sha_req.sreq.nbytes = total; + handle->sha_ctxt.diglen = qcedev_areq->sha_op_req.diglen; + err = submit_req(qcedev_areq, handle); + + kfree(k_buf_src); + return err; +} + +static int qcedev_set_hmac_auth_key(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + int err = 0; + + if (areq->sha_op_req.authklen <= QCEDEV_MAX_KEY_SIZE) { + /* Verify Source Address */ + if (!access_ok(VERIFY_READ, + (void __user *)areq->sha_op_req.authkey, + areq->sha_op_req.authklen)) + return -EFAULT; + if (__copy_from_user(&handle->sha_ctxt.authkey[0], + (void __user *)areq->sha_op_req.authkey, + areq->sha_op_req.authklen)) + return -EFAULT; + } else { + struct qcedev_async_req authkey_areq; + + init_completion(&authkey_areq.complete); + + authkey_areq.sha_op_req.entries = 1; + authkey_areq.sha_op_req.data[0].vaddr = + areq->sha_op_req.authkey; + authkey_areq.sha_op_req.data[0].len = areq->sha_op_req.authklen; + authkey_areq.sha_op_req.data_len = areq->sha_op_req.authklen; + authkey_areq.sha_op_req.diglen = 0; + memset(&authkey_areq.sha_op_req.digest[0], 0, + QCEDEV_MAX_SHA_DIGEST); + if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) + 
authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA1; + if (areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) + authkey_areq.sha_op_req.alg = QCEDEV_ALG_SHA256; + + authkey_areq.op_type = QCEDEV_CRYPTO_OPER_SHA; + + qcedev_sha_init(&authkey_areq, handle); + err = qcedev_sha_update(&authkey_areq, handle); + if (!err) + err = qcedev_sha_final(&authkey_areq, handle); + else + return err; + memcpy(&handle->sha_ctxt.authkey[0], + &handle->sha_ctxt.digest[0], + handle->sha_ctxt.diglen); + } + return err; +} + +static int qcedev_hmac_get_ohash(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle) +{ + int err = 0; + struct scatterlist sg_src; + uint8_t *k_src = NULL; + uint32_t sha_block_size = 0; + uint32_t sha_digest_size = 0; + + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) { + sha_digest_size = SHA1_DIGEST_SIZE; + sha_block_size = SHA1_BLOCK_SIZE; + } else { + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) { + sha_digest_size = SHA256_DIGEST_SIZE; + sha_block_size = SHA256_BLOCK_SIZE; + } + } + k_src = kmalloc(sha_block_size, GFP_KERNEL); + if (k_src == NULL) { + pr_err("%s: Can't Allocate memory: k_src 0x%x\n", + __func__, (uint32_t)k_src); + return -ENOMEM; + } + + /* check for trailing buffer from previous updates and append it */ + memcpy(k_src, &handle->sha_ctxt.trailing_buf[0], + handle->sha_ctxt.trailing_buf_len); + + qcedev_areq->sha_req.sreq.src = (struct scatterlist *) &sg_src; + sg_set_buf(qcedev_areq->sha_req.sreq.src, k_src, sha_block_size); + sg_mark_end(qcedev_areq->sha_req.sreq.src); + + qcedev_areq->sha_req.sreq.nbytes = sha_block_size; + memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size); + memcpy(&handle->sha_ctxt.trailing_buf[0], &handle->sha_ctxt.digest[0], + sha_digest_size); + handle->sha_ctxt.trailing_buf_len = sha_digest_size; + + handle->sha_ctxt.first_blk = 1; + handle->sha_ctxt.last_blk = 0; + handle->sha_ctxt.auth_data[0] = 0; + handle->sha_ctxt.auth_data[1] = 0; + + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) { + memcpy(&handle->sha_ctxt.digest[0], + &_std_init_vector_sha1_uint8[0], SHA1_DIGEST_SIZE); + handle->sha_ctxt.diglen = SHA1_DIGEST_SIZE; + } + + if (qcedev_areq->sha_op_req.alg == QCEDEV_ALG_SHA256_HMAC) { + memcpy(&handle->sha_ctxt.digest[0], + &_std_init_vector_sha256_uint8[0], SHA256_DIGEST_SIZE); + handle->sha_ctxt.diglen = SHA256_DIGEST_SIZE; + } + err = submit_req(qcedev_areq, handle); + + handle->sha_ctxt.last_blk = 0; + handle->sha_ctxt.first_blk = 0; + + kfree(k_src); + return err; +} + +static int qcedev_hmac_update_iokey(struct qcedev_async_req *areq, + struct qcedev_handle *handle, bool ikey) +{ + int i; + uint32_t constant; + uint32_t sha_block_size; + + if (ikey) + constant = 0x36; + else + constant = 0x5c; + + if (areq->sha_op_req.alg == QCEDEV_ALG_SHA1_HMAC) + sha_block_size = SHA1_BLOCK_SIZE; + else + sha_block_size = SHA256_BLOCK_SIZE; + + memset(&handle->sha_ctxt.trailing_buf[0], 0, sha_block_size); + for (i = 0; i < sha_block_size; i++) + handle->sha_ctxt.trailing_buf[i] = + (handle->sha_ctxt.authkey[i] ^ constant); + + handle->sha_ctxt.trailing_buf_len = sha_block_size; + return 0; +} + +static int qcedev_hmac_init(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + int err; + struct qcedev_control *podev = handle->cntl; + + qcedev_sha_init(areq, handle); + err = qcedev_set_hmac_auth_key(areq, handle); + if (err) + return err; + if (!podev->ce_support.sha_hmac) + qcedev_hmac_update_iokey(areq, handle, true); + return 0; +} + +static int qcedev_hmac_final(struct 
qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + int err; + struct qcedev_control *podev = handle->cntl; + + err = qcedev_sha_final(areq, handle); + if (podev->ce_support.sha_hmac) + return err; + + qcedev_hmac_update_iokey(areq, handle, false); + err = qcedev_hmac_get_ohash(areq, handle); + if (err) + return err; + err = qcedev_sha_final(areq, handle); + + return err; +} + +static int qcedev_hash_init(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) || + (areq->sha_op_req.alg == QCEDEV_ALG_SHA256)) + return qcedev_sha_init(areq, handle); + else + return qcedev_hmac_init(areq, handle); +} + +static int qcedev_hash_update(struct qcedev_async_req *qcedev_areq, + struct qcedev_handle *handle) +{ + return qcedev_sha_update(qcedev_areq, handle); +} + +static int qcedev_hash_final(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + if ((areq->sha_op_req.alg == QCEDEV_ALG_SHA1) || + (areq->sha_op_req.alg == QCEDEV_ALG_SHA256)) + return qcedev_sha_final(areq, handle); + else + return qcedev_hmac_final(areq, handle); +} + +#ifdef CONFIG_ANDROID_PMEM +static int qcedev_pmem_ablk_cipher_max_xfer(struct qcedev_async_req *areq, + struct qcedev_handle *handle) +{ + int i = 0; + int err = 0; + struct scatterlist *sg_src = NULL; + struct scatterlist *sg_dst = NULL; + struct scatterlist *sg_ndex = NULL; + struct file *file_src = NULL; + struct file *file_dst = NULL; + unsigned long paddr; + unsigned long kvaddr; + unsigned long len; + + sg_src = kmalloc((sizeof(struct scatterlist) * + areq->cipher_op_req.entries), GFP_KERNEL); + if (sg_src == NULL) { + pr_err("%s: Can't Allocate memory:sg_src 0x%x\n", + __func__, (uint32_t)sg_src); + return -ENOMEM; + + } + memset(sg_src, 0, (sizeof(struct scatterlist) * + areq->cipher_op_req.entries)); + sg_ndex = sg_src; + areq->cipher_req.creq.src = sg_src; + + /* address src */ + get_pmem_file(areq->cipher_op_req.pmem.fd_src, &paddr, + &kvaddr, &len, &file_src); + + for (i = 0; i < areq->cipher_op_req.entries; i++) { + sg_set_buf(sg_ndex, + ((uint8_t *)(areq->cipher_op_req.pmem.src[i].offset) + kvaddr), + areq->cipher_op_req.pmem.src[i].len); + sg_ndex++; + } + sg_mark_end(--sg_ndex); + + for (i = 0; i < areq->cipher_op_req.entries; i++) + areq->cipher_op_req.pmem.src[i].offset += (uint32_t)paddr; + + /* address dst */ + /* If not place encryption/decryption */ + if (areq->cipher_op_req.in_place_op != 1) { + sg_dst = kmalloc((sizeof(struct scatterlist) * + areq->cipher_op_req.entries), GFP_KERNEL); + if (sg_dst == NULL) { + pr_err("%s: Can't Allocate memory: sg_dst 0x%x\n", + __func__, (uint32_t)sg_dst); + return -ENOMEM; + } + memset(sg_dst, 0, (sizeof(struct scatterlist) * + areq->cipher_op_req.entries)); + areq->cipher_req.creq.dst = sg_dst; + sg_ndex = sg_dst; + + get_pmem_file(areq->cipher_op_req.pmem.fd_dst, &paddr, + &kvaddr, &len, &file_dst); + for (i = 0; i < areq->cipher_op_req.entries; i++) + sg_set_buf(sg_ndex++, + ((uint8_t *)(areq->cipher_op_req.pmem.dst[i].offset) + + kvaddr), areq->cipher_op_req.pmem.dst[i].len); + sg_mark_end(--sg_ndex); + + for (i = 0; i < areq->cipher_op_req.entries; i++) + areq->cipher_op_req.pmem.dst[i].offset += + (uint32_t)paddr; + } else { + areq->cipher_req.creq.dst = sg_src; + for (i = 0; i < areq->cipher_op_req.entries; i++) { + areq->cipher_op_req.pmem.dst[i].offset = + areq->cipher_op_req.pmem.src[i].offset; + areq->cipher_op_req.pmem.dst[i].len = + areq->cipher_op_req.pmem.src[i].len; + } + } + + areq->cipher_req.creq.nbytes = 
areq->cipher_op_req.data_len;
+	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+
+	err = submit_req(areq, handle);
+
+	kfree(sg_src);
+	kfree(sg_dst);
+
+	if (file_dst)
+		put_pmem_file(file_dst);
+	if (file_src)
+		put_pmem_file(file_src);
+
+	return err;
+};
+
+
+static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
+						struct qcedev_handle *handle)
+{
+	int err = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+	struct qcedev_cipher_op_req *saved_req;
+	struct qcedev_cipher_op_req *creq = &qcedev_areq->cipher_op_req;
+
+	saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL);
+	if (saved_req == NULL) {
+		pr_err("%s: Can't Allocate mem: saved_req 0x%x\n",
+			__func__, (uint32_t)saved_req);
+		return -ENOMEM;
+	}
+	memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req));
+
+	if (qcedev_areq->cipher_op_req.data_len > QCE_MAX_OPER_DATA) {
+
+		struct qcedev_cipher_op_req req;
+
+		/* save the original req structure */
+		memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req));
+
+		i = 0;
+		/* Address 32 KB at a time */
+		while ((i < req.entries) && (err == 0)) {
+			if (creq->pmem.src[i].len > QCE_MAX_OPER_DATA) {
+				creq->pmem.src[0].len = QCE_MAX_OPER_DATA;
+				if (i > 0) {
+					creq->pmem.src[0].offset =
+						creq->pmem.src[i].offset;
+				}
+
+				creq->data_len = QCE_MAX_OPER_DATA;
+				creq->entries = 1;
+
+				err =
+				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
+								handle);
+
+				creq->pmem.src[i].len = req.pmem.src[i].len -
+							QCE_MAX_OPER_DATA;
+				creq->pmem.src[i].offset =
+						req.pmem.src[i].offset +
+						QCE_MAX_OPER_DATA;
+				req.pmem.src[i].offset =
+						creq->pmem.src[i].offset;
+				req.pmem.src[i].len = creq->pmem.src[i].len;
+			} else {
+				total = 0;
+				for (j = i; j < req.entries; j++) {
+					num_entries++;
+					if ((total + creq->pmem.src[j].len)
+							>= QCE_MAX_OPER_DATA) {
+						creq->pmem.src[j].len =
+							QCE_MAX_OPER_DATA -
+							total;
+						total = QCE_MAX_OPER_DATA;
+						break;
+					}
+					total += creq->pmem.src[j].len;
+				}
+
+				creq->data_len = total;
+				if (i > 0)
+					for (k = 0; k < num_entries; k++) {
+						creq->pmem.src[k].len =
+						creq->pmem.src[i+k].len;
+						creq->pmem.src[k].offset =
+						creq->pmem.src[i+k].offset;
+					}
+				creq->entries = num_entries;
+
+				i = j;
+				err =
+				qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq,
+								handle);
+				num_entries = 0;
+
+				creq->pmem.src[i].offset =
+						req.pmem.src[i].offset +
+						creq->pmem.src[i].len;
+				creq->pmem.src[i].len =
+						req.pmem.src[i].len -
+						creq->pmem.src[i].len;
+				req.pmem.src[i].offset =
+						creq->pmem.src[i].offset;
+				req.pmem.src[i].len =
+						creq->pmem.src[i].len;
+
+				if (creq->pmem.src[i].len == 0)
+					i++;
+			}
+
+		} /* end of while ((i < req.entries) && (err == 0)) */
+
+	} else
+		err = qcedev_pmem_ablk_cipher_max_xfer(qcedev_areq, handle);
+
+	/* Restore the original req structure */
+	for (i = 0; i < saved_req->entries; i++) {
+		creq->pmem.src[i].len = saved_req->pmem.src[i].len;
+		creq->pmem.src[i].offset = saved_req->pmem.src[i].offset;
+	}
+	creq->entries = saved_req->entries;
+	creq->data_len = saved_req->data_len;
+	kfree(saved_req);
+
+	return err;
+
+}
+#else
+static int qcedev_pmem_ablk_cipher(struct qcedev_async_req *qcedev_areq,
+						struct qcedev_handle *handle)
+{
+	return -EPERM;
+}
+#endif /* CONFIG_ANDROID_PMEM */
+
+static int qcedev_vbuf_ablk_cipher_max_xfer(struct qcedev_async_req *areq,
+				int *di, struct qcedev_handle *handle,
+				uint8_t *k_align_src)
+{
+	int err = 0;
+	int i = 0;
+	int dst_i = *di;
+	struct scatterlist sg_src;
+	uint32_t byteoffset = 0;
+	uint8_t *user_src = NULL;
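+	/*
+	 * The caller hands in a cache-line-aligned bounce buffer
+	 * (k_align_src); user data is gathered into it, ciphered in
+	 * place, then scattered back out to the destination vbuf
+	 * entries. k_align_dst (below) remembers the start of the
+	 * buffer for the copy-out pass.
+	 */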
+	uint8_t *k_align_dst = k_align_src;
+	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+
+	user_src = (void __user *)areq->cipher_op_req.vbuf.src[0].vaddr;
+	if (user_src && __copy_from_user((k_align_src + byteoffset),
+				(void __user *)user_src,
+				areq->cipher_op_req.vbuf.src[0].len))
+		return -EFAULT;
+
+	k_align_src += areq->cipher_op_req.vbuf.src[0].len;
+
+	for (i = 1; i < areq->cipher_op_req.entries; i++) {
+		user_src =
+			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr;
+		if (user_src && __copy_from_user(k_align_src,
+					(void __user *)user_src,
+					areq->cipher_op_req.vbuf.src[i].len)) {
+			return -EFAULT;
+		}
+		k_align_src += areq->cipher_op_req.vbuf.src[i].len;
+	}
+
+	/* restore src beginning */
+	k_align_src = k_align_dst;
+	areq->cipher_op_req.data_len += byteoffset;
+
+	areq->cipher_req.creq.src = (struct scatterlist *) &sg_src;
+	areq->cipher_req.creq.dst = (struct scatterlist *) &sg_src;
+
+	/* In place encryption/decryption */
+	sg_set_buf(areq->cipher_req.creq.src,
+					k_align_dst,
+					areq->cipher_op_req.data_len);
+	sg_mark_end(areq->cipher_req.creq.src);
+
+	areq->cipher_req.creq.nbytes = areq->cipher_op_req.data_len;
+	areq->cipher_req.creq.info = areq->cipher_op_req.iv;
+	areq->cipher_op_req.entries = 1;
+
+	err = submit_req(areq, handle);
+
+	/* copy data to destination buffer */
+	creq->data_len -= byteoffset;
+
+	while (creq->data_len > 0) {
+		if (creq->vbuf.dst[dst_i].len <= creq->data_len) {
+			if (err == 0 && __copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+					(k_align_dst + byteoffset),
+					creq->vbuf.dst[dst_i].len))
+				return -EFAULT;
+
+			k_align_dst += creq->vbuf.dst[dst_i].len +
+						byteoffset;
+			creq->data_len -= creq->vbuf.dst[dst_i].len;
+			dst_i++;
+		} else {
+			if (err == 0 && __copy_to_user(
+				(void __user *)creq->vbuf.dst[dst_i].vaddr,
+				(k_align_dst + byteoffset),
+				creq->data_len))
+				return -EFAULT;
+
+			k_align_dst += creq->data_len;
+			creq->vbuf.dst[dst_i].len -= creq->data_len;
+			creq->vbuf.dst[dst_i].vaddr += creq->data_len;
+			creq->data_len = 0;
+		}
+	}
+	*di = dst_i;
+
+	return err;
+};
+
+static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
+						struct qcedev_handle *handle)
+{
+	int err = 0;
+	int di = 0;
+	int i = 0;
+	int j = 0;
+	int k = 0;
+	uint32_t byteoffset = 0;
+	int num_entries = 0;
+	uint32_t total = 0;
+	uint32_t len;
+	uint8_t *k_buf_src = NULL;
+	uint8_t *k_align_src = NULL;
+	uint32_t max_data_xfer;
+	struct qcedev_cipher_op_req *saved_req;
+	struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
+
+	/* Verify source addresses */
+	for (i = 0; i < areq->cipher_op_req.entries; i++)
+		if (!access_ok(VERIFY_READ,
+			(void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
+					areq->cipher_op_req.vbuf.src[i].len))
+			return -EFAULT;
+
+	/* Verify destination addresses; they are written to below */
+	if (areq->cipher_op_req.in_place_op != 1)
+		for (i = 0; i < areq->cipher_op_req.entries; i++)
+			if (!access_ok(VERIFY_WRITE,
+			(void __user *)areq->cipher_op_req.vbuf.dst[i].vaddr,
+					areq->cipher_op_req.vbuf.dst[i].len))
+				return -EFAULT;
+
+	if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
+		byteoffset = areq->cipher_op_req.byteoffset;
+	k_buf_src = kmalloc(QCE_MAX_OPER_DATA + CACHE_LINE_SIZE * 2,
+				GFP_KERNEL);
+	if (k_buf_src == NULL) {
+		pr_err("%s: Can't Allocate memory: k_buf_src 0x%x\n",
+			__func__, (uint32_t)k_buf_src);
+		return -ENOMEM;
+	}
+	k_align_src = (uint8_t *) ALIGN(((unsigned int)k_buf_src),
+							CACHE_LINE_SIZE);
+	max_data_xfer = QCE_MAX_OPER_DATA 
- byteoffset; + + saved_req = kmalloc(sizeof(struct qcedev_cipher_op_req), GFP_KERNEL); + if (saved_req == NULL) { + pr_err("%s: Can't Allocate memory:saved_req 0x%x\n", + __func__, (uint32_t)saved_req); + kfree(k_buf_src); + return -ENOMEM; + + } + memcpy(saved_req, creq, sizeof(struct qcedev_cipher_op_req)); + + if (areq->cipher_op_req.data_len > max_data_xfer) { + struct qcedev_cipher_op_req req; + + /* save the original req structure */ + memcpy(&req, creq, sizeof(struct qcedev_cipher_op_req)); + + i = 0; + /* Address 32 KB at a time */ + while ((i < req.entries) && (err == 0)) { + if (creq->vbuf.src[i].len > max_data_xfer) { + creq->vbuf.src[0].len = max_data_xfer; + if (i > 0) { + creq->vbuf.src[0].vaddr = + creq->vbuf.src[i].vaddr; + } + + creq->data_len = max_data_xfer; + creq->entries = 1; + + err = qcedev_vbuf_ablk_cipher_max_xfer(areq, + &di, handle, k_align_src); + if (err < 0) { + kfree(k_buf_src); + kfree(saved_req); + return err; + } + + creq->vbuf.src[i].len = req.vbuf.src[i].len - + max_data_xfer; + creq->vbuf.src[i].vaddr = + req.vbuf.src[i].vaddr + + max_data_xfer; + req.vbuf.src[i].vaddr = + creq->vbuf.src[i].vaddr; + req.vbuf.src[i].len = creq->vbuf.src[i].len; + + } else { + total = areq->cipher_op_req.byteoffset; + for (j = i; j < req.entries; j++) { + num_entries++; + if ((total + creq->vbuf.src[j].len) + >= max_data_xfer) { + creq->vbuf.src[j].len = + max_data_xfer - total; + total = max_data_xfer; + break; + } + total += creq->vbuf.src[j].len; + } + + creq->data_len = total; + if (i > 0) + for (k = 0; k < num_entries; k++) { + creq->vbuf.src[k].len = + creq->vbuf.src[i+k].len; + creq->vbuf.src[k].vaddr = + creq->vbuf.src[i+k].vaddr; + } + creq->entries = num_entries; + + i = j; + err = qcedev_vbuf_ablk_cipher_max_xfer(areq, + &di, handle, k_align_src); + if (err < 0) { + kfree(k_buf_src); + kfree(saved_req); + return err; + } + + num_entries = 0; + areq->cipher_op_req.byteoffset = 0; + + creq->vbuf.src[i].vaddr = req.vbuf.src[i].vaddr + + creq->vbuf.src[i].len; + creq->vbuf.src[i].len = req.vbuf.src[i].len - + creq->vbuf.src[i].len; + + req.vbuf.src[i].vaddr = + creq->vbuf.src[i].vaddr; + req.vbuf.src[i].len = creq->vbuf.src[i].len; + + if (creq->vbuf.src[i].len == 0) + i++; + } + + areq->cipher_op_req.byteoffset = 0; + max_data_xfer = QCE_MAX_OPER_DATA; + byteoffset = 0; + + } /* end of while ((i < req.entries) && (err == 0)) */ + } else + err = qcedev_vbuf_ablk_cipher_max_xfer(areq, &di, handle, + k_align_src); + + /* Restore the original req structure */ + for (i = 0; i < saved_req->entries; i++) { + creq->vbuf.src[i].len = saved_req->vbuf.src[i].len; + creq->vbuf.src[i].vaddr = saved_req->vbuf.src[i].vaddr; + } + for (len = 0, i = 0; len < saved_req->data_len; i++) { + creq->vbuf.dst[i].len = saved_req->vbuf.dst[i].len; + creq->vbuf.dst[i].vaddr = saved_req->vbuf.dst[i].vaddr; + len += saved_req->vbuf.dst[i].len; + } + creq->entries = saved_req->entries; + creq->data_len = saved_req->data_len; + creq->byteoffset = saved_req->byteoffset; + + kfree(saved_req); + kfree(k_buf_src); + return err; + +} + +static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req, + struct qcedev_control *podev) +{ + if ((req->entries == 0) || (req->data_len == 0)) + goto error; + if ((req->alg >= QCEDEV_ALG_LAST) || + (req->mode >= QCEDEV_AES_DES_MODE_LAST)) + goto error; + if (req->alg == QCEDEV_ALG_AES) { + if ((req->mode == QCEDEV_AES_MODE_XTS) && + (!podev->ce_support.aes_xts)) + goto error; + /* if intending to use HW key make sure key fields are set + * correctly 
and that the HW key is actually supported on this target
+		 */
+		if (req->encklen == 0) {
+			int i;
+			for (i = 0; i < QCEDEV_MAX_KEY_SIZE; i++)
+				if (req->enckey[i])
+					goto error;
+			if ((req->op != QCEDEV_OPER_ENC_NO_KEY) &&
+				(req->op != QCEDEV_OPER_DEC_NO_KEY))
+				if (!podev->platform_support.hw_key_support)
+					goto error;
+		} else {
+			if (req->encklen == QCEDEV_AES_KEY_192) {
+				if (!podev->ce_support.aes_key_192)
+					goto error;
+			} else {
+				/* if not using HW key make sure key
+				 * length is valid
+				 */
+				if (!((req->encklen == QCEDEV_AES_KEY_128) ||
+					(req->encklen == QCEDEV_AES_KEY_256)))
+					goto error;
+			}
+		}
+	}
+	/* if using a byteoffset, make sure it is CTR mode using vbuf */
+	if (req->byteoffset) {
+		if (req->mode != QCEDEV_AES_MODE_CTR)
+			goto error;
+		else { /* if using CTR mode make sure not using PMEM */
+			if (req->use_pmem)
+				goto error;
+		}
+	}
+	/* PMEM operations are only supported in-place */
+	if (req->use_pmem) {
+		if (!req->in_place_op)
+			goto error;
+	}
+	/* ECB mode requires zero ivlen; all other modes require an IV */
+	if (req->ivlen != 0) {
+		if ((req->mode == QCEDEV_AES_MODE_ECB) ||
+			(req->mode == QCEDEV_DES_MODE_ECB))
+			goto error;
+	} else {
+		if ((req->mode != QCEDEV_AES_MODE_ECB) &&
+			(req->mode != QCEDEV_DES_MODE_ECB))
+			goto error;
+	}
+
+	return 0;
+error:
+	return -EINVAL;
+
+}
+
+static int qcedev_check_sha_params(struct qcedev_sha_op_req *req,
+				struct qcedev_control *podev)
+{
+	if ((req->alg == QCEDEV_ALG_AES_CMAC) &&
+			(!podev->ce_support.cmac))
+		goto sha_error;
+
+	if ((req->entries == 0) || (req->data_len == 0))
+		goto sha_error;
+
+	if (req->alg >= QCEDEV_ALG_SHA_ALG_LAST)
+		goto sha_error;
+
+	return 0;
+sha_error:
+	return -EINVAL;
+}
+
+static long qcedev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
+{
+	int err = 0;
+	struct qcedev_handle *handle;
+	struct qcedev_control *podev;
+	struct qcedev_async_req qcedev_areq;
+	struct qcedev_stat *pstat;
+
+	handle = file->private_data;
+	podev = handle->cntl;
+	qcedev_areq.handle = handle;
+	if (podev == NULL || podev->magic != QCEDEV_MAGIC) {
+		pr_err("%s: invalid handle %p\n",
+			__func__, podev);
+		return -ENOENT;
+	}
+
+	/* Verify user arguments. 
*/ + if (_IOC_TYPE(cmd) != QCEDEV_IOC_MAGIC) + return -ENOTTY; + + init_completion(&qcedev_areq.complete); + pstat = &_qcedev_stat[podev->pdev->id]; + + switch (cmd) { + case QCEDEV_IOCTL_LOCK_CE: + if (podev->platform_support.ce_shared) + err = qcedev_lock_ce(podev); + else + err = -ENOTTY; + break; + case QCEDEV_IOCTL_UNLOCK_CE: + if (podev->platform_support.ce_shared) + err = qcedev_unlock_ce(podev); + else + err = -ENOTTY; + break; + case QCEDEV_IOCTL_ENC_REQ: + case QCEDEV_IOCTL_DEC_REQ: + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qcedev_cipher_op_req))) + return -EFAULT; + + if (__copy_from_user(&qcedev_areq.cipher_op_req, + (void __user *)arg, + sizeof(struct qcedev_cipher_op_req))) + return -EFAULT; + qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_CIPHER; + + if (qcedev_check_cipher_params(&qcedev_areq.cipher_op_req, + podev)) + return -EINVAL; + + if (qcedev_areq.cipher_op_req.use_pmem) + err = qcedev_pmem_ablk_cipher(&qcedev_areq, handle); + else + err = qcedev_vbuf_ablk_cipher(&qcedev_areq, handle); + if (err) + return err; + if (__copy_to_user((void __user *)arg, + &qcedev_areq.cipher_op_req, + sizeof(struct qcedev_cipher_op_req))) + return -EFAULT; + break; + + case QCEDEV_IOCTL_SHA_INIT_REQ: + + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + + if (__copy_from_user(&qcedev_areq.sha_op_req, + (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) + return -EINVAL; + qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA; + err = qcedev_hash_init(&qcedev_areq, handle); + if (err) + return err; + if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + break; + case QCEDEV_IOCTL_GET_CMAC_REQ: + if (!podev->ce_support.cmac) + return -ENOTTY; + case QCEDEV_IOCTL_SHA_UPDATE_REQ: + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + + if (__copy_from_user(&qcedev_areq.sha_op_req, + (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) + return -EINVAL; + qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA; + + if (qcedev_areq.sha_op_req.alg == QCEDEV_ALG_AES_CMAC) { + err = qcedev_hash_cmac(&qcedev_areq, handle); + if (err) + return err; + } else { + err = qcedev_hash_update(&qcedev_areq, handle); + if (err) + return err; + } + + memcpy(&qcedev_areq.sha_op_req.digest[0], + &handle->sha_ctxt.digest[0], + handle->sha_ctxt.diglen); + if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + break; + + case QCEDEV_IOCTL_SHA_FINAL_REQ: + + if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + + if (__copy_from_user(&qcedev_areq.sha_op_req, + (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) + return -EINVAL; + qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA; + err = qcedev_hash_final(&qcedev_areq, handle); + if (err) + return err; + qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen; + memcpy(&qcedev_areq.sha_op_req.digest[0], + &handle->sha_ctxt.digest[0], + handle->sha_ctxt.diglen); + if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + break; + + case QCEDEV_IOCTL_GET_SHA_REQ: + + 
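+		/*
+		 * GET_SHA_REQ runs init, update and final back to back, so
+		 * a caller can digest a buffer with a single ioctl. A
+		 * minimal, hypothetical user-space sketch (after opening
+		 * the qcedev misc device):
+		 *
+		 *	struct qcedev_sha_op_req req = { 0 };
+		 *	req.alg = QCEDEV_ALG_SHA256;
+		 *	req.entries = 1;
+		 *	req.data[0].vaddr = buf;
+		 *	req.data[0].len = len;
+		 *	req.data_len = len;
+		 *	ioctl(fd, QCEDEV_IOCTL_GET_SHA_REQ, &req);
+		 *	// digest returned in req.digest[0..req.diglen)
+		 */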
if (!access_ok(VERIFY_WRITE, (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + + if (__copy_from_user(&qcedev_areq.sha_op_req, + (void __user *)arg, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + if (qcedev_check_sha_params(&qcedev_areq.sha_op_req, podev)) + return -EINVAL; + qcedev_areq.op_type = QCEDEV_CRYPTO_OPER_SHA; + qcedev_hash_init(&qcedev_areq, handle); + err = qcedev_hash_update(&qcedev_areq, handle); + if (err) + return err; + err = qcedev_hash_final(&qcedev_areq, handle); + if (err) + return err; + qcedev_areq.sha_op_req.diglen = handle->sha_ctxt.diglen; + memcpy(&qcedev_areq.sha_op_req.digest[0], + &handle->sha_ctxt.digest[0], + handle->sha_ctxt.diglen); + if (__copy_to_user((void __user *)arg, &qcedev_areq.sha_op_req, + sizeof(struct qcedev_sha_op_req))) + return -EFAULT; + break; + + default: + return -ENOTTY; + } + + return err; +} + +static int qcedev_probe(struct platform_device *pdev) +{ + void *handle = NULL; + int rc = 0; + struct qcedev_control *podev; + struct msm_ce_hw_support *platform_support; + + if (pdev->id >= MAX_QCE_DEVICE) { + pr_err("%s: device id %d exceeds allowed %d\n", + __func__, pdev->id, MAX_QCE_DEVICE); + return -ENOENT; + } + podev = &qce_dev[pdev->id]; + + platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data; + podev->platform_support.ce_shared = platform_support->ce_shared; + podev->platform_support.shared_ce_resource = + platform_support->shared_ce_resource; + podev->platform_support.hw_key_support = + platform_support->hw_key_support; + podev->platform_support.bus_scale_table = + platform_support->bus_scale_table; + podev->ce_lock_count = 0; + podev->high_bw_req_count = 0; + INIT_LIST_HEAD(&podev->ready_commands); + podev->active_command = NULL; + + spin_lock_init(&podev->lock); + + tasklet_init(&podev->done_tasklet, req_done, (unsigned long)podev); + + /* open qce */ + handle = qce_open(pdev, &rc); + if (handle == NULL) { + platform_set_drvdata(pdev, NULL); + return rc; + } + + podev->qce = handle; + podev->pdev = pdev; + platform_set_drvdata(pdev, podev); + qce_hw_support(podev->qce, &podev->ce_support); + + if (podev->platform_support.bus_scale_table != NULL) { + podev->bus_scale_handle = + msm_bus_scale_register_client( + (struct msm_bus_scale_pdata *) + podev->platform_support.bus_scale_table); + if (!podev->bus_scale_handle) { + printk(KERN_ERR "%s not able to get bus scale\n", + __func__); + rc = -ENOMEM; + goto err; + } + } + rc = misc_register(&podev->miscdevice); + + if (rc >= 0) + return 0; + else + if (podev->platform_support.bus_scale_table != NULL) + msm_bus_scale_unregister_client( + podev->bus_scale_handle); +err: + + if (handle) + qce_close(handle); + platform_set_drvdata(pdev, NULL); + podev->qce = NULL; + podev->pdev = NULL; + return rc; +}; + +static int qcedev_remove(struct platform_device *pdev) +{ + struct qcedev_control *podev; + + podev = platform_get_drvdata(pdev); + if (!podev) + return 0; + if (podev->qce) + qce_close(podev->qce); + + if (podev->platform_support.bus_scale_table != NULL) + msm_bus_scale_unregister_client(podev->bus_scale_handle); + + if (podev->miscdevice.minor != MISC_DYNAMIC_MINOR) + misc_deregister(&podev->miscdevice); + tasklet_kill(&podev->done_tasklet); + return 0; +}; + +static struct platform_driver qcedev_plat_driver = { + .probe = qcedev_probe, + .remove = qcedev_remove, + .driver = { + .name = "qce", + .owner = THIS_MODULE, + }, +}; + +static int _disp_stats(int id) +{ + struct qcedev_stat *pstat; + int len = 0; + + pstat = 
&_qcedev_stat[id];
+	len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1,
+			"\nQualcomm QCE dev driver %d Statistics:\n",
+			id + 1);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			" Encryption operation success : %d\n",
+			pstat->qcedev_enc_success);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			" Encryption operation fail : %d\n",
+			pstat->qcedev_enc_fail);
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			" Decryption operation success : %d\n",
+			pstat->qcedev_dec_success);
+
+	len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1,
+			" Decryption operation fail : %d\n",
+			pstat->qcedev_dec_fail);
+
+	return len;
+}
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc = -EINVAL;
+	int qcedev = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcedev);
+
+	/* cap the copy at the caller's buffer size, not at len */
+	rc = simple_read_from_buffer((void __user *) buf, count,
+			ppos, (void *) _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+
+	int qcedev = *((int *) file->private_data);
+
+	memset((char *)&_qcedev_stat[qcedev], 0, sizeof(struct qcedev_stat));
+	return count;
+};
+
+static const struct file_operations _debug_stats_ops = {
+	.open = _debug_stats_open,
+	.read = _debug_stats_read,
+	.write = _debug_stats_write,
+};
+
+static int _qcedev_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	int i;
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcedev", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcedev debugfs_create_dir fail, error %ld\n",
+			PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	for (i = 0; i < MAX_QCE_DEVICE; i++) {
+		snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
+		_debug_qcedev[i] = i;
+		dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcedev[i], &_debug_stats_ops);
+		if (dent == NULL) {
+			/* debugfs_create_file() returns NULL on failure
+			 * here, so PTR_ERR() would be 0; report -ENOMEM.
+			 */
+			pr_err("qcedev debugfs_create_file fail\n");
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int qcedev_init(void)
+{
+	int rc;
+
+	rc = _qcedev_debug_init();
+	if (rc)
+		return rc;
+	return platform_driver_register(&qcedev_plat_driver);
+}
+
+static void qcedev_exit(void)
+{
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&qcedev_plat_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mona Hossain ");
+MODULE_DESCRIPTION("Qualcomm DEV Crypto driver");
+MODULE_VERSION("1.26");
+
+module_init(qcedev_init);
+module_exit(qcedev_exit);
diff --git a/drivers/crypto/msm/qcrypto.c b/drivers/crypto/msm/qcrypto.c
new file mode 100644
index 000000000000..a41a64b94b8d
--- /dev/null
+++ b/drivers/crypto/msm/qcrypto.c
@@ -0,0 +1,3367 @@
+/* Qualcomm Crypto driver
+ *
+ * Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include "qce.h" + + +#define MAX_CRYPTO_DEVICE 3 +#define DEBUG_MAX_FNAME 16 +#define DEBUG_MAX_RW_BUF 1024 + +struct crypto_stat { + u32 aead_sha1_aes_enc; + u32 aead_sha1_aes_dec; + u32 aead_sha1_des_enc; + u32 aead_sha1_des_dec; + u32 aead_sha1_3des_enc; + u32 aead_sha1_3des_dec; + u32 aead_op_success; + u32 aead_op_fail; + u32 ablk_cipher_aes_enc; + u32 ablk_cipher_aes_dec; + u32 ablk_cipher_des_enc; + u32 ablk_cipher_des_dec; + u32 ablk_cipher_3des_enc; + u32 ablk_cipher_3des_dec; + u32 ablk_cipher_op_success; + u32 ablk_cipher_op_fail; + u32 sha1_digest; + u32 sha256_digest; + u32 sha_op_success; + u32 sha_op_fail; + u32 sha1_hmac_digest; + u32 sha256_hmac_digest; + u32 sha_hmac_op_success; + u32 sha_hmac_op_fail; +}; +static struct crypto_stat _qcrypto_stat[MAX_CRYPTO_DEVICE]; +static struct dentry *_debug_dent; +static char _debug_read_buf[DEBUG_MAX_RW_BUF]; + +struct crypto_priv { + /* CE features supported by target device*/ + struct msm_ce_hw_support platform_support; + + /* CE features/algorithms supported by HW engine*/ + struct ce_hw_support ce_support; + + uint32_t bus_scale_handle; + /* the lock protects queue and req*/ + spinlock_t lock; + + /* qce handle */ + void *qce; + + /* list of registered algorithms */ + struct list_head alg_list; + + /* platform device */ + struct platform_device *pdev; + + /* current active request */ + struct crypto_async_request *req; + int res; + + /* request queue */ + struct crypto_queue queue; + + uint32_t ce_lock_count; + uint32_t high_bw_req_count; + + struct work_struct unlock_ce_ws; + + struct tasklet_struct done_tasklet; +}; + + +/*------------------------------------------------------------------------- +* Resource Locking Service +* ------------------------------------------------------------------------*/ +#define QCRYPTO_CMD_ID 1 +#define QCRYPTO_CE_LOCK_CMD 1 +#define QCRYPTO_CE_UNLOCK_CMD 0 +#define NUM_RETRY 1000 +#define CE_BUSY 55 + +static DEFINE_MUTEX(sent_bw_req); + +static int qcrypto_scm_cmd(int resource, int cmd, int *response) +{ +#ifdef CONFIG_MSM_SCM + + struct { + int resource; + int cmd; + } cmd_buf; + + cmd_buf.resource = resource; + cmd_buf.cmd = cmd; + + return scm_call(SCM_SVC_TZ, QCRYPTO_CMD_ID, &cmd_buf, + sizeof(cmd_buf), response, sizeof(*response)); + +#else + return 0; +#endif +} + +static void qcrypto_unlock_ce(struct work_struct *work) +{ + int response = 0; + unsigned long flags; + struct crypto_priv *cp = container_of(work, struct crypto_priv, + unlock_ce_ws); + if (cp->ce_lock_count == 1) + BUG_ON(qcrypto_scm_cmd(cp->platform_support.shared_ce_resource, + QCRYPTO_CE_UNLOCK_CMD, &response) != 0); + spin_lock_irqsave(&cp->lock, flags); + cp->ce_lock_count--; + spin_unlock_irqrestore(&cp->lock, flags); +} + +static int qcrypto_lock_ce(struct crypto_priv *cp) +{ + unsigned long flags; + int response = -CE_BUSY; + int i = 0; + + if (cp->ce_lock_count == 0) { + do { + if (qcrypto_scm_cmd( + cp->platform_support.shared_ce_resource, + QCRYPTO_CE_LOCK_CMD, &response)) { + response = -EINVAL; + break; + } + } while ((response == -CE_BUSY) && (i++ < NUM_RETRY)); + + if ((response == -CE_BUSY) && (i >= NUM_RETRY)) + return -EUSERS; + if (response < 0) + return -EINVAL; + } + 
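+	/*
+	 * Only the first local user takes the TZ lock (above); nested
+	 * users simply bump the reference count under the spinlock.
+	 * qcrypto_unlock_ce() drops the TZ lock again once the count
+	 * falls back to zero.
+	 */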
spin_lock_irqsave(&cp->lock, flags); + cp->ce_lock_count++; + spin_unlock_irqrestore(&cp->lock, flags); + + + return 0; +} + +enum qcrypto_alg_type { + QCRYPTO_ALG_CIPHER = 0, + QCRYPTO_ALG_SHA = 1, + QCRYPTO_ALG_LAST +}; + +struct qcrypto_alg { + struct list_head entry; + struct crypto_alg cipher_alg; + struct ahash_alg sha_alg; + enum qcrypto_alg_type alg_type; + struct crypto_priv *cp; +}; + +#define QCRYPTO_MAX_KEY_SIZE 64 +/* max of AES_BLOCK_SIZE, DES3_EDE_BLOCK_SIZE */ +#define QCRYPTO_MAX_IV_LENGTH 16 + +struct qcrypto_cipher_ctx { + u8 auth_key[QCRYPTO_MAX_KEY_SIZE]; + u8 iv[QCRYPTO_MAX_IV_LENGTH]; + + u8 enc_key[QCRYPTO_MAX_KEY_SIZE]; + unsigned int enc_key_len; + + unsigned int authsize; + unsigned int auth_key_len; + + struct crypto_priv *cp; +}; + +struct qcrypto_cipher_req_ctx { + u8 *iv; + unsigned int ivsize; + int aead; + struct scatterlist asg; /* Formatted associated data sg */ + unsigned char *assoc; /* Pointer to formatted assoc data */ + unsigned int assoclen; /* Save Unformatted assoc data length */ + struct scatterlist *assoc_sg; /* Save Unformatted assoc data sg */ + enum qce_cipher_alg_enum alg; + enum qce_cipher_dir_enum dir; + enum qce_cipher_mode_enum mode; +}; + +#define SHA_MAX_BLOCK_SIZE SHA256_BLOCK_SIZE +#define SHA_MAX_STATE_SIZE (SHA256_DIGEST_SIZE / sizeof(u32)) +#define SHA_MAX_DIGEST_SIZE SHA256_DIGEST_SIZE + +static uint8_t _std_init_vector_sha1_uint8[] = { + 0x67, 0x45, 0x23, 0x01, 0xEF, 0xCD, 0xAB, 0x89, + 0x98, 0xBA, 0xDC, 0xFE, 0x10, 0x32, 0x54, 0x76, + 0xC3, 0xD2, 0xE1, 0xF0 +}; + +/* standard initialization vector for SHA-256, source: FIPS 180-2 */ +static uint8_t _std_init_vector_sha256_uint8[] = { + 0x6A, 0x09, 0xE6, 0x67, 0xBB, 0x67, 0xAE, 0x85, + 0x3C, 0x6E, 0xF3, 0x72, 0xA5, 0x4F, 0xF5, 0x3A, + 0x51, 0x0E, 0x52, 0x7F, 0x9B, 0x05, 0x68, 0x8C, + 0x1F, 0x83, 0xD9, 0xAB, 0x5B, 0xE0, 0xCD, 0x19 +}; + +struct qcrypto_sha_ctx { + enum qce_hash_alg_enum alg; + uint32_t byte_count[4]; + uint8_t digest[SHA_MAX_DIGEST_SIZE]; + uint32_t diglen; + uint8_t *tmp_tbuf; + uint8_t *trailing_buf; + uint8_t *in_buf; + uint32_t authkey_in_len; + uint32_t trailing_buf_len; + uint8_t first_blk; + uint8_t last_blk; + uint8_t authkey[SHA_MAX_BLOCK_SIZE]; + struct ahash_request *ahash_req; + struct completion ahash_req_complete; + struct scatterlist *sg; + struct scatterlist tmp_sg; + struct crypto_priv *cp; +}; + +struct qcrypto_sha_req_ctx { + union { + struct sha1_state sha1_state_ctx; + struct sha256_state sha256_state_ctx; + }; + struct scatterlist *src; + uint32_t nbytes; +}; + +static void _byte_stream_to_words(uint32_t *iv, unsigned char *b, + unsigned int len) +{ + unsigned n; + + n = len / sizeof(uint32_t) ; + for (; n > 0; n--) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000) | + (((*(b+2)) << 8) & 0xff00) | + (*(b+3) & 0xff); + b += sizeof(uint32_t); + iv++; + } + + n = len % sizeof(uint32_t); + if (n == 3) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000) | + (((*(b+2)) << 8) & 0xff00) ; + } else if (n == 2) { + *iv = ((*b << 24) & 0xff000000) | + (((*(b+1)) << 16) & 0xff0000) ; + } else if (n == 1) { + *iv = ((*b << 24) & 0xff000000) ; + } +} + +static void _words_to_byte_stream(uint32_t *iv, unsigned char *b, + unsigned int len) +{ + unsigned n = len / sizeof(uint32_t); + + for (; n > 0; n--) { + *b++ = (unsigned char) ((*iv >> 24) & 0xff); + *b++ = (unsigned char) ((*iv >> 16) & 0xff); + *b++ = (unsigned char) ((*iv >> 8) & 0xff); + *b++ = (unsigned char) (*iv & 0xff); + iv++; + } + n = len % 
sizeof(uint32_t); + if (n == 3) { + *b++ = (unsigned char) ((*iv >> 24) & 0xff); + *b++ = (unsigned char) ((*iv >> 16) & 0xff); + *b = (unsigned char) ((*iv >> 8) & 0xff); + } else if (n == 2) { + *b++ = (unsigned char) ((*iv >> 24) & 0xff); + *b = (unsigned char) ((*iv >> 16) & 0xff); + } else if (n == 1) { + *b = (unsigned char) ((*iv >> 24) & 0xff); + } +} + +static void qcrypto_ce_high_bw_req(struct crypto_priv *cp, bool high_bw_req) +{ + int ret = 0; + + mutex_lock(&sent_bw_req); + if (high_bw_req) { + if (cp->high_bw_req_count == 0) + ret = msm_bus_scale_client_update_request( + cp->bus_scale_handle, 1); + if (ret) + pr_err("%s Unable to set to high bandwidth\n", + __func__); + cp->high_bw_req_count++; + } else { + if (cp->high_bw_req_count == 1) + ret = msm_bus_scale_client_update_request( + cp->bus_scale_handle, 0); + if (ret) + pr_err("%s Unable to set to low bandwidth\n", + __func__); + cp->high_bw_req_count--; + } + mutex_unlock(&sent_bw_req); +} + +static void _start_qcrypto_process(struct crypto_priv *cp); + +static struct qcrypto_alg *_qcrypto_sha_alg_alloc(struct crypto_priv *cp, + struct ahash_alg *template) +{ + struct qcrypto_alg *q_alg; + q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL); + if (!q_alg) { + pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n", + PTR_ERR(q_alg)); + return ERR_PTR(-ENOMEM); + } + + q_alg->alg_type = QCRYPTO_ALG_SHA; + q_alg->sha_alg = *template; + q_alg->cp = cp; + + return q_alg; +}; + +static struct qcrypto_alg *_qcrypto_cipher_alg_alloc(struct crypto_priv *cp, + struct crypto_alg *template) +{ + struct qcrypto_alg *q_alg; + + q_alg = kzalloc(sizeof(struct qcrypto_alg), GFP_KERNEL); + if (!q_alg) { + pr_err("qcrypto Memory allocation of q_alg FAIL, error %ld\n", + PTR_ERR(q_alg)); + return ERR_PTR(-ENOMEM); + } + + q_alg->alg_type = QCRYPTO_ALG_CIPHER; + q_alg->cipher_alg = *template; + q_alg->cp = cp; + + return q_alg; +}; + +static int _qcrypto_cipher_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_alg *alg = tfm->__crt_alg; + struct qcrypto_alg *q_alg; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + q_alg = container_of(alg, struct qcrypto_alg, cipher_alg); + + /* update context with ptr to cp */ + ctx->cp = q_alg->cp; + + /* random first IV */ + get_random_bytes(ctx->iv, QCRYPTO_MAX_IV_LENGTH); + if (ctx->cp->platform_support.bus_scale_table != NULL) + qcrypto_ce_high_bw_req(ctx->cp, true); + + return 0; +}; + +static int _qcrypto_ahash_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm); + struct ahash_alg *alg = container_of(crypto_hash_alg_common(ahash), + struct ahash_alg, halg); + struct qcrypto_alg *q_alg = container_of(alg, struct qcrypto_alg, + sha_alg); + + crypto_ahash_set_reqsize(ahash, sizeof(struct qcrypto_sha_req_ctx)); + /* update context with ptr to cp */ + sha_ctx->cp = q_alg->cp; + sha_ctx->sg = NULL; + sha_ctx->tmp_tbuf = kzalloc(SHA_MAX_BLOCK_SIZE + + SHA_MAX_DIGEST_SIZE, GFP_KERNEL); + if (sha_ctx->tmp_tbuf == NULL) { + pr_err("qcrypto Can't Allocate mem: sha_ctx->tmp_tbuf, error %ld\n", + PTR_ERR(sha_ctx->tmp_tbuf)); + return -ENOMEM; + } + + sha_ctx->trailing_buf = kzalloc(SHA_MAX_BLOCK_SIZE, GFP_KERNEL); + if (sha_ctx->trailing_buf == NULL) { + kfree(sha_ctx->tmp_tbuf); + sha_ctx->tmp_tbuf = NULL; + pr_err("qcrypto Can't Allocate mem: sha_ctx->trailing_buf, error %ld\n", + PTR_ERR(sha_ctx->trailing_buf)); + return -ENOMEM; + } + + sha_ctx->ahash_req = NULL; + if 
(sha_ctx->cp->platform_support.bus_scale_table != NULL) + qcrypto_ce_high_bw_req(sha_ctx->cp, true); + + return 0; +}; + +static void _qcrypto_ahash_cra_exit(struct crypto_tfm *tfm) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm); + + kfree(sha_ctx->tmp_tbuf); + sha_ctx->tmp_tbuf = NULL; + kfree(sha_ctx->trailing_buf); + sha_ctx->trailing_buf = NULL; + if (sha_ctx->sg != NULL) { + kfree(sha_ctx->sg); + sha_ctx->sg = NULL; + } + if (sha_ctx->ahash_req != NULL) { + ahash_request_free(sha_ctx->ahash_req); + sha_ctx->ahash_req = NULL; + } + if (sha_ctx->cp->platform_support.bus_scale_table != NULL) + qcrypto_ce_high_bw_req(sha_ctx->cp, false); +}; + + +static void _crypto_sha_hmac_ahash_req_complete( + struct crypto_async_request *req, int err); + +static int _qcrypto_ahash_hmac_cra_init(struct crypto_tfm *tfm) +{ + struct crypto_ahash *ahash = __crypto_ahash_cast(tfm); + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(tfm); + int ret = 0; + + ret = _qcrypto_ahash_cra_init(tfm); + if (ret) + return ret; + sha_ctx->ahash_req = ahash_request_alloc(ahash, GFP_KERNEL); + + if (sha_ctx->ahash_req == NULL) { + _qcrypto_ahash_cra_exit(tfm); + return -ENOMEM; + } + + init_completion(&sha_ctx->ahash_req_complete); + ahash_request_set_callback(sha_ctx->ahash_req, + CRYPTO_TFM_REQ_MAY_BACKLOG, + _crypto_sha_hmac_ahash_req_complete, + &sha_ctx->ahash_req_complete); + crypto_ahash_clear_flags(ahash, ~0); + + if (sha_ctx->cp->platform_support.bus_scale_table != NULL) + qcrypto_ce_high_bw_req(sha_ctx->cp, true); + + return 0; +}; + +static int _qcrypto_cra_ablkcipher_init(struct crypto_tfm *tfm) +{ + tfm->crt_ablkcipher.reqsize = sizeof(struct qcrypto_cipher_req_ctx); + return _qcrypto_cipher_cra_init(tfm); +}; + +static int _qcrypto_cra_aead_init(struct crypto_tfm *tfm) +{ + tfm->crt_aead.reqsize = sizeof(struct qcrypto_cipher_req_ctx); + return _qcrypto_cipher_cra_init(tfm); +}; + +static void _qcrypto_cra_ablkcipher_exit(struct crypto_tfm *tfm) +{ + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->cp->platform_support.bus_scale_table != NULL) + qcrypto_ce_high_bw_req(ctx->cp, false); +}; + +static void _qcrypto_cra_aead_exit(struct crypto_tfm *tfm) +{ + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + if (ctx->cp->platform_support.bus_scale_table != NULL) + qcrypto_ce_high_bw_req(ctx->cp, false); +}; + +static int _disp_stats(int id) +{ + struct crypto_stat *pstat; + int len = 0; + + pstat = &_qcrypto_stat[id]; + len = snprintf(_debug_read_buf, DEBUG_MAX_RW_BUF - 1, + "\nQualcomm crypto accelerator %d Statistics:\n", + id + 1); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " ABLK AES CIPHER encryption : %d\n", + pstat->ablk_cipher_aes_enc); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " ABLK AES CIPHER decryption : %d\n", + pstat->ablk_cipher_aes_dec); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " ABLK DES CIPHER encryption : %d\n", + pstat->ablk_cipher_des_enc); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " ABLK DES CIPHER decryption : %d\n", + pstat->ablk_cipher_des_dec); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " ABLK 3DES CIPHER encryption : %d\n", + pstat->ablk_cipher_3des_enc); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " ABLK 3DES CIPHER decryption : %d\n", + pstat->ablk_cipher_3des_dec); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " ABLK CIPHER operation success: 
%d\n", + pstat->ablk_cipher_op_success); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " ABLK CIPHER operation fail : %d\n", + pstat->ablk_cipher_op_fail); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-AES encryption : %d\n", + pstat->aead_sha1_aes_enc); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-AES decryption : %d\n", + pstat->aead_sha1_aes_dec); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-DES encryption : %d\n", + pstat->aead_sha1_des_enc); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-DES decryption : %d\n", + pstat->aead_sha1_des_dec); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-3DES encryption : %d\n", + pstat->aead_sha1_3des_enc); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD SHA1-3DES decryption : %d\n", + pstat->aead_sha1_3des_dec); + + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD operation success : %d\n", + pstat->aead_op_success); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " AEAD operation fail : %d\n", + pstat->aead_op_fail); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SHA1 digest : %d\n", + pstat->sha1_digest); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SHA256 digest : %d\n", + pstat->sha256_digest); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SHA operation fail : %d\n", + pstat->sha_op_fail); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SHA operation success : %d\n", + pstat->sha_op_success); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SHA1 HMAC digest : %d\n", + pstat->sha1_hmac_digest); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SHA256 HMAC digest : %d\n", + pstat->sha256_hmac_digest); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SHA HMAC operation fail : %d\n", + pstat->sha_hmac_op_fail); + len += snprintf(_debug_read_buf + len, DEBUG_MAX_RW_BUF - len - 1, + " SHA HMAC operation success : %d\n", + pstat->sha_hmac_op_success); + return len; +} + +static int _qcrypto_remove(struct platform_device *pdev) +{ + struct crypto_priv *cp; + struct qcrypto_alg *q_alg; + struct qcrypto_alg *n; + + cp = platform_get_drvdata(pdev); + + if (!cp) + return 0; + + if (cp->platform_support.bus_scale_table != NULL) + msm_bus_scale_unregister_client(cp->bus_scale_handle); + + list_for_each_entry_safe(q_alg, n, &cp->alg_list, entry) { + if (q_alg->alg_type == QCRYPTO_ALG_CIPHER) + crypto_unregister_alg(&q_alg->cipher_alg); + if (q_alg->alg_type == QCRYPTO_ALG_SHA) + crypto_unregister_ahash(&q_alg->sha_alg); + list_del(&q_alg->entry); + kfree(q_alg); + } + + if (cp->qce) + qce_close(cp->qce); + tasklet_kill(&cp->done_tasklet); + kfree(cp); + return 0; +}; + +static int _qcrypto_setkey_aes(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_priv *cp = ctx->cp; + + switch (len) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_256: + break; + case AES_KEYSIZE_192: + if (cp->ce_support.aes_key_192) + break; + default: + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + }; + ctx->enc_key_len = len; + 
memcpy(ctx->enc_key, key, len); + return 0; +}; + +static int _qcrypto_setkey_des(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + u32 tmp[DES_EXPKEY_WORDS]; + int ret = des_ekey(tmp, key); + + if (len != DES_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + }; + + if (unlikely(ret == 0) && (tfm->crt_flags & CRYPTO_TFM_REQ_WEAK_KEY)) { + tfm->crt_flags |= CRYPTO_TFM_RES_WEAK_KEY; + return -EINVAL; + } + + ctx->enc_key_len = len; + memcpy(ctx->enc_key, key, len); + return 0; +}; + +static int _qcrypto_setkey_3des(struct crypto_ablkcipher *cipher, const u8 *key, + unsigned int len) +{ + struct crypto_tfm *tfm = crypto_ablkcipher_tfm(cipher); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + + if (len != DES3_EDE_KEY_SIZE) { + crypto_ablkcipher_set_flags(cipher, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + }; + ctx->enc_key_len = len; + memcpy(ctx->enc_key, key, len); + return 0; +}; + +static void req_done(unsigned long data) +{ + struct crypto_async_request *areq; + struct crypto_priv *cp = (struct crypto_priv *)data; + unsigned long flags; + + spin_lock_irqsave(&cp->lock, flags); + areq = cp->req; + cp->req = NULL; + spin_unlock_irqrestore(&cp->lock, flags); + + if (areq) + areq->complete(areq, cp->res); + _start_qcrypto_process(cp); +}; + +static void _update_sha1_ctx(struct ahash_request *req) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx; + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + + if (sha_ctx->last_blk == 1) + memset(sha_state_ctx, 0x00, sizeof(struct sha1_state)); + else { + memset(sha_state_ctx->buffer, 0x00, SHA1_BLOCK_SIZE); + memcpy(sha_state_ctx->buffer, sha_ctx->trailing_buf, + sha_ctx->trailing_buf_len); + _byte_stream_to_words(sha_state_ctx->state , sha_ctx->digest, + SHA1_DIGEST_SIZE); + } + return; +} + +static void _update_sha256_ctx(struct ahash_request *req) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx; + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + + if (sha_ctx->last_blk == 1) + memset(sha_state_ctx, 0x00, sizeof(struct sha256_state)); + else { + memset(sha_state_ctx->buf, 0x00, SHA256_BLOCK_SIZE); + memcpy(sha_state_ctx->buf, sha_ctx->trailing_buf, + sha_ctx->trailing_buf_len); + _byte_stream_to_words(sha_state_ctx->state, sha_ctx->digest, + SHA256_DIGEST_SIZE); + } + return; +} + +static void _qce_ahash_complete(void *cookie, unsigned char *digest, + unsigned char *authdata, int ret) +{ + struct ahash_request *areq = (struct ahash_request *) cookie; + struct crypto_ahash *ahash = crypto_ahash_reqtfm(areq); + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(areq->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(areq); + struct crypto_priv *cp = sha_ctx->cp; + struct crypto_stat *pstat; + uint32_t diglen = crypto_ahash_digestsize(ahash); + uint32_t *auth32 = (uint32_t *)authdata; + + pstat = &_qcrypto_stat[cp->pdev->id]; + +#ifdef QCRYPTO_DEBUG + dev_info(&cp->pdev->dev, "_qce_ahash_complete: %p ret %d\n", + areq, ret); +#endif + + if (digest) { + memcpy(sha_ctx->digest, digest, diglen); + memcpy(areq->result, digest, diglen); + } + if (authdata) { + sha_ctx->byte_count[0] = auth32[0]; + sha_ctx->byte_count[1] = auth32[1]; + sha_ctx->byte_count[2] = 
auth32[2]; + sha_ctx->byte_count[3] = auth32[3]; + } + areq->src = rctx->src; + areq->nbytes = rctx->nbytes; + + if (sha_ctx->sg != NULL) { + kfree(sha_ctx->sg); + sha_ctx->sg = NULL; + } + + if (sha_ctx->alg == QCE_HASH_SHA1) + _update_sha1_ctx(areq); + if (sha_ctx->alg == QCE_HASH_SHA256) + _update_sha256_ctx(areq); + + sha_ctx->last_blk = 0; + sha_ctx->first_blk = 0; + + if (ret) { + cp->res = -ENXIO; + pstat->sha_op_fail++; + } else { + cp->res = 0; + pstat->sha_op_success++; + } + + if (cp->platform_support.ce_shared) + schedule_work(&cp->unlock_ce_ws); + tasklet_schedule(&cp->done_tasklet); +}; + +static void _qce_ablk_cipher_complete(void *cookie, unsigned char *icb, + unsigned char *iv, int ret) +{ + struct ablkcipher_request *areq = (struct ablkcipher_request *) cookie; + struct crypto_ablkcipher *ablk = crypto_ablkcipher_reqtfm(areq); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + +#ifdef QCRYPTO_DEBUG + dev_info(&cp->pdev->dev, "_qce_ablk_cipher_complete: %p ret %d\n", + areq, ret); +#endif + if (iv) + memcpy(ctx->iv, iv, crypto_ablkcipher_ivsize(ablk)); + + if (ret) { + cp->res = -ENXIO; + pstat->ablk_cipher_op_fail++; + } else { + cp->res = 0; + pstat->ablk_cipher_op_success++; + } + if (cp->platform_support.ce_shared) + schedule_work(&cp->unlock_ce_ws); + tasklet_schedule(&cp->done_tasklet); +}; + + +static void _qce_aead_complete(void *cookie, unsigned char *icv, + unsigned char *iv, int ret) +{ + struct aead_request *areq = (struct aead_request *) cookie; + struct crypto_aead *aead = crypto_aead_reqtfm(areq); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(areq); + + if (rctx->mode == QCE_MODE_CCM) { + kzfree(rctx->assoc); + areq->assoc = rctx->assoc_sg; + areq->assoclen = rctx->assoclen; + if (ret) { + if (ret == 0x2000000) + ret = -EBADMSG; + else + ret = -ENXIO; + } + } else { + if (ret == 0) { + if (rctx->dir == QCE_ENCRYPT) { + /* copy the icv to dst */ + scatterwalk_map_and_copy(icv, areq->dst, + areq->cryptlen, + ctx->authsize, 1); + + } else { + unsigned char tmp[SHA256_DIGESTSIZE] = {0}; + + /* compare icv from src */ + scatterwalk_map_and_copy(tmp, + areq->src, areq->cryptlen - + ctx->authsize, ctx->authsize, 0); + ret = memcmp(icv, tmp, ctx->authsize); + if (ret != 0) + ret = -EBADMSG; + + } + } else { + ret = -ENXIO; + } + + if (iv) + memcpy(ctx->iv, iv, crypto_aead_ivsize(aead)); + } + + if (ret) + pstat->aead_op_fail++; + else + pstat->aead_op_success++; + + if (cp->platform_support.ce_shared) + schedule_work(&cp->unlock_ce_ws); + tasklet_schedule(&cp->done_tasklet); +} + +static int aead_ccm_set_msg_len(u8 *block, unsigned int msglen, int csize) +{ + __be32 data; + + memset(block, 0, csize); + block += csize; + + if (csize >= 4) + csize = 4; + else if (msglen > (1 << (8 * csize))) + return -EOVERFLOW; + + data = cpu_to_be32(msglen); + memcpy(block - csize, (u8 *)&data + 4 - csize, csize); + + return 0; +} + +static int qccrypto_set_aead_ccm_nonce(struct qce_req *qreq) +{ + struct aead_request *areq = (struct aead_request *) qreq->areq; + unsigned int i = ((unsigned int)qreq->iv[0]) + 1; + + memcpy(&qreq->nonce[0] , qreq->iv, qreq->ivsize); + /* + * Format control info per RFC 3610 and + * NIST Special Publication 800-38C + */ + qreq->nonce[0] 
|= (8 * ((qreq->authsize - 2) / 2)); + if (areq->assoclen) + qreq->nonce[0] |= 64; + + if (i > MAX_NONCE) + return -EINVAL; + + return aead_ccm_set_msg_len(qreq->nonce + 16 - i, qreq->cryptlen, i); +} + +static int qcrypto_aead_ccm_format_adata(struct qce_req *qreq, uint32_t alen, + struct scatterlist *sg) +{ + unsigned char *adata; + uint32_t len, l; + + qreq->assoc = kzalloc((alen + 0x64), (GFP_KERNEL | __GFP_DMA)); + if (!qreq->assoc) { + pr_err("qcrypto Memory allocation of adata FAIL, error %ld\n", + PTR_ERR(qreq->assoc)); + return -ENOMEM; + } + adata = qreq->assoc; + /* + * Add control info for associated data + * RFC 3610 and NIST Special Publication 800-38C + */ + if (alen < 65280) { + *(__be16 *)adata = cpu_to_be16(alen); + len = 2; + } else { + if ((alen >= 65280) && (alen <= 0xffffffff)) { + *(__be16 *)adata = cpu_to_be16(0xfffe); + *(__be32 *)&adata[2] = cpu_to_be32(alen); + len = 6; + } else { + *(__be16 *)adata = cpu_to_be16(0xffff); + *(__be32 *)&adata[6] = cpu_to_be32(alen); + len = 10; + } + } + adata += len; + qreq->assoclen = ALIGN((alen + len), 16); + for (l = alen; l > 0; sg = sg_next(sg)) { + memcpy(adata, sg_virt(sg), sg->length); + l -= sg->length; + adata += sg->length; + } + return 0; +} + +static void _start_qcrypto_process(struct crypto_priv *cp) +{ + struct crypto_async_request *async_req = NULL; + struct crypto_async_request *backlog = NULL; + unsigned long flags; + u32 type; + struct qce_req qreq; + int ret; + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *cipher_ctx; + struct qcrypto_sha_ctx *sha_ctx; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + +again: + spin_lock_irqsave(&cp->lock, flags); + if (cp->req == NULL) { + backlog = crypto_get_backlog(&cp->queue); + async_req = crypto_dequeue_request(&cp->queue); + cp->req = async_req; + } + spin_unlock_irqrestore(&cp->lock, flags); + if (!async_req) + return; + if (backlog) + backlog->complete(backlog, -EINPROGRESS); + type = crypto_tfm_alg_type(async_req->tfm); + + if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) { + struct ablkcipher_request *req; + struct crypto_ablkcipher *tfm; + + req = container_of(async_req, struct ablkcipher_request, base); + cipher_ctx = crypto_tfm_ctx(async_req->tfm); + rctx = ablkcipher_request_ctx(req); + tfm = crypto_ablkcipher_reqtfm(req); + + qreq.op = QCE_REQ_ABLK_CIPHER; + qreq.qce_cb = _qce_ablk_cipher_complete; + qreq.areq = req; + qreq.alg = rctx->alg; + qreq.dir = rctx->dir; + qreq.mode = rctx->mode; + qreq.enckey = cipher_ctx->enc_key; + qreq.encklen = cipher_ctx->enc_key_len; + qreq.iv = req->info; + qreq.ivsize = crypto_ablkcipher_ivsize(tfm); + qreq.cryptlen = req->nbytes; + qreq.use_pmem = 0; + + if ((cipher_ctx->enc_key_len == 0) && + (cp->platform_support.hw_key_support == 0)) + ret = -EINVAL; + else + ret = qce_ablk_cipher_req(cp->qce, &qreq); + } else { + if (type == CRYPTO_ALG_TYPE_AHASH) { + + struct ahash_request *req; + struct qce_sha_req sreq; + + req = container_of(async_req, + struct ahash_request, base); + sha_ctx = crypto_tfm_ctx(async_req->tfm); + + sreq.qce_cb = _qce_ahash_complete; + sreq.digest = &sha_ctx->digest[0]; + sreq.src = req->src; + sreq.auth_data[0] = sha_ctx->byte_count[0]; + sreq.auth_data[1] = sha_ctx->byte_count[1]; + sreq.auth_data[2] = sha_ctx->byte_count[2]; + sreq.auth_data[3] = sha_ctx->byte_count[3]; + sreq.first_blk = sha_ctx->first_blk; + sreq.last_blk = sha_ctx->last_blk; + sreq.size = req->nbytes; + sreq.areq = req; + + switch (sha_ctx->alg) { + case QCE_HASH_SHA1: + sreq.alg = 
QCE_HASH_SHA1; + sreq.authkey = NULL; + break; + case QCE_HASH_SHA256: + sreq.alg = QCE_HASH_SHA256; + sreq.authkey = NULL; + break; + case QCE_HASH_SHA1_HMAC: + sreq.alg = QCE_HASH_SHA1_HMAC; + sreq.authkey = &sha_ctx->authkey[0]; + break; + case QCE_HASH_SHA256_HMAC: + sreq.alg = QCE_HASH_SHA256_HMAC; + sreq.authkey = &sha_ctx->authkey[0]; + break; + default: + break; + }; + ret = qce_process_sha_req(cp->qce, &sreq); + + } else { + struct aead_request *req = container_of(async_req, + struct aead_request, base); + struct crypto_aead *aead = crypto_aead_reqtfm(req); + + rctx = aead_request_ctx(req); + cipher_ctx = crypto_tfm_ctx(async_req->tfm); + + qreq.op = QCE_REQ_AEAD; + qreq.qce_cb = _qce_aead_complete; + + qreq.areq = req; + qreq.alg = rctx->alg; + qreq.dir = rctx->dir; + qreq.mode = rctx->mode; + qreq.iv = rctx->iv; + + qreq.enckey = cipher_ctx->enc_key; + qreq.encklen = cipher_ctx->enc_key_len; + qreq.authkey = cipher_ctx->auth_key; + qreq.authklen = cipher_ctx->auth_key_len; + qreq.authsize = crypto_aead_authsize(aead); + qreq.ivsize = crypto_aead_ivsize(aead); + if (qreq.mode == QCE_MODE_CCM) { + if (qreq.dir == QCE_ENCRYPT) + qreq.cryptlen = req->cryptlen; + else + qreq.cryptlen = req->cryptlen - + qreq.authsize; + /* Get NONCE */ + ret = qccrypto_set_aead_ccm_nonce(&qreq); + if (ret) + goto done; + /* Format Associated data */ + ret = qcrypto_aead_ccm_format_adata(&qreq, + req->assoclen, + req->assoc); + if (ret) + goto done; + /* + * Save the original associated data + * length and sg + */ + rctx->assoc_sg = req->assoc; + rctx->assoclen = req->assoclen; + rctx->assoc = qreq.assoc; + /* + * update req with new formatted associated + * data info + */ + req->assoc = &rctx->asg; + req->assoclen = qreq.assoclen; + sg_set_buf(req->assoc, qreq.assoc, + req->assoclen); + sg_mark_end(req->assoc); + } + ret = qce_aead_req(cp->qce, &qreq); + } + }; +done: + if (ret) { + + spin_lock_irqsave(&cp->lock, flags); + cp->req = NULL; + spin_unlock_irqrestore(&cp->lock, flags); + + if (type == CRYPTO_ALG_TYPE_ABLKCIPHER) + pstat->ablk_cipher_op_fail++; + else + if (type == CRYPTO_ALG_TYPE_AHASH) + pstat->sha_op_fail++; + else + pstat->aead_op_fail++; + + async_req->complete(async_req, ret); + goto again; + }; +}; + +static int _qcrypto_queue_req(struct crypto_priv *cp, + struct crypto_async_request *req) +{ + int ret; + unsigned long flags; + + if (cp->platform_support.ce_shared) { + ret = qcrypto_lock_ce(cp); + if (ret) + return ret; + } + + spin_lock_irqsave(&cp->lock, flags); + ret = crypto_enqueue_request(&cp->queue, req); + spin_unlock_irqrestore(&cp->lock, flags); + _start_qcrypto_process(cp); + + return ret; +} + +static int _qcrypto_enc_aes_ecb(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ecb: %p\n", req); +#endif + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->ablk_cipher_aes_enc++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_enc_aes_cbc(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + 
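+	/*
+	 * Editor's sketch (illustrative only; "done" below is a
+	 * hypothetical completion signalled from the caller's callback):
+	 * every cipher entry point in this file follows the same
+	 * asynchronous shape. It records the (alg, dir, mode) triple in
+	 * the request context and hands the request to
+	 * _qcrypto_queue_req(), which enqueues it under cp->lock and
+	 * kicks _start_qcrypto_process(). A typical caller does:
+	 *
+	 *	ablkcipher_request_set_crypt(req, src, dst, nbytes, iv);
+	 *	rc = crypto_ablkcipher_encrypt(req);
+	 *	if (rc == -EINPROGRESS || rc == -EBUSY)
+	 *		wait_for_completion(&done);
+	 */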
struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_cbc: %p\n", req); +#endif + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->ablk_cipher_aes_enc++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_enc_aes_ctr(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&cp->pdev->dev, "_qcrypto_enc_aes_ctr: %p\n", req); +#endif + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CTR; + + pstat->ablk_cipher_aes_enc++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_enc_aes_xts(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_XTS; + + pstat->ablk_cipher_aes_enc++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_aead_encrypt_aes_ccm(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1)) + return -EINVAL; + if ((ctx->auth_key_len != AES_KEYSIZE_128) && + (ctx->auth_key_len != AES_KEYSIZE_256)) + return -EINVAL; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CCM; + rctx->iv = req->iv; + + pstat->aead_sha1_aes_enc++; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _qcrypto_enc_des_ecb(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->ablk_cipher_des_enc++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_enc_des_cbc(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + 
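+	/*
+	 * Editor's note: the per-algorithm counter bumped below feeds
+	 * the _disp_stats() debugfs text above; every handler increments
+	 * its counter before queueing, so these statistics count
+	 * submissions rather than completions (completions are tallied
+	 * separately via the *_op_success and *_op_fail counters).
+	 */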
+ pstat->ablk_cipher_des_enc++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_enc_3des_ecb(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->ablk_cipher_3des_enc++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_enc_3des_cbc(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->ablk_cipher_3des_enc++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_dec_aes_ecb(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ecb: %p\n", req); +#endif + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->ablk_cipher_aes_dec++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_dec_aes_cbc(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_cbc: %p\n", req); +#endif + + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->ablk_cipher_aes_dec++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_dec_aes_ctr(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); +#ifdef QCRYPTO_DEBUG + dev_info(&cp->pdev->dev, "_qcrypto_dec_aes_ctr: %p\n", req); +#endif + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->mode = QCE_MODE_CTR; + + /* Note. 
There is no such thing as aes/counter mode, decrypt */ + rctx->dir = QCE_ENCRYPT; + + pstat->ablk_cipher_aes_dec++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_dec_des_ecb(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->ablk_cipher_des_dec++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_dec_des_cbc(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->ablk_cipher_des_dec++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_dec_3des_ecb(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_ECB; + + pstat->ablk_cipher_3des_dec++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_dec_3des_cbc(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + + pstat->ablk_cipher_3des_dec++; + return _qcrypto_queue_req(cp, &req->base); +}; + +static int _qcrypto_dec_aes_xts(struct ablkcipher_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + BUG_ON(crypto_tfm_alg_type(req->base.tfm) != + CRYPTO_ALG_TYPE_ABLKCIPHER); + rctx = ablkcipher_request_ctx(req); + rctx->aead = 0; + rctx->alg = CIPHER_ALG_AES; + rctx->mode = QCE_MODE_XTS; + rctx->dir = QCE_DECRYPT; + + pstat->ablk_cipher_aes_dec++; + return _qcrypto_queue_req(cp, &req->base); +}; + + +static int _qcrypto_aead_decrypt_aes_ccm(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + if ((ctx->authsize > 16) || (ctx->authsize < 4) || (ctx->authsize & 1)) + return -EINVAL; + if ((ctx->auth_key_len != AES_KEYSIZE_128) && + (ctx->auth_key_len != AES_KEYSIZE_256)) + return -EINVAL; + 
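+	/*
+	 * Editor's note: together with _qcrypto_aead_ccm_setauthsize(),
+	 * the checks above pin the CCM ICV length to the even sizes
+	 * 4..16 permitted by RFC 3610 / NIST SP 800-38C, and the key to
+	 * 128 or 256 bits; a 192-bit CCM key accepted at setkey time on
+	 * aes_key_192-capable hardware is still rejected here.
+	 */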
+ pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CCM; + rctx->iv = req->iv; + + pstat->aead_sha1_aes_dec++; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _qcrypto_aead_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc); + + ctx->authsize = authsize; + return 0; +} + +static int _qcrypto_aead_ccm_setauthsize(struct crypto_aead *authenc, + unsigned int authsize) +{ + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(authenc); + + switch (authsize) { + case 4: + case 6: + case 8: + case 10: + case 12: + case 14: + case 16: + break; + default: + return -EINVAL; + } + ctx->authsize = authsize; + return 0; +} + +static int _qcrypto_aead_setkey(struct crypto_aead *tfm, const u8 *key, + unsigned int keylen) +{ + struct qcrypto_cipher_ctx *ctx = crypto_aead_ctx(tfm); + struct rtattr *rta = (struct rtattr *)key; + struct crypto_authenc_key_param *param; + + if (!RTA_OK(rta, keylen)) + goto badkey; + if (rta->rta_type != CRYPTO_AUTHENC_KEYA_PARAM) + goto badkey; + if (RTA_PAYLOAD(rta) < sizeof(*param)) + goto badkey; + + param = RTA_DATA(rta); + ctx->enc_key_len = be32_to_cpu(param->enckeylen); + + key += RTA_ALIGN(rta->rta_len); + keylen -= RTA_ALIGN(rta->rta_len); + + if (keylen < ctx->enc_key_len) + goto badkey; + + ctx->auth_key_len = keylen - ctx->enc_key_len; + if (ctx->enc_key_len >= QCRYPTO_MAX_KEY_SIZE || + ctx->auth_key_len >= QCRYPTO_MAX_KEY_SIZE) + goto badkey; + memset(ctx->auth_key, 0, QCRYPTO_MAX_KEY_SIZE); + memcpy(ctx->enc_key, key + ctx->auth_key_len, ctx->enc_key_len); + memcpy(ctx->auth_key, key, ctx->auth_key_len); + + return 0; +badkey: + ctx->enc_key_len = 0; + crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; +} + +static int _qcrypto_aead_ccm_setkey(struct crypto_aead *aead, const u8 *key, + unsigned int keylen) +{ + struct crypto_tfm *tfm = crypto_aead_tfm(aead); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(tfm); + struct crypto_priv *cp = ctx->cp; + + switch (keylen) { + case AES_KEYSIZE_128: + case AES_KEYSIZE_256: + break; + case AES_KEYSIZE_192: + if (cp->ce_support.aes_key_192) + break; + default: + ctx->enc_key_len = 0; + crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN); + return -EINVAL; + }; + ctx->enc_key_len = keylen; + memcpy(ctx->enc_key, key, keylen); + ctx->auth_key_len = keylen; + memcpy(ctx->auth_key, key, keylen); + + return 0; +} + +static int _qcrypto_aead_encrypt_aes_cbc(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + +#ifdef QCRYPTO_DEBUG + dev_info(&cp->pdev->dev, "_qcrypto_aead_encrypt_aes_cbc: %p\n", req); +#endif + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + + pstat->aead_sha1_aes_enc++; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _qcrypto_aead_decrypt_aes_cbc(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + +#ifdef QCRYPTO_DEBUG + dev_info(&cp->pdev->dev, "_qcrypto_aead_decrypt_aes_cbc: 
%p\n", req); +#endif + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + + pstat->aead_sha1_aes_dec++; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _qcrypto_aead_givencrypt_aes_cbc(struct aead_givcrypt_request *req) +{ + struct aead_request *areq = &req->areq; + struct crypto_aead *authenc = crypto_aead_reqtfm(areq); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(areq); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->giv; /* generated iv */ + + memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); + /* avoid consecutive packets going out with same IV */ + *(__be64 *)req->giv ^= cpu_to_be64(req->seq); + pstat->aead_sha1_aes_enc++; + return _qcrypto_queue_req(cp, &areq->base); +} + +#ifdef QCRYPTO_AEAD_AES_CTR +static int _qcrypto_aead_encrypt_aes_ctr(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CTR; + rctx->iv = req->iv; + + pstat->aead_sha1_aes_enc++; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _qcrypto_aead_decrypt_aes_ctr(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + + /* Note. 
There is no such thing as aes/counter mode, decrypt */ + rctx->dir = QCE_ENCRYPT; + + rctx->mode = QCE_MODE_CTR; + rctx->iv = req->iv; + + pstat->aead_sha1_aes_dec++; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _qcrypto_aead_givencrypt_aes_ctr(struct aead_givcrypt_request *req) +{ + struct aead_request *areq = &req->areq; + struct crypto_aead *authenc = crypto_aead_reqtfm(areq); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(areq); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_AES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CTR; + rctx->iv = req->giv; /* generated iv */ + + memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); + /* avoid consecutive packets going out with same IV */ + *(__be64 *)req->giv ^= cpu_to_be64(req->seq); + pstat->aead_sha1_aes_enc++; + return _qcrypto_queue_req(cp, &areq->base); +}; +#endif /* QCRYPTO_AEAD_AES_CTR */ + +static int _qcrypto_aead_encrypt_des_cbc(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + + pstat->aead_sha1_des_enc++; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _qcrypto_aead_decrypt_des_cbc(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + + pstat->aead_sha1_des_dec++; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _qcrypto_aead_givencrypt_des_cbc(struct aead_givcrypt_request *req) +{ + struct aead_request *areq = &req->areq; + struct crypto_aead *authenc = crypto_aead_reqtfm(areq); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(areq); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->giv; /* generated iv */ + + memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); + /* avoid consecutive packets going out with same IV */ + *(__be64 *)req->giv ^= cpu_to_be64(req->seq); + pstat->aead_sha1_des_enc++; + return _qcrypto_queue_req(cp, &areq->base); +} + +static int _qcrypto_aead_encrypt_3des_cbc(struct aead_request *req) +{ + struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + + pstat->aead_sha1_3des_enc++; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _qcrypto_aead_decrypt_3des_cbc(struct aead_request *req) +{ + 
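+	/*
+	 * Editor's note on the *_givencrypt_* handlers in this file
+	 * (values below are illustrative): the generated IV starts from
+	 * the random ctx->iv captured at cra_init time and is XORed
+	 * with the 64-bit request sequence number, so consecutive
+	 * packets never go out with the same IV, e.g.:
+	 *
+	 *	seq = 5:  giv[0..7] = ctx->iv[0..7] ^ be64(5)
+	 *	seq = 6:  giv[0..7] = ctx->iv[0..7] ^ be64(6)
+	 */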
struct qcrypto_cipher_req_ctx *rctx; + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(req); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_DECRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->iv; + + pstat->aead_sha1_3des_dec++; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _qcrypto_aead_givencrypt_3des_cbc(struct aead_givcrypt_request *req) +{ + struct aead_request *areq = &req->areq; + struct crypto_aead *authenc = crypto_aead_reqtfm(areq); + struct qcrypto_cipher_ctx *ctx = crypto_tfm_ctx(areq->base.tfm); + struct crypto_priv *cp = ctx->cp; + struct qcrypto_cipher_req_ctx *rctx; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + rctx = aead_request_ctx(areq); + rctx->aead = 1; + rctx->alg = CIPHER_ALG_3DES; + rctx->dir = QCE_ENCRYPT; + rctx->mode = QCE_MODE_CBC; + rctx->iv = req->giv; /* generated iv */ + + memcpy(req->giv, ctx->iv, crypto_aead_ivsize(authenc)); + /* avoid consecutive packets going out with same IV */ + *(__be64 *)req->giv ^= cpu_to_be64(req->seq); + pstat->aead_sha1_3des_enc++; + return _qcrypto_queue_req(cp, &areq->base); +} + +static int qcrypto_count_sg(struct scatterlist *sg, int nbytes) +{ + int i; + + for (i = 0; nbytes > 0; i++, sg = sg_next(sg)) + nbytes -= sg->length; + + return i; +} + +static int _sha_init(struct qcrypto_sha_ctx *ctx) +{ + ctx->first_blk = 1; + ctx->last_blk = 0; + ctx->byte_count[0] = 0; + ctx->byte_count[1] = 0; + ctx->byte_count[2] = 0; + ctx->byte_count[3] = 0; + ctx->trailing_buf_len = 0; + + return 0; +}; + +static int _sha1_init(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + _sha_init(sha_ctx); + sha_ctx->alg = QCE_HASH_SHA1; + + memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE); + memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0], + SHA1_DIGEST_SIZE); + sha_ctx->diglen = SHA1_DIGEST_SIZE; + _update_sha1_ctx(req); + + pstat->sha1_digest++; + return 0; +}; + +static int _sha256_init(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + + _sha_init(sha_ctx); + sha_ctx->alg = QCE_HASH_SHA256; + + memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE); + memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctx->diglen = SHA256_DIGEST_SIZE; + _update_sha256_ctx(req); + + pstat->sha256_digest++; + return 0; +}; + + +static int _sha1_export(struct ahash_request *req, void *out) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx; + struct sha1_state *out_ctx = (struct sha1_state *)out; + + out_ctx->count = sha_state_ctx->count; + memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state)); + memcpy(out_ctx->buffer, sha_state_ctx->buffer, SHA1_BLOCK_SIZE); + + return 0; +}; + +static int _sha1_import(struct ahash_request *req, const void *in) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx; + struct sha1_state *in_ctx = (struct sha1_state 
*)in; + + sha_state_ctx->count = in_ctx->count; + memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state)); + memcpy(sha_state_ctx->buffer, in_ctx->buffer, SHA1_BLOCK_SIZE); + memcpy(sha_ctx->trailing_buf, in_ctx->buffer, SHA1_BLOCK_SIZE); + + sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0); + sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32); + _words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen); + + sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count & + (SHA1_BLOCK_SIZE-1)); + + if (!(in_ctx->count)) + sha_ctx->first_blk = 1; + else + sha_ctx->first_blk = 0; + + return 0; +} +static int _sha256_export(struct ahash_request *req, void *out) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx; + struct sha256_state *out_ctx = (struct sha256_state *)out; + + out_ctx->count = sha_state_ctx->count; + memcpy(out_ctx->state, sha_state_ctx->state, sizeof(out_ctx->state)); + memcpy(out_ctx->buf, sha_state_ctx->buf, SHA256_BLOCK_SIZE); + + return 0; +}; + +static int _sha256_import(struct ahash_request *req, const void *in) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx; + struct sha256_state *in_ctx = (struct sha256_state *)in; + + sha_state_ctx->count = in_ctx->count; + memcpy(sha_state_ctx->state, in_ctx->state, sizeof(in_ctx->state)); + memcpy(sha_state_ctx->buf, in_ctx->buf, SHA256_BLOCK_SIZE); + memcpy(sha_ctx->trailing_buf, in_ctx->buf, SHA256_BLOCK_SIZE); + + sha_ctx->byte_count[0] = (uint32_t)(in_ctx->count & 0xFFFFFFC0); + sha_ctx->byte_count[1] = (uint32_t)(in_ctx->count >> 32); + _words_to_byte_stream(in_ctx->state, sha_ctx->digest, sha_ctx->diglen); + + sha_ctx->trailing_buf_len = (uint32_t)(in_ctx->count & + (SHA256_BLOCK_SIZE-1)); + + if (!(in_ctx->count)) + sha_ctx->first_blk = 1; + else + sha_ctx->first_blk = 0; + + return 0; +} + + +static int _sha_update(struct ahash_request *req, uint32_t sha_block_size) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + uint32_t total, len, i, num_sg; + uint8_t *k_src = NULL; + uint32_t sha_pad_len = 0; + uint32_t end_src = 0; + uint32_t trailing_buf_len = 0; + uint32_t nbytes, index = 0; + uint32_t saved_length = 0; + int ret = 0; + + /* check for trailing buffer from previous updates and append it */ + total = req->nbytes + sha_ctx->trailing_buf_len; + len = req->nbytes; + + if (total <= sha_block_size) { + i = 0; + + k_src = &sha_ctx->trailing_buf[sha_ctx->trailing_buf_len]; + while (len > 0) { + memcpy(k_src, sg_virt(&req->src[i]), + req->src[i].length); + len -= req->src[i].length; + k_src += req->src[i].length; + i++; + } + sha_ctx->trailing_buf_len = total; + if (sha_ctx->alg == QCE_HASH_SHA1) + _update_sha1_ctx(req); + if (sha_ctx->alg == QCE_HASH_SHA256) + _update_sha256_ctx(req); + return 0; + } + + /* save the original req structure fields*/ + rctx->src = req->src; + rctx->nbytes = req->nbytes; + + memcpy(sha_ctx->tmp_tbuf, sha_ctx->trailing_buf, + sha_ctx->trailing_buf_len); + k_src = &sha_ctx->trailing_buf[0]; + /* get new trailing buffer */ + sha_pad_len = ALIGN(total, sha_block_size) - total; + trailing_buf_len = sha_block_size - sha_pad_len; + nbytes = total - trailing_buf_len; + num_sg = qcrypto_count_sg(req->src, req->nbytes); + + len = 
sha_ctx->trailing_buf_len; + i = 0; + + while (len < nbytes) { + if ((len + req->src[i].length) > nbytes) + break; + len += req->src[i].length; + i++; + } + + end_src = i; + if (len < nbytes) { + uint32_t remnant = (nbytes - len); + memcpy(k_src, (sg_virt(&req->src[i]) + remnant), + (req->src[i].length - remnant)); + k_src += (req->src[i].length - remnant); + saved_length = req->src[i].length; + index = i; + req->src[i].length = remnant; + i++; + } + + while (i < num_sg) { + memcpy(k_src, sg_virt(&req->src[i]), req->src[i].length); + k_src += req->src[i].length; + i++; + } + + if (sha_ctx->trailing_buf_len) { + num_sg = end_src + 2; + sha_ctx->sg = kzalloc(num_sg * (sizeof(struct scatterlist)), + GFP_KERNEL); + if (sha_ctx->sg == NULL) { + pr_err("qcrypto Can't Allocate mem: sha_ctx->sg, error %ld\n", + PTR_ERR(sha_ctx->sg)); + return -ENOMEM; + } + + sg_set_buf(&sha_ctx->sg[0], sha_ctx->tmp_tbuf, + sha_ctx->trailing_buf_len); + for (i = 1; i < num_sg; i++) + sg_set_buf(&sha_ctx->sg[i], sg_virt(&req->src[i-1]), + req->src[i-1].length); + + req->src = sha_ctx->sg; + sg_mark_end(&sha_ctx->sg[num_sg - 1]); + } else + sg_mark_end(&req->src[end_src]); + + req->nbytes = nbytes; + if (saved_length > 0) + rctx->src[index].length = saved_length; + sha_ctx->trailing_buf_len = trailing_buf_len; + + ret = _qcrypto_queue_req(cp, &req->base); + + return ret; +}; + +static int _sha1_update(struct ahash_request *req) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha1_state *sha_state_ctx = &rctx->sha1_state_ctx; + + sha_state_ctx->count += req->nbytes; + return _sha_update(req, SHA1_BLOCK_SIZE); +} + +static int _sha256_update(struct ahash_request *req) +{ + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct sha256_state *sha_state_ctx = &rctx->sha256_state_ctx; + + sha_state_ctx->count += req->nbytes; + return _sha_update(req, SHA256_BLOCK_SIZE); +} + +static int _sha_final(struct ahash_request *req, uint32_t sha_block_size) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + int ret = 0; + + sha_ctx->last_blk = 1; + + /* save the original req structure fields*/ + rctx->src = req->src; + rctx->nbytes = req->nbytes; + + sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf, + sha_ctx->trailing_buf_len); + sg_mark_end(&sha_ctx->tmp_sg); + + req->src = &sha_ctx->tmp_sg; + req->nbytes = sha_ctx->trailing_buf_len; + + ret = _qcrypto_queue_req(cp, &req->base); + + return ret; +}; + +static int _sha1_final(struct ahash_request *req) +{ + return _sha_final(req, SHA1_BLOCK_SIZE); +} + +static int _sha256_final(struct ahash_request *req) +{ + return _sha_final(req, SHA256_BLOCK_SIZE); +} + +static int _sha_digest(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_priv *cp = sha_ctx->cp; + int ret = 0; + + /* save the original req structure fields*/ + rctx->src = req->src; + rctx->nbytes = req->nbytes; + + sha_ctx->last_blk = 1; + ret = _qcrypto_queue_req(cp, &req->base); + + return ret; +} + +static int _sha1_digest(struct ahash_request *req) +{ + _sha1_init(req); + return _sha_digest(req); +} + +static int _sha256_digest(struct ahash_request *req) +{ + _sha256_init(req); + return _sha_digest(req); +} + +static void _crypto_sha_hmac_ahash_req_complete( + struct crypto_async_request *req, int err) +{ + struct completion 
*ahash_req_complete = req->data; + + if (err == -EINPROGRESS) + return; + complete(ahash_req_complete); +} + +static int _sha_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int len) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); + int ret = 0; + + sha_ctx->in_buf = kzalloc(len, GFP_KERNEL); + if (sha_ctx->in_buf == NULL) { + pr_err("qcrypto Can't Allocate mem: sha_ctx->in_buf, error %ld\n", + PTR_ERR(sha_ctx->in_buf)); + return -ENOMEM; + } + memcpy(sha_ctx->in_buf, key, len); + sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->in_buf, len); + sg_mark_end(&sha_ctx->tmp_sg); + + ahash_request_set_crypt(sha_ctx->ahash_req, &sha_ctx->tmp_sg, + &sha_ctx->authkey[0], len); + + ret = _sha_digest(sha_ctx->ahash_req); + if (ret == -EINPROGRESS || ret == -EBUSY) { + ret = + wait_for_completion_interruptible( + &sha_ctx->ahash_req_complete); + INIT_COMPLETION(sha_ctx->ahash_req_complete); + } + + sha_ctx->authkey_in_len = len; + kfree(sha_ctx->in_buf); + sha_ctx->in_buf = NULL; + + return ret; +} + +static int _sha1_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int len) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); + + if (len <= SHA1_BLOCK_SIZE) + memcpy(&sha_ctx->authkey[0], key, len); + else { + _sha_init(sha_ctx); + sha_ctx->alg = QCE_HASH_SHA1; + memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0], + SHA1_DIGEST_SIZE); + sha_ctx->diglen = SHA1_DIGEST_SIZE; + _sha_hmac_setkey(tfm, key, len); + } + return 0; +} + +static int _sha256_hmac_setkey(struct crypto_ahash *tfm, const u8 *key, + unsigned int len) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(&tfm->base); + + if (len <= SHA256_BLOCK_SIZE) + memcpy(&sha_ctx->authkey[0], key, len); + else { + _sha_init(sha_ctx); + sha_ctx->alg = QCE_HASH_SHA256; + memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctx->diglen = SHA256_DIGEST_SIZE; + _sha_hmac_setkey(tfm, key, len); + } + + return 0; +} + +static int _sha_hmac_init_ihash(struct ahash_request *req, + uint32_t sha_block_size) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + int i; + + for (i = 0; i < sha_block_size; i++) + sha_ctx->trailing_buf[i] = sha_ctx->authkey[i] ^ 0x36; + sha_ctx->trailing_buf_len = sha_block_size; + + return 0; +} + +static int _sha1_hmac_init(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct crypto_stat *pstat; + int ret = 0; + + pstat = &_qcrypto_stat[cp->pdev->id]; + pstat->sha1_hmac_digest++; + + _sha_init(sha_ctx); + memset(&sha_ctx->trailing_buf[0], 0x00, SHA1_BLOCK_SIZE); + memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0], + SHA1_DIGEST_SIZE); + sha_ctx->diglen = SHA1_DIGEST_SIZE; + _update_sha1_ctx(req); + + if (cp->ce_support.sha_hmac) + sha_ctx->alg = QCE_HASH_SHA1_HMAC; + else { + sha_ctx->alg = QCE_HASH_SHA1; + ret = _sha_hmac_init_ihash(req, SHA1_BLOCK_SIZE); + } + + return ret; +} + +static int _sha256_hmac_init(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct crypto_stat *pstat; + int ret = 0; + + pstat = &_qcrypto_stat[cp->pdev->id]; + pstat->sha256_hmac_digest++; + + _sha_init(sha_ctx); + memset(&sha_ctx->trailing_buf[0], 0x00, SHA256_BLOCK_SIZE); + memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctx->diglen = SHA256_DIGEST_SIZE; + _update_sha256_ctx(req); + + if 
(cp->ce_support.sha_hmac) + sha_ctx->alg = QCE_HASH_SHA256_HMAC; + else { + sha_ctx->alg = QCE_HASH_SHA256; + ret = _sha_hmac_init_ihash(req, SHA256_BLOCK_SIZE); + } + + return ret; +} + +static int _sha1_hmac_update(struct ahash_request *req) +{ + return _sha1_update(req); +} + +static int _sha256_hmac_update(struct ahash_request *req) +{ + return _sha256_update(req); +} + +static int _sha_hmac_outer_hash(struct ahash_request *req, + uint32_t sha_digest_size, uint32_t sha_block_size) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct qcrypto_sha_req_ctx *rctx = ahash_request_ctx(req); + struct crypto_priv *cp = sha_ctx->cp; + int i; + + for (i = 0; i < sha_block_size; i++) + sha_ctx->tmp_tbuf[i] = sha_ctx->authkey[i] ^ 0x5c; + + /* save the original req structure fields*/ + rctx->src = req->src; + rctx->nbytes = req->nbytes; + + memcpy(&sha_ctx->tmp_tbuf[sha_block_size], &sha_ctx->digest[0], + sha_digest_size); + + sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->tmp_tbuf, sha_block_size + + sha_digest_size); + sg_mark_end(&sha_ctx->tmp_sg); + req->src = &sha_ctx->tmp_sg; + req->nbytes = sha_block_size + sha_digest_size; + + _sha_init(sha_ctx); + if (sha_ctx->alg == QCE_HASH_SHA1) { + memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0], + SHA1_DIGEST_SIZE); + sha_ctx->diglen = SHA1_DIGEST_SIZE; + } else { + memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctx->diglen = SHA256_DIGEST_SIZE; + } + + sha_ctx->last_blk = 1; + return _qcrypto_queue_req(cp, &req->base); +} + +static int _sha_hmac_inner_hash(struct ahash_request *req, + uint32_t sha_digest_size, uint32_t sha_block_size) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct ahash_request *areq = sha_ctx->ahash_req; + struct crypto_priv *cp = sha_ctx->cp; + int ret = 0; + + sha_ctx->last_blk = 1; + + sg_set_buf(&sha_ctx->tmp_sg, sha_ctx->trailing_buf, + sha_ctx->trailing_buf_len); + sg_mark_end(&sha_ctx->tmp_sg); + + ahash_request_set_crypt(areq, &sha_ctx->tmp_sg, &sha_ctx->digest[0], + sha_ctx->trailing_buf_len); + sha_ctx->last_blk = 1; + ret = _qcrypto_queue_req(cp, &areq->base); + + if (ret == -EINPROGRESS || ret == -EBUSY) { + ret = + wait_for_completion_interruptible(&sha_ctx->ahash_req_complete); + INIT_COMPLETION(sha_ctx->ahash_req_complete); + } + + return ret; +} + +static int _sha1_hmac_final(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + int ret = 0; + + if (cp->ce_support.sha_hmac) + return _sha_final(req, SHA1_BLOCK_SIZE); + else { + ret = _sha_hmac_inner_hash(req, SHA1_DIGEST_SIZE, + SHA1_BLOCK_SIZE); + if (ret) + return ret; + return _sha_hmac_outer_hash(req, SHA1_DIGEST_SIZE, + SHA1_BLOCK_SIZE); + } +} + +static int _sha256_hmac_final(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + int ret = 0; + + if (cp->ce_support.sha_hmac) + return _sha_final(req, SHA256_BLOCK_SIZE); + else { + ret = _sha_hmac_inner_hash(req, SHA256_DIGEST_SIZE, + SHA256_BLOCK_SIZE); + if (ret) + return ret; + return _sha_hmac_outer_hash(req, SHA256_DIGEST_SIZE, + SHA256_BLOCK_SIZE); + } + return 0; +} + + +static int _sha1_hmac_digest(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + pstat->sha1_hmac_digest++; + + 
_sha_init(sha_ctx); + memcpy(&sha_ctx->digest[0], &_std_init_vector_sha1_uint8[0], + SHA1_DIGEST_SIZE); + sha_ctx->diglen = SHA1_DIGEST_SIZE; + sha_ctx->alg = QCE_HASH_SHA1_HMAC; + + return _sha_digest(req); +} + +static int _sha256_hmac_digest(struct ahash_request *req) +{ + struct qcrypto_sha_ctx *sha_ctx = crypto_tfm_ctx(req->base.tfm); + struct crypto_priv *cp = sha_ctx->cp; + struct crypto_stat *pstat; + + pstat = &_qcrypto_stat[cp->pdev->id]; + pstat->sha256_hmac_digest++; + + _sha_init(sha_ctx); + memcpy(&sha_ctx->digest[0], &_std_init_vector_sha256_uint8[0], + SHA256_DIGEST_SIZE); + sha_ctx->diglen = SHA256_DIGEST_SIZE; + sha_ctx->alg = QCE_HASH_SHA256_HMAC; + + return _sha_digest(req); +} + +static struct ahash_alg _qcrypto_ahash_algos[] = { + { + .init = _sha1_init, + .update = _sha1_update, + .final = _sha1_final, + .export = _sha1_export, + .import = _sha1_import, + .digest = _sha1_digest, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "sha1", + .cra_driver_name = "qcrypto-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = + sizeof(struct qcrypto_sha_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ahash_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_ahash_cra_init, + .cra_exit = _qcrypto_ahash_cra_exit, + }, + }, + }, + { + .init = _sha256_init, + .update = _sha256_update, + .final = _sha256_final, + .export = _sha256_export, + .import = _sha256_import, + .digest = _sha256_digest, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "sha256", + .cra_driver_name = "qcrypto-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = + sizeof(struct qcrypto_sha_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ahash_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_ahash_cra_init, + .cra_exit = _qcrypto_ahash_cra_exit, + }, + }, + }, +}; + +static struct ahash_alg _qcrypto_sha_hmac_algos[] = { + { + .init = _sha1_hmac_init, + .update = _sha1_hmac_update, + .final = _sha1_hmac_final, + .export = _sha1_export, + .import = _sha1_import, + .digest = _sha1_hmac_digest, + .setkey = _sha1_hmac_setkey, + .halg = { + .digestsize = SHA1_DIGEST_SIZE, + .statesize = sizeof(struct sha1_state), + .base = { + .cra_name = "hmac(sha1)", + .cra_driver_name = "qcrypto-hmac-sha1", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA1_BLOCK_SIZE, + .cra_ctxsize = + sizeof(struct qcrypto_sha_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ahash_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_ahash_hmac_cra_init, + .cra_exit = _qcrypto_ahash_cra_exit, + }, + }, + }, + { + .init = _sha256_hmac_init, + .update = _sha256_hmac_update, + .final = _sha256_hmac_final, + .export = _sha256_export, + .import = _sha256_import, + .digest = _sha256_hmac_digest, + .setkey = _sha256_hmac_setkey, + .halg = { + .digestsize = SHA256_DIGEST_SIZE, + .statesize = sizeof(struct sha256_state), + .base = { + .cra_name = "hmac(sha256)", + .cra_driver_name = "qcrypto-hmac-sha256", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AHASH | + CRYPTO_ALG_ASYNC, + .cra_blocksize = SHA256_BLOCK_SIZE, + .cra_ctxsize = + sizeof(struct qcrypto_sha_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ahash_type, + .cra_module = THIS_MODULE, + 
.cra_init = _qcrypto_ahash_hmac_cra_init, + .cra_exit = _qcrypto_ahash_cra_exit, + }, + }, + }, +}; + +static struct crypto_alg _qcrypto_ablk_cipher_algos[] = { + { + .cra_name = "ecb(aes)", + .cra_driver_name = "qcrypto-ecb-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_cra_ablkcipher_init, + .cra_exit = _qcrypto_cra_ablkcipher_exit, + .cra_u = { + .ablkcipher = { + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = _qcrypto_setkey_aes, + .encrypt = _qcrypto_enc_aes_ecb, + .decrypt = _qcrypto_dec_aes_ecb, + }, + }, + }, + { + .cra_name = "cbc(aes)", + .cra_driver_name = "qcrypto-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_cra_ablkcipher_init, + .cra_exit = _qcrypto_cra_ablkcipher_exit, + .cra_u = { + .ablkcipher = { + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = _qcrypto_setkey_aes, + .encrypt = _qcrypto_enc_aes_cbc, + .decrypt = _qcrypto_dec_aes_cbc, + }, + }, + }, + { + .cra_name = "ctr(aes)", + .cra_driver_name = "qcrypto-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_cra_ablkcipher_init, + .cra_exit = _qcrypto_cra_ablkcipher_exit, + .cra_u = { + .ablkcipher = { + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = _qcrypto_setkey_aes, + .encrypt = _qcrypto_enc_aes_ctr, + .decrypt = _qcrypto_dec_aes_ctr, + }, + }, + }, + { + .cra_name = "ecb(des)", + .cra_driver_name = "qcrypto-ecb-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_cra_ablkcipher_init, + .cra_exit = _qcrypto_cra_ablkcipher_exit, + .cra_u = { + .ablkcipher = { + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = _qcrypto_setkey_des, + .encrypt = _qcrypto_enc_des_ecb, + .decrypt = _qcrypto_dec_des_ecb, + }, + }, + }, + { + .cra_name = "cbc(des)", + .cra_driver_name = "qcrypto-cbc-des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_cra_ablkcipher_init, + .cra_exit = _qcrypto_cra_ablkcipher_exit, + .cra_u = { + .ablkcipher = { + .ivsize = DES_BLOCK_SIZE, + .min_keysize = DES_KEY_SIZE, + .max_keysize = DES_KEY_SIZE, + .setkey = _qcrypto_setkey_des, + .encrypt = _qcrypto_enc_des_cbc, + .decrypt = _qcrypto_dec_des_cbc, + }, + }, + }, + { + .cra_name = "ecb(des3_ede)", + .cra_driver_name = "qcrypto-ecb-3des", + .cra_priority = 300, + .cra_flags = 
CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_cra_ablkcipher_init, + .cra_exit = _qcrypto_cra_ablkcipher_exit, + .cra_u = { + .ablkcipher = { + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = _qcrypto_setkey_3des, + .encrypt = _qcrypto_enc_3des_ecb, + .decrypt = _qcrypto_dec_3des_ecb, + }, + }, + }, + { + .cra_name = "cbc(des3_ede)", + .cra_driver_name = "qcrypto-cbc-3des", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = DES3_EDE_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_cra_ablkcipher_init, + .cra_exit = _qcrypto_cra_ablkcipher_exit, + .cra_u = { + .ablkcipher = { + .ivsize = DES3_EDE_BLOCK_SIZE, + .min_keysize = DES3_EDE_KEY_SIZE, + .max_keysize = DES3_EDE_KEY_SIZE, + .setkey = _qcrypto_setkey_3des, + .encrypt = _qcrypto_enc_3des_cbc, + .decrypt = _qcrypto_dec_3des_cbc, + }, + }, + }, +}; + +static struct crypto_alg _qcrypto_ablk_cipher_xts_algo = { + .cra_name = "xts(aes)", + .cra_driver_name = "qcrypto-xts-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_ablkcipher_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_cra_ablkcipher_init, + .cra_exit = _qcrypto_cra_ablkcipher_exit, + .cra_u = { + .ablkcipher = { + .ivsize = AES_BLOCK_SIZE, + .min_keysize = AES_MIN_KEY_SIZE, + .max_keysize = AES_MAX_KEY_SIZE, + .setkey = _qcrypto_setkey_aes, + .encrypt = _qcrypto_enc_aes_xts, + .decrypt = _qcrypto_dec_aes_xts, + }, + }, +}; + +static struct crypto_alg _qcrypto_aead_sha1_hmac_algos[] = { + { + .cra_name = "authenc(hmac(sha1),cbc(aes))", + .cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_cra_aead_init, + .cra_exit = _qcrypto_cra_aead_exit, + .cra_u = { + .aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .setkey = _qcrypto_aead_setkey, + .setauthsize = _qcrypto_aead_setauthsize, + .encrypt = _qcrypto_aead_encrypt_aes_cbc, + .decrypt = _qcrypto_aead_decrypt_aes_cbc, + .givencrypt = _qcrypto_aead_givencrypt_aes_cbc, + .geniv = "", + } + } + }, + +#ifdef QCRYPTO_AEAD_AES_CTR + { + .cra_name = "authenc(hmac(sha1),ctr(aes))", + .cra_driver_name = "qcrypto-aead-hmac-sha1-ctr-aes", + .cra_priority = 300, + .cra_flags = CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC, + .cra_blocksize = AES_BLOCK_SIZE, + .cra_ctxsize = sizeof(struct qcrypto_cipher_ctx), + .cra_alignmask = 0, + .cra_type = &crypto_aead_type, + .cra_module = THIS_MODULE, + .cra_init = _qcrypto_cra_aead_init, + .cra_exit = _qcrypto_cra_aead_exit, + .cra_u = { + .aead = { + .ivsize = AES_BLOCK_SIZE, + .maxauthsize = SHA1_DIGEST_SIZE, + .setkey = _qcrypto_aead_setkey, + .setauthsize = _qcrypto_aead_setauthsize, + .encrypt = _qcrypto_aead_encrypt_aes_ctr, + .decrypt = _qcrypto_aead_decrypt_aes_ctr, + .givencrypt = _qcrypto_aead_givencrypt_aes_ctr, + 
.geniv = "",
+			}
+		}
+	},
+#endif /* QCRYPTO_AEAD_AES_CTR */
+	{
+		.cra_name	= "authenc(hmac(sha1),cbc(des))",
+		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_aead_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aead_init,
+		.cra_exit	= _qcrypto_cra_aead_exit,
+		.cra_u		= {
+			.aead = {
+				.ivsize		= DES_BLOCK_SIZE,
+				.maxauthsize	= SHA1_DIGEST_SIZE,
+				.setkey = _qcrypto_aead_setkey,
+				.setauthsize = _qcrypto_aead_setauthsize,
+				.encrypt = _qcrypto_aead_encrypt_des_cbc,
+				.decrypt = _qcrypto_aead_decrypt_des_cbc,
+				.givencrypt = _qcrypto_aead_givencrypt_des_cbc,
+				.geniv = "",
+			}
+		}
+	},
+	{
+		.cra_name	= "authenc(hmac(sha1),cbc(des3_ede))",
+		.cra_driver_name = "qcrypto-aead-hmac-sha1-cbc-3des",
+		.cra_priority	= 300,
+		.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+		.cra_blocksize	= DES3_EDE_BLOCK_SIZE,
+		.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+		.cra_alignmask	= 0,
+		.cra_type	= &crypto_aead_type,
+		.cra_module	= THIS_MODULE,
+		.cra_init	= _qcrypto_cra_aead_init,
+		.cra_exit	= _qcrypto_cra_aead_exit,
+		.cra_u		= {
+			.aead = {
+				.ivsize		= DES3_EDE_BLOCK_SIZE,
+				.maxauthsize	= SHA1_DIGEST_SIZE,
+				.setkey = _qcrypto_aead_setkey,
+				.setauthsize = _qcrypto_aead_setauthsize,
+				.encrypt = _qcrypto_aead_encrypt_3des_cbc,
+				.decrypt = _qcrypto_aead_decrypt_3des_cbc,
+				.givencrypt = _qcrypto_aead_givencrypt_3des_cbc,
+				.geniv = "",
+			}
+		}
+	},
+};
+
+static struct crypto_alg _qcrypto_aead_ccm_algo = {
+	.cra_name	= "ccm(aes)",
+	.cra_driver_name = "qcrypto-aes-ccm",
+	.cra_priority	= 300,
+	.cra_flags	= CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC,
+	.cra_blocksize	= AES_BLOCK_SIZE,
+	.cra_ctxsize	= sizeof(struct qcrypto_cipher_ctx),
+	.cra_alignmask	= 0,
+	.cra_type	= &crypto_aead_type,
+	.cra_module	= THIS_MODULE,
+	.cra_init	= _qcrypto_cra_aead_init,
+	.cra_exit	= _qcrypto_cra_aead_exit,
+	.cra_u		= {
+		.aead = {
+			.ivsize		= AES_BLOCK_SIZE,
+			.maxauthsize	= SHA1_DIGEST_SIZE,
+			.setkey = _qcrypto_aead_ccm_setkey,
+			.setauthsize = _qcrypto_aead_ccm_setauthsize,
+			.encrypt = _qcrypto_aead_encrypt_aes_ccm,
+			.decrypt = _qcrypto_aead_decrypt_aes_ccm,
+			.geniv = "",
+		}
+	}
+};
+
+
+static int _qcrypto_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	void *handle;
+	struct crypto_priv *cp;
+	int i;
+	struct msm_ce_hw_support *platform_support;
+
+	if (pdev->id >= MAX_CRYPTO_DEVICE) {
+		pr_err("%s: device id %d exceeds allowed %d\n",
+				__func__, pdev->id, MAX_CRYPTO_DEVICE);
+		return -ENOENT;
+	}
+
+	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
+	if (!cp) {
+		pr_err("qcrypto Memory allocation of crypto_priv FAIL\n");
+		return -ENOMEM;
+	}
+
+	/* open qce */
+	handle = qce_open(pdev, &rc);
+	if (handle == NULL) {
+		kfree(cp);
+		platform_set_drvdata(pdev, NULL);
+		return rc;
+	}
+
+	INIT_LIST_HEAD(&cp->alg_list);
+	platform_set_drvdata(pdev, cp);
+	spin_lock_init(&cp->lock);
+	tasklet_init(&cp->done_tasklet, req_done, (unsigned long)cp);
+	crypto_init_queue(&cp->queue, 50);
+	cp->qce = handle;
+	cp->pdev = pdev;
+	qce_hw_support(cp->qce, &cp->ce_support);
+	platform_support = (struct msm_ce_hw_support *)pdev->dev.platform_data;
+	cp->platform_support.ce_shared = platform_support->ce_shared;
+	cp->platform_support.shared_ce_resource =
+				platform_support->shared_ce_resource;
+	cp->platform_support.hw_key_support =
+				platform_support->hw_key_support;
+	cp->platform_support.bus_scale_table =
+				platform_support->bus_scale_table;
+	cp->high_bw_req_count = 0;
+	cp->ce_lock_count = 0;
+	cp->platform_support.sha_hmac = platform_support->sha_hmac;
+
+	if (cp->platform_support.ce_shared)
+		INIT_WORK(&cp->unlock_ce_ws, qcrypto_unlock_ce);
+
+	if (cp->platform_support.bus_scale_table != NULL) {
+		cp->bus_scale_handle =
+			msm_bus_scale_register_client(
+				(struct msm_bus_scale_pdata *)
+					cp->platform_support.bus_scale_table);
+		if (!cp->bus_scale_handle) {
+			pr_err("%s: not able to get bus scale\n", __func__);
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+
+	/* register crypto cipher algorithms the device supports */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ablk_cipher_algos); i++) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_algos[i]);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/* register the aes-xts cipher algorithm if the device supports it */
+	if (cp->ce_support.aes_xts) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_ablk_cipher_xts_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	/*
+	 * Register crypto hash (sha1 and sha256) algorithms the
+	 * device supports
+	 */
+	for (i = 0; i < ARRAY_SIZE(_qcrypto_ahash_algos); i++) {
+		struct qcrypto_alg *q_alg = NULL;
+
+		q_alg = _qcrypto_sha_alg_alloc(cp, &_qcrypto_ahash_algos[i]);
+
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+
+		rc = crypto_register_ahash(&q_alg->sha_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+				q_alg->sha_alg.halg.base.cra_driver_name);
+		}
+	}
+
+	/* register crypto aead (hmac-sha1) algorithms the device supports */
+	if (cp->ce_support.sha1_hmac_20 || cp->ce_support.sha1_hmac) {
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_aead_sha1_hmac_algos);
+									i++) {
+			struct qcrypto_alg *q_alg;
+
+			q_alg = _qcrypto_cipher_alg_alloc(cp,
+					&_qcrypto_aead_sha1_hmac_algos[i]);
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+
+			rc = crypto_register_alg(&q_alg->cipher_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+			}
+		}
+	}
+
+	if ((cp->ce_support.sha_hmac) || (cp->platform_support.sha_hmac)) {
+		/* register crypto hmac algorithms the device supports */
+		for (i = 0; i < ARRAY_SIZE(_qcrypto_sha_hmac_algos); i++) {
+			struct qcrypto_alg *q_alg = NULL;
+
+			q_alg = _qcrypto_sha_alg_alloc(cp,
+						&_qcrypto_sha_hmac_algos[i]);
+
+			if (IS_ERR(q_alg)) {
+				rc = PTR_ERR(q_alg);
+				goto err;
+			}
+
+			rc = crypto_register_ahash(&q_alg->sha_alg);
+			if (rc) {
+				dev_err(&pdev->dev,
+					"%s alg registration failed\n",
+					q_alg->sha_alg.halg.base.cra_driver_name);
+				kfree(q_alg);
+			} else {
+				list_add_tail(&q_alg->entry, &cp->alg_list);
+				dev_info(&pdev->dev, "%s\n",
+					q_alg->sha_alg.halg.base.cra_driver_name);
+			}
+		}
+	}
+	/*
+	 * Register crypto cipher (aes-ccm) algorithms the
+	 * device supports
+	 */
+	if (cp->ce_support.aes_ccm) {
+		struct qcrypto_alg *q_alg;
+
+		q_alg = _qcrypto_cipher_alg_alloc(cp, &_qcrypto_aead_ccm_algo);
+		if (IS_ERR(q_alg)) {
+			rc = PTR_ERR(q_alg);
+			goto err;
+		}
+		rc = crypto_register_alg(&q_alg->cipher_alg);
+		if (rc) {
+			dev_err(&pdev->dev, "%s alg registration failed\n",
+					q_alg->cipher_alg.cra_driver_name);
+			kfree(q_alg);
+		} else {
+			list_add_tail(&q_alg->entry, &cp->alg_list);
+			dev_info(&pdev->dev, "%s\n",
+					q_alg->cipher_alg.cra_driver_name);
+		}
+	}
+
+	return 0;
+err:
+	_qcrypto_remove(pdev);
+	return rc;
+}
+
+static struct platform_driver _qualcomm_crypto = {
+	.probe	= _qcrypto_probe,
+	.remove	= _qcrypto_remove,
+	.driver	= {
+		.owner = THIS_MODULE,
+		.name = "qcrypto",
+	},
+};
+
+static int _debug_qcrypto[MAX_CRYPTO_DEVICE];
+
+static int _debug_stats_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t _debug_stats_read(struct file *file, char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int rc;
+	int qcrypto = *((int *) file->private_data);
+	int len;
+
+	len = _disp_stats(qcrypto);
+
+	rc = simple_read_from_buffer(buf, count, ppos, _debug_read_buf, len);
+
+	return rc;
+}
+
+static ssize_t _debug_stats_write(struct file *file, const char __user *buf,
+			size_t count, loff_t *ppos)
+{
+	int qcrypto = *((int *) file->private_data);
+
+	memset((char *)&_qcrypto_stat[qcrypto], 0, sizeof(struct crypto_stat));
+	return count;
+}
+
+static const struct file_operations _debug_stats_ops = {
+	.open =		_debug_stats_open,
+	.read =		_debug_stats_read,
+	.write =	_debug_stats_write,
+};
+
+static int _qcrypto_debug_init(void)
+{
+	int rc;
+	char name[DEBUG_MAX_FNAME];
+	int i;
+	struct dentry *dent;
+
+	_debug_dent = debugfs_create_dir("qcrypto", NULL);
+	if (IS_ERR(_debug_dent)) {
+		pr_err("qcrypto debugfs_create_dir fail, error %ld\n",
+				PTR_ERR(_debug_dent));
+		return PTR_ERR(_debug_dent);
+	}
+
+	for (i = 0; i < MAX_CRYPTO_DEVICE; i++) {
+		snprintf(name, DEBUG_MAX_FNAME-1, "stats-%d", i+1);
+		_debug_qcrypto[i] = i;
+		dent = debugfs_create_file(name, 0644, _debug_dent,
+				&_debug_qcrypto[i], &_debug_stats_ops);
+		if (dent == NULL) {
+			pr_err("qcrypto debugfs_create_file fail\n");
+			rc = -ENOMEM;
+			goto err;
+		}
+	}
+	return 0;
+err:
+	debugfs_remove_recursive(_debug_dent);
+	return rc;
+}
+
+static int __init _qcrypto_init(void)
+{
+	int rc;
+
+	rc = _qcrypto_debug_init();
+	if (rc)
+		return rc;
+
+	return platform_driver_register(&_qualcomm_crypto);
+}
+
+static void __exit _qcrypto_exit(void)
+{
+	pr_debug("%s Unregister QCRYPTO\n", __func__);
+	debugfs_remove_recursive(_debug_dent);
+	platform_driver_unregister(&_qualcomm_crypto);
+}
+
+module_init(_qcrypto_init);
+module_exit(_qcrypto_exit);
+
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Mona Hossain ");
+MODULE_DESCRIPTION("Qualcomm Crypto driver");
+MODULE_VERSION("1.21");
diff --git a/drivers/crypto/msm/qcryptohw_30.h b/drivers/crypto/msm/qcryptohw_30.h
new file mode 100644
index 000000000000..edbee7142155
--- /dev/null
+++ b/drivers/crypto/msm/qcryptohw_30.h
@@ -0,0 +1,308 @@
+/* Copyright (c) 2009-2011, Code
Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_ +#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_ + +#define QCE_AUTH_REG_BYTE_COUNT 2 +#define CRYPTO_DATA_IN_REG 0x0 +#define CRYPTO_DATA_OUT_REG 0x10 +#define CRYPTO_STATUS_REG 0x20 +#define CRYPTO_CONFIG_REG 0x24 +#define CRYPTO_DEBUG_REG 0x28 +#define CRYPTO_REGISTER_LOCK_REG 0x2C +#define CRYPTO_SEG_CFG_REG 0x30 +#define CRYPTO_ENCR_SEG_CFG_REG 0x34 +#define CRYPTO_AUTH_SEG_CFG_REG 0x38 +#define CRYPTO_SEG_SIZE_REG 0x3C +#define CRYPTO_GOPROC_REG 0x40 +#define CRYPTO_ENGINES_AVAIL 0x44 + +#define CRYPTO_DES_KEY0_REG 0x50 +#define CRYPTO_DES_KEY1_REG 0x54 +#define CRYPTO_DES_KEY2_REG 0x58 +#define CRYPTO_DES_KEY3_REG 0x5C +#define CRYPTO_DES_KEY4_REG 0x60 +#define CRYPTO_DES_KEY5_REG 0x64 + +#define CRYPTO_CNTR0_IV0_REG 0x70 +#define CRYPTO_CNTR1_IV1_REG 0x74 +#define CRYPTO_CNTR2_IV2_REG 0x78 +#define CRYPTO_CNTR3_IV3_REG 0x7C +#define CRYPTO_CNTR_MASK_REG 0x80 + +#define CRYPTO_AUTH_BYTECNT0_REG 0x90 +#define CRYPTO_AUTH_BYTECNT1_REG 0x94 +#define CRYPTO_AUTH_BYTECNT2_REG 0x98 +#define CRYPTO_AUTH_BYTECNT3_REG 0x9C + +#define CRYPTO_AUTH_IV0_REG 0x100 +#define CRYPTO_AUTH_IV1_REG 0x104 +#define CRYPTO_AUTH_IV2_REG 0x108 +#define CRYPTO_AUTH_IV3_REG 0x10C +#define CRYPTO_AUTH_IV4_REG 0x110 +#define CRYPTO_AUTH_IV5_REG 0x114 +#define CRYPTO_AUTH_IV6_REG 0x118 +#define CRYPTO_AUTH_IV7_REG 0x11C +#define CRYPTO_AUTH_IV8_REG 0x120 +#define CRYPTO_AUTH_IV9_REG 0x124 +#define CRYPTO_AUTH_IV10_REG 0x128 +#define CRYPTO_AUTH_IV11_REG 0x12C +#define CRYPTO_AUTH_IV12_REG 0x130 +#define CRYPTO_AUTH_IV13_REG 0x134 +#define CRYPTO_AUTH_IV14_REG 0x138 +#define CRYPTO_AUTH_IV15_REG 0x13C + +#define CRYPTO_AES_RNDKEY0 0x200 +#define CRYPTO_AES_RNDKEY1 0x204 +#define CRYPTO_AES_RNDKEY2 0x208 +#define CRYPTO_AES_RNDKEY3 0x20C +#define CRYPTO_AES_RNDKEY4 0x210 +#define CRYPTO_AES_RNDKEY5 0x214 +#define CRYPTO_AES_RNDKEY6 0x218 +#define CRYPTO_AES_RNDKEY7 0x21C +#define CRYPTO_AES_RNDKEY8 0x220 +#define CRYPTO_AES_RNDKEY9 0x224 +#define CRYPTO_AES_RNDKEY10 0x228 +#define CRYPTO_AES_RNDKEY11 0x22c +#define CRYPTO_AES_RNDKEY12 0x230 +#define CRYPTO_AES_RNDKEY13 0x234 +#define CRYPTO_AES_RNDKEY14 0x238 +#define CRYPTO_AES_RNDKEY15 0x23C +#define CRYPTO_AES_RNDKEY16 0x240 +#define CRYPTO_AES_RNDKEY17 0x244 +#define CRYPTO_AES_RNDKEY18 0x248 +#define CRYPTO_AES_RNDKEY19 0x24C +#define CRYPTO_AES_RNDKEY20 0x250 +#define CRYPTO_AES_RNDKEY21 0x254 +#define CRYPTO_AES_RNDKEY22 0x258 +#define CRYPTO_AES_RNDKEY23 0x25C +#define CRYPTO_AES_RNDKEY24 0x260 +#define CRYPTO_AES_RNDKEY25 0x264 +#define CRYPTO_AES_RNDKEY26 0x268 +#define CRYPTO_AES_RNDKEY27 0x26C +#define CRYPTO_AES_RNDKEY28 0x270 +#define CRYPTO_AES_RNDKEY29 0x274 +#define CRYPTO_AES_RNDKEY30 0x278 +#define CRYPTO_AES_RNDKEY31 0x27C +#define CRYPTO_AES_RNDKEY32 0x280 +#define CRYPTO_AES_RNDKEY33 0x284 +#define CRYPTO_AES_RNDKEY34 0x288 +#define CRYPTO_AES_RNDKEY35 0x28c +#define CRYPTO_AES_RNDKEY36 0x290 +#define CRYPTO_AES_RNDKEY37 0x294 +#define CRYPTO_AES_RNDKEY38 0x298 +#define CRYPTO_AES_RNDKEY39 0x29C +#define 
CRYPTO_AES_RNDKEY40		0x2A0
+#define CRYPTO_AES_RNDKEY41		0x2A4
+#define CRYPTO_AES_RNDKEY42		0x2A8
+#define CRYPTO_AES_RNDKEY43		0x2AC
+#define CRYPTO_AES_RNDKEY44		0x2B0
+#define CRYPTO_AES_RNDKEY45		0x2B4
+#define CRYPTO_AES_RNDKEY46		0x2B8
+#define CRYPTO_AES_RNDKEY47		0x2BC
+#define CRYPTO_AES_RNDKEY48		0x2C0
+#define CRYPTO_AES_RNDKEY49		0x2C4
+#define CRYPTO_AES_RNDKEY50		0x2C8
+#define CRYPTO_AES_RNDKEY51		0x2CC
+#define CRYPTO_AES_RNDKEY52		0x2D0
+#define CRYPTO_AES_RNDKEY53		0x2D4
+#define CRYPTO_AES_RNDKEY54		0x2D8
+#define CRYPTO_AES_RNDKEY55		0x2DC
+#define CRYPTO_AES_RNDKEY56		0x2E0
+#define CRYPTO_AES_RNDKEY57		0x2E4
+#define CRYPTO_AES_RNDKEY58		0x2E8
+#define CRYPTO_AES_RNDKEY59		0x2EC
+
+#define CRYPTO_DATA_SHADOW0		0x8000
+#define CRYPTO_DATA_SHADOW8191		0x8FFC
+
+/* status reg */
+#define CRYPTO_CORE_REV			28	/* bit 31-28 */
+#define CRYPTO_CORE_REV_MASK		(0xf << CRYPTO_CORE_REV)
+#define CRYPTO_DOUT_SIZE_AVAIL		22	/* bit 24-22 */
+#define CRYPTO_DOUT_SIZE_AVAIL_MASK	(0x7 << CRYPTO_DOUT_SIZE_AVAIL)
+#define CRYPTO_DIN_SIZE_AVAIL		19	/* bit 21-19 */
+#define CRYPTO_DIN_SIZE_AVAIL_MASK	(0x7 << CRYPTO_DIN_SIZE_AVAIL)
+#define CRYPTO_ACCESS_VIOL		18
+#define CRYPTO_SEG_CHNG_ERR		17
+#define CRYPTO_CFH_CHNG_ERR		16
+#define CRYPTO_DOUT_ERR			15
+#define CRYPTO_DIN_ERR			14
+#define CRYPTO_LOCKED			13
+#define CRYPTO_CRYPTO_STATE		10	/* bit 12-10 */
+#define CRYPTO_CRYPTO_STATE_MASK	(0x7 << CRYPTO_CRYPTO_STATE)
+#define CRYPTO_ENCR_BUSY		9
+#define CRYPTO_AUTH_BUSY		8
+#define CRYPTO_DOUT_INTR		7
+#define CRYPTO_DIN_INTR			6
+#define CRYPTO_AUTH_DONE_INTR		5
+#define CRYPTO_ERR_INTR			4
+#define CRYPTO_DOUT_RDY			3
+#define CRYPTO_DIN_RDY			2
+#define CRYPTO_AUTH_DONE		1
+#define CRYPTO_SW_ERR			0
+
+#define CRYPTO_CRYPTO_STATE_IDLE		0
+#define CRYPTO_CRYPTO_STATE_LOCKED		1
+#define CRYPTO_CRYPTO_STATE_GO			3
+#define CRYPTO_CRYPTO_STATE_PROCESSING		4
+#define CRYPTO_CRYPTO_STATE_FINAL_READ		5
+#define CRYPTO_CRYPTO_STATE_CTXT_CLEARING	6
+#define CRYPTO_CRYPTO_STATE_UNLOCKING		7
+
+/* config reg */
+#define CRYPTO_HIGH_SPD_HASH_EN_N	15
+#define CRYPTO_HIGH_SPD_OUT_EN_N	14
+#define CRYPTO_HIGH_SPD_IN_EN_N		13
+#define CRYPTO_DBG_EN			12
+#define CRYPTO_DBG_SEL			7	/* bit 11:7 */
+#define CRYPTO_DBG_SEL_MASK		(0x1F << CRYPTO_DBG_SEL)
+#define CRYPTO_MASK_DOUT_INTR		6
+#define CRYPTO_MASK_DIN_INTR		5
+#define CRYPTO_MASK_AUTH_DONE_INTR	4
+#define CRYPTO_MASK_ERR_INTR		3
+#define CRYPTO_AUTO_SHUTDOWN_EN		2
+#define CRYPTO_CLK_EN_N			1
+#define CRYPTO_SW_RST			0
+
+/* seg_cfg reg */
+#define CRYPTO_F8_KEYSTREAM_ENABLE	25
+#define CRYPTO_F9_DIRECTION		24
+#define CRYPTO_F8_DIRECTION		23
+#define CRYPTO_USE_HW_KEY		22
+
+#define CRYPTO_CNTR_ALG			20	/* bit 21-20 */
+#define CRYPTO_CNTR_ALG_MASK		(3 << CRYPTO_CNTR_ALG)
+
+#define CRYPTO_CLR_CNTXT		19
+#define CRYPTO_LAST			18
+#define CRYPTO_FIRST			17
+#define CRYPTO_ENCODE			16
+
+#define CRYPTO_AUTH_POS			14	/* bit 15-14 */
+#define CRYPTO_AUTH_POS_MASK		(3 << CRYPTO_AUTH_POS)
+
+#define CRYPTO_AUTH_SIZE		11	/* bit 13-11 */
+#define CRYPTO_AUTH_SIZE_MASK		(7 << CRYPTO_AUTH_SIZE)
+
+#define CRYPTO_AUTH_ALG			9	/* bit 10-9 */
+#define CRYPTO_AUTH_ALG_MASK		(3 << CRYPTO_AUTH_ALG)
+
+#define CRYPTO_ENCR_MODE		6	/* bit 8-6 */
+#define CRYPTO_ENCR_MODE_MASK		(7 << CRYPTO_ENCR_MODE)
+
+#define CRYPTO_ENCR_KEY_SZ		3	/* bit 5-3 */
+#define CRYPTO_ENCR_KEY_SZ_MASK		(7 << CRYPTO_ENCR_KEY_SZ)
+
+#define CRYPTO_ENCR_ALG			0	/* bit 2-0 */
+#define CRYPTO_ENCR_ALG_MASK		(7 << CRYPTO_ENCR_ALG)
+
+#define CRYPTO_CNTR_ALG_NIST		0
+#define CRYPTO_CNTR_ALG_UMB		1
+#define CRYPTO_CNTR_ALG_VAR2		2
+
+#define CRYPTO_AUTH_POS_BEFORE		0
+#define
CRYPTO_AUTH_POS_AFTER 1 + +#define CRYPTO_AUTH_SIZE_SHA1 0 +#define CRYPTO_AUTH_SIZE_SHA256 1 +#define CRYPTO_AUTH_SIZE_SHA384 2 +#define CRYPTO_AUTH_SIZE_SHA512 3 +#define CRYPTO_AUTH_SIZE_HMAC_SHA1 4 + +#define CRYPTO_AUTH_SIZE_UIA1 0 +#define CRYPTO_AUTH_SIZE_UIA2 1 + +#define CRYPTO_AUTH_ALG_NONE 0 +#define CRYPTO_AUTH_ALG_SHA 1 +#define CRYPTO_AUTH_ALG_F9 2 +#define CRYPTO_AUTH_ALG_RESERVED1 3 + +#define CRYPTO_ENCR_MODE_ECB 0 +#define CRYPTO_ENCR_MODE_CBC 1 +/* only valid when AES */ +#define CRYPTO_ENCR_MODE_CTR 2 + + +#define CRYPTO_ENCR_KEY_SZ_DES 0 +#define CRYPTO_ENCR_KEY_SZ_3DES 1 + +#define CRYPTO_ENCR_KEY_SZ_AES128 0 +#define CRYPTO_ENCR_KEY_SZ_AES192 1 +#define CRYPTO_ENCR_KEY_SZ_AES256 2 + +#define CRYPTO_ENCR_KEY_SZ_UEA1 0 +#define CRYPTO_ENCR_KEY_SZ_UEA2 1 + +#define CRYPTO_ENCR_ALG_NONE 0 +#define CRYPTO_ENCR_ALG_DES 1 +#define CRYPTO_ENCR_ALG_AES 2 +#define CRYPTO_ENCR_ALG_C2 3 +#define CRYPTO_ENCR_ALG_F8 4 + +/* encr_seg_cfg reg */ +#define CRYPTO_ENCR_SEG_SIZE 16 /* bit 31-16 */ +#define CRYPTO_ENCR_SEG_SIZE_MASK (0xffff << CRYPTO_ENCR_SEG_SIZE) + +#define CRYPTO_ENCR_START 0 +#define CRYPTO_ENCR_START_MASK (0xffff << CRYPTO_ENCR_START) + +/* auth_seg_cfg reg */ +#define CRYPTO_AUTH_SEG_SIZE 16 /* bit 31-16 */ +#define CRYPTO_AUTH_SEG_SIZE_MASK (0xffff << CRYPTO_AUTH_SEG_SIZE) + +#define CRYPTO_AUTH_START 0 +#define CRYPTO_AUTH_START_MASK (0xffff << CRYPTO_AUTH_START) + + +/* seg_size reg */ +#define CRYPTO_SEG_SIZE 0 +#define CRYPTO_SEG_SIZE_MASK (0xffff << CRYPTO_SEG_SIZE) + +/* goproc reg */ +#define CRYPTO_GO 0 + +/* engines_avail */ +#define CRYPTO_F9_SEL 8 +#define CRYPTO_F8_SEL 7 +#define CRYPTO_HMAC_SEL 6 +#define CRYPTO_SHA512_SEL 5 +#define CRYPTO_SHA_SEL 4 +#define CRYPTO_DES_SEL 3 +#define CRYPTO_C2_SEL 2 + +#define CRYPTO_AES_SEL 0 /* bit 1-0 */ +#define CRYPTO_AES_SEL_MASK (3 << CRYPTO_AES_SEL) +#define CRYPTO_AES_SEL_NO 0 +#define CRYPTO_AES_SEL_SLOW 1 +#define CRYPTO_AES_SEL_FAST 2 +#define CRYPTO_AES_SEL_RESERVED 3 + +/* F8 definition of CRYPTO_CNTR1_IV1_REG */ +#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT 16 /* bit 31 - 16 */ +#define CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT_MASK \ + (0xffff << CRYPTO_CNTR1_IV1_REG_F8_PKT_CNT) + +#define CRYPTO_CNTR1_IV1_REG_F8_BEARER 0 /* bit 4 - 0 */ +#define CRYPTO_CNTR1_IV1_REG_F8_BEARER_MASK \ + (0x1f << CRYPTO_CNTR1_IV1_REG_F8_BEARER) + +/* F9 definition of CRYPTO_AUTH_IV4_REG */ +#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS 0 /* bit 2 - 0 */ +#define CRYPTO_AUTH_IV4_REG_F9_VALID_BIS_MASK \ + (0x7 << CRYPTO_AUTH_IV4_REG_F9_VALID_BIS) + +/* misc */ +#define CRYPTO_AES_RNDKEYS 60 + +#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_30_H_ */ diff --git a/drivers/crypto/msm/qcryptohw_40.h b/drivers/crypto/msm/qcryptohw_40.h new file mode 100644 index 000000000000..367bdaaf7ae9 --- /dev/null +++ b/drivers/crypto/msm/qcryptohw_40.h @@ -0,0 +1,316 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#ifndef _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_ +#define _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_ + + +#define QCE_AUTH_REG_BYTE_COUNT 4 +#define CRYPTO_VERSION_REG 0x0 +#define CRYPTO_DATA_IN_REG 0x008 +#define CRYPTO_DATA_OUT_REG 0x010 +#define CRYPTO_STATUS_REG 0x100 +#define CRYPTO_ENGINES_AVAIL 0x104 +#define CRYPTO3_VERSION_REG 0x108 +#define CRYPTO_SEG_SIZE_REG 0x200 +#define CRYPTO_GOPROC_REG 0x204 +#define CRYPTO_ENCR_SEG_CFG_REG 0x300 + +#define CRYPTO_ENCR_SEG_SIZE_REG 0x304 +#define CRYPTO_ENCR_SEG_START_REG 0x308 + +#define CRYPTO_ENCR_KEY0_REG 0x310 +#define CRYPTO_ENCR_KEY1_REG 0x314 +#define CRYPTO_ENCR_KEY2_REG 0x318 +#define CRYPTO_ENCR_KEY3_REG 0x31C +#define CRYPTO_ENCR_KEY4_REG 0x320 +#define CRYPTO_ENCR_KEY5_REG 0x324 +#define CRYPTO_ENCR_KEY6_REG 0x328 +#define CRYPTO_ENCR_KEY7_REG 0x32C + +#define CRYPTO_ENCR_XTS_KEY0_REG 0x330 +#define CRYPTO_ENCR_XTS_KEY1_REG 0x334 +#define CRYPTO_ENCR_XTS_KEY2_REG 0x338 +#define CRYPTO_ENCR_XTS_KEY3_REG 0x33C +#define CRYPTO_ENCR_XTS_KEY4_REG 0x340 +#define CRYPTO_ENCR_XTS_KEY5_REG 0x344 +#define CRYPTO_ENCR_XTS_KEY6_REG 0x348 +#define CRYPTO_ENCR_XTS_KEY7_REG 0x34C + +#define CRYPTO_CNTR0_IV0_REG 0x350 +#define CRYPTO_CNTR1_IV1_REG 0x354 +#define CRYPTO_CNTR2_IV2_REG 0x358 +#define CRYPTO_CNTR3_IV3_REG 0x35C + +#define CRYPTO_CNTR_MASK_REG 0x360 + +#define CRYPTO_ENCR_XTS_DU_SIZE_REG 0x364 + +#define CRYPTO_AUTH_SEG_CFG_REG 0x400 +#define CRYPTO_AUTH_SEG_SIZE_REG 0x404 +#define CRYPTO_AUTH_SEG_START_REG 0x408 + +#define CRYPTO_AUTH_KEY0_REG 0x410 +#define CRYPTO_AUTH_KEY1_REG 0x414 +#define CRYPTO_AUTH_KEY2_REG 0x418 +#define CRYPTO_AUTH_KEY3_REG 0x41C +#define CRYPTO_AUTH_KEY4_REG 0x420 +#define CRYPTO_AUTH_KEY5_REG 0x424 +#define CRYPTO_AUTH_KEY6_REG 0x428 +#define CRYPTO_AUTH_KEY7_REG 0x42C +#define CRYPTO_AUTH_KEY8_REG 0x430 +#define CRYPTO_AUTH_KEY9_REG 0x434 +#define CRYPTO_AUTH_KEY10_REG 0x438 +#define CRYPTO_AUTH_KEY11_REG 0x43C +#define CRYPTO_AUTH_KEY12_REG 0x440 +#define CRYPTO_AUTH_KEY13_REG 0x444 +#define CRYPTO_AUTH_KEY14_REG 0x448 +#define CRYPTO_AUTH_KEY15_REG 0x44C + +#define CRYPTO_AUTH_IV0_REG 0x450 +#define CRYPTO_AUTH_IV1_REG 0x454 +#define CRYPTO_AUTH_IV2_REG 0x458 +#define CRYPTO_AUTH_IV3_REG 0x45C +#define CRYPTO_AUTH_IV4_REG 0x460 +#define CRYPTO_AUTH_IV5_REG 0x464 +#define CRYPTO_AUTH_IV6_REG 0x468 +#define CRYPTO_AUTH_IV7_REG 0x46C +#define CRYPTO_AUTH_IV8_REG 0x470 +#define CRYPTO_AUTH_IV9_REG 0x474 +#define CRYPTO_AUTH_IV10_REG 0x478 +#define CRYPTO_AUTH_IV11_REG 0x47C +#define CRYPTO_AUTH_IV12_REG 0x480 +#define CRYPTO_AUTH_IV13_REG 0x484 +#define CRYPTO_AUTH_IV14_REG 0x488 +#define CRYPTO_AUTH_IV15_REG 0x48C + +#define CRYPTO_AUTH_INFO_NONCE0_REG 0x490 +#define CRYPTO_AUTH_INFO_NONCE1_REG 0x494 +#define CRYPTO_AUTH_INFO_NONCE2_REG 0x498 +#define CRYPTO_AUTH_INFO_NONCE3_REG 0x49C + +#define CRYPTO_AUTH_BYTECNT0_REG 0x4A0 +#define CRYPTO_AUTH_BYTECNT1_REG 0x4A4 +#define CRYPTO_AUTH_BYTECNT2_REG 0x4A8 +#define CRYPTO_AUTH_BYTECNT3_REG 0x4AC + +#define CRYPTO_AUTH_EXP_MAC0_REG 0x4B0 +#define CRYPTO_AUTH_EXP_MAC1_REG 0x4B4 +#define CRYPTO_AUTH_EXP_MAC2_REG 0x4B8 +#define CRYPTO_AUTH_EXP_MAC3_REG 0x4BC +#define CRYPTO_AUTH_EXP_MAC4_REG 0x4C0 +#define CRYPTO_AUTH_EXP_MAC5_REG 0x4C4 +#define CRYPTO_AUTH_EXP_MAC6_REG 0x4C8 +#define CRYPTO_AUTH_EXP_MAC7_REG 0x4CC + +#define CRYPTO_CONFIG_REG 0x500 +#define CRYPTO_SACR_REG 0x504 +#define CRYPTO_DEBUG_REG 0x508 + +#define CRYPTO_DATA_SHADOW0 0x8000 +#define CRYPTO_DATA_SHADOW8191 0x8FFC + + +/* Register bits */ + +#define CRYPTO_CORE_MAJOR_REV 4 /* bit 7-4 */ 
+#define CRYPTO_CORE_MAJOR_REV_MASK (0xF << CRYPTO_CORE_MAJOR_REV) +#define CRYPTO_CORE_MINOR_REV 0 /* bit 3-0 */ +#define CRYPTO_CORE_MINOR_REV_MASK (0xF << CRYPTO_CORE_MINOR_REV) +#define CRYPTO_CORE_REV_MASK 0xFF + +/* status reg */ +#define CRYPTO_MAC_FAILED 25 +#define CRYPTO_DOUT_SIZE_AVAIL 22 /* bit 24-22 */ +#define CRYPTO_DOUT_SIZE_AVAIL_MASK (0x7 << CRYPTO_DOUT_SIZE_AVAIL) +#define CRYPTO_DIN_SIZE_AVAIL 19 /* bit 21-19 */ +#define CRYPTO_DIN_SIZE_AVAIL_MASK (0x7 << CRYPTO_DIN_SIZE_AVAIL) +#define CRYPTO_ACCESS_VIOL 18 +#define CRYPTO_SEG_CHNG_ERR 17 +#define CRYPTO_CFH_CHNG_ERR 16 +#define CRYPTO_DOUT_ERR 15 +#define CRYPTO_DIN_ERR 14 +#define CRYPTO_LOCKED 13 +#define CRYPTO_CRYPTO_STATE 10 /* bit 12-10 */ +#define CRYPTO_CRYPTO_STATE_MASK (0x7 << CRYPTO_CRYPTO_STATE) +#define CRYPTO_ENCR_BUSY 9 +#define CRYPTO_AUTH_BUSY 8 +#define CRYPTO_DOUT_INTR 7 +#define CRYPTO_DIN_INTR 6 +#define CRYPTO_OP_DONE_INTR 5 +#define CRYPTO_ERR_INTR 4 +#define CRYPTO_DOUT_RDY 3 +#define CRYPTO_DIN_RDY 2 +#define CRYPTO_OPERATION_DONE 1 +#define CRYPTO_SW_ERR 0 + +/* config reg */ +#define CRYPTO_REQ_SIZE 30 /* bit 31-30 */ +#define CRYPTO_REQ_SIZE_MASK (0x3 << CRYPTO_REQ_SIZE) +#define CRYPTO_REQ_SIZE_ENUM_16_BYTES 0 +#define CRYPTO_REQ_SIZE_ENUM_32_BYTES 1 +#define CRYPTO_REQ_SIZE_ENUM_64_BYTES 2 + +#define CRYPTO_MAX_QUEUED_REQ 27 /* bit 29-27 */ +#define CRYPTO_MAX_QUEUED_REQ_MASK (0x7 << CRYPTO_MAX_QUEUED_REQ) +#define CRYPTO_ENUM1_QUEUED_REQS 0 +#define CRYPTO_ENUM2_QUEUED_REQS 1 +#define CRYPTO_ENUM3_QUEUED_REQS 2 +#define CRYPTO_ENUM4_QUEUED_REQS 3 + +#define CRYPTO_FIFO_THRESHOLD 24 /* bit 26-24 */ +#define CRYPTO_FIFO_THRESHOLD_MASK (0x7 << CRYPTO_FIFO_THRESHOLD) +#define CRYPTO_FIFO_ENUM_16_BYTES 0 +#define CRYPTO_FIFO_ENUM_32_BYTES 1 +#define CRYPTO_FIFO_ENUM_48_BYTES 2 +#define CRYPTO_FIFO_ENUM_64_BYTES 3 + +#define CRYPTO_IRQ_ENABLES 20 /* bit 23-20 */ +#define CRYPTO_IRQ_ENABLES_MASK (0xF << CRYPTO_IRQ_ENABLES) + +#define CRYPTO_ACR_EN 18 +#define CRYPTO_BAM_MODE 17 +#define CRYPTO_LITTLE_ENDIAN_MODE 16 +#define CRYPTO_HIGH_SPD_OUT_EN_N 14 +#define CRYPTO_HIGH_SPD_IN_EN_N 13 +#define CRYPTO_DBG_EN 12 + +#define CRYPTO_DBG_SEL 7 /* bit 11:7 */ +#define CRYPTO_DBG_SEL_MASK (0x1F << CRYPTO_DBG_SEL) + +#define CRYPTO_MASK_DOUT_INTR 6 +#define CRYPTO_MASK_DIN_INTR 5 +#define CRYPTO_MASK_OP_DONE_INTR 4 +#define CRYPTO_MASK_ERR_INTR 3 +#define CRYPTO_AUTO_SHUTDOWN_EN 2 +#define CRYPTO_CLK_EN_N 1 + +/* auth_seg_cfg reg */ +#define CRYPTO_COMP_EXP_MAC 20 +#define CRYPTO_COMP_EXP_MAC_DISABLED 0 +#define CRYPTO_COMP_EXP_MAC_ENABLED 1 + +#define CRYPTO_F9_DIRECTION 19 +#define CRYPTO_F9_DIRECTION_UPLINK 0 +#define CRYPTO_F9_DIRECTION_DOWNLINK 1 + +#define CRYPTO_AUTH_NONCE_NUM_WORDS 16 +#define CRYPTO_AUTH_NONCE_NUM_WORDS_MASK \ + (0x7 << CRYPTO_AUTH_NONCE_NUM_WORDS) + +#define CRYPTO_USE_HW_KEY_AUTH 15 + +#define CRYPTO_LAST 14 + +#define CRYPTO_AUTH_POS 12 /* bit 13 .. 12*/ +#define CRYPTO_AUTH_POS_MASK (0x3 << CRYPTO_AUTH_POS) +#define CRYPTO_AUTH_POS_BEFORE 0 +#define CRYPTO_AUTH_POS_AFTER 1 + +#define CRYPTO_AUTH_SIZE 9 /* bits 11 .. 9*/ +#define CRYPTO_AUTH_SIZE_MASK (0x7 << CRYPTO_AUTH_SIZE) +#define CRYPTO_AUTH_SIZE_SHA1 0 +#define CRYPTO_AUTH_SIZE_SHA256 1 +#define CRYPTO_AUTH_SIZE_ENUM_4_BYTES 0 +#define CRYPTO_AUTH_SIZE_ENUM_6_BYTES 1 +#define CRYPTO_AUTH_SIZE_ENUM_8_BYTES 2 +#define CRYPTO_AUTH_SIZE_ENUM_10_BYTES 3 +#define CRYPTO_AUTH_SIZE_ENUM_12_BYTES 4 +#define CRYPTO_AUTH_SIZE_ENUM_14_BYTES 5 +#define CRYPTO_AUTH_SIZE_ENUM_16_BYTES 6 + +#define CRYPTO_AUTH_MODE 6 /* bit 8 .. 
6*/ +#define CRYPTO_AUTH_MODE_MASK (0x7 << CRYPTO_AUTH_MODE) +#define CRYPTO_AUTH_MODE_HASH 0 +#define CRYPTO_AUTH_MODE_HMAC 1 +#define CRYPTO_AUTH_MODE_CCM 0 +#define CRYPTO_AUTH_MODE_CMAC 1 + +#define CRYPTO_AUTH_KEY_SIZE 3 +#define CRYPTO_AUTH_KEY_SIZE_MASK (0x7 << CRYPTO_AUTH_KEY_SIZE) +#define CRYPTO_AUTH_KEY_SZ_AES128 0 +#define CRYPTO_AUTH_KEY_SZ_AES256 2 + +#define CRYPTO_AUTH_ALG 0 /* bit 2 .. 0*/ +#define CRYPTO_AUTH_ALG_MASK 7 +#define CRYPTO_AUTH_ALG_NONE 0 +#define CRYPTO_AUTH_ALG_SHA 1 +#define CRYPTO_AUTH_ALG_AES 2 +#define CRYPTO_AUTH_ALG_KASUMI 3 +#define CRYPTO_AUTH_ALG_SNOW3G 4 + +/* encr_xts_du_size reg */ +#define CRYPTO_ENCR_XTS_DU_SIZE 0 /* bit 19-0 */ +#define CRYPTO_ENCR_XTS_DU_SIZE_MASK 0xfffff + +/* encr_seg_cfg reg */ +#define CRYPTO_F8_KEYSTREAM_ENABLE 15 +#define CRYPTO_F8_KEYSTREAM_DISABLED 0 +#define CRYPTO_F8_KEYSTREAM_ENABLED 1 + +#define CRYPTO_F8_DIRECTION 14 +#define CRYPTO_F8_DIRECTION_UPLINK 0 +#define CRYPTO_F8_DIRECTION_DOWNLINK 1 + +#define CRYPTO_USE_HW_KEY_ENCR 13 +#define CRYPTO_USE_HW_KEY_REG 0 +#define CRYPTO_USE_HW_KEY 1 + +#define CRYPTO_CNTR_ALG 11 /* bit 12-11 */ +#define CRYPTO_CNTR_ALG_MASK (3 << CRYPTO_CNTR_ALG) +#define CRYPTO_CNTR_ALG_NIST 0 + +#define CRYPTO_ENCODE 10 + +#define CRYPTO_ENCR_MODE 6 /* bit 9-6 */ +#define CRYPTO_ENCR_MODE_MASK (0xF << CRYPTO_ENCR_MODE) +/* only valid when AES */ +#define CRYPTO_ENCR_MODE_ECB 0 +#define CRYPTO_ENCR_MODE_CBC 1 +#define CRYPTO_ENCR_MODE_CTR 2 +#define CRYPTO_ENCR_MODE_XTS 3 +#define CRYPTO_ENCR_MODE_CCM 4 + +#define CRYPTO_ENCR_KEY_SZ 3 /* bit 5-3 */ +#define CRYPTO_ENCR_KEY_SZ_MASK (7 << CRYPTO_ENCR_KEY_SZ) +#define CRYPTO_ENCR_KEY_SZ_DES 0 +#define CRYPTO_ENCR_KEY_SZ_3DES 1 +#define CRYPTO_ENCR_KEY_SZ_AES128 0 +#define CRYPTO_ENCR_KEY_SZ_AES256 2 +#define CRYPTO_ENCR_KEY_SZ_UEA1 0 +#define CRYPTO_ENCR_KEY_SZ_UEA2 1 + +#define CRYPTO_ENCR_ALG 0 /* bit 2-0 */ +#define CRYPTO_ENCR_ALG_MASK (7 << CRYPTO_ENCR_ALG) +#define CRYPTO_ENCR_ALG_NONE 0 +#define CRYPTO_ENCR_ALG_DES 1 +#define CRYPTO_ENCR_ALG_AES 2 +#define CRYPTO_ENCR_ALG_KASUMI 3 +#define CRYPTO_ENCR_ALG_SNOW_3G 5 + +/* goproc reg */ +#define CRYPTO_GO 0 +#define CRYPTO_CLR_CNTXT 1 + +/* engines_avail */ +#define CRYPTO_ENCR_AES_SEL 0 +#define CRYPTO_DES_SEL 3 +#define CRYPTO_ENCR_SNOW3G_SEL 4 +#define CRYPTO_ENCR_KASUMI_SEL 5 +#define CRYPTO_SHA_SEL 6 +#define CRYPTO_SHA512_SEL 7 +#define CRYPTO_AUTH_AES_SEL 8 +#define CRYPTO_AUTH_SNOW3G_SEL 9 +#define CRYPTO_AUTH_KASUMI_SEL 10 +#define CRYPTO_BAM_SEL 11 + +#endif /* _DRIVERS_CRYPTO_MSM_QCRYPTOHW_40_H_ */ diff --git a/include/linux/Kbuild b/include/linux/Kbuild index 2a1abb6a0437..829b56ef851c 100644 --- a/include/linux/Kbuild +++ b/include/linux/Kbuild @@ -413,4 +413,5 @@ header-y += xattr.h header-y += xfrm.h header-y += msm_mdp.h header-y += msm_rotator.h +header-y += qcedev.h header-y += genlock.h diff --git a/include/linux/platform_data/qcom_crypto_device.h b/include/linux/platform_data/qcom_crypto_device.h new file mode 100644 index 000000000000..08aa784fa8c9 --- /dev/null +++ b/include/linux/platform_data/qcom_crypto_device.h @@ -0,0 +1,24 @@ +/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. 
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __QCOM_CRYPTO_DEVICE__H
+#define __QCOM_CRYPTO_DEVICE__H
+
+struct msm_ce_hw_support {
+	uint32_t ce_shared;
+	uint32_t shared_ce_resource;
+	uint32_t hw_key_support;
+	uint32_t sha_hmac;
+	void *bus_scale_table;
+};
+
+#endif /* __QCOM_CRYPTO_DEVICE__H */
diff --git a/include/linux/qcedev.h b/include/linux/qcedev.h
new file mode 100644
index 000000000000..87040df06564
--- /dev/null
+++ b/include/linux/qcedev.h
@@ -0,0 +1,241 @@
+#ifndef __QCEDEV__H
+#define __QCEDEV__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QCEDEV_MAX_SHA_BLOCK_SIZE	64
+#define QCEDEV_MAX_BEARER	31
+#define QCEDEV_MAX_KEY_SIZE	64
+#define QCEDEV_MAX_IV_SIZE	32
+
+#define QCEDEV_MAX_BUFFERS	16
+#define QCEDEV_MAX_SHA_DIGEST	32
+
+#define QCEDEV_USE_PMEM	1
+#define QCEDEV_NO_PMEM	0
+
+#define QCEDEV_AES_KEY_128	16
+#define QCEDEV_AES_KEY_192	24
+#define QCEDEV_AES_KEY_256	32
+/**
+*qcedev_oper_enum: Operation types
+* @QCEDEV_OPER_ENC:		Encrypt
+* @QCEDEV_OPER_DEC:		Decrypt
+* @QCEDEV_OPER_ENC_NO_KEY:	Encrypt. Do not need key to be specified by
+*				user. Key already set by an external processor.
+* @QCEDEV_OPER_DEC_NO_KEY:	Decrypt. Do not need the key to be specified by
+*				user. Key already set by an external processor.
+*/
+enum qcedev_oper_enum {
+	QCEDEV_OPER_DEC		= 0,
+	QCEDEV_OPER_ENC		= 1,
+	QCEDEV_OPER_DEC_NO_KEY	= 2,
+	QCEDEV_OPER_ENC_NO_KEY	= 3,
+	QCEDEV_OPER_LAST
+};
+
+/**
+*qcedev_cipher_alg_enum: Cipher algorithm types
+* @QCEDEV_ALG_DES:		DES
+* @QCEDEV_ALG_3DES:		3DES
+* @QCEDEV_ALG_AES:		AES
+*/
+enum qcedev_cipher_alg_enum {
+	QCEDEV_ALG_DES		= 0,
+	QCEDEV_ALG_3DES		= 1,
+	QCEDEV_ALG_AES		= 2,
+	QCEDEV_ALG_LAST
+};
+
+/**
+*qcedev_cipher_mode_enum: AES mode
+* @QCEDEV_AES_MODE_CBC:		CBC
+* @QCEDEV_AES_MODE_ECB:		ECB
+* @QCEDEV_AES_MODE_CTR:		CTR
+* @QCEDEV_AES_MODE_XTS:		XTS
+* @QCEDEV_AES_MODE_CCM:		CCM
+* @QCEDEV_DES_MODE_CBC:		CBC
+* @QCEDEV_DES_MODE_ECB:		ECB
+*/
+enum qcedev_cipher_mode_enum {
+	QCEDEV_AES_MODE_CBC	= 0,
+	QCEDEV_AES_MODE_ECB	= 1,
+	QCEDEV_AES_MODE_CTR	= 2,
+	QCEDEV_AES_MODE_XTS	= 3,
+	QCEDEV_AES_MODE_CCM	= 4,
+	QCEDEV_DES_MODE_CBC	= 5,
+	QCEDEV_DES_MODE_ECB	= 6,
+	QCEDEV_AES_DES_MODE_LAST
+};
+
+/**
+*enum qcedev_sha_alg_enum : Secure Hashing Algorithm
+* @QCEDEV_ALG_SHA1:		Digest returned: 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256:		Digest returned: 32 bytes (256 bit)
+* @QCEDEV_ALG_SHA1_HMAC:	HMAC returned 20 bytes (160 bits)
+* @QCEDEV_ALG_SHA256_HMAC:	HMAC returned 32 bytes (256 bit)
+* @QCEDEV_ALG_AES_CMAC:		Configurable MAC size
+*/
+enum qcedev_sha_alg_enum {
+	QCEDEV_ALG_SHA1		= 0,
+	QCEDEV_ALG_SHA256	= 1,
+	QCEDEV_ALG_SHA1_HMAC	= 2,
+	QCEDEV_ALG_SHA256_HMAC	= 3,
+	QCEDEV_ALG_AES_CMAC	= 4,
+	QCEDEV_ALG_SHA_ALG_LAST
+};
+
+/**
+* struct buf_info - Buffer information
+* @offset:	Offset from the base address of the buffer
+*		(Used when buffer is allocated using PMEM)
+* @vaddr:	Virtual buffer address pointer
+* @len:		Size of the buffer
+*/
+struct	buf_info {
+	union {
+		uint32_t	offset;
+		uint8_t		*vaddr;
+	};
+	uint32_t	len;
+};
+
+/**
+* struct qcedev_vbuf_info - Source and destination Buffer information
+* @src:	Array of buf_info for input/source
+* @dst:	Array of buf_info for output/destination
+*/
+struct	qcedev_vbuf_info {
+	struct buf_info	src[QCEDEV_MAX_BUFFERS];
+	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct qcedev_pmem_info - Stores PMEM buffer information
+* @fd_src:	Handle to /dev/adsp_pmem used to allocate
+*		memory for input/src buffer
+* @src:		Array of buf_info for input/source
+* @fd_dst:	Handle to /dev/adsp_pmem used to allocate
+*		memory for output/dst buffer
+* @dst:		Array of buf_info for output/destination
+*/
+struct	qcedev_pmem_info {
+	int		fd_src;
+	struct buf_info	src[QCEDEV_MAX_BUFFERS];
+	int		fd_dst;
+	struct buf_info	dst[QCEDEV_MAX_BUFFERS];
+};
+
+/**
+* struct qcedev_cipher_op_req - Holds the ciphering request information
+* @use_pmem (IN):	Flag to indicate if buffer source is PMEM
+*			QCEDEV_USE_PMEM/QCEDEV_NO_PMEM
+* @pmem (IN):		Stores PMEM buffer information.
+*			Refer struct qcedev_pmem_info
+* @vbuf (IN/OUT):	Stores Source and destination Buffer information
+*			Refer to struct qcedev_vbuf_info
+* @data_len (IN):	Total Length of input/src and output/dst in bytes
+* @in_place_op (IN):	Indicates whether the operation is in place, where
+*			source == destination.
+*			When using PMEM allocated memory, must set this to 1
+* @enckey (IN):		128 bits of confidentiality key
+*			enckey[0] bit 127-120, enckey[1] bit 119-112,..
+*			enckey[15] bit 7-0
+* @encklen (IN):	Length of the encryption key (set to 128 bits/16
+*			bytes in the driver)
+* @iv (IN/OUT):		Initialisation vector data
+*			This is updated by the driver, incremented by
+*			number of blocks encrypted/decrypted.
+* @ivlen (IN):		Length of the IV
+* @byteoffset (IN):	Offset in the Cipher BLOCK (applicable and to be set
+*			for AES-128 CTR mode only)
+* @alg (IN):		Type of ciphering algorithm: AES/DES/3DES
+* @mode (IN):		Mode to use with the AES algorithm: ECB/CBC/CTR.
+*			Applicable when using the AES algorithm only
+* @op (IN):		Type of operation: QCEDEV_OPER_DEC/QCEDEV_OPER_ENC or
+*			QCEDEV_OPER_ENC_NO_KEY/QCEDEV_OPER_DEC_NO_KEY
+*
+* If use_pmem is set to 0, the driver assumes that memory was not allocated
+* via PMEM, and the kernel will need to allocate memory, copy data from the
+* user space buffer (data_src/data_dst), process it accordingly, and copy
+* data back to the user space buffer
+*
+* If use_pmem is set to 1, the driver assumes that memory was allocated via
+* PMEM.
+* The kernel driver will use the fd_src to determine the kernel virtual address
+* base that maps to the user space virtual address base for the buffer
+* allocated in user space.
+* The final input/src and output/dst buffer pointer will be determined
+* by adding the offsets to the kernel virtual addr.
+*
+* If use of hardware key is supported in the target, user can configure the
+* key parameters (encklen, enckey) to use the hardware key.
+* In order to use the hardware key, set encklen to 0 and set the enckey
+* data array to 0.
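+*
+* Example (illustrative only): a single-block AES-128-CBC encryption of a
+* 16 byte user space buffer would typically set use_pmem = 0, entries = 1,
+* vbuf.src[0].vaddr/len and vbuf.dst[0].vaddr/len for the buffers,
+* data_len = 16, in_place_op = 0, encklen = 16, ivlen = 16,
+* alg = QCEDEV_ALG_AES, mode = QCEDEV_AES_MODE_CBC, op = QCEDEV_OPER_ENC,
+* and then issue QCEDEV_IOCTL_ENC_REQ on the qcedev device node.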
+*/
+struct	qcedev_cipher_op_req {
+	uint8_t				use_pmem;
+	union {
+		struct qcedev_pmem_info	pmem;
+		struct qcedev_vbuf_info	vbuf;
+	};
+	uint32_t			entries;
+	uint32_t			data_len;
+	uint8_t				in_place_op;
+	uint8_t				enckey[QCEDEV_MAX_KEY_SIZE];
+	uint32_t			encklen;
+	uint8_t				iv[QCEDEV_MAX_IV_SIZE];
+	uint32_t			ivlen;
+	uint32_t			byteoffset;
+	enum qcedev_cipher_alg_enum	alg;
+	enum qcedev_cipher_mode_enum	mode;
+	enum qcedev_oper_enum		op;
+};
+
+/**
+* struct qcedev_sha_op_req - Holds the hashing request information
+* @data (IN):		Array of pointers to the data to be hashed
+* @entries (IN):	Number of buf_info entries in the data array
+* @data_len (IN):	Length of data to be hashed
+* @digest (IN/OUT):	Returns the hashed data information
+* @diglen (OUT):	Size of the hashed/digest data
+* @authkey (IN):	Pointer to authentication key for HMAC
+* @authklen (IN):	Size of the authentication key
+* @alg (IN):		Secure Hash algorithm
+*/
+struct	qcedev_sha_op_req {
+	struct buf_info			data[QCEDEV_MAX_BUFFERS];
+	uint32_t			entries;
+	uint32_t			data_len;
+	uint8_t				digest[QCEDEV_MAX_SHA_DIGEST];
+	uint32_t			diglen;
+	uint8_t				*authkey;
+	uint32_t			authklen;
+	enum qcedev_sha_alg_enum	alg;
+};
+
+
+#define QCEDEV_IOC_MAGIC	0x87
+
+#define QCEDEV_IOCTL_ENC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 1, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_DEC_REQ		\
+	_IOWR(QCEDEV_IOC_MAGIC, 2, struct qcedev_cipher_op_req)
+#define QCEDEV_IOCTL_SHA_INIT_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 3, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_UPDATE_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 4, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_SHA_FINAL_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 5, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_GET_SHA_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 6, struct qcedev_sha_op_req)
+#define QCEDEV_IOCTL_LOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 7)
+#define QCEDEV_IOCTL_UNLOCK_CE	\
+	_IO(QCEDEV_IOC_MAGIC, 8)
+#define QCEDEV_IOCTL_GET_CMAC_REQ	\
+	_IOWR(QCEDEV_IOC_MAGIC, 9, struct qcedev_cipher_op_req)
+#endif /* __QCEDEV__H */
diff --git a/include/linux/qcota.h b/include/linux/qcota.h
new file mode 100644
index 000000000000..afc6b7fbd154
--- /dev/null
+++ b/include/linux/qcota.h
@@ -0,0 +1,165 @@
+/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __QCOTA__H
+#define __QCOTA__H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define QCE_OTA_MAX_BEARER	31
+#define OTA_KEY_SIZE		16	/* 128 bits of keys. */
+
+enum qce_ota_dir_enum {
+	QCE_OTA_DIR_UPLINK   = 0,
+	QCE_OTA_DIR_DOWNLINK = 1,
+	QCE_OTA_DIR_LAST
+};
+
+enum qce_ota_algo_enum {
+	QCE_OTA_ALGO_KASUMI = 0,
+	QCE_OTA_ALGO_SNOW3G = 1,
+	QCE_OTA_ALGO_LAST
+};
+
+/**
+ * struct qce_f8_req - qce f8 request
+ * @data_in:	packets input data stream to be ciphered.
+ *		If NULL, streaming mode operation.
+ * @data_out:	ciphered packets output data.
+ * @data_len:	length of data_in and data_out in bytes.
+ * @count_c:	count-C, ciphering sequence number, 32 bit
+ * @bearer:	5 bit of radio bearer identifier.
+ * @ckey:	128 bits of confidentiality key,
+ *		ckey[0] bit 127-120, ckey[1] bit 119-112,.., ckey[15] bit 7-0.
+ * @direction:	uplink or downlink.
+ * @algorithm:	Kasumi, or Snow3G.
+ *
+ * If data_in is NULL, the engine will run in a special mode called
+ * key stream mode. In this special mode, the engine will generate
+ * key stream output for the number of bytes specified in
+ * data_len, based on the input parameters of direction, algorithm,
+ * ckey, bearer, and count_c. data_len is restricted to
+ * a multiple of 16 bytes. The application can then take the
+ * output stream, do an exclusive-or with the input data stream, and
+ * generate the final cipher data stream.
+ */
+struct qce_f8_req {
+	uint8_t  *data_in;
+	uint8_t  *data_out;
+	uint16_t  data_len;
+	uint32_t  count_c;
+	uint8_t   bearer;
+	uint8_t   ckey[OTA_KEY_SIZE];
+	enum qce_ota_dir_enum  direction;
+	enum qce_ota_algo_enum algorithm;
+};
+
+/**
+ * struct qce_f8_multi_pkt_req - qce f8 multiple packet request
+ *			Multiple packets with uniform size, and the same
+ *			F8 ciphering parameters, can be ciphered in a
+ *			single request.
+ *
+ * @num_pkt:		number of packets.
+ *
+ * @cipher_start:	ciphering starts offset within a packet.
+ *
+ * @cipher_size:	number of bytes to be ciphered within a packet.
+ *
+ * @qce_f8_req:		description of the packet and F8 parameters.
+ *			The following fields have special meaning for
+ *			multiple packet operation,
+ *
+ *	@data_len:	data_len indicates the length of a packet.
+ *
+ *	@data_in:	packets are concatenated together in a byte
+ *			stream started at data_in.
+ *
+ *	@data_out:	The returned ciphered output for multiple
+ *			packets.
+ *			Each packet ciphered output are concatenated
+ *			together into a byte stream started at data_out.
+ *			Note, each ciphered packet output area from
+ *			offset 0 to cipher_start - 1, and from offset
+ *			cipher_start + cipher_size to data_len - 1, remains
+ *			unaltered from the packet input area.
+ *	@count_c:	count-C of the first packet, 32 bit.
+ *
+ *
+ * In one request, multiple packets can be ciphered, and output to the
+ * data_out stream.
+ *
+ * Packet data are laid out contiguously in sequence in the data_in
+ * and data_out areas. Every packet is of identical size.
+ * If the PDU is not byte aligned, set the data_len value
+ * to the rounded-up value of the packet size. E.g., for a PDU size of
+ * 253 bits, set the packet size to 32 bytes. The next packet starts on
+ * the next byte boundary.
+ *
+ * For each packet, data from offset 0 to cipher_start - 1
+ * will be left unchanged and output to the data_out area.
+ * This area of the packet can be for the RLC header, which is not
+ * to be ciphered.
+ *
+ * The ciphering of a packet starts from offset cipher_start, for
+ * cipher_size bytes of data. Data starting from
+ * offset cipher_start + cipher_size to the end of packet will be left
+ * unchanged and output to the data_out area.
+ *
+ * For each packet the input arguments of bearer, direction,
+ * ckey, and algorithm have to be the same. count_c is the ciphering
+ * sequence number of the first packet. The 2nd packet's ciphering sequence
+ * number is assumed to be count_c + 1. The 3rd packet's ciphering sequence
+ * number is count_c + 2, and so on.
+ *
+ */
+struct qce_f8_multi_pkt_req {
+	uint16_t    num_pkt;
+	uint16_t    cipher_start;
+	uint16_t    cipher_size;
+	struct qce_f8_req qce_f8_req;
+};
+
+/**
+ * struct qce_f9_req - qce f9 request
+ * @message:	message
+ * @msize:	message size in bytes (include the last partial byte).
+ * @last_bits:	valid bits in the last byte of message.
+ * @mac_i:	32 bit message authentication code, to be returned.
+ * @fresh:	random 32 bit number, one per user.
+ * @count_i:	32 bit count-I integrity sequence number.
+ * @direction:	uplink or downlink.
+ * @ikey:	128 bits of integrity key,
+ *		ikey[0] bit 127-120, ikey[1] bit 119-112,.., ikey[15] bit 7-0.
+ * @algorithm:	Kasumi, or Snow3G.
+ */
+struct qce_f9_req {
+	uint8_t   *message;
+	uint16_t   msize;
+	uint8_t    last_bits;
+	uint32_t   mac_i;
+	uint32_t   fresh;
+	uint32_t   count_i;
+	enum qce_ota_dir_enum direction;
+	uint8_t    ikey[OTA_KEY_SIZE];
+	enum qce_ota_algo_enum algorithm;
+};
+
+#define QCOTA_IOC_MAGIC	0x85
+
+#define QCOTA_F8_REQ _IOWR(QCOTA_IOC_MAGIC, 1, struct qce_f8_req)
+#define QCOTA_F8_MPKT_REQ _IOWR(QCOTA_IOC_MAGIC, 2, struct qce_f8_multi_pkt_req)
+#define QCOTA_F9_REQ _IOWR(QCOTA_IOC_MAGIC, 3, struct qce_f9_req)
+
+#endif /* __QCOTA__H */
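
For reference, a minimal user space sketch of driving the f9 (integrity)
interface declared above. This is illustrative only: the "/dev/qce_ota"
device node name and the key/fresh/count values are assumptions (the misc
device name registered by ota_crypto.c is not shown in this excerpt).

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/qcota.h>

	int main(void)
	{
		struct qce_f9_req req;
		static uint8_t msg[64];	/* message to authenticate */
		int fd = open("/dev/qce_ota", O_RDWR);	/* assumed node name */

		if (fd < 0)
			return 1;

		memset(&req, 0, sizeof(req));
		req.message = msg;
		req.msize = sizeof(msg);
		req.last_bits = 8;	/* whole final byte is valid */
		req.fresh = 0x2edf28eb;	/* placeholder per-user random value */
		req.count_i = 0;	/* integrity sequence number */
		req.direction = QCE_OTA_DIR_UPLINK;
		req.algorithm = QCE_OTA_ALGO_KASUMI;
		memset(req.ikey, 0x55, OTA_KEY_SIZE);	/* placeholder key */

		/* the driver fills in req.mac_i on success */
		if (ioctl(fd, QCOTA_F9_REQ, &req) == 0)
			printf("MAC-I: 0x%08x\n", req.mac_i);

		close(fd);
		return 0;
	}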