Merge tag 'LA.BF.1.1.3-02310-8x26.0' into lineage-16.0

This commit is contained in:
Francescodario Cuzzocrea 2019-08-05 11:18:51 +02:00
commit e20e6a0613
101 changed files with 8180 additions and 359 deletions

View File

@ -16,6 +16,7 @@ Optional properties:
- qcom,support-bus-scaling : indicates if driver support scaling the bus for crypto operation.
- qcom,support-fde : indicates if driver support key managing for full disk encryption feature.
- qcom,support-pfe : indicates if driver support key managing for per file encryption feature.
- qcom,appsbl-qseecom-support : indicates if there is qseecom support in appsbootloader
Example:
qcom,qseecom@fe806000 {
@ -32,6 +33,7 @@ Example:
qcom,msm_bus,num_cases = <4>;
qcom,msm_bus,active_only = <0>;
qcom,msm_bus,num_paths = <1>;
qcom,appsbl-qseecom-support;
qcom,msm_bus,vectors =
<55 512 0 0>,
<55 512 3936000000 393600000>,

View File

@ -2447,6 +2447,13 @@ config NEON
Say Y to include support code for NEON, the ARMv7 Advanced SIMD
Extension.
config KERNEL_MODE_NEON
bool "Support for NEON in kernel mode"
default n
depends on NEON
help
Say Y to include support for NEON in kernel mode.
endmenu
menu "Userspace binary formats"

View File

@ -17,6 +17,8 @@ CONFIG_NAMESPACES=y
# CONFIG_IPC_NS is not set
# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_DEVMEM is not set
# CONFIG_DEVKMEM is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_BZIP2=y
@ -149,9 +151,11 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_MATCH_COMMENT=y
CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y
CONFIG_NETFILTER_XT_MATCH_CONNMARK=y
@ -474,6 +478,15 @@ CONFIG_NFC_QNCI=y
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=m
CONFIG_KERNEL_MODE_NEON=y
CONFIG_CRYPTO_AES_ARM=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_SHA1_ARM_NEON=y
CONFIG_CRYPTO_SHA512_ARM_NEON=y
CONFIG_CRYPTO_AES_ARM_BS=y
CONFIG_CRYPTO_SHA1_ARM=y
CONFIG_MOBICORE_SUPPORT=m
CONFIG_MOBICORE_API=m
CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y

View File

@ -17,6 +17,8 @@ CONFIG_NAMESPACES=y
# CONFIG_IPC_NS is not set
# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_DEVMEM is not set
# CONFIG_DEVKMEM is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_BZIP2=y
@ -149,6 +151,7 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@ -527,6 +530,15 @@ CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_CRYPTO_AES_ARM=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_SHA1_ARM_NEON=y
CONFIG_CRYPTO_SHA512_ARM_NEON=y
CONFIG_CRYPTO_AES_ARM_BS=y
CONFIG_CRYPTO_SHA1_ARM=y
CONFIG_NFC_QNCI=y
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_QCE=y

View File

@ -18,6 +18,8 @@ CONFIG_NAMESPACES=y
# CONFIG_IPC_NS is not set
# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_DEVMEM is not set
# CONFIG_DEVKMEM is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_BZIP2=y
@ -161,9 +163,11 @@ CONFIG_NF_CT_NETLINK=y
CONFIG_NETFILTER_TPROXY=y
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@ -507,6 +511,15 @@ CONFIG_NFC_QNCI=y
CONFIG_CRYPTO_DEV_QCRYPTO=m
CONFIG_CRYPTO_DEV_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_CRYPTO_AES_ARM=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_SHA1_ARM_NEON=y
CONFIG_CRYPTO_SHA512_ARM_NEON=y
CONFIG_CRYPTO_AES_ARM_BS=y
CONFIG_CRYPTO_SHA1_ARM=y
CONFIG_PFT=y
CONFIG_SND_SOC_MSM_HDMI_CODEC_RX=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y

View File

@ -17,6 +17,8 @@ CONFIG_NAMESPACES=y
# CONFIG_IPC_NS is not set
# CONFIG_USER_NS is not set
# CONFIG_PID_NS is not set
# CONFIG_DEVMEM is not set
# CONFIG_DEVKMEM is not set
CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_RD_BZIP2=y
@ -171,6 +173,7 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
CONFIG_NETFILTER_XT_TARGET_LOG=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
CONFIG_NETFILTER_XT_TARGET_NOTRACK=y
CONFIG_NETFILTER_XT_TARGET_SECMARK=y
@ -562,6 +565,15 @@ CONFIG_CRYPTO_MD4=y
CONFIG_CRYPTO_ARC4=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_XTS=y
CONFIG_KERNEL_MODE_NEON=y
CONFIG_CRYPTO_AES_ARM=y
CONFIG_CRYPTO_ALGAPI=y
CONFIG_CRYPTO_AES=y
CONFIG_CRYPTO_ALGAPI2=y
CONFIG_CRYPTO_SHA1_ARM_NEON=y
CONFIG_CRYPTO_SHA512_ARM_NEON=y
CONFIG_CRYPTO_AES_ARM_BS=y
CONFIG_CRYPTO_SHA1_ARM=y
CONFIG_NFC_QNCI=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
CONFIG_CRYPTO_DEV_QCE=y

1
arch/arm/crypto/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
aesbs-core.S

View File

@ -6,10 +6,24 @@
obj-$(CONFIG_CRYPTO_FIPS) += first_file_asm.o
obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o
aes-arm-y := aes-armv4.o aes_glue.o
aes-arm-y := aes-armv4.o aes_glue.o
aes-arm-bs-y := aesbs-core.o aesbs-glue.o
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
$(call cmd,perl)
.PRECIOUS: $(obj)/aesbs-core.S
#Keep this at the bottom
obj-$(CONFIG_CRYPTO_FIPS) += last_file_asm.o

View File

@ -6,22 +6,12 @@
#include <linux/crypto.h>
#include <crypto/aes.h>
#define AES_MAXNR 14
#include "aes_glue.h"
typedef struct {
unsigned int rd_key[4 *(AES_MAXNR + 1)];
int rounds;
} AES_KEY;
struct AES_CTX {
AES_KEY enc_key;
AES_KEY dec_key;
};
asmlinkage void AES_encrypt(const u8 *in, u8 *out, AES_KEY *ctx);
asmlinkage void AES_decrypt(const u8 *in, u8 *out, AES_KEY *ctx);
asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey, const int bits, AES_KEY *key);
EXPORT_SYMBOL(AES_encrypt);
EXPORT_SYMBOL(AES_decrypt);
EXPORT_SYMBOL(private_AES_set_encrypt_key);
EXPORT_SYMBOL(private_AES_set_decrypt_key);
static void aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
{
@ -81,7 +71,7 @@ static struct crypto_alg aes_alg = {
.cipher = {
.cia_min_keysize = AES_MIN_KEY_SIZE,
.cia_max_keysize = AES_MAX_KEY_SIZE,
.cia_setkey = aes_set_key,
.cia_setkey = aes_set_key,
.cia_encrypt = aes_encrypt,
.cia_decrypt = aes_decrypt
}

View File

@ -0,0 +1,19 @@
#define AES_MAXNR 14
struct AES_KEY {
unsigned int rd_key[4 * (AES_MAXNR + 1)];
int rounds;
};
struct AES_CTX {
struct AES_KEY enc_key;
struct AES_KEY dec_key;
};
asmlinkage void AES_encrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
asmlinkage void AES_decrypt(const u8 *in, u8 *out, struct AES_KEY *ctx);
asmlinkage int private_AES_set_decrypt_key(const unsigned char *userKey,
const int bits, struct AES_KEY *key);
asmlinkage int private_AES_set_encrypt_key(const unsigned char *userKey,
const int bits, struct AES_KEY *key);

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,434 @@
/*
* linux/arch/arm/crypto/aesbs-glue.c - glue code for NEON bit sliced AES
*
* Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/neon.h>
#include <crypto/aes.h>
#include <crypto/ablk_helper.h>
#include <crypto/algapi.h>
#include <linux/module.h>
#include "aes_glue.h"
#define BIT_SLICED_KEY_MAXSIZE (128 * (AES_MAXNR - 1) + 2 * AES_BLOCK_SIZE)
struct BS_KEY {
struct AES_KEY rk;
int converted;
u8 __aligned(8) bs[BIT_SLICED_KEY_MAXSIZE];
} __aligned(8);
asmlinkage void bsaes_enc_key_convert(u8 out[], struct AES_KEY const *in);
asmlinkage void bsaes_dec_key_convert(u8 out[], struct AES_KEY const *in);
asmlinkage void bsaes_cbc_encrypt(u8 const in[], u8 out[], u32 bytes,
struct BS_KEY *key, u8 iv[]);
asmlinkage void bsaes_ctr32_encrypt_blocks(u8 const in[], u8 out[], u32 blocks,
struct BS_KEY *key, u8 const iv[]);
asmlinkage void bsaes_xts_encrypt(u8 const in[], u8 out[], u32 bytes,
struct BS_KEY *key, u8 tweak[]);
asmlinkage void bsaes_xts_decrypt(u8 const in[], u8 out[], u32 bytes,
struct BS_KEY *key, u8 tweak[]);
struct aesbs_cbc_ctx {
struct AES_KEY enc;
struct BS_KEY dec;
};
struct aesbs_ctr_ctx {
struct BS_KEY enc;
};
struct aesbs_xts_ctx {
struct BS_KEY enc;
struct BS_KEY dec;
struct AES_KEY twkey;
};
static int aesbs_cbc_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
int bits = key_len * 8;
if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
ctx->dec.rk = ctx->enc;
private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
ctx->dec.converted = 0;
return 0;
}
static int aesbs_ctr_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_ctr_ctx *ctx = crypto_tfm_ctx(tfm);
int bits = key_len * 8;
if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
ctx->enc.converted = 0;
return 0;
}
static int aesbs_xts_set_key(struct crypto_tfm *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_xts_ctx *ctx = crypto_tfm_ctx(tfm);
int bits = key_len * 4;
if (private_AES_set_encrypt_key(in_key, bits, &ctx->enc.rk)) {
tfm->crt_flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
return -EINVAL;
}
ctx->dec.rk = ctx->enc.rk;
private_AES_set_decrypt_key(in_key, bits, &ctx->dec.rk);
private_AES_set_encrypt_key(in_key + key_len / 2, bits, &ctx->twkey);
ctx->enc.converted = ctx->dec.converted = 0;
return 0;
}
static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt(desc, &walk);
while (walk.nbytes) {
u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *src = walk.src.virt.addr;
if (walk.dst.virt.addr == walk.src.virt.addr) {
u8 *iv = walk.iv;
do {
crypto_xor(src, iv, AES_BLOCK_SIZE);
AES_encrypt(src, src, &ctx->enc);
iv = src;
src += AES_BLOCK_SIZE;
} while (--blocks);
memcpy(walk.iv, iv, AES_BLOCK_SIZE);
} else {
u8 *dst = walk.dst.virt.addr;
do {
crypto_xor(walk.iv, src, AES_BLOCK_SIZE);
AES_encrypt(walk.iv, dst, &ctx->enc);
memcpy(walk.iv, dst, AES_BLOCK_SIZE);
src += AES_BLOCK_SIZE;
dst += AES_BLOCK_SIZE;
} while (--blocks);
}
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct aesbs_cbc_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
while ((walk.nbytes / AES_BLOCK_SIZE) >= 8) {
kernel_neon_begin();
bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->dec, walk.iv);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
while (walk.nbytes) {
u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
u8 *dst = walk.dst.virt.addr;
u8 *src = walk.src.virt.addr;
u8 bk[2][AES_BLOCK_SIZE];
u8 *iv = walk.iv;
do {
if (walk.dst.virt.addr == walk.src.virt.addr)
memcpy(bk[blocks & 1], src, AES_BLOCK_SIZE);
AES_decrypt(src, dst, &ctx->dec.rk);
crypto_xor(dst, iv, AES_BLOCK_SIZE);
if (walk.dst.virt.addr == walk.src.virt.addr)
iv = bk[blocks & 1];
else
iv = src;
dst += AES_BLOCK_SIZE;
src += AES_BLOCK_SIZE;
} while (--blocks);
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
static void inc_be128_ctr(__be32 ctr[], u32 addend)
{
int i;
for (i = 3; i >= 0; i--, addend = 1) {
u32 n = be32_to_cpu(ctr[i]) + addend;
ctr[i] = cpu_to_be32(n);
if (n >= addend)
break;
}
}
static int aesbs_ctr_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
struct aesbs_ctr_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
u32 blocks;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
u32 tail = walk.nbytes % AES_BLOCK_SIZE;
__be32 *ctr = (__be32 *)walk.iv;
u32 headroom = UINT_MAX - be32_to_cpu(ctr[3]);
/* avoid 32 bit counter overflow in the NEON code */
if (unlikely(headroom < blocks)) {
blocks = headroom + 1;
tail = walk.nbytes - blocks * AES_BLOCK_SIZE;
}
kernel_neon_begin();
bsaes_ctr32_encrypt_blocks(walk.src.virt.addr,
walk.dst.virt.addr, blocks,
&ctx->enc, walk.iv);
kernel_neon_end();
inc_be128_ctr(ctr, blocks);
nbytes -= blocks * AES_BLOCK_SIZE;
if (nbytes && nbytes == tail && nbytes <= AES_BLOCK_SIZE)
break;
err = blkcipher_walk_done(desc, &walk, tail);
}
if (walk.nbytes) {
u8 *tdst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
u8 *tsrc = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;
u8 ks[AES_BLOCK_SIZE];
AES_encrypt(walk.iv, ks, &ctx->enc.rk);
if (tdst != tsrc)
memcpy(tdst, tsrc, nbytes);
crypto_xor(tdst, ks, nbytes);
err = blkcipher_walk_done(desc, &walk, 0);
}
return err;
}
static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
/* generate the initial tweak */
AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
while (walk.nbytes) {
kernel_neon_begin();
bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->enc, walk.iv);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst,
struct scatterlist *src, unsigned int nbytes)
{
struct aesbs_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
int err;
blkcipher_walk_init(&walk, dst, src, nbytes);
err = blkcipher_walk_virt_block(desc, &walk, 8 * AES_BLOCK_SIZE);
/* generate the initial tweak */
AES_encrypt(walk.iv, walk.iv, &ctx->twkey);
while (walk.nbytes) {
kernel_neon_begin();
bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
walk.nbytes, &ctx->dec, walk.iv);
kernel_neon_end();
err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
}
return err;
}
static struct crypto_alg aesbs_algs[] = { {
.cra_name = "__cbc-aes-neonbs",
.cra_driver_name = "__driver-cbc-aes-neonbs",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aesbs_cbc_ctx),
.cra_alignmask = 7,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aesbs_cbc_set_key,
.encrypt = aesbs_cbc_encrypt,
.decrypt = aesbs_cbc_decrypt,
},
}, {
.cra_name = "__ctr-aes-neonbs",
.cra_driver_name = "__driver-ctr-aes-neonbs",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct aesbs_ctr_ctx),
.cra_alignmask = 7,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aesbs_ctr_set_key,
.encrypt = aesbs_ctr_encrypt,
.decrypt = aesbs_ctr_encrypt,
},
}, {
.cra_name = "__xts-aes-neonbs",
.cra_driver_name = "__driver-xts-aes-neonbs",
.cra_priority = 0,
.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct aesbs_xts_ctx),
.cra_alignmask = 7,
.cra_type = &crypto_blkcipher_type,
.cra_module = THIS_MODULE,
.cra_blkcipher = {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = aesbs_xts_set_key,
.encrypt = aesbs_xts_encrypt,
.decrypt = aesbs_xts_decrypt,
},
}, {
.cra_name = "cbc(aes)",
.cra_driver_name = "cbc-aes-neonbs",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = __ablk_encrypt,
.decrypt = ablk_decrypt,
}
}, {
.cra_name = "ctr(aes)",
.cra_driver_name = "ctr-aes-neonbs",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = 1,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
}
}, {
.cra_name = "xts(aes)",
.cra_driver_name = "xts-aes-neonbs",
.cra_priority = 300,
.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER|CRYPTO_ALG_ASYNC,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct async_helper_ctx),
.cra_alignmask = 7,
.cra_type = &crypto_ablkcipher_type,
.cra_module = THIS_MODULE,
.cra_init = ablk_init,
.cra_exit = ablk_exit,
.cra_ablkcipher = {
.min_keysize = 2 * AES_MIN_KEY_SIZE,
.max_keysize = 2 * AES_MAX_KEY_SIZE,
.ivsize = AES_BLOCK_SIZE,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
}
} };
static int __init aesbs_mod_init(void)
{
if (!cpu_has_neon())
return -ENODEV;
return crypto_register_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
}
static void __exit aesbs_mod_exit(void)
{
crypto_unregister_algs(aesbs_algs, ARRAY_SIZE(aesbs_algs));
}
module_init(aesbs_mod_init);
module_exit(aesbs_mod_exit);
MODULE_DESCRIPTION("Bit sliced AES in CBC/CTR/XTS modes using NEON");
MODULE_AUTHOR("Ard Biesheuvel <ard.biesheuvel@linaro.org>");
MODULE_LICENSE("GPL");

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,634 @@
/* sha1-armv7-neon.S - ARM/NEON accelerated SHA-1 transform function
*
* Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/linkage.h>
.syntax unified
.code 32
.fpu neon
.text
/* Context structure */
#define state_h0 0
#define state_h1 4
#define state_h2 8
#define state_h3 12
#define state_h4 16
/* Constants */
#define K1 0x5A827999
#define K2 0x6ED9EBA1
#define K3 0x8F1BBCDC
#define K4 0xCA62C1D6
.align 4
.LK_VEC:
.LK1: .long K1, K1, K1, K1
.LK2: .long K2, K2, K2, K2
.LK3: .long K3, K3, K3, K3
.LK4: .long K4, K4, K4, K4
/* Register macros */
#define RSTATE r0
#define RDATA r1
#define RNBLKS r2
#define ROLDSTACK r3
#define RWK lr
#define _a r4
#define _b r5
#define _c r6
#define _d r7
#define _e r8
#define RT0 r9
#define RT1 r10
#define RT2 r11
#define RT3 r12
#define W0 q0
#define W1 q1
#define W2 q2
#define W3 q3
#define W4 q4
#define W5 q5
#define W6 q6
#define W7 q7
#define tmp0 q8
#define tmp1 q9
#define tmp2 q10
#define tmp3 q11
#define qK1 q12
#define qK2 q13
#define qK3 q14
#define qK4 q15
/* Round function macros. */
#define WK_offs(i) (((i) & 15) * 4)
#define _R_F1(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
bic RT0, d, b; \
add e, e, a, ror #(32 - 5); \
and RT1, c, b; \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add RT0, RT0, RT3; \
add e, e, RT1; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT0;
#define _R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
eor RT0, d, b; \
add e, e, a, ror #(32 - 5); \
eor RT0, RT0, c; \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT3; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT0; \
#define _R_F3(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
ldr RT3, [sp, WK_offs(i)]; \
pre1(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
eor RT0, b, c; \
and RT1, b, c; \
add e, e, a, ror #(32 - 5); \
pre2(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
and RT0, RT0, d; \
add RT1, RT1, RT3; \
add e, e, RT0; \
ror b, #(32 - 30); \
pre3(i16,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28); \
add e, e, RT1;
#define _R_F4(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
_R_F2(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
#define _R(a,b,c,d,e,f,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
_R_##f(a,b,c,d,e,i,pre1,pre2,pre3,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
#define R(a,b,c,d,e,f,i) \
_R_##f(a,b,c,d,e,i,dummy,dummy,dummy,i16,\
W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28)
#define dummy(...)
/* Input expansion macros. */
/********* Precalc macros for rounds 0-15 *************************************/
#define W_PRECALC_00_15() \
add RWK, sp, #(WK_offs(0)); \
\
vld1.32 {tmp0, tmp1}, [RDATA]!; \
vrev32.8 W0, tmp0; /* big => little */ \
vld1.32 {tmp2, tmp3}, [RDATA]!; \
vadd.u32 tmp0, W0, curK; \
vrev32.8 W7, tmp1; /* big => little */ \
vrev32.8 W6, tmp2; /* big => little */ \
vadd.u32 tmp1, W7, curK; \
vrev32.8 W5, tmp3; /* big => little */ \
vadd.u32 tmp2, W6, curK; \
vst1.32 {tmp0, tmp1}, [RWK]!; \
vadd.u32 tmp3, W5, curK; \
vst1.32 {tmp2, tmp3}, [RWK]; \
#define WPRECALC_00_15_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vld1.32 {tmp0, tmp1}, [RDATA]!; \
#define WPRECALC_00_15_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
add RWK, sp, #(WK_offs(0)); \
#define WPRECALC_00_15_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vrev32.8 W0, tmp0; /* big => little */ \
#define WPRECALC_00_15_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vld1.32 {tmp2, tmp3}, [RDATA]!; \
#define WPRECALC_00_15_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp0, W0, curK; \
#define WPRECALC_00_15_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vrev32.8 W7, tmp1; /* big => little */ \
#define WPRECALC_00_15_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vrev32.8 W6, tmp2; /* big => little */ \
#define WPRECALC_00_15_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp1, W7, curK; \
#define WPRECALC_00_15_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vrev32.8 W5, tmp3; /* big => little */ \
#define WPRECALC_00_15_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp2, W6, curK; \
#define WPRECALC_00_15_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp0, tmp1}, [RWK]!; \
#define WPRECALC_00_15_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp3, W5, curK; \
#define WPRECALC_00_15_12(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp2, tmp3}, [RWK]; \
/********* Precalc macros for rounds 16-31 ************************************/
#define WPRECALC_16_31_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp0, tmp0; \
vext.8 W, W_m16, W_m12, #8; \
#define WPRECALC_16_31_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
add RWK, sp, #(WK_offs(i)); \
vext.8 tmp0, W_m04, tmp0, #4; \
#define WPRECALC_16_31_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp0, tmp0, W_m16; \
veor.32 W, W, W_m08; \
#define WPRECALC_16_31_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp1, tmp1; \
veor W, W, tmp0; \
#define WPRECALC_16_31_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshl.u32 tmp0, W, #1; \
#define WPRECALC_16_31_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vext.8 tmp1, tmp1, W, #(16-12); \
vshr.u32 W, W, #31; \
#define WPRECALC_16_31_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vorr tmp0, tmp0, W; \
vshr.u32 W, tmp1, #30; \
#define WPRECALC_16_31_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshl.u32 tmp1, tmp1, #2; \
#define WPRECALC_16_31_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor tmp0, tmp0, W; \
#define WPRECALC_16_31_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, tmp0, tmp1; \
#define WPRECALC_16_31_10(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp0, W, curK; \
#define WPRECALC_16_31_11(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp0}, [RWK];
/********* Precalc macros for rounds 32-79 ************************************/
#define WPRECALC_32_79_0(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, W_m28; \
#define WPRECALC_32_79_1(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vext.8 tmp0, W_m08, W_m04, #8; \
#define WPRECALC_32_79_2(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, W_m16; \
#define WPRECALC_32_79_3(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
veor W, tmp0; \
#define WPRECALC_32_79_4(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
add RWK, sp, #(WK_offs(i&~3)); \
#define WPRECALC_32_79_5(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshl.u32 tmp1, W, #2; \
#define WPRECALC_32_79_6(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vshr.u32 tmp0, W, #30; \
#define WPRECALC_32_79_7(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vorr W, tmp0, tmp1; \
#define WPRECALC_32_79_8(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vadd.u32 tmp0, W, curK; \
#define WPRECALC_32_79_9(i,W,W_m04,W_m08,W_m12,W_m16,W_m20,W_m24,W_m28) \
vst1.32 {tmp0}, [RWK];
/*
* Transform nblks*64 bytes (nblks*16 32-bit words) at DATA.
*
* unsigned int
* sha1_transform_neon (void *ctx, const unsigned char *data,
* unsigned int nblks)
*/
.align 3
ENTRY(sha1_transform_neon)
/* input:
* r0: ctx, CTX
* r1: data (64*nblks bytes)
* r2: nblks
*/
cmp RNBLKS, #0;
beq .Ldo_nothing;
push {r4-r12, lr};
/*vpush {q4-q7};*/
adr RT3, .LK_VEC;
mov ROLDSTACK, sp;
/* Align stack. */
sub RT0, sp, #(16*4);
and RT0, #(~(16-1));
mov sp, RT0;
vld1.32 {qK1-qK2}, [RT3]!; /* Load K1,K2 */
/* Get the values of the chaining variables. */
ldm RSTATE, {_a-_e};
vld1.32 {qK3-qK4}, [RT3]; /* Load K3,K4 */
#undef curK
#define curK qK1
/* Precalc 0-15. */
W_PRECALC_00_15();
.Loop:
/* Transform 0-15 + Precalc 16-31. */
_R( _a, _b, _c, _d, _e, F1, 0,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 16,
W4, W5, W6, W7, W0, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 1,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 16,
W4, W5, W6, W7, W0, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 2,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 16,
W4, W5, W6, W7, W0, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 3,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,16,
W4, W5, W6, W7, W0, _, _, _ );
#undef curK
#define curK qK2
_R( _b, _c, _d, _e, _a, F1, 4,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 5,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 6,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 7,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,20,
W3, W4, W5, W6, W7, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 8,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _b, _c, _d, _e, _a, F1, 9,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 10,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _e, _a, _b, _c, _d, F1, 11,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,24,
W2, W3, W4, W5, W6, _, _, _ );
_R( _d, _e, _a, _b, _c, F1, 12,
WPRECALC_16_31_0, WPRECALC_16_31_1, WPRECALC_16_31_2, 28,
W1, W2, W3, W4, W5, _, _, _ );
_R( _c, _d, _e, _a, _b, F1, 13,
WPRECALC_16_31_3, WPRECALC_16_31_4, WPRECALC_16_31_5, 28,
W1, W2, W3, W4, W5, _, _, _ );
_R( _b, _c, _d, _e, _a, F1, 14,
WPRECALC_16_31_6, WPRECALC_16_31_7, WPRECALC_16_31_8, 28,
W1, W2, W3, W4, W5, _, _, _ );
_R( _a, _b, _c, _d, _e, F1, 15,
WPRECALC_16_31_9, WPRECALC_16_31_10,WPRECALC_16_31_11,28,
W1, W2, W3, W4, W5, _, _, _ );
/* Transform 16-63 + Precalc 32-79. */
_R( _e, _a, _b, _c, _d, F1, 16,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _d, _e, _a, _b, _c, F1, 17,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _c, _d, _e, _a, _b, F1, 18,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _b, _c, _d, _e, _a, F1, 19,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 32,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _a, _b, _c, _d, _e, F2, 20,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _e, _a, _b, _c, _d, F2, 21,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _d, _e, _a, _b, _c, F2, 22,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _c, _d, _e, _a, _b, F2, 23,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 36,
W7, W0, W1, W2, W3, W4, W5, W6);
#undef curK
#define curK qK3
_R( _b, _c, _d, _e, _a, F2, 24,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _a, _b, _c, _d, _e, F2, 25,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _e, _a, _b, _c, _d, F2, 26,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _d, _e, _a, _b, _c, F2, 27,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 40,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _c, _d, _e, _a, _b, F2, 28,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _b, _c, _d, _e, _a, F2, 29,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _a, _b, _c, _d, _e, F2, 30,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _e, _a, _b, _c, _d, F2, 31,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 44,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _d, _e, _a, _b, _c, F2, 32,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _c, _d, _e, _a, _b, F2, 33,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _b, _c, _d, _e, _a, F2, 34,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _a, _b, _c, _d, _e, F2, 35,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 48,
W4, W5, W6, W7, W0, W1, W2, W3);
_R( _e, _a, _b, _c, _d, F2, 36,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _d, _e, _a, _b, _c, F2, 37,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _c, _d, _e, _a, _b, F2, 38,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _b, _c, _d, _e, _a, F2, 39,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 52,
W3, W4, W5, W6, W7, W0, W1, W2);
_R( _a, _b, _c, _d, _e, F3, 40,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
_R( _e, _a, _b, _c, _d, F3, 41,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
_R( _d, _e, _a, _b, _c, F3, 42,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
_R( _c, _d, _e, _a, _b, F3, 43,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 56,
W2, W3, W4, W5, W6, W7, W0, W1);
#undef curK
#define curK qK4
_R( _b, _c, _d, _e, _a, F3, 44,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _a, _b, _c, _d, _e, F3, 45,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _e, _a, _b, _c, _d, F3, 46,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _d, _e, _a, _b, _c, F3, 47,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 60,
W1, W2, W3, W4, W5, W6, W7, W0);
_R( _c, _d, _e, _a, _b, F3, 48,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _b, _c, _d, _e, _a, F3, 49,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _a, _b, _c, _d, _e, F3, 50,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _e, _a, _b, _c, _d, F3, 51,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 64,
W0, W1, W2, W3, W4, W5, W6, W7);
_R( _d, _e, _a, _b, _c, F3, 52,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _c, _d, _e, _a, _b, F3, 53,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _b, _c, _d, _e, _a, F3, 54,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _a, _b, _c, _d, _e, F3, 55,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 68,
W7, W0, W1, W2, W3, W4, W5, W6);
_R( _e, _a, _b, _c, _d, F3, 56,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _d, _e, _a, _b, _c, F3, 57,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _c, _d, _e, _a, _b, F3, 58,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
_R( _b, _c, _d, _e, _a, F3, 59,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 72,
W6, W7, W0, W1, W2, W3, W4, W5);
subs RNBLKS, #1;
_R( _a, _b, _c, _d, _e, F4, 60,
WPRECALC_32_79_0, WPRECALC_32_79_1, WPRECALC_32_79_2, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _e, _a, _b, _c, _d, F4, 61,
WPRECALC_32_79_3, WPRECALC_32_79_4, WPRECALC_32_79_5, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _d, _e, _a, _b, _c, F4, 62,
WPRECALC_32_79_6, dummy, WPRECALC_32_79_7, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
_R( _c, _d, _e, _a, _b, F4, 63,
WPRECALC_32_79_8, dummy, WPRECALC_32_79_9, 76,
W5, W6, W7, W0, W1, W2, W3, W4);
beq .Lend;
/* Transform 64-79 + Precalc 0-15 of next block. */
#undef curK
#define curK qK1
_R( _b, _c, _d, _e, _a, F4, 64,
WPRECALC_00_15_0, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 65,
WPRECALC_00_15_1, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 66,
WPRECALC_00_15_2, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 67,
WPRECALC_00_15_3, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 68,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 69,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 70,
WPRECALC_00_15_4, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 71,
WPRECALC_00_15_5, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 72,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 73,
dummy, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 74,
WPRECALC_00_15_6, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _a, _b, _c, _d, _e, F4, 75,
WPRECALC_00_15_7, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _e, _a, _b, _c, _d, F4, 76,
WPRECALC_00_15_8, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _d, _e, _a, _b, _c, F4, 77,
WPRECALC_00_15_9, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _c, _d, _e, _a, _b, F4, 78,
WPRECALC_00_15_10, dummy, dummy, _, _, _, _, _, _, _, _, _ );
_R( _b, _c, _d, _e, _a, F4, 79,
WPRECALC_00_15_11, dummy, WPRECALC_00_15_12, _, _, _, _, _, _, _, _, _ );
/* Update the chaining variables. */
ldm RSTATE, {RT0-RT3};
add _a, RT0;
ldr RT0, [RSTATE, #state_h4];
add _b, RT1;
add _c, RT2;
add _d, RT3;
add _e, RT0;
stm RSTATE, {_a-_e};
b .Loop;
.Lend:
/* Transform 64-79 */
R( _b, _c, _d, _e, _a, F4, 64 );
R( _a, _b, _c, _d, _e, F4, 65 );
R( _e, _a, _b, _c, _d, F4, 66 );
R( _d, _e, _a, _b, _c, F4, 67 );
R( _c, _d, _e, _a, _b, F4, 68 );
R( _b, _c, _d, _e, _a, F4, 69 );
R( _a, _b, _c, _d, _e, F4, 70 );
R( _e, _a, _b, _c, _d, F4, 71 );
R( _d, _e, _a, _b, _c, F4, 72 );
R( _c, _d, _e, _a, _b, F4, 73 );
R( _b, _c, _d, _e, _a, F4, 74 );
R( _a, _b, _c, _d, _e, F4, 75 );
R( _e, _a, _b, _c, _d, F4, 76 );
R( _d, _e, _a, _b, _c, F4, 77 );
R( _c, _d, _e, _a, _b, F4, 78 );
R( _b, _c, _d, _e, _a, F4, 79 );
mov sp, ROLDSTACK;
/* Update the chaining variables. */
ldm RSTATE, {RT0-RT3};
add _a, RT0;
ldr RT0, [RSTATE, #state_h4];
add _b, RT1;
add _c, RT2;
add _d, RT3;
/*vpop {q4-q7};*/
add _e, RT0;
stm RSTATE, {_a-_e};
pop {r4-r12, pc};
.Ldo_nothing:
bx lr
ENDPROC(sha1_transform_neon)

View File

@ -23,32 +23,27 @@
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/crypto/sha1.h>
struct SHA1_CTX {
uint32_t h0,h1,h2,h3,h4;
u64 count;
u8 data[SHA1_BLOCK_SIZE];
};
asmlinkage void sha1_block_data_order(struct SHA1_CTX *digest,
asmlinkage void sha1_block_data_order(u32 *digest,
const unsigned char *data, unsigned int rounds);
static int sha1_init(struct shash_desc *desc)
{
struct SHA1_CTX *sctx = shash_desc_ctx(desc);
memset(sctx, 0, sizeof(*sctx));
sctx->h0 = SHA1_H0;
sctx->h1 = SHA1_H1;
sctx->h2 = SHA1_H2;
sctx->h3 = SHA1_H3;
sctx->h4 = SHA1_H4;
struct sha1_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
return 0;
}
static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data,
unsigned int len, unsigned int partial)
static int __sha1_update(struct sha1_state *sctx, const u8 *data,
unsigned int len, unsigned int partial)
{
unsigned int done = 0;
@ -56,43 +51,44 @@ static int __sha1_update(struct SHA1_CTX *sctx, const u8 *data,
if (partial) {
done = SHA1_BLOCK_SIZE - partial;
memcpy(sctx->data + partial, data, done);
sha1_block_data_order(sctx, sctx->data, 1);
memcpy(sctx->buffer + partial, data, done);
sha1_block_data_order(sctx->state, sctx->buffer, 1);
}
if (len - done >= SHA1_BLOCK_SIZE) {
const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;
sha1_block_data_order(sctx, data + done, rounds);
sha1_block_data_order(sctx->state, data + done, rounds);
done += rounds * SHA1_BLOCK_SIZE;
}
memcpy(sctx->data, data + done, len - done);
memcpy(sctx->buffer, data + done, len - done);
return 0;
}
static int sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
int sha1_update_arm(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct SHA1_CTX *sctx = shash_desc_ctx(desc);
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
int res;
/* Handle the fast case right here */
if (partial + len < SHA1_BLOCK_SIZE) {
sctx->count += len;
memcpy(sctx->data + partial, data, len);
memcpy(sctx->buffer + partial, data, len);
return 0;
}
res = __sha1_update(sctx, data, len, partial);
return res;
}
EXPORT_SYMBOL_GPL(sha1_update_arm);
/* Add padding and return the message digest. */
static int sha1_final(struct shash_desc *desc, u8 *out)
{
struct SHA1_CTX *sctx = shash_desc_ctx(desc);
struct sha1_state *sctx = shash_desc_ctx(desc);
unsigned int i, index, padlen;
__be32 *dst = (__be32 *)out;
__be64 bits;
@ -106,7 +102,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
/* We need to fill a whole block for __sha1_update() */
if (padlen <= 56) {
sctx->count += padlen;
memcpy(sctx->data + index, padding, padlen);
memcpy(sctx->buffer + index, padding, padlen);
} else {
__sha1_update(sctx, padding, padlen, index);
}
@ -114,7 +110,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
/* Store state in digest */
for (i = 0; i < 5; i++)
dst[i] = cpu_to_be32(((u32 *)sctx)[i]);
dst[i] = cpu_to_be32(sctx->state[i]);
/* Wipe context */
memset(sctx, 0, sizeof(*sctx));
@ -124,7 +120,7 @@ static int sha1_final(struct shash_desc *desc, u8 *out)
static int sha1_export(struct shash_desc *desc, void *out)
{
struct SHA1_CTX *sctx = shash_desc_ctx(desc);
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(out, sctx, sizeof(*sctx));
return 0;
}
@ -132,7 +128,7 @@ static int sha1_export(struct shash_desc *desc, void *out)
static int sha1_import(struct shash_desc *desc, const void *in)
{
struct SHA1_CTX *sctx = shash_desc_ctx(desc);
struct sha1_state *sctx = shash_desc_ctx(desc);
memcpy(sctx, in, sizeof(*sctx));
return 0;
}
@ -141,12 +137,12 @@ static int sha1_import(struct shash_desc *desc, const void *in)
static struct shash_alg alg = {
.digestsize = SHA1_DIGEST_SIZE,
.init = sha1_init,
.update = sha1_update,
.update = sha1_update_arm,
.final = sha1_final,
.export = sha1_export,
.import = sha1_import,
.descsize = sizeof(struct SHA1_CTX),
.statesize = sizeof(struct SHA1_CTX),
.descsize = sizeof(struct sha1_state),
.statesize = sizeof(struct sha1_state),
.base = {
.cra_name = "sha1",
.cra_driver_name= "sha1-asm",

View File

@ -0,0 +1,197 @@
/*
* Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
* ARM NEON instructions.
*
* Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is based on sha1_generic.c and sha1_ssse3_glue.c:
* Copyright (c) Alan Smithee.
* Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
* Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
* Copyright (c) Mathias Krause <minipli@googlemail.com>
* Copyright (c) Chandramouli Narayanan <mouli@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/neon.h>
#include <asm/simd.h>
#include <asm/crypto/sha1.h>
asmlinkage void sha1_transform_neon(void *state_h, const char *data,
unsigned int rounds);
static int sha1_neon_init(struct shash_desc *desc)
{
struct sha1_state *sctx = shash_desc_ctx(desc);
*sctx = (struct sha1_state){
.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
};
return 0;
}
/*
 * Core NEON update path.  Caller has already entered a kernel NEON
 * region and guarantees at least one full block is available
 * (partial + len >= SHA1_BLOCK_SIZE).
 */
static int __sha1_neon_update(struct shash_desc *desc, const u8 *data,
			      unsigned int len, unsigned int partial)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int consumed = 0;

	sctx->count += len;

	/* Top up a previously buffered partial block and hash it. */
	if (partial) {
		consumed = SHA1_BLOCK_SIZE - partial;
		memcpy(sctx->buffer + partial, data, consumed);
		sha1_transform_neon(sctx->state, sctx->buffer, 1);
	}

	/* Hash the remaining whole blocks straight from 'data'. */
	if (len - consumed >= SHA1_BLOCK_SIZE) {
		unsigned int nblocks = (len - consumed) / SHA1_BLOCK_SIZE;

		sha1_transform_neon(sctx->state, data + consumed, nblocks);
		consumed += nblocks * SHA1_BLOCK_SIZE;
	}

	/* Stash the tail for the next update/final call. */
	memcpy(sctx->buffer, data + consumed, len - consumed);
	return 0;
}
/*
 * shash .update hook: buffer sub-block input; otherwise hash with NEON,
 * falling back to the scalar ARM asm driver when NEON is unusable
 * (e.g. from a context where SIMD registers may not be touched).
 */
static int sha1_neon_update(struct shash_desc *desc, const u8 *data,
			     unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
	int ret;

	/* Input too small to complete a block: just buffer it. */
	if (partial + len < SHA1_BLOCK_SIZE) {
		memcpy(sctx->buffer + partial, data, len);
		sctx->count += len;
		return 0;
	}

	if (may_use_simd()) {
		kernel_neon_begin();
		ret = __sha1_neon_update(desc, data, len, partial);
		kernel_neon_end();
	} else {
		ret = sha1_update_arm(desc, data, len);
	}

	return ret;
}
/* Add padding and return the message digest. */
static int sha1_neon_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	/* Message length in *bits*, big-endian, appended as the final 8
	 * bytes of the padded message (FIPS 180 padding).  Must be
	 * captured before the padding updates below advance sctx->count. */
	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA1_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
	if (!may_use_simd()) {
		/* NEON unusable here: finish via the scalar asm driver,
		 * which does its own buffering and counting. */
		sha1_update_arm(desc, padding, padlen);
		sha1_update_arm(desc, (const u8 *)&bits, sizeof(bits));
	} else {
		kernel_neon_begin();
		/* We need to fill a whole block for __sha1_neon_update() */
		if (padlen <= 56) {
			/* Padding fits in the current block, so no block
			 * boundary is crossed until 'bits' arrives: buffer
			 * it and bump the count by hand. */
			sctx->count += padlen;
			memcpy(sctx->buffer + index, padding, padlen);
		} else {
			__sha1_neon_update(desc, padding, padlen, index);
		}
		/* After padding, the 8 length bytes start at offset 56. */
		__sha1_neon_update(desc, (const u8 *)&bits, sizeof(bits), 56);
		kernel_neon_end();
	}

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}
/* Serialize the in-progress hash state into 'out' (statesize bytes). */
static int sha1_neon_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *state = shash_desc_ctx(desc);

	memcpy(out, state, sizeof(*state));
	return 0;
}
/* Restore a hash state previously produced by sha1_neon_export(). */
static int sha1_neon_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *state = shash_desc_ctx(desc);

	memcpy(state, in, sizeof(*state));
	return 0;
}
/*
 * shash descriptor for the NEON SHA-1 implementation.  cra_priority 250
 * makes the crypto core prefer this driver over lower-priority "sha1"
 * providers when both are registered.
 */
static struct shash_alg alg = {
	.digestsize	= SHA1_DIGEST_SIZE,
	.init		= sha1_neon_init,
	.update		= sha1_neon_update,
	.final		= sha1_neon_final,
	.export		= sha1_neon_export,
	.import		= sha1_neon_import,
	/* Whole sha1_state is both the per-request context and the
	 * export/import blob. */
	.descsize	= sizeof(struct sha1_state),
	.statesize	= sizeof(struct sha1_state),
	.base		= {
		.cra_name		= "sha1",
		.cra_driver_name	= "sha1-neon",
		.cra_priority		= 250,
		.cra_flags		= CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize		= SHA1_BLOCK_SIZE,
		.cra_module		= THIS_MODULE,
	}
};
/* Module load: register the algorithm only on CPUs that have NEON. */
static int __init sha1_neon_mod_init(void)
{
	if (cpu_has_neon())
		return crypto_register_shash(&alg);

	return -ENODEV;
}
/* Module unload: withdraw "sha1-neon" from the crypto API. */
static void __exit sha1_neon_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}
module_init(sha1_neon_mod_init);
module_exit(sha1_neon_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, NEON accelerated");
MODULE_ALIAS("sha1");

View File

@ -0,0 +1,455 @@
/* sha512-armv7-neon.S - ARM/NEON assembly implementation of SHA-512 transform
*
* Copyright © 2013-2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/linkage.h>
.syntax unified
.code 32
.fpu neon
.text
/* structure of SHA512_CONTEXT */
/*
 * Byte offsets of the first seven u64 hash words within the
 * SHA512_CONTEXT passed in %r0.  NOTE(review): these offsets appear
 * unused below -- the context is accessed with vld1/vst1 and pointer
 * arithmetic instead; confirm before relying on them.
 */
#define hd_a 0
#define hd_b ((hd_a) + 8)
#define hd_c ((hd_b) + 8)
#define hd_d ((hd_c) + 8)
#define hd_e ((hd_d) + 8)
#define hd_f ((hd_e) + 8)
#define hd_g ((hd_f) + 8)

/* register macros */

/* RK walks the u64 k[] round-constant table (%r2); it is advanced by
 * the post-indexed vld1.64 loads inside the round macros. */
#define RK %r2

/* Working variables a..h of the compression function live in d0-d7. */
#define RA d0
#define RB d1
#define RC d2
#define RD d3
#define RE d4
#define RF d5
#define RG d6
#define RH d7

/* Scratch temporaries in d8-d15 (see the EABI note before .Loop). */
#define RT0 d8
#define RT1 d9
#define RT2 d10
#define RT3 d11
#define RT4 d12
#define RT5 d13
#define RT6 d14
#define RT7 d15

/* q-register views of the temporary pairs above. */
#define RT01q q4
#define RT23q q5
#define RT45q q6
#define RT67q q7

/* Sixteen-word message-schedule window w[0..15] in d16-d31. */
#define RW0 d16
#define RW1 d17
#define RW2 d18
#define RW3 d19
#define RW4 d20
#define RW5 d21
#define RW6 d22
#define RW7 d23
#define RW8 d24
#define RW9 d25
#define RW10 d26
#define RW11 d27
#define RW12 d28
#define RW13 d29
#define RW14 d30
#define RW15 d31

/* q-register views of adjacent schedule word pairs. */
#define RW01q q8
#define RW23q q9
#define RW45q q10
#define RW67q q11
#define RW89q q12
#define RW1011q q13
#define RW1213q q14
#define RW1415q q15
/***********************************************************************
* ARM assembly implementation of sha512 transform
***********************************************************************/
/*
 * rounds2_0_63: two SHA-512 compression rounds for t in 0..63.  The
 * scalar recurrences being computed are given in the inline comments
 * (t1/Sum1/Ch, then Sum0/Maj).  Round constants are streamed from [RK]
 * (post-incremented 8 bytes per round).  Interleaved with the rounds,
 * the message-schedule expansion
 *	w[0:1] += S1(w[14:15]) + w[9:10] + S0(w[1:2])
 * is computed on the q registers; its final S1 contribution is left in
 * RT01q and applied later through the vadd_RT01q hook below.
 * interleave_op(arg1) lets the caller splice one extra operation into
 * the instruction stream.
 */
#define rounds2_0_63(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, rw01q, rw2, \
		     rw23q, rw1415q, rw9, rw10, interleave_op, arg1) \
	/* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
	vshr.u64 RT2, re, #14; \
	vshl.u64 RT3, re, #64 - 14; \
	interleave_op(arg1); \
	vshr.u64 RT4, re, #18; \
	vshl.u64 RT5, re, #64 - 18; \
	vld1.64 {RT0}, [RK]!; \
	veor.64 RT23q, RT23q, RT45q; \
	vshr.u64 RT4, re, #41; \
	vshl.u64 RT5, re, #64 - 41; \
	vadd.u64 RT0, RT0, rw0; \
	veor.64 RT23q, RT23q, RT45q; \
	vmov.64 RT7, re; \
	veor.64 RT1, RT2, RT3; \
	vbsl.64 RT7, rf, rg; \
	\
	vadd.u64 RT1, RT1, rh; \
	vshr.u64 RT2, ra, #28; \
	vshl.u64 RT3, ra, #64 - 28; \
	vadd.u64 RT1, RT1, RT0; \
	vshr.u64 RT4, ra, #34; \
	vshl.u64 RT5, ra, #64 - 34; \
	vadd.u64 RT1, RT1, RT7; \
	\
	/* h = Sum0 (a) + Maj (a, b, c); */ \
	veor.64 RT23q, RT23q, RT45q; \
	vshr.u64 RT4, ra, #39; \
	vshl.u64 RT5, ra, #64 - 39; \
	veor.64 RT0, ra, rb; \
	veor.64 RT23q, RT23q, RT45q; \
	vbsl.64 RT0, rc, rb; \
	vadd.u64 rd, rd, RT1; /* d+=t1; */ \
	veor.64 rh, RT2, RT3; \
	\
	/* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
	vshr.u64 RT2, rd, #14; \
	vshl.u64 RT3, rd, #64 - 14; \
	vadd.u64 rh, rh, RT0; \
	vshr.u64 RT4, rd, #18; \
	vshl.u64 RT5, rd, #64 - 18; \
	vadd.u64 rh, rh, RT1; /* h+=t1; */ \
	vld1.64 {RT0}, [RK]!; \
	veor.64 RT23q, RT23q, RT45q; \
	vshr.u64 RT4, rd, #41; \
	vshl.u64 RT5, rd, #64 - 41; \
	vadd.u64 RT0, RT0, rw1; \
	veor.64 RT23q, RT23q, RT45q; \
	vmov.64 RT7, rd; \
	veor.64 RT1, RT2, RT3; \
	vbsl.64 RT7, re, rf; \
	\
	vadd.u64 RT1, RT1, rg; \
	vshr.u64 RT2, rh, #28; \
	vshl.u64 RT3, rh, #64 - 28; \
	vadd.u64 RT1, RT1, RT0; \
	vshr.u64 RT4, rh, #34; \
	vshl.u64 RT5, rh, #64 - 34; \
	vadd.u64 RT1, RT1, RT7; \
	\
	/* g = Sum0 (h) + Maj (h, a, b); */ \
	veor.64 RT23q, RT23q, RT45q; \
	vshr.u64 RT4, rh, #39; \
	vshl.u64 RT5, rh, #64 - 39; \
	veor.64 RT0, rh, ra; \
	veor.64 RT23q, RT23q, RT45q; \
	vbsl.64 RT0, rb, ra; \
	vadd.u64 rc, rc, RT1; /* c+=t1; */ \
	veor.64 rg, RT2, RT3; \
	\
	/* w[0] += S1 (w[14]) + w[9] + S0 (w[1]); */ \
	/* w[1] += S1 (w[15]) + w[10] + S0 (w[2]); */ \
	\
	/**** S0(w[1:2]) */ \
	\
	/* w[0:1] += w[9:10] */ \
	/* RT23q = rw1:rw2 */ \
	vext.u64 RT23q, rw01q, rw23q, #1; \
	vadd.u64 rw0, rw9; \
	vadd.u64 rg, rg, RT0; \
	vadd.u64 rw1, rw10;\
	vadd.u64 rg, rg, RT1; /* g+=t1; */ \
	\
	vshr.u64 RT45q, RT23q, #1; \
	vshl.u64 RT67q, RT23q, #64 - 1; \
	vshr.u64 RT01q, RT23q, #8; \
	veor.u64 RT45q, RT45q, RT67q; \
	vshl.u64 RT67q, RT23q, #64 - 8; \
	veor.u64 RT45q, RT45q, RT01q; \
	vshr.u64 RT01q, RT23q, #7; \
	veor.u64 RT45q, RT45q, RT67q; \
	\
	/**** S1(w[14:15]) */ \
	vshr.u64 RT23q, rw1415q, #6; \
	veor.u64 RT01q, RT01q, RT45q; \
	vshr.u64 RT45q, rw1415q, #19; \
	vshl.u64 RT67q, rw1415q, #64 - 19; \
	veor.u64 RT23q, RT23q, RT45q; \
	vshr.u64 RT45q, rw1415q, #61; \
	veor.u64 RT23q, RT23q, RT67q; \
	vshl.u64 RT67q, rw1415q, #64 - 61; \
	veor.u64 RT23q, RT23q, RT45q; \
	vadd.u64 rw01q, RT01q; /* w[0:1] += S(w[1:2]) */ \
	veor.u64 RT01q, RT23q, RT67q;

/*
 * Deferred schedule fix-up left pending by rounds2_0_63: apply the
 * S1(w[14:15]) term (held in RT01q) to the given word pair.  Used as
 * the interleave_op of a later round invocation.
 */
#define vadd_RT01q(rw01q) \
	/* w[0:1] += S(w[14:15]) */ \
	vadd.u64 rw01q, RT01q;

/* No-op interleave hook. */
#define dummy(_) /*_*/
/*
 * rounds2_64_79: two SHA-512 compression rounds for t in 64..79.  Same
 * round computation as rounds2_0_63 but with no message-schedule
 * expansion (the last 16 rounds consume w[] as-is).  Two interleave
 * hooks let the caller splice in schedule fix-ups or state additions.
 */
#define rounds2_64_79(ra, rb, rc, rd, re, rf, rg, rh, rw0, rw1, \
		      interleave_op1, arg1, interleave_op2, arg2) \
	/* t1 = h + Sum1 (e) + Ch (e, f, g) + k[t] + w[t]; */ \
	vshr.u64 RT2, re, #14; \
	vshl.u64 RT3, re, #64 - 14; \
	interleave_op1(arg1); \
	vshr.u64 RT4, re, #18; \
	vshl.u64 RT5, re, #64 - 18; \
	interleave_op2(arg2); \
	vld1.64 {RT0}, [RK]!; \
	veor.64 RT23q, RT23q, RT45q; \
	vshr.u64 RT4, re, #41; \
	vshl.u64 RT5, re, #64 - 41; \
	vadd.u64 RT0, RT0, rw0; \
	veor.64 RT23q, RT23q, RT45q; \
	vmov.64 RT7, re; \
	veor.64 RT1, RT2, RT3; \
	vbsl.64 RT7, rf, rg; \
	\
	vadd.u64 RT1, RT1, rh; \
	vshr.u64 RT2, ra, #28; \
	vshl.u64 RT3, ra, #64 - 28; \
	vadd.u64 RT1, RT1, RT0; \
	vshr.u64 RT4, ra, #34; \
	vshl.u64 RT5, ra, #64 - 34; \
	vadd.u64 RT1, RT1, RT7; \
	\
	/* h = Sum0 (a) + Maj (a, b, c); */ \
	veor.64 RT23q, RT23q, RT45q; \
	vshr.u64 RT4, ra, #39; \
	vshl.u64 RT5, ra, #64 - 39; \
	veor.64 RT0, ra, rb; \
	veor.64 RT23q, RT23q, RT45q; \
	vbsl.64 RT0, rc, rb; \
	vadd.u64 rd, rd, RT1; /* d+=t1; */ \
	veor.64 rh, RT2, RT3; \
	\
	/* t1 = g + Sum1 (d) + Ch (d, e, f) + k[t] + w[t]; */ \
	vshr.u64 RT2, rd, #14; \
	vshl.u64 RT3, rd, #64 - 14; \
	vadd.u64 rh, rh, RT0; \
	vshr.u64 RT4, rd, #18; \
	vshl.u64 RT5, rd, #64 - 18; \
	vadd.u64 rh, rh, RT1; /* h+=t1; */ \
	vld1.64 {RT0}, [RK]!; \
	veor.64 RT23q, RT23q, RT45q; \
	vshr.u64 RT4, rd, #41; \
	vshl.u64 RT5, rd, #64 - 41; \
	vadd.u64 RT0, RT0, rw1; \
	veor.64 RT23q, RT23q, RT45q; \
	vmov.64 RT7, rd; \
	veor.64 RT1, RT2, RT3; \
	vbsl.64 RT7, re, rf; \
	\
	vadd.u64 RT1, RT1, rg; \
	vshr.u64 RT2, rh, #28; \
	vshl.u64 RT3, rh, #64 - 28; \
	vadd.u64 RT1, RT1, RT0; \
	vshr.u64 RT4, rh, #34; \
	vshl.u64 RT5, rh, #64 - 34; \
	vadd.u64 RT1, RT1, RT7; \
	\
	/* g = Sum0 (h) + Maj (h, a, b); */ \
	veor.64 RT23q, RT23q, RT45q; \
	vshr.u64 RT4, rh, #39; \
	vshl.u64 RT5, rh, #64 - 39; \
	veor.64 RT0, rh, ra; \
	veor.64 RT23q, RT23q, RT45q; \
	vbsl.64 RT0, rb, ra; \
	vadd.u64 rc, rc, RT1; /* c+=t1; */ \
	veor.64 rg, RT2, RT3;

/* Interleave hooks completing the Maj/t1 additions of the previous
 * rounds2_64_79 invocation on the named working variable. */
#define vadd_rg_RT0(rg) \
	vadd.u64 rg, rg, RT0;

#define vadd_rg_RT1(rg) \
	vadd.u64 rg, rg, RT1; /* g+=t1; */
.align 3
ENTRY(sha512_transform_neon)
	/* Input:
	 *	%r0: SHA512_CONTEXT
	 *	%r1: data
	 *	%r2: u64 k[] constants
	 *	%r3: nblks
	 */
	push {%lr};

	/* %lr counts schedule rounds done within a block (stepped by 16,
	 * compared against 64 -- rounds 64..79 are handled unrolled). */
	mov %lr, #0;

	/* Load context to d0-d7 */
	vld1.64 {RA-RD}, [%r0]!;
	vld1.64 {RE-RH}, [%r0];
	sub %r0, #(4*8);

	/* Load input to w[16], d16-d31 */
	/* NOTE: Assumes that on ARMv7 unaligned accesses are always allowed. */
	vld1.64 {RW0-RW3}, [%r1]!;
	vld1.64 {RW4-RW7}, [%r1]!;
	vld1.64 {RW8-RW11}, [%r1]!;
	vld1.64 {RW12-RW15}, [%r1]!;
#ifdef __ARMEL__
	/* byteswap */
	vrev64.8 RW01q, RW01q;
	vrev64.8 RW23q, RW23q;
	vrev64.8 RW45q, RW45q;
	vrev64.8 RW67q, RW67q;
	vrev64.8 RW89q, RW89q;
	vrev64.8 RW1011q, RW1011q;
	vrev64.8 RW1213q, RW1213q;
	vrev64.8 RW1415q, RW1415q;
#endif

	/* EABI says that d8-d15 must be preserved by callee. */
	/*vpush {RT0-RT7};*/
	/* NOTE(review): the save above is deliberately disabled --
	 * presumably kernel_neon_begin()/end() in the glue code covers
	 * the NEON register file; confirm against the callers. */

	/* Top of the per-block loop: rounds 0..63 in groups of 16 with
	 * interleaved message-schedule expansion. */
.Loop:
	rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
		     RW23q, RW1415q, RW9, RW10, dummy, _);
	b .Lenter_rounds;

.Loop_rounds:
	rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1, RW01q, RW2,
		     RW23q, RW1415q, RW9, RW10, vadd_RT01q, RW1415q);
.Lenter_rounds:
	rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3, RW23q, RW4,
		     RW45q, RW01q, RW11, RW12, vadd_RT01q, RW01q);
	rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5, RW45q, RW6,
		     RW67q, RW23q, RW13, RW14, vadd_RT01q, RW23q);
	rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7, RW67q, RW8,
		     RW89q, RW45q, RW15, RW0, vadd_RT01q, RW45q);
	rounds2_0_63(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9, RW89q, RW10,
		     RW1011q, RW67q, RW1, RW2, vadd_RT01q, RW67q);
	rounds2_0_63(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11, RW1011q, RW12,
		     RW1213q, RW89q, RW3, RW4, vadd_RT01q, RW89q);
	add %lr, #16;
	rounds2_0_63(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13, RW1213q, RW14,
		     RW1415q, RW1011q, RW5, RW6, vadd_RT01q, RW1011q);
	cmp %lr, #64;
	rounds2_0_63(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15, RW1415q, RW0,
		     RW01q, RW1213q, RW7, RW8, vadd_RT01q, RW1213q);
	bne .Loop_rounds;

	/* Rounds 64..79, unrolled; the remaining block count decides
	 * between the "more blocks follow" path (which preloads the next
	 * block's data) and the tail path. */
	subs %r3, #1;

	rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW0, RW1,
		      vadd_RT01q, RW1415q, dummy, _);
	rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW2, RW3,
		      vadd_rg_RT0, RG, vadd_rg_RT1, RG);
	beq .Lhandle_tail;
	vld1.64 {RW0-RW3}, [%r1]!;
	rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
		      vadd_rg_RT0, RE, vadd_rg_RT1, RE);
	rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
		      vadd_rg_RT0, RC, vadd_rg_RT1, RC);
#ifdef __ARMEL__
	vrev64.8 RW01q, RW01q;
	vrev64.8 RW23q, RW23q;
#endif
	vld1.64 {RW4-RW7}, [%r1]!;
	rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
		      vadd_rg_RT0, RA, vadd_rg_RT1, RA);
	rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
		      vadd_rg_RT0, RG, vadd_rg_RT1, RG);
#ifdef __ARMEL__
	vrev64.8 RW45q, RW45q;
	vrev64.8 RW67q, RW67q;
#endif
	vld1.64 {RW8-RW11}, [%r1]!;
	rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
		      vadd_rg_RT0, RE, vadd_rg_RT1, RE);
	rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
		      vadd_rg_RT0, RC, vadd_rg_RT1, RC);
#ifdef __ARMEL__
	vrev64.8 RW89q, RW89q;
	vrev64.8 RW1011q, RW1011q;
#endif
	vld1.64 {RW12-RW15}, [%r1]!;
	vadd_rg_RT0(RA);
	vadd_rg_RT1(RA);

	/* Load context */
	vld1.64 {RT0-RT3}, [%r0]!;
	vld1.64 {RT4-RT7}, [%r0];
	sub %r0, #(4*8);

#ifdef __ARMEL__
	vrev64.8 RW1213q, RW1213q;
	vrev64.8 RW1415q, RW1415q;
#endif

	/* Davies-Meyer feed-forward: add the saved context back into the
	 * working variables. */
	vadd.u64 RA, RT0;
	vadd.u64 RB, RT1;
	vadd.u64 RC, RT2;
	vadd.u64 RD, RT3;
	vadd.u64 RE, RT4;
	vadd.u64 RF, RT5;
	vadd.u64 RG, RT6;
	vadd.u64 RH, RT7;

	/* Store the first half of context */
	vst1.64 {RA-RD}, [%r0]!;
	/* Rewind the constant pointer to k[0] for the next block
	 * (80 u64 entries were consumed). */
	sub RK, $(8*80);
	vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
	mov %lr, #0;
	sub %r0, #(4*8);

	b .Loop;

	/* Final block: finish the remaining 64..79 rounds, feed the state
	 * forward, and scrub the NEON registers before returning. */
.Lhandle_tail:
	rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW4, RW5,
		      vadd_rg_RT0, RE, vadd_rg_RT1, RE);
	rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW6, RW7,
		      vadd_rg_RT0, RC, vadd_rg_RT1, RC);
	rounds2_64_79(RA, RB, RC, RD, RE, RF, RG, RH, RW8, RW9,
		      vadd_rg_RT0, RA, vadd_rg_RT1, RA);
	rounds2_64_79(RG, RH, RA, RB, RC, RD, RE, RF, RW10, RW11,
		      vadd_rg_RT0, RG, vadd_rg_RT1, RG);
	rounds2_64_79(RE, RF, RG, RH, RA, RB, RC, RD, RW12, RW13,
		      vadd_rg_RT0, RE, vadd_rg_RT1, RE);
	rounds2_64_79(RC, RD, RE, RF, RG, RH, RA, RB, RW14, RW15,
		      vadd_rg_RT0, RC, vadd_rg_RT1, RC);

	/* Load context to d16-d23 */
	vld1.64 {RW0-RW3}, [%r0]!;
	vadd_rg_RT0(RA);
	vld1.64 {RW4-RW7}, [%r0];
	vadd_rg_RT1(RA);
	sub %r0, #(4*8);

	vadd.u64 RA, RW0;
	vadd.u64 RB, RW1;
	vadd.u64 RC, RW2;
	vadd.u64 RD, RW3;
	vadd.u64 RE, RW4;
	vadd.u64 RF, RW5;
	vadd.u64 RG, RW6;
	vadd.u64 RH, RW7;

	/* Store the first half of context */
	vst1.64 {RA-RD}, [%r0]!;

	/* Clear used registers */
	/* d16-d31 */
	veor.u64 RW01q, RW01q;
	veor.u64 RW23q, RW23q;
	veor.u64 RW45q, RW45q;
	veor.u64 RW67q, RW67q;
	vst1.64 {RE-RH}, [%r0]; /* Store the last half of context */
	veor.u64 RW89q, RW89q;
	veor.u64 RW1011q, RW1011q;
	veor.u64 RW1213q, RW1213q;
	veor.u64 RW1415q, RW1415q;
	/* d8-d15 */
	/*vpop {RT0-RT7};*/
	/* d0-d7 (q0-q3) */
	veor.u64 %q0, %q0;
	veor.u64 %q1, %q1;
	veor.u64 %q2, %q2;
	veor.u64 %q3, %q3;

	pop {%pc};
ENDPROC(sha512_transform_neon)

View File

@ -0,0 +1,305 @@
/*
* Glue code for the SHA512 Secure Hash Algorithm assembly implementation
* using NEON instructions.
*
* Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
*
* This file is based on sha512_ssse3_glue.c:
* Copyright (C) 2013 Intel Corporation
* Author: Tim Chen <tim.c.chen@linux.intel.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <linux/string.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/simd.h>
#include <asm/neon.h>
/*
 * SHA-384/SHA-512 round constants K[0..79] (FIPS 180-4: the first 64
 * bits of the fractional parts of the cube roots of the first eighty
 * primes), passed to sha512_transform_neon() as its k[] argument.
 */
static const u64 sha512_k[] = {
	0x428a2f98d728ae22ULL, 0x7137449123ef65cdULL,
	0xb5c0fbcfec4d3b2fULL, 0xe9b5dba58189dbbcULL,
	0x3956c25bf348b538ULL, 0x59f111f1b605d019ULL,
	0x923f82a4af194f9bULL, 0xab1c5ed5da6d8118ULL,
	0xd807aa98a3030242ULL, 0x12835b0145706fbeULL,
	0x243185be4ee4b28cULL, 0x550c7dc3d5ffb4e2ULL,
	0x72be5d74f27b896fULL, 0x80deb1fe3b1696b1ULL,
	0x9bdc06a725c71235ULL, 0xc19bf174cf692694ULL,
	0xe49b69c19ef14ad2ULL, 0xefbe4786384f25e3ULL,
	0x0fc19dc68b8cd5b5ULL, 0x240ca1cc77ac9c65ULL,
	0x2de92c6f592b0275ULL, 0x4a7484aa6ea6e483ULL,
	0x5cb0a9dcbd41fbd4ULL, 0x76f988da831153b5ULL,
	0x983e5152ee66dfabULL, 0xa831c66d2db43210ULL,
	0xb00327c898fb213fULL, 0xbf597fc7beef0ee4ULL,
	0xc6e00bf33da88fc2ULL, 0xd5a79147930aa725ULL,
	0x06ca6351e003826fULL, 0x142929670a0e6e70ULL,
	0x27b70a8546d22ffcULL, 0x2e1b21385c26c926ULL,
	0x4d2c6dfc5ac42aedULL, 0x53380d139d95b3dfULL,
	0x650a73548baf63deULL, 0x766a0abb3c77b2a8ULL,
	0x81c2c92e47edaee6ULL, 0x92722c851482353bULL,
	0xa2bfe8a14cf10364ULL, 0xa81a664bbc423001ULL,
	0xc24b8b70d0f89791ULL, 0xc76c51a30654be30ULL,
	0xd192e819d6ef5218ULL, 0xd69906245565a910ULL,
	0xf40e35855771202aULL, 0x106aa07032bbd1b8ULL,
	0x19a4c116b8d2d0c8ULL, 0x1e376c085141ab53ULL,
	0x2748774cdf8eeb99ULL, 0x34b0bcb5e19b48a8ULL,
	0x391c0cb3c5c95a63ULL, 0x4ed8aa4ae3418acbULL,
	0x5b9cca4f7763e373ULL, 0x682e6ff3d6b2b8a3ULL,
	0x748f82ee5defb2fcULL, 0x78a5636f43172f60ULL,
	0x84c87814a1f0ab72ULL, 0x8cc702081a6439ecULL,
	0x90befffa23631e28ULL, 0xa4506cebde82bde9ULL,
	0xbef9a3f7b2c67915ULL, 0xc67178f2e372532bULL,
	0xca273eceea26619cULL, 0xd186b8c721c0c207ULL,
	0xeada7dd6cde0eb1eULL, 0xf57d4f7fee6ed178ULL,
	0x06f067aa72176fbaULL, 0x0a637dc5a2c898a6ULL,
	0x113f9804bef90daeULL, 0x1b710b35131c471bULL,
	0x28db77f523047d84ULL, 0x32caab7b40c72493ULL,
	0x3c9ebe0a15c9bebcULL, 0x431d67c49c100d4cULL,
	0x4cc5d4becb3e42b6ULL, 0x597f299cfc657e2aULL,
	0x5fcb6fab3ad6faecULL, 0x6c44198c4a475817ULL
};
asmlinkage void sha512_transform_neon(u64 *digest, const void *data,
const u64 k[], unsigned int num_blks);
static int sha512_neon_init(struct shash_desc *desc)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA512_H0;
sctx->state[1] = SHA512_H1;
sctx->state[2] = SHA512_H2;
sctx->state[3] = SHA512_H3;
sctx->state[4] = SHA512_H4;
sctx->state[5] = SHA512_H5;
sctx->state[6] = SHA512_H6;
sctx->state[7] = SHA512_H7;
sctx->count[0] = sctx->count[1] = 0;
return 0;
}
/*
 * Core NEON update path.  Caller has already entered a kernel NEON
 * region and guarantees at least one full block is available
 * (partial + len >= SHA512_BLOCK_SIZE).
 */
static int __sha512_neon_update(struct shash_desc *desc, const u8 *data,
				unsigned int len, unsigned int partial)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	unsigned int consumed = 0;

	/* 128-bit byte counter: carry into count[1] on wrap. */
	sctx->count[0] += len;
	if (sctx->count[0] < len)
		sctx->count[1]++;

	/* Top up a previously buffered partial block and hash it. */
	if (partial) {
		consumed = SHA512_BLOCK_SIZE - partial;
		memcpy(sctx->buf + partial, data, consumed);
		sha512_transform_neon(sctx->state, sctx->buf, sha512_k, 1);
	}

	/* Hash the remaining whole blocks straight from 'data'. */
	if (len - consumed >= SHA512_BLOCK_SIZE) {
		unsigned int nblocks = (len - consumed) / SHA512_BLOCK_SIZE;

		sha512_transform_neon(sctx->state, data + consumed, sha512_k,
				      nblocks);
		consumed += nblocks * SHA512_BLOCK_SIZE;
	}

	/* Stash the tail for the next update/final call. */
	memcpy(sctx->buf, data + consumed, len - consumed);
	return 0;
}
/*
 * shash .update hook: buffer sub-block input; otherwise hash with NEON,
 * falling back to the generic C implementation when NEON is unusable.
 */
static int sha512_neon_update(struct shash_desc *desc, const u8 *data,
			     unsigned int len)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count[0] % SHA512_BLOCK_SIZE;
	int ret;

	/* Input too small to complete a block: stash it and bump the
	 * 128-bit byte counter (carry into count[1] on wrap). */
	if (partial + len < SHA512_BLOCK_SIZE) {
		memcpy(sctx->buf + partial, data, len);
		sctx->count[0] += len;
		if (sctx->count[0] < len)
			sctx->count[1]++;
		return 0;
	}

	if (may_use_simd()) {
		kernel_neon_begin();
		ret = __sha512_neon_update(desc, data, len, partial);
		kernel_neon_end();
	} else {
		ret = crypto_sha512_update(desc, data, len);
	}

	return ret;
}
/* Add padding and return the message digest. */
static int sha512_neon_final(struct shash_desc *desc, u8 *out)
{
	struct sha512_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be64 *dst = (__be64 *)out;
	__be64 bits[2];
	static const u8 padding[SHA512_BLOCK_SIZE] = { 0x80, };

	/* save number of bits */
	/* 128-bit big-endian bit count (FIPS 180 padding); must be built
	 * before the padding updates below advance sctx->count. */
	bits[1] = cpu_to_be64(sctx->count[0] << 3);
	bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);

	/* Pad out to 112 mod 128 and append length */
	index = sctx->count[0] & 0x7f;
	padlen = (index < 112) ? (112 - index) : ((128+112) - index);

	if (!may_use_simd()) {
		/* NEON unusable here: finish via the generic C driver,
		 * which does its own buffering and counting. */
		crypto_sha512_update(desc, padding, padlen);
		crypto_sha512_update(desc, (const u8 *)&bits, sizeof(bits));
	} else {
		kernel_neon_begin();
		/* We need to fill a whole block for __sha512_neon_update() */
		if (padlen <= 112) {
			/* Padding stays inside the current block: buffer it
			 * and bump the 128-bit byte counter by hand. */
			sctx->count[0] += padlen;
			if (sctx->count[0] < padlen)
				sctx->count[1]++;
			memcpy(sctx->buf + index, padding, padlen);
		} else {
			__sha512_neon_update(desc, padding, padlen, index);
		}
		/* After padding, the 16 length bytes start at offset 112. */
		__sha512_neon_update(desc, (const u8 *)&bits,
				     sizeof(bits), 112);
		kernel_neon_end();
	}

	/* Store state in digest */
	for (i = 0; i < 8; i++)
		dst[i] = cpu_to_be64(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}
/* Serialize the in-progress hash state into 'out' (statesize bytes). */
static int sha512_neon_export(struct shash_desc *desc, void *out)
{
	struct sha512_state *state = shash_desc_ctx(desc);

	memcpy(out, state, sizeof(*state));
	return 0;
}
/* Restore a hash state previously produced by sha512_neon_export(). */
static int sha512_neon_import(struct shash_desc *desc, const void *in)
{
	struct sha512_state *state = shash_desc_ctx(desc);

	memcpy(state, in, sizeof(*state));
	return 0;
}
static int sha384_neon_init(struct shash_desc *desc)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
sctx->state[0] = SHA384_H0;
sctx->state[1] = SHA384_H1;
sctx->state[2] = SHA384_H2;
sctx->state[3] = SHA384_H3;
sctx->state[4] = SHA384_H4;
sctx->state[5] = SHA384_H5;
sctx->state[6] = SHA384_H6;
sctx->state[7] = SHA384_H7;
sctx->count[0] = sctx->count[1] = 0;
return 0;
}
/*
 * SHA-384 is SHA-512 truncated to 48 bytes: compute the full digest on
 * the stack, copy out the prefix, then clear the temporary.
 */
static int sha384_neon_final(struct shash_desc *desc, u8 *hash)
{
	u8 full_digest[SHA512_DIGEST_SIZE];

	sha512_neon_final(desc, full_digest);
	memcpy(hash, full_digest, SHA384_DIGEST_SIZE);
	/* Scrub the stack copy.  NOTE(review): plain memset on a dying
	 * local may be elided by the compiler; consider memzero_explicit
	 * if available in this tree. */
	memset(full_digest, 0, sizeof(full_digest));

	return 0;
}
/*
 * The two hash algorithms exported by this module. Both share the NEON
 * SHA-512 update path; they differ only in init (initial hash values),
 * final (digest length) and block size. cra_priority 250 ranks them above
 * the generic C implementations so the crypto API prefers them when NEON
 * is available.
 */
static struct shash_alg algs[] = { {
	/* SHA-512: full 512-bit digest. */
	.digestsize	=	SHA512_DIGEST_SIZE,
	.init		=	sha512_neon_init,
	.update		=	sha512_neon_update,
	.final		=	sha512_neon_final,
	.export		=	sha512_neon_export,
	.import		=	sha512_neon_import,
	.descsize	=	sizeof(struct sha512_state),
	.statesize	=	sizeof(struct sha512_state),
	.base		=	{
		.cra_name	=	"sha512",
		.cra_driver_name =	"sha512-neon",
		.cra_priority	=	250,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA512_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
},  {
	/* SHA-384: truncated variant, reuses the SHA-512 update routine. */
	.digestsize	=	SHA384_DIGEST_SIZE,
	.init		=	sha384_neon_init,
	.update		=	sha512_neon_update,
	.final		=	sha384_neon_final,
	.export		=	sha512_neon_export,
	.import		=	sha512_neon_import,
	.descsize	=	sizeof(struct sha512_state),
	.statesize	=	sizeof(struct sha512_state),
	.base		=	{
		.cra_name	=	"sha384",
		.cra_driver_name =	"sha384-neon",
		.cra_priority	=	250,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA384_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
} };
/*
 * Module entry point: register both shash algorithms, but only on CPUs
 * that actually implement the NEON extension (checked via elf_hwcap).
 */
static int __init sha512_neon_mod_init(void)
{
	return cpu_has_neon() ?
		crypto_register_shashes(algs, ARRAY_SIZE(algs)) : -ENODEV;
}
/* Module exit point: unregister both algorithms registered at init. */
static void __exit sha512_neon_mod_fini(void)
{
	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
}
/* Module init/exit hooks and metadata. */
module_init(sha512_neon_mod_init);
module_exit(sha512_neon_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA512 Secure Hash Algorithm, NEON accelerated");
/* Aliases let the crypto API autoload this module for "sha512"/"sha384". */
MODULE_ALIAS("sha512");
MODULE_ALIAS("sha384");

View File

@ -17,4 +17,5 @@ generic-y += poll.h
generic-y += resource.h
generic-y += sections.h
generic-y += siginfo.h
generic-y += simd.h
generic-y += sizes.h

View File

@ -0,0 +1,10 @@
#ifndef ASM_ARM_CRYPTO_SHA1_H
#define ASM_ARM_CRYPTO_SHA1_H
#include <linux/crypto.h>
#include <crypto/sha.h>
extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
unsigned int len);
#endif

View File

@ -0,0 +1,36 @@
/*
* linux/arch/arm/include/asm/neon.h
*
* Copyright (C) 2013 Linaro Ltd <ard.biesheuvel@linaro.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <asm/hwcap.h>
#define cpu_has_neon() (!!(elf_hwcap & HWCAP_NEON))
#ifdef __ARM_NEON__
/*
* If you are affected by the BUILD_BUG below, it probably means that you are
* using NEON code /and/ calling the kernel_neon_begin() function from the same
* compilation unit. To prevent issues that may arise from GCC reordering or
* generating(1) NEON instructions outside of these begin/end functions, the
* only supported way of using NEON code in the kernel is by isolating it in a
* separate compilation unit, and calling it from another unit from inside a
* kernel_neon_begin/kernel_neon_end pair.
*
* (1) Current GCC (4.7) might generate NEON instructions at O3 level if
 * -mfpu=neon is set.
*/
#define kernel_neon_begin() \
BUILD_BUG_ON_MSG(1, "kernel_neon_begin() called from NEON code")
#else
void kernel_neon_begin(void);
#endif
void kernel_neon_end(void);

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -2092,7 +2092,7 @@ static int bam_init(void)
a2_props.virt_addr = a2_virt_addr;
a2_props.virt_size = a2_phys_size;
a2_props.irq = a2_bam_irq;
a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP;
a2_props.options = SPS_BAM_OPT_IRQ_WAKEUP | SPS_BAM_HOLD_MEM;
a2_props.num_pipes = A2_NUM_PIPES;
a2_props.summing_threshold = A2_SUMMING_THRESHOLD;
a2_props.constrained_logging = true;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2008-2009, The Linux Foundation. All rights reserved.
/* Copyright (c) 2008-2009,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -99,7 +99,7 @@ static int buffer_req(struct msm_dma_alloc_req *req)
if (i >= MAX_TEST_BUFFERS)
goto error;
buffers[i] = kmalloc(req->size, GFP_KERNEL | __GFP_DMA);
buffers[i] = kzalloc(req->size, GFP_KERNEL | __GFP_DMA);
if (buffers[i] == 0)
goto error;
sizes[i] = req->size;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
/* Copyright (c) 2011-2013, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -109,6 +109,8 @@
#define SPS_BAM_NO_LOCAL_CLK_GATING (1UL << 5)
/* Don't enable writeback cancel*/
#define SPS_BAM_CANCEL_WB (1UL << 6)
/* Hold memory for BAM DMUX */
#define SPS_BAM_HOLD_MEM (1UL << 8)
/* BAM device management flags */

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2011-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -516,13 +516,18 @@ static int msm_ipc_router_ioctl(struct socket *sock,
ret = copy_to_user((void *)arg, &server_arg,
sizeof(server_arg));
if (srv_info_sz) {
n = min(server_arg.num_entries_found,
server_arg.num_entries_in_array);
if (ret == 0 && n) {
ret = copy_to_user((void *)(arg + sizeof(server_arg)),
srv_info, srv_info_sz);
if (ret)
ret = -EFAULT;
kfree(srv_info);
srv_info, n * sizeof(*srv_info));
}
if (ret)
ret = -EFAULT;
kfree(srv_info);
break;
case IPC_ROUTER_IOCTL_BIND_CONTROL_PORT:

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011-2013 The Linux Foundation. All rights reserved.
* Copyright (c) 2011-2013,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -424,7 +424,6 @@ static int msm_l2_test_set_ev_constraint(struct perf_event *event)
int err = 0;
u64 bitmap_t;
u32 shift_idx;
if (evt_prefix == L2_TRACECTR_PREFIX)
return err;
/*

View File

@ -299,10 +299,12 @@ static int pil_msa_mba_init_image(struct pil_desc *pil,
dma_addr_t mdata_phys;
s32 status;
int ret;
DEFINE_DMA_ATTRS(attrs);
dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
/* Make metadata physically contiguous and 4K aligned. */
mdata_virt = dma_alloc_coherent(pil->dev, size, &mdata_phys,
GFP_KERNEL);
mdata_virt = dma_alloc_attrs(pil->dev, size, &mdata_phys,
GFP_KERNEL, &attrs);
if (!mdata_virt) {
dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
return -ENOMEM;
@ -328,7 +330,7 @@ static int pil_msa_mba_init_image(struct pil_desc *pil,
ret = -EINVAL;
}
dma_free_coherent(pil->dev, size, mdata_virt, mdata_phys);
dma_free_attrs(pil->dev, size, mdata_virt, mdata_phys, &attrs);
return ret;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -166,6 +166,7 @@ static int voice_svc_send_req(struct voice_svc_cmd_request *apr_request,
void *apr_handle = NULL;
struct apr_data *aprdata = NULL;
uint32_t user_payload_size = 0;
uint32_t payload_size = 0;
if (apr_request == NULL) {
pr_err("%s: apr_request is NULL\n", __func__);
@ -175,17 +176,21 @@ static int voice_svc_send_req(struct voice_svc_cmd_request *apr_request,
}
user_payload_size = apr_request->payload_size;
aprdata = kmalloc(sizeof(struct apr_data) + user_payload_size,
GFP_KERNEL);
if (aprdata == NULL) {
pr_err("%s: aprdata kmalloc failed.", __func__);
ret = -ENOMEM;
payload_size = sizeof(struct apr_data) + user_payload_size;
if (payload_size <= user_payload_size) {
pr_err("%s: invalid payload size ( 0x%x ).\n",
__func__, user_payload_size);
ret = -EINVAL;
goto done;
} else {
aprdata = kmalloc(payload_size, GFP_KERNEL);
if (aprdata == NULL) {
ret = -ENOMEM;
goto done;
}
}
voice_svc_update_hdr(apr_request, aprdata, prtd);
if (!strncmp(apr_request->svc_name, VOICE_SVC_CVS_STR,
@ -327,6 +332,7 @@ static long voice_svc_ioctl(struct file *file, unsigned int cmd,
void __user *arg = (void __user *)u_arg;
uint32_t user_payload_size = 0;
unsigned long spin_flags;
uint32_t payload_size = 0;
pr_debug("%s: cmd: %u\n", __func__, cmd);
@ -357,17 +363,21 @@ static long voice_svc_ioctl(struct file *file, unsigned int cmd,
user_payload_size =
((struct voice_svc_cmd_request*)arg)->payload_size;
apr_request = kmalloc(sizeof(struct voice_svc_cmd_request) +
user_payload_size, GFP_KERNEL);
if (apr_request == NULL) {
pr_err("%s: apr_request kmalloc failed.", __func__);
ret = -ENOMEM;
payload_size = sizeof(struct voice_svc_cmd_request) + user_payload_size;
if (payload_size <= user_payload_size) {
pr_err("%s: invalid payload size ( 0x%x ).\n",
__func__, user_payload_size);
ret = -EINVAL;
goto done;
} else {
apr_request = kmalloc(payload_size, GFP_KERNEL);
if (apr_request == NULL) {
ret = -ENOMEM;
goto done;
}
}
if (copy_from_user(apr_request, arg,
sizeof(struct voice_svc_cmd_request) +
user_payload_size)) {

View File

@ -1,6 +1,6 @@
/* arch/arm/mach-msm/smp2p_gpio.c
*
* Copyright (c) 2013, The Linux Foundation. All rights reserved.
* Copyright (c) 2013,2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -337,7 +337,7 @@ static int smp2p_irq_map(struct irq_domain *domain_ptr, unsigned int virq,
chip = domain_ptr->host_data;
if (!chip) {
SMP2P_ERR("%s: invalid domain ptr\n", __func__);
SMP2P_ERR("%s: invalid domain ptr %pK\n", __func__, domain_ptr);
return -ENODEV;
}

View File

@ -50,7 +50,7 @@
void *b_tmp = (b); \
if (!((a_tmp)cmp(b_tmp))) { \
seq_printf(s, \
"%s:%d Fail: " #a "(%p) " #cmp " " #b "(%p)\n", \
"%s:%d Fail: " #a "(%pK) " #cmp " " #b "(%pK)\n", \
__func__, __LINE__, \
a_tmp, b_tmp); \
failed = 1; \

View File

@ -22,6 +22,7 @@
#include <linux/user.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
@ -636,6 +637,52 @@ int vfp_restore_user_hwstate(struct user_vfp __user *ufp,
return err ? -EFAULT : 0;
}
#ifdef CONFIG_KERNEL_MODE_NEON
/*
* Kernel-side NEON support functions
*/
void kernel_neon_begin(void)
{
struct thread_info *thread = current_thread_info();
unsigned int cpu;
u32 fpexc;
/*
* Kernel mode NEON is only allowed outside of interrupt context
* with preemption disabled. This will make sure that the kernel
* mode NEON register contents never need to be preserved.
*/
BUG_ON(in_interrupt());
cpu = get_cpu();
fpexc = fmrx(FPEXC) | FPEXC_EN;
fmxr(FPEXC, fpexc);
/*
* Save the userland NEON/VFP state. Under UP,
* the owner could be a task other than 'current'
*/
if (vfp_state_in_hw(cpu, thread))
vfp_save_state(&thread->vfpstate, fpexc);
#ifndef CONFIG_SMP
else if (vfp_current_hw_state[cpu] != NULL)
vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif
vfp_current_hw_state[cpu] = NULL;
}
EXPORT_SYMBOL(kernel_neon_begin);
void kernel_neon_end(void)
{
/* Disable the NEON/VFP unit. */
fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
put_cpu();
}
EXPORT_SYMBOL(kernel_neon_end);
#endif /* CONFIG_KERNEL_MODE_NEON */
/*
* VFP hardware can lose all context when a CPU goes offline.
* As we will be running in SMP mode with CPU hotplug, we will save the
@ -766,4 +813,4 @@ static int __init vfp_init(void)
return 0;
}
late_initcall(vfp_init);
core_initcall(vfp_init);

View File

@ -111,6 +111,10 @@ config CRYPTO_MANAGER2
select CRYPTO_BLKCIPHER2
select CRYPTO_PCOMP2
config CRYPTO_ABLK_HELPER
tristate
select CRYPTO_CRYPTD
config CRYPTO_USER
tristate "Userspace cryptographic algorithm configuration"
depends on NET
@ -439,11 +443,21 @@ config CRYPTO_SHA1_ARM
depends on ARM
select CRYPTO_SHA1
select CRYPTO_HASH
default y
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
using optimized ARM assembler.
config CRYPTO_SHA1_ARM_NEON
tristate "SHA1 digest algorithm (ARM NEON)"
depends on ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
select CRYPTO_SHA1_ARM
select CRYPTO_SHA1
select CRYPTO_HASH
help
SHA-1 secure hash standard (FIPS 180-1/DFIPS 180-2) implemented
using optimized ARM NEON assembly, when NEON instructions are
available.
config CRYPTO_SHA256
tristate "SHA224 and SHA256 digest algorithm"
select CRYPTO_HASH
@ -468,6 +482,21 @@ config CRYPTO_SHA512
This code also includes SHA-384, a 384 bit hash with 192 bits
of security against collision attacks.
config CRYPTO_SHA512_ARM_NEON
tristate "SHA384 and SHA512 digest algorithm (ARM NEON)"
depends on ARM && KERNEL_MODE_NEON && !CPU_BIG_ENDIAN
select CRYPTO_SHA512
select CRYPTO_HASH
help
SHA-512 secure hash standard (DFIPS 180-2) implemented
using ARM NEON instructions, when available.
This version of SHA implements a 512 bit hash with 256 bits of
security against collision attacks.
This code also includes SHA-384, a 384 bit hash with 192 bits
of security against collision attacks.
config CRYPTO_TGR192
tristate "Tiger digest algorithms"
select CRYPTO_HASH
@ -598,6 +627,46 @@ config CRYPTO_AES_NI_INTEL
ECB, CBC, LRW, PCBC, XTS. The 64 bit version has additional
acceleration for CTR.
config CRYPTO_AES_ARM
tristate "AES cipher algorithms (ARM-asm)"
depends on ARM
select CRYPTO_ALGAPI
select CRYPTO_AES
help
Use optimized AES assembler routines for ARM platforms.
AES cipher algorithms (FIPS-197). AES uses the Rijndael
algorithm.
Rijndael appears to be consistently a very good performer in
both hardware and software across a wide range of computing
environments regardless of its use in feedback or non-feedback
modes. Its key setup time is excellent, and its key agility is
good. Rijndael's very low memory requirements make it very well
suited for restricted-space environments, in which it also
demonstrates excellent performance. Rijndael's operations are
among the easiest to defend against power and timing attacks.
The AES specifies three key sizes: 128, 192 and 256 bits
See <http://csrc.nist.gov/encryption/aes/> for more information.
config CRYPTO_AES_ARM_BS
tristate "Bit sliced AES using NEON instructions"
depends on ARM && KERNEL_MODE_NEON
select CRYPTO_ALGAPI
select CRYPTO_AES_ARM
select CRYPTO_ABLK_HELPER
help
Use a faster and more secure NEON based implementation of AES in CBC,
CTR and XTS modes
Bit sliced AES gives around 45% speedup on Cortex-A15 for CTR mode
and for XTS mode encryption, CBC and XTS mode decryption speedup is
around 25%. (CBC encryption speed is not affected by this driver.)
This implementation does not rely on any lookup tables so it is
believed to be invulnerable to cache timing attacks.
config CRYPTO_AES_ARM
tristate "AES cipher algorithms (ARM-asm)"
depends on ARM

View File

@ -101,6 +101,6 @@ obj-$(CONFIG_CRYPTO_USER_API_SKCIPHER) += algif_skcipher.o
#
obj-$(CONFIG_XOR_BLOCKS) += xor.o
obj-$(CONFIG_ASYNC_CORE) += async_tx/
obj-$(CONFIG_CRYPTO_ABLK_HELPER) += ablk_helper.o
#Keep this at the bottom
obj-$(CONFIG_CRYPTO_FIPS) += last_file.o

150
crypto/ablk_helper.c Normal file
View File

@ -0,0 +1,150 @@
/*
* Shared async block cipher helpers
*
* Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
*
* Based on aesni-intel_glue.c by:
* Copyright (C) 2008, Intel Corp.
* Author: Huang Ying <ying.huang@intel.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
* USA
*
*/
#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/hardirq.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <crypto/ablk_helper.h>
#include <asm/simd.h>
int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len)
{
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
int err;
crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
& CRYPTO_TFM_REQ_MASK);
err = crypto_ablkcipher_setkey(child, key, key_len);
crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
& CRYPTO_TFM_RES_MASK);
return err;
}
EXPORT_SYMBOL_GPL(ablk_set_key);
int __ablk_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
struct blkcipher_desc desc;
desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
desc.info = req->info;
desc.flags = 0;
return crypto_blkcipher_crt(desc.tfm)->encrypt(
&desc, req->dst, req->src, req->nbytes);
}
EXPORT_SYMBOL_GPL(__ablk_encrypt);
int ablk_encrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
if (!may_use_simd()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_encrypt(cryptd_req);
} else {
return __ablk_encrypt(req);
}
}
EXPORT_SYMBOL_GPL(ablk_encrypt);
int ablk_decrypt(struct ablkcipher_request *req)
{
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
if (!may_use_simd()) {
struct ablkcipher_request *cryptd_req =
ablkcipher_request_ctx(req);
memcpy(cryptd_req, req, sizeof(*req));
ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);
return crypto_ablkcipher_decrypt(cryptd_req);
} else {
struct blkcipher_desc desc;
desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
desc.info = req->info;
desc.flags = 0;
return crypto_blkcipher_crt(desc.tfm)->decrypt(
&desc, req->dst, req->src, req->nbytes);
}
}
EXPORT_SYMBOL_GPL(ablk_decrypt);
void ablk_exit(struct crypto_tfm *tfm)
{
struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
cryptd_free_ablkcipher(ctx->cryptd_tfm);
}
EXPORT_SYMBOL_GPL(ablk_exit);
int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
{
struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
struct cryptd_ablkcipher *cryptd_tfm;
cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
if (IS_ERR(cryptd_tfm))
return PTR_ERR(cryptd_tfm);
ctx->cryptd_tfm = cryptd_tfm;
tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
crypto_ablkcipher_reqsize(&cryptd_tfm->base);
return 0;
}
EXPORT_SYMBOL_GPL(ablk_init_common);
int ablk_init(struct crypto_tfm *tfm)
{
char drv_name[CRYPTO_MAX_ALG_NAME];
snprintf(drv_name, sizeof(drv_name), "__driver-%s",
crypto_tfm_alg_driver_name(tfm));
return ablk_init_common(tfm, drv_name);
}
EXPORT_SYMBOL_GPL(ablk_init);
MODULE_LICENSE("GPL");

View File

@ -246,7 +246,7 @@ static int sha256_init(struct shash_desc *desc)
return 0;
}
static int sha256_update(struct shash_desc *desc, const u8 *data,
int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha256_state *sctx = shash_desc_ctx(desc);
@ -277,6 +277,7 @@ static int sha256_update(struct shash_desc *desc, const u8 *data,
return 0;
}
EXPORT_SYMBOL(crypto_sha256_update);
static int sha256_final(struct shash_desc *desc, u8 *out)
{
@ -293,10 +294,10 @@ static int sha256_final(struct shash_desc *desc, u8 *out)
/* Pad out to 56 mod 64. */
index = sctx->count & 0x3f;
pad_len = (index < 56) ? (56 - index) : ((64+56) - index);
sha256_update(desc, padding, pad_len);
crypto_sha256_update(desc, padding, pad_len);
/* Append length (before padding) */
sha256_update(desc, (const u8 *)&bits, sizeof(bits));
crypto_sha256_update(desc, (const u8 *)&bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 8; i++)
@ -339,7 +340,7 @@ static int sha256_import(struct shash_desc *desc, const void *in)
static struct shash_alg sha256 = {
.digestsize = SHA256_DIGEST_SIZE,
.init = sha256_init,
.update = sha256_update,
.update = crypto_sha256_update,
.final = sha256_final,
.export = sha256_export,
.import = sha256_import,
@ -357,7 +358,7 @@ static struct shash_alg sha256 = {
static struct shash_alg sha224 = {
.digestsize = SHA224_DIGEST_SIZE,
.init = sha224_init,
.update = sha256_update,
.update = crypto_sha256_update,
.final = sha224_final,
.descsize = sizeof(struct sha256_state),
.base = {

View File

@ -163,8 +163,8 @@ sha384_init(struct shash_desc *desc)
return 0;
}
static int
sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
{
struct sha512_state *sctx = shash_desc_ctx(desc);
@ -197,6 +197,7 @@ sha512_update(struct shash_desc *desc, const u8 *data, unsigned int len)
return 0;
}
EXPORT_SYMBOL(crypto_sha512_update);
static int
sha512_final(struct shash_desc *desc, u8 *hash)
@ -215,10 +216,10 @@ sha512_final(struct shash_desc *desc, u8 *hash)
/* Pad out to 112 mod 128. */
index = sctx->count[0] & 0x7f;
pad_len = (index < 112) ? (112 - index) : ((128+112) - index);
sha512_update(desc, padding, pad_len);
crypto_sha512_update(desc, padding, pad_len);
/* Append length (before padding) */
sha512_update(desc, (const u8 *)bits, sizeof(bits));
crypto_sha512_update(desc, (const u8 *)bits, sizeof(bits));
/* Store state in digest */
for (i = 0; i < 8; i++)
@ -245,7 +246,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash)
static struct shash_alg sha512 = {
.digestsize = SHA512_DIGEST_SIZE,
.init = sha512_init,
.update = sha512_update,
.update = crypto_sha512_update,
.final = sha512_final,
.descsize = sizeof(struct sha512_state),
.base = {
@ -259,7 +260,7 @@ static struct shash_alg sha512 = {
static struct shash_alg sha384 = {
.digestsize = SHA384_DIGEST_SIZE,
.init = sha384_init,
.update = sha512_update,
.update = crypto_sha512_update,
.final = sha384_final,
.descsize = sizeof(struct sha512_state),
.base = {

View File

@ -685,6 +685,42 @@ int crypto_unregister_shash(struct shash_alg *alg)
}
EXPORT_SYMBOL_GPL(crypto_unregister_shash);
int crypto_register_shashes(struct shash_alg *algs, int count)
{
int i, ret;
for (i = 0; i < count; i++) {
ret = crypto_register_shash(&algs[i]);
if (ret)
goto err;
}
return 0;
err:
for (--i; i >= 0; --i)
crypto_unregister_shash(&algs[i]);
return ret;
}
EXPORT_SYMBOL_GPL(crypto_register_shashes);
int crypto_unregister_shashes(struct shash_alg *algs, int count)
{
int i, ret;
for (i = count - 1; i >= 0; --i) {
ret = crypto_unregister_shash(&algs[i]);
if (ret)
pr_err("Failed to unregister %s %s: %d\n",
algs[i].base.cra_driver_name,
algs[i].base.cra_name, ret);
}
return 0;
}
EXPORT_SYMBOL_GPL(crypto_unregister_shashes);
int shash_register_instance(struct crypto_template *tmpl,
struct shash_instance *inst)
{

View File

@ -629,7 +629,7 @@ int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
return 0;
}
t = min_t(int, group->reg_count, count);
t = min_t(unsigned int, group->reg_count, count);
buf = kmalloc(t * sizeof(unsigned int), GFP_KERNEL);
if (buf == NULL) {

View File

@ -1,4 +1,5 @@
/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2002,2007-2014,2016, The Linux Foundation. All rights
* reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -982,7 +983,7 @@ static bool _parse_ibs(struct kgsl_device_private *dev_priv,
level++;
KGSL_CMD_INFO(dev_priv->device, "ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
KGSL_CMD_INFO(dev_priv->device, "ib: gpuaddr:0x%08x, wc:%d, hptr:%pK\n",
gpuaddr, sizedwords, hostaddr);
mb();
@ -1003,8 +1004,8 @@ static bool _parse_ibs(struct kgsl_device_private *dev_priv,
cur_ret = _handle_type3(dev_priv, hostaddr);
break;
default:
KGSL_CMD_ERR(dev_priv->device, "unexpected type: "
"type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
KGSL_CMD_ERR(dev_priv->device,
"unexpected type: type:%d, word:0x%08x @ 0x%pK, gpu:0x%08x\n",
*hostaddr >> 30, *hostaddr, hostaddr,
gpuaddr+4*(sizedwords-dwords_left));
cur_ret = false;
@ -1015,7 +1016,7 @@ static bool _parse_ibs(struct kgsl_device_private *dev_priv,
if (!cur_ret) {
KGSL_CMD_ERR(dev_priv->device,
"bad sub-type: #:%d/%d, v:0x%08x"
" @ 0x%p[gb:0x%08x], level:%d\n",
" @ 0x%pK[gb:0x%08x], level:%d\n",
sizedwords-dwords_left, sizedwords, *hostaddr,
hostaddr, gpuaddr+4*(sizedwords-dwords_left),
level);
@ -1035,7 +1036,7 @@ static bool _parse_ibs(struct kgsl_device_private *dev_priv,
if (dwords_left < 0) {
KGSL_CMD_ERR(dev_priv->device,
"bad count: c:%d, #:%d/%d, "
"v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
"v:0x%08x @ 0x%pK[gb:0x%08x], level:%d\n",
count, sizedwords-(dwords_left+count),
sizedwords, *(hostaddr-count), hostaddr-count,
gpuaddr+4*(sizedwords-(dwords_left+count)),
@ -1055,7 +1056,7 @@ done:
if (!ret)
KGSL_DRV_ERR(dev_priv->device,
"parsing failed: gpuaddr:0x%08x, "
"host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);
"host:0x%pK, wc:%d\n", gpuaddr, hoststart, sizedwords);
level--;

View File

@ -1612,7 +1612,7 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
}
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
if (event->handle)
dev_err(device->dev, " fence: [%p] %s\n",
dev_err(device->dev, " fence: [%pK] %s\n",
event->handle->fence,
event->handle->name);
else

View File

@ -1,4 +1,5 @@
/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
* reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -426,7 +427,7 @@ void kgsl_cffdump_syncmem(struct kgsl_device *device,
src = (uint *)kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr);
if (memdesc->hostptr == NULL) {
KGSL_CORE_ERR(
"no kernel map for gpuaddr: 0x%08x, m->host: 0x%p, phys: %pa\n",
"no kernel map for gpuaddr: 0x%08x, m->host: 0x%pK, phys: %pa\n",
gpuaddr, memdesc->hostptr, &memdesc->physaddr);
return;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2011,2013-2015, The Linux Foundation. All rights reserved.
/* Copyright (c) 2011,2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -651,8 +651,9 @@ kgsl_gpummu_unmap(struct kgsl_pagetable *pt,
#ifdef VERBOSE_DEBUG
/* check if PTE exists */
if (!kgsl_pt_map_get(gpummu_pt, pte))
KGSL_CORE_ERR("pt entry %x is already "
"unmapped for pagetable %p\n", pte, gpummu_pt);
KGSL_CORE_ERR(
"pt entry %x is already unmapped for pagetable %pK\n",
pte, gpummu_pt);
#endif
kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));

View File

@ -331,7 +331,7 @@ static int kgsl_iommu_fault_handler(struct iommu_domain *domain,
iommu_dev = get_iommu_device(iommu_unit, dev);
if (!iommu_dev) {
KGSL_CORE_ERR("Invalid IOMMU device %p\n", dev);
KGSL_CORE_ERR("Invalid IOMMU device %pK\n", dev);
ret = -ENOSYS;
goto done;
}
@ -730,8 +730,8 @@ static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
iommu_detach_device(iommu_pt->domain,
iommu_unit->dev[j].dev);
iommu_unit->dev[j].attached = false;
KGSL_MEM_INFO(mmu->device, "iommu %pK detached "
"from user dev of MMU: %pK\n",
KGSL_MEM_INFO(mmu->device,
"iommu %pK detached from user dev of MMU: %pK\n",
iommu_pt->domain, mmu);
}
}
@ -848,7 +848,7 @@ static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;
KGSL_DRV_INFO(mmu->device,
"Obtained dev handle %p for iommu context %s\n",
"Obtained dev handle %pK for iommu context %s\n",
iommu_unit->dev[iommu_unit->dev_count].dev,
data->iommu_ctxs[i].iommu_ctx_name);
@ -1787,9 +1787,9 @@ kgsl_iommu_unmap(struct kgsl_pagetable *pt,
ret = iommu_unmap_range(iommu_pt->domain, gpuaddr, range);
if (ret) {
KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
"with err: %d\n", iommu_pt->domain, gpuaddr,
range, ret);
KGSL_CORE_ERR(
"iommu_unmap_range(%pK, %x, %d) failed with err: %d\n",
iommu_pt->domain, gpuaddr, range, ret);
return ret;
}
@ -1816,7 +1816,7 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
ret = iommu_map_range(iommu_pt->domain, iommu_virt_addr, memdesc->sg,
size, protflags);
if (ret) {
KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %x) err: %d\n",
KGSL_CORE_ERR("iommu_map_range(%pK, %x, %pK, %d, %x) err: %d\n",
iommu_pt->domain, iommu_virt_addr, memdesc->sg, size,
protflags, ret);
return ret;
@ -1826,7 +1826,7 @@ kgsl_iommu_map(struct kgsl_pagetable *pt,
page_to_phys(kgsl_guard_page), PAGE_SIZE,
protflags & ~IOMMU_WRITE);
if (ret) {
KGSL_CORE_ERR("iommu_map(%p, %x, guard, %x) err: %d\n",
KGSL_CORE_ERR("iommu_map(%pK, %x, guard, %x) err: %d\n",
iommu_pt->domain, iommu_virt_addr + size,
protflags & ~IOMMU_WRITE,
ret);

View File

@ -1,4 +1,5 @@
/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2002,2007-2014,2016, The Linux Foundation. All rights
* reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -875,7 +876,7 @@ int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
/*global mappings must have the same gpu address in all pagetables*/
if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
KGSL_CORE_ERR("pt %p addr mismatch phys %pa gpu 0x%0x 0x%08x",
KGSL_CORE_ERR("pt %pK addr mismatch phys %pa gpu 0x%0x 0x%08x",
pagetable, &memdesc->physaddr, gpuaddr, memdesc->gpuaddr);
goto error_unmap;
}

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2010-2014,2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2002,2007-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2002,2007-2014,2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -540,7 +540,7 @@ static int kgsl_ebimem_map_kernel(struct kgsl_memdesc *memdesc)
if (!memdesc->hostptr) {
memdesc->hostptr = ioremap(memdesc->physaddr, memdesc->size);
if (!memdesc->hostptr) {
KGSL_CORE_ERR("ioremap failed, addr:0x%p, size:0x%x\n",
KGSL_CORE_ERR("ioremap failed, addr:0x%pK, size:0x%x\n",
memdesc->hostptr, memdesc->size);
ret = -ENOMEM;
goto done;
@ -617,7 +617,8 @@ _kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
size_t size)
{
int pcount = 0, ret = 0;
int j, len, page_size, sglen_alloc, sglen = 0;
int j, page_size, sglen_alloc, sglen = 0;
size_t len;
struct page **pages = NULL;
pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
void *ptr;

View File

@ -880,3 +880,4 @@ void kgsl_device_snapshot_close(struct kgsl_device *device)
device->snapshot_faultcount = 0;
}
EXPORT_SYMBOL(kgsl_device_snapshot_close);

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2014,2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and

View File

@ -22,6 +22,7 @@
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/firmware.h>
@ -294,6 +295,7 @@ struct synaptics_rmi4_fwu_handle {
static struct synaptics_rmi4_fwu_handle *fwu;
DECLARE_COMPLETION(fwu_remove_complete);
DEFINE_MUTEX(fwu_sysfs_mutex);
static unsigned int extract_uint(const unsigned char *ptr)
{
@ -1667,31 +1669,59 @@ static ssize_t fwu_sysfs_show_image(struct file *data_file,
char *buf, loff_t pos, size_t count)
{
struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
ssize_t retval;
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
if (count < fwu->config_size) {
dev_err(&rmi4_data->i2c_client->dev,
"%s: Not enough space (%zu bytes) in buffer\n",
__func__, count);
return -EINVAL;
retval = -EINVAL;
goto show_image_exit;
}
memcpy(buf, fwu->read_config_buf, fwu->config_size);
return fwu->config_size;
retval = fwu->config_size;
show_image_exit:
mutex_unlock(&fwu_sysfs_mutex);
return retval;
}
static ssize_t fwu_sysfs_store_image(struct file *data_file,
struct kobject *kobj, struct bin_attribute *attributes,
char *buf, loff_t pos, size_t count)
{
ssize_t retval;
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
if (!fwu->ext_data_source) {
dev_err(&fwu->rmi4_data->i2c_client->dev,
"Cannot use this without setting imagesize!\n");
retval = -EAGAIN;
goto exit;
}
if (count > fwu->image_size - fwu->data_pos) {
dev_err(&fwu->rmi4_data->i2c_client->dev,
"%s: Not enough space in buffer\n",
__func__);
retval = -EINVAL;
goto exit;
}
memcpy((void *)(&fwu->ext_data_source[fwu->data_pos]),
(const void *)buf,
count);
fwu->data_buffer = fwu->ext_data_source;
fwu->data_pos += count;
return count;
retval = count;
exit:
mutex_unlock(&fwu_sysfs_mutex);
return retval;
}
static ssize_t fwu_sysfs_image_name_store(struct device *dev,
@ -1714,18 +1744,29 @@ static ssize_t fwu_sysfs_image_name_store(struct device *dev,
return -EINVAL;
}
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
strlcpy(rmi4_data->fw_image_name, buf, count);
mutex_unlock(&fwu_sysfs_mutex);
return count;
}
static ssize_t fwu_sysfs_image_name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t retval;
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
if (strnlen(fwu->rmi4_data->fw_image_name, NAME_BUFFER_SIZE) > 0)
return snprintf(buf, PAGE_SIZE, "%s\n",
retval = snprintf(buf, PAGE_SIZE, "%s\n",
fwu->rmi4_data->fw_image_name);
else
return snprintf(buf, PAGE_SIZE, "No firmware name given\n");
retval = snprintf(buf, PAGE_SIZE, "No firmware name given\n");
mutex_unlock(&fwu_sysfs_mutex);
return retval;
}
static ssize_t fwu_sysfs_force_reflash_store(struct device *dev,
@ -1735,10 +1776,12 @@ static ssize_t fwu_sysfs_force_reflash_store(struct device *dev,
unsigned int input;
struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
if (sscanf(buf, "%u", &input) != 1) {
retval = -EINVAL;
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
retval = kstrtouint(buf, 10, &input);
if (retval)
goto exit;
}
if (input != 1) {
retval = -EINVAL;
@ -1762,7 +1805,10 @@ exit:
kfree(fwu->ext_data_source);
fwu->ext_data_source = NULL;
fwu->force_update = FORCE_UPDATE;
fwu->image_size = 0;
fwu->data_pos = 0;
fwu->do_lockdown = rmi4_data->board->do_lockdown;
mutex_unlock(&fwu_sysfs_mutex);
return retval;
}
@ -1773,10 +1819,12 @@ static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
unsigned int input;
struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
if (sscanf(buf, "%u", &input) != 1) {
retval = -EINVAL;
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
retval = kstrtouint(buf, 10, &input);
if (retval)
goto exit;
}
if (input & LOCKDOWN) {
fwu->do_lockdown = true;
@ -1804,8 +1852,11 @@ static ssize_t fwu_sysfs_do_reflash_store(struct device *dev,
exit:
kfree(fwu->ext_data_source);
fwu->ext_data_source = NULL;
fwu->image_size = 0;
fwu->data_pos = 0;
fwu->force_update = FORCE_UPDATE;
fwu->do_lockdown = rmi4_data->board->do_lockdown;
mutex_unlock(&fwu_sysfs_mutex);
return retval;
}
@ -1816,6 +1867,9 @@ static ssize_t fwu_sysfs_write_lockdown_store(struct device *dev,
unsigned int input;
struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
if (sscanf(buf, "%u", &input) != 1) {
retval = -EINVAL;
goto exit;
@ -1841,6 +1895,9 @@ exit:
fwu->ext_data_source = NULL;
fwu->force_update = FORCE_UPDATE;
fwu->do_lockdown = rmi4_data->board->do_lockdown;
fwu->image_size = 0;
fwu->data_pos = 0;
mutex_unlock(&fwu_sysfs_mutex);
return retval;
}
@ -1851,10 +1908,12 @@ static ssize_t fwu_sysfs_write_config_store(struct device *dev,
unsigned int input;
struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
if (sscanf(buf, "%u", &input) != 1) {
retval = -EINVAL;
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
retval = kstrtouint(buf, 10, &input);
if (retval)
goto exit;
}
if (input != 1) {
retval = -EINVAL;
@ -1874,6 +1933,9 @@ static ssize_t fwu_sysfs_write_config_store(struct device *dev,
exit:
kfree(fwu->ext_data_source);
fwu->ext_data_source = NULL;
fwu->image_size = 0;
fwu->data_pos = 0;
mutex_unlock(&fwu_sysfs_mutex);
return retval;
}
@ -1890,7 +1952,11 @@ static ssize_t fwu_sysfs_read_config_store(struct device *dev,
if (input != 1)
return -EINVAL;
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
retval = fwu_do_read_config();
mutex_unlock(&fwu_sysfs_mutex);
if (retval < 0) {
dev_err(&rmi4_data->i2c_client->dev,
"%s: Failed to read config\n",
@ -1919,7 +1985,10 @@ static ssize_t fwu_sysfs_config_area_store(struct device *dev,
return -EINVAL;
}
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
fwu->config_area = config_area;
mutex_unlock(&fwu_sysfs_mutex);
return count;
}
@ -1929,25 +1998,28 @@ static ssize_t fwu_sysfs_image_size_store(struct device *dev,
{
int retval;
unsigned long size;
struct synaptics_rmi4_data *rmi4_data = fwu->rmi4_data;
retval = kstrtoul(buf, 10, &size);
if (retval)
return retval;
if (!mutex_trylock(&fwu_sysfs_mutex))
return -EBUSY;
fwu->image_size = size;
fwu->data_pos = 0;
kfree(fwu->ext_data_source);
fwu->ext_data_source = kzalloc(fwu->image_size, GFP_KERNEL);
if (!fwu->ext_data_source) {
dev_err(&rmi4_data->i2c_client->dev,
"%s: Failed to alloc mem for image data\n",
__func__);
return -ENOMEM;
retval = -ENOMEM;
goto exit;
}
return count;
retval = count;
exit:
mutex_unlock(&fwu_sysfs_mutex);
return retval;
}
static ssize_t fwu_sysfs_block_size_show(struct device *dev,
@ -2111,7 +2183,9 @@ static struct device_attribute attrs[] = {
static void synaptics_rmi4_fwu_work(struct work_struct *work)
{
mutex_lock(&fwu_sysfs_mutex);
fwu_start_reflash();
mutex_unlock(&fwu_sysfs_mutex);
}
static int synaptics_rmi4_fwu_init(struct synaptics_rmi4_data *rmi4_data)

View File

@ -24,15 +24,11 @@ int msm_isp_axi_create_stream(
struct msm_vfe_axi_shared_data *axi_data,
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
{
int i, rc = -1;
for (i = 0; i < MAX_NUM_STREAM; i++) {
if (axi_data->stream_info[i].state == AVALIABLE)
break;
}
if (i == MAX_NUM_STREAM) {
pr_err("%s: No free stream\n", __func__);
return rc;
uint32_t i = stream_cfg_cmd->stream_src;
if (i >= VFE_AXI_SRC_MAX) {
pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
stream_cfg_cmd->stream_src);
return -EINVAL;
}
if ((axi_data->stream_handle_cnt << 8) == 0)

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2013, The Linux Foundation. All rights reserved.
/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -893,9 +893,15 @@ static irqreturn_t msm_io_ispif_irq(int irq_num, void *data)
static int msm_ispif_set_vfe_info(struct ispif_device *ispif,
struct msm_ispif_vfe_info *vfe_info)
{
memcpy(&ispif->vfe_info, vfe_info, sizeof(struct msm_ispif_vfe_info));
if (ispif->vfe_info.num_vfe > ispif->hw_num_isps)
if (!vfe_info || (vfe_info->num_vfe <= 0) ||
((uint32_t)(vfe_info->num_vfe) > ispif->hw_num_isps)) {
pr_err("Invalid VFE info: %p %d\n", vfe_info,
(vfe_info ? vfe_info->num_vfe:0));
return -EINVAL;
}
memcpy(&ispif->vfe_info, vfe_info, sizeof(struct msm_ispif_vfe_info));
return 0;
}

View File

@ -1481,22 +1481,26 @@ ERROR0:
long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
struct cpp_device *cpp_dev = NULL;
struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
int rc = 0;
if (ioctl_ptr == NULL) {
pr_err("ioctl_ptr is null\n");
if ((sd == NULL) || (ioctl_ptr == NULL)) {
pr_err("Wrong ioctl_ptr %p, sd %p\n", ioctl_ptr, sd);
return -EINVAL;
}
//Qualcomm Patch :: when cpp_dev is null, return
if (cpp_dev == NULL) {
pr_err("cpp_dev is null\n");
if ((ioctl_ptr->ioctl_ptr == NULL) || (ioctl_ptr->len == 0)) {
pr_err("ioctl_ptr OR ioctl_ptr->len is NULL %p %d\n",
ioctl_ptr, ioctl_ptr->len);
return -EINVAL;
}
if (_IOC_DIR(cmd) == _IOC_NONE) {
pr_err("Invalid ioctl/subdev cmd %u", cmd);
pr_err("Invalid ioctl/subdev cmd %u\n", cmd);
return -EINVAL;
}
cpp_dev = v4l2_get_subdevdata(sd);
if (cpp_dev == NULL) {
pr_err("cpp_dev is null\n");
return -EINVAL;
}
mutex_lock(&cpp_dev->mutex);

View File

@ -85,12 +85,12 @@ static int32_t msm_actuator_piezo_set_default_focus(
static void msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl,
int16_t next_lens_position, uint32_t hw_params, uint16_t delay)
{
struct msm_actuator_reg_params_t *write_arr = a_ctrl->reg_tbl;
struct msm_actuator_reg_params_t *write_arr = NULL;
uint32_t hw_dword = hw_params;
uint16_t i2c_byte1 = 0, i2c_byte2 = 0;
uint16_t value = 0;
uint32_t size = a_ctrl->reg_tbl_size, i = 0;
struct msm_camera_i2c_reg_array *i2c_tbl = a_ctrl->i2c_reg_tbl;
uint32_t size = 0, i = 0;
struct msm_camera_i2c_reg_array *i2c_tbl = NULL;
struct msm_camera_i2c_seq_reg_array *i2c_seq = a_ctrl->i2c_reg_seq_tbl;
if (a_ctrl->i2c_reg_tbl == NULL) {
@ -98,6 +98,16 @@ static void msm_actuator_parse_i2c_params(struct msm_actuator_ctrl_t *a_ctrl,
return;
}
CDBG("Enter\n");
if (a_ctrl == NULL) {
pr_err("failed. actuator ctrl is NULL");
return;
}
size = a_ctrl->reg_tbl_size;
write_arr = a_ctrl->reg_tbl;
i2c_tbl = a_ctrl->i2c_reg_tbl;
for (i = 0; i < size; i++) {
if (write_arr[i].reg_write_type == MSM_ACTUATOR_WRITE_DAC_SEQ) {
value = (next_lens_position <<
@ -1255,6 +1265,12 @@ static int32_t msm_actuator_set_position(
if (set_pos->number_of_steps == 0)
return rc;
if (!a_ctrl || !a_ctrl->func_tbl ||
!a_ctrl->func_tbl->actuator_parse_i2c_params) {
pr_err("failed. NULL actuator pointers.");
return -EFAULT;
}
a_ctrl->i2c_tbl_index = 0;
for (index = 0; index < set_pos->number_of_steps; index++) {
msb = ((set_pos->pos[index] << 7) & 0x80);
@ -1360,6 +1376,12 @@ static int32_t msm_actuator_hvcm_set_position(
if (set_pos->number_of_steps == 0)
return rc;
if (!a_ctrl || !a_ctrl->func_tbl ||
!a_ctrl->func_tbl->actuator_parse_i2c_params) {
pr_err("failed. NULL actuator pointers.");
return -EFAULT;
}
a_ctrl->i2c_tbl_index = 0;
for (index = 0; index < set_pos->number_of_steps; index++) {
msb = (set_pos->pos[index]>>8)&0x00ff;

View File

@ -201,6 +201,11 @@ int32_t msm_camera_cci_i2c_write_seq(struct msm_camera_i2c_client *client,
&& client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
|| num_byte == 0)
return rc;
if (num_byte > I2C_SEQ_REG_DATA_MAX) {
pr_err("%s: num_byte=%d clamped to max supported %d\n",
__func__, num_byte, I2C_SEQ_REG_DATA_MAX);
return rc;
}
S_I2C_DBG("%s reg addr = 0x%x num bytes: %d\n",
__func__, addr, num_byte);
@ -375,6 +380,12 @@ int32_t msm_camera_cci_i2c_write_seq_table(
client_addr_type = client->addr_type;
client->addr_type = write_setting->addr_type;
if (reg_setting->reg_data_size > I2C_SEQ_REG_DATA_MAX) {
pr_err("%s: number of bytes %u exceeding the max supported %d\n",
__func__, reg_setting->reg_data_size, I2C_SEQ_REG_DATA_MAX);
return rc;
}
if (reg_setting->reg_data_size > I2C_SEQ_REG_DATA_MAX) {
pr_err("%s: number of bytes %u exceeding the max supported %d\n",
__func__, reg_setting->reg_data_size, I2C_SEQ_REG_DATA_MAX);

View File

@ -82,14 +82,14 @@ int32_t msm_camera_qup_i2c_read(struct msm_camera_i2c_client *client,
return rc;
if (client->addr_type > UINT_MAX - data_type) {
pr_err("%s: integer overflow prevented\n", __func__);
return rc;
pr_err("%s: integer overflow prevented\n", __func__);
return rc;
}
buf = kzalloc(client->addr_type+data_type, GFP_KERNEL);
if (!buf) {
pr_err("%s:%d no memory\n", __func__, __LINE__);
return -ENOMEM;
pr_err("%s:%d no memory\n", __func__, __LINE__);
return -ENOMEM;
}
if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
@ -130,21 +130,19 @@ int32_t msm_camera_qup_i2c_read_seq(struct msm_camera_i2c_client *client,
return rc;
if (num_byte > I2C_REG_DATA_MAX) {
pr_err("%s: Error num_byte:0x%x exceeds 8K max supported:0x%x\n",
__func__, num_byte, I2C_REG_DATA_MAX);
return rc;
pr_err("%s: Error num_byte:0x%x exceeds 8K max supported:0x%x\n",
__func__, num_byte, I2C_REG_DATA_MAX);
return rc;
}
if (client->addr_type > UINT_MAX - num_byte) {
pr_err("%s: integer overflow prevented\n", __func__);
return rc;
pr_err("%s: integer overflow prevented\n", __func__);
return rc;
}
buf = kzalloc(client->addr_type+num_byte, GFP_KERNEL);
if (!buf) {
pr_err("%s:%d no memory\n", __func__, __LINE__);
return -ENOMEM;
pr_err("%s:%d no memory\n", __func__, __LINE__);
return -ENOMEM;
}
if (client->addr_type == MSM_CAMERA_I2C_BYTE_ADDR) {
buf[0] = addr;
} else if (client->addr_type == MSM_CAMERA_I2C_WORD_ADDR) {

View File

@ -1302,8 +1302,8 @@ u32 hfi_process_msg_packet(
{
u32 rc = 0;
struct hal_session *sess = NULL;
if (!callback || !msg_hdr || msg_hdr->size <
VIDC_IFACEQ_MIN_PKT_SIZE) {
if (!callback || !session_lock || !msg_hdr ||
msg_hdr->size < VIDC_IFACEQ_MIN_PKT_SIZE) {
dprintk(VIDC_ERR, "hal_process_msg_packet:bad"
"packet/packet size: %d", msg_hdr->size);
rc = -EINVAL;

View File

@ -934,7 +934,7 @@ int msm_vdec_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a)
if ((fps % 15 == 14) || (fps % 24 == 23))
fps = fps + 1;
else if ((fps % 24 == 1) || (fps % 15 == 1))
else if ((fps > 1) && ((fps % 24 == 1) || (fps % 15 == 1)))
fps = fps - 1;
if (inst->prop.fps != fps) {

View File

@ -2595,7 +2595,7 @@ int msm_venc_s_parm(struct msm_vidc_inst *inst, struct v4l2_streamparm *a)
if ((fps % 15 == 14) || (fps % 24 == 23))
fps = fps + 1;
else if ((fps % 24 == 1) || (fps % 15 == 1))
else if ((fps > 1) && ((fps % 24 == 1) || (fps % 15 == 1)))
fps = fps - 1;
if (inst->prop.fps != fps) {

View File

@ -1365,7 +1365,6 @@ int msm_vidc_close(void *instance)
mutex_unlock(&inst->registeredbufs.lock);
core = inst->core;
msm_comm_session_clean(inst);
mutex_lock(&core->lock);
list_for_each_entry_safe(temp, inst_dummy, &core->instances, list) {
@ -1392,6 +1391,8 @@ int msm_vidc_close(void *instance)
dprintk(VIDC_ERR,
"Failed to move video instance to uninit state\n");
msm_comm_session_clean(inst);
msm_smem_delete_client(inst->mem_client);
pr_info(VIDC_DBG_TAG "Closed video instance: %pK\n", VIDC_INFO, inst);
kfree(inst);

View File

@ -140,8 +140,13 @@ static int msm_comm_get_inst_load(struct msm_vidc_inst *inst,
}
if (is_non_realtime_session(inst) &&
(quirks & LOAD_CALC_IGNORE_NON_REALTIME_LOAD))
load = msm_comm_get_mbs_per_sec(inst) / inst->prop.fps;
(quirks & LOAD_CALC_IGNORE_NON_REALTIME_LOAD)) {
if (!inst->prop.fps) {
dprintk(VIDC_INFO, "%s: instance:%p prop->fps is set 0\n", __func__, inst);
load = 0;
} else
load = msm_comm_get_mbs_per_sec(inst) / inst->prop.fps;
}
return load;
}
@ -999,7 +1004,6 @@ static void handle_session_close(enum command_response cmd, void *data)
dprintk(VIDC_ERR, "%s invalid params\n", __func__);
return;
}
msm_comm_session_clean(inst);
signal_session_msg_receipt(cmd, inst);
show_stats(inst);
} else {

View File

@ -136,6 +136,11 @@ static void venus_hfi_sim_modify_cmd_packet(u8 *packet)
sys_init = (struct hfi_cmd_sys_session_init_packet *)packet;
sess = (struct hal_session *) sys_init->session_id;
if (!sess) {
dprintk(VIDC_DBG, "%s :Invalid session id : %x\n",
__func__, sys_init->session_id);
return;
}
switch (sys_init->packet_type) {
case HFI_CMD_SESSION_EMPTY_BUFFER:
if (sess->is_decoder) {
@ -1455,7 +1460,7 @@ static inline int venus_hfi_clk_gating_off(struct venus_hfi_device *device)
{
int rc = 0;
if (!device) {
dprintk(VIDC_ERR, "Invalid params: %p\n", device);
dprintk(VIDC_ERR, "Invalid params: %pK\n", device);
return -EINVAL;
}
if (device->clk_state == ENABLED_PREPARED) {
@ -1978,7 +1983,6 @@ static int venus_hfi_core_init(void *device)
VENUS_SET_STATE(dev, VENUS_STATE_INIT);
dev->intr_status = 0;
INIT_LIST_HEAD(&dev->sess_head);
venus_hfi_set_registers(dev);
if (!dev->hal_client) {
@ -2497,19 +2501,19 @@ static int venus_hfi_session_abort(void *session)
static int venus_hfi_session_clean(void *session)
{
struct hal_session *sess_close;
struct venus_hfi_device *device;
if (!session) {
dprintk(VIDC_ERR, "Invalid Params %s", __func__);
return -EINVAL;
}
sess_close = session;
device = sess_close->device;
dprintk(VIDC_DBG, "deleted the session: 0x%p",
sess_close);
mutex_lock(&((struct venus_hfi_device *)
sess_close->device)->session_lock);
mutex_lock(&device->session_lock);
list_del(&sess_close->list);
mutex_unlock(&((struct venus_hfi_device *)
sess_close->device)->session_lock);
kfree(sess_close);
mutex_unlock(&device->session_lock);
return 0;
}
@ -3926,6 +3930,7 @@ static void *venus_hfi_add_device(u32 device_id,
mutex_init(&hdevice->session_lock);
mutex_init(&hdevice->clk_pwr_lock);
INIT_LIST_HEAD(&hdevice->sess_head);
if (hal_ctxt.dev_count == 0)
INIT_LIST_HEAD(&hal_ctxt.dev_head);

View File

@ -181,6 +181,7 @@ struct qseecom_control {
struct work_struct bw_inactive_req_ws;
struct cdev cdev;
bool timer_running;
bool appsbl_qseecom_support;
};
struct qseecom_client_handle {
@ -3146,6 +3147,7 @@ static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
struct qseecom_check_app_ireq req;
struct qseecom_registered_app_list *entry = NULL;
unsigned long flags = 0;
bool found_app = false;
/* Copy the relevant information needed for loading the image */
if (copy_from_user(&query_req,
@ -3172,6 +3174,7 @@ static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
&qseecom.registered_app_list_head, list){
if (entry->app_id == ret) {
entry->ref_cnt++;
found_app = true;
break;
}
}
@ -3181,6 +3184,29 @@ static int qseecom_query_app_loaded(struct qseecom_dev_handle *data,
query_req.app_id = ret;
strlcpy(data->client.app_name, query_req.app_name,
MAX_APP_NAME_SIZE);
/*
* If app was loaded by appsbl or kernel client before
* and was not registered, regiser this app now.
*/
if (!found_app) {
pr_debug("Register app %d [%s] which was loaded before\n",
ret, (char *)query_req.app_name);
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
pr_err("kmalloc for app entry failed\n");
return -ENOMEM;
}
entry->app_id = ret;
entry->ref_cnt = 1;
strlcpy(entry->app_name, data->client.app_name,
MAX_APP_NAME_SIZE);
spin_lock_irqsave(&qseecom.registered_app_list_lock,
flags);
list_add_tail(&entry->list,
&qseecom.registered_app_list_head);
spin_unlock_irqrestore(
&qseecom.registered_app_list_lock, flags);
}
if (copy_to_user(argp, &query_req, sizeof(query_req))) {
pr_err("copy_to_user failed\n");
return -EFAULT;
@ -4594,6 +4620,12 @@ static int __devinit qseecom_probe(struct platform_device *pdev)
qseecom.ce_info.qsee_ce_hw_instance);
}
qseecom.appsbl_qseecom_support =
of_property_read_bool((&pdev->dev)->of_node,
"qcom,appsbl-qseecom-support");
pr_info("qseecom.appsbl_qseecom_support = 0x%x",
qseecom.appsbl_qseecom_support);
qseecom.qsee.instance = qseecom.ce_info.qsee_ce_hw_instance;
qseecom.ce_drv.instance = qseecom.ce_info.hlos_ce_hw_instance;
@ -4620,7 +4652,8 @@ static int __devinit qseecom_probe(struct platform_device *pdev)
qseecom_platform_support = (struct msm_bus_scale_pdata *)
msm_bus_cl_get_pdata(pdev);
if (qseecom.qsee_version >= (QSEE_VERSION_02)) {
if (qseecom.qsee_version >= (QSEE_VERSION_02) &&
!qseecom.appsbl_qseecom_support) {
struct resource *resource = NULL;
struct qsee_apps_region_info_ireq req;
struct qseecom_command_scm_resp resp;

View File

@ -2907,6 +2907,7 @@ out:
* (all existing requests completed or reinserted to the block layer)
*/
if ((!req && !(test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags))) ||
(cmd_flags & MMC_REQ_SPECIAL_MASK) ||
((test_bit(MMC_QUEUE_URGENT_REQUEST, &mq->flags)) &&
!(cmd_flags & MMC_REQ_NOREINSERT_MASK))) {
if (mmc_card_need_bkops(card))

View File

@ -24,6 +24,8 @@
#define MMC_QUEUE_BOUNCESZ 65536
#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
/*
* Based on benchmark tests the default num of requests to trigger the write
* packing was determined, to keep the read latency as low as possible and
@ -66,6 +68,7 @@ static int mmc_queue_thread(void *d)
do {
struct mmc_queue_req *tmp;
struct request *req = NULL;
unsigned int cmd_flags = 0;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
@ -75,12 +78,13 @@ static int mmc_queue_thread(void *d)
if (req || mq->mqrq_prev->req) {
set_current_state(TASK_RUNNING);
cmd_flags = req ? req->cmd_flags : 0;
mq->issue_fn(mq, req);
if (test_bit(MMC_QUEUE_NEW_REQUEST, &mq->flags)) {
continue; /* fetch again */
} else if (test_bit(MMC_QUEUE_URGENT_REQUEST,
&mq->flags) && (mq->mqrq_cur->req &&
!(mq->mqrq_cur->req->cmd_flags &
!(cmd_flags &
MMC_REQ_NOREINSERT_MASK))) {
/*
* clean current request when urgent request
@ -95,7 +99,13 @@ static int mmc_queue_thread(void *d)
/*
* Current request becomes previous request
* and vice versa.
* In case of special requests, current request
* has been finished. Do not assign it to previous
* request.
*/
if (cmd_flags & MMC_REQ_SPECIAL_MASK)
mq->mqrq_cur->req = NULL;
mq->mqrq_prev->brq.mrq.data = NULL;
mq->mqrq_prev->req = NULL;
tmp = mq->mqrq_prev;

View File

@ -1,6 +1,8 @@
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H
#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
struct request;
struct task_struct;

View File

@ -2177,7 +2177,7 @@ int ppp_register_net_channel(struct net *net, struct ppp_channel *chan)
pch->ppp = NULL;
pch->chan = chan;
pch->chan_net = net;
pch->chan_net = get_net(net);
chan->ppp = pch;
init_ppp_file(&pch->file, CHANNEL);
pch->file.hdrlen = chan->hdrlen;
@ -2274,6 +2274,8 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
put_net(pch->chan_net);
pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);

View File

@ -307,7 +307,8 @@ int avcs_core_query_timer(uint64_t *avtimer_tick)
| avtimer_lsw;
res = do_div(avtimer_tick_temp, avtimer.clk_div);
*avtimer_tick = avtimer_tick_temp;
pr_debug("%s:Avtimer: msw: %u, lsw: %u, tick: %llu\n", __func__,
pr_debug_ratelimited("%s:Avtimer: msw: %u, lsw: %u, tick: %llu\n",
__func__,
avtimer_msw, avtimer_lsw, *avtimer_tick);
return 0;
}
@ -332,21 +333,11 @@ static long avtimer_ioctl(struct file *file, unsigned int ioctl_num,
switch (ioctl_num) {
case IOCTL_GET_AVTIMER_TICK:
{
uint32_t avtimer_msw_1st = 0, avtimer_lsw = 0;
uint32_t avtimer_msw_2nd = 0;
uint64_t avtimer_tick;
do {
avtimer_msw_1st = ioread32(avtimer.p_avtimer_msw);
avtimer_lsw = ioread32(avtimer.p_avtimer_lsw);
avtimer_msw_2nd = ioread32(avtimer.p_avtimer_msw);
} while (avtimer_msw_1st != avtimer_msw_2nd);
avtimer_lsw = avtimer_lsw/avtimer.clk_div;
avtimer_tick =
((uint64_t) avtimer_msw_1st << 32) | avtimer_lsw;
pr_debug("%s: AV Timer tick: msw: %x, lsw: %x time %llx\n",
__func__, avtimer_msw_1st, avtimer_lsw, avtimer_tick);
avcs_core_query_timer(&avtimer_tick);
pr_debug_ratelimited("%s: AV Timer tick: time %llx\n",
__func__, avtimer_tick);
if (copy_to_user((void *) ioctl_param, &avtimer_tick,
sizeof(avtimer_tick))) {
pr_err("copy_to_user failed\n");

View File

@ -2206,6 +2206,7 @@ EXPORT_SYMBOL(sps_register_bam_device);
int sps_deregister_bam_device(u32 dev_handle)
{
struct sps_bam *bam;
int n;
SPS_DBG2("sps:%s.", __func__);
@ -2222,6 +2223,12 @@ int sps_deregister_bam_device(u32 dev_handle)
SPS_DBG2("sps:SPS deregister BAM: phys 0x%x.", bam->props.phys_addr);
if (bam->props.options & SPS_BAM_HOLD_MEM) {
for (n = 0; n < BAM_MAX_PIPES; n++)
if (bam->desc_cache_pointers[n] != NULL)
kfree(bam->desc_cache_pointers[n]);
}
/* If this BAM is attached to a BAM-DMA, init the BAM-DMA device */
#ifdef CONFIG_SPS_SUPPORT_BAMDMA
if ((bam->props.options & SPS_BAM_OPT_BAMDMA)) {

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2011-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -959,10 +959,15 @@ int sps_bam_pipe_disconnect(struct sps_bam *dev, u32 pipe_index)
bam_pipe_exit(dev->base, pipe_index, dev->props.ee);
if (pipe->sys.desc_cache != NULL) {
u32 size = pipe->num_descs * sizeof(void *);
if (pipe->desc_size + size <= PAGE_SIZE)
kfree(pipe->sys.desc_cache);
else
if (pipe->desc_size + size <= PAGE_SIZE) {
if (dev->props.options & SPS_BAM_HOLD_MEM)
memset(pipe->sys.desc_cache, 0,
pipe->desc_size + size);
else
kfree(pipe->sys.desc_cache);
} else {
vfree(pipe->sys.desc_cache);
}
pipe->sys.desc_cache = NULL;
}
dev->pipes[pipe_index] = BAM_PIPE_UNASSIGNED;
@ -1086,30 +1091,43 @@ int sps_bam_pipe_set_params(struct sps_bam *dev, u32 pipe_index, u32 options)
/* Allocate both descriptor cache and user pointer array */
size = pipe->num_descs * sizeof(void *);
if (pipe->desc_size + size <= PAGE_SIZE)
pipe->sys.desc_cache =
kzalloc(pipe->desc_size + size, GFP_KERNEL);
else {
if (pipe->desc_size + size <= PAGE_SIZE) {
if ((dev->props.options &
SPS_BAM_HOLD_MEM)) {
if (dev->desc_cache_pointers[pipe_index]) {
pipe->sys.desc_cache =
dev->desc_cache_pointers
[pipe_index];
} else {
pipe->sys.desc_cache =
kzalloc(pipe->desc_size + size,
GFP_KERNEL);
dev->desc_cache_pointers[pipe_index] =
pipe->sys.desc_cache;
}
} else {
pipe->sys.desc_cache =
kzalloc(pipe->desc_size + size,
GFP_KERNEL);
}
if (pipe->sys.desc_cache == NULL) {
SPS_ERR("sps:No memory for pipe%d of BAM 0x%x\n",
pipe_index, BAM_ID(dev));
return -ENOMEM;
}
} else {
pipe->sys.desc_cache =
vmalloc(pipe->desc_size + size);
if (pipe->sys.desc_cache == NULL) {
SPS_ERR(
"sps:No memory for pipe %d of BAM 0x%x\n",
pipe_index, BAM_ID(dev));
SPS_ERR("sps:No memory for pipe %d of BAM 0x%x\n",
pipe_index, BAM_ID(dev));
return -ENOMEM;
}
memset(pipe->sys.desc_cache, 0, pipe->desc_size + size);
}
if (pipe->sys.desc_cache == NULL) {
/*** MUST BE LAST POINT OF FAILURE (see below) *****/
SPS_ERR("sps:Desc cache error: BAM 0x%x pipe %d: %d\n",
BAM_ID(dev), pipe_index,
pipe->desc_size + size);
return SPS_ERROR;
}
pipe->sys.user_ptrs = (void **)(pipe->sys.desc_cache +
pipe->desc_size);
pipe->sys.cache_offset = pipe->sys.acked_offset;

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2011-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2011-2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -214,6 +214,9 @@ struct sps_bam {
u32 irq_from_disabled_pipe;
u32 event_trigger_failures;
/* Desc cache pointers */
u8 *desc_cache_pointers[BAM_MAX_PIPES];
};
/**

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -264,6 +264,12 @@ int spmi_add_device(struct spmi_device *spmidev)
return id;
}
id = ida_simple_get(&spmi_devid_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
pr_err("No id available status = %d\n", id);
return id;
}
/* Set the device name */
spmidev->id = id;
dev_set_name(dev, "%s-%d", spmidev->name, spmidev->id);

View File

@ -185,6 +185,7 @@ EXPORT_SYMBOL_GPL(ehci_cf_port_reset_rwsem);
#define HUB_DEBOUNCE_STABLE 100
static void hub_release(struct kref *kref);
static int usb_reset_and_verify_device(struct usb_device *udev);
static inline char *portspeed(struct usb_hub *hub, int portstatus)
@ -758,10 +759,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
unsigned delay;
/* Continue a partial initialization */
if (type == HUB_INIT2)
goto init2;
if (type == HUB_INIT3)
if (type == HUB_INIT2 || type == HUB_INIT3) {
device_lock(hub->intfdev);
/* Was the hub disconnected while we were waiting? */
if (hub->disconnected) {
device_unlock(hub->intfdev);
kref_put(&hub->kref, hub_release);
return;
}
if (type == HUB_INIT2)
goto init2;
goto init3;
}
kref_get(&hub->kref);
/* The superspeed hub except for root hub has to use Hub Depth
* value as an offset into the route string to locate the bits
@ -955,6 +966,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
PREPARE_DELAYED_WORK(&hub->init_work, hub_init_func3);
schedule_delayed_work(&hub->init_work,
msecs_to_jiffies(delay));
device_unlock(hub->intfdev);
return; /* Continues at init3: below */
} else {
msleep(delay);
@ -975,6 +987,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
/* Allow autosuspend if it was suppressed */
if (type <= HUB_INIT3)
usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
if (type == HUB_INIT2 || type == HUB_INIT3)
device_unlock(hub->intfdev);
kref_put(&hub->kref, hub_release);
}
/* Implement the continuations for the delays above */

View File

@ -660,7 +660,7 @@ static ssize_t dwc3_store_ep_num(struct file *file, const char __user *ubuf,
struct seq_file *s = file->private_data;
struct dwc3 *dwc = s->private;
char kbuf[10];
unsigned int num, dir;
unsigned int num, dir, temp;
unsigned long flags;
memset(kbuf, 0, 10);
@ -671,8 +671,15 @@ static ssize_t dwc3_store_ep_num(struct file *file, const char __user *ubuf,
if (sscanf(kbuf, "%u %u", &num, &dir) != 2)
return -EINVAL;
if (dir != 0 && dir != 1)
return -EINVAL;
temp = (num << 1) + dir;
if (temp >= DWC3_ENDPOINTS_NUM)
return -EINVAL;
spin_lock_irqsave(&dwc->lock, flags);
ep_num = (num << 1) + dir;
ep_num = temp;
spin_unlock_irqrestore(&dwc->lock, flags);
return count;

View File

@ -2305,7 +2305,8 @@ static ssize_t audio_source_pcm_show(struct device *dev,
struct audio_source_config *config = f->config;
/* print PCM card and device numbers */
return sprintf(buf, "%d %d\n", config->card, config->device);
return snprintf(buf, PAGE_SIZE,
"%d %d\n", config->card, config->device);
}
static DEVICE_ATTR(pcm, S_IRUGO | S_IWUSR, audio_source_pcm_show, NULL);

View File

@ -705,6 +705,11 @@ static int set_config(struct usb_composite_dev *cdev,
union power_supply_propval value;
int main_type = POWER_SUPPLY_TYPE_USB_DCP;
if (!f->ss_descriptors) {
pr_err("%s(): No SS desc for function:%s\n",
__func__, f->name);
return -EINVAL;
}
descriptors = f->ss_descriptors;
if (!psy) {
pr_err("%s: fail to get %s psy\n", __func__, PSY_CHG_NAME);

View File

@ -995,6 +995,12 @@ int f_midi_bind_config(struct usb_configuration *c,
config->device = midi->rmidi->device;
}
if (config) {
config->card = midi->rmidi->card->number;
config->device = midi->rmidi->device;
}
return 0;
setup_fail:

View File

@ -79,7 +79,7 @@ static int generic_get_cmd(struct usb_audio_control *con, u8 cmd);
#define F_AUDIO_NUM_INTERFACES 2
/* B.3.1 Standard AC Interface Descriptor */
struct usb_interface_descriptor ac_interface_desc = {
struct usb_interface_descriptor uac1_ac_interface_desc = {
.bLength = USB_DT_INTERFACE_SIZE,
.bDescriptorType = USB_DT_INTERFACE,
.bNumEndpoints = 0,
@ -96,7 +96,7 @@ struct usb_interface_descriptor ac_interface_desc = {
)
/* B.3.2 Class-Specific AC Interface Descriptor */
struct uac1_ac_header_descriptor_2 ac_header_desc = {
struct uac1_ac_header_descriptor_2 uac1_ac_header_desc = {
.bLength = UAC_DT_AC_HEADER_SIZE(2),
.bDescriptorType = USB_DT_CS_INTERFACE,
.bDescriptorSubtype = UAC_HEADER,
@ -361,8 +361,8 @@ static struct usb_audio_control_selector microphone_as_iso_in = {
/*--------------------------------- */
static struct usb_descriptor_header *f_audio_desc[] = {
(struct usb_descriptor_header *)&ac_interface_desc,
(struct usb_descriptor_header *)&ac_header_desc,
(struct usb_descriptor_header *)&uac1_ac_interface_desc,
(struct usb_descriptor_header *)&uac1_ac_header_desc,
(struct usb_descriptor_header *)&microphone_input_terminal_desc,
(struct usb_descriptor_header *)&microphone_output_terminal_desc,
@ -887,9 +887,9 @@ static int f_audio_get_alt(struct usb_function *f, unsigned intf)
{
struct f_audio *audio = func_to_audio(f);
if (intf == ac_header_desc.baInterfaceNr[0])
if (intf == uac1_ac_header_desc.baInterfaceNr[0])
return audio->alt_intf[0];
if (intf == ac_header_desc.baInterfaceNr[1])
if (intf == uac1_ac_header_desc.baInterfaceNr[1])
return audio->alt_intf[1];
return 0;
@ -906,7 +906,7 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
pr_debug("intf %d, alt %d\n", intf, alt);
if (intf == ac_header_desc.baInterfaceNr[0]) {
if (intf == uac1_ac_header_desc.baInterfaceNr[0]) {
if (alt == 1) {
err = usb_ep_enable(in_ep);
if (err) {
@ -953,7 +953,7 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
spin_unlock_irqrestore(&audio->capture_lock, flags);
}
audio->alt_intf[0] = alt;
} else if (intf == ac_header_desc.baInterfaceNr[1]) {
} else if (intf == uac1_ac_header_desc.baInterfaceNr[1]) {
if (alt == 1) {
err = usb_ep_enable(out_ep);
if (err) {
@ -1071,7 +1071,7 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
pr_err("%s: failed to allocate desc interface", __func__);
goto fail;
}
ac_interface_desc.bInterfaceNumber = status;
uac1_ac_interface_desc.bInterfaceNumber = status;
status = -ENOMEM;
@ -1082,7 +1082,7 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
}
microphone_as_interface_alt_0_desc.bInterfaceNumber = status;
microphone_as_interface_alt_1_desc.bInterfaceNumber = status;
ac_header_desc.baInterfaceNr[0] = status;
uac1_ac_header_desc.baInterfaceNr[0] = status;
audio->alt_intf[0] = 0;
status = -ENODEV;
@ -1094,7 +1094,7 @@ f_audio_bind(struct usb_configuration *c, struct usb_function *f)
}
speaker_as_interface_alt_0_desc.bInterfaceNumber = status;
speaker_as_interface_alt_1_desc.bInterfaceNumber = status;
ac_header_desc.baInterfaceNr[1] = status;
uac1_ac_header_desc.baInterfaceNr[1] = status;
audio->alt_intf[1] = 0;
status = -ENODEV;

View File

@ -85,7 +85,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
xhci->quirks |= XHCI_AMD_PLL_FIX;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
pdev->device == PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI) {
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
xhci->limit_active_eps = 64;
xhci->quirks |= XHCI_SW_BW_CHECKING;

View File

@ -4047,6 +4047,13 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
get_quirks(dev, xhci);
/* In xhci controllers which follow xhci 1.0 spec gives a spurious
* success event after a short transfer. This quirk will ignore such
* spurious event.
*/
if (xhci->hci_version > 0x96)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
/* Make sure the HC is halted. */
retval = xhci_halt(xhci);
if (retval)

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
/* Copyright (c) 2009-2012, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -39,6 +39,9 @@
#endif
#define MDP_DEBUG_BUF 2048
#define MDP_MAX_OFFSET 0xF05FC
#define MDDI_MAX_OFFSET 0xC
#define HDMI_MAX_OFFSET 0x59C
static uint32 mdp_offset;
static uint32 mdp_count;
@ -78,11 +81,18 @@ static ssize_t mdp_offset_write(
debug_buf[count] = 0; /* end of string */
sscanf(debug_buf, "%x %d", &off, &cnt);
if (sscanf(debug_buf, "%x %d", &off, &cnt) != 2)
return -EFAULT;
if (cnt <= 0)
cnt = 1;
if ((off > MDP_MAX_OFFSET) || (cnt > (MDP_MAX_OFFSET - off))) {
printk(KERN_INFO "%s: Invalid offset%x+cnt%d > %x\n", __func__,
off, cnt, MDP_MAX_OFFSET);
return -EFAULT;
}
mdp_offset = off;
mdp_count = cnt;
@ -154,6 +164,14 @@ static ssize_t mdp_reg_write(
debug_buf[count] = 0; /* end of string */
cnt = sscanf(debug_buf, "%x %x", &off, &data);
if (cnt != 2)
return -EFAULT;
if (off > MDP_MAX_OFFSET) {
printk(KERN_INFO "%s: Invalid offset%x > %x\n", __func__,
off, MDP_MAX_OFFSET);
return -EFAULT;
}
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
outpdw(MDP_BASE + off, data);
@ -620,6 +638,11 @@ static void mddi_reg_write(int ndx, uint32 off, uint32 data)
else
base = (char *)msm_pmdh_base;
if (off > MDDI_MAX_OFFSET) {
printk(KERN_INFO "%s: Invalid offset=%x > %x\n", __func__,
off, MDDI_MAX_OFFSET);
return;
}
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
writel(data, base + off);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
@ -682,6 +705,14 @@ static ssize_t pmdh_reg_write(
debug_buf[count] = 0; /* end of string */
cnt = sscanf(debug_buf, "%x %x", &off, &data);
if (cnt != 2)
return -EFAULT;
if (off > MDDI_MAX_OFFSET) {
printk(KERN_INFO "%s: Invalid offset=%x > %x\n", __func__,
off, MDDI_MAX_OFFSET);
return -EFAULT;
}
mddi_reg_write(0, off, data);
@ -737,6 +768,14 @@ static ssize_t emdh_reg_write(
debug_buf[count] = 0; /* end of string */
cnt = sscanf(debug_buf, "%x %x", &off, &data);
if (cnt != 2)
return -EFAULT;
if (off > MDDI_MAX_OFFSET) {
printk(KERN_INFO "%s: Invalid offset=%x > %x\n", __func__,
off, MDDI_MAX_OFFSET);
return -EFAULT;
}
mddi_reg_write(1, off, data);
@ -884,15 +923,18 @@ static ssize_t dbg_offset_write(
cnt = sscanf(debug_buf, "%x %d %x", &off, &num, &base);
if (cnt < 0)
cnt = 0;
if (cnt != 3)
return -EFAULT;
if (cnt >= 1)
dbg_offset = off;
if (cnt >= 2)
dbg_count = num;
if (cnt >= 3)
dbg_base = (char *)base;
if ((off > MDP_MAX_OFFSET) || (num > (MDP_MAX_OFFSET - off))) {
printk(KERN_INFO "%s: Invalid offset%x+num%d > %x\n", __func__,
off, num, MDP_MAX_OFFSET);
return -EFAULT;
}
dbg_offset = off;
dbg_count = num;
dbg_base = (char *)base;
printk(KERN_INFO "%s: offset=%x cnt=%d base=%x\n", __func__,
dbg_offset, dbg_count, (int)dbg_base);
@ -951,6 +993,14 @@ static ssize_t dbg_reg_write(
debug_buf[count] = 0; /* end of string */
cnt = sscanf(debug_buf, "%x %x", &off, &data);
if (cnt != 2)
return -EFAULT;
if (off > MDP_MAX_OFFSET) {
printk(KERN_INFO "%s: Invalid offset%x > %x\n", __func__,
off, MDP_MAX_OFFSET);
return -EFAULT;
}
writel(data, dbg_base + off);
@ -1191,14 +1241,17 @@ static ssize_t hdmi_offset_write(
debug_buf[count] = 0; /* end of string */
cnt = sscanf(debug_buf, "%x %d", &off, &num);
if (cnt != 2)
return -EFAULT;
if (cnt < 0)
cnt = 0;
if ((off > HDMI_MAX_OFFSET) || (num > (HDMI_MAX_OFFSET - off))) {
printk(KERN_INFO "%s: Invalid offset%x+num%d > %x\n", __func__,
off, num, HDMI_MAX_OFFSET);
return -EFAULT;
}
if (cnt >= 1)
hdmi_offset = off;
if (cnt >= 2)
hdmi_count = num;
hdmi_offset = off;
hdmi_count = num;
printk(KERN_INFO "%s: offset=%x cnt=%d\n", __func__,
hdmi_offset, hdmi_count);
@ -1262,6 +1315,15 @@ static ssize_t hdmi_reg_write(
cnt = sscanf(debug_buf, "%x %x", &off, &data);
if (cnt != 2)
return -EFAULT;
if (off > HDMI_MAX_OFFSET) {
printk(KERN_INFO "%s: Invalid offset%x > %x\n", __func__,
off, HDMI_MAX_OFFSET);
return -EFAULT;
}
writel(data, base + off);
printk(KERN_INFO "%s: addr=%x data=%x\n",
@ -1355,14 +1417,14 @@ int mdp_debugfs_init(void)
return -1;
}
if (debugfs_create_file("off", 0644, dent, 0, &mdp_off_fops)
if (debugfs_create_file("off", 0600, dent, 0, &mdp_off_fops)
== NULL) {
printk(KERN_ERR "%s(%d): debugfs_create_file: index fail\n",
__FILE__, __LINE__);
return -1;
}
if (debugfs_create_file("reg", 0644, dent, 0, &mdp_reg_fops)
if (debugfs_create_file("reg", 0600, dent, 0, &mdp_reg_fops)
== NULL) {
printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
__FILE__, __LINE__);
@ -1402,7 +1464,7 @@ int mdp_debugfs_init(void)
return -1;
}
if (debugfs_create_file("reg", 0644, dent, 0, &pmdh_fops)
if (debugfs_create_file("reg", 0600, dent, 0, &pmdh_fops)
== NULL) {
printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
__FILE__, __LINE__);
@ -1417,7 +1479,7 @@ int mdp_debugfs_init(void)
return -1;
}
if (debugfs_create_file("reg", 0644, dent, 0, &emdh_fops)
if (debugfs_create_file("reg", 0600, dent, 0, &emdh_fops)
== NULL) {
printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
__FILE__, __LINE__);
@ -1432,21 +1494,21 @@ int mdp_debugfs_init(void)
return -1;
}
if (debugfs_create_file("base", 0644, dent, 0, &dbg_base_fops)
if (debugfs_create_file("base", 0600, dent, 0, &dbg_base_fops)
== NULL) {
printk(KERN_ERR "%s(%d): debugfs_create_file: index fail\n",
__FILE__, __LINE__);
return -1;
}
if (debugfs_create_file("off", 0644, dent, 0, &dbg_off_fops)
if (debugfs_create_file("off", 0600, dent, 0, &dbg_off_fops)
== NULL) {
printk(KERN_ERR "%s(%d): debugfs_create_file: index fail\n",
__FILE__, __LINE__);
return -1;
}
if (debugfs_create_file("reg", 0644, dent, 0, &dbg_reg_fops)
if (debugfs_create_file("reg", 0600, dent, 0, &dbg_reg_fops)
== NULL) {
printk(KERN_ERR "%s(%d): debugfs_create_file: debug fail\n",
__FILE__, __LINE__);
@ -1462,14 +1524,14 @@ int mdp_debugfs_init(void)
return PTR_ERR(dent);
}
if (debugfs_create_file("off", 0644, dent, 0, &hdmi_off_fops)
if (debugfs_create_file("off", 0600, dent, 0, &hdmi_off_fops)
== NULL) {
printk(KERN_ERR "%s(%d): debugfs_create_file: 'off' fail\n",
__FILE__, __LINE__);
return -ENOENT;
}
if (debugfs_create_file("reg", 0644, dent, 0, &hdmi_reg_fops)
if (debugfs_create_file("reg", 0600, dent, 0, &hdmi_reg_fops)
== NULL) {
printk(KERN_ERR "%s(%d): debugfs_create_file: 'reg' fail\n",
__FILE__, __LINE__);

View File

@ -1,4 +1,4 @@
/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
/* Copyright (c) 2009-2013, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and

View File

@ -145,8 +145,10 @@ static int get_task_ioprio(struct task_struct *p)
if (ret)
goto out;
ret = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, IOPRIO_NORM);
task_lock(p);
if (p->io_context)
ret = p->io_context->ioprio;
task_unlock(p);
out:
return ret;
}

View File

@ -429,7 +429,6 @@ redo:
break;
}
ret += chars;
buf->offset += chars;
buf->len -= chars;
/* Was it a packet buffer? Clean up and exit */

View File

@ -0,0 +1,14 @@
#include <linux/hardirq.h>
/*
* may_use_simd - whether it is allowable at this time to issue SIMD
* instructions or access the SIMD register file
*
* As architectures typically don't preserve the SIMD register file when
* taking an interrupt, !in_interrupt() should be a reasonable default.
*/
static __must_check inline bool may_use_simd(void)
{
return !in_interrupt();
}

View File

@ -0,0 +1,31 @@
/*
* Shared async block cipher helpers
*/
#ifndef _CRYPTO_ABLK_HELPER_H
#define _CRYPTO_ABLK_HELPER_H
#include <linux/crypto.h>
#include <linux/kernel.h>
#include <crypto/cryptd.h>
struct async_helper_ctx {
struct cryptd_ablkcipher *cryptd_tfm;
};
extern int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
unsigned int key_len);
extern int __ablk_encrypt(struct ablkcipher_request *req);
extern int ablk_encrypt(struct ablkcipher_request *req);
extern int ablk_decrypt(struct ablkcipher_request *req);
extern void ablk_exit(struct crypto_tfm *tfm);
extern int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name);
extern int ablk_init(struct crypto_tfm *tfm);
#endif /* _CRYPTO_ABLK_HELPER_H */

View File

@ -91,6 +91,8 @@ struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
int crypto_register_shash(struct shash_alg *alg);
int crypto_unregister_shash(struct shash_alg *alg);
int crypto_register_shashes(struct shash_alg *algs, int count);
int crypto_unregister_shashes(struct shash_alg *algs, int count);
int shash_register_instance(struct crypto_template *tmpl,
struct shash_instance *inst);
void shash_free_instance(struct crypto_instance *inst);

View File

@ -87,4 +87,9 @@ struct shash_desc;
extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
unsigned int len);
#endif

View File

@ -58,7 +58,8 @@ enum rmnet_ioctl_extended_cmds_e {
RMNET_IOCTL_GET_SUPPORTED_QOS_MODES = 0x0013, /* Get QoS modes */
RMNET_IOCTL_SET_SLEEP_STATE = 0x0014, /* Set sleep state */
RMNET_IOCTL_SET_XLAT_DEV_INFO = 0x0015, /* xlat dev name */
RMNET_IOCTL_EXTENDED_MAX = 0x0016
RMNET_IOCTL_DEREGISTER_DEV = 0x0016, /* Dereg a net dev */
RMNET_IOCTL_EXTENDED_MAX = 0x0017
};
/* Return values for the RMNET_IOCTL_GET_SUPPORTED_FEATURES IOCTL */

View File

@ -552,53 +552,6 @@ static inline void boot_delay_msec(void)
}
#endif
/*
* Return the number of unread characters in the log buffer.
*/
static int log_buf_get_len(void)
{
return logged_chars;
}
/*
* Clears the ring-buffer
*/
void log_buf_clear(void)
{
logged_chars = 0;
}
/*
* Copy a range of characters from the log buffer.
*/
int log_buf_copy(char *dest, int idx, int len)
{
int ret, max;
bool took_lock = false;
if (!oops_in_progress) {
raw_spin_lock_irq(&logbuf_lock);
took_lock = true;
}
max = log_buf_get_len();
if (idx < 0 || idx >= max) {
ret = -1;
} else {
if (len > max - idx)
len = max - idx;
ret = len;
idx += (log_end - max);
while (len-- > 0)
dest[len] = LOG_BUF(idx + len);
}
if (took_lock)
raw_spin_unlock_irq(&logbuf_lock);
return ret;
}
#ifdef CONFIG_SECURITY_DMESG_RESTRICT
int dmesg_restrict = 1;
#else

View File

@ -222,7 +222,8 @@ int perf_trace_add(struct perf_event *p_event, int flags)
void perf_trace_del(struct perf_event *p_event, int flags)
{
struct ftrace_event_call *tp_event = p_event->tp_event;
hlist_del_rcu(&p_event->hlist_entry);
if (!hlist_unhashed(&p_event->hlist_entry))
hlist_del_rcu(&p_event->hlist_entry);
tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

View File

@ -1251,6 +1251,14 @@ static int __too_many_isolated(struct zone *zone, int file,
}
}
/*
* GFP_NOIO/GFP_NOFS callers are allowed to isolate more pages, so they
* won't get blocked by normal direct-reclaimers, forming a circular
* deadlock.
*/
if ((sc->gfp_mask & GFP_IOFS) == GFP_IOFS)
inactive >>= 3;
return isolated > inactive;
}

View File

@ -472,14 +472,12 @@ static int mark_source_chains(const struct xt_table_info *newinfo,
return 1;
}
static inline int check_entry(const struct arpt_entry *e, const char *name)
static inline int check_entry(const struct arpt_entry *e)
{
const struct xt_entry_target *t;
if (!arp_checkentry(&e->arp)) {
duprintf("arp_tables: arp check failed %p %s.\n", e, name);
if (!arp_checkentry(&e->arp))
return -EINVAL;
}
if (e->target_offset + sizeof(struct xt_entry_target) > e->next_offset)
return -EINVAL;
@ -520,10 +518,6 @@ find_check_entry(struct arpt_entry *e, const char *name, unsigned int size)
struct xt_target *target;
int ret;
ret = check_entry(e, name);
if (ret)
return ret;
t = arpt_get_target(e);
target = xt_request_find_target(NFPROTO_ARP, t->u.user.name,
t->u.user.revision);
@ -568,6 +562,7 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct arpt_entry) != 0 ||
(unsigned char *)e + sizeof(struct arpt_entry) >= limit ||
@ -583,6 +578,10 @@ static inline int check_entry_size_and_hooks(struct arpt_entry *e,
return -EINVAL;
}
err = check_entry(e);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_ARP_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@ -1239,7 +1238,7 @@ check_compat_entry_size_and_hooks(struct compat_arpt_entry *e,
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct arpt_entry *)e, name);
ret = check_entry((struct arpt_entry *)e);
if (ret)
return ret;

View File

@ -567,14 +567,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
}
static int
check_entry(const struct ipt_entry *e, const char *name)
check_entry(const struct ipt_entry *e)
{
const struct xt_entry_target *t;
if (!ip_checkentry(&e->ip)) {
duprintf("ip check failed %p %s.\n", e, name);
if (!ip_checkentry(&e->ip))
return -EINVAL;
}
if (e->target_offset + sizeof(struct xt_entry_target) >
e->next_offset)
@ -664,10 +662,6 @@ find_check_entry(struct ipt_entry *e, struct net *net, const char *name,
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
ret = check_entry(e, name);
if (ret)
return ret;
j = 0;
mtpar.net = net;
mtpar.table = name;
@ -731,6 +725,7 @@ check_entry_size_and_hooks(struct ipt_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct ipt_entry) != 0 ||
(unsigned char *)e + sizeof(struct ipt_entry) >= limit ||
@ -746,6 +741,10 @@ check_entry_size_and_hooks(struct ipt_entry *e,
return -EINVAL;
}
err = check_entry(e);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@ -1505,7 +1504,7 @@ check_compat_entry_size_and_hooks(struct compat_ipt_entry *e,
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct ipt_entry *)e, name);
ret = check_entry((struct ipt_entry *)e);
if (ret)
return ret;

View File

@ -255,6 +255,9 @@ int ping_init_sock(struct sock *sk)
int i, j, count;
int ret = 0;
if (sk->sk_family == AF_INET6)
inet6_sk(sk)->ipv6only = 1;
if (sk->sk_family == AF_INET6)
inet6_sk(sk)->ipv6only = 1;

View File

@ -2086,14 +2086,18 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
int bucket)
{
struct inet_sock *inet = inet_sk(sp);
struct udp_sock *up = udp_sk(sp);
__be32 dest = inet->inet_daddr;
__be32 src = inet->inet_rcv_saddr;
__u16 destp = ntohs(inet->inet_dport);
__u16 srcp = ntohs(inet->inet_sport);
__u8 state = sp->sk_state;
if (up->encap_rcv)
state |= 0xF0;
seq_printf(f, "%5d: %08X:%04X %08X:%04X"
" %02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d",
bucket, src, srcp, dest, destp, sp->sk_state,
" %02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %d",
bucket, src, srcp, dest, destp, state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),

View File

@ -576,14 +576,12 @@ static void cleanup_match(struct xt_entry_match *m, struct net *net)
}
static int
check_entry(const struct ip6t_entry *e, const char *name)
check_entry(const struct ip6t_entry *e)
{
const struct xt_entry_target *t;
if (!ip6_checkentry(&e->ipv6)) {
duprintf("ip_tables: ip check failed %p %s.\n", e, name);
if (!ip6_checkentry(&e->ipv6))
return -EINVAL;
}
if (e->target_offset + sizeof(struct xt_entry_target) >
e->next_offset)
@ -674,10 +672,6 @@ find_check_entry(struct ip6t_entry *e, struct net *net, const char *name,
struct xt_mtchk_param mtpar;
struct xt_entry_match *ematch;
ret = check_entry(e, name);
if (ret)
return ret;
j = 0;
mtpar.net = net;
mtpar.table = name;
@ -741,6 +735,7 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
unsigned int valid_hooks)
{
unsigned int h;
int err;
if ((unsigned long)e % __alignof__(struct ip6t_entry) != 0 ||
(unsigned char *)e + sizeof(struct ip6t_entry) >= limit ||
@ -756,6 +751,10 @@ check_entry_size_and_hooks(struct ip6t_entry *e,
return -EINVAL;
}
err = check_entry(e);
if (err)
return err;
/* Check hooks & underflows */
for (h = 0; h < NF_INET_NUMHOOKS; h++) {
if (!(valid_hooks & (1 << h)))
@ -1516,7 +1515,7 @@ check_compat_entry_size_and_hooks(struct compat_ip6t_entry *e,
}
/* For purposes of check_entry casting the compat entry is fine */
ret = check_entry((struct ip6t_entry *)e, name);
ret = check_entry((struct ip6t_entry *)e);
if (ret)
return ret;

View File

@ -930,6 +930,8 @@ struct dst_entry * ip6_route_output(struct net *net, const struct sock *sk,
fl6->flowi6_iif = LOOPBACK_IFINDEX;
fl6->flowi6_iif = net->loopback_dev->ifindex;
if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
flags |= RT6_LOOKUP_F_IFACE;

View File

@ -497,6 +497,8 @@ static int snd_timer_stop1(struct snd_timer_instance *timeri, bool stop)
int result = 0;
unsigned long flags;
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
timer = timeri->timer;
if (!timer)
return -EINVAL;
@ -1236,6 +1238,7 @@ static void snd_timer_user_tinterrupt(struct snd_timer_instance *timeri,
}
if ((tu->filter & (1 << SNDRV_TIMER_EVENT_RESOLUTION)) &&
tu->last_resolution != resolution) {
memset(&r1, 0, sizeof(r1));
r1.event = SNDRV_TIMER_EVENT_RESOLUTION;
r1.tstamp = tstamp;
r1.val = resolution;

View File

@ -1031,6 +1031,7 @@ static int msm_compr_ioctl(struct snd_pcm_substream *substream,
struct snd_dec_ddp *ddp =
&compr->info.codec_param.codec.options.ddp;
uint32_t params_length = 0;
memset(params_value, 0, MAX_AC3_PARAM_SIZE);
/* check integer overflow */
if (ddp->params_length > UINT_MAX/sizeof(int)) {
pr_err("%s: Integer overflow ddp->params_length %d\n",
@ -1075,6 +1076,7 @@ static int msm_compr_ioctl(struct snd_pcm_substream *substream,
struct snd_dec_ddp *ddp =
&compr->info.codec_param.codec.options.ddp;
uint32_t params_length = 0;
memset(params_value, 0, MAX_AC3_PARAM_SIZE);
/* check integer overflow */
if (ddp->params_length > UINT_MAX/sizeof(int)) {
pr_err("%s: Integer overflow ddp->params_length %d\n",

Some files were not shown because too many files have changed in this diff Show More