Mirror of https://github.com/followmsi/android_kernel_google_msm.git (synced 2024-11-06 23:17:41 +00:00)

Merge branch 'lineage-18.1' into followmsi-11

Commit 8e1649a3cc, 16 changed files with 4331 additions and 55 deletions
Documentation/usb/functionfs.txt (new file, 67 lines)
@@ -0,0 +1,67 @@
*How FunctionFS works*

From the kernel's point of view it is just a composite function with
some unique behaviour.  It may be added to a USB configuration only
after the user space driver has registered by writing descriptors and
strings (the user space program has to provide the same information
that kernel-level composite functions provide when they are added to
the configuration).

This in particular means that the composite initialisation functions
may not be in the init section (i.e. may not use the __init tag).

From the user space point of view it is a file system which, when
mounted, provides an "ep0" file.  The user space driver needs to
write descriptors and strings to that file.  It does not need to
worry about endpoint, interface or string numbers but simply provides
descriptors as if the function were the only one (endpoint and string
numbers starting from one and interface numbers starting from zero).
FunctionFS renumbers them as needed, also handling situations when
the numbers differ in different configurations.

When descriptors and strings are written, "ep#" files appear (one for
each declared endpoint) which handle communication on a single
endpoint.  Again, FunctionFS takes care of the real numbers and of
configuration changes (which means that the "ep1" file may really be
mapped to, say, endpoint 3, and after a configuration change to, say,
endpoint 2).  "ep0" is used for receiving events and handling setup
requests.

When all files are closed the function disables itself.

FunctionFS is also designed so that it can be mounted several times,
so in the end a gadget could use several FunctionFS functions.  Each
FunctionFS instance is identified by the device name used when
mounting.

One can imagine a gadget that has Ethernet, MTP and HID interfaces
where the last two are implemented via FunctionFS.  On the user space
level it would look like this:

$ insmod g_ffs.ko idVendor=<ID> iSerialNumber=<string> functions=mtp,hid
$ mkdir /dev/ffs-mtp && mount -t functionfs mtp /dev/ffs-mtp
$ ( cd /dev/ffs-mtp && mtp-daemon ) &
$ mkdir /dev/ffs-hid && mount -t functionfs hid /dev/ffs-hid
$ ( cd /dev/ffs-hid && hid-daemon ) &

On the kernel level the gadget checks ffs_data->dev_name to identify
whether it is the FunctionFS instance meant for MTP ("mtp") or HID
("hid").

If no "functions" module parameter is supplied, the driver accepts
just one function with any name.

When the "functions" module parameter is supplied, only functions
with the listed names are accepted.  In particular, if its value is
just a one-element list, the behaviour is similar to having no
"functions" parameter at all; however, only a function with the
specified name is accepted.

The gadget is registered only after all the declared function
filesystems have been mounted and the USB descriptors of all
functions have been written to their ep0's.

Conversely, the gadget is unregistered after the first USB function
closes its endpoints.
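A minimal user space sketch of the ep0 event handling described above. This is not from the kernel tree: the helper name and the logging are illustrative, and it assumes the descriptor and string blobs have already been written to "ep0".

#include <stdio.h>
#include <unistd.h>
#include <linux/usb/functionfs.h>

/* Called with the fd of the already-initialised "ep0" file. */
static int handle_ep0_events(int ep0)
{
	struct usb_functionfs_event event;

	for (;;) {
		/* each read() returns one queued event */
		if (read(ep0, &event, sizeof(event)) < (ssize_t)sizeof(event))
			return -1;

		switch (event.type) {
		case FUNCTIONFS_ENABLE:		/* a configuration was selected; ep# files are live */
			fprintf(stderr, "function enabled\n");
			break;
		case FUNCTIONFS_DISABLE:	/* host reset or changed configuration */
			fprintf(stderr, "function disabled\n");
			break;
		case FUNCTIONFS_SETUP:
			/* event.u.setup holds the usb_ctrlrequest; a read or
			 * write on ep0 serves the data/status stage */
			break;
		case FUNCTIONFS_BIND:
		case FUNCTIONFS_UNBIND:
		case FUNCTIONFS_SUSPEND:
		case FUNCTIONFS_RESUME:
			break;
		}
	}
}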
@@ -558,6 +558,7 @@ CONFIG_SECURITY_NETWORK=y
CONFIG_LSM_MMAP_MIN_ADDR=4096
CONFIG_SECURITY_SELINUX=y
CONFIG_CRYPTO_SHA1_ARM_NEON=y
CONFIG_CRYPTO_SHA256_ARM=y
CONFIG_CRYPTO_SHA512_ARM_NEON=y
CONFIG_CRYPTO_AES_ARM_BS=y
CONFIG_CRYPTO_TWOFISH=y
@@ -6,12 +6,15 @@ obj-$(CONFIG_CRYPTO_AES_ARM) += aes-arm.o
obj-$(CONFIG_CRYPTO_AES_ARM_BS) += aes-arm-bs.o
obj-$(CONFIG_CRYPTO_SHA1_ARM) += sha1-arm.o
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM_NEON) += sha512-arm-neon.o

aes-arm-y := aes-armv4.o aes_glue.o
aes-arm-bs-y := aesbs-core.o aesbs-glue.o
sha1-arm-y := sha1-armv4-large.o sha1_glue.o
sha1-arm-neon-y := sha1-armv7-neon.o sha1_neon_glue.o
sha256-arm-neon-$(CONFIG_KERNEL_MODE_NEON) := sha256_neon_glue.o
sha256-arm-y := sha256-core.o sha256_glue.o $(sha256-arm-neon-y)
sha512-arm-neon-y := sha512-armv7-neon.o sha512_neon_glue.o

quiet_cmd_perl = PERL $@

@@ -20,4 +23,7 @@ quiet_cmd_perl = PERL $@
$(src)/aesbs-core.S_shipped: $(src)/bsaes-armv7.pl
	$(call cmd,perl)

.PRECIOUS: $(obj)/aesbs-core.S
$(src)/sha256-core.S_shipped: $(src)/sha256-armv4.pl
	$(call cmd,perl)

.PRECIOUS: $(obj)/aesbs-core.S $(obj)/sha256-core.S
arch/arm/crypto/sha256-armv4.pl (new file, 716 lines)
|
@ -0,0 +1,716 @@
|
|||
#!/usr/bin/env perl
|
||||
|
||||
# ====================================================================
|
||||
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
|
||||
# project. The module is, however, dual licensed under OpenSSL and
|
||||
# CRYPTOGAMS licenses depending on where you obtain it. For further
|
||||
# details see http://www.openssl.org/~appro/cryptogams/.
|
||||
#
|
||||
# Permission to use under GPL terms is granted.
|
||||
# ====================================================================
|
||||
|
||||
# SHA256 block procedure for ARMv4. May 2007.
|
||||
|
||||
# Performance is ~2x better than gcc 3.4 generated code and in "abso-
|
||||
# lute" terms is ~2250 cycles per 64-byte block or ~35 cycles per
|
||||
# byte [on single-issue Xscale PXA250 core].
|
||||
|
||||
# July 2010.
|
||||
#
|
||||
# Rescheduling for dual-issue pipeline resulted in 22% improvement on
|
||||
# Cortex A8 core and ~20 cycles per processed byte.
|
||||
|
||||
# February 2011.
|
||||
#
|
||||
# Profiler-assisted and platform-specific optimization resulted in 16%
|
||||
# improvement on Cortex A8 core and ~15.4 cycles per processed byte.
|
||||
|
||||
# September 2013.
|
||||
#
|
||||
# Add NEON implementation. On Cortex A8 it was measured to process one
|
||||
# byte in 12.5 cycles or 23% faster than integer-only code. Snapdragon
|
||||
# S4 does it in 12.5 cycles too, but it's 50% faster than integer-only
|
||||
# code (meaning that latter performs sub-optimally, nothing was done
|
||||
# about it).
|
||||
|
||||
# May 2014.
|
||||
#
|
||||
# Add ARMv8 code path performing at 2.0 cpb on Apple A7.
|
||||
|
||||
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
|
||||
open STDOUT,">$output";
|
||||
|
||||
$ctx="r0"; $t0="r0";
|
||||
$inp="r1"; $t4="r1";
|
||||
$len="r2"; $t1="r2";
|
||||
$T1="r3"; $t3="r3";
|
||||
$A="r4";
|
||||
$B="r5";
|
||||
$C="r6";
|
||||
$D="r7";
|
||||
$E="r8";
|
||||
$F="r9";
|
||||
$G="r10";
|
||||
$H="r11";
|
||||
@V=($A,$B,$C,$D,$E,$F,$G,$H);
|
||||
$t2="r12";
|
||||
$Ktbl="r14";
|
||||
|
||||
@Sigma0=( 2,13,22);
|
||||
@Sigma1=( 6,11,25);
|
||||
@sigma0=( 7,18, 3);
|
||||
@sigma1=(17,19,10);
|
||||
|
||||
sub BODY_00_15 {
|
||||
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
|
||||
|
||||
$code.=<<___ if ($i<16);
|
||||
#if __ARM_ARCH__>=7
|
||||
@ ldr $t1,[$inp],#4 @ $i
|
||||
# if $i==15
|
||||
str $inp,[sp,#17*4] @ make room for $t4
|
||||
# endif
|
||||
eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
|
||||
add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
|
||||
eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
|
||||
# ifndef __ARMEB__
|
||||
rev $t1,$t1
|
||||
# endif
|
||||
#else
|
||||
@ ldrb $t1,[$inp,#3] @ $i
|
||||
add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
|
||||
ldrb $t2,[$inp,#2]
|
||||
ldrb $t0,[$inp,#1]
|
||||
orr $t1,$t1,$t2,lsl#8
|
||||
ldrb $t2,[$inp],#4
|
||||
orr $t1,$t1,$t0,lsl#16
|
||||
# if $i==15
|
||||
str $inp,[sp,#17*4] @ make room for $t4
|
||||
# endif
|
||||
eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]`
|
||||
orr $t1,$t1,$t2,lsl#24
|
||||
eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
|
||||
#endif
|
||||
___
|
||||
$code.=<<___;
|
||||
ldr $t2,[$Ktbl],#4 @ *K256++
|
||||
add $h,$h,$t1 @ h+=X[i]
|
||||
str $t1,[sp,#`$i%16`*4]
|
||||
eor $t1,$f,$g
|
||||
add $h,$h,$t0,ror#$Sigma1[0] @ h+=Sigma1(e)
|
||||
and $t1,$t1,$e
|
||||
add $h,$h,$t2 @ h+=K256[i]
|
||||
eor $t1,$t1,$g @ Ch(e,f,g)
|
||||
eor $t0,$a,$a,ror#`$Sigma0[1]-$Sigma0[0]`
|
||||
add $h,$h,$t1 @ h+=Ch(e,f,g)
|
||||
#if $i==31
|
||||
and $t2,$t2,#0xff
|
||||
cmp $t2,#0xf2 @ done?
|
||||
#endif
|
||||
#if $i<15
|
||||
# if __ARM_ARCH__>=7
|
||||
ldr $t1,[$inp],#4 @ prefetch
|
||||
# else
|
||||
ldrb $t1,[$inp,#3]
|
||||
# endif
|
||||
eor $t2,$a,$b @ a^b, b^c in next round
|
||||
#else
|
||||
ldr $t1,[sp,#`($i+2)%16`*4] @ from future BODY_16_xx
|
||||
eor $t2,$a,$b @ a^b, b^c in next round
|
||||
ldr $t4,[sp,#`($i+15)%16`*4] @ from future BODY_16_xx
|
||||
#endif
|
||||
eor $t0,$t0,$a,ror#`$Sigma0[2]-$Sigma0[0]` @ Sigma0(a)
|
||||
and $t3,$t3,$t2 @ (b^c)&=(a^b)
|
||||
add $d,$d,$h @ d+=h
|
||||
eor $t3,$t3,$b @ Maj(a,b,c)
|
||||
add $h,$h,$t0,ror#$Sigma0[0] @ h+=Sigma0(a)
|
||||
@ add $h,$h,$t3 @ h+=Maj(a,b,c)
|
||||
___
|
||||
($t2,$t3)=($t3,$t2);
|
||||
}
|
||||
|
||||
sub BODY_16_XX {
|
||||
my ($i,$a,$b,$c,$d,$e,$f,$g,$h) = @_;
|
||||
|
||||
$code.=<<___;
|
||||
@ ldr $t1,[sp,#`($i+1)%16`*4] @ $i
|
||||
@ ldr $t4,[sp,#`($i+14)%16`*4]
|
||||
mov $t0,$t1,ror#$sigma0[0]
|
||||
add $a,$a,$t2 @ h+=Maj(a,b,c) from the past
|
||||
mov $t2,$t4,ror#$sigma1[0]
|
||||
eor $t0,$t0,$t1,ror#$sigma0[1]
|
||||
eor $t2,$t2,$t4,ror#$sigma1[1]
|
||||
eor $t0,$t0,$t1,lsr#$sigma0[2] @ sigma0(X[i+1])
|
||||
ldr $t1,[sp,#`($i+0)%16`*4]
|
||||
eor $t2,$t2,$t4,lsr#$sigma1[2] @ sigma1(X[i+14])
|
||||
ldr $t4,[sp,#`($i+9)%16`*4]
|
||||
|
||||
add $t2,$t2,$t0
|
||||
eor $t0,$e,$e,ror#`$Sigma1[1]-$Sigma1[0]` @ from BODY_00_15
|
||||
add $t1,$t1,$t2
|
||||
eor $t0,$t0,$e,ror#`$Sigma1[2]-$Sigma1[0]` @ Sigma1(e)
|
||||
add $t1,$t1,$t4 @ X[i]
|
||||
___
|
||||
&BODY_00_15(@_);
|
||||
}
|
||||
|
||||
$code=<<___;
|
||||
#ifndef __KERNEL__
|
||||
# include "arm_arch.h"
|
||||
#else
|
||||
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
|
||||
# define __ARM_MAX_ARCH__ 7
|
||||
#endif
|
||||
|
||||
.text
|
||||
#if __ARM_ARCH__<7
|
||||
.code 32
|
||||
#else
|
||||
.syntax unified
|
||||
# ifdef __thumb2__
|
||||
# define adrl adr
|
||||
.thumb
|
||||
# else
|
||||
.code 32
|
||||
# endif
|
||||
#endif
|
||||
|
||||
.type K256,%object
|
||||
.align 5
|
||||
K256:
|
||||
.word 0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
|
||||
.word 0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
|
||||
.word 0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
|
||||
.word 0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
|
||||
.word 0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
|
||||
.word 0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
|
||||
.word 0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
|
||||
.word 0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
|
||||
.word 0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
|
||||
.word 0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
|
||||
.word 0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
|
||||
.word 0xd192e819,0xd6990624,0xf40e3585,0x106aa070
|
||||
.word 0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
|
||||
.word 0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
|
||||
.word 0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
|
||||
.word 0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
|
||||
.size K256,.-K256
|
||||
.word 0 @ terminator
|
||||
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
|
||||
.LOPENSSL_armcap:
|
||||
.word OPENSSL_armcap_P-sha256_block_data_order
|
||||
#endif
|
||||
.align 5
|
||||
|
||||
.global sha256_block_data_order
|
||||
.type sha256_block_data_order,%function
|
||||
sha256_block_data_order:
|
||||
#if __ARM_ARCH__<7
|
||||
sub r3,pc,#8 @ sha256_block_data_order
|
||||
#else
|
||||
adr r3,sha256_block_data_order
|
||||
#endif
|
||||
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
|
||||
ldr r12,.LOPENSSL_armcap
|
||||
ldr r12,[r3,r12] @ OPENSSL_armcap_P
|
||||
tst r12,#ARMV8_SHA256
|
||||
bne .LARMv8
|
||||
tst r12,#ARMV7_NEON
|
||||
bne .LNEON
|
||||
#endif
|
||||
add $len,$inp,$len,lsl#6 @ len to point at the end of inp
|
||||
stmdb sp!,{$ctx,$inp,$len,r4-r11,lr}
|
||||
ldmia $ctx,{$A,$B,$C,$D,$E,$F,$G,$H}
|
||||
sub $Ktbl,r3,#256+32 @ K256
|
||||
sub sp,sp,#16*4 @ alloca(X[16])
|
||||
.Loop:
|
||||
# if __ARM_ARCH__>=7
|
||||
ldr $t1,[$inp],#4
|
||||
# else
|
||||
ldrb $t1,[$inp,#3]
|
||||
# endif
|
||||
eor $t3,$B,$C @ magic
|
||||
eor $t2,$t2,$t2
|
||||
___
|
||||
for($i=0;$i<16;$i++) { &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
|
||||
$code.=".Lrounds_16_xx:\n";
|
||||
for (;$i<32;$i++) { &BODY_16_XX($i,@V); unshift(@V,pop(@V)); }
|
||||
$code.=<<___;
|
||||
#if __ARM_ARCH__>=7
|
||||
ite eq @ Thumb2 thing, sanity check in ARM
|
||||
#endif
|
||||
ldreq $t3,[sp,#16*4] @ pull ctx
|
||||
bne .Lrounds_16_xx
|
||||
|
||||
add $A,$A,$t2 @ h+=Maj(a,b,c) from the past
|
||||
ldr $t0,[$t3,#0]
|
||||
ldr $t1,[$t3,#4]
|
||||
ldr $t2,[$t3,#8]
|
||||
add $A,$A,$t0
|
||||
ldr $t0,[$t3,#12]
|
||||
add $B,$B,$t1
|
||||
ldr $t1,[$t3,#16]
|
||||
add $C,$C,$t2
|
||||
ldr $t2,[$t3,#20]
|
||||
add $D,$D,$t0
|
||||
ldr $t0,[$t3,#24]
|
||||
add $E,$E,$t1
|
||||
ldr $t1,[$t3,#28]
|
||||
add $F,$F,$t2
|
||||
ldr $inp,[sp,#17*4] @ pull inp
|
||||
ldr $t2,[sp,#18*4] @ pull inp+len
|
||||
add $G,$G,$t0
|
||||
add $H,$H,$t1
|
||||
stmia $t3,{$A,$B,$C,$D,$E,$F,$G,$H}
|
||||
cmp $inp,$t2
|
||||
sub $Ktbl,$Ktbl,#256 @ rewind Ktbl
|
||||
bne .Loop
|
||||
|
||||
add sp,sp,#`16+3`*4 @ destroy frame
|
||||
#if __ARM_ARCH__>=5
|
||||
ldmia sp!,{r4-r11,pc}
|
||||
#else
|
||||
ldmia sp!,{r4-r11,lr}
|
||||
tst lr,#1
|
||||
moveq pc,lr @ be binary compatible with V4, yet
|
||||
bx lr @ interoperable with Thumb ISA:-)
|
||||
#endif
|
||||
.size sha256_block_data_order,.-sha256_block_data_order
|
||||
___
|
||||
######################################################################
|
||||
# NEON stuff
|
||||
#
|
||||
{{{
|
||||
my @X=map("q$_",(0..3));
|
||||
my ($T0,$T1,$T2,$T3,$T4,$T5)=("q8","q9","q10","q11","d24","d25");
|
||||
my $Xfer=$t4;
|
||||
my $j=0;
|
||||
|
||||
sub Dlo() { shift=~m|q([1]?[0-9])|?"d".($1*2):""; }
|
||||
sub Dhi() { shift=~m|q([1]?[0-9])|?"d".($1*2+1):""; }
|
||||
|
||||
sub AUTOLOAD() # thunk [simplified] x86-style perlasm
|
||||
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
|
||||
my $arg = pop;
|
||||
$arg = "#$arg" if ($arg*1 eq $arg);
|
||||
$code .= "\t$opcode\t".join(',',@_,$arg)."\n";
|
||||
}
|
||||
|
||||
sub Xupdate()
|
||||
{ use integer;
|
||||
my $body = shift;
|
||||
my @insns = (&$body,&$body,&$body,&$body);
|
||||
my ($a,$b,$c,$d,$e,$f,$g,$h);
|
||||
|
||||
&vext_8 ($T0,@X[0],@X[1],4); # X[1..4]
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vext_8 ($T1,@X[2],@X[3],4); # X[9..12]
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vshr_u32 ($T2,$T0,$sigma0[0]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += X[9..12]
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vshr_u32 ($T1,$T0,$sigma0[2]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vsli_32 ($T2,$T0,32-$sigma0[0]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vshr_u32 ($T3,$T0,$sigma0[1]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&veor ($T1,$T1,$T2);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vsli_32 ($T3,$T0,32-$sigma0[1]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[0]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&veor ($T1,$T1,$T3); # sigma0(X[1..4])
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vsli_32 ($T4,&Dhi(@X[3]),32-$sigma1[0]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vshr_u32 ($T5,&Dhi(@X[3]),$sigma1[2]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vadd_i32 (@X[0],@X[0],$T1); # X[0..3] += sigma0(X[1..4])
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&veor ($T5,$T5,$T4);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vshr_u32 ($T4,&Dhi(@X[3]),$sigma1[1]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vsli_32 ($T4,&Dhi(@X[3]),32-$sigma1[1]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&veor ($T5,$T5,$T4); # sigma1(X[14..15])
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vadd_i32 (&Dlo(@X[0]),&Dlo(@X[0]),$T5);# X[0..1] += sigma1(X[14..15])
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[0]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vsli_32 ($T4,&Dlo(@X[0]),32-$sigma1[0]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vshr_u32 ($T5,&Dlo(@X[0]),$sigma1[2]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&veor ($T5,$T5,$T4);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vshr_u32 ($T4,&Dlo(@X[0]),$sigma1[1]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vld1_32 ("{$T0}","[$Ktbl,:128]!");
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vsli_32 ($T4,&Dlo(@X[0]),32-$sigma1[1]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&veor ($T5,$T5,$T4); # sigma1(X[16..17])
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vadd_i32 (&Dhi(@X[0]),&Dhi(@X[0]),$T5);# X[2..3] += sigma1(X[16..17])
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vadd_i32 ($T0,$T0,@X[0]);
|
||||
while($#insns>=2) { eval(shift(@insns)); }
|
||||
&vst1_32 ("{$T0}","[$Xfer,:128]!");
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
|
||||
push(@X,shift(@X)); # "rotate" X[]
|
||||
}
|
||||
|
||||
sub Xpreload()
|
||||
{ use integer;
|
||||
my $body = shift;
|
||||
my @insns = (&$body,&$body,&$body,&$body);
|
||||
my ($a,$b,$c,$d,$e,$f,$g,$h);
|
||||
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vld1_32 ("{$T0}","[$Ktbl,:128]!");
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vrev32_8 (@X[0],@X[0]);
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
eval(shift(@insns));
|
||||
&vadd_i32 ($T0,$T0,@X[0]);
|
||||
foreach (@insns) { eval; } # remaining instructions
|
||||
&vst1_32 ("{$T0}","[$Xfer,:128]!");
|
||||
|
||||
push(@X,shift(@X)); # "rotate" X[]
|
||||
}
|
||||
|
||||
sub body_00_15 () {
|
||||
(
|
||||
'($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
|
||||
'&add ($h,$h,$t1)', # h+=X[i]+K[i]
|
||||
'&eor ($t1,$f,$g)',
|
||||
'&eor ($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
|
||||
'&add ($a,$a,$t2)', # h+=Maj(a,b,c) from the past
|
||||
'&and ($t1,$t1,$e)',
|
||||
'&eor ($t2,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))', # Sigma1(e)
|
||||
'&eor ($t0,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
|
||||
'&eor ($t1,$t1,$g)', # Ch(e,f,g)
|
||||
'&add ($h,$h,$t2,"ror#$Sigma1[0]")', # h+=Sigma1(e)
|
||||
'&eor ($t2,$a,$b)', # a^b, b^c in next round
|
||||
'&eor ($t0,$t0,$a,"ror#".($Sigma0[2]-$Sigma0[0]))', # Sigma0(a)
|
||||
'&add ($h,$h,$t1)', # h+=Ch(e,f,g)
|
||||
'&ldr ($t1,sprintf "[sp,#%d]",4*(($j+1)&15)) if (($j&15)!=15);'.
|
||||
'&ldr ($t1,"[$Ktbl]") if ($j==15);'.
|
||||
'&ldr ($t1,"[sp,#64]") if ($j==31)',
|
||||
'&and ($t3,$t3,$t2)', # (b^c)&=(a^b)
|
||||
'&add ($d,$d,$h)', # d+=h
|
||||
'&add ($h,$h,$t0,"ror#$Sigma0[0]");'. # h+=Sigma0(a)
|
||||
'&eor ($t3,$t3,$b)', # Maj(a,b,c)
|
||||
'$j++; unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
|
||||
)
|
||||
}
|
||||
|
||||
$code.=<<___;
|
||||
#if __ARM_MAX_ARCH__>=7
|
||||
.arch armv7-a
|
||||
.fpu neon
|
||||
|
||||
.global sha256_block_data_order_neon
|
||||
.type sha256_block_data_order_neon,%function
|
||||
.align 4
|
||||
sha256_block_data_order_neon:
|
||||
.LNEON:
|
||||
stmdb sp!,{r4-r12,lr}
|
||||
|
||||
sub $H,sp,#16*4+16
|
||||
adrl $Ktbl,K256
|
||||
bic $H,$H,#15 @ align for 128-bit stores
|
||||
mov $t2,sp
|
||||
mov sp,$H @ alloca
|
||||
add $len,$inp,$len,lsl#6 @ len to point at the end of inp
|
||||
|
||||
vld1.8 {@X[0]},[$inp]!
|
||||
vld1.8 {@X[1]},[$inp]!
|
||||
vld1.8 {@X[2]},[$inp]!
|
||||
vld1.8 {@X[3]},[$inp]!
|
||||
vld1.32 {$T0},[$Ktbl,:128]!
|
||||
vld1.32 {$T1},[$Ktbl,:128]!
|
||||
vld1.32 {$T2},[$Ktbl,:128]!
|
||||
vld1.32 {$T3},[$Ktbl,:128]!
|
||||
vrev32.8 @X[0],@X[0] @ yes, even on
|
||||
str $ctx,[sp,#64]
|
||||
vrev32.8 @X[1],@X[1] @ big-endian
|
||||
str $inp,[sp,#68]
|
||||
mov $Xfer,sp
|
||||
vrev32.8 @X[2],@X[2]
|
||||
str $len,[sp,#72]
|
||||
vrev32.8 @X[3],@X[3]
|
||||
str $t2,[sp,#76] @ save original sp
|
||||
vadd.i32 $T0,$T0,@X[0]
|
||||
vadd.i32 $T1,$T1,@X[1]
|
||||
vst1.32 {$T0},[$Xfer,:128]!
|
||||
vadd.i32 $T2,$T2,@X[2]
|
||||
vst1.32 {$T1},[$Xfer,:128]!
|
||||
vadd.i32 $T3,$T3,@X[3]
|
||||
vst1.32 {$T2},[$Xfer,:128]!
|
||||
vst1.32 {$T3},[$Xfer,:128]!
|
||||
|
||||
ldmia $ctx,{$A-$H}
|
||||
sub $Xfer,$Xfer,#64
|
||||
ldr $t1,[sp,#0]
|
||||
eor $t2,$t2,$t2
|
||||
eor $t3,$B,$C
|
||||
b .L_00_48
|
||||
|
||||
.align 4
|
||||
.L_00_48:
|
||||
___
|
||||
&Xupdate(\&body_00_15);
|
||||
&Xupdate(\&body_00_15);
|
||||
&Xupdate(\&body_00_15);
|
||||
&Xupdate(\&body_00_15);
|
||||
$code.=<<___;
|
||||
teq $t1,#0 @ check for K256 terminator
|
||||
ldr $t1,[sp,#0]
|
||||
sub $Xfer,$Xfer,#64
|
||||
bne .L_00_48
|
||||
|
||||
ldr $inp,[sp,#68]
|
||||
ldr $t0,[sp,#72]
|
||||
sub $Ktbl,$Ktbl,#256 @ rewind $Ktbl
|
||||
teq $inp,$t0
|
||||
it eq
|
||||
subeq $inp,$inp,#64 @ avoid SEGV
|
||||
vld1.8 {@X[0]},[$inp]! @ load next input block
|
||||
vld1.8 {@X[1]},[$inp]!
|
||||
vld1.8 {@X[2]},[$inp]!
|
||||
vld1.8 {@X[3]},[$inp]!
|
||||
it ne
|
||||
strne $inp,[sp,#68]
|
||||
mov $Xfer,sp
|
||||
___
|
||||
&Xpreload(\&body_00_15);
|
||||
&Xpreload(\&body_00_15);
|
||||
&Xpreload(\&body_00_15);
|
||||
&Xpreload(\&body_00_15);
|
||||
$code.=<<___;
|
||||
ldr $t0,[$t1,#0]
|
||||
add $A,$A,$t2 @ h+=Maj(a,b,c) from the past
|
||||
ldr $t2,[$t1,#4]
|
||||
ldr $t3,[$t1,#8]
|
||||
ldr $t4,[$t1,#12]
|
||||
add $A,$A,$t0 @ accumulate
|
||||
ldr $t0,[$t1,#16]
|
||||
add $B,$B,$t2
|
||||
ldr $t2,[$t1,#20]
|
||||
add $C,$C,$t3
|
||||
ldr $t3,[$t1,#24]
|
||||
add $D,$D,$t4
|
||||
ldr $t4,[$t1,#28]
|
||||
add $E,$E,$t0
|
||||
str $A,[$t1],#4
|
||||
add $F,$F,$t2
|
||||
str $B,[$t1],#4
|
||||
add $G,$G,$t3
|
||||
str $C,[$t1],#4
|
||||
add $H,$H,$t4
|
||||
str $D,[$t1],#4
|
||||
stmia $t1,{$E-$H}
|
||||
|
||||
ittte ne
|
||||
movne $Xfer,sp
|
||||
ldrne $t1,[sp,#0]
|
||||
eorne $t2,$t2,$t2
|
||||
ldreq sp,[sp,#76] @ restore original sp
|
||||
itt ne
|
||||
eorne $t3,$B,$C
|
||||
bne .L_00_48
|
||||
|
||||
ldmia sp!,{r4-r12,pc}
|
||||
.size sha256_block_data_order_neon,.-sha256_block_data_order_neon
|
||||
#endif
|
||||
___
|
||||
}}}
|
||||
######################################################################
|
||||
# ARMv8 stuff
|
||||
#
|
||||
{{{
|
||||
my ($ABCD,$EFGH,$abcd)=map("q$_",(0..2));
|
||||
my @MSG=map("q$_",(8..11));
|
||||
my ($W0,$W1,$ABCD_SAVE,$EFGH_SAVE)=map("q$_",(12..15));
|
||||
my $Ktbl="r3";
|
||||
|
||||
$code.=<<___;
|
||||
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
|
||||
|
||||
# ifdef __thumb2__
|
||||
# define INST(a,b,c,d) .byte c,d|0xc,a,b
|
||||
# else
|
||||
# define INST(a,b,c,d) .byte a,b,c,d
|
||||
# endif
|
||||
|
||||
.type sha256_block_data_order_armv8,%function
|
||||
.align 5
|
||||
sha256_block_data_order_armv8:
|
||||
.LARMv8:
|
||||
vld1.32 {$ABCD,$EFGH},[$ctx]
|
||||
# ifdef __thumb2__
|
||||
adr $Ktbl,.LARMv8
|
||||
sub $Ktbl,$Ktbl,#.LARMv8-K256
|
||||
# else
|
||||
adrl $Ktbl,K256
|
||||
# endif
|
||||
add $len,$inp,$len,lsl#6 @ len to point at the end of inp
|
||||
|
||||
.Loop_v8:
|
||||
vld1.8 {@MSG[0]-@MSG[1]},[$inp]!
|
||||
vld1.8 {@MSG[2]-@MSG[3]},[$inp]!
|
||||
vld1.32 {$W0},[$Ktbl]!
|
||||
vrev32.8 @MSG[0],@MSG[0]
|
||||
vrev32.8 @MSG[1],@MSG[1]
|
||||
vrev32.8 @MSG[2],@MSG[2]
|
||||
vrev32.8 @MSG[3],@MSG[3]
|
||||
vmov $ABCD_SAVE,$ABCD @ offload
|
||||
vmov $EFGH_SAVE,$EFGH
|
||||
teq $inp,$len
|
||||
___
|
||||
for($i=0;$i<12;$i++) {
|
||||
$code.=<<___;
|
||||
vld1.32 {$W1},[$Ktbl]!
|
||||
vadd.i32 $W0,$W0,@MSG[0]
|
||||
sha256su0 @MSG[0],@MSG[1]
|
||||
vmov $abcd,$ABCD
|
||||
sha256h $ABCD,$EFGH,$W0
|
||||
sha256h2 $EFGH,$abcd,$W0
|
||||
sha256su1 @MSG[0],@MSG[2],@MSG[3]
|
||||
___
|
||||
($W0,$W1)=($W1,$W0); push(@MSG,shift(@MSG));
|
||||
}
|
||||
$code.=<<___;
|
||||
vld1.32 {$W1},[$Ktbl]!
|
||||
vadd.i32 $W0,$W0,@MSG[0]
|
||||
vmov $abcd,$ABCD
|
||||
sha256h $ABCD,$EFGH,$W0
|
||||
sha256h2 $EFGH,$abcd,$W0
|
||||
|
||||
vld1.32 {$W0},[$Ktbl]!
|
||||
vadd.i32 $W1,$W1,@MSG[1]
|
||||
vmov $abcd,$ABCD
|
||||
sha256h $ABCD,$EFGH,$W1
|
||||
sha256h2 $EFGH,$abcd,$W1
|
||||
|
||||
vld1.32 {$W1},[$Ktbl]
|
||||
vadd.i32 $W0,$W0,@MSG[2]
|
||||
sub $Ktbl,$Ktbl,#256-16 @ rewind
|
||||
vmov $abcd,$ABCD
|
||||
sha256h $ABCD,$EFGH,$W0
|
||||
sha256h2 $EFGH,$abcd,$W0
|
||||
|
||||
vadd.i32 $W1,$W1,@MSG[3]
|
||||
vmov $abcd,$ABCD
|
||||
sha256h $ABCD,$EFGH,$W1
|
||||
sha256h2 $EFGH,$abcd,$W1
|
||||
|
||||
vadd.i32 $ABCD,$ABCD,$ABCD_SAVE
|
||||
vadd.i32 $EFGH,$EFGH,$EFGH_SAVE
|
||||
it ne
|
||||
bne .Loop_v8
|
||||
|
||||
vst1.32 {$ABCD,$EFGH},[$ctx]
|
||||
|
||||
ret @ bx lr
|
||||
.size sha256_block_data_order_armv8,.-sha256_block_data_order_armv8
|
||||
#endif
|
||||
___
|
||||
}}}
|
||||
$code.=<<___;
|
||||
.asciz "SHA256 block transform for ARMv4/NEON/ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
|
||||
.align 2
|
||||
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
|
||||
.comm OPENSSL_armcap_P,4,4
|
||||
#endif
|
||||
___
|
||||
|
||||
open SELF,$0;
|
||||
while(<SELF>) {
|
||||
next if (/^#!/);
|
||||
last if (!s/^#/@/ and !/^$/);
|
||||
print;
|
||||
}
|
||||
close SELF;
|
||||
|
||||
{ my %opcode = (
|
||||
"sha256h" => 0xf3000c40, "sha256h2" => 0xf3100c40,
|
||||
"sha256su0" => 0xf3ba03c0, "sha256su1" => 0xf3200c40 );
|
||||
|
||||
sub unsha256 {
|
||||
my ($mnemonic,$arg)=@_;
|
||||
|
||||
if ($arg =~ m/q([0-9]+)(?:,\s*q([0-9]+))?,\s*q([0-9]+)/o) {
|
||||
my $word = $opcode{$mnemonic}|(($1&7)<<13)|(($1&8)<<19)
|
||||
|(($2&7)<<17)|(($2&8)<<4)
|
||||
|(($3&7)<<1) |(($3&8)<<2);
|
||||
# since ARMv7 instructions are always encoded little-endian.
|
||||
# correct solution is to use .inst directive, but older
|
||||
# assemblers don't implement it:-(
|
||||
sprintf "INST(0x%02x,0x%02x,0x%02x,0x%02x)\t@ %s %s",
|
||||
$word&0xff,($word>>8)&0xff,
|
||||
($word>>16)&0xff,($word>>24)&0xff,
|
||||
$mnemonic,$arg;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
foreach (split($/,$code)) {
|
||||
|
||||
s/\`([^\`]*)\`/eval $1/geo;
|
||||
|
||||
s/\b(sha256\w+)\s+(q.*)/unsha256($1,$2)/geo;
|
||||
|
||||
s/\bret\b/bx lr/go or
|
||||
s/\bbx\s+lr\b/.word\t0xe12fff1e/go; # make it possible to compile with -march=armv4
|
||||
|
||||
print $_,"\n";
|
||||
}
|
||||
|
||||
close STDOUT; # enforce flush
|
arch/arm/crypto/sha256-core.S_shipped (new file, 2808 lines)
File diff suppressed because it is too large.
arch/arm/crypto/sha256_glue.c (new file, 246 lines)
|
@ -0,0 +1,246 @@
|
|||
/*
|
||||
* Glue code for the SHA256 Secure Hash Algorithm assembly implementation
|
||||
* using optimized ARM assembler and NEON instructions.
|
||||
*
|
||||
* Copyright © 2015 Google Inc.
|
||||
*
|
||||
* This file is based on sha256_ssse3_glue.c:
|
||||
* Copyright (C) 2013 Intel Corporation
|
||||
* Author: Tim Chen <tim.c.chen@linux.intel.com>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <linux/crypto.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/mm.h>
|
||||
#include <linux/cryptohash.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/string.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/simd.h>
|
||||
#include <asm/neon.h>
|
||||
#include "sha256_glue.h"
|
||||
|
||||
asmlinkage void sha256_block_data_order(u32 *digest, const void *data,
|
||||
unsigned int num_blks);
|
||||
|
||||
|
||||
int sha256_init(struct shash_desc *desc)
|
||||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
sctx->state[0] = SHA256_H0;
|
||||
sctx->state[1] = SHA256_H1;
|
||||
sctx->state[2] = SHA256_H2;
|
||||
sctx->state[3] = SHA256_H3;
|
||||
sctx->state[4] = SHA256_H4;
|
||||
sctx->state[5] = SHA256_H5;
|
||||
sctx->state[6] = SHA256_H6;
|
||||
sctx->state[7] = SHA256_H7;
|
||||
sctx->count = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int sha224_init(struct shash_desc *desc)
|
||||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
sctx->state[0] = SHA224_H0;
|
||||
sctx->state[1] = SHA224_H1;
|
||||
sctx->state[2] = SHA224_H2;
|
||||
sctx->state[3] = SHA224_H3;
|
||||
sctx->state[4] = SHA224_H4;
|
||||
sctx->state[5] = SHA224_H5;
|
||||
sctx->state[6] = SHA224_H6;
|
||||
sctx->state[7] = SHA224_H7;
|
||||
sctx->count = 0;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int __sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len,
|
||||
unsigned int partial)
|
||||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
unsigned int done = 0;
|
||||
|
||||
sctx->count += len;
|
||||
|
||||
if (partial) {
|
||||
done = SHA256_BLOCK_SIZE - partial;
|
||||
memcpy(sctx->buf + partial, data, done);
|
||||
sha256_block_data_order(sctx->state, sctx->buf, 1);
|
||||
}
|
||||
|
||||
if (len - done >= SHA256_BLOCK_SIZE) {
|
||||
const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;
|
||||
|
||||
sha256_block_data_order(sctx->state, data + done, rounds);
|
||||
done += rounds * SHA256_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
memcpy(sctx->buf, data + done, len - done);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int sha256_update(struct shash_desc *desc, const u8 *data, unsigned int len)
|
||||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
|
||||
|
||||
/* Handle the fast case right here */
|
||||
if (partial + len < SHA256_BLOCK_SIZE) {
|
||||
sctx->count += len;
|
||||
memcpy(sctx->buf + partial, data, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
return __sha256_update(desc, data, len, partial);
|
||||
}
|
||||
|
||||
/* Add padding and return the message digest. */
|
||||
static int sha256_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
unsigned int i, index, padlen;
|
||||
__be32 *dst = (__be32 *)out;
|
||||
__be64 bits;
|
||||
static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
|
||||
|
||||
/* save number of bits */
|
||||
bits = cpu_to_be64(sctx->count << 3);
|
||||
|
||||
/* Pad out to 56 mod 64 and append length */
|
||||
index = sctx->count % SHA256_BLOCK_SIZE;
|
||||
padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);
|
||||
|
||||
/* We need to fill a whole block for __sha256_update */
|
||||
if (padlen <= 56) {
|
||||
sctx->count += padlen;
|
||||
memcpy(sctx->buf + index, padding, padlen);
|
||||
} else {
|
||||
__sha256_update(desc, padding, padlen, index);
|
||||
}
|
||||
__sha256_update(desc, (const u8 *)&bits, sizeof(bits), 56);
|
||||
|
||||
/* Store state in digest */
|
||||
for (i = 0; i < 8; i++)
|
||||
dst[i] = cpu_to_be32(sctx->state[i]);
|
||||
|
||||
/* Wipe context */
|
||||
memset(sctx, 0, sizeof(*sctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sha224_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
u8 D[SHA256_DIGEST_SIZE];
|
||||
|
||||
sha256_final(desc, D);
|
||||
|
||||
memcpy(out, D, SHA224_DIGEST_SIZE);
|
||||
memzero_explicit(D, SHA256_DIGEST_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int sha256_export(struct shash_desc *desc, void *out)
|
||||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
memcpy(out, sctx, sizeof(*sctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int sha256_import(struct shash_desc *desc, const void *in)
|
||||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
|
||||
memcpy(sctx, in, sizeof(*sctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static struct shash_alg algs[] = { {
|
||||
.digestsize = SHA256_DIGEST_SIZE,
|
||||
.init = sha256_init,
|
||||
.update = sha256_update,
|
||||
.final = sha256_final,
|
||||
.export = sha256_export,
|
||||
.import = sha256_import,
|
||||
.descsize = sizeof(struct sha256_state),
|
||||
.statesize = sizeof(struct sha256_state),
|
||||
.base = {
|
||||
.cra_name = "sha256",
|
||||
.cra_driver_name = "sha256-asm",
|
||||
.cra_priority = 150,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||
.cra_blocksize = SHA256_BLOCK_SIZE,
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
}, {
|
||||
.digestsize = SHA224_DIGEST_SIZE,
|
||||
.init = sha224_init,
|
||||
.update = sha256_update,
|
||||
.final = sha224_final,
|
||||
.export = sha256_export,
|
||||
.import = sha256_import,
|
||||
.descsize = sizeof(struct sha256_state),
|
||||
.statesize = sizeof(struct sha256_state),
|
||||
.base = {
|
||||
.cra_name = "sha224",
|
||||
.cra_driver_name = "sha224-asm",
|
||||
.cra_priority = 150,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||
.cra_blocksize = SHA224_BLOCK_SIZE,
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
} };
|
||||
|
||||
static int __init sha256_mod_init(void)
|
||||
{
|
||||
int res = crypto_register_shashes(algs, ARRAY_SIZE(algs));
|
||||
|
||||
if (res < 0)
|
||||
return res;
|
||||
|
||||
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon()) {
|
||||
res = crypto_register_shashes(sha256_neon_algs,
|
||||
ARRAY_SIZE(sha256_neon_algs));
|
||||
|
||||
if (res < 0)
|
||||
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
static void __exit sha256_mod_fini(void)
|
||||
{
|
||||
crypto_unregister_shashes(algs, ARRAY_SIZE(algs));
|
||||
|
||||
if (IS_ENABLED(CONFIG_KERNEL_MODE_NEON) && cpu_has_neon())
|
||||
crypto_unregister_shashes(sha256_neon_algs,
|
||||
ARRAY_SIZE(sha256_neon_algs));
|
||||
}
|
||||
|
||||
module_init(sha256_mod_init);
|
||||
module_exit(sha256_mod_fini);
|
||||
|
||||
MODULE_LICENSE("GPL");
|
||||
MODULE_DESCRIPTION("SHA256 Secure Hash Algorithm (ARM), including NEON");
|
||||
|
||||
MODULE_ALIAS("sha256");
|
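To sanity-check the newly registered SHA-256 drivers from user space, one option (a hedged sketch, not part of this patch, assuming CONFIG_CRYPTO_USER_API_HASH is enabled) is the AF_ALG hash socket interface; the crypto API picks whichever "sha256" implementation has the highest cra_priority:

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type   = "hash",
		.salg_name   = "sha256",
	};
	unsigned char digest[32];
	int tfm, op, i;

	tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (tfm < 0 || bind(tfm, (struct sockaddr *)&sa, sizeof(sa)) < 0)
		return 1;
	op = accept(tfm, NULL, 0);
	if (op < 0)
		return 1;

	write(op, "abc", 3);			/* feed the message */
	if (read(op, digest, sizeof(digest)) != sizeof(digest))
		return 1;

	for (i = 0; i < 32; i++)
		printf("%02x", digest[i]);	/* expect ba7816bf... for "abc" */
	printf("\n");
	return 0;
}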
arch/arm/crypto/sha256_glue.h (new file, 23 lines)
@@ -0,0 +1,23 @@
#ifndef _CRYPTO_SHA256_GLUE_H
#define _CRYPTO_SHA256_GLUE_H

#include <linux/crypto.h>
#include <crypto/sha.h>

extern struct shash_alg sha256_neon_algs[2];

extern int sha256_init(struct shash_desc *desc);

extern int sha224_init(struct shash_desc *desc);

extern int __sha256_update(struct shash_desc *desc, const u8 *data,
			   unsigned int len, unsigned int partial);

extern int sha256_update(struct shash_desc *desc, const u8 *data,
			 unsigned int len);

extern int sha256_export(struct shash_desc *desc, void *out);

extern int sha256_import(struct shash_desc *desc, const void *in);

#endif /* _CRYPTO_SHA256_GLUE_H */
arch/arm/crypto/sha256_neon_glue.c (new file, 173 lines)
|
@ -0,0 +1,173 @@
|
|||
/*
|
||||
* Glue code for the SHA256 Secure Hash Algorithm assembly implementation
|
||||
* using NEON instructions.
|
||||
*
|
||||
* Copyright © 2015 Google Inc.
|
||||
*
|
||||
* This file is based on sha512_neon_glue.c:
|
||||
* Copyright © 2014 Jussi Kivilinna <jussi.kivilinna@iki.fi>
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License as published by the Free
|
||||
* Software Foundation; either version 2 of the License, or (at your option)
|
||||
* any later version.
|
||||
*
|
||||
*/
|
||||
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <linux/cryptohash.h>
|
||||
#include <linux/module.h>
|
||||
#include <linux/types.h>
|
||||
#include <linux/string.h>
|
||||
#include <crypto/sha.h>
|
||||
#include <asm/byteorder.h>
|
||||
#include <asm/simd.h>
|
||||
#include <asm/neon.h>
|
||||
#include "sha256_glue.h"
|
||||
|
||||
asmlinkage void sha256_block_data_order_neon(u32 *digest, const void *data,
|
||||
unsigned int num_blks);
|
||||
|
||||
|
||||
static int __sha256_neon_update(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len, unsigned int partial)
|
||||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
unsigned int done = 0;
|
||||
|
||||
sctx->count += len;
|
||||
|
||||
if (partial) {
|
||||
done = SHA256_BLOCK_SIZE - partial;
|
||||
memcpy(sctx->buf + partial, data, done);
|
||||
sha256_block_data_order_neon(sctx->state, sctx->buf, 1);
|
||||
}
|
||||
|
||||
if (len - done >= SHA256_BLOCK_SIZE) {
|
||||
const unsigned int rounds = (len - done) / SHA256_BLOCK_SIZE;
|
||||
|
||||
sha256_block_data_order_neon(sctx->state, data + done, rounds);
|
||||
done += rounds * SHA256_BLOCK_SIZE;
|
||||
}
|
||||
|
||||
memcpy(sctx->buf, data + done, len - done);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sha256_neon_update(struct shash_desc *desc, const u8 *data,
|
||||
unsigned int len)
|
||||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
unsigned int partial = sctx->count % SHA256_BLOCK_SIZE;
|
||||
int res;
|
||||
|
||||
/* Handle the fast case right here */
|
||||
if (partial + len < SHA256_BLOCK_SIZE) {
|
||||
sctx->count += len;
|
||||
memcpy(sctx->buf + partial, data, len);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (!may_use_simd()) {
|
||||
res = __sha256_update(desc, data, len, partial);
|
||||
} else {
|
||||
kernel_neon_begin();
|
||||
res = __sha256_neon_update(desc, data, len, partial);
|
||||
kernel_neon_end();
|
||||
}
|
||||
|
||||
return res;
|
||||
}
|
||||
|
||||
/* Add padding and return the message digest. */
|
||||
static int sha256_neon_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
struct sha256_state *sctx = shash_desc_ctx(desc);
|
||||
unsigned int i, index, padlen;
|
||||
__be32 *dst = (__be32 *)out;
|
||||
__be64 bits;
|
||||
static const u8 padding[SHA256_BLOCK_SIZE] = { 0x80, };
|
||||
|
||||
/* save number of bits */
|
||||
bits = cpu_to_be64(sctx->count << 3);
|
||||
|
||||
/* Pad out to 56 mod 64 and append length */
|
||||
index = sctx->count % SHA256_BLOCK_SIZE;
|
||||
padlen = (index < 56) ? (56 - index) : ((SHA256_BLOCK_SIZE+56)-index);
|
||||
|
||||
if (!may_use_simd()) {
|
||||
sha256_update(desc, padding, padlen);
|
||||
sha256_update(desc, (const u8 *)&bits, sizeof(bits));
|
||||
} else {
|
||||
kernel_neon_begin();
|
||||
/* We need to fill a whole block for __sha256_neon_update() */
|
||||
if (padlen <= 56) {
|
||||
sctx->count += padlen;
|
||||
memcpy(sctx->buf + index, padding, padlen);
|
||||
} else {
|
||||
__sha256_neon_update(desc, padding, padlen, index);
|
||||
}
|
||||
__sha256_neon_update(desc, (const u8 *)&bits,
|
||||
sizeof(bits), 56);
|
||||
kernel_neon_end();
|
||||
}
|
||||
|
||||
/* Store state in digest */
|
||||
for (i = 0; i < 8; i++)
|
||||
dst[i] = cpu_to_be32(sctx->state[i]);
|
||||
|
||||
/* Wipe context */
|
||||
memzero_explicit(sctx, sizeof(*sctx));
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int sha224_neon_final(struct shash_desc *desc, u8 *out)
|
||||
{
|
||||
u8 D[SHA256_DIGEST_SIZE];
|
||||
|
||||
sha256_neon_final(desc, D);
|
||||
|
||||
memcpy(out, D, SHA224_DIGEST_SIZE);
|
||||
memzero_explicit(D, SHA256_DIGEST_SIZE);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
struct shash_alg sha256_neon_algs[] = { {
|
||||
.digestsize = SHA256_DIGEST_SIZE,
|
||||
.init = sha256_init,
|
||||
.update = sha256_neon_update,
|
||||
.final = sha256_neon_final,
|
||||
.export = sha256_export,
|
||||
.import = sha256_import,
|
||||
.descsize = sizeof(struct sha256_state),
|
||||
.statesize = sizeof(struct sha256_state),
|
||||
.base = {
|
||||
.cra_name = "sha256",
|
||||
.cra_driver_name = "sha256-neon",
|
||||
.cra_priority = 250,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||
.cra_blocksize = SHA256_BLOCK_SIZE,
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
}, {
|
||||
.digestsize = SHA224_DIGEST_SIZE,
|
||||
.init = sha224_init,
|
||||
.update = sha256_neon_update,
|
||||
.final = sha224_neon_final,
|
||||
.export = sha256_export,
|
||||
.import = sha256_import,
|
||||
.descsize = sizeof(struct sha256_state),
|
||||
.statesize = sizeof(struct sha256_state),
|
||||
.base = {
|
||||
.cra_name = "sha224",
|
||||
.cra_driver_name = "sha224-neon",
|
||||
.cra_priority = 250,
|
||||
.cra_flags = CRYPTO_ALG_TYPE_SHASH,
|
||||
.cra_blocksize = SHA224_BLOCK_SIZE,
|
||||
.cra_module = THIS_MODULE,
|
||||
}
|
||||
} };
|
@@ -459,6 +459,13 @@ config CRYPTO_SHA256
	  This code also includes SHA-224, a 224 bit hash with 112 bits
	  of security against collision attacks.

config CRYPTO_SHA256_ARM
	tristate "SHA-224/256 digest algorithm (ARM-asm and NEON)"
	select CRYPTO_HASH
	help
	  SHA-256 secure hash standard (DFIPS 180-2) implemented
	  using optimized ARM assembler and NEON, when available.

config CRYPTO_SHA512
	tristate "SHA384 and SHA512 digest algorithms"
	select CRYPTO_HASH
@@ -514,11 +514,14 @@ static void functionfs_closed_callback(struct ffs_data *ffs)
	mutex_unlock(&dev->mutex);
}

static int functionfs_check_dev_callback(const char *dev_name)
static void *functionfs_acquire_dev_callback(const char *dev_name)
{
	return 0;
}

static void functionfs_release_dev_callback(struct ffs_data *ffs_data)
{
}

struct adb_data {
	bool opened;
@@ -21,6 +21,7 @@
#include <linux/blkdev.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <linux/hid.h>
#include <asm/unaligned.h>

#include <linux/usb/composite.h>
@@ -934,6 +935,29 @@ static long ffs_epfile_ioctl(struct file *file, unsigned code,
	case FUNCTIONFS_ENDPOINT_REVMAP:
		ret = epfile->ep->num;
		break;
	case FUNCTIONFS_ENDPOINT_DESC:
	{
		int desc_idx;
		struct usb_endpoint_descriptor *desc;

		switch (epfile->ffs->gadget->speed) {
		case USB_SPEED_SUPER:
			desc_idx = 2;
			break;
		case USB_SPEED_HIGH:
			desc_idx = 1;
			break;
		default:
			desc_idx = 0;
		}
		desc = epfile->ep->descs[desc_idx];

		spin_unlock_irq(&epfile->ffs->eps_lock);
		ret = copy_to_user((void *)value, desc, sizeof(*desc));
		if (ret)
			ret = -EFAULT;
		return ret;
	}
	default:
		ret = -ENOTTY;
	}
@ -1035,25 +1059,19 @@ struct ffs_sb_fill_data {
|
|||
struct ffs_file_perms perms;
|
||||
umode_t root_mode;
|
||||
const char *dev_name;
|
||||
struct ffs_data *ffs_data;
|
||||
};
|
||||
|
||||
static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
|
||||
{
|
||||
struct ffs_sb_fill_data *data = _data;
|
||||
struct inode *inode;
|
||||
struct ffs_data *ffs;
|
||||
struct ffs_data *ffs = data->ffs_data;
|
||||
|
||||
ENTER();
|
||||
|
||||
/* Initialise data */
|
||||
ffs = ffs_data_new();
|
||||
if (unlikely(!ffs))
|
||||
goto Enomem;
|
||||
|
||||
ffs->sb = sb;
|
||||
ffs->dev_name = data->dev_name;
|
||||
ffs->file_perms = data->perms;
|
||||
|
||||
data->ffs_data = NULL;
|
||||
sb->s_fs_info = ffs;
|
||||
sb->s_blocksize = PAGE_CACHE_SIZE;
|
||||
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
|
||||
|
@ -1069,17 +1087,14 @@ static int ffs_sb_fill(struct super_block *sb, void *_data, int silent)
|
|||
&data->perms);
|
||||
sb->s_root = d_make_root(inode);
|
||||
if (unlikely(!sb->s_root))
|
||||
goto Enomem;
|
||||
return -ENOMEM;
|
||||
|
||||
/* EP0 file */
|
||||
if (unlikely(!ffs_sb_create_file(sb, "ep0", ffs,
|
||||
&ffs_ep0_operations, NULL)))
|
||||
goto Enomem;
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
|
||||
Enomem:
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
static int ffs_fs_parse_opts(struct ffs_sb_fill_data *data, char *opts)
|
||||
|
@ -1171,20 +1186,42 @@ ffs_fs_mount(struct file_system_type *t, int flags,
|
|||
},
|
||||
.root_mode = S_IFDIR | 0500,
|
||||
};
|
||||
struct dentry *rv;
|
||||
int ret;
|
||||
void *ffs_dev;
|
||||
struct ffs_data *ffs;
|
||||
|
||||
ENTER();
|
||||
|
||||
ret = functionfs_check_dev_callback(dev_name);
|
||||
if (unlikely(ret < 0))
|
||||
return ERR_PTR(ret);
|
||||
|
||||
ret = ffs_fs_parse_opts(&data, opts);
|
||||
if (unlikely(ret < 0))
|
||||
return ERR_PTR(ret);
|
||||
|
||||
data.dev_name = dev_name;
|
||||
return mount_single(t, flags, &data, ffs_sb_fill);
|
||||
ffs = ffs_data_new();
|
||||
if (unlikely(!ffs))
|
||||
return ERR_PTR(-ENOMEM);
|
||||
ffs->file_perms = data.perms;
|
||||
|
||||
ffs->dev_name = kstrdup(dev_name, GFP_KERNEL);
|
||||
if (unlikely(!ffs->dev_name)) {
|
||||
ffs_data_put(ffs);
|
||||
return ERR_PTR(-ENOMEM);
|
||||
}
|
||||
|
||||
ffs_dev = functionfs_acquire_dev_callback(dev_name);
|
||||
if (IS_ERR(ffs_dev)) {
|
||||
ffs_data_put(ffs);
|
||||
return ERR_CAST(ffs_dev);
|
||||
}
|
||||
ffs->private_data = ffs_dev;
|
||||
data.ffs_data = ffs;
|
||||
|
||||
rv = mount_nodev(t, flags, &data, ffs_sb_fill);
|
||||
if (IS_ERR(rv) && data.ffs_data) {
|
||||
functionfs_release_dev_callback(data.ffs_data);
|
||||
ffs_data_put(data.ffs_data);
|
||||
}
|
||||
return rv;
|
||||
}
|
||||
|
||||
static void
|
||||
|
@@ -1193,8 +1230,10 @@ ffs_fs_kill_sb(struct super_block *sb)
	ENTER();

	kill_litter_super(sb);
	if (sb->s_fs_info)
	if (sb->s_fs_info) {
		functionfs_release_dev_callback(sb->s_fs_info);
		ffs_data_put(sb->s_fs_info);
	}
}

static struct file_system_type ffs_fs_type = {
@@ -1261,6 +1300,7 @@ static void ffs_data_put(struct ffs_data *ffs)
		ffs_data_clear(ffs);
		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
		       waitqueue_active(&ffs->ep0req_completion.wait));
		kfree(ffs->dev_name);
		kfree(ffs);
	}
}
@@ -1640,6 +1680,12 @@ static int __must_check ffs_do_desc(char *data, unsigned len,
		}
		break;

	case HID_DT_HID:
		pr_vdebug("hid descriptor\n");
		if (length != sizeof(struct hid_descriptor))
			goto inv_length;
		break;

	case USB_DT_OTG:
		if (length != sizeof(struct usb_otg_descriptor))
			goto inv_length;
@ -67,6 +67,15 @@ MODULE_LICENSE("GPL");
|
|||
#define GFS_VENDOR_ID 0x1d6b /* Linux Foundation */
|
||||
#define GFS_PRODUCT_ID 0x0105 /* FunctionFS Gadget */
|
||||
|
||||
#define GFS_MAX_DEVS 10
|
||||
|
||||
struct gfs_ffs_obj {
|
||||
const char *name;
|
||||
bool mounted;
|
||||
bool desc_ready;
|
||||
struct ffs_data *ffs_data;
|
||||
};
|
||||
|
||||
static struct usb_device_descriptor gfs_dev_desc = {
|
||||
.bLength = sizeof gfs_dev_desc,
|
||||
.bDescriptorType = USB_DT_DEVICE,
|
||||
|
@ -78,12 +87,17 @@ static struct usb_device_descriptor gfs_dev_desc = {
|
|||
.idProduct = cpu_to_le16(GFS_PRODUCT_ID),
|
||||
};
|
||||
|
||||
static char *func_names[GFS_MAX_DEVS];
|
||||
static unsigned int func_num;
|
||||
|
||||
module_param_named(bDeviceClass, gfs_dev_desc.bDeviceClass, byte, 0644);
|
||||
MODULE_PARM_DESC(bDeviceClass, "USB Device class");
|
||||
module_param_named(bDeviceSubClass, gfs_dev_desc.bDeviceSubClass, byte, 0644);
|
||||
MODULE_PARM_DESC(bDeviceSubClass, "USB Device subclass");
|
||||
module_param_named(bDeviceProtocol, gfs_dev_desc.bDeviceProtocol, byte, 0644);
|
||||
MODULE_PARM_DESC(bDeviceProtocol, "USB Device protocol");
|
||||
module_param_array_named(functions, func_names, charp, &func_num, 0);
|
||||
MODULE_PARM_DESC(functions, "USB Functions list");
|
||||
|
||||
static const struct usb_descriptor_header *gfs_otg_desc[] = {
|
||||
(const struct usb_descriptor_header *)
|
||||
|
@ -158,13 +172,34 @@ static struct usb_composite_driver gfs_driver = {
|
|||
.iProduct = DRIVER_DESC,
|
||||
};
|
||||
|
||||
static struct ffs_data *gfs_ffs_data;
|
||||
static unsigned long gfs_registered;
|
||||
static DEFINE_MUTEX(gfs_lock);
|
||||
static unsigned int missing_funcs;
|
||||
static bool gfs_ether_setup;
|
||||
static bool gfs_registered;
|
||||
static bool gfs_single_func;
|
||||
static struct gfs_ffs_obj *ffs_tab;
|
||||
|
||||
static int __init gfs_init(void)
|
||||
{
|
||||
int i;
|
||||
|
||||
ENTER();
|
||||
|
||||
if (!func_num) {
|
||||
gfs_single_func = true;
|
||||
func_num = 1;
|
||||
}
|
||||
|
||||
ffs_tab = kcalloc(func_num, sizeof *ffs_tab, GFP_KERNEL);
|
||||
if (!ffs_tab)
|
||||
return -ENOMEM;
|
||||
|
||||
if (!gfs_single_func)
|
||||
for (i = 0; i < func_num; i++)
|
||||
ffs_tab[i].name = func_names[i];
|
||||
|
||||
missing_funcs = func_num;
|
||||
|
||||
return functionfs_init();
|
||||
}
|
||||
module_init(gfs_init);
|
||||
|
@ -172,63 +207,165 @@ module_init(gfs_init);
|
|||
static void __exit gfs_exit(void)
|
||||
{
|
||||
ENTER();
|
||||
mutex_lock(&gfs_lock);
|
||||
|
||||
if (test_and_clear_bit(0, &gfs_registered))
|
||||
if (gfs_registered)
|
||||
usb_composite_unregister(&gfs_driver);
|
||||
gfs_registered = false;
|
||||
|
||||
functionfs_cleanup();
|
||||
|
||||
mutex_unlock(&gfs_lock);
|
||||
kfree(ffs_tab);
|
||||
}
|
||||
module_exit(gfs_exit);
|
||||
|
||||
static int functionfs_ready_callback(struct ffs_data *ffs)
|
||||
static struct gfs_ffs_obj *gfs_find_dev(const char *dev_name)
|
||||
{
|
||||
int ret;
|
||||
int i;
|
||||
|
||||
ENTER();
|
||||
|
||||
if (WARN_ON(test_and_set_bit(0, &gfs_registered)))
|
||||
return -EBUSY;
|
||||
if (gfs_single_func)
|
||||
return &ffs_tab[0];
|
||||
|
||||
for (i = 0; i < func_num; i++)
|
||||
if (strcmp(ffs_tab[i].name, dev_name) == 0)
|
||||
return &ffs_tab[i];
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static int functionfs_ready_callback(struct ffs_data *ffs)
|
||||
{
|
||||
struct gfs_ffs_obj *ffs_obj;
|
||||
int ret;
|
||||
|
||||
ENTER();
|
||||
mutex_lock(&gfs_lock);
|
||||
|
||||
ffs_obj = ffs->private_data;
|
||||
if (!ffs_obj) {
|
||||
ret = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (WARN_ON(ffs_obj->desc_ready)) {
|
||||
ret = -EBUSY;
|
||||
goto done;
|
||||
}
|
||||
ffs_obj->desc_ready = true;
|
||||
ffs_obj->ffs_data = ffs;
|
||||
|
||||
if (--missing_funcs) {
|
||||
ret = 0;
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (gfs_registered) {
|
||||
ret = -EBUSY;
|
||||
goto done;
|
||||
}
|
||||
gfs_registered = true;
|
||||
|
||||
gfs_ffs_data = ffs;
|
||||
ret = usb_composite_probe(&gfs_driver, gfs_bind);
|
||||
if (unlikely(ret < 0))
|
||||
clear_bit(0, &gfs_registered);
|
||||
gfs_registered = false;
|
||||
|
||||
done:
|
||||
mutex_unlock(&gfs_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void functionfs_closed_callback(struct ffs_data *ffs)
|
||||
{
|
||||
struct gfs_ffs_obj *ffs_obj;
|
||||
|
||||
ENTER();
|
||||
mutex_lock(&gfs_lock);
|
||||
|
||||
if (test_and_clear_bit(0, &gfs_registered))
|
||||
ffs_obj = ffs->private_data;
|
||||
if (!ffs_obj)
|
||||
goto done;
|
||||
|
||||
ffs_obj->desc_ready = false;
|
||||
missing_funcs++;
|
||||
|
||||
if (gfs_registered)
|
||||
usb_composite_unregister(&gfs_driver);
|
||||
gfs_registered = false;
|
||||
|
||||
done:
|
||||
mutex_unlock(&gfs_lock);
|
||||
}
|
||||
|
||||
static int functionfs_check_dev_callback(const char *dev_name)
|
||||
static void *functionfs_acquire_dev_callback(const char *dev_name)
|
||||
{
|
||||
return 0;
|
||||
struct gfs_ffs_obj *ffs_dev;
|
||||
|
||||
ENTER();
|
||||
mutex_lock(&gfs_lock);
|
||||
|
||||
ffs_dev = gfs_find_dev(dev_name);
|
||||
if (!ffs_dev) {
|
||||
ffs_dev = ERR_PTR(-ENODEV);
|
||||
goto done;
|
||||
}
|
||||
|
||||
if (ffs_dev->mounted) {
|
||||
ffs_dev = ERR_PTR(-EBUSY);
|
||||
goto done;
|
||||
}
|
||||
ffs_dev->mounted = true;
|
||||
|
||||
done:
|
||||
mutex_unlock(&gfs_lock);
|
||||
return ffs_dev;
|
||||
}
|
||||
|
||||
static void functionfs_release_dev_callback(struct ffs_data *ffs_data)
|
||||
{
|
||||
struct gfs_ffs_obj *ffs_dev;
|
||||
|
||||
ENTER();
|
||||
mutex_lock(&gfs_lock);
|
||||
|
||||
ffs_dev = ffs_data->private_data;
|
||||
if (ffs_dev)
|
||||
ffs_dev->mounted = false;
|
||||
|
||||
mutex_unlock(&gfs_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
* It is assumed that gfs_bind is called from a context where gfs_lock is held
|
||||
*/
|
||||
static int gfs_bind(struct usb_composite_dev *cdev)
|
||||
{
|
||||
int ret, i;
|
||||
|
||||
ENTER();
|
||||
|
||||
if (WARN_ON(!gfs_ffs_data))
|
||||
if (missing_funcs)
|
||||
return -ENODEV;
|
||||
|
||||
ret = gether_setup(cdev->gadget, gfs_hostaddr);
|
||||
if (unlikely(ret < 0))
|
||||
goto error_quick;
|
||||
gfs_ether_setup = true;
|
||||
|
||||
ret = usb_string_ids_tab(cdev, gfs_strings);
|
||||
if (unlikely(ret < 0))
|
||||
goto error;
|
||||
|
||||
ret = functionfs_bind(gfs_ffs_data, cdev);
|
||||
if (unlikely(ret < 0))
|
||||
goto error;
|
||||
for (i = func_num; i--; ) {
|
||||
ret = functionfs_bind(ffs_tab[i].ffs_data, cdev);
|
||||
if (unlikely(ret < 0)) {
|
||||
while (++i < func_num)
|
||||
functionfs_unbind(ffs_tab[i].ffs_data);
|
||||
goto error;
|
||||
}
|
||||
}
|
||||
|
||||
for (i = 0; i < ARRAY_SIZE(gfs_configurations); ++i) {
|
||||
struct gfs_configuration *c = gfs_configurations + i;
|
||||
|
@ -246,16 +383,22 @@ static int gfs_bind(struct usb_composite_dev *cdev)
|
|||
return 0;
|
||||
|
||||
error_unbind:
|
||||
functionfs_unbind(gfs_ffs_data);
|
||||
for (i = 0; i < func_num; i++)
|
||||
functionfs_unbind(ffs_tab[i].ffs_data);
|
||||
error:
|
||||
gether_cleanup();
|
||||
error_quick:
|
||||
gfs_ffs_data = NULL;
|
||||
gfs_ether_setup = false;
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* It is assumed that gfs_unbind is called from a context where gfs_lock is held
|
||||
*/
|
||||
static int gfs_unbind(struct usb_composite_dev *cdev)
|
||||
{
|
||||
int i;
|
||||
|
||||
ENTER();
|
||||
|
||||
/*
|
||||
|
@ -266,22 +409,29 @@ static int gfs_unbind(struct usb_composite_dev *cdev)
|
|||
* from composite on orror recovery, but what you're gonna
|
||||
* do...?
|
||||
*/
|
||||
if (gfs_ffs_data) {
|
||||
if (gfs_ether_setup)
|
||||
gether_cleanup();
|
||||
functionfs_unbind(gfs_ffs_data);
|
||||
gfs_ffs_data = NULL;
|
||||
}
|
||||
gfs_ether_setup = false;
|
||||
|
||||
for (i = func_num; i--; )
|
||||
if (ffs_tab[i].ffs_data)
|
||||
functionfs_unbind(ffs_tab[i].ffs_data);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/*
|
||||
* It is assumed that gfs_do_config is called from a context where
|
||||
* gfs_lock is held
|
||||
*/
|
||||
static int gfs_do_config(struct usb_configuration *c)
|
||||
{
|
||||
struct gfs_configuration *gc =
|
||||
container_of(c, struct gfs_configuration, c);
|
||||
int i;
|
||||
int ret;
|
||||
|
||||
if (WARN_ON(!gfs_ffs_data))
|
||||
if (missing_funcs)
|
||||
return -ENODEV;
|
||||
|
||||
if (gadget_is_otg(c->cdev->gadget)) {
|
||||
|
@ -295,9 +445,11 @@ static int gfs_do_config(struct usb_configuration *c)
|
|||
return ret;
|
||||
}
|
||||
|
||||
ret = functionfs_bind_config(c->cdev, c, gfs_ffs_data);
|
||||
if (unlikely(ret < 0))
|
||||
return ret;
|
||||
for (i = 0; i < func_num; i++) {
|
||||
ret = functionfs_bind_config(c->cdev, c, ffs_tab[i].ffs_data);
|
||||
if (unlikely(ret < 0))
|
||||
return ret;
|
||||
}
|
||||
|
||||
/*
|
||||
* After previous do_configs there may be some invalid
|
||||
|
|
|
@@ -3152,6 +3152,5 @@ static struct dentry *nfs4_referral_mount(struct file_system_type *fs_type,
	return res;
}

MODULE_ALIAS("nfs4");

#endif /* CONFIG_NFS_V4 */
@@ -40,6 +40,7 @@
#define F_SEAL_SHRINK		0x0002	/* prevent file from shrinking */
#define F_SEAL_GROW		0x0004	/* prevent file from growing */
#define F_SEAL_WRITE		0x0008	/* prevent writes */
#define F_SEAL_FUTURE_WRITE	0x0010	/* prevent future writes */
/* (1U << 31) is reserved for signed error codes */

/*
@@ -164,6 +164,11 @@ struct usb_functionfs_event {
 */
#define FUNCTIONFS_ENDPOINT_REVMAP	_IO('g', 129)

/*
 * Returns endpoint descriptor. If function is not active returns -ENODEV.
 */
#define FUNCTIONFS_ENDPOINT_DESC	_IOR('g', 130, \
					     struct usb_endpoint_descriptor)

#ifdef __KERNEL__

@@ -190,8 +195,10 @@ static int functionfs_ready_callback(struct ffs_data *ffs)
	__attribute__((warn_unused_result, nonnull));
static void functionfs_closed_callback(struct ffs_data *ffs)
	__attribute__((nonnull));
static int functionfs_check_dev_callback(const char *dev_name)
static void *functionfs_acquire_dev_callback(const char *dev_name)
	__attribute__((warn_unused_result, nonnull));
static void functionfs_release_dev_callback(struct ffs_data *ffs_data)
	__attribute__((nonnull));


#endif
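A hedged user space sketch of the new FUNCTIONFS_ENDPOINT_DESC ioctl on an already-open "ep#" file; the helper name and the output format are illustrative only:

#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/usb/ch9.h>
#include <linux/usb/functionfs.h>

/* Returns 0 on success; fails (e.g. with ENODEV) while the function is not active. */
static int print_real_endpoint(int epfd)
{
	struct usb_endpoint_descriptor desc;

	if (ioctl(epfd, FUNCTIONFS_ENDPOINT_DESC, &desc) < 0)
		return -1;

	printf("real endpoint address 0x%02x, wMaxPacketSize %u\n",
	       desc.bEndpointAddress, desc.wMaxPacketSize);
	return 0;
}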
mm/shmem.c (29 lines changed)
@@ -1427,6 +1427,25 @@ out_nomem:

static int shmem_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct shmem_inode_info *info = SHMEM_I(inode);

	if (info->seals & F_SEAL_FUTURE_WRITE) {
		/*
		 * New PROT_WRITE and MAP_SHARED mmaps are not allowed when
		 * "future write" seal active.
		 */
		if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
			return -EPERM;

		/*
		 * Since the F_SEAL_FUTURE_WRITE seals allow for a MAP_SHARED
		 * read-only mapping, take care to not allow mprotect to revert
		 * protections.
		 */
		vma->vm_flags &= ~(VM_MAYWRITE);
	}

	file_accessed(file);
	vma->vm_ops = &shmem_vm_ops;
	return 0;
@@ -1511,8 +1530,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
	pgoff_t index = pos >> PAGE_CACHE_SHIFT;

	/* i_mutex is held by caller */
	if (unlikely(info->seals)) {
		if (info->seals & F_SEAL_WRITE)
	if (unlikely(info->seals & (F_SEAL_GROW |
				    F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))) {
		if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE))
			return -EPERM;
		if ((info->seals & F_SEAL_GROW) && pos + len > inode->i_size)
			return -EPERM;
@@ -2004,7 +2024,8 @@ continue_resched:
#define F_ALL_SEALS (F_SEAL_SEAL | \
		     F_SEAL_SHRINK | \
		     F_SEAL_GROW | \
		     F_SEAL_WRITE)
		     F_SEAL_WRITE | \
		     F_SEAL_FUTURE_WRITE)

int shmem_add_seals(struct file *file, unsigned int seals)
{
@@ -2128,7 +2149,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);

	/* protected by i_mutex */
	if (info->seals & F_SEAL_WRITE) {
	if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
		error = -EPERM;
		goto out;
	}
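A hedged user space sketch of how the new F_SEAL_FUTURE_WRITE seal might be used on a memfd. Not part of this patch: it assumes a libc exposing memfd_create(), and the seal constant is defined locally in case the installed headers predate it (the value matches the fcntl.h hunk above):

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef F_SEAL_FUTURE_WRITE
#define F_SEAL_FUTURE_WRITE 0x0010	/* same value as added above */
#endif

/* Create a buffer the caller can still read, but that nobody (including the
 * caller) can write to through new mappings or write() any more. */
int make_sealed_buffer(const void *data, size_t len)
{
	int fd = memfd_create("sealed-buf", MFD_ALLOW_SEALING);

	if (fd < 0)
		return -1;
	if (write(fd, data, len) != (ssize_t)len)
		goto fail;
	if (fcntl(fd, F_ADD_SEALS, F_SEAL_FUTURE_WRITE) < 0)
		goto fail;

	return fd;	/* safe to hand out to an untrusted reader */
fail:
	close(fd);
	return -1;
}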