Mirror of https://github.com/team-infusion-developers/android_kernel_samsung_msm8976.git (synced 2024-11-01 02:21:16 +00:00)
36126f8f2e
This changes the interfaces in <asm/word-at-a-time.h> to be a bit more complicated, but a lot more generic. In particular, it allows us to really do the operations efficiently on both little-endian and big-endian machines, pretty much regardless of machine details. For example, if you can rely on a fast population count instruction on your architecture, this will allow you to make your optimized <asm/word-at-a-time.h> file with that.

NOTE! The "generic" version in include/asm-generic/word-at-a-time.h is not truly generic, it actually only works on big-endian. Why? Because on little-endian the generic algorithms are wasteful, since you can inevitably do better. The x86 implementation is an example of that. (The only truly non-generic part of the asm-generic implementation is the "find_zero()" function, and you could make a little-endian version of it. And if the Kbuild infrastructure allowed us to pick a particular header file, that would be lovely)

The <asm/word-at-a-time.h> functions are as follows:

 - WORD_AT_A_TIME_CONSTANTS: specific constants that the algorithm uses.

 - has_zero(): take a word, and determine if it has a zero byte in it. It gets the word, the pointer to the constant pool, and a pointer to an intermediate "data" field it can set. This is the "quick-and-dirty" zero tester: it's what is run inside the hot loops.

 - "prep_zero_mask()": take the word, the data that has_zero() produced, and the constant pool, and generate an *exact* mask of which byte had the first zero. This is run directly *outside* the loop, and allows the "has_zero()" function to answer the "is there a zero byte" question without necessarily getting exactly *which* byte is the first one to contain a zero. If you do multiple byte lookups concurrently (eg "hash_name()", which looks for both NUL and '/' bytes), after you've done the prep_zero_mask() phase, the result of those can be or'ed together to get the "either or" case.

 - The result from "prep_zero_mask()" can then be fed into "find_zero()" (to find the byte offset of the first byte that was zero) or into "zero_bytemask()" (to find the bytemask of the bytes preceding the zero byte). The existence of zero_bytemask() is optional, and is not necessary for the normal string routines. But dentry name hashing needs it, so if you enable DENTRY_WORD_AT_A_TIME you need to expose it.

This changes the generic strncpy_from_user() function and the dentry hashing functions to use these modified word-at-a-time interfaces. This gets us back to the optimized state of the x86 strncpy that we lost in the previous commit when moving over to the generic version.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
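To make the calling sequence described above concrete, here is a minimal sketch, not taken from this commit, of scanning a single word for either a NUL or a '/' byte in the spirit of the hash_name() case mentioned above. It assumes the kernel's <asm/word-at-a-time.h> interfaces shown in the file below and the REPEAT_BYTE() macro from <linux/kernel.h>; the helper name find_nul_or_slash() is hypothetical.

#include <linux/kernel.h>               /* REPEAT_BYTE() */
#include <asm/word-at-a-time.h>

/*
 * Hypothetical helper: return the byte offset of the first NUL or '/'
 * byte in the word 'a', or sizeof(unsigned long) if neither occurs.
 */
static inline unsigned long find_nul_or_slash(unsigned long a)
{
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        unsigned long b = a ^ REPEAT_BYTE('/');  /* '/' bytes become zero in b */
        unsigned long adata, bdata, mask;

        /* Quick-and-dirty test: the only part meant to run in a hot loop */
        if (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)))
                return sizeof(unsigned long);

        /* Outside the loop: refine both results and OR the exact masks */
        adata = prep_zero_mask(a, adata, &constants);
        bdata = prep_zero_mask(b, bdata, &constants);
        mask = create_zero_mask(adata | bdata);

        /* zero_bytemask(mask) would cover the bytes preceding the match */
        return find_zero(mask);
}

The bitwise | between the two has_zero() calls is deliberate: both calls must run so that adata and bdata are both filled in before their prepared masks are or'ed together, which is exactly the "either or" case the commit message describes.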
113 lines · 2.9 KiB · C
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/errno.h>

#include <asm/byteorder.h>
#include <asm/word-at-a-time.h>

#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
#define IS_UNALIGNED(src, dst)  0
#else
#define IS_UNALIGNED(src, dst)  \
        (((long) dst | (long) src) & (sizeof(long) - 1))
#endif

/*
 * Do a strncpy, return length of string without final '\0'.
 * 'count' is the user-supplied count (return 'count' if we
 * hit it), 'max' is the address space maximum (and we return
 * -EFAULT if we hit it).
 */
static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
{
        const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
        long res = 0;

        /*
         * Truncate 'max' to the user-specified limit, so that
         * we only have one limit we need to check in the loop
         */
        if (max > count)
                max = count;

        if (IS_UNALIGNED(src, dst))
                goto byte_at_a_time;

        while (max >= sizeof(unsigned long)) {
                unsigned long c, data;

                /* Fall back to byte-at-a-time if we get a page fault */
                if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
                        break;
                *(unsigned long *)(dst+res) = c;
                if (has_zero(c, &data, &constants)) {
                        data = prep_zero_mask(c, data, &constants);
                        data = create_zero_mask(data);
                        return res + find_zero(data);
                }
                res += sizeof(unsigned long);
                max -= sizeof(unsigned long);
        }

byte_at_a_time:
        while (max) {
                char c;

                if (unlikely(__get_user(c,src+res)))
                        return -EFAULT;
                dst[res] = c;
                if (!c)
                        return res;
                res++;
                max--;
        }

        /*
         * Uhhuh. We hit 'max'. But was that the user-specified maximum
         * too? If so, that's ok - we got as much as the user asked for.
         */
        if (res >= count)
                return res;

        /*
         * Nope: we hit the address space limit, and we still had more
         * characters the caller would have wanted. That's an EFAULT.
         */
        return -EFAULT;
}

/**
 * strncpy_from_user: - Copy a NUL terminated string from userspace.
 * @dst:   Destination address, in kernel space.  This buffer must be at
 *         least @count bytes long.
 * @src:   Source address, in user space.
 * @count: Maximum number of bytes to copy, including the trailing NUL.
 *
 * Copies a NUL-terminated string from userspace to kernel space.
 *
 * On success, returns the length of the string (not including the trailing
 * NUL).
 *
 * If access to userspace fails, returns -EFAULT (some data may have been
 * copied).
 *
 * If @count is smaller than the length of the string, copies @count bytes
 * and returns @count.
 */
long strncpy_from_user(char *dst, const char __user *src, long count)
{
        unsigned long max_addr, src_addr;

        if (unlikely(count <= 0))
                return 0;

        max_addr = user_addr_max();
        src_addr = (unsigned long)src;
        if (likely(src_addr < max_addr)) {
                unsigned long max = max_addr - src_addr;
                return do_strncpy_from_user(dst, src, count, max);
        }
        return -EFAULT;
}
EXPORT_SYMBOL(strncpy_from_user);
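For reference, a hedged usage sketch of the exported function from a hypothetical caller; the function name copy_name_from_user(), the 64-byte buffer, and the error policy are illustrative and not part of this file:

#include <linux/errno.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

/* Hypothetical caller: copy a user-supplied name into a small kernel buffer. */
static long copy_name_from_user(const char __user *uname)
{
        char buf[64];
        long len;

        len = strncpy_from_user(buf, uname, sizeof(buf));
        if (len < 0)
                return len;             /* -EFAULT: faulted inside the user range */
        if (len == sizeof(buf))
                return -ENAMETOOLONG;   /* hit 'count' before the trailing NUL */

        /* buf is now a NUL-terminated string of length 'len' */
        pr_info("got name: %s\n", buf);
        return len;
}

Per the kernel-doc above, a return value equal to the buffer size means the copy stopped at 'count' before reaching the NUL, so the buffer is not terminated and must not be used as a string without further handling.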