bludgeon the flounder kernel until it builds on i386 for qemu testing
Change-Id: Ib0a45f164301d18629fca0c89a1b17f7a435a8d4
Signed-off-by: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Theodore Ts'o <tytso@google.com>
Git-commit: 9be223506831100e234f46103355310e479be9c0
Git-repo: https://android.googlesource.com/kernel/common.git
Signed-off-by: Kaushal Kumar <kaushalk@codeaurora.org>
@@ -660,6 +660,10 @@ static inline void cpu_relax(void)
 	rep_nop();
 }
 
+#ifndef cpu_read_relax
+#define cpu_read_relax() cpu_relax()
+#endif
+
 /* Stop speculative execution and prefetching of modified code. */
 static inline void sync_core(void)
 {
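The new cpu_read_relax() hook defaults to plain cpu_relax(); an arch can override it before this point. A minimal usage sketch follows; the completion flag and loop are illustrative, not part of this commit (ACCESS_ONCE() matches the kernel API of this era):

	/* busy-wait until the other side sets the flag, easing off
	 * the pipeline between polls */
	while (!ACCESS_ONCE(done_flag))
		cpu_read_relax();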
@@ -0,0 +1,22 @@
+/*
+ * x86/include/asm/relaxed.h
+ *
+ * (copied from arm/include/asm/relaxed.h)
+ *
+ * Copyright (c) 2014 NVIDIA Corporation. All rights reserved.
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _ASM_RELAXED_H_
+#define _ASM_RELAXED_H_
+
+#include <asm-generic/relaxed.h>
+
+#endif /*_ASM_RELAXED_H_*/
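asm-generic/relaxed.h itself is not among the hunks shown on this page. Judging from the #ifndef guard added to processor.h above, it presumably supplies pass-through defaults along these lines (an assumption, sketched here, not the actual header contents):

	#ifndef _ASM_GENERIC_RELAXED_H
	#define _ASM_GENERIC_RELAXED_H

	#ifndef cpu_read_relax
	#define cpu_read_relax() cpu_relax()
	#endif

	#endif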
@@ -1,3 +1,4 @@
+#if !defined(__i386__) && !defined(__amd64__)
 /*
  * Coherent per-device memory handling.
  * Borrowed from i386
@@ -218,3 +219,228 @@ int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
 	return 0;
 }
 EXPORT_SYMBOL(dma_mmap_from_coherent);
+#else
+/* CMA is a wretched hive of scum and villany --- and also doesn't
+ * compile on x86 */
+
+/*
+ * Coherent per-device memory handling.
+ * Borrowed from i386
+ */
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/dma-mapping.h>
+
+struct dma_coherent_mem {
+	void		*virt_base;
+	dma_addr_t	device_base;
+	phys_addr_t	pfn_base;
+	int		size;
+	int		flags;
+	unsigned long	*bitmap;
+};
+
+int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+				dma_addr_t device_addr, size_t size, int flags)
+{
+	void __iomem *mem_base = NULL;
+	int pages = size >> PAGE_SHIFT;
+	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
+
+	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
+		goto out;
+	if (!size)
+		goto out;
+	if (dev->dma_mem)
+		goto out;
+
+	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
+
+	mem_base = ioremap(bus_addr, size);
+	if (!mem_base)
+		goto out;
+
+	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
+	if (!dev->dma_mem)
+		goto out;
+	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
+	if (!dev->dma_mem->bitmap)
+		goto free1_out;
+
+	dev->dma_mem->virt_base = mem_base;
+	dev->dma_mem->device_base = device_addr;
+	dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
+	dev->dma_mem->size = pages;
+	dev->dma_mem->flags = flags;
+
+	if (flags & DMA_MEMORY_MAP)
+		return DMA_MEMORY_MAP;
+
+	return DMA_MEMORY_IO;
+
+ free1_out:
+	kfree(dev->dma_mem);
+ out:
+	if (mem_base)
+		iounmap(mem_base);
+	return 0;
+}
+EXPORT_SYMBOL(dma_declare_coherent_memory);
+
+void dma_release_declared_memory(struct device *dev)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+
+	if (!mem)
+		return;
+	dev->dma_mem = NULL;
+	iounmap(mem->virt_base);
+	kfree(mem->bitmap);
+	kfree(mem);
+}
+EXPORT_SYMBOL(dma_release_declared_memory);
+
+void *dma_mark_declared_memory_occupied(struct device *dev,
+					dma_addr_t device_addr, size_t size)
+{
+	struct dma_coherent_mem *mem = dev->dma_mem;
+	int pos, err;
+
+	size += device_addr & ~PAGE_MASK;
+
+	if (!mem)
+		return ERR_PTR(-EINVAL);
+
+	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
+	err = bitmap_allocate_region(mem->bitmap, pos, get_order(size));
+	if (err != 0)
+		return ERR_PTR(err);
+	return mem->virt_base + (pos << PAGE_SHIFT);
+}
+EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
+
+/**
+ * dma_alloc_from_coherent() - try to allocate memory from the per-device coherent area
+ *
+ * @dev: device from which we allocate memory
+ * @size: size of requested memory area
+ * @dma_handle: This will be filled with the correct dma handle
+ * @ret: This pointer will be filled with the virtual address
+ * to allocated area.
+ *
+ * This function should be only called from per-arch dma_alloc_coherent()
+ * to support allocation from per-device coherent memory pools.
+ *
+ * Returns 0 if dma_alloc_coherent should continue with allocating from
+ * generic memory areas, or !0 if dma_alloc_coherent should return @ret.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+			    dma_addr_t *dma_handle, void **ret)
+{
+	struct dma_coherent_mem *mem;
+	int order = get_order(size);
+	int pageno;
+
+	if (!dev)
+		return 0;
+	mem = dev->dma_mem;
+	if (!mem)
+		return 0;
+
+	*ret = NULL;
+
+	if (unlikely(size > (mem->size << PAGE_SHIFT)))
+		goto err;
+
+	pageno = bitmap_find_free_region(mem->bitmap, mem->size, order);
+	if (unlikely(pageno < 0))
+		goto err;
+
+	/*
+	 * Memory was found in the per-device area.
+	 */
+	*dma_handle = mem->device_base + (pageno << PAGE_SHIFT);
+	*ret = mem->virt_base + (pageno << PAGE_SHIFT);
+	memset(*ret, 0, size);
+
+	return 1;
+
+err:
+	/*
+	 * In the case where the allocation can not be satisfied from the
+	 * per-device area, try to fall back to generic memory if the
+	 * constraints allow it.
+	 */
+	return mem->flags & DMA_MEMORY_EXCLUSIVE;
+}
+EXPORT_SYMBOL(dma_alloc_from_coherent);
+
+/**
+ * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
+ * @dev: device from which the memory was allocated
+ * @order: the order of pages allocated
+ * @vaddr: virtual address of allocated pages
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, releases that memory.
+ *
+ * Returns 1 if we correctly released the memory, or 0 if
+ * dma_release_coherent() should proceed with releasing memory from
+ * generic pools.
+ */
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr <
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+
+		bitmap_release_region(mem->bitmap, page, order);
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dma_release_from_coherent);
+
+/**
+ * dma_mmap_from_coherent() - try to mmap the memory allocated from
+ * per-device coherent memory pool to userspace
+ * @dev: device from which the memory was allocated
+ * @vma: vm_area for the userspace memory
+ * @vaddr: cpu address returned by dma_alloc_from_coherent
+ * @size: size of the memory buffer allocated by dma_alloc_from_coherent
+ * @ret: result from remap_pfn_range()
+ *
+ * This checks whether the memory was allocated from the per-device
+ * coherent memory pool and if so, maps that memory to the provided vma.
+ *
+ * Returns 1 if we correctly mapped the memory, or 0 if the caller should
+ * proceed with mapping memory from generic pools.
+ */
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+			   void *vaddr, size_t size, int *ret)
+{
+	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
+
+	if (mem && vaddr >= mem->virt_base && vaddr + size <=
+		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
+		unsigned long off = vma->vm_pgoff;
+		int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
+		int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+		int count = size >> PAGE_SHIFT;
+
+		*ret = -ENXIO;
+		if (off < count && user_count <= count - off) {
+			unsigned pfn = mem->pfn_base + start + off;
+			*ret = remap_pfn_range(vma, vma->vm_start, pfn,
+					       user_count << PAGE_SHIFT,
+					       vma->vm_page_prot);
+		}
+		return 1;
+	}
+	return 0;
+}
+EXPORT_SYMBOL(dma_mmap_from_coherent);
+#endif
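For reference, the driver-facing flow these exports support looks roughly like the sketch below. The probe function, window address, and sizes are made up for illustration, and dma_alloc_coherent() is expected to consult dma_alloc_from_coherent() internally:

	/* hypothetical driver probe: carve out a 1 MiB device-local
	 * window, then allocate coherent buffers from it */
	static int example_probe(struct platform_device *pdev)
	{
		dma_addr_t handle;
		void *buf;

		if (!dma_declare_coherent_memory(&pdev->dev, 0x80000000,
						 0x80000000, SZ_1M,
						 DMA_MEMORY_MAP))
			return -ENOMEM;

		buf = dma_alloc_coherent(&pdev->dev, SZ_4K, &handle,
					 GFP_KERNEL);
		if (!buf) {
			dma_release_declared_memory(&pdev->dev);
			return -ENOMEM;
		}
		/* use buf/handle; free with dma_free_coherent() */
		return 0;
	}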
@@ -1,3 +1,5 @@
+#if !defined(__i386__) && !defined(__amd64__)
+
 #ifndef DMA_COHERENT_H
 #define DMA_COHERENT_H
 
@@ -32,4 +34,47 @@ dma_mark_declared_memory_occupied(struct device *dev,
 #define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
 #endif
 
 #endif
+
+#else /* CMA is a crutch for brain-damaged hardware that causes untold pain */
+
+#ifndef DMA_COHERENT_H
+#define DMA_COHERENT_H
+
+#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
+/*
+ * These three functions are only for dma allocator.
+ * Don't use them in device drivers.
+ */
+int dma_alloc_from_coherent(struct device *dev, ssize_t size,
+			    dma_addr_t *dma_handle, void **ret);
+int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
+
+int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
+			   void *cpu_addr, size_t size, int *ret);
+/*
+ * Standard interface
+ */
+#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int
+dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+			    dma_addr_t device_addr, size_t size, int flags);
+
+extern void
+dma_release_declared_memory(struct device *dev);
+
+extern void *
+dma_mark_declared_memory_occupied(struct device *dev,
+				  dma_addr_t device_addr, size_t size);
+#else
+#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
+#define dma_release_from_coherent(dev, order, vaddr) (0)
+#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
+#endif
+
+#endif
+
+
+
+
+#endif
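As the header comment says, the three dma_*_from_coherent() calls are allocator hooks, not driver API. The intended calling convention, sketched under the assumption of a generic arch allocator (arch_dma_alloc_coherent() and arch_fallback_alloc() are placeholder names):

	void *arch_dma_alloc_coherent(struct device *dev, size_t size,
				      dma_addr_t *handle, gfp_t gfp)
	{
		void *ret;

		/* the per-device pool gets first refusal; a nonzero
		 * return means it answered, even if *ret is NULL */
		if (dma_alloc_from_coherent(dev, size, handle, &ret))
			return ret;

		/* otherwise fall back to the generic page allocator */
		return arch_fallback_alloc(dev, size, handle, gfp);
	}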
@@ -184,6 +184,7 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 	__SYSCALL_DEFINEx(x, sname, __VA_ARGS__)
 
 #define __PROTECT(...) asmlinkage_protect(__VA_ARGS__)
+#if !defined(__i386) && !defined(__amd64__)
 #define __SYSCALL_DEFINEx(x, name, ...)					\
 	asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
 	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
@@ -196,6 +197,21 @@ extern struct trace_event_functions exit_syscall_print_funcs;
 	}								\
 	SYSCALL_ALIAS(sys##name, SyS##name);				\
 	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+#else
+#define __SYSCALL_DEFINEx(x, name, ...)					\
+	asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
+	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__));	\
+	asmlinkage long SyS##name(__MAP(x,__SC_LONG,__VA_ARGS__))	\
+	{								\
+		long ret;						\
+		ret = SYSC##name(__MAP(x,__SC_CAST,__VA_ARGS__));	\
+		__MAP(x,__SC_TEST,__VA_ARGS__);				\
+		__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__));	\
+		return ret;						\
+	}								\
+	SYSCALL_ALIAS(sys##name, SyS##name);				\
+	static inline long SYSC##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+#endif
 
 asmlinkage long sys_time(time_t __user *tloc);
 asmlinkage long sys_stime(time_t __user *tptr);
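Both branches give SYSCALL_DEFINEn the same C-level shape. As an illustration (slightly simplified; the __SC_TEST type checks are omitted), SYSCALL_DEFINE2(dup2, unsigned int, oldfd, unsigned int, newfd) expands to roughly:

	asmlinkage long sys_dup2(unsigned int oldfd, unsigned int newfd);
	static inline long SYSC_dup2(unsigned int oldfd, unsigned int newfd);
	asmlinkage long SyS_dup2(long oldfd, long newfd)
	{
		long ret = SYSC_dup2((unsigned int)oldfd,
				     (unsigned int)newfd);
		asmlinkage_protect(2, ret, oldfd, newfd);
		return ret;
	}
	SYSCALL_ALIAS(sys_dup2, SyS_dup2);
	static inline long SYSC_dup2(unsigned int oldfd, unsigned int newfd)
	/* { body supplied at the macro invocation site } */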
@@ -29,6 +29,7 @@
 #include <linux/kmemleak.h>
 #include <linux/atomic.h>
 #include <linux/llist.h>
+#include <linux/sizes.h>
 #include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/shmparam.h>
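<linux/sizes.h> only provides the SZ_* size constants (SZ_4K is 0x1000, SZ_1M is 0x100000, and so on), presumably wanted by a later vmalloc.c hunk not shown on this page; typical use:

	void *p = vmalloc(SZ_1M);	/* 1 MiB virtually-contiguous buffer */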