arm: Add option to skip buffer zeroing

The DMA framework currently zeroes all buffers because it (rightfully
so) assumes that drivers will soon need to pass the memory to a
device. Some devices/use cases may not require zeroed memory, and
skipping the zeroing can yield a performance gain. Add a DMA_ATTR to
allow callers to skip DMA buffer zeroing.
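
As an illustrative sketch (not part of this patch; the helper name
alloc_unzeroed and the 64K size are hypothetical), a driver that fully
overwrites its buffer before use could request an unzeroed allocation
roughly like this:

	#include <linux/dma-mapping.h>
	#include <linux/dma-attrs.h>
	#include <linux/sizes.h>

	static void *alloc_unzeroed(struct device *dev, dma_addr_t *dma_handle)
	{
		DEFINE_DMA_ATTRS(attrs);

		/* Skip zeroing: the caller must overwrite the buffer
		 * before any of it is read, so stale data never leaks.
		 */
		dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);

		return dma_alloc_attrs(dev, SZ_64K, dma_handle, GFP_KERNEL,
				       &attrs);
	}

Note that skipping the zeroing is only safe when the stale buffer
contents can never be exposed to userspace or across security domains.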

Change-Id: Ib78a19cb341c3c441f91d5b004c6375c80c10413
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Author: Laura Abbott <lauraa@codeaurora.org>
Date:   2014-05-22 14:38:48 -07:00
Commit: 8daee608ad (parent: 9e91c0c5d3)
2 changed files with 12 additions and 12 deletions

diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c

@@ -291,7 +291,7 @@ static void __dma_free_buffer(struct page *page, size_t size)
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller,
-				     bool no_kernel_mapping);
+				     struct dma_attrs *attrs);
 static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
@@ -397,7 +397,7 @@ static int __init atomic_pool_init(void)
	if (IS_ENABLED(CONFIG_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
-					      atomic_pool_init, false);
+					      atomic_pool_init, NULL);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
					   atomic_pool_init);
@@ -611,13 +611,15 @@ static int __free_from_pool(void *start, size_t size)
 static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller,
-				     bool no_kernel_mapping)
+				     struct dma_attrs *attrs)
 {
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	unsigned long pfn;
	struct page *page;
	void *ptr;
+	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
+					attrs);

	pfn = dma_alloc_from_contiguous(dev, count, order);
	if (!pfn)
@@ -625,7 +627,8 @@ static void *__alloc_from_contiguous(struct device *dev, size_t size,
	page = pfn_to_page(pfn);
-	__dma_clear_buffer(page, size);
+	if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
+		__dma_clear_buffer(page, size);

	if (PageHighMem(page)) {
		if (no_kernel_mapping) {
@@ -706,7 +709,7 @@ static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
 static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
-			 const void *caller, bool no_kernel_mapping)
+			 const void *caller, struct dma_attrs *attrs)
 {
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
@@ -747,7 +750,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page, caller,
-					       no_kernel_mapping);
+					       attrs);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));
@@ -764,14 +767,12 @@ void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
 {
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;
-	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
-					attrs);

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
-			   __builtin_return_address(0), no_kernel_mapping);
+			   __builtin_return_address(0), attrs);
 }

 static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
@@ -779,14 +780,12 @@ static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
 {
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;
-	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
-					attrs);

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
-			   __builtin_return_address(0), no_kernel_mapping);
+			   __builtin_return_address(0), attrs);
 }

 /*

diff --git a/include/linux/dma-attrs.h b/include/linux/dma-attrs.h

@@ -19,6 +19,7 @@ enum dma_attr {
	DMA_ATTR_SKIP_CPU_SYNC,
	DMA_ATTR_FORCE_CONTIGUOUS,
	DMA_ATTR_STRONGLY_ORDERED,
+	DMA_ATTR_SKIP_ZEROING,
	DMA_ATTR_MAX,
 };