gpu: ion: Add msm specific extensions to CMA heap

A number of changes have been made to the Ion framework for the
msm target. Add the necessary changes on top of the CMA heap to
allow the CMA heap to be fully utilized.

Change-Id: Ie006dcd4c41481e4d914c67bafbf42d1afdb1a76
Signed-off-by: Laura Abbott <lauraa@codeaurora.org>
Signed-off-by: Mitchel Humpherys <mitchelh@codeaurora.org>
This commit is contained in:
Laura Abbott 2012-08-15 11:09:10 -07:00 committed by Stephen Boyd
parent 1382bb05d7
commit 142cd31a3e
2 changed files with 169 additions and 5 deletions

View file

@ -1,3 +1,4 @@
obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o ion_iommu_heap.o ion_cp_heap.o
obj-$(CONFIG_CMA) += ion_cma_heap.o
obj-$(CONFIG_ION_TEGRA) += tegra/
obj-$(CONFIG_ION_MSM) += msm/

View file

@ -21,6 +21,10 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>
#include <asm/cacheflush.h>
/* for ion_heap_ops structure */
#include "ion_priv.h"
@ -31,8 +35,10 @@ struct ion_cma_buffer_info {
void *cpu_addr;
dma_addr_t handle;
struct sg_table *table;
bool is_cached;
};
static int cma_heap_has_outer_cache;
/*
* Create scatter-list for the already allocated DMA buffer.
* This function could be replace by dma_common_get_sgtable
@ -68,7 +74,12 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
return ION_CMA_ALLOCATE_FAILED;
}
info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), 0);
if (!ION_IS_CACHED(flags))
info->cpu_addr = dma_alloc_writecombine(dev, len,
&(info->handle), 0);
else
info->cpu_addr = dma_alloc_nonconsistent(dev, len,
&(info->handle), 0);
if (!info->cpu_addr) {
dev_err(dev, "Fail to allocate buffer\n");
@ -81,6 +92,8 @@ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
goto err;
}
info->is_cached = ION_IS_CACHED(flags);
ion_cma_get_sgtable(dev,
info->table, info->cpu_addr, info->handle, len);
@ -115,9 +128,9 @@ static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
struct ion_cma_buffer_info *info = buffer->priv_virt;
dev_dbg(dev, "Return buffer %p physical address 0x%x\n", buffer,
virt_to_phys(info->cpu_addr));
info->handle);
*addr = virt_to_phys(info->cpu_addr);
*addr = info->handle;
*len = buffer->size;
return 0;
@ -143,8 +156,152 @@ static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
struct device *dev = buffer->heap->priv;
struct ion_cma_buffer_info *info = buffer->priv_virt;
return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle,
buffer->size);
if (info->is_cached)
return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
info->handle, buffer->size);
else
return dma_mmap_writecombine(dev, vma, info->cpu_addr,
info->handle, buffer->size);
}
/*
 * Map a CMA buffer into the kernel.  The DMA allocation made at buffer
 * creation time already carries a long-lived kernel virtual address
 * (info->cpu_addr), so no new mapping is created here — the saved
 * address is simply handed back.
 */
static void *ion_cma_map_kernel(struct ion_heap *heap,
				struct ion_buffer *buffer)
{
	struct ion_cma_buffer_info *cma_info = buffer->priv_virt;

	return cma_info->cpu_addr;
}
/*
 * Counterpart to ion_cma_map_kernel().  Nothing to undo: the address
 * returned by the map call is owned by the allocation itself and stays
 * valid until the buffer is freed, so this is intentionally a no-op.
 */
static void ion_cma_unmap_kernel(struct ion_heap *heap,
				 struct ion_buffer *buffer)
{
}
/*
 * Map a CMA buffer into an MSM IOMMU domain.
 *
 * Reserves iova_length bytes of IOVA space in (domain_num, partition_num),
 * maps the buffer's scatterlist at the start of that region, and maps any
 * extra tail (iova_length - buffer->size) with dummy 4K pages so the whole
 * reserved range is backed.  On success data->iova_addr/mapped_size describe
 * the mapping; on failure everything acquired so far is unwound via the
 * goto chain below.  Returns 0 on success, negative errno on failure.
 *
 * NOTE(review): 'flags' is accepted but unused here — presumably reserved
 * for cache/permission hints; confirm against the msm_ion API.
 */
int ion_cma_map_iommu(struct ion_buffer *buffer,
struct ion_iommu_map *data,
unsigned int domain_num,
unsigned int partition_num,
unsigned long align,
unsigned long iova_length,
unsigned long flags)
{
int ret = 0;
struct iommu_domain *domain;
unsigned long extra;
unsigned long extra_iova_addr;
struct ion_cma_buffer_info *info = buffer->priv_virt;
struct sg_table *table = info->table;
/* Buffers are always mapped read/write into the domain. */
int prot = IOMMU_WRITE | IOMMU_READ;
data->mapped_size = iova_length;
/* No IOMMU on this target: devices use the physical (DMA) address
 * directly, so report the CMA handle as the "iova". */
if (!msm_use_iommu()) {
data->iova_addr = info->handle;
return 0;
}
/* Caller may request a region larger than the buffer; the surplus is
 * padded with dummy pages further down.  Assumes iova_length >=
 * buffer->size (guaranteed by the ION core). */
extra = iova_length - buffer->size;
/* Step 1: carve out IOVA space from the domain's partition. */
ret = msm_allocate_iova_address(domain_num, partition_num,
data->mapped_size, align,
&data->iova_addr);
if (ret)
goto out;
domain = msm_get_iommu_domain(domain_num);
if (!domain) {
ret = -EINVAL;
goto out1;
}
/* Step 2: map the buffer's pages at the allocated IOVA. */
ret = iommu_map_range(domain, data->iova_addr, table->sgl,
buffer->size, prot);
if (ret) {
pr_err("%s: could not map %lx in domain %p\n",
__func__, data->iova_addr, domain);
goto out1;
}
/* Step 3: back the requested padding with dummy 4K mappings so the
 * whole reserved IOVA range is valid for the device. */
extra_iova_addr = data->iova_addr + buffer->size;
if (extra) {
ret = msm_iommu_map_extra(domain, extra_iova_addr, extra, SZ_4K,
prot);
if (ret)
goto out2;
}
return ret;
/* Error unwind: undo in reverse order of acquisition. */
out2:
iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
out:
return ret;
}
/*
 * Tear down a mapping created by ion_cma_map_iommu(): unmap the full
 * reserved range (buffer + padding) from the domain, then release the
 * IOVA space back to the partition allocator.  Unmapping must happen
 * before the IOVA is freed so the range cannot be re-allocated while
 * still live in the page tables.
 */
void ion_cma_unmap_iommu(struct ion_iommu_map *data)
{
unsigned int domain_num;
unsigned int partition_num;
struct iommu_domain *domain;
/* Without an IOMMU nothing was mapped (map returned the phys addr),
 * so there is nothing to undo. */
if (!msm_use_iommu())
return;
domain_num = iommu_map_domain(data);
partition_num = iommu_map_partition(data);
domain = msm_get_iommu_domain(domain_num);
if (!domain) {
WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
return;
}
iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
return;
}
/*
 * Perform CPU cache maintenance on [offset, offset + length) of a CMA
 * buffer.
 *
 * @heap:   owning heap (unused here).
 * @buffer: ION buffer; priv_virt holds the ion_cma_buffer_info whose
 *          'handle' is the buffer's physical/DMA base address.
 * @vaddr:  kernel virtual address of the region to maintain (the caller
 *          passes the address already adjusted by 'offset' — presumably;
 *          confirm against the msm_ion cache-op callers).
 * @offset: byte offset of the region within the buffer.
 * @length: byte length of the region.
 * @cmd:    ION_IOC_CLEAN_CACHES / ION_IOC_INV_CACHES /
 *          ION_IOC_CLEAN_INV_CACHES.
 *
 * Returns 0 on success, -EINVAL for an unknown command.
 */
int ion_cma_cache_ops(struct ion_heap *heap,
			struct ion_buffer *buffer, void *vaddr,
			unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	/* Inner (L1) maintenance by virtual address, and pick the matching
	 * outer (L2) operation for below. */
	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cma_heap_has_outer_cache) {
		struct ion_cma_buffer_info *info = buffer->priv_virt;

		/* Fix: include 'offset' in the physical range so the outer
		 * cache operation covers the same bytes as the dmac_* call
		 * above.  The original used info->handle .. handle + length,
		 * which maintained the wrong lines whenever offset != 0;
		 * other msm ION heaps (carveout/cp) use phys + offset. */
		outer_cache_op(info->handle + offset,
				info->handle + offset + length);
	}

	return 0;
}
static struct ion_heap_ops ion_cma_ops = {
@ -154,6 +311,11 @@ static struct ion_heap_ops ion_cma_ops = {
.unmap_dma = ion_cma_heap_unmap_dma,
.phys = ion_cma_phys,
.map_user = ion_cma_mmap,
.map_kernel = ion_cma_map_kernel,
.unmap_kernel = ion_cma_unmap_kernel,
.map_iommu = ion_cma_map_iommu,
.unmap_iommu = ion_cma_unmap_iommu,
.cache_op = ion_cma_cache_ops,
};
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
@ -170,6 +332,7 @@ struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
* used to make the link with reserved CMA memory */
heap->priv = data->priv;
heap->type = ION_HEAP_TYPE_DMA;
cma_heap_has_outer_cache = data->has_outer_cache;
return heap;
}