/*
 * drivers/gpu/ion/ion_cma_heap.c
 *
 * Copyright (C) Linaro 2012
 * Author: <benjamin.gaignard@linaro.org> for ST-Ericsson.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/ion.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/dma-mapping.h>
#include <linux/msm_ion.h>
#include <mach/iommu_domains.h>

#include <asm/cacheflush.h>

/* for ion_heap_ops structure */
#include "ion_priv.h"

#define ION_CMA_ALLOCATE_FAILED -1

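/*
 * Per-buffer bookkeeping, stored in buffer->priv_virt for the lifetime of
 * the allocation: the kernel virtual address and DMA handle returned by
 * the DMA API, a single-entry scatterlist describing the contiguous
 * region, and whether the buffer was allocated cached.
 */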
struct ion_cma_buffer_info {
        void *cpu_addr;
        dma_addr_t handle;
        struct sg_table *table;
        bool is_cached;
};

static int cma_heap_has_outer_cache;
/*
 * Create a scatter-list for the already allocated DMA buffer.
 * This function could be replaced by dma_common_get_sgtable
 * as soon as it becomes available.
 */
int ion_cma_get_sgtable(struct device *dev, struct sg_table *sgt,
                        void *cpu_addr, dma_addr_t handle, size_t size)
{
        struct page *page = virt_to_page(cpu_addr);
        int ret;

        ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
        if (unlikely(ret))
                return ret;

        sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return 0;
}

/* ION CMA heap operations functions */
static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer,
                            unsigned long len, unsigned long align,
                            unsigned long flags)
{
        struct device *dev = heap->priv;
        struct ion_cma_buffer_info *info;

        dev_dbg(dev, "Request buffer allocation len %ld\n", len);

        info = kzalloc(sizeof(struct ion_cma_buffer_info), GFP_KERNEL);
        if (!info) {
                dev_err(dev, "Can't allocate buffer info\n");
                return ION_CMA_ALLOCATE_FAILED;
        }

        if (!ION_IS_CACHED(flags))
                info->cpu_addr = dma_alloc_writecombine(dev, len,
                                                        &(info->handle), 0);
        else
                info->cpu_addr = dma_alloc_nonconsistent(dev, len,
                                                         &(info->handle), 0);

        if (!info->cpu_addr) {
                dev_err(dev, "Fail to allocate buffer\n");
                goto err;
        }

        info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!info->table) {
                dev_err(dev, "Fail to allocate sg table\n");
                goto err;
        }

        info->is_cached = ION_IS_CACHED(flags);

        ion_cma_get_sgtable(dev,
                        info->table, info->cpu_addr, info->handle, len);

        /* keep this for memory release */
        buffer->priv_virt = info;
        dev_dbg(dev, "Allocate buffer %pK\n", buffer);
        return 0;

err:
        kfree(info);
        return ION_CMA_ALLOCATE_FAILED;
}

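/*
 * Free a buffer allocated by ion_cma_allocate(): return the contiguous
 * memory to the DMA/CMA pool, then release the scatterlist table and the
 * bookkeeping info.
 */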
static void ion_cma_free(struct ion_buffer *buffer)
{
        struct device *dev = buffer->heap->priv;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        dev_dbg(dev, "Release buffer %pK\n", buffer);
        /* release memory */
        dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle);
        sg_free_table(info->table);
        /* release sg table */
        kfree(info->table);
        kfree(info);
}

/* return physical address in addr */
static int ion_cma_phys(struct ion_heap *heap, struct ion_buffer *buffer,
                        ion_phys_addr_t *addr, size_t *len)
{
        struct device *dev = heap->priv;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        dev_dbg(dev, "Return buffer %pK physical address 0x%pa\n", buffer,
                &info->handle);

        *addr = info->handle;
        *len = buffer->size;

        return 0;
}

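/*
 * The sg_table was built at allocation time, so map_dma simply hands it
 * back and unmap_dma has nothing to undo.
 */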
struct sg_table *ion_cma_heap_map_dma(struct ion_heap *heap,
                                      struct ion_buffer *buffer)
{
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        return info->table;
}

void ion_cma_heap_unmap_dma(struct ion_heap *heap,
                            struct ion_buffer *buffer)
{
        return;
}

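/*
 * Map the buffer into a userspace VMA, using cached or write-combined
 * page attributes to match how the buffer was allocated.
 */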
static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer,
                        struct vm_area_struct *vma)
{
        struct device *dev = buffer->heap->priv;
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        if (info->is_cached)
                return dma_mmap_nonconsistent(dev, vma, info->cpu_addr,
                                              info->handle, buffer->size);
        else
                return dma_mmap_writecombine(dev, vma, info->cpu_addr,
                                             info->handle, buffer->size);
}

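/*
 * The DMA API already provided a kernel virtual address at allocation
 * time, so map_kernel just returns it and unmap_kernel is a no-op.
 */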
static void *ion_cma_map_kernel(struct ion_heap *heap,
                                struct ion_buffer *buffer)
{
        struct ion_cma_buffer_info *info = buffer->priv_virt;

        return info->cpu_addr;
}

static void ion_cma_unmap_kernel(struct ion_heap *heap,
                                 struct ion_buffer *buffer)
{
        return;
}

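/*
 * Map the buffer into an MSM IOMMU domain/partition: allocate an IOVA
 * range of iova_length bytes, map the buffer through it, and map any
 * requested padding beyond the buffer size via msm_iommu_map_extra().
 * When no IOMMU is in use, the physical address is handed back directly.
 */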
int ion_cma_map_iommu(struct ion_buffer *buffer,
                        struct ion_iommu_map *data,
                        unsigned int domain_num,
                        unsigned int partition_num,
                        unsigned long align,
                        unsigned long iova_length,
                        unsigned long flags)
{
        int ret = 0;
        struct iommu_domain *domain;
        unsigned long extra;
        unsigned long extra_iova_addr;
        struct ion_cma_buffer_info *info = buffer->priv_virt;
        struct sg_table *table = info->table;
        int prot = IOMMU_WRITE | IOMMU_READ;

        data->mapped_size = iova_length;

        if (!msm_use_iommu()) {
                data->iova_addr = info->handle;
                return 0;
        }

        extra = iova_length - buffer->size;

        ret = msm_allocate_iova_address(domain_num, partition_num,
                                        data->mapped_size, align,
                                        &data->iova_addr);

        if (ret)
                goto out;

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                ret = -EINVAL;
                goto out1;
        }

        ret = iommu_map_range(domain, data->iova_addr, table->sgl,
                              buffer->size, prot);

        if (ret) {
                pr_err("%s: could not map %lx in domain %p\n",
                        __func__, data->iova_addr, domain);
                goto out1;
        }

        extra_iova_addr = data->iova_addr + buffer->size;
        if (extra) {
                unsigned long phys_addr = sg_phys(table->sgl);
                ret = msm_iommu_map_extra(domain, extra_iova_addr, phys_addr,
                                          extra, SZ_4K, prot);
                if (ret)
                        goto out2;
        }
        return ret;

out2:
        iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);
out:
        return ret;
}

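/*
 * Tear down a mapping created by ion_cma_map_iommu(): unmap the IOVA
 * range and return it to the domain's partition allocator.
 */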
void ion_cma_unmap_iommu(struct ion_iommu_map *data)
{
        unsigned int domain_num;
        unsigned int partition_num;
        struct iommu_domain *domain;

        if (!msm_use_iommu())
                return;

        domain_num = iommu_map_domain(data);
        partition_num = iommu_map_partition(data);

        domain = msm_get_iommu_domain(domain_num);

        if (!domain) {
                WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
                return;
        }

        iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
        msm_free_iova_address(data->iova_addr, domain_num, partition_num,
                              data->mapped_size);

        return;
}

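/*
 * Cache maintenance (clean, invalidate, or flush) for the buffer.  With a
 * kernel vaddr the inner cache is maintained by virtual range, otherwise
 * via the buffer's scatterlist; a matching outer-cache operation is then
 * applied by physical range when an outer cache is present.
 */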
int ion_cma_cache_ops(struct ion_heap *heap,
                        struct ion_buffer *buffer, void *vaddr,
                        unsigned int offset, unsigned int length,
                        unsigned int cmd)
{
        void (*outer_cache_op)(phys_addr_t, phys_addr_t);

        switch (cmd) {
        case ION_IOC_CLEAN_CACHES:
                if (!vaddr)
                        dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_TO_DEVICE);
                else
                        dmac_clean_range(vaddr, vaddr + length);
                outer_cache_op = outer_clean_range;
                break;
        case ION_IOC_INV_CACHES:
                if (!vaddr)
                        dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_FROM_DEVICE);
                else
                        dmac_inv_range(vaddr, vaddr + length);
                outer_cache_op = outer_inv_range;
                break;
        case ION_IOC_CLEAN_INV_CACHES:
                if (!vaddr) {
                        dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_TO_DEVICE);
                        dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl,
                                buffer->sg_table->nents, DMA_FROM_DEVICE);
                } else {
                        dmac_flush_range(vaddr, vaddr + length);
                }
                outer_cache_op = outer_flush_range;
                break;
        default:
                return -EINVAL;
        }

        if (cma_heap_has_outer_cache) {
                struct ion_cma_buffer_info *info = buffer->priv_virt;

                outer_cache_op(info->handle, info->handle + length);
        }

        return 0;
}

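/*
 * Debugfs helper: print the heap's memory map, one line per client with
 * start address, end address and size.
 */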
static int ion_cma_print_debug(struct ion_heap *heap, struct seq_file *s,
                               const struct list_head *mem_map)
{
        if (mem_map) {
                struct mem_map_data *data;

                seq_printf(s, "\nMemory Map\n");
                seq_printf(s, "%16.s %14.s %14.s %14.s\n",
                           "client", "start address", "end address",
                           "size (hex)");

                list_for_each_entry(data, mem_map, node) {
                        const char *client_name = "(null)";

                        if (data->client_name)
                                client_name = data->client_name;

                        seq_printf(s, "%16.s %14lx %14lx %14lu (%lx)\n",
                                   client_name, data->addr,
                                   data->addr_end,
                                   data->size, data->size);
                }
        }
        return 0;
}

static struct ion_heap_ops ion_cma_ops = {
        .allocate = ion_cma_allocate,
        .free = ion_cma_free,
        .map_dma = ion_cma_heap_map_dma,
        .unmap_dma = ion_cma_heap_unmap_dma,
        .phys = ion_cma_phys,
        .map_user = ion_cma_mmap,
        .map_kernel = ion_cma_map_kernel,
        .unmap_kernel = ion_cma_unmap_kernel,
        .map_iommu = ion_cma_map_iommu,
        .unmap_iommu = ion_cma_unmap_iommu,
        .cache_op = ion_cma_cache_ops,
        .print_debug = ion_cma_print_debug,
};

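/*
 * Heap constructor and destructor, invoked when CMA heaps are created
 * from platform data and torn down.
 */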
struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data)
{
        struct ion_heap *heap;

        heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL);

        if (!heap)
                return ERR_PTR(-ENOMEM);

        heap->ops = &ion_cma_ops;
        /*
         * set the device as the heap's private data; later it will be
         * used to make the link with the reserved CMA memory
         */
        heap->priv = data->priv;
        heap->type = ION_HEAP_TYPE_DMA;
        cma_heap_has_outer_cache = data->has_outer_cache;
        return heap;
}

void ion_cma_heap_destroy(struct ion_heap *heap)
{
        kfree(heap);
}