diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 5f14247392bf43..8f3ac37bfe12e7 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -7,10 +7,23 @@ TODO:
 
 ion/
- - Add dt-bindings for remaining heaps (chunk and carveout heaps). This would
-   involve putting appropriate bindings in a memory node for Ion to find.
- - Split /dev/ion up into multiple nodes (e.g. /dev/ion/heap0)
- - Better test framework (integration with VGEM was suggested)
+ - Remove ION_IOC_SYNC: Flushing for devices should be purely a kernel internal
+   interface on top of dma-buf. flush_for_device needs to be added to dma-buf
+   first.
+ - Remove ION_IOC_CUSTOM: At the moment it is used for cache flushing for cpu
+   access in some vendor trees. Should be replaced with an ioctl on the dma-buf
+   to expose the begin/end_cpu_access hooks to userspace.
+ - Clarify the tricks ion plays with explicitly managing coherency behind the
+   dma api's back (this is absolutely needed for high-perf gpu drivers): Add an
+   explicit coherency management mode to flush_for_device to be used by drivers
+   which want to manage caches themselves and which indicates whether cpu caches
+   need flushing.
+ - With those removed there's probably no use for ION_IOC_IMPORT anymore either,
+   since ion would just be the central allocator for shared buffers.
+ - Add dt-binding to expose cma regions as ion heaps, with the rule that any
+   such cma regions must already be used by some device for dma. I.e. ion only
+   exposes existing cma regions and doesn't reserve memory unnecessarily when
+   booting a system which doesn't use ion.
 
 Please send patches to Greg Kroah-Hartman <greg@kroah.com> and Cc:
 Arve Hjønnevåg <arve@android.com> and Riley Andrews <riandrews@android.com>
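Note on the first two TODO items: once begin/end_cpu_access are reachable
through the mainline DMA_BUF_IOCTL_SYNC interface, the userspace side of the
cache maintenance that ION_IOC_SYNC/ION_IOC_CUSTOM currently perform looks
roughly like the sketch below (error handling elided; the dmabuf fd is assumed
to come from ION_IOC_SHARE):

	#include <linux/dma-buf.h>
	#include <sys/ioctl.h>

	static int cpu_access_begin(int dmabuf_fd)
	{
		/* invalidate CPU caches before the CPU reads/writes */
		struct dma_buf_sync sync = {
			.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW,
		};

		return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	}

	static int cpu_access_end(int dmabuf_fd)
	{
		/* flush CPU writes back before handing the buffer to a device */
		struct dma_buf_sync sync = {
			.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW,
		};

		return ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
	}
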
diff --git a/drivers/staging/android/ion/Kconfig b/drivers/staging/android/ion/Kconfig
index a517b2d29f1bb6..c8fb4134c55d04 100644
--- a/drivers/staging/android/ion/Kconfig
+++ b/drivers/staging/android/ion/Kconfig
@@ -10,35 +10,45 @@ menuconfig ION
 
 	  If you're not using Android its probably safe to say N here.
 
-config ION_SYSTEM_HEAP
-	bool "Ion system heap"
+config ION_TEST
+	tristate "Ion Test Device"
 	depends on ION
 	help
-	  Choose this option to enable the Ion system heap. The system heap
-	  is backed by pages from the buddy allocator. If in doubt, say Y.
+	  Choose this option to create a device that can be used to test the
+	  kernel and device side ION functions.
 
-config ION_CARVEOUT_HEAP
-	bool "Ion carveout heap support"
+config ION_DUMMY
+	bool "Dummy Ion driver"
 	depends on ION
 	help
-	  Choose this option to enable carveout heaps with Ion. Carveout heaps
-	  are backed by memory reserved from the system. Allocation times are
-	  typically faster at the cost of memory not being used. Unless you
-	  know your system has these regions, you should say N here.
+	  Provides a dummy ION driver that registers the
+	  /dev/ion device and some basic heaps. This can
+	  be used for testing the ION infrastructure if
+	  one doesn't have access to hardware drivers that
+	  use ION.
 
-config ION_CHUNK_HEAP
-	bool "Ion chunk heap support"
-	depends on ION
+config ION_TEGRA
+	tristate "Ion for Tegra"
+	depends on ARCH_TEGRA && ION
 	help
-	  Choose this option to enable chunk heaps with Ion. This heap is
-	  similar in function the carveout heap but memory is broken down
-	  into smaller chunk sizes, typically corresponding to a TLB size.
-	  Unless you know your system has these regions, you should say N here.
+	  Choose this option if you wish to use ion on an NVIDIA Tegra.
 
-config ION_CMA_HEAP
-	bool "Ion CMA heap support"
-	depends on ION && CMA
+config ION_HISI
+	tristate "Ion for Hisilicon"
+	depends on ARCH_HISI && ION
+	select ION_OF
 	help
-	  Choose this option to enable CMA heaps with Ion. This heap is backed
-	  by the Contiguous Memory Allocator (CMA). If your system has these
-	  regions, you should say Y here.
+	  Choose this option if you wish to use ion on a Hisilicon platform.
+
+source "drivers/staging/android/ion/hisilicon/Kconfig"
+
+config ION_OF
+	bool "Devicetree support for Ion"
+	depends on ION && OF_ADDRESS
+	help
+	  Provides base support for defining Ion heaps in devicetree
+	  and setting them up. Also includes functions for platforms
+	  to parse the devicetree and extend it with their own custom
+	  extensions.
+
+	  If using Ion and devicetree, you should say Y here.
diff --git a/drivers/staging/android/ion/Makefile b/drivers/staging/android/ion/Makefile
index eb7eeed6ae409b..5d630a088381a4 100644
--- a/drivers/staging/android/ion/Makefile
+++ b/drivers/staging/android/ion/Makefile
@@ -1,5 +1,13 @@
-obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o
-obj-$(CONFIG_ION_SYSTEM_HEAP) += ion_system_heap.o ion_page_pool.o
-obj-$(CONFIG_ION_CARVEOUT_HEAP) += ion_carveout_heap.o
-obj-$(CONFIG_ION_CHUNK_HEAP) += ion_chunk_heap.o
-obj-$(CONFIG_ION_CMA_HEAP) += ion_cma_heap.o
+obj-$(CONFIG_ION) += ion.o ion-ioctl.o ion_heap.o \
+			ion_page_pool.o ion_system_heap.o \
+			ion_carveout_heap.o ion_chunk_heap.o ion_cma_heap.o
+obj-$(CONFIG_ION_TEST) += ion_test.o
+ifdef CONFIG_COMPAT
+obj-$(CONFIG_ION) += compat_ion.o
+endif
+
+obj-$(CONFIG_ION_DUMMY) += ion_dummy_driver.o
+obj-$(CONFIG_ION_TEGRA) += tegra/
+obj-$(CONFIG_ION_HISI) += hisilicon/
+obj-$(CONFIG_ION_OF) += ion_of.o
+
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
new file mode 100644
index 00000000000000..9a978d21785ec0
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -0,0 +1,195 @@
+/*
+ * drivers/staging/android/ion/compat_ion.c
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/compat.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+
+#include "ion.h"
+#include "compat_ion.h"
+
+/* See drivers/staging/android/uapi/ion.h for the definition of these structs */
+struct compat_ion_allocation_data {
+	compat_size_t len;
+	compat_size_t align;
+	compat_uint_t heap_id_mask;
+	compat_uint_t flags;
+	compat_int_t handle;
+};
+
+struct compat_ion_custom_data {
+	compat_uint_t cmd;
+	compat_ulong_t arg;
+};
+
+struct compat_ion_handle_data {
+	compat_int_t handle;
+};
+
+#define COMPAT_ION_IOC_ALLOC	_IOWR(ION_IOC_MAGIC, 0, \
+				      struct compat_ion_allocation_data)
+#define COMPAT_ION_IOC_FREE	_IOWR(ION_IOC_MAGIC, 1, \
+				      struct compat_ion_handle_data)
+#define COMPAT_ION_IOC_CUSTOM	_IOWR(ION_IOC_MAGIC, 6, \
+				      struct compat_ion_custom_data)
+
+static int compat_get_ion_allocation_data(
+			struct compat_ion_allocation_data __user *data32,
+			struct ion_allocation_data __user *data)
+{
+	compat_size_t s;
+	compat_uint_t u;
+	compat_int_t i;
+	int err;
+
+	err = get_user(s, &data32->len);
+	err |= put_user(s, &data->len);
+	err |= get_user(s, &data32->align);
+	err |= put_user(s, &data->align);
+	err |= get_user(u, &data32->heap_id_mask);
+	err |= put_user(u, &data->heap_id_mask);
+	err |= get_user(u, &data32->flags);
+	err |= put_user(u, &data->flags);
+	err |= get_user(i, &data32->handle);
+	err |= put_user(i, &data->handle);
+
+	return err;
+}
+
+static int compat_get_ion_handle_data(
+			struct compat_ion_handle_data __user *data32,
+			struct ion_handle_data __user *data)
+{
+	compat_int_t i;
+	int err;
+
+	err = get_user(i, &data32->handle);
+	err |= put_user(i, &data->handle);
+
+	return err;
+}
+
+static int compat_put_ion_allocation_data(
+			struct compat_ion_allocation_data __user *data32,
+			struct ion_allocation_data __user *data)
+{
+	compat_size_t s;
+	compat_uint_t u;
+	compat_int_t i;
+	int err;
+
+	err = get_user(s, &data->len);
+	err |= put_user(s, &data32->len);
+	err |= get_user(s, &data->align);
+	err |= put_user(s, &data32->align);
+	err |= get_user(u, &data->heap_id_mask);
+	err |= put_user(u, &data32->heap_id_mask);
+	err |= get_user(u, &data->flags);
+	err |= put_user(u, &data32->flags);
+	err |= get_user(i, &data->handle);
+	err |= put_user(i, &data32->handle);
+
+	return err;
+}
+
+static int compat_get_ion_custom_data(
+			struct compat_ion_custom_data __user *data32,
+			struct ion_custom_data __user *data)
+{
+	compat_uint_t cmd;
+	compat_ulong_t arg;
+	int err;
+
+	err = get_user(cmd, &data32->cmd);
+	err |= put_user(cmd, &data->cmd);
+	err |= get_user(arg, &data32->arg);
+	err |= put_user(arg, &data->arg);
+
+	return err;
+}
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+
+	if (!filp->f_op->unlocked_ioctl)
+		return -ENOTTY;
+
+	switch (cmd) {
+	case COMPAT_ION_IOC_ALLOC:
+	{
+		struct compat_ion_allocation_data __user *data32;
+		struct ion_allocation_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_ion_allocation_data(data32, data);
+		if (err)
+			return err;
+		ret = filp->f_op->unlocked_ioctl(filp, ION_IOC_ALLOC,
+						 (unsigned long)data);
+		err = compat_put_ion_allocation_data(data32, data);
+		return ret ? ret : err;
+	}
+	case COMPAT_ION_IOC_FREE:
+	{
+		struct compat_ion_handle_data __user *data32;
+		struct ion_handle_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_ion_handle_data(data32, data);
+		if (err)
+			return err;
+
+		return filp->f_op->unlocked_ioctl(filp, ION_IOC_FREE,
+						  (unsigned long)data);
+	}
+	case COMPAT_ION_IOC_CUSTOM: {
+		struct compat_ion_custom_data __user *data32;
+		struct ion_custom_data __user *data;
+		int err;
+
+		data32 = compat_ptr(arg);
+		data = compat_alloc_user_space(sizeof(*data));
+		if (!data)
+			return -EFAULT;
+
+		err = compat_get_ion_custom_data(data32, data);
+		if (err)
+			return err;
+
+		return filp->f_op->unlocked_ioctl(filp, ION_IOC_CUSTOM,
+						  (unsigned long)data);
+	}
+	case ION_IOC_SHARE:
+	case ION_IOC_MAP:
+	case ION_IOC_IMPORT:
+	case ION_IOC_SYNC:
+		return filp->f_op->unlocked_ioctl(filp, cmd,
+						(unsigned long)compat_ptr(arg));
+	default:
+		return -ENOIOCTLCMD;
+	}
+}
diff --git a/drivers/staging/android/ion/compat_ion.h b/drivers/staging/android/ion/compat_ion.h
new file mode 100644
index 00000000000000..9da8f917670b53
--- /dev/null
+++ b/drivers/staging/android/ion/compat_ion.h
@@ -0,0 +1,29 @@
+/*
+ * drivers/staging/android/ion/compat_ion.h
+ *
+ * Copyright (C) 2013 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_COMPAT_ION_H
+#define _LINUX_COMPAT_ION_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+
+long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+
+#else
+
+#define compat_ion_ioctl  NULL
+
+#endif /* CONFIG_COMPAT */
+#endif /* _LINUX_COMPAT_ION_H */
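The field-by-field translation above is needed because struct
ion_allocation_data changes layout between 32-bit and 64-bit userspace: len
and align are size_t in the native uapi but compat_size_t (32 bits) in a
compat task, so both the structure size and the _IOWR()-generated ioctl number
differ. For comparison, the native structure from
drivers/staging/android/uapi/ion.h looks like this (recap, not part of the
patch):

	struct ion_allocation_data {
		size_t len;		/* 8 bytes on 64-bit, 4 on 32-bit */
		size_t align;		/* likewise */
		unsigned int heap_id_mask;
		unsigned int flags;
		ion_user_handle_t handle;	/* an int */
	};

Since _IOWR() encodes sizeof() of the payload, COMPAT_ION_IOC_ALLOC and
ION_IOC_ALLOC are distinct ioctl numbers, which is why the switch above must
match the compat value and re-issue the native one.
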
diff --git a/drivers/staging/android/ion/devicetree.txt b/drivers/staging/android/ion/devicetree.txt
new file mode 100644
index 00000000000000..168715271f0640
--- /dev/null
+++ b/drivers/staging/android/ion/devicetree.txt
@@ -0,0 +1,51 @@
+Ion Memory Manager
+
+Ion is a memory manager that allows for sharing of buffers via dma-buf.
+Ion allows for different types of allocation via an abstraction called
+a 'heap'. A heap represents a specific type of memory. Each heap has
+a different type. There can be multiple instances of the same heap
+type.
+
+Specific heap instances are tied to heap IDs. Heap IDs are not to be specified
+in the devicetree.
+
+Required properties for Ion
+
+- compatible: "linux,ion" PLUS a compatible property for the device
+
+All child nodes of a linux,ion node are interpreted as heaps
+
+Required properties for heaps
+
+- compatible: compatible string for a heap type PLUS a compatible property
+for the specific instance of the heap. Current heap types:
+-- linux,ion-heap-system
+-- linux,ion-heap-system-contig
+-- linux,ion-heap-carveout
+-- linux,ion-heap-chunk
+-- linux,ion-heap-dma
+-- linux,ion-heap-custom
+
+Optional properties
+- memory-region: A phandle to a memory region. Required for the DMA heap type
+(see reserved-memory.txt for details on the reservation)
+
+Example:
+
+	ion {
+		compatible = "hisilicon,ion", "linux,ion";
+
+		ion-system-heap {
+			compatible = "hisilicon,system-heap", "linux,ion-heap-system";
+		};
+
+		ion-camera-region {
+			compatible = "hisilicon,camera-heap", "linux,ion-heap-dma";
+			memory-region = <&camera_region>;
+		};
+
+		ion-fb-region {
+			compatible = "hisilicon,fb-heap", "linux,ion-heap-dma";
+			memory-region = <&fb_region>;
+		};
+	};
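For the linux,ion-heap-dma case, the platform code has to resolve
memory-region into a base/size pair before it can create the heap. A minimal
sketch of that lookup using the standard OF helpers (illustrative only, not
part of this patch):

	#include <linux/of.h>
	#include <linux/of_address.h>

	/* Resolve a heap child node's "memory-region" into base/size. */
	static int example_heap_get_region(struct device_node *heap_node,
					   phys_addr_t *base, size_t *size)
	{
		struct device_node *rmem;
		struct resource res;
		int ret;

		rmem = of_parse_phandle(heap_node, "memory-region", 0);
		if (!rmem)
			return -ENOENT;

		ret = of_address_to_resource(rmem, 0, &res);
		of_node_put(rmem);
		if (ret)
			return ret;

		*base = res.start;
		*size = resource_size(&res);
		return 0;
	}
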
diff --git a/drivers/staging/android/ion/hisilicon/Kconfig b/drivers/staging/android/ion/hisilicon/Kconfig
new file mode 100644
index 00000000000000..2b4bd079829007
--- /dev/null
+++ b/drivers/staging/android/ion/hisilicon/Kconfig
@@ -0,0 +1,5 @@
+config HI6220_ION
+	bool "Hi6220 ION Driver"
+	depends on ARCH_HISI && ION
+	help
+	  Build the Hisilicon Hi6220 ion driver.
diff --git a/drivers/staging/android/ion/hisilicon/Makefile b/drivers/staging/android/ion/hisilicon/Makefile
new file mode 100644
index 00000000000000..2a89414280ac16
--- /dev/null
+++ b/drivers/staging/android/ion/hisilicon/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_HI6220_ION) += hi6220_ion.o
diff --git a/drivers/staging/android/ion/hisilicon/hi6220_ion.c b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
new file mode 100644
index 00000000000000..0de7897fd4bf59
--- /dev/null
+++ b/drivers/staging/android/ion/hisilicon/hi6220_ion.c
@@ -0,0 +1,113 @@
+/*
+ * Hisilicon Hi6220 ION Driver
+ *
+ * Copyright (c) 2015 Hisilicon Limited.
+ *
+ * Author: Chen Feng <puck.chen@hisilicon.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "Ion: " fmt
+
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/mm.h>
+#include "../ion_priv.h"
+#include "../ion.h"
+#include "../ion_of.h"
+
+struct hisi_ion_dev {
+	struct ion_heap	**heaps;
+	struct ion_device *idev;
+	struct ion_platform_data *data;
+};
+
+static struct ion_of_heap hisi_heaps[] = {
+	PLATFORM_HEAP("hisilicon,sys_user", 0,
+		      ION_HEAP_TYPE_SYSTEM, "sys_user"),
+	PLATFORM_HEAP("hisilicon,sys_contig", 1,
+		      ION_HEAP_TYPE_SYSTEM_CONTIG, "sys_contig"),
+	PLATFORM_HEAP("hisilicon,cma", ION_HEAP_TYPE_DMA, ION_HEAP_TYPE_DMA,
+		      "cma"),
+	{}
+};
+
+static int hi6220_ion_probe(struct platform_device *pdev)
+{
+	struct hisi_ion_dev *ipdev;
+	int i;
+
+	ipdev = devm_kzalloc(&pdev->dev, sizeof(*ipdev), GFP_KERNEL);
+	if (!ipdev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, ipdev);
+
+	ipdev->idev = ion_device_create(NULL);
+	if (IS_ERR(ipdev->idev))
+		return PTR_ERR(ipdev->idev);
+
+	ipdev->data = ion_parse_dt(pdev, hisi_heaps);
+	if (IS_ERR(ipdev->data))
+		return PTR_ERR(ipdev->data);
+
+	ipdev->heaps = devm_kzalloc(&pdev->dev,
+				    sizeof(*ipdev->heaps) * ipdev->data->nr,
+				    GFP_KERNEL);
+	if (!ipdev->heaps) {
+		ion_destroy_platform_data(ipdev->data);
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < ipdev->data->nr; i++) {
+		ipdev->heaps[i] = ion_heap_create(&ipdev->data->heaps[i]);
+		if (!ipdev->heaps[i]) {
+			ion_destroy_platform_data(ipdev->data);
+			return -ENOMEM;
+		}
+		ion_device_add_heap(ipdev->idev, ipdev->heaps[i]);
+	}
+	return 0;
+}
+
+static int hi6220_ion_remove(struct platform_device *pdev)
+{
+	struct hisi_ion_dev *ipdev;
+	int i;
+
+	ipdev = platform_get_drvdata(pdev);
+
+	for (i = 0; i < ipdev->data->nr; i++)
+		ion_heap_destroy(ipdev->heaps[i]);
+
+	ion_destroy_platform_data(ipdev->data);
+	ion_device_destroy(ipdev->idev);
+
+	return 0;
+}
+
+static const struct of_device_id hi6220_ion_match_table[] = {
+	{.compatible = "hisilicon,hi6220-ion"},
+	{},
+};
+
+static struct platform_driver hi6220_ion_driver = {
+	.probe = hi6220_ion_probe,
+	.remove = hi6220_ion_remove,
+	.driver = {
+		.name = "ion-hi6220",
+		.of_match_table = hi6220_ion_match_table,
+	},
+};
+
+static int __init hi6220_ion_init(void)
+{
+	return platform_driver_register(&hi6220_ion_driver);
+}
+
+subsys_initcall(hi6220_ion_init);
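With the device registered, in-kernel users go through the client API this
patch restores (ion_client_create() and friends). A hedged usage sketch,
assuming a hypothetical consumer driver that wants one zeroed page from the
first heap ("sys_user" above):

	static int example_use_ion(struct ion_device *idev)
	{
		struct ion_client *client;
		struct ion_handle *handle;
		void *vaddr;

		client = ion_client_create(idev, "example-consumer");
		if (IS_ERR(client))
			return PTR_ERR(client);

		/* heap id 0; request a cached CPU mapping */
		handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, 1 << 0,
				   ION_FLAG_CACHED);
		if (IS_ERR(handle)) {
			ion_client_destroy(client);
			return PTR_ERR(handle);
		}

		vaddr = ion_map_kernel(client, handle);
		if (!IS_ERR(vaddr)) {
			memset(vaddr, 0, PAGE_SIZE);
			ion_unmap_kernel(client, handle);
		}

		ion_free(client, handle);
		ion_client_destroy(client);
		return 0;
	}
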
diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
index d9f8b1424da18b..9ff815ad1cb16f 100644
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -19,9 +19,14 @@
 #include <linux/uaccess.h>
 
 #include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
 
 union ion_ioctl_arg {
+	struct ion_fd_data fd;
 	struct ion_allocation_data allocation;
+	struct ion_handle_data handle;
+	struct ion_custom_data custom;
 	struct ion_heap_query query;
 };
 
@@ -46,6 +51,10 @@ static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
 static unsigned int ion_ioctl_dir(unsigned int cmd)
 {
 	switch (cmd) {
+	case ION_IOC_SYNC:
+	case ION_IOC_FREE:
+	case ION_IOC_CUSTOM:
+		return _IOC_WRITE;
 	default:
 		return _IOC_DIR(cmd);
 	}
@@ -53,6 +62,9 @@ static unsigned int ion_ioctl_dir(unsigned int cmd)
 
 long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 {
+	struct ion_client *client = filp->private_data;
+	struct ion_device *dev = client->dev;
+	struct ion_handle *cleanup_handle = NULL;
 	int ret = 0;
 	unsigned int dir;
 	union ion_ioctl_arg data;
@@ -80,28 +92,87 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	switch (cmd) {
 	case ION_IOC_ALLOC:
 	{
-		int fd;
+		struct ion_handle *handle;
 
-		fd = ion_alloc(data.allocation.len,
-			       data.allocation.heap_id_mask,
-			       data.allocation.flags);
-		if (fd < 0)
-			return fd;
+		handle = ion_alloc(client, data.allocation.len,
+				   data.allocation.align,
+				   data.allocation.heap_id_mask,
+				   data.allocation.flags);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
 
-		data.allocation.fd = fd;
+		data.allocation.handle = handle->id;
 
+		cleanup_handle = handle;
 		break;
 	}
+	case ION_IOC_FREE:
+	{
+		struct ion_handle *handle;
+
+		mutex_lock(&client->lock);
+		handle = ion_handle_get_by_id_nolock(client,
+						     data.handle.handle);
+		if (IS_ERR(handle)) {
+			mutex_unlock(&client->lock);
+			return PTR_ERR(handle);
+		}
+		ion_free_nolock(client, handle);
+		ion_handle_put_nolock(handle);
+		mutex_unlock(&client->lock);
+		break;
+	}
+	case ION_IOC_SHARE:
+	case ION_IOC_MAP:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_handle_get_by_id(client, data.handle.handle);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+		data.fd.fd = ion_share_dma_buf_fd(client, handle);
+		ion_handle_put(handle);
+		if (data.fd.fd < 0)
+			ret = data.fd.fd;
+		break;
+	}
+	case ION_IOC_IMPORT:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_import_dma_buf_fd(client, data.fd.fd);
+		if (IS_ERR(handle))
+			ret = PTR_ERR(handle);
+		else
+			data.handle.handle = handle->id;
+		break;
+	}
+	case ION_IOC_SYNC:
+	{
+		ret = ion_sync_for_device(client, data.fd.fd);
+		break;
+	}
+	case ION_IOC_CUSTOM:
+	{
+		if (!dev->custom_ioctl)
+			return -ENOTTY;
+		ret = dev->custom_ioctl(client, data.custom.cmd,
+					data.custom.arg);
+		break;
+	}
 	case ION_IOC_HEAP_QUERY:
-		ret = ion_query_heaps(&data.query);
+		ret = ion_query_heaps(client, &data.query);
 		break;
 	default:
 		return -ENOTTY;
 	}
 
 	if (dir & _IOC_READ) {
-		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd)))
+		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
+			if (cleanup_handle)
+				ion_free(client, cleanup_handle);
 			return -EFAULT;
+		}
 	}
 	return ret;
 }
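From userspace the restored ioctls compose as alloc -> share -> mmap: the
handle only names the buffer inside the client, while the dma-buf fd from
ION_IOC_SHARE is what gets mapped or passed to other processes. A sketch
(error handling elided; assumes a local copy of the staging uapi header):

	#include <fcntl.h>
	#include <sys/ioctl.h>
	#include <sys/mman.h>
	#include "ion.h"	/* copy of drivers/staging/android/uapi/ion.h */

	int ion_fd = open("/dev/ion", O_RDWR);

	struct ion_allocation_data alloc = {
		.len		= 4096,
		.align		= 4096,
		.heap_id_mask	= 1 << 0,	/* lowest heap id */
	};
	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);	/* fills alloc.handle */

	struct ion_fd_data share = { .handle = alloc.handle };
	ioctl(ion_fd, ION_IOC_SHARE, &share);	/* fills share.fd (a dma-buf) */

	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
		       share.fd, 0);

	struct ion_handle_data hd = { .handle = alloc.handle };
	ioctl(ion_fd, ION_IOC_FREE, &hd);	/* buffer lives while fd/mapping do */
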
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 93e2c90fa77d51..95a7f1648c00ca 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -39,15 +39,40 @@
 #include <linux/sched/task.h>
 
 #include "ion.h"
+#include "ion_priv.h"
+#include "compat_ion.h"
 
-static struct ion_device *internal_dev;
-static int heap_id;
+bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
+{
+	return (buffer->flags & ION_FLAG_CACHED) &&
+		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
+}
 
 bool ion_buffer_cached(struct ion_buffer *buffer)
 {
 	return !!(buffer->flags & ION_FLAG_CACHED);
 }
 
+static inline struct page *ion_buffer_page(struct page *page)
+{
+	return (struct page *)((unsigned long)page & ~(1UL));
+}
+
+static inline bool ion_buffer_page_is_dirty(struct page *page)
+{
+	return !!((unsigned long)page & 1UL);
+}
+
+static inline void ion_buffer_page_dirty(struct page **page)
+{
+	*page = (struct page *)((unsigned long)(*page) | 1UL);
+}
+
+static inline void ion_buffer_page_clean(struct page **page)
+{
+	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
+}
+
 /* this function should only be called while dev->lock is held */
 static void ion_buffer_add(struct ion_device *dev,
 			   struct ion_buffer *buffer)
@@ -78,11 +103,13 @@ static void ion_buffer_add(struct ion_device *dev,
 static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 					    struct ion_device *dev,
 					    unsigned long len,
+					    unsigned long align,
 					    unsigned long flags)
 {
 	struct ion_buffer *buffer;
 	struct sg_table *table;
-	int ret;
+	struct scatterlist *sg;
+	int i, ret;
 
 	buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
 	if (!buffer)
@@ -90,20 +117,22 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 
 	buffer->heap = heap;
 	buffer->flags = flags;
+	kref_init(&buffer->ref);
 
-	ret = heap->ops->allocate(heap, buffer, len, flags);
+	ret = heap->ops->allocate(heap, buffer, len, align, flags);
 
 	if (ret) {
 		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
 			goto err2;
 
 		ion_heap_freelist_drain(heap, 0);
-		ret = heap->ops->allocate(heap, buffer, len, flags);
+		ret = heap->ops->allocate(heap, buffer, len, align,
+					  flags);
 		if (ret)
 			goto err2;
 	}
 
-	if (!buffer->sg_table) {
+	if (buffer->sg_table == NULL) {
 		WARN_ONCE(1, "This heap needs to set the sgtable");
 		ret = -EINVAL;
 		goto err1;
@@ -113,10 +142,43 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
 
 	buffer->dev = dev;
 	buffer->size = len;
 
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+		struct scatterlist *sg;
+		int i, j, k = 0;
+
+		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
+		if (!buffer->pages) {
+			ret = -ENOMEM;
+			goto err1;
+		}
+
+		for_each_sg(table->sgl, sg, table->nents, i) {
+			struct page *page = sg_page(sg);
+
+			for (j = 0; j < sg->length / PAGE_SIZE; j++)
+				buffer->pages[k++] = page++;
+		}
+	}
+
+	buffer->dev = dev;
+	buffer->size = len;
-	INIT_LIST_HEAD(&buffer->attachments);
+	INIT_LIST_HEAD(&buffer->vmas);
 	mutex_init(&buffer->lock);
+	/*
+	 * this will set up dma addresses for the sglist -- it is not
+	 * technically correct as per the dma api -- a specific
+	 * device isn't really taking ownership here.  However, in practice on
+	 * our systems the only dma_address space is physical addresses.
+	 * Additionally, we can't afford the overhead of invalidating every
+	 * allocation via dma_map_sg. The implicit contract here is that
+	 * memory coming from the heaps is ready for dma, ie if it has a
+	 * cached mapping that mapping has been invalidated
+	 */
+	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
+		sg_dma_address(sg) = sg_phys(sg);
+		sg_dma_len(sg) = sg->length;
+	}
 	mutex_lock(&dev->buffer_lock);
 	ion_buffer_add(dev, buffer);
 	mutex_unlock(&dev->buffer_lock);
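The comment in the hunk above concedes that assigning sg_phys() to
sg_dma_address() bypasses the DMA API. For contrast, a sketch of what the
API-correct ownership transfer would look like once a real struct device is
known (this is the direction the TODO's flush_for_device points at; dev must
be the device actually doing DMA):

	static int example_map_for_dma(struct device *dev,
				       struct sg_table *table,
				       enum dma_data_direction dir)
	{
		/* dma_map_sg() performs cache maintenance and IOMMU setup */
		int nents = dma_map_sg(dev, table->sgl, table->nents, dir);

		if (!nents)
			return -ENOMEM;
		/* program hardware using sg_dma_address()/sg_dma_len() here */
		dma_unmap_sg(dev, table->sgl, table->nents, dir);
		return 0;
	}
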
@@ -134,11 +196,13 @@ void ion_buffer_destroy(struct ion_buffer *buffer)
 	if (WARN_ON(buffer->kmap_cnt > 0))
 		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
 	buffer->heap->ops->free(buffer);
+	vfree(buffer->pages);
 	kfree(buffer);
 }
 
-static void _ion_buffer_destroy(struct ion_buffer *buffer)
+static void _ion_buffer_destroy(struct kref *kref)
 {
+	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
 	struct ion_heap *heap = buffer->heap;
 	struct ion_device *dev = buffer->dev;
 
@@ -152,6 +216,273 @@ static void _ion_buffer_destroy(struct ion_buffer *buffer)
 	ion_buffer_destroy(buffer);
 }
 
+static void ion_buffer_get(struct ion_buffer *buffer)
+{
+	kref_get(&buffer->ref);
+}
+
+static int ion_buffer_put(struct ion_buffer *buffer)
+{
+	return kref_put(&buffer->ref, _ion_buffer_destroy);
+}
+
+static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
+{
+	mutex_lock(&buffer->lock);
+	buffer->handle_count++;
+	mutex_unlock(&buffer->lock);
+}
+
+static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
+{
+	/*
+	 * when a buffer is removed from a handle, if it is not in
+	 * any other handles, copy the taskcomm and the pid of the
+	 * process it's being removed from into the buffer.  At this
+	 * point there will be no way to track what processes this buffer is
+	 * being used by, it only exists as a dma_buf file descriptor.
+	 * The taskcomm and pid can provide a debug hint as to where this fd
+	 * is in the system
+	 */
+	mutex_lock(&buffer->lock);
+	buffer->handle_count--;
+	BUG_ON(buffer->handle_count < 0);
+	if (!buffer->handle_count) {
+		struct task_struct *task;
+
+		task = current->group_leader;
+		get_task_comm(buffer->task_comm, task);
+		buffer->pid = task_pid_nr(task);
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static struct ion_handle *ion_handle_create(struct ion_client *client,
+					    struct ion_buffer *buffer)
+{
+	struct ion_handle *handle;
+
+	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
+	if (!handle)
+		return ERR_PTR(-ENOMEM);
+	kref_init(&handle->ref);
+	RB_CLEAR_NODE(&handle->node);
+	handle->client = client;
+	ion_buffer_get(buffer);
+	ion_buffer_add_to_handle(buffer);
+	handle->buffer = buffer;
+
+	return handle;
+}
+
+static void ion_handle_kmap_put(struct ion_handle *);
+
+static void ion_handle_destroy(struct kref *kref)
+{
+	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
+	struct ion_client *client = handle->client;
+	struct ion_buffer *buffer = handle->buffer;
+
+	mutex_lock(&buffer->lock);
+	while (handle->kmap_cnt)
+		ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+
+	idr_remove(&client->idr, handle->id);
+	if (!RB_EMPTY_NODE(&handle->node))
+		rb_erase(&handle->node, &client->handles);
+
+	ion_buffer_remove_from_handle(buffer);
+	ion_buffer_put(buffer);
+
+	kfree(handle);
+}
+
+static void ion_handle_get(struct ion_handle *handle)
+{
+	kref_get(&handle->ref);
+}
+
+int ion_handle_put_nolock(struct ion_handle *handle)
+{
+	return kref_put(&handle->ref, ion_handle_destroy);
+}
+
+int ion_handle_put(struct ion_handle *handle)
+{
+	struct ion_client *client = handle->client;
+	int ret;
+
+	mutex_lock(&client->lock);
+	ret = ion_handle_put_nolock(handle);
+	mutex_unlock(&client->lock);
+
+	return ret;
+}
+
+static struct ion_handle *ion_handle_lookup(struct ion_client *client,
+					    struct ion_buffer *buffer)
+{
+	struct rb_node *n = client->handles.rb_node;
+
+	while (n) {
+		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);
+
+		if (buffer < entry->buffer)
+			n = n->rb_left;
+		else if (buffer > entry->buffer)
+			n = n->rb_right;
+		else
+			return entry;
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
+					       int id)
+{
+	struct ion_handle *handle;
+
+	handle = idr_find(&client->idr, id);
+	if (handle)
+		ion_handle_get(handle);
+
+	return handle ? handle : ERR_PTR(-EINVAL);
+}
+
+struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+					int id)
+{
+	struct ion_handle *handle;
+
+	mutex_lock(&client->lock);
+	handle = ion_handle_get_by_id_nolock(client, id);
+	mutex_unlock(&client->lock);
+
+	return handle;
+}
+
+static bool ion_handle_validate(struct ion_client *client,
+				struct ion_handle *handle)
+{
+	WARN_ON(!mutex_is_locked(&client->lock));
+	return idr_find(&client->idr, handle->id) == handle;
+}
+
+static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
+{
+	int id;
+	struct rb_node **p = &client->handles.rb_node;
+	struct rb_node *parent = NULL;
+	struct ion_handle *entry;
+
+	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
+	if (id < 0)
+		return id;
+
+	handle->id = id;
+
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_handle, node);
+
+		if (handle->buffer < entry->buffer)
+			p = &(*p)->rb_left;
+		else if (handle->buffer > entry->buffer)
+			p = &(*p)->rb_right;
+		else
+			WARN(1, "%s: buffer already found.", __func__);
+	}
+
+	rb_link_node(&handle->node, parent, p);
+	rb_insert_color(&handle->node, &client->handles);
+
+	return 0;
+}
+
+struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
+			     size_t align, unsigned int heap_id_mask,
+			     unsigned int flags)
+{
+	struct ion_handle *handle;
+	struct ion_device *dev = client->dev;
+	struct ion_buffer *buffer = NULL;
+	struct ion_heap *heap;
+	int ret;
+
+	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
+		 len, align, heap_id_mask, flags);
+	/*
+	 * traverse the list of heaps available in this system in priority
+	 * order.  If the heap type is supported by the client, and matches the
+	 * request of the caller allocate from it.  Repeat until allocate has
+	 * succeeded or all heaps have been tried
+	 */
+	len = PAGE_ALIGN(len);
+
+	if (!len)
+		return ERR_PTR(-EINVAL);
+
+	down_read(&dev->lock);
+	plist_for_each_entry(heap, &dev->heaps, node) {
+		/* if the caller didn't specify this heap id */
+		if (!((1 << heap->id) & heap_id_mask))
+			continue;
+		buffer = ion_buffer_create(heap, dev, len, align, flags);
+		if (!IS_ERR(buffer))
+			break;
+	}
+	up_read(&dev->lock);
+
+	if (buffer == NULL)
+		return ERR_PTR(-ENODEV);
+
+	if (IS_ERR(buffer))
+		return ERR_CAST(buffer);
+
+	handle = ion_handle_create(client, buffer);
+
+	/*
+	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
+	 * and ion_handle_create will take a second reference, drop one here
+	 */
+	ion_buffer_put(buffer);
+
+	if (IS_ERR(handle))
+		return handle;
+
+	mutex_lock(&client->lock);
+	ret = ion_handle_add(client, handle);
+	mutex_unlock(&client->lock);
+	if (ret) {
+		ion_handle_put(handle);
+		handle = ERR_PTR(ret);
+	}
+
+	return handle;
+}
+EXPORT_SYMBOL(ion_alloc);
+
+void ion_free_nolock(struct ion_client *client,
+		     struct ion_handle *handle)
+{
+	if (!ion_handle_validate(client, handle)) {
+		WARN(1, "%s: invalid handle passed to free.\n", __func__);
+		return;
+	}
+	ion_handle_put_nolock(handle);
+}
+
+void ion_free(struct ion_client *client, struct ion_handle *handle)
+{
+	BUG_ON(client != handle->client);
+
+	mutex_lock(&client->lock);
+	ion_free_nolock(client, handle);
+	mutex_unlock(&client->lock);
+}
+EXPORT_SYMBOL(ion_free);
+
 static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 {
 	void *vaddr;
@@ -161,7 +492,7 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 		return buffer->vaddr;
 	}
 	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
-	if (WARN_ONCE(!vaddr,
+	if (WARN_ONCE(vaddr == NULL,
 		      "heap->ops->map_kernel should return ERR_PTR on error"))
 		return ERR_PTR(-EINVAL);
 	if (IS_ERR(vaddr))
@@ -171,6 +502,22 @@ static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
 	return vaddr;
 }
 
+static void *ion_handle_kmap_get(struct ion_handle *handle)
+{
+	struct ion_buffer *buffer = handle->buffer;
+	void *vaddr;
+
+	if (handle->kmap_cnt) {
+		handle->kmap_cnt++;
+		return buffer->vaddr;
+	}
+	vaddr = ion_buffer_kmap_get(buffer);
+	if (IS_ERR(vaddr))
+		return vaddr;
+	handle->kmap_cnt++;
+	return vaddr;
+}
+
 static void ion_buffer_kmap_put(struct ion_buffer *buffer)
 {
 	buffer->kmap_cnt--;
@@ -180,110 +527,408 @@ static void ion_buffer_kmap_put(struct ion_buffer *buffer)
 	}
 }
 
-static struct sg_table *dup_sg_table(struct sg_table *table)
+static void ion_handle_kmap_put(struct ion_handle *handle)
 {
-	struct sg_table *new_table;
-	int ret, i;
-	struct scatterlist *sg, *new_sg;
+	struct ion_buffer *buffer = handle->buffer;
 
-	new_table = kzalloc(sizeof(*new_table), GFP_KERNEL);
-	if (!new_table)
-		return ERR_PTR(-ENOMEM);
+	if (!handle->kmap_cnt) {
+		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
+		return;
+	}
+	handle->kmap_cnt--;
+	if (!handle->kmap_cnt)
+		ion_buffer_kmap_put(buffer);
+}
 
-	ret = sg_alloc_table(new_table, table->nents, GFP_KERNEL);
-	if (ret) {
-		kfree(new_table);
-		return ERR_PTR(-ENOMEM);
+void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
+{
+	struct ion_buffer *buffer;
+	void *vaddr;
+
+	mutex_lock(&client->lock);
+	if (!ion_handle_validate(client, handle)) {
+		pr_err("%s: invalid handle passed to map_kernel.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-EINVAL);
 	}
 
-	new_sg = new_table->sgl;
-	for_each_sg(table->sgl, sg, table->nents, i) {
-		memcpy(new_sg, sg, sizeof(*sg));
-		sg->dma_address = 0;
-		new_sg = sg_next(new_sg);
+	buffer = handle->buffer;
+
+	if (!handle->buffer->heap->ops->map_kernel) {
+		pr_err("%s: map_kernel is not implemented by this heap.\n",
+		       __func__);
+		mutex_unlock(&client->lock);
+		return ERR_PTR(-ENODEV);
 	}
 
-	return new_table;
+	mutex_lock(&buffer->lock);
+	vaddr = ion_handle_kmap_get(handle);
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
+	return vaddr;
 }
+EXPORT_SYMBOL(ion_map_kernel);
 
-static void free_duped_table(struct sg_table *table)
+void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
 {
-	sg_free_table(table);
-	kfree(table);
+	struct ion_buffer *buffer;
+
+	mutex_lock(&client->lock);
+	buffer = handle->buffer;
+	mutex_lock(&buffer->lock);
+	ion_handle_kmap_put(handle);
+	mutex_unlock(&buffer->lock);
+	mutex_unlock(&client->lock);
 }
+EXPORT_SYMBOL(ion_unmap_kernel);
 
-struct ion_dma_buf_attachment {
-	struct device *dev;
-	struct sg_table *table;
-	struct list_head list;
+static struct mutex debugfs_mutex;
+static struct rb_root *ion_root_client;
+static int is_client_alive(struct ion_client *client)
+{
+	struct rb_node *node;
+	struct ion_client *tmp;
+	struct ion_device *dev;
+
+	node = ion_root_client->rb_node;
+	dev = container_of(ion_root_client, struct ion_device, clients);
+
+	down_read(&dev->lock);
+	while (node) {
+		tmp = rb_entry(node, struct ion_client, node);
+		if (client < tmp) {
+			node = node->rb_left;
+		} else if (client > tmp) {
+			node = node->rb_right;
+		} else {
+			up_read(&dev->lock);
+			return 1;
+		}
+	}
+
+	up_read(&dev->lock);
+	return 0;
+}
+
+static int ion_debug_client_show(struct seq_file *s, void *unused)
+{
+	struct ion_client *client = s->private;
+	struct rb_node *n;
+	size_t sizes[ION_NUM_HEAP_IDS] = {0};
+	const char *names[ION_NUM_HEAP_IDS] = {NULL};
+	int i;
+
+	mutex_lock(&debugfs_mutex);
+	if (!is_client_alive(client)) {
+		seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
+			   client);
+		mutex_unlock(&debugfs_mutex);
+		return 0;
+	}
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		unsigned int id = handle->buffer->heap->id;
+
+		if (!names[id])
+			names[id] = handle->buffer->heap->name;
+		sizes[id] += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+	mutex_unlock(&debugfs_mutex);
+
+	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
+	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
+		if (!names[i])
+			continue;
+		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
+	}
+	return 0;
+}
+
+static int ion_debug_client_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_client_show, inode->i_private);
+}
+
+static const struct file_operations debug_client_fops = {
+	.open = ion_debug_client_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
 };
 
-static int ion_dma_buf_attach(struct dma_buf *dmabuf, struct device *dev,
-			      struct dma_buf_attachment *attachment)
+static int ion_get_client_serial(const struct rb_root *root,
+				 const unsigned char *name)
 {
-	struct ion_dma_buf_attachment *a;
-	struct sg_table *table;
-	struct ion_buffer *buffer = dmabuf->priv;
+	int serial = -1;
+	struct rb_node *node;
 
-	a = kzalloc(sizeof(*a), GFP_KERNEL);
-	if (!a)
-		return -ENOMEM;
+	for (node = rb_first(root); node; node = rb_next(node)) {
+		struct ion_client *client = rb_entry(node, struct ion_client,
+						     node);
 
-	table = dup_sg_table(buffer->sg_table);
-	if (IS_ERR(table)) {
-		kfree(a);
-		return -ENOMEM;
+		if (strcmp(client->name, name))
+			continue;
+		serial = max(serial, client->display_serial);
 	}
+	return serial + 1;
+}
 
-	a->table = table;
-	a->dev = dev;
-	INIT_LIST_HEAD(&a->list);
+struct ion_client *ion_client_create(struct ion_device *dev,
+				     const char *name)
+{
+	struct ion_client *client;
+	struct task_struct *task;
+	struct rb_node **p;
+	struct rb_node *parent = NULL;
+	struct ion_client *entry;
+	pid_t pid;
 
-	attachment->priv = a;
+	if (!name) {
+		pr_err("%s: Name cannot be null\n", __func__);
+		return ERR_PTR(-EINVAL);
+	}
 
-	mutex_lock(&buffer->lock);
-	list_add(&a->list, &buffer->attachments);
-	mutex_unlock(&buffer->lock);
+	get_task_struct(current->group_leader);
+	task_lock(current->group_leader);
+	pid = task_pid_nr(current->group_leader);
+	/*
+	 * don't bother to store task struct for kernel threads,
+	 * they can't be killed anyway
+	 */
+	if (current->group_leader->flags & PF_KTHREAD) {
+		put_task_struct(current->group_leader);
+		task = NULL;
+	} else {
+		task = current->group_leader;
+	}
+	task_unlock(current->group_leader);
+
+	client = kzalloc(sizeof(*client), GFP_KERNEL);
+	if (!client)
+		goto err_put_task_struct;
+
+	client->dev = dev;
+	client->handles = RB_ROOT;
+	idr_init(&client->idr);
+	mutex_init(&client->lock);
+	client->task = task;
+	client->pid = pid;
+	client->name = kstrdup(name, GFP_KERNEL);
+	if (!client->name)
+		goto err_free_client;
 
-	return 0;
+	down_write(&dev->lock);
+	client->display_serial = ion_get_client_serial(&dev->clients, name);
+	client->display_name = kasprintf(
+		GFP_KERNEL, "%s-%d", name, client->display_serial);
+	if (!client->display_name) {
+		up_write(&dev->lock);
+		goto err_free_client_name;
+	}
+	p = &dev->clients.rb_node;
+	while (*p) {
+		parent = *p;
+		entry = rb_entry(parent, struct ion_client, node);
+
+		if (client < entry)
+			p = &(*p)->rb_left;
+		else if (client > entry)
+			p = &(*p)->rb_right;
+	}
+	rb_link_node(&client->node, parent, p);
+	rb_insert_color(&client->node, &dev->clients);
+
+	client->debug_root = debugfs_create_file(client->display_name, 0664,
+						 dev->clients_debug_root,
+						 client, &debug_client_fops);
+	if (!client->debug_root) {
+		char buf[256], *path;
+
+		path = dentry_path(dev->clients_debug_root, buf, 256);
+		pr_err("Failed to create client debugfs at %s/%s\n",
+		       path, client->display_name);
+	}
+
+	up_write(&dev->lock);
+
+	return client;
+
+err_free_client_name:
+	kfree(client->name);
+err_free_client:
+	kfree(client);
+err_put_task_struct:
+	if (task)
+		put_task_struct(current->group_leader);
+	return ERR_PTR(-ENOMEM);
 }
+EXPORT_SYMBOL(ion_client_create);
 
-static void ion_dma_buf_detatch(struct dma_buf *dmabuf,
-				struct dma_buf_attachment *attachment)
+void ion_client_destroy(struct ion_client *client)
 {
-	struct ion_dma_buf_attachment *a = attachment->priv;
-	struct ion_buffer *buffer = dmabuf->priv;
+	struct ion_device *dev = client->dev;
+	struct rb_node *n;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	mutex_lock(&debugfs_mutex);
+	while ((n = rb_first(&client->handles))) {
+		struct ion_handle *handle = rb_entry(n, struct ion_handle,
+						     node);
+		ion_handle_destroy(&handle->ref);
+	}
 
-	free_duped_table(a->table);
-	mutex_lock(&buffer->lock);
-	list_del(&a->list);
-	mutex_unlock(&buffer->lock);
+	idr_destroy(&client->idr);
+
+	down_write(&dev->lock);
+	if (client->task)
+		put_task_struct(client->task);
+	rb_erase(&client->node, &dev->clients);
+	debugfs_remove_recursive(client->debug_root);
+	up_write(&dev->lock);
 
-	kfree(a);
+	kfree(client->display_name);
+	kfree(client->name);
+	kfree(client);
+	mutex_unlock(&debugfs_mutex);
 }
+EXPORT_SYMBOL(ion_client_destroy);
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction direction);
 
 static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
 					enum dma_data_direction direction)
 {
-	struct ion_dma_buf_attachment *a = attachment->priv;
-	struct sg_table *table;
-
-	table = a->table;
-
-	if (!dma_map_sg(attachment->dev, table->sgl, table->nents,
-			direction))
-		return ERR_PTR(-ENOMEM);
+	struct dma_buf *dmabuf = attachment->dmabuf;
+	struct ion_buffer *buffer = dmabuf->priv;
 
-	return table;
+	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
+	return buffer->sg_table;
 }
 
 static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
 			      struct sg_table *table,
 			      enum dma_data_direction direction)
 {
-	dma_unmap_sg(attachment->dev, table->sgl, table->nents, direction);
 }
 
+void ion_pages_sync_for_device(struct device *dev, struct page *page,
+			       size_t size, enum dma_data_direction dir)
+{
+	struct scatterlist sg;
+
+	sg_init_table(&sg, 1);
+	sg_set_page(&sg, page, size, 0);
+	/*
+	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
+	 * for the targeted device, but this works on the currently targeted
+	 * hardware.
+	 */
+	sg_dma_address(&sg) = page_to_phys(page);
+	dma_sync_sg_for_device(dev, &sg, 1, dir);
+}
+
+struct ion_vma_list {
+	struct list_head list;
+	struct vm_area_struct *vma;
+};
+
+static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
+				       struct device *dev,
+				       enum dma_data_direction dir)
+{
+	struct ion_vma_list *vma_list;
+	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
+	int i;
+
+	pr_debug("%s: syncing for device %s\n", __func__,
+		 dev ? dev_name(dev) : "null");
+
+	if (!ion_buffer_fault_user_mappings(buffer))
+		return;
+
+	mutex_lock(&buffer->lock);
+	for (i = 0; i < pages; i++) {
+		struct page *page = buffer->pages[i];
+
+		if (ion_buffer_page_is_dirty(page))
+			ion_pages_sync_for_device(dev, ion_buffer_page(page),
+						  PAGE_SIZE, dir);
+
+		ion_buffer_page_clean(buffer->pages + i);
+	}
+	list_for_each_entry(vma_list, &buffer->vmas, list) {
+		struct vm_area_struct *vma = vma_list->vma;
+
+		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static int ion_vm_fault(struct vm_fault *vmf)
+{
+	struct ion_buffer *buffer = vmf->vma->vm_private_data;
+	unsigned long pfn;
+	int ret;
+
+	mutex_lock(&buffer->lock);
+	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
+	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);
+
+	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
+	ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
+	mutex_unlock(&buffer->lock);
+	if (ret)
+		return VM_FAULT_ERROR;
+
+	return VM_FAULT_NOPAGE;
+}
+
+static void ion_vm_open(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list;
+
+	vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
+	if (!vma_list)
+		return;
+	vma_list->vma = vma;
+	mutex_lock(&buffer->lock);
+	list_add(&vma_list->list, &buffer->vmas);
+	mutex_unlock(&buffer->lock);
+	pr_debug("%s: adding %p\n", __func__, vma);
+}
+
+static void ion_vm_close(struct vm_area_struct *vma)
+{
+	struct ion_buffer *buffer = vma->vm_private_data;
+	struct ion_vma_list *vma_list, *tmp;
+
+	pr_debug("%s\n", __func__);
+	mutex_lock(&buffer->lock);
+	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
+		if (vma_list->vma != vma)
+			continue;
+		list_del(&vma_list->list);
+		kfree(vma_list);
+		pr_debug("%s: deleting %p\n", __func__, vma);
+		break;
+	}
+	mutex_unlock(&buffer->lock);
+}
+
+static const struct vm_operations_struct ion_vma_ops = {
+	.open = ion_vm_open,
+	.close = ion_vm_close,
+	.fault = ion_vm_fault,
+};
+
 static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
@@ -295,6 +940,15 @@ static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
 		return -EINVAL;
 	}
 
+	if (ion_buffer_fault_user_mappings(buffer)) {
+		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
+				 VM_DONTDUMP;
+		vma->vm_private_data = buffer;
+		vma->vm_ops = &ion_vma_ops;
+		ion_vm_open(vma);
+		return 0;
+	}
+
 	if (!(buffer->flags & ION_FLAG_CACHED))
 		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
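Putting the pieces above together: for buffers where
ion_buffer_fault_user_mappings() is true, pages are inserted lazily by
ion_vm_fault() and tagged dirty via the low pointer bit, and
ion_buffer_sync_for_device() flushes only the dirty pages before
zap_page_range() tears the PTEs down so the next CPU touch refaults. The
matching userspace sequence would be roughly (sketch, error handling elided):

	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
		       dmabuf_fd, 0);
	memset(p, 0xff, len);		/* faults pages in, marks them dirty */

	struct ion_fd_data sync = { .fd = dmabuf_fd };
	ioctl(ion_fd, ION_IOC_SYNC, &sync);	/* flush dirty pages, zap mapping */
	/* device may DMA now; the next CPU access refaults and re-dirties */
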
@@ -314,7 +968,7 @@ static void ion_dma_buf_release(struct dma_buf *dmabuf)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
 
-	_ion_buffer_destroy(buffer);
+	ion_buffer_put(buffer);
 }
 
 static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
@@ -334,44 +988,26 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
 {
 	struct ion_buffer *buffer = dmabuf->priv;
 	void *vaddr;
-	struct ion_dma_buf_attachment *a;
 
-	/*
-	 * TODO: Move this elsewhere because we don't always need a vaddr
-	 */
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		vaddr = ion_buffer_kmap_get(buffer);
-		mutex_unlock(&buffer->lock);
+	if (!buffer->heap->ops->map_kernel) {
+		pr_err("%s: map kernel is not implemented by this heap.\n",
+		       __func__);
+		return -ENODEV;
 	}
 
 	mutex_lock(&buffer->lock);
-	list_for_each_entry(a, &buffer->attachments, list) {
-		dma_sync_sg_for_cpu(a->dev, a->table->sgl, a->table->nents,
-				    DMA_BIDIRECTIONAL);
-	}
+	vaddr = ion_buffer_kmap_get(buffer);
 	mutex_unlock(&buffer->lock);
-
-	return 0;
+	return PTR_ERR_OR_ZERO(vaddr);
 }
 
 static int ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 				      enum dma_data_direction direction)
 {
 	struct ion_buffer *buffer = dmabuf->priv;
-	struct ion_dma_buf_attachment *a;
-
-	if (buffer->heap->ops->map_kernel) {
-		mutex_lock(&buffer->lock);
-		ion_buffer_kmap_put(buffer);
-		mutex_unlock(&buffer->lock);
-	}
 
 	mutex_lock(&buffer->lock);
-	list_for_each_entry(a, &buffer->attachments, list) {
-		dma_sync_sg_for_device(a->dev, a->table->sgl, a->table->nents,
-				       DMA_BIDIRECTIONAL);
-	}
+	ion_buffer_kmap_put(buffer);
 	mutex_unlock(&buffer->lock);
 
 	return 0;
@@ -382,8 +1018,6 @@ static const struct dma_buf_ops dma_buf_ops = {
 	.unmap_dma_buf = ion_unmap_dma_buf,
 	.mmap = ion_mmap,
 	.release = ion_dma_buf_release,
-	.attach = ion_dma_buf_attach,
-	.detach = ion_dma_buf_detatch,
 	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
 	.end_cpu_access = ion_dma_buf_end_cpu_access,
 	.map_atomic = ion_dma_buf_kmap,
@@ -392,44 +1026,24 @@ static const struct dma_buf_ops dma_buf_ops = {
 	.unmap = ion_dma_buf_kunmap,
 };
 
-int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags)
+struct dma_buf *ion_share_dma_buf(struct ion_client *client,
+				  struct ion_handle *handle)
 {
-	struct ion_device *dev = internal_dev;
-	struct ion_buffer *buffer = NULL;
-	struct ion_heap *heap;
 	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
-	int fd;
+	struct ion_buffer *buffer;
 	struct dma_buf *dmabuf;
+	bool valid_handle;
 
-	pr_debug("%s: len %zu heap_id_mask %u flags %x\n", __func__,
-		 len, heap_id_mask, flags);
-	/*
-	 * traverse the list of heaps available in this system in priority
-	 * order.  If the heap type is supported by the client, and matches the
-	 * request of the caller allocate from it.  Repeat until allocate has
Repeat until allocate has - * succeeded or all heaps have been tried - */ - len = PAGE_ALIGN(len); - - if (!len) - return -EINVAL; - - down_read(&dev->lock); - plist_for_each_entry(heap, &dev->heaps, node) { - /* if the caller didn't specify this heap id */ - if (!((1 << heap->id) & heap_id_mask)) - continue; - buffer = ion_buffer_create(heap, dev, len, flags); - if (!IS_ERR(buffer)) - break; + mutex_lock(&client->lock); + valid_handle = ion_handle_validate(client, handle); + if (!valid_handle) { + WARN(1, "%s: invalid handle passed to share.\n", __func__); + mutex_unlock(&client->lock); + return ERR_PTR(-EINVAL); } - up_read(&dev->lock); - - if (!buffer) - return -ENODEV; - - if (IS_ERR(buffer)) - return PTR_ERR(buffer); + buffer = handle->buffer; + ion_buffer_get(buffer); + mutex_unlock(&client->lock); exp_info.ops = &dma_buf_ops; exp_info.size = buffer->size; @@ -438,20 +1052,116 @@ int ion_alloc(size_t len, unsigned int heap_id_mask, unsigned int flags) dmabuf = dma_buf_export(&exp_info); if (IS_ERR(dmabuf)) { - _ion_buffer_destroy(buffer); - return PTR_ERR(dmabuf); + ion_buffer_put(buffer); + return dmabuf; } + return dmabuf; +} +EXPORT_SYMBOL(ion_share_dma_buf); + +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle) +{ + struct dma_buf *dmabuf; + int fd; + + dmabuf = ion_share_dma_buf(client, handle); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + fd = dma_buf_fd(dmabuf, O_CLOEXEC); if (fd < 0) dma_buf_put(dmabuf); return fd; } +EXPORT_SYMBOL(ion_share_dma_buf_fd); -int ion_query_heaps(struct ion_heap_query *query) +struct ion_handle *ion_import_dma_buf(struct ion_client *client, + struct dma_buf *dmabuf) { - struct ion_device *dev = internal_dev; + struct ion_buffer *buffer; + struct ion_handle *handle; + int ret; + + /* if this memory came from ion */ + + if (dmabuf->ops != &dma_buf_ops) { + pr_err("%s: can not import dmabuf from another exporter\n", + __func__); + return ERR_PTR(-EINVAL); + } + buffer = dmabuf->priv; + + mutex_lock(&client->lock); + /* if a handle exists for this buffer just take a reference to it */ + handle = ion_handle_lookup(client, buffer); + if (!IS_ERR(handle)) { + ion_handle_get(handle); + mutex_unlock(&client->lock); + goto end; + } + + handle = ion_handle_create(client, buffer); + if (IS_ERR(handle)) { + mutex_unlock(&client->lock); + goto end; + } + + ret = ion_handle_add(client, handle); + mutex_unlock(&client->lock); + if (ret) { + ion_handle_put(handle); + handle = ERR_PTR(ret); + } + +end: + return handle; +} +EXPORT_SYMBOL(ion_import_dma_buf); + +struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd) +{ + struct dma_buf *dmabuf; + struct ion_handle *handle; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return ERR_CAST(dmabuf); + + handle = ion_import_dma_buf(client, dmabuf); + dma_buf_put(dmabuf); + return handle; +} +EXPORT_SYMBOL(ion_import_dma_buf_fd); + +int ion_sync_for_device(struct ion_client *client, int fd) +{ + struct dma_buf *dmabuf; + struct ion_buffer *buffer; + + dmabuf = dma_buf_get(fd); + if (IS_ERR(dmabuf)) + return PTR_ERR(dmabuf); + + /* if this memory came from ion */ + if (dmabuf->ops != &dma_buf_ops) { + pr_err("%s: can not sync dmabuf from another exporter\n", + __func__); + dma_buf_put(dmabuf); + return -EINVAL; + } + buffer = dmabuf->priv; + + dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, + buffer->sg_table->nents, DMA_BIDIRECTIONAL); + dma_buf_put(dmabuf); + return 0; +} + +int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query) +{ + 
+	struct ion_device *dev = client->dev;
 	struct ion_heap_data __user *buffer = u64_to_user_ptr(query->heaps);
 	int ret = -EINVAL, cnt = 0, max_cnt;
 	struct ion_heap *heap;
@@ -488,18 +1198,138 @@ int ion_query_heaps(struct ion_heap_query *query)
 	}
 
 	query->cnt = cnt;
-	ret = 0;
 out:
 	up_read(&dev->lock);
 	return ret;
 }
 
+static int ion_release(struct inode *inode, struct file *file)
+{
+	struct ion_client *client = file->private_data;
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	ion_client_destroy(client);
+	return 0;
+}
+
+static int ion_open(struct inode *inode, struct file *file)
+{
+	struct miscdevice *miscdev = file->private_data;
+	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
+	struct ion_client *client;
+	char debug_name[64];
+
+	pr_debug("%s: %d\n", __func__, __LINE__);
+	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
+	client = ion_client_create(dev, debug_name);
+	if (IS_ERR(client))
+		return PTR_ERR(client);
+	file->private_data = client;
+
+	return 0;
+}
+
 static const struct file_operations ion_fops = {
 	.owner = THIS_MODULE,
+	.open = ion_open,
+	.release = ion_release,
 	.unlocked_ioctl = ion_ioctl,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = ion_ioctl,
-#endif
+	.compat_ioctl = compat_ion_ioctl,
+};
+
+static size_t ion_debug_heap_total(struct ion_client *client,
+				   unsigned int id)
+{
+	size_t size = 0;
+	struct rb_node *n;
+
+	mutex_lock(&client->lock);
+	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
+		struct ion_handle *handle = rb_entry(n,
+						     struct ion_handle,
+						     node);
+		if (handle->buffer->heap->id == id)
+			size += handle->buffer->size;
+	}
+	mutex_unlock(&client->lock);
+	return size;
+}
+
+static int ion_debug_heap_show(struct seq_file *s, void *unused)
+{
+	struct ion_heap *heap = s->private;
+	struct ion_device *dev = heap->dev;
+	struct rb_node *n;
+	size_t total_size = 0;
+	size_t total_orphaned_size = 0;
+
+	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
+	seq_puts(s, "----------------------------------------------------\n");
+
+	mutex_lock(&debugfs_mutex);
+	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
+		struct ion_client *client = rb_entry(n, struct ion_client,
+						     node);
+		size_t size = ion_debug_heap_total(client, heap->id);
+
+		if (!size)
+			continue;
+		if (client->task) {
+			char task_comm[TASK_COMM_LEN];
+
+			get_task_comm(task_comm, client->task);
+			seq_printf(s, "%16s %16u %16zu\n", task_comm,
+				   client->pid, size);
+		} else {
+			seq_printf(s, "%16s %16u %16zu\n", client->name,
+				   client->pid, size);
+		}
+	}
+	mutex_unlock(&debugfs_mutex);
+
+	seq_puts(s, "----------------------------------------------------\n");
+	seq_puts(s, "orphaned allocations (info is from last known client):\n");
+	mutex_lock(&dev->buffer_lock);
+	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
+		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
+						     node);
+		if (buffer->heap->id != heap->id)
+			continue;
+		total_size += buffer->size;
+		if (!buffer->handle_count) {
+			seq_printf(s, "%16s %16u %16zu %d %d\n",
+				   buffer->task_comm, buffer->pid,
+				   buffer->size, buffer->kmap_cnt,
+				   kref_read(&buffer->ref));
+			total_orphaned_size += buffer->size;
+		}
+	}
+	mutex_unlock(&dev->buffer_lock);
+	seq_puts(s, "----------------------------------------------------\n");
+	seq_printf(s, "%16s %16zu\n", "total orphaned",
+		   total_orphaned_size);
+	seq_printf(s, "%16s %16zu\n", "total ", total_size);
+	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
+		seq_printf(s, "%16s %16zu\n", "deferred free",
+			   heap->free_list_size);
+	seq_puts(s, "----------------------------------------------------\n");
+
+	if (heap->debug_show)
+		heap->debug_show(heap, s, unused);
+
+	return 0;
+}
+
+static int ion_debug_heap_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, ion_debug_heap_show, inode->i_private);
+}
+
+static const struct file_operations debug_heap_fops = {
+	.open = ion_debug_heap_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
 static int debug_shrink_set(void *data, u64 val)
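The heap query above is used as a two-call handshake: call it once with
query.heaps == 0 so the kernel only reports the count, then allocate the array
and call it again. A hedged userspace sketch (it assumes that contract; the
count-only path is not visible in this hunk):

	struct ion_heap_query query = { 0 };
	struct ion_heap_data *heaps;

	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);	/* sets query.cnt */

	heaps = calloc(query.cnt, sizeof(*heaps));
	query.heaps = (__u64)(uintptr_t)heaps;
	ioctl(ion_fd, ION_IOC_HEAP_QUERY, &query);

	for (unsigned int i = 0; i < query.cnt; i++)
		printf("heap %u: %s (type %u)\n",
		       heaps[i].heap_id, heaps[i].name, heaps[i].type);
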
@@ -537,10 +1367,9 @@ static int debug_shrink_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
 			debug_shrink_set, "%llu\n");
 
-void ion_device_add_heap(struct ion_heap *heap)
+void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
 	struct dentry *debug_file;
-	struct ion_device *dev = internal_dev;
 
 	if (!heap->ops->allocate || !heap->ops->free)
 		pr_err("%s: can not add heap with invalid ops struct.\n",
@@ -557,25 +1386,35 @@ void ion_device_add_heap(struct ion_heap *heap)
 	heap->dev = dev;
 	down_write(&dev->lock);
-	heap->id = heap_id++;
 	/*
 	 * use negative heap->id to reverse the priority -- when traversing
 	 * the list later attempt higher id numbers first
 	 */
 	plist_node_init(&heap->node, -heap->id);
 	plist_add(&heap->node, &dev->heaps);
+	debug_file = debugfs_create_file(heap->name, 0664,
+					 dev->heaps_debug_root, heap,
+					 &debug_heap_fops);
+
+	if (!debug_file) {
+		char buf[256], *path;
+
+		path = dentry_path(dev->heaps_debug_root, buf, 256);
+		pr_err("Failed to create heap debugfs at %s/%s\n",
+		       path, heap->name);
+	}
 
 	if (heap->shrinker.count_objects && heap->shrinker.scan_objects) {
 		char debug_name[64];
 
 		snprintf(debug_name, 64, "%s_shrink", heap->name);
 		debug_file = debugfs_create_file(
-			debug_name, 0644, dev->debug_root, heap,
+			debug_name, 0644, dev->heaps_debug_root, heap,
 			&debug_shrink_fops);
 		if (!debug_file) {
 			char buf[256], *path;
 
-			path = dentry_path(dev->debug_root, buf, 256);
+			path = dentry_path(dev->heaps_debug_root, buf, 256);
 			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
 			       path, debug_name);
 		}
@@ -586,14 +1425,17 @@ void ion_device_add_heap(struct ion_heap *heap)
 }
 EXPORT_SYMBOL(ion_device_add_heap);
 
-static int ion_device_create(void)
+struct ion_device *ion_device_create(long (*custom_ioctl)
+				     (struct ion_client *client,
+				      unsigned int cmd,
+				      unsigned long arg))
 {
 	struct ion_device *idev;
 	int ret;
 
 	idev = kzalloc(sizeof(*idev), GFP_KERNEL);
 	if (!idev)
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 
 	idev->dev.minor = MISC_DYNAMIC_MINOR;
 	idev->dev.name = "ion";
@@ -603,7 +1445,7 @@ static int ion_device_create(void)
 	if (ret) {
 		pr_err("ion: failed to register misc device.\n");
 		kfree(idev);
-		return ret;
+		return ERR_PTR(ret);
 	}
 
 	idev->debug_root = debugfs_create_dir("ion", NULL);
@@ -611,13 +1453,35 @@ static int ion_device_create(void)
 		pr_err("ion: failed to create debugfs root directory.\n");
 		goto debugfs_done;
 	}
+	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
+	if (!idev->heaps_debug_root) {
+		pr_err("ion: failed to create debugfs heaps directory.\n");
+		goto debugfs_done;
+	}
+	idev->clients_debug_root = debugfs_create_dir("clients",
+						      idev->debug_root);
+	if (!idev->clients_debug_root)
+		pr_err("ion: failed to create debugfs clients directory.\n");
 
 debugfs_done:
+
+	idev->custom_ioctl = custom_ioctl;
 	idev->buffers = RB_ROOT;
 	mutex_init(&idev->buffer_lock);
 	init_rwsem(&idev->lock);
 	plist_head_init(&idev->heaps);
-	internal_dev = idev;
-	return 0;
+	idev->clients = RB_ROOT;
= &idev->clients; + mutex_init(&debugfs_mutex); + return idev; +} +EXPORT_SYMBOL(ion_device_create); + +void ion_device_destroy(struct ion_device *dev) +{ + misc_deregister(&dev->dev); + debugfs_remove_recursive(dev->debug_root); + /* XXX need to free the heaps and clients ? */ + kfree(dev); } -subsys_initcall(ion_device_create); +EXPORT_SYMBOL(ion_device_destroy); diff --git a/drivers/staging/android/ion/ion.h b/drivers/staging/android/ion/ion.h index 621e5f7ceacb5e..93dafb4586e433 100644 --- a/drivers/staging/android/ion/ion.h +++ b/drivers/staging/android/ion/ion.h @@ -14,31 +14,38 @@ * */ -#ifndef _ION_H -#define _ION_H +#ifndef _LINUX_ION_H +#define _LINUX_ION_H -#include -#include -#include -#include -#include -#include -#include -#include #include -#include #include "../uapi/ion.h" +struct ion_handle; +struct ion_device; +struct ion_heap; +struct ion_mapper; +struct ion_client; +struct ion_buffer; + +/* + * This should be removed some day when phys_addr_t's are fully + * plumbed in the kernel, and all instances of ion_phys_addr_t should + * be converted to phys_addr_t. For the time being many kernel interfaces + * do not accept phys_addr_t's that would have to + */ +#define ion_phys_addr_t unsigned long + /** * struct ion_platform_heap - defines a heap in the given platform * @type: type of the heap from ion_heap_type enum - * @id: unique identifier for heap. When allocating higher numb ers + * @id: unique identifier for heap. When allocating higher numbers * will be allocated from first. At allocation these are passed * as a bit mask and therefore can not exceed ION_NUM_HEAP_IDS. * @name: used for debug purposes * @base: base address of heap in physical memory if applicable * @size: size of the heap in bytes if applicable + * @align: required alignment in physical memory if applicable * @priv: private info passed from the board file * * Provided by the board file. 
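For reference, a board file consuming the interface above would describe its heaps roughly as follows. This is a minimal sketch: the heap id, base address, and sizes are invented for illustration, and it uses the ion_platform_data wrapper added in the next hunk:

/*
 * Hypothetical board-file fragment: one carveout heap, handed to ion
 * through the platform-data path this patch restores. All values are
 * examples, not taken from this patch.
 */
static struct ion_platform_heap example_heaps[] = {
	{
		.id    = 1,
		.type  = ION_HEAP_TYPE_CARVEOUT,
		.name  = "example-carveout",
		.base  = 0x88000000,	/* some reserved physical region */
		.size  = SZ_16M,
		.align = PAGE_SIZE,
	},
};

static struct ion_platform_data example_pdata = {
	.nr    = ARRAY_SIZE(example_heaps),
	.heaps = example_heaps,
};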
@@ -47,315 +54,123 @@ struct ion_platform_heap { enum ion_heap_type type; unsigned int id; const char *name; - phys_addr_t base; + ion_phys_addr_t base; size_t size; - phys_addr_t align; + ion_phys_addr_t align; void *priv; }; /** - * struct ion_buffer - metadata for a particular buffer - * @ref: reference count - * @node: node in the ion_device buffers tree - * @dev: back pointer to the ion_device - * @heap: back pointer to the heap the buffer came from - * @flags: buffer specific flags - * @private_flags: internal buffer specific flags - * @size: size of the buffer - * @priv_virt: private data to the buffer representable as - * a void * - * @lock: protects the buffers cnt fields - * @kmap_cnt: number of times the buffer is mapped to the kernel - * @vaddr: the kernel mapping if kmap_cnt is not zero - * @sg_table: the sg table for the buffer if dmap_cnt is not zero - */ -struct ion_buffer { - union { - struct rb_node node; - struct list_head list; - }; - struct ion_device *dev; - struct ion_heap *heap; - unsigned long flags; - unsigned long private_flags; - size_t size; - void *priv_virt; - struct mutex lock; - int kmap_cnt; - void *vaddr; - struct sg_table *sg_table; - struct list_head attachments; -}; -void ion_buffer_destroy(struct ion_buffer *buffer); - -/** - * struct ion_device - the metadata of the ion device node - * @dev: the actual misc device - * @buffers: an rb tree of all the existing buffers - * @buffer_lock: lock protecting the tree of buffers - * @lock: rwsem protecting the tree of heaps and clients - */ -struct ion_device { - struct miscdevice dev; - struct rb_root buffers; - struct mutex buffer_lock; - struct rw_semaphore lock; - struct plist_head heaps; - struct dentry *debug_root; - int heap_cnt; -}; - -/** - * struct ion_heap_ops - ops to operate on a given heap - * @allocate: allocate memory - * @free: free memory - * @map_kernel map memory to the kernel - * @unmap_kernel unmap memory to the kernel - * @map_user map memory to userspace + * struct ion_platform_data - array of platform heaps passed from board file + * @nr: number of structures in the array + * @heaps: array of platform_heap structions * - * allocate, phys, and map_user return 0 on success, -errno on error. - * map_dma and map_kernel return pointer on success, ERR_PTR on - * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in - * the buffer's private_flags when called from a shrinker. In that - * case, the pages being free'd must be truly free'd back to the - * system, not put in a page pool or otherwise cached. + * Provided by the board file in the form of platform data to a platform device. */ -struct ion_heap_ops { - int (*allocate)(struct ion_heap *heap, - struct ion_buffer *buffer, unsigned long len, - unsigned long flags); - void (*free)(struct ion_buffer *buffer); - void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); - void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); - int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer, - struct vm_area_struct *vma); - int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); +struct ion_platform_data { + int nr; + struct ion_platform_heap *heaps; }; /** - * heap flags - flags between the heaps and core ion code - */ -#define ION_HEAP_FLAG_DEFER_FREE BIT(0) - -/** - * private flags - flags internal to ion - */ -/* - * Buffer is being freed from a shrinker function. Skip any possible - * heap-specific caching mechanism (e.g. page pools). 
Guarantees that - * any buffer storage that came from the system allocator will be - * returned to the system allocator. - */ -#define ION_PRIV_FLAG_SHRINKER_FREE BIT(0) - -/** - * struct ion_heap - represents a heap in the system - * @node: rb node to put the heap on the device's tree of heaps - * @dev: back pointer to the ion_device - * @type: type of heap - * @ops: ops struct as above - * @flags: flags - * @id: id of heap, also indicates priority of this heap when - * allocating. These are specified by platform data and - * MUST be unique + * ion_client_create() - allocates a client and returns it + * @dev: the global ion device * @name: used for debugging - * @shrinker: a shrinker for the heap - * @free_list: free list head if deferred free is used - * @free_list_size size of the deferred free list in bytes - * @lock: protects the free list - * @waitqueue: queue to wait on from deferred free thread - * @task: task struct of deferred free thread - * @debug_show: called when heap debug file is read to add any - * heap specific debug info to output - * - * Represents a pool of memory from which buffers can be made. In some - * systems the only heap is regular system memory allocated via vmalloc. - * On others, some blocks might require large physically contiguous buffers - * that are allocated from a specially reserved heap. */ -struct ion_heap { - struct plist_node node; - struct ion_device *dev; - enum ion_heap_type type; - struct ion_heap_ops *ops; - unsigned long flags; - unsigned int id; - const char *name; - struct shrinker shrinker; - struct list_head free_list; - size_t free_list_size; - spinlock_t free_lock; - wait_queue_head_t waitqueue; - struct task_struct *task; - - int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); -}; +struct ion_client *ion_client_create(struct ion_device *dev, + const char *name); /** - * ion_buffer_cached - this ion buffer is cached - * @buffer: buffer + * ion_client_destroy() - frees a client and all its handles + * @client: the client * - * indicates whether this ion buffer is cached + * Free the provided client and all its resources including + * any handles it is holding. */ -bool ion_buffer_cached(struct ion_buffer *buffer); +void ion_client_destroy(struct ion_client *client); /** - * ion_buffer_fault_user_mappings - fault in user mappings of this buffer - * @buffer: buffer + * ion_alloc - allocate ion memory + * @client: the client + * @len: size of the allocation + * @align: requested allocation alignment, lots of hardware blocks + * have alignment requirements of some kind + * @heap_id_mask: mask of heaps to allocate from, if multiple bits are set + * heaps will be tried in order from highest to lowest + * id + * @flags: heap flags, the low 16 bits are consumed by ion, the + * high 16 bits are passed on to the respective heap and + * can be heap-specific * - * indicates whether userspace mappings of this buffer will be faulted - * in, this can affect how buffers are allocated from the heap. - */ -bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer); - -/** - * ion_device_add_heap - adds a heap to the ion device - * @heap: the heap to add + * Allocate memory in one of the heaps provided in heap mask and return + * an opaque handle to it.
*/ -void ion_device_add_heap(struct ion_heap *heap); +struct ion_handle *ion_alloc(struct ion_client *client, size_t len, + size_t align, unsigned int heap_id_mask, + unsigned int flags); /** - * some helpers for common operations on buffers using the sg_table - * and vaddr fields - */ -void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer); -void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer); -int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, - struct vm_area_struct *vma); -int ion_heap_buffer_zero(struct ion_buffer *buffer); -int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); - -int ion_alloc(size_t len, - unsigned int heap_id_mask, - unsigned int flags); - -/** - * ion_heap_init_shrinker - * @heap: the heap + * ion_free - free a handle + * @client: the client + * @handle: the handle to free * - * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op - * this function will be called to setup a shrinker to shrink the freelists - * and call the heap's shrink op. + * Free the provided handle. */ -void ion_heap_init_shrinker(struct ion_heap *heap); +void ion_free(struct ion_client *client, struct ion_handle *handle); /** - * ion_heap_init_deferred_free -- initialize deferred free functionality - * @heap: the heap + * ion_map_kernel - create mapping for the given handle + * @client: the client + * @handle: handle to map * - * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will - * be called to setup deferred frees. Calls to free the buffer will - * return immediately and the actual free will occur some time later + * Map the given handle into the kernel and return a kernel address that + * can be used to access this address. */ -int ion_heap_init_deferred_free(struct ion_heap *heap); +void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle); /** - * ion_heap_freelist_add - add a buffer to the deferred free list - * @heap: the heap - * @buffer: the buffer - * - * Adds an item to the deferred freelist. + * ion_unmap_kernel() - destroy a kernel mapping for a handle + * @client: the client + * @handle: handle to unmap */ -void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); +void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle); /** - * ion_heap_freelist_drain - drain the deferred free list - * @heap: the heap - * @size: amount of memory to drain in bytes - * - * Drains the indicated amount of memory from the deferred freelist immediately. - * Returns the total amount freed. The total freed may be higher depending - * on the size of the items in the list, or lower if there is insufficient - * total memory on the freelist. + * ion_share_dma_buf() - share buffer as dma-buf + * @client: the client + * @handle: the handle */ -size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); +struct dma_buf *ion_share_dma_buf(struct ion_client *client, + struct ion_handle *handle); /** - * ion_heap_freelist_shrink - drain the deferred free - * list, skipping any heap-specific - * pooling or caching mechanisms - * - * @heap: the heap - * @size: amount of memory to drain in bytes - * - * Drains the indicated amount of memory from the deferred freelist immediately. - * Returns the total amount freed. The total freed may be higher depending - * on the size of the items in the list, or lower if there is insufficient - * total memory on the freelist. 
- * - * Unlike with @ion_heap_freelist_drain, don't put any pages back into - * page pools or otherwise cache the pages. Everything must be - * genuinely free'd back to the system. If you're free'ing from a - * shrinker you probably want to use this. Note that this relies on - * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE - * flag. + * ion_share_dma_buf_fd() - given an ion client, create a dma-buf fd + * @client: the client + * @handle: the handle */ -size_t ion_heap_freelist_shrink(struct ion_heap *heap, - size_t size); +int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle); /** - * ion_heap_freelist_size - returns the size of the freelist in bytes - * @heap: the heap - */ -size_t ion_heap_freelist_size(struct ion_heap *heap); - - -/** - * functions for creating and destroying a heap pool -- allows you - * to keep a pool of pre allocated memory to use from your heap. Keeping - * a pool of memory that is ready for dma, ie any cached mapping have been - * invalidated from the cache, provides a significant performance benefit on - * many systems - */ - -/** - * struct ion_page_pool - pagepool struct - * @high_count: number of highmem items in the pool - * @low_count: number of lowmem items in the pool - * @high_items: list of highmem items - * @low_items: list of lowmem items - * @mutex: lock protecting this struct and especially the count - * item list - * @gfp_mask: gfp_mask to use from alloc - * @order: order of pages in the pool - * @list: plist node for list of pools - * @cached: it's cached pool or not + * ion_import_dma_buf() - get ion_handle from dma-buf + * @client: the client + * @dmabuf: the dma-buf * - * Allows you to keep a pool of pre allocated pages to use from your heap. - * Keeping a pool of pages that is ready for dma, ie any cached mapping have - * been invalidated from the cache, provides a significant performance benefit - * on many systems + * Get the ion_buffer associated with the dma-buf and return the ion_handle. + * If no ion_handle exists for this buffer, return newly created ion_handle. + * If dma-buf from another exporter is passed, return ERR_PTR(-EINVAL) */ -struct ion_page_pool { - int high_count; - int low_count; - bool cached; - struct list_head high_items; - struct list_head low_items; - struct mutex mutex; - gfp_t gfp_mask; - unsigned int order; - struct plist_node list; -}; - -struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order, - bool cached); -void ion_page_pool_destroy(struct ion_page_pool *pool); -struct page *ion_page_pool_alloc(struct ion_page_pool *pool); -void ion_page_pool_free(struct ion_page_pool *pool, struct page *page); +struct ion_handle *ion_import_dma_buf(struct ion_client *client, + struct dma_buf *dmabuf); -/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool - * @pool: the pool - * @gfp_mask: the memory type to reclaim - * @nr_to_scan: number of items to shrink in pages +/** + * ion_import_dma_buf_fd() - given a dma-buf fd from the ion exporter get handle + * @client: the client + * @fd: the dma-buf fd * - * returns the number of items freed in pages + * Given an dma-buf fd that was allocated through ion via ion_share_dma_buf_fd, + * import that fd and return a handle representing it. 
If a dma-buf from + * another exporter is passed in this function will return ERR_PTR(-EINVAL) */ -int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, - int nr_to_scan); - -long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); - -int ion_query_heaps(struct ion_heap_query *query); +struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd); -#endif /* _ION_H */ +#endif /* _LINUX_ION_H */ diff --git a/drivers/staging/android/ion/ion_carveout_heap.c b/drivers/staging/android/ion/ion_carveout_heap.c index fee7650d6fbb39..a8ea97391c40f9 100644 --- a/drivers/staging/android/ion/ion_carveout_heap.c +++ b/drivers/staging/android/ion/ion_carveout_heap.c @@ -23,17 +23,19 @@ #include #include #include "ion.h" +#include "ion_priv.h" #define ION_CARVEOUT_ALLOCATE_FAIL -1 struct ion_carveout_heap { struct ion_heap heap; struct gen_pool *pool; - phys_addr_t base; + ion_phys_addr_t base; }; -static phys_addr_t ion_carveout_allocate(struct ion_heap *heap, - unsigned long size) +static ion_phys_addr_t ion_carveout_allocate(struct ion_heap *heap, + unsigned long size, + unsigned long align) { struct ion_carveout_heap *carveout_heap = container_of(heap, struct ion_carveout_heap, heap); @@ -45,7 +47,7 @@ static phys_addr_t ion_carveout_allocate(struct ion_heap *heap, return offset; } -static void ion_carveout_free(struct ion_heap *heap, phys_addr_t addr, +static void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr, unsigned long size) { struct ion_carveout_heap *carveout_heap = @@ -58,13 +60,16 @@ static void ion_carveout_free(struct ion_heap *heap, phys_addr_t addr, static int ion_carveout_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer, - unsigned long size, + unsigned long size, unsigned long align, unsigned long flags) { struct sg_table *table; - phys_addr_t paddr; + ion_phys_addr_t paddr; int ret; + if (align > PAGE_SIZE) + return -EINVAL; + table = kmalloc(sizeof(*table), GFP_KERNEL); if (!table) return -ENOMEM; @@ -72,7 +77,7 @@ static int ion_carveout_heap_allocate(struct ion_heap *heap, if (ret) goto err_free; - paddr = ion_carveout_allocate(heap, size); + paddr = ion_carveout_allocate(heap, size, align); if (paddr == ION_CARVEOUT_ALLOCATE_FAIL) { ret = -ENOMEM; goto err_free_table; @@ -95,10 +100,14 @@ static void ion_carveout_heap_free(struct ion_buffer *buffer) struct ion_heap *heap = buffer->heap; struct sg_table *table = buffer->sg_table; struct page *page = sg_page(table->sgl); - phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); + ion_phys_addr_t paddr = PFN_PHYS(page_to_pfn(page)); ion_heap_buffer_zero(buffer); + if (ion_buffer_cached(buffer)) + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + ion_carveout_free(heap, paddr, buffer->size); sg_free_table(table); kfree(table); @@ -123,6 +132,8 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) page = pfn_to_page(PFN_DOWN(heap_data->base)); size = heap_data->size; + ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); if (ret) return ERR_PTR(ret); @@ -145,3 +156,13 @@ struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data) return &carveout_heap->heap; } + +void ion_carveout_heap_destroy(struct ion_heap *heap) +{ + struct ion_carveout_heap *carveout_heap = + container_of(heap, struct ion_carveout_heap, heap); + + gen_pool_destroy(carveout_heap->pool); + kfree(carveout_heap); + carveout_heap = NULL; +} 
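To make the restored in-kernel client API concrete, here is a minimal usage sketch; the function name, heap mask, and buffer size are illustrative only, and error handling is trimmed to the essentials:

/*
 * Hypothetical driver snippet exercising the client API declared in
 * ion.h above: create a client, allocate from heap id 0, map the
 * buffer into the kernel, then tear everything down again.
 */
static int example_use_ion(struct ion_device *idev)
{
	struct ion_client *client;
	struct ion_handle *handle;
	void *vaddr;

	client = ion_client_create(idev, "example-driver");
	if (IS_ERR(client))
		return PTR_ERR(client);

	/* 1 MiB, page aligned, from heap id 0, no heap flags. */
	handle = ion_alloc(client, SZ_1M, PAGE_SIZE, 1 << 0, 0);
	if (IS_ERR(handle)) {
		ion_client_destroy(client);
		return PTR_ERR(handle);
	}

	vaddr = ion_map_kernel(client, handle);
	if (!IS_ERR_OR_NULL(vaddr)) {
		memset(vaddr, 0, SZ_1M);	/* CPU access via the kernel mapping */
		ion_unmap_kernel(client, handle);
	}

	ion_free(client, handle);
	ion_client_destroy(client);
	return 0;
}

Buffers that must cross process or device boundaries would instead go out through ion_share_dma_buf_fd() and come back in via ion_import_dma_buf_fd(), as documented above.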
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c index 102c09398317c3..70495dc645ea13 100644 --- a/drivers/staging/android/ion/ion_chunk_heap.c +++ b/drivers/staging/android/ion/ion_chunk_heap.c @@ -22,11 +22,12 @@ #include #include #include "ion.h" +#include "ion_priv.h" struct ion_chunk_heap { struct ion_heap heap; struct gen_pool *pool; - phys_addr_t base; + ion_phys_addr_t base; unsigned long chunk_size; unsigned long size; unsigned long allocated; @@ -34,7 +35,7 @@ struct ion_chunk_heap { static int ion_chunk_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer, - unsigned long size, + unsigned long size, unsigned long align, unsigned long flags) { struct ion_chunk_heap *chunk_heap = @@ -45,6 +46,9 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap, unsigned long num_chunks; unsigned long allocated_size; + if (align > chunk_heap->chunk_size) + return -EINVAL; + allocated_size = ALIGN(size, chunk_heap->chunk_size); num_chunks = allocated_size / chunk_heap->chunk_size; @@ -100,6 +104,10 @@ static void ion_chunk_heap_free(struct ion_buffer *buffer) ion_heap_buffer_zero(buffer); + if (ion_buffer_cached(buffer)) + dma_sync_sg_for_device(NULL, table->sgl, table->nents, + DMA_BIDIRECTIONAL); + for_each_sg(table->sgl, sg, table->nents, i) { gen_pool_free(chunk_heap->pool, page_to_phys(sg_page(sg)), sg->length); @@ -127,6 +135,8 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) page = pfn_to_page(PFN_DOWN(heap_data->base)); size = heap_data->size; + ion_pages_sync_for_device(NULL, page, size, DMA_BIDIRECTIONAL); + ret = ion_heap_pages_zero(page, size, pgprot_writecombine(PAGE_KERNEL)); if (ret) return ERR_PTR(ret); @@ -150,8 +160,8 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) chunk_heap->heap.ops = &chunk_heap_ops; chunk_heap->heap.type = ION_HEAP_TYPE_CHUNK; chunk_heap->heap.flags = ION_HEAP_FLAG_DEFER_FREE; - pr_debug("%s: base %pa size %zu\n", __func__, - &chunk_heap->base, heap_data->size); + pr_debug("%s: base %lu size %zu align %ld\n", __func__, + chunk_heap->base, heap_data->size, heap_data->align); return &chunk_heap->heap; @@ -160,3 +170,12 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data) return ERR_PTR(ret); } +void ion_chunk_heap_destroy(struct ion_heap *heap) +{ + struct ion_chunk_heap *chunk_heap = + container_of(heap, struct ion_chunk_heap, heap); + + gen_pool_destroy(chunk_heap->pool); + kfree(chunk_heap); + chunk_heap = NULL; +} diff --git a/drivers/staging/android/ion/ion_cma_heap.c b/drivers/staging/android/ion/ion_cma_heap.c index dd5545d9990a0f..6c4068523781a4 100644 --- a/drivers/staging/android/ion/ion_cma_heap.c +++ b/drivers/staging/android/ion/ion_cma_heap.c @@ -19,74 +19,124 @@ #include #include #include -#include -#include +#include #include "ion.h" +#include "ion_priv.h" struct ion_cma_heap { struct ion_heap heap; - struct cma *cma; + struct device *dev; }; #define to_cma_heap(x) container_of(x, struct ion_cma_heap, heap) +struct ion_cma_buffer_info { + void *cpu_addr; + dma_addr_t handle; + struct sg_table *table; +}; + + /* ION CMA heap operations functions */ static int ion_cma_allocate(struct ion_heap *heap, struct ion_buffer *buffer, - unsigned long len, + unsigned long len, unsigned long align, unsigned long flags) { struct ion_cma_heap *cma_heap = to_cma_heap(heap); - struct sg_table *table; - struct page *pages; - int ret; + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info 
*info; + + dev_dbg(dev, "Request buffer allocation len %ld\n", len); + + if (buffer->flags & ION_FLAG_CACHED) + return -EINVAL; + + if (align > PAGE_SIZE) + return -EINVAL; - pages = cma_alloc(cma_heap->cma, len, 0, GFP_KERNEL); - if (!pages) + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (!info) return -ENOMEM; - table = kmalloc(sizeof(*table), GFP_KERNEL); - if (!table) + info->cpu_addr = dma_alloc_coherent(dev, len, &(info->handle), + GFP_HIGHUSER | __GFP_ZERO); + + if (!info->cpu_addr) { + dev_err(dev, "Fail to allocate buffer\n"); goto err; + } - ret = sg_alloc_table(table, 1, GFP_KERNEL); - if (ret) + info->table = kmalloc(sizeof(*info->table), GFP_KERNEL); + if (!info->table) goto free_mem; - sg_set_page(table->sgl, pages, len, 0); - - buffer->priv_virt = pages; - buffer->sg_table = table; + if (dma_get_sgtable(dev, info->table, info->cpu_addr, info->handle, + len)) + goto free_table; + /* keep this for memory release */ + buffer->priv_virt = info; + buffer->sg_table = info->table; + dev_dbg(dev, "Allocate buffer %p\n", buffer); return 0; +free_table: + kfree(info->table); free_mem: - kfree(table); + dma_free_coherent(dev, len, info->cpu_addr, info->handle); err: - cma_release(cma_heap->cma, pages, buffer->size); + kfree(info); return -ENOMEM; } static void ion_cma_free(struct ion_buffer *buffer) { struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); - struct page *pages = buffer->priv_virt; + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + dev_dbg(dev, "Release buffer %p\n", buffer); /* release memory */ - cma_release(cma_heap->cma, pages, buffer->size); + dma_free_coherent(dev, buffer->size, info->cpu_addr, info->handle); /* release sg table */ - sg_free_table(buffer->sg_table); - kfree(buffer->sg_table); + sg_free_table(info->table); + kfree(info->table); + kfree(info); +} + +static int ion_cma_mmap(struct ion_heap *mapper, struct ion_buffer *buffer, + struct vm_area_struct *vma) +{ + struct ion_cma_heap *cma_heap = to_cma_heap(buffer->heap); + struct device *dev = cma_heap->dev; + struct ion_cma_buffer_info *info = buffer->priv_virt; + + return dma_mmap_coherent(dev, vma, info->cpu_addr, info->handle, + buffer->size); +} + +static void *ion_cma_map_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ + struct ion_cma_buffer_info *info = buffer->priv_virt; + /* kernel memory mapping has been done at allocation time */ + return info->cpu_addr; +} + +static void ion_cma_unmap_kernel(struct ion_heap *heap, + struct ion_buffer *buffer) +{ } static struct ion_heap_ops ion_cma_ops = { .allocate = ion_cma_allocate, .free = ion_cma_free, - .map_user = ion_heap_map_user, - .map_kernel = ion_heap_map_kernel, - .unmap_kernel = ion_heap_unmap_kernel, + .map_user = ion_cma_mmap, + .map_kernel = ion_cma_map_kernel, + .unmap_kernel = ion_cma_unmap_kernel, }; -static struct ion_heap *__ion_cma_heap_create(struct cma *cma) +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data) { struct ion_cma_heap *cma_heap; @@ -100,28 +150,14 @@ static struct ion_heap *__ion_cma_heap_create(struct cma *cma) * get device from private heaps data, later it will be * used to make the link with reserved CMA memory */ - cma_heap->cma = cma; + cma_heap->dev = data->priv; cma_heap->heap.type = ION_HEAP_TYPE_DMA; return &cma_heap->heap; } -static int __ion_add_cma_heaps(struct cma *cma, void *data) +void ion_cma_heap_destroy(struct ion_heap *heap) { - struct ion_heap *heap; - - heap = __ion_cma_heap_create(cma); - if (IS_ERR(heap)) - return 
PTR_ERR(heap); - - heap->name = cma_get_name(cma); - - ion_device_add_heap(heap); - return 0; -} + struct ion_cma_heap *cma_heap = to_cma_heap(heap); -static int ion_add_cma_heaps(void) -{ - cma_for_each_area(__ion_add_cma_heaps, NULL); - return 0; + kfree(cma_heap); } -device_initcall(ion_add_cma_heaps); diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c new file mode 100644 index 00000000000000..cf5c010d32bcac --- /dev/null +++ b/drivers/staging/android/ion/ion_dummy_driver.c @@ -0,0 +1,156 @@ +/* + * drivers/gpu/ion/ion_dummy_driver.c + * + * Copyright (C) 2013 Linaro, Inc + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" + +static struct ion_device *idev; +static struct ion_heap **heaps; + +static void *carveout_ptr; +static void *chunk_ptr; + +static struct ion_platform_heap dummy_heaps[] = { + { + .id = ION_HEAP_TYPE_SYSTEM, + .type = ION_HEAP_TYPE_SYSTEM, + .name = "system", + }, + { + .id = ION_HEAP_TYPE_SYSTEM_CONTIG, + .type = ION_HEAP_TYPE_SYSTEM_CONTIG, + .name = "system contig", + }, + { + .id = ION_HEAP_TYPE_CARVEOUT, + .type = ION_HEAP_TYPE_CARVEOUT, + .name = "carveout", + .size = SZ_4M, + }, + { + .id = ION_HEAP_TYPE_CHUNK, + .type = ION_HEAP_TYPE_CHUNK, + .name = "chunk", + .size = SZ_4M, + .align = SZ_16K, + .priv = (void *)(SZ_16K), + }, +}; + +static const struct ion_platform_data dummy_ion_pdata = { + .nr = ARRAY_SIZE(dummy_heaps), + .heaps = dummy_heaps, +}; + +static int __init ion_dummy_init(void) +{ + int i, err; + + idev = ion_device_create(NULL); + if (IS_ERR(idev)) + return PTR_ERR(idev); + heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *), + GFP_KERNEL); + if (!heaps) + return -ENOMEM; + + + /* Allocate a dummy carveout heap */ + carveout_ptr = alloc_pages_exact( + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size, + GFP_KERNEL); + if (carveout_ptr) + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].base = + virt_to_phys(carveout_ptr); + else + pr_err("ion_dummy: Could not allocate carveout\n"); + + /* Allocate a dummy chunk heap */ + chunk_ptr = alloc_pages_exact( + dummy_heaps[ION_HEAP_TYPE_CHUNK].size, + GFP_KERNEL); + if (chunk_ptr) + dummy_heaps[ION_HEAP_TYPE_CHUNK].base = virt_to_phys(chunk_ptr); + else + pr_err("ion_dummy: Could not allocate chunk\n"); + + for (i = 0; i < dummy_ion_pdata.nr; i++) { + struct ion_platform_heap *heap_data = &dummy_ion_pdata.heaps[i]; + + if (heap_data->type == ION_HEAP_TYPE_CARVEOUT && + !heap_data->base) + continue; + + if (heap_data->type == ION_HEAP_TYPE_CHUNK && !heap_data->base) + continue; + + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(idev, heaps[i]); + } + return 0; +err: + for (i = 0; i < dummy_ion_pdata.nr; ++i) + ion_heap_destroy(heaps[i]); + kfree(heaps); + + if (carveout_ptr) { + free_pages_exact(carveout_ptr, + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); + carveout_ptr = NULL; + } + if (chunk_ptr) { + free_pages_exact(chunk_ptr, + 
dummy_heaps[ION_HEAP_TYPE_CHUNK].size); + chunk_ptr = NULL; + } + return err; +} +device_initcall(ion_dummy_init); + +static void __exit ion_dummy_exit(void) +{ + int i; + + ion_device_destroy(idev); + + for (i = 0; i < dummy_ion_pdata.nr; i++) + ion_heap_destroy(heaps[i]); + kfree(heaps); + + if (carveout_ptr) { + free_pages_exact(carveout_ptr, + dummy_heaps[ION_HEAP_TYPE_CARVEOUT].size); + carveout_ptr = NULL; + } + if (chunk_ptr) { + free_pages_exact(chunk_ptr, + dummy_heaps[ION_HEAP_TYPE_CHUNK].size); + chunk_ptr = NULL; + } +} +__exitcall(ion_dummy_exit); diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c index 91faa7f035b93c..c69d0bd536934a 100644 --- a/drivers/staging/android/ion/ion_heap.c +++ b/drivers/staging/android/ion/ion_heap.c @@ -24,6 +24,7 @@ #include #include #include "ion.h" +#include "ion_priv.h" void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer) @@ -314,3 +315,70 @@ void ion_heap_init_shrinker(struct ion_heap *heap) heap->shrinker.batch = 0; register_shrinker(&heap->shrinker); } + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data) +{ + struct ion_heap *heap = NULL; + + switch (heap_data->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + heap = ion_system_contig_heap_create(heap_data); + break; + case ION_HEAP_TYPE_SYSTEM: + heap = ion_system_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CARVEOUT: + heap = ion_carveout_heap_create(heap_data); + break; + case ION_HEAP_TYPE_CHUNK: + heap = ion_chunk_heap_create(heap_data); + break; + case ION_HEAP_TYPE_DMA: + heap = ion_cma_heap_create(heap_data); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap_data->type); + return ERR_PTR(-EINVAL); + } + + if (IS_ERR_OR_NULL(heap)) { + pr_err("%s: error creating heap %s type %d base %lu size %zu\n", + __func__, heap_data->name, heap_data->type, + heap_data->base, heap_data->size); + return ERR_PTR(-EINVAL); + } + + heap->name = heap_data->name; + heap->id = heap_data->id; + return heap; +} +EXPORT_SYMBOL(ion_heap_create); + +void ion_heap_destroy(struct ion_heap *heap) +{ + if (!heap) + return; + + switch (heap->type) { + case ION_HEAP_TYPE_SYSTEM_CONTIG: + ion_system_contig_heap_destroy(heap); + break; + case ION_HEAP_TYPE_SYSTEM: + ion_system_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CARVEOUT: + ion_carveout_heap_destroy(heap); + break; + case ION_HEAP_TYPE_CHUNK: + ion_chunk_heap_destroy(heap); + break; + case ION_HEAP_TYPE_DMA: + ion_cma_heap_destroy(heap); + break; + default: + pr_err("%s: Invalid heap type %d\n", __func__, + heap->type); + } +} +EXPORT_SYMBOL(ion_heap_destroy); diff --git a/drivers/staging/android/ion/ion_of.c b/drivers/staging/android/ion/ion_of.c new file mode 100644 index 00000000000000..7791c705ea3156 --- /dev/null +++ b/drivers/staging/android/ion/ion_of.c @@ -0,0 +1,184 @@ +/* + * Based on work from: + * Andrew Andrianov + * Google + * The Linux Foundation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ion.h" +#include "ion_priv.h" +#include "ion_of.h" + +static int ion_parse_dt_heap_common(struct device_node *heap_node, + struct ion_platform_heap *heap, + struct ion_of_heap *compatible) +{ + int i; + + for (i = 0; compatible[i].name; i++) { + if (of_device_is_compatible(heap_node, compatible[i].compat)) + break; + } + + if (!compatible[i].name) + return -ENODEV; + + heap->id = compatible[i].heap_id; + heap->type = compatible[i].type; + heap->name = compatible[i].name; + heap->align = compatible[i].align; + + /* Some kind of callback function pointer? */ + + pr_info("%s: id %d type %d name %s align %lx\n", __func__, + heap->id, heap->type, heap->name, heap->align); + return 0; +} + +static int ion_setup_heap_common(struct platform_device *parent, + struct device_node *heap_node, + struct ion_platform_heap *heap) +{ + int ret = 0; + + switch (heap->type) { + case ION_HEAP_TYPE_CARVEOUT: + case ION_HEAP_TYPE_CHUNK: + if (heap->base && heap->size) + return 0; + + ret = of_reserved_mem_device_init(heap->priv); + break; + default: + break; + } + + return ret; +} + +struct ion_platform_data *ion_parse_dt(struct platform_device *pdev, + struct ion_of_heap *compatible) +{ + int num_heaps, ret; + const struct device_node *dt_node = pdev->dev.of_node; + struct device_node *node; + struct ion_platform_heap *heaps; + struct ion_platform_data *data; + int i = 0; + + num_heaps = of_get_available_child_count(dt_node); + + if (!num_heaps) + return ERR_PTR(-EINVAL); + + heaps = devm_kzalloc(&pdev->dev, + sizeof(struct ion_platform_heap) * num_heaps, + GFP_KERNEL); + if (!heaps) + return ERR_PTR(-ENOMEM); + + data = devm_kzalloc(&pdev->dev, sizeof(struct ion_platform_data), + GFP_KERNEL); + if (!data) + return ERR_PTR(-ENOMEM); + + for_each_available_child_of_node(dt_node, node) { + struct platform_device *heap_pdev; + + ret = ion_parse_dt_heap_common(node, &heaps[i], compatible); + if (ret) + return ERR_PTR(ret); + + heap_pdev = of_platform_device_create(node, heaps[i].name, + &pdev->dev); + if (!heap_pdev) + return ERR_PTR(-ENOMEM); + heap_pdev->dev.platform_data = &heaps[i]; + + heaps[i].priv = &heap_pdev->dev; + + ret = ion_setup_heap_common(pdev, node, &heaps[i]); + if (ret) + goto out_err; + i++; + } + + data->heaps = heaps; + data->nr = num_heaps; + return data; + +out_err: + for ( ; i >= 0; i--) + if (heaps[i].priv) + of_device_unregister(to_platform_device(heaps[i].priv)); + + return ERR_PTR(ret); +} + +void ion_destroy_platform_data(struct ion_platform_data *data) +{ + int i; + + for (i = 0; i < data->nr; i++) + if (data->heaps[i].priv) + of_device_unregister(to_platform_device( + data->heaps[i].priv)); +} + +#ifdef CONFIG_OF_RESERVED_MEM +#include +#include +#include + +static int rmem_ion_device_init(struct reserved_mem *rmem, struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct ion_platform_heap *heap = pdev->dev.platform_data; + + heap->base = rmem->base; + heap->size = rmem->size; + pr_debug("%s: heap %s base %pa size %pa dev %p\n", __func__, + heap->name, &rmem->base, &rmem->size, dev); + return 0; +} + +static void rmem_ion_device_release(struct reserved_mem *rmem, + struct device *dev) +{ +} + +static const struct reserved_mem_ops rmem_dma_ops = { + .device_init = rmem_ion_device_init, + .device_release = rmem_ion_device_release, +}; + +static int __init rmem_ion_setup(struct reserved_mem *rmem) +{ + phys_addr_t
size = rmem->size; + + size = size / 1024; + + pr_info("Ion memory setup at %pa size %pa KiB\n", + &rmem->base, &size); + rmem->ops = &rmem_dma_ops; + return 0; +} + +RESERVEDMEM_OF_DECLARE(ion, "ion-region", rmem_ion_setup); +#endif diff --git a/drivers/staging/android/ion/ion_of.h b/drivers/staging/android/ion/ion_of.h new file mode 100644 index 00000000000000..8241a1770f0a31 --- /dev/null +++ b/drivers/staging/android/ion/ion_of.h @@ -0,0 +1,37 @@ +/* + * Based on work from: + * Andrew Andrianov + * Google + * The Linux Foundation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#ifndef _ION_OF_H +#define _ION_OF_H + +struct ion_of_heap { + const char *compat; + int heap_id; + int type; + const char *name; + int align; +}; + +#define PLATFORM_HEAP(_compat, _id, _type, _name) \ +{ \ + .compat = _compat, \ + .heap_id = _id, \ + .type = _type, \ + .name = _name, \ + .align = PAGE_SIZE, \ +} + +struct ion_platform_data *ion_parse_dt(struct platform_device *pdev, + struct ion_of_heap *compatible); + +void ion_destroy_platform_data(struct ion_platform_data *data); + +#endif diff --git a/drivers/staging/android/ion/ion_page_pool.c b/drivers/staging/android/ion/ion_page_pool.c index 817849df9de33d..aea89c1ec3451e 100644 --- a/drivers/staging/android/ion/ion_page_pool.c +++ b/drivers/staging/android/ion/ion_page_pool.c @@ -22,8 +22,7 @@ #include #include #include - -#include "ion.h" +#include "ion_priv.h" static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) { @@ -31,6 +30,9 @@ static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool) if (!page) return NULL; + if (!pool->cached) + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << pool->order, + DMA_BIDIRECTIONAL); return page; } diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h new file mode 100644 index 00000000000000..5b3059c78431aa --- /dev/null +++ b/drivers/staging/android/ion/ion_priv.h @@ -0,0 +1,473 @@ +/* + * drivers/staging/android/ion/ion_priv.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details.
+ * + */ + +#ifndef _ION_PRIV_H +#define _ION_PRIV_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ion.h" + +/** + * struct ion_buffer - metadata for a particular buffer + * @ref: reference count + * @node: node in the ion_device buffers tree + * @dev: back pointer to the ion_device + * @heap: back pointer to the heap the buffer came from + * @flags: buffer specific flags + * @private_flags: internal buffer specific flags + * @size: size of the buffer + * @priv_virt: private data to the buffer representable as + * a void * + * @lock: protects the buffers cnt fields + * @kmap_cnt: number of times the buffer is mapped to the kernel + * @vaddr: the kernel mapping if kmap_cnt is not zero + * @dmap_cnt: number of times the buffer is mapped for dma + * @sg_table: the sg table for the buffer if dmap_cnt is not zero + * @pages: flat array of pages in the buffer -- used by fault + * handler and only valid for buffers that are faulted in + * @vmas: list of vma's mapping this buffer + * @handle_count: count of handles referencing this buffer + * @task_comm: taskcomm of last client to reference this buffer in a + * handle, used for debugging + * @pid: pid of last client to reference this buffer in a + * handle, used for debugging + */ +struct ion_buffer { + struct kref ref; + union { + struct rb_node node; + struct list_head list; + }; + struct ion_device *dev; + struct ion_heap *heap; + unsigned long flags; + unsigned long private_flags; + size_t size; + void *priv_virt; + struct mutex lock; + int kmap_cnt; + void *vaddr; + int dmap_cnt; + struct sg_table *sg_table; + struct page **pages; + struct list_head vmas; + /* used to track orphaned buffers */ + int handle_count; + char task_comm[TASK_COMM_LEN]; + pid_t pid; +}; +void ion_buffer_destroy(struct ion_buffer *buffer); + +/** + * struct ion_device - the metadata of the ion device node + * @dev: the actual misc device + * @buffers: an rb tree of all the existing buffers + * @buffer_lock: lock protecting the tree of buffers + * @lock: rwsem protecting the tree of heaps and clients + * @heaps: list of all the heaps in the system + * @user_clients: list of all the clients created from userspace + */ +struct ion_device { + struct miscdevice dev; + struct rb_root buffers; + struct mutex buffer_lock; + struct rw_semaphore lock; + struct plist_head heaps; + long (*custom_ioctl)(struct ion_client *client, unsigned int cmd, + unsigned long arg); + struct rb_root clients; + struct dentry *debug_root; + struct dentry *heaps_debug_root; + struct dentry *clients_debug_root; + int heap_cnt; +}; + +/** + * struct ion_client - a process/hw block local address space + * @node: node in the tree of all clients + * @dev: backpointer to ion device + * @handles: an rb tree of all the handles in this client + * @idr: an idr space for allocating handle ids + * @lock: lock protecting the tree of handles + * @name: used for debugging + * @display_name: used for debugging (unique version of @name) + * @display_serial: used for debugging (to make display_name unique) + * @task: used for debugging + * + * A client represents a list of buffers this client may access. + * The mutex stored here is used to protect both handles tree + * as well as the handles themselves, and should be held while modifying either. 
+ */ +struct ion_client { + struct rb_node node; + struct ion_device *dev; + struct rb_root handles; + struct idr idr; + struct mutex lock; + const char *name; + char *display_name; + int display_serial; + struct task_struct *task; + pid_t pid; + struct dentry *debug_root; +}; + +/** + * ion_handle - a client local reference to a buffer + * @ref: reference count + * @client: back pointer to the client the buffer resides in + * @buffer: pointer to the buffer + * @node: node in the client's handle rbtree + * @kmap_cnt: count of times this client has mapped to kernel + * @id: client-unique id allocated by client->idr + * + * Modifications to node, map_cnt or mapping should be protected by the + * lock in the client. Other fields are never changed after initialization. + */ +struct ion_handle { + struct kref ref; + struct ion_client *client; + struct ion_buffer *buffer; + struct rb_node node; + unsigned int kmap_cnt; + int id; +}; + +/** + * struct ion_heap_ops - ops to operate on a given heap + * @allocate: allocate memory + * @free: free memory + * @map_kernel map memory to the kernel + * @unmap_kernel unmap memory to the kernel + * @map_user map memory to userspace + * + * allocate, phys, and map_user return 0 on success, -errno on error. + * map_dma and map_kernel return pointer on success, ERR_PTR on + * error. @free will be called with ION_PRIV_FLAG_SHRINKER_FREE set in + * the buffer's private_flags when called from a shrinker. In that + * case, the pages being free'd must be truly free'd back to the + * system, not put in a page pool or otherwise cached. + */ +struct ion_heap_ops { + int (*allocate)(struct ion_heap *heap, + struct ion_buffer *buffer, unsigned long len, + unsigned long align, unsigned long flags); + void (*free)(struct ion_buffer *buffer); + void * (*map_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); + void (*unmap_kernel)(struct ion_heap *heap, struct ion_buffer *buffer); + int (*map_user)(struct ion_heap *mapper, struct ion_buffer *buffer, + struct vm_area_struct *vma); + int (*shrink)(struct ion_heap *heap, gfp_t gfp_mask, int nr_to_scan); +}; + +/** + * heap flags - flags between the heaps and core ion code + */ +#define ION_HEAP_FLAG_DEFER_FREE (1 << 0) + +/** + * private flags - flags internal to ion + */ +/* + * Buffer is being freed from a shrinker function. Skip any possible + * heap-specific caching mechanism (e.g. page pools). Guarantees that + * any buffer storage that came from the system allocator will be + * returned to the system allocator. + */ +#define ION_PRIV_FLAG_SHRINKER_FREE (1 << 0) + +/** + * struct ion_heap - represents a heap in the system + * @node: rb node to put the heap on the device's tree of heaps + * @dev: back pointer to the ion_device + * @type: type of heap + * @ops: ops struct as above + * @flags: flags + * @id: id of heap, also indicates priority of this heap when + * allocating. These are specified by platform data and + * MUST be unique + * @name: used for debugging + * @shrinker: a shrinker for the heap + * @free_list: free list head if deferred free is used + * @free_list_size size of the deferred free list in bytes + * @lock: protects the free list + * @waitqueue: queue to wait on from deferred free thread + * @task: task struct of deferred free thread + * @debug_show: called when heap debug file is read to add any + * heap specific debug info to output + * + * Represents a pool of memory from which buffers can be made. In some + * systems the only heap is regular system memory allocated via vmalloc. 
+ * On others, some blocks might require large physically contiguous buffers + * that are allocated from a specially reserved heap. + */ +struct ion_heap { + struct plist_node node; + struct ion_device *dev; + enum ion_heap_type type; + struct ion_heap_ops *ops; + unsigned long flags; + unsigned int id; + const char *name; + struct shrinker shrinker; + struct list_head free_list; + size_t free_list_size; + spinlock_t free_lock; + wait_queue_head_t waitqueue; + struct task_struct *task; + + int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *); +}; + +/** + * ion_buffer_cached - this ion buffer is cached + * @buffer: buffer + * + * indicates whether this ion buffer is cached + */ +bool ion_buffer_cached(struct ion_buffer *buffer); + +/** + * ion_buffer_fault_user_mappings - fault in user mappings of this buffer + * @buffer: buffer + * + * indicates whether userspace mappings of this buffer will be faulted + * in, this can affect how buffers are allocated from the heap. + */ +bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer); + +/** + * ion_device_create - allocates and returns an ion device + * @custom_ioctl: arch specific ioctl function if applicable + * + * returns a valid device or an ERR_PTR() encoded error on failure + */ +struct ion_device *ion_device_create(long (*custom_ioctl) + (struct ion_client *client, + unsigned int cmd, + unsigned long arg)); + +/** + * ion_device_destroy - frees a device and its resources + * @dev: the device + */ +void ion_device_destroy(struct ion_device *dev); + +/** + * ion_device_add_heap - adds a heap to the ion device + * @dev: the device + * @heap: the heap to add + */ +void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap); + +/** + * some helpers for common operations on buffers using the sg_table + * and vaddr fields + */ +void *ion_heap_map_kernel(struct ion_heap *heap, struct ion_buffer *buffer); +void ion_heap_unmap_kernel(struct ion_heap *heap, struct ion_buffer *buffer); +int ion_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer, + struct vm_area_struct *vma); +int ion_heap_buffer_zero(struct ion_buffer *buffer); +int ion_heap_pages_zero(struct page *page, size_t size, pgprot_t pgprot); + +/** + * ion_heap_init_shrinker + * @heap: the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag or defines the shrink op + * this function will be called to setup a shrinker to shrink the freelists + * and call the heap's shrink op. + */ +void ion_heap_init_shrinker(struct ion_heap *heap); + +/** + * ion_heap_init_deferred_free -- initialize deferred free functionality + * @heap: the heap + * + * If a heap sets the ION_HEAP_FLAG_DEFER_FREE flag this function will + * be called to setup deferred frees. Calls to free the buffer will + * return immediately and the actual free will occur some time later + */ +int ion_heap_init_deferred_free(struct ion_heap *heap); + +/** + * ion_heap_freelist_add - add a buffer to the deferred free list + * @heap: the heap + * @buffer: the buffer + * + * Adds an item to the deferred freelist. + */ +void ion_heap_freelist_add(struct ion_heap *heap, struct ion_buffer *buffer); + +/** + * ion_heap_freelist_drain - drain the deferred free list + * @heap: the heap + * @size: amount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed. The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist.
+ */ +size_t ion_heap_freelist_drain(struct ion_heap *heap, size_t size); + +/** + * ion_heap_freelist_shrink - drain the deferred free + * list, skipping any heap-specific + * pooling or caching mechanisms + * + * @heap: the heap + * @size: amount of memory to drain in bytes + * + * Drains the indicated amount of memory from the deferred freelist immediately. + * Returns the total amount freed. The total freed may be higher depending + * on the size of the items in the list, or lower if there is insufficient + * total memory on the freelist. + * + * Unlike with @ion_heap_freelist_drain, don't put any pages back into + * page pools or otherwise cache the pages. Everything must be + * genuinely free'd back to the system. If you're free'ing from a + * shrinker you probably want to use this. Note that this relies on + * the heap.ops.free callback honoring the ION_PRIV_FLAG_SHRINKER_FREE + * flag. + */ +size_t ion_heap_freelist_shrink(struct ion_heap *heap, + size_t size); + +/** + * ion_heap_freelist_size - returns the size of the freelist in bytes + * @heap: the heap + */ +size_t ion_heap_freelist_size(struct ion_heap *heap); + + +/** + * functions for creating and destroying the built in ion heaps. + * architectures can add their own custom architecture specific + * heaps as appropriate. + */ + +struct ion_heap *ion_heap_create(struct ion_platform_heap *heap_data); +void ion_heap_destroy(struct ion_heap *heap); +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused); +void ion_system_heap_destroy(struct ion_heap *heap); + +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *heap); +void ion_system_contig_heap_destroy(struct ion_heap *heap); + +struct ion_heap *ion_carveout_heap_create(struct ion_platform_heap *heap_data); +void ion_carveout_heap_destroy(struct ion_heap *heap); + +struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data); +void ion_chunk_heap_destroy(struct ion_heap *heap); +struct ion_heap *ion_cma_heap_create(struct ion_platform_heap *data); +void ion_cma_heap_destroy(struct ion_heap *heap); + +/** + * functions for creating and destroying a heap pool -- allows you + * to keep a pool of pre allocated memory to use from your heap. Keeping + * a pool of memory that is ready for dma, ie any cached mapping have been + * invalidated from the cache, provides a significant performance benefit on + * many systems + */ + +/** + * struct ion_page_pool - pagepool struct + * @high_count: number of highmem items in the pool + * @low_count: number of lowmem items in the pool + * @high_items: list of highmem items + * @low_items: list of lowmem items + * @mutex: lock protecting this struct and especially the count + * item list + * @gfp_mask: gfp_mask to use from alloc + * @order: order of pages in the pool + * @list: plist node for list of pools + * @cached: it's cached pool or not + * + * Allows you to keep a pool of pre allocated pages to use from your heap. 
+ * Keeping a pool of pages that is ready for dma, ie any cached mapping have + * been invalidated from the cache, provides a significant performance benefit + * on many systems + */ +struct ion_page_pool { + int high_count; + int low_count; + bool cached; + struct list_head high_items; + struct list_head low_items; + struct mutex mutex; + gfp_t gfp_mask; + unsigned int order; + struct plist_node list; +}; + +struct ion_page_pool *ion_page_pool_create(gfp_t gfp_mask, unsigned int order, + bool cached); +void ion_page_pool_destroy(struct ion_page_pool *pool); +struct page *ion_page_pool_alloc(struct ion_page_pool *pool); +void ion_page_pool_free(struct ion_page_pool *pool, struct page *page); + +/** ion_page_pool_shrink - shrinks the size of the memory cached in the pool + * @pool: the pool + * @gfp_mask: the memory type to reclaim + * @nr_to_scan: number of items to shrink in pages + * + * returns the number of items freed in pages + */ +int ion_page_pool_shrink(struct ion_page_pool *pool, gfp_t gfp_mask, + int nr_to_scan); + +/** + * ion_pages_sync_for_device - cache flush pages for use with the specified + * device + * @dev: the device the pages will be used with + * @page: the first page to be flushed + * @size: size in bytes of region to be flushed + * @dir: direction of dma transfer + */ +void ion_pages_sync_for_device(struct device *dev, struct page *page, + size_t size, enum dma_data_direction dir); + +long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); + +int ion_sync_for_device(struct ion_client *client, int fd); + +struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client, + int id); + +void ion_free_nolock(struct ion_client *client, struct ion_handle *handle); + +int ion_handle_put_nolock(struct ion_handle *handle); + +struct ion_handle *ion_handle_get_by_id(struct ion_client *client, + int id); + +int ion_handle_put(struct ion_handle *handle); + +int ion_query_heaps(struct ion_client *client, struct ion_heap_query *query); + +#endif /* _ION_PRIV_H */ diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c index 4dc5d7a589c28b..3ebbb75746e8c4 100644 --- a/drivers/staging/android/ion/ion_system_heap.c +++ b/drivers/staging/android/ion/ion_system_heap.c @@ -24,6 +24,7 @@ #include #include #include "ion.h" +#include "ion_priv.h" #define NUM_ORDERS ARRAY_SIZE(orders) @@ -74,6 +75,9 @@ static struct page *alloc_buffer_page(struct ion_system_heap *heap, page = ion_page_pool_alloc(pool); + if (cached) + ion_pages_sync_for_device(NULL, page, PAGE_SIZE << order, + DMA_BIDIRECTIONAL); return page; } @@ -98,6 +102,7 @@ static void free_buffer_page(struct ion_system_heap *heap, ion_page_pool_free(pool, page); } + static struct page *alloc_largest_available(struct ion_system_heap *heap, struct ion_buffer *buffer, unsigned long size, @@ -124,7 +129,7 @@ static struct page *alloc_largest_available(struct ion_system_heap *heap, static int ion_system_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer, - unsigned long size, + unsigned long size, unsigned long align, unsigned long flags) { struct ion_system_heap *sys_heap = container_of(heap, @@ -138,6 +143,9 @@ static int ion_system_heap_allocate(struct ion_heap *heap, unsigned long size_remaining = PAGE_ALIGN(size); unsigned int max_order = orders[0]; + if (align > PAGE_SIZE) + return -EINVAL; + if (size / PAGE_SIZE > totalram_pages / 2) return -ENOMEM; @@ -152,7 +160,7 @@ static int ion_system_heap_allocate(struct ion_heap *heap, max_order = 
compound_order(page); i++; } - table = kmalloc(sizeof(*table), GFP_KERNEL); + table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); if (!table) goto free_pages; @@ -255,6 +263,7 @@ static struct ion_heap_ops system_heap_ops = { static int ion_system_heap_debug_show(struct ion_heap *heap, struct seq_file *s, void *unused) { + struct ion_system_heap *sys_heap = container_of(heap, struct ion_system_heap, heap); @@ -318,7 +327,7 @@ static int ion_system_heap_create_pools(struct ion_page_pool **pools, return -ENOMEM; } -static struct ion_heap *__ion_system_heap_create(void) +struct ion_heap *ion_system_heap_create(struct ion_platform_heap *unused) { struct ion_system_heap *heap; @@ -346,23 +355,24 @@ static struct ion_heap *__ion_system_heap_create(void) return ERR_PTR(-ENOMEM); } -static int ion_system_heap_create(void) +void ion_system_heap_destroy(struct ion_heap *heap) { - struct ion_heap *heap; - - heap = __ion_system_heap_create(); - if (IS_ERR(heap)) - return PTR_ERR(heap); - heap->name = "ion_system_heap"; + struct ion_system_heap *sys_heap = container_of(heap, + struct ion_system_heap, + heap); + int i; - ion_device_add_heap(heap); - return 0; + for (i = 0; i < NUM_ORDERS; i++) { + ion_page_pool_destroy(sys_heap->uncached_pools[i]); + ion_page_pool_destroy(sys_heap->cached_pools[i]); + } + kfree(sys_heap); } -device_initcall(ion_system_heap_create); static int ion_system_contig_heap_allocate(struct ion_heap *heap, struct ion_buffer *buffer, unsigned long len, + unsigned long align, unsigned long flags) { int order = get_order(len); @@ -371,6 +381,9 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, unsigned long i; int ret; + if (align > (PAGE_SIZE << order)) + return -EINVAL; + page = alloc_pages(low_order_gfp_flags, order); if (!page) return -ENOMEM; @@ -381,7 +394,7 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, for (i = len >> PAGE_SHIFT; i < (1 << order); i++) __free_page(page + i); - table = kmalloc(sizeof(*table), GFP_KERNEL); + table = kmalloc(sizeof(struct sg_table), GFP_KERNEL); if (!table) { ret = -ENOMEM; goto free_pages; @@ -395,6 +408,8 @@ static int ion_system_contig_heap_allocate(struct ion_heap *heap, buffer->sg_table = table; + ion_pages_sync_for_device(NULL, page, len, DMA_BIDIRECTIONAL); + return 0; free_table: @@ -427,29 +442,19 @@ static struct ion_heap_ops kmalloc_ops = { .map_user = ion_heap_map_user, }; -static struct ion_heap *__ion_system_contig_heap_create(void) +struct ion_heap *ion_system_contig_heap_create(struct ion_platform_heap *unused) { struct ion_heap *heap; - heap = kzalloc(sizeof(*heap), GFP_KERNEL); + heap = kzalloc(sizeof(struct ion_heap), GFP_KERNEL); if (!heap) return ERR_PTR(-ENOMEM); heap->ops = &kmalloc_ops; heap->type = ION_HEAP_TYPE_SYSTEM_CONTIG; - heap->name = "ion_system_contig_heap"; return heap; } -static int ion_system_contig_heap_create(void) +void ion_system_contig_heap_destroy(struct ion_heap *heap) { - struct ion_heap *heap; - - heap = __ion_system_contig_heap_create(); - if (IS_ERR(heap)) - return PTR_ERR(heap); - - ion_device_add_heap(heap); - return 0; + kfree(heap); } -device_initcall(ion_system_contig_heap_create); - diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c new file mode 100644 index 00000000000000..5abf8320a96aaa --- /dev/null +++ b/drivers/staging/android/ion/ion_test.c @@ -0,0 +1,305 @@ +/* + * + * Copyright (C) 2013 Google, Inc. 
+ * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#define pr_fmt(fmt) "ion-test: " fmt + +#include <linux/dma-buf.h> +#include <linux/dma-direction.h> +#include <linux/kernel.h> +#include <linux/miscdevice.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/uaccess.h> +#include <linux/vmalloc.h> + +#include "ion.h" +#include "../uapi/ion_test.h" + +#define u64_to_uptr(x) ((void __user *)(unsigned long)(x)) + +struct ion_test_device { + struct miscdevice misc; +}; + +struct ion_test_data { + struct dma_buf *dma_buf; + struct device *dev; +}; + +static int ion_handle_test_dma(struct device *dev, struct dma_buf *dma_buf, + void __user *ptr, size_t offset, size_t size, + bool write) +{ + int ret = 0; + struct dma_buf_attachment *attach; + struct sg_table *table; + pgprot_t pgprot = pgprot_writecombine(PAGE_KERNEL); + enum dma_data_direction dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE; + struct sg_page_iter sg_iter; + unsigned long offset_page; + + attach = dma_buf_attach(dma_buf, dev); + if (IS_ERR(attach)) + return PTR_ERR(attach); + + table = dma_buf_map_attachment(attach, dir); + if (IS_ERR(table)) + return PTR_ERR(table); + + offset_page = offset >> PAGE_SHIFT; + offset %= PAGE_SIZE; + + for_each_sg_page(table->sgl, &sg_iter, table->nents, offset_page) { + struct page *page = sg_page_iter_page(&sg_iter); + void *vaddr = vmap(&page, 1, VM_MAP, pgprot); + size_t to_copy = PAGE_SIZE - offset; + + to_copy = min(to_copy, size); + if (!vaddr) { + ret = -ENOMEM; + goto err; + } + + if (write) + ret = copy_from_user(vaddr + offset, ptr, to_copy); + else + ret = copy_to_user(ptr, vaddr + offset, to_copy); + + vunmap(vaddr); + if (ret) { + ret = -EFAULT; + goto err; + } + size -= to_copy; + if (!size) + break; + ptr += to_copy; + offset = 0; + } + +err: + dma_buf_unmap_attachment(attach, table, dir); + dma_buf_detach(dma_buf, attach); + return ret; +} + +static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr, + size_t offset, size_t size, bool write) +{ + int ret; + unsigned long page_offset = offset >> PAGE_SHIFT; + size_t copy_offset = offset % PAGE_SIZE; + size_t copy_size = size; + enum dma_data_direction dir = write ?
DMA_FROM_DEVICE : DMA_TO_DEVICE; + + if (offset > dma_buf->size || size > dma_buf->size - offset) + return -EINVAL; + + ret = dma_buf_begin_cpu_access(dma_buf, dir); + if (ret) + return ret; + + while (copy_size > 0) { + size_t to_copy; + void *vaddr = dma_buf_kmap(dma_buf, page_offset); + + if (!vaddr) + goto err; + + to_copy = min_t(size_t, PAGE_SIZE - copy_offset, copy_size); + + if (write) + ret = copy_from_user(vaddr + copy_offset, ptr, to_copy); + else + ret = copy_to_user(ptr, vaddr + copy_offset, to_copy); + + dma_buf_kunmap(dma_buf, page_offset, vaddr); + if (ret) { + ret = -EFAULT; + goto err; + } + + copy_size -= to_copy; + ptr += to_copy; + page_offset++; + copy_offset = 0; + } +err: + dma_buf_end_cpu_access(dma_buf, dir); + return ret; +} + +static long ion_test_ioctl(struct file *filp, unsigned int cmd, + unsigned long arg) +{ + struct ion_test_data *test_data = filp->private_data; + int ret = 0; + + union { + struct ion_test_rw_data test_rw; + } data; + + if (_IOC_SIZE(cmd) > sizeof(data)) + return -EINVAL; + + if (_IOC_DIR(cmd) & _IOC_WRITE) + if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd))) + return -EFAULT; + + switch (cmd) { + case ION_IOC_TEST_SET_FD: + { + struct dma_buf *dma_buf = NULL; + int fd = arg; + + if (fd >= 0) { + dma_buf = dma_buf_get((int)arg); + if (IS_ERR(dma_buf)) + return PTR_ERR(dma_buf); + } + if (test_data->dma_buf) + dma_buf_put(test_data->dma_buf); + test_data->dma_buf = dma_buf; + break; + } + case ION_IOC_TEST_DMA_MAPPING: + { + ret = ion_handle_test_dma(test_data->dev, test_data->dma_buf, + u64_to_uptr(data.test_rw.ptr), + data.test_rw.offset, + data.test_rw.size, + data.test_rw.write); + break; + } + case ION_IOC_TEST_KERNEL_MAPPING: + { + ret = ion_handle_test_kernel(test_data->dma_buf, + u64_to_uptr(data.test_rw.ptr), + data.test_rw.offset, + data.test_rw.size, + data.test_rw.write); + break; + } + default: + return -ENOTTY; + } + + if (_IOC_DIR(cmd) & _IOC_READ) { + if (copy_to_user((void __user *)arg, &data, sizeof(data))) + return -EFAULT; + } + return ret; +} + +static int ion_test_open(struct inode *inode, struct file *file) +{ + struct ion_test_data *data; + struct miscdevice *miscdev = file->private_data; + + data = kzalloc(sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + data->dev = miscdev->parent; + + file->private_data = data; + + return 0; +} + +static int ion_test_release(struct inode *inode, struct file *file) +{ + struct ion_test_data *data = file->private_data; + + kfree(data); + + return 0; +} + +static const struct file_operations ion_test_fops = { + .owner = THIS_MODULE, + .unlocked_ioctl = ion_test_ioctl, + .compat_ioctl = ion_test_ioctl, + .open = ion_test_open, + .release = ion_test_release, +}; + +static int __init ion_test_probe(struct platform_device *pdev) +{ + int ret; + struct ion_test_device *testdev; + + testdev = devm_kzalloc(&pdev->dev, sizeof(struct ion_test_device), + GFP_KERNEL); + if (!testdev) + return -ENOMEM; + + testdev->misc.minor = MISC_DYNAMIC_MINOR; + testdev->misc.name = "ion-test"; + testdev->misc.fops = &ion_test_fops; + testdev->misc.parent = &pdev->dev; + ret = misc_register(&testdev->misc); + if (ret) { + pr_err("failed to register misc device.\n"); + return ret; + } + + platform_set_drvdata(pdev, testdev); + + return 0; +} + +static int ion_test_remove(struct platform_device *pdev) +{ + struct ion_test_device *testdev; + + testdev = platform_get_drvdata(pdev); + if (!testdev) + return -ENODATA; + + misc_deregister(&testdev->misc); + return 0; +} + +static struct 
platform_device *ion_test_pdev; +static struct platform_driver ion_test_platform_driver = { + .remove = ion_test_remove, + .driver = { + .name = "ion-test", + }, +}; + +static int __init ion_test_init(void) +{ + ion_test_pdev = platform_device_register_simple("ion-test", + -1, NULL, 0); + if (IS_ERR(ion_test_pdev)) + return PTR_ERR(ion_test_pdev); + + return platform_driver_probe(&ion_test_platform_driver, ion_test_probe); +} + +static void __exit ion_test_exit(void) +{ + platform_driver_unregister(&ion_test_platform_driver); + platform_device_unregister(ion_test_pdev); +} + +module_init(ion_test_init); +module_exit(ion_test_exit); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/staging/android/ion/tegra/Makefile b/drivers/staging/android/ion/tegra/Makefile new file mode 100644 index 00000000000000..808f1f53f11adc --- /dev/null +++ b/drivers/staging/android/ion/tegra/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_ION_TEGRA) += tegra_ion.o diff --git a/drivers/staging/android/ion/tegra/tegra_ion.c b/drivers/staging/android/ion/tegra/tegra_ion.c new file mode 100644 index 00000000000000..49e55e5acead9d --- /dev/null +++ b/drivers/staging/android/ion/tegra/tegra_ion.c @@ -0,0 +1,80 @@ +/* + * drivers/gpu/tegra/tegra_ion.c + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include +#include +#include +#include +#include "../ion.h" +#include "../ion_priv.h" + +static struct ion_device *idev; +static int num_heaps; +static struct ion_heap **heaps; + +static int tegra_ion_probe(struct platform_device *pdev) +{ + struct ion_platform_data *pdata = pdev->dev.platform_data; + int err; + int i; + + num_heaps = pdata->nr; + + heaps = devm_kcalloc(&pdev->dev, pdata->nr, + sizeof(struct ion_heap *), GFP_KERNEL); + + idev = ion_device_create(NULL); + if (IS_ERR(idev)) + return PTR_ERR(idev); + + /* create the heaps as specified in the board file */ + for (i = 0; i < num_heaps; i++) { + struct ion_platform_heap *heap_data = &pdata->heaps[i]; + + heaps[i] = ion_heap_create(heap_data); + if (IS_ERR_OR_NULL(heaps[i])) { + err = PTR_ERR(heaps[i]); + goto err; + } + ion_device_add_heap(idev, heaps[i]); + } + platform_set_drvdata(pdev, idev); + return 0; +err: + for (i = 0; i < num_heaps; ++i) + ion_heap_destroy(heaps[i]); + return err; +} + +static int tegra_ion_remove(struct platform_device *pdev) +{ + struct ion_device *idev = platform_get_drvdata(pdev); + int i; + + ion_device_destroy(idev); + for (i = 0; i < num_heaps; i++) + ion_heap_destroy(heaps[i]); + return 0; +} + +static struct platform_driver ion_driver = { + .probe = tegra_ion_probe, + .remove = tegra_ion_remove, + .driver = { .name = "ion-tegra" } +}; + +module_platform_driver(ion_driver); + diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h index 9e21451149d06f..14cd8738ecfc3a 100644 --- a/drivers/staging/android/uapi/ion.h +++ b/drivers/staging/android/uapi/ion.h @@ -20,6 +20,8 @@ #include #include +typedef int ion_user_handle_t; + /** * enum ion_heap_types - list of all possible types of heaps * @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc @@ -57,6 +59,12 
@@ enum ion_heap_type { */ #define ION_FLAG_CACHED 1 +/* + * mappings of this buffer will be created at mmap time; if this is set, + * caches must be managed manually + */ +#define ION_FLAG_CACHED_NEEDS_SYNC 2 + /** * DOC: Ion Userspace API * @@ -68,6 +76,7 @@ enum ion_heap_type { /** * struct ion_allocation_data - metadata passed from userspace for allocations * @len: size of the allocation + * @align: required alignment of the allocation * @heap_id_mask: mask of heap ids to allocate from * @flags: flags passed to heap * @handle: pointer that will be populated with a cookie to use to @@ -76,11 +85,47 @@ enum ion_heap_type { * Provided by userspace as an argument to the ioctl */ struct ion_allocation_data { - __u64 len; - __u32 heap_id_mask; - __u32 flags; - __u32 fd; - __u32 unused; + size_t len; + size_t align; + unsigned int heap_id_mask; + unsigned int flags; + ion_user_handle_t handle; +}; + +/** + * struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair + * @handle: a handle + * @fd: a file descriptor representing that handle + * + * For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with + * the handle returned from ion alloc, and the kernel returns the file + * descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace + * provides the file descriptor and the kernel returns the handle. + */ +struct ion_fd_data { + ion_user_handle_t handle; + int fd; +}; + +/** + * struct ion_handle_data - a handle passed to/from the kernel + * @handle: a handle + */ +struct ion_handle_data { + ion_user_handle_t handle; +}; + +/** + * struct ion_custom_data - metadata passed to/from userspace for a custom ioctl + * @cmd: the custom ioctl function to call + * @arg: additional data to pass to the custom ioctl, typically a user + * pointer to a predefined structure + * + * This works just like the regular cmd and arg fields of an ioctl. + */ +struct ion_custom_data { + unsigned int cmd; + unsigned long arg; }; #define MAX_HEAP_NAME 32 @@ -124,6 +169,61 @@ struct ion_heap_query { #define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \ struct ion_allocation_data) +/** + * DOC: ION_IOC_FREE - free memory + * + * Takes an ion_handle_data struct and frees the handle. + */ +#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data) + +/** + * DOC: ION_IOC_MAP - get a file descriptor to mmap + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle. Returns the struct with the fd field set to a file + * descriptor open in the current address space. This file descriptor + * can then be used as an argument to mmap. + */ +#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data) + +/** + * DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation + * + * Takes an ion_fd_data struct with the handle field populated with a valid + * opaque handle. Returns the struct with the fd field set to a file + * descriptor open in the current address space. This file descriptor + * can then be passed to another process. The corresponding opaque handle can + * be retrieved via ION_IOC_IMPORT. + */ +#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data) + +/** + * DOC: ION_IOC_IMPORT - imports a shared file descriptor + * + * Takes an ion_fd_data struct with the fd field populated with a valid file + * descriptor obtained from ION_IOC_SHARE and returns the struct with the handle + * field set to the corresponding opaque handle.
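+ *
+ * A purely editorial usage sketch, not part of this patch (ion_fd is the
+ * client fd from opening /dev/ion, and shared_fd is assumed to have been
+ * received from the exporting process):
+ *
+ *	struct ion_fd_data data = { .fd = shared_fd };
+ *
+ *	if (ioctl(ion_fd, ION_IOC_IMPORT, &data) == 0)
+ *		printf("imported handle %d\n", data.handle);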
+ */ +#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, struct ion_fd_data) + +/** + * DOC: ION_IOC_SYNC - syncs a shared file descriptor to memory + * + * Deprecated in favor of using the dma_buf APIs correctly (syncing + * will happen automatically when the buffer is mapped to a device). + * If necessary, it should be used after touching a cached buffer from the cpu; + * this will make the buffer in memory coherent. + */ +#define ION_IOC_SYNC _IOWR(ION_IOC_MAGIC, 7, struct ion_fd_data) + +/** + * DOC: ION_IOC_CUSTOM - call an architecture-specific ion ioctl + * + * Takes the argument of the architecture-specific ioctl to call and + * passes appropriate userdata for that ioctl + */ +#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data) + /** * DOC: ION_IOC_HEAP_QUERY - information about available heaps * diff --git a/drivers/staging/android/uapi/ion_test.h b/drivers/staging/android/uapi/ion_test.h new file mode 100644 index 00000000000000..480242e02f8d64 --- /dev/null +++ b/drivers/staging/android/uapi/ion_test.h @@ -0,0 +1,69 @@ +/* + * drivers/staging/android/uapi/ion_test.h + * + * Copyright (C) 2011 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _UAPI_LINUX_ION_TEST_H +#define _UAPI_LINUX_ION_TEST_H + +#include <linux/ioctl.h> +#include <linux/types.h> + +/** + * struct ion_test_rw_data - metadata passed to the kernel to read or write a handle + * @ptr: a pointer to an area at least as large as size + * @offset: offset into the ion buffer to start reading or writing + * @size: size to read or write + * @write: 1 to write, 0 to read + */ +struct ion_test_rw_data { + __u64 ptr; + __u64 offset; + __u64 size; + int write; + int __padding; }; + +#define ION_IOC_MAGIC 'I' + +/** + * DOC: ION_IOC_TEST_SET_FD - attach a dma buf to the test driver + * + * Attaches a dma buf fd to the test driver. Passing a second fd or -1 will + * release the first fd. + */ +#define ION_IOC_TEST_SET_FD \ + _IO(ION_IOC_MAGIC, 0xf0) + +/** + * DOC: ION_IOC_TEST_DMA_MAPPING - read or write memory from a handle as DMA + * + * Reads or writes the memory from a handle using an uncached mapping. Can be + * used by unit tests to emulate a DMA engine as closely as possible. Only + * expected to be used for debugging and testing; it may not always be + * available. + */ +#define ION_IOC_TEST_DMA_MAPPING \ + _IOW(ION_IOC_MAGIC, 0xf1, struct ion_test_rw_data) + +/** + * DOC: ION_IOC_TEST_KERNEL_MAPPING - read or write memory from a handle + * + * Reads or writes the memory from a handle using a kernel mapping. Can be + * used by unit tests to test heap map_kernel functions. Only expected to be + * used for debugging and testing; it may not always be available. + */ +#define ION_IOC_TEST_KERNEL_MAPPING \ + _IOW(ION_IOC_MAGIC, 0xf2, struct ion_test_rw_data) + +#endif /* _UAPI_LINUX_ION_TEST_H */
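
Editorial addendum, not part of the patch above: a minimal userspace sketch of the ion-test flow that the DOC comments in ion_test.h describe. It assumes a dma-buf fd has already been obtained (for instance via ION_IOC_ALLOC followed by ION_IOC_SHARE) and that the misc device registered by ion_test.c appears as /dev/ion-test; the function name and error handling are illustrative only.

#include <fcntl.h>
#include <linux/types.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "ion_test.h"	/* the uapi header added by this patch */

/*
 * Copy the first 'len' bytes of a dma-buf into 'dst' through the heap's
 * kernel mapping, exercising ION_IOC_TEST_KERNEL_MAPPING.
 */
static int read_via_kernel_mapping(int dma_buf_fd, void *dst, __u64 len)
{
	struct ion_test_rw_data rw = {
		.ptr	= (__u64)(unsigned long)dst,
		.offset	= 0,
		.size	= len,
		.write	= 0,	/* 0 = read: buffer contents -> .ptr */
	};
	int fd = open("/dev/ion-test", O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	/* Attach the dma-buf to the test driver... */
	ret = ioctl(fd, ION_IOC_TEST_SET_FD, dma_buf_fd);
	/* ...copy through the kernel mapping... */
	if (!ret)
		ret = ioctl(fd, ION_IOC_TEST_KERNEL_MAPPING, &rw);
	/* ...and release the reference (an fd of -1 drops it). */
	if (!ret)
		ret = ioctl(fd, ION_IOC_TEST_SET_FD, -1);
	close(fd);
	return ret;
}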
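
Equally editorial: a kernel-side sketch of the lifecycle implied by the ion_page_pool interface declared in ion_priv.h at the top of this hunk. It assumes a single order-0, uncached pool; the example_* names are hypothetical and not part of the patch.

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include "ion_priv.h"	/* ion_page_pool API declared above */

static struct ion_page_pool *example_pool;

static int example_pool_init(void)
{
	/* One pool of order-0 pages for uncached buffers. */
	example_pool = ion_page_pool_create(GFP_KERNEL, 0, false);
	return example_pool ? 0 : -ENOMEM;
}

static struct page *example_get_page(void)
{
	/* Served from the pool's lists when possible; the pool refills
	 * itself from the buddy allocator when empty. */
	return ion_page_pool_alloc(example_pool);
}

static void example_put_page(struct page *page)
{
	/* Pages go back onto the pool's lists instead of being freed, so
	 * the next allocation can skip fresh cache maintenance. */
	ion_page_pool_free(example_pool, page);
}

static void example_pool_exit(void)
{
	/* Drains and frees everything still held by the pool. */
	ion_page_pool_destroy(example_pool);
}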