[RFC][PATCH 6/6] staging: android: ion: Introduce new ioctls for dynamic heaps

Laura Abbott labbott at redhat.com
Mon Jun 6 18:23:33 UTC 2016


From: Laura Abbott <labbott at fedoraproject.org>


The Ion heap ABI is limiting to work with on more complex systems. Heaps
have to be registered at boot time with ids known to userspace, which
makes for a rigid ABI that is prone to breakage.

Introduce a new mechanism for registering heap ids dynamically. A base
set of heap ids is still registered at boot time, but without any
built-in fallback knowledge; fallback mappings can instead be created and
changed dynamically at runtime. The set of available heaps can also be
queried with an ioctl, so heap ids no longer need to be part of the ABI
with userspace.
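
To illustrate the intent, a rough (untested) userspace sketch follows. It
is not part of the patch: the helper name is made up, the usual /dev/ion
device node is assumed, and all error handling is omitted.

#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include "ion.h"	/* the uapi header touched by this patch */

static int example_alloc_with_fallbacks(void)
{
	struct ion_usage_cnt ucnt = {0};
	struct ion_heap_query query = {0};
	struct ion_usage_id_map map = {0};
	struct ion_new_alloc_data alloc = {0};
	struct ion_heap_data *heaps;
	unsigned int fallbacks[2];
	int fd = open("/dev/ion", O_RDWR);

	/* how many heaps are registered right now? */
	ioctl(fd, ION_IOC_USAGE_CNT, &ucnt);

	/* fetch name/type/usage id for each of them */
	heaps = calloc(ucnt.cnt, sizeof(*heaps));
	query.cnt = ucnt.cnt;
	query.heaps = (__u64)(unsigned long)heaps;
	ioctl(fd, ION_IOC_HEAP_QUERY, &query);

	/* create a usage id that tries heaps[0] first, then heaps[1] */
	fallbacks[0] = heaps[0].usage_id;
	fallbacks[1] = heaps[1].usage_id;
	map.usage_ids = (__u64)(unsigned long)fallbacks;
	map.cnt = 2;
	ioctl(fd, ION_IOC_ID_MAP, &map);

	/* allocate through the new usage id, asking for an fd only */
	alloc.len = 4096;
	alloc.usage_id = map.new_id;
	alloc.flags = ION_FLAG_NO_HANDLE;
	ioctl(fd, ION_IOC_ALLOC2, &alloc);

	free(heaps);
	return alloc.fd;
}

Since heap data is discovered through ION_IOC_HEAP_QUERY, userspace no
longer has to hard-code heap ids.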

Signed-off-by: Laura Abbott <labbott at redhat.com>
---
 drivers/staging/android/ion/ion-ioctl.c | 109 +++++++++++++++++--
 drivers/staging/android/ion/ion.c       | 184 ++++++++++++++++++++++++++++++--
 drivers/staging/android/ion/ion_priv.h  |  15 +++
 drivers/staging/android/uapi/ion.h      | 135 +++++++++++++++++++++++
 4 files changed, 426 insertions(+), 17 deletions(-)

diff --git a/drivers/staging/android/ion/ion-ioctl.c b/drivers/staging/android/ion/ion-ioctl.c
index 45b89e8..169cad8 100644
--- a/drivers/staging/android/ion/ion-ioctl.c
+++ b/drivers/staging/android/ion/ion-ioctl.c
@@ -22,6 +22,49 @@
 #include "ion_priv.h"
 #include "compat_ion.h"
 
+union ion_ioctl_arg {
+	struct ion_fd_data fd;
+	struct ion_allocation_data allocation;
+	struct ion_handle_data handle;
+	struct ion_custom_data custom;
+	struct ion_abi_version abi_version;
+	struct ion_new_alloc_data allocation2;
+	struct ion_usage_id_map id_map;
+	struct ion_usage_cnt usage_cnt;
+	struct ion_heap_query query;
+};
+
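+/*
+ * Reject arguments with any reserved field set so the fields can be
+ * given a meaning later without breaking existing users.
+ */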
+static int validate_ioctl_arg(unsigned int cmd, union ion_ioctl_arg *arg)
+{
+	int ret = 0;
+
+	switch (cmd) {
+	case ION_IOC_ABI_VERSION:
+		ret = arg->abi_version.reserved != 0;
+		break;
+	case ION_IOC_ALLOC2:
+		ret = arg->allocation2.reserved0 != 0;
+		ret |= arg->allocation2.reserved1 != 0;
+		ret |= arg->allocation2.reserved2 != 0;
+		break;
+	case ION_IOC_ID_MAP:
+		ret = arg->id_map.reserved0 != 0;
+		ret |= arg->id_map.reserved1 != 0;
+		break;
+	case ION_IOC_USAGE_CNT:
+		ret = arg->usage_cnt.reserved != 0;
+		break;
+	case ION_IOC_HEAP_QUERY:
+		ret = arg->query.reserved0 != 0;
+		ret |= arg->query.reserved1 != 0;
+		ret |= arg->query.reserved2 != 0;
+		break;
+	default:
+		break;
+	}
+	return ret ? -EINVAL : 0;
+}
+
 /* fix up the cases where the ioctl direction bits are incorrect */
 static unsigned int ion_ioctl_dir(unsigned int cmd)
 {
@@ -42,14 +85,7 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 	struct ion_handle *cleanup_handle = NULL;
 	int ret = 0;
 	unsigned int dir;
-
-	union {
-		struct ion_fd_data fd;
-		struct ion_allocation_data allocation;
-		struct ion_handle_data handle;
-		struct ion_custom_data custom;
-		struct ion_abi_version abi_version;
-	} data;
+	union ion_ioctl_arg data;
 
 	dir = ion_ioctl_dir(cmd);
 
@@ -60,7 +96,12 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
 			return -EFAULT;
 
+	ret = validate_ioctl_arg(cmd, &data);
+	if (ret)
+		return ret;
+
 	switch (cmd) {
+	/* Old ioctl */
 	case ION_IOC_ALLOC:
 	{
 		struct ion_handle *handle;
@@ -77,6 +118,7 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		cleanup_handle = handle;
 		break;
 	}
+	/* Old ioctl */
 	case ION_IOC_FREE:
 	{
 		struct ion_handle *handle;
@@ -92,6 +134,7 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		mutex_unlock(&client->lock);
 		break;
 	}
+	/* Old ioctl */
 	case ION_IOC_SHARE:
 	case ION_IOC_MAP:
 	{
@@ -106,6 +149,7 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			ret = data.fd.fd;
 		break;
 	}
+	/* Old ioctl */
 	case ION_IOC_IMPORT:
 	{
 		struct ion_handle *handle;
@@ -117,11 +161,13 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 			data.handle.handle = handle->id;
 		break;
 	}
+	/* Old ioctl */
 	case ION_IOC_SYNC:
 	{
 		ret = ion_sync_for_device(client, data.fd.fd);
 		break;
 	}
+	/* Old ioctl */
 	case ION_IOC_CUSTOM:
 	{
 		if (!dev->custom_ioctl)
@@ -135,6 +181,53 @@ long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 		data.abi_version.abi_version = ION_ABI_VERSION;
 		break;
 	}
+	case ION_IOC_ALLOC2:
+	{
+		struct ion_handle *handle;
+
+		handle = ion_alloc2(client, data.allocation2.len,
+					data.allocation2.align,
+					data.allocation2.usage_id,
+					data.allocation2.flags);
+		if (IS_ERR(handle))
+			return PTR_ERR(handle);
+
+		if (data.allocation2.flags & ION_FLAG_NO_HANDLE) {
+			int fd = ion_share_dma_buf_fd(client, handle);
+
+			ion_handle_put(handle);
+			if (fd < 0)
+				ret = fd;
+			else
+				data.allocation2.fd = fd;
+		} else {
+			data.allocation2.handle = handle->id;
+
+			cleanup_handle = handle;
+		}
+		break;
+	}
+	case ION_IOC_ID_MAP:
+	{
+		ret = ion_map_usage_ids(client,
+				(unsigned int __user *)data.id_map.usage_ids,
+				data.id_map.cnt);
+		if (ret > 0)
+			data.id_map.new_id = ret;
+		break;
+	}
+	case ION_IOC_USAGE_CNT:
+	{
+		down_read(&client->dev->lock);
+		data.usage_cnt.cnt = client->dev->heap_cnt;
+		up_read(&client->dev->lock);
+		break;
+	}
+	case ION_IOC_HEAP_QUERY:
+	{
+		ret = ion_query_heaps(client,
+				(struct ion_heap_data __user *)data.query.heaps,
+				data.query.cnt);
+		break;
+	}
 	default:
 		return -ENOTTY;
 	}
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 129707f..c096cb9 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -459,6 +459,48 @@ static struct ion_handle *__ion_alloc(struct ion_client *client, size_t len,
 	return handle;
 }
 
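+/*
+ * Allocate from the heap matching @usage_id. If the id names a
+ * dynamically created ION_USAGE_ID_MAP entry, its fallback list is
+ * walked in order and the first successful allocation is returned.
+ */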
+struct ion_handle *ion_alloc2(struct ion_client *client, size_t len,
+				size_t align, unsigned int usage_id,
+				unsigned int flags)
+{
+	struct ion_device *dev = client->dev;
+	struct ion_heap *heap;
+	struct ion_handle *handle = ERR_PTR(-ENODEV);
+
+	down_read(&dev->lock);
+	heap = idr_find(&dev->idr, usage_id);
+	if (!heap) {
+		handle = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
+	if (heap->type == ION_USAGE_ID_MAP) {
+		struct ion_heap *fallback;
+		int i;
+
+		for (i = 0; i < heap->fallback_cnt; i++) {
+			fallback = idr_find(&dev->idr, heap->fallbacks[i]);
+			if (!fallback)
+				continue;
+
+			/* Don't recurse into nested usage id maps for now */
+			if (fallback->type == ION_USAGE_ID_MAP)
+				continue;
+
+			handle = __ion_alloc(client, len, align, fallback,
+					     flags);
+			if (!IS_ERR(handle))
+				break;
+		}
+	} else {
+		handle = __ion_alloc(client, len, align, heap, flags);
+	}
+out:
+	up_read(&dev->lock);
+	return handle;
+}
+EXPORT_SYMBOL(ion_alloc2);
+
 struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
 				size_t align, unsigned int heap_mask,
 				unsigned int flags)
@@ -1243,6 +1285,55 @@ int ion_sync_for_device(struct ion_client *client, int fd)
 	return 0;
 }
 
+struct ion_query_data {
+	struct ion_heap_data __user *buffer;
+	int cnt;
+	int max_cnt;
+};
+
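+/* idr_for_each() callback: copy one heap's details to the user buffer */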
+static int __ion_query(int id, void *p, void *data)
+{
+	struct ion_heap *heap = p;
+	struct ion_query_data *query = data;
+	struct ion_heap_data hdata = {0};
+
+	if (query->cnt >= query->max_cnt)
+		return -ENOSPC;
+
+	strncpy(hdata.name, heap->name, sizeof(hdata.name));
+	hdata.name[sizeof(hdata.name) - 1] = '\0';
+	hdata.type = heap->type;
+	hdata.usage_id = heap->id;
+
+	if (copy_to_user(&query->buffer[query->cnt++], &hdata, sizeof(hdata)))
+		return -EFAULT;
+
+	return 0;
+}
+
+int ion_query_heaps(struct ion_client *client,
+		struct ion_heap_data __user *buffer,
+		int cnt)
+{
+	struct ion_device *dev = client->dev;
+	struct ion_query_data data;
+	int ret;
+
+	data.buffer = buffer;
+	data.cnt = 0;
+	data.max_cnt = cnt;
+
+	down_read(&dev->lock);
+	if (data.max_cnt < 0 || data.max_cnt > dev->heap_cnt) {
+		ret = -EINVAL;
+		goto out;
+	}
+	ret = idr_for_each(&dev->idr, __ion_query, &data);
+out:
+	up_read(&dev->lock);
+
+	return ret;
+}
+
 static int ion_release(struct inode *inode, struct file *file)
 {
 	struct ion_client *client = file->private_data;
@@ -1407,13 +1498,69 @@ static int debug_shrink_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
 			debug_shrink_set, "%llu\n");
 
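+/*
+ * Create a new dynamically assigned usage id whose fallbacks are the
+ * existing usage ids supplied by userspace. Returns the new usage id on
+ * success or a negative errno on failure.
+ */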
+int ion_map_usage_ids(struct ion_client *client,
+			unsigned int __user *usage_ids,
+			int cnt)
+{
+	struct ion_heap *heap = kzalloc(sizeof(*heap), GFP_KERNEL);
+	unsigned int *fallbacks;
+	int i;
+	int ret;
+
+	if (!heap)
+		return -ENOMEM;
+
+	fallbacks = kcalloc(cnt, sizeof(unsigned int), GFP_KERNEL);
+	if (!fallbacks) {
+		ret = -ENOMEM;
+		goto out1;
+	}
+
+	if (copy_from_user(fallbacks, usage_ids, cnt * sizeof(unsigned int))) {
+		ret = -EFAULT;
+		goto out2;
+	}
+
+	down_read(&client->dev->lock);
+	for (i = 0; i < cnt; i++) {
+		if (idr_find(&client->dev->idr, fallbacks[i]) == NULL) {
+			ret = -EINVAL;
+			goto out3;
+		}
+	}
+	up_read(&client->dev->lock);
+
+	/*
+	 * This is a racy check since the lock is dropped before the heap
+	 * is actually added. It's okay though because ids are never actually
+	 * deleted. Worst case some user gets an error back and an indication
+	 * to fix races in their code.
+	 */
+
+	heap->fallbacks = fallbacks;
+	heap->fallback_cnt = cnt;
+	heap->type = ION_USAGE_ID_MAP;
+	heap->id = ION_DYNAMIC_HEAP_ASSIGN;
+	ion_device_add_heap(client->dev, heap);
+	return heap->id;
+out3:
+	up_read(&client->dev->lock);
+out2:
+	kfree(fallbacks);
+out1:
+	kfree(heap);
+	return ret;
+}
+
 int ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 {
 	struct dentry *debug_file;
 	int ret;
 
-	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
-	    !heap->ops->unmap_dma) {
+	if (heap->type != ION_USAGE_ID_MAP &&
+	   (!heap->ops->allocate ||
+	    !heap->ops->free ||
+	    !heap->ops->map_dma ||
+	    !heap->ops->unmap_dma)) {
 		pr_err("%s: can not add heap with invalid ops struct.\n",
 		       __func__);
 		return -EINVAL;
@@ -1425,18 +1572,36 @@ int ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
 		ion_heap_init_deferred_free(heap);
 
-	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
+	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) ||
+	    (heap->ops && heap->ops->shrink))
 		ion_heap_init_shrinker(heap);
 
 	heap->dev = dev;
 	down_write(&dev->lock);
 
-	ret = idr_alloc(&dev->idr, heap, heap->id, heap->id + 1, GFP_KERNEL);
-	if (ret < 0 || ret != heap->id) {
-		pr_info("%s: Failed to add heap id, expected %d got %d\n",
-			__func__, heap->id, ret);
-		up_write(&dev->lock);
-		return ret < 0 ? ret : -EINVAL;
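+	/*
+	 * Dynamically created heaps take any free id above the legacy
+	 * range; boot-time heaps must be given exactly the id they
+	 * requested.
+	 */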
+	if (heap->id == ION_DYNAMIC_HEAP_ASSIGN) {
+		ret = idr_alloc(&dev->idr, heap,
+				ION_DYNAMIC_HEAP_ASSIGN + 1, 0, GFP_KERNEL);
+		if (ret < 0)
+			goto out_unlock;
+
+		heap->id = ret;
+	} else {
+		ret = idr_alloc(&dev->idr, heap, heap->id, heap->id + 1,
+				GFP_KERNEL);
+		if (ret < 0 || ret != heap->id) {
+			pr_info("%s: Failed to add heap id, expected %d got %d\n",
+				__func__, heap->id, ret);
+			ret = ret < 0 ? ret : -EINVAL;
+			goto out_unlock;
+		}
+	}
+
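+	/* Dynamically created heaps have no name; make one up for debugfs */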
+	if (!heap->name) {
+		heap->name = kasprintf(GFP_KERNEL, "heap%d", heap->id);
+		if (!heap->name) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
 	}
 
 	debug_file = debugfs_create_file(heap->name, 0664,
@@ -1467,6 +1632,7 @@ int ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
 		}
 	}
 	dev->heap_cnt++;
+out_unlock:
 	up_write(&dev->lock);
 	return 0;
 }
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index b09d751..e03e940 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -255,6 +255,9 @@ struct ion_heap {
 	wait_queue_head_t waitqueue;
 	struct task_struct *task;
 
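+	/* ordered fallback usage ids for ION_USAGE_ID_MAP pseudo heaps */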
+	unsigned int *fallbacks;
+	int fallback_cnt;
+
 	int (*debug_show)(struct ion_heap *heap, struct seq_file *, void *);
 };
 
@@ -381,6 +384,10 @@ size_t ion_heap_freelist_shrink(struct ion_heap *heap,
 size_t ion_heap_freelist_size(struct ion_heap *heap);
 
 
+int ion_map_usage_ids(struct ion_client *client,
+			unsigned int __user *usage_ids,
+			int cnt);
+
 /**
  * functions for creating and destroying the built in ion heaps.
  * architectures can add their own custom architecture specific
@@ -495,4 +502,12 @@ struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
 
 int ion_handle_put(struct ion_handle *handle);
 
+struct ion_handle *ion_alloc2(struct ion_client *client, size_t len,
+				size_t align, unsigned int usage_id,
+				unsigned int flags);
+
+int ion_query_heaps(struct ion_client *client,
+		struct ion_heap_data __user *buffer,
+		int cnt);
+
 #endif /* _ION_PRIV_H */
diff --git a/drivers/staging/android/uapi/ion.h b/drivers/staging/android/uapi/ion.h
index 145005f..d36b4e4 100644
--- a/drivers/staging/android/uapi/ion.h
+++ b/drivers/staging/android/uapi/ion.h
@@ -45,10 +45,17 @@ enum ion_heap_type {
 			       * must be last so device specific heaps always
 			       * are at the end of this enum
 			       */
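+	/* pseudo heap type for a dynamically created usage id mapping */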
+	ION_USAGE_ID_MAP,
 };
 
 #define ION_NUM_HEAP_IDS		(sizeof(unsigned int) * 8)
 
+/*
+ * This isn't completely random. The legacy Ion heap ID namespace only
+ * covers 32 ids, so pick a value just past that range for dynamic
+ * assignment.
+ */
+#define ION_DYNAMIC_HEAP_ASSIGN		33
+
 /**
  * allocation flags - the lower 16 bits are used by core ion, the upper 16
  * bits are reserved for use by the heaps themselves.
@@ -65,6 +72,11 @@ enum ion_heap_type {
 					 * caches must be managed
 					 * manually
 					 */
+#define ION_FLAG_NO_HANDLE 4		/*
+					 * Return only an fd associated with
+					 * this buffer and not a handle
+					 */
+
 
 /**
  * DOC: Ion Userspace API
@@ -142,6 +154,96 @@ struct ion_abi_version {
 	__u32 reserved;
 };
 
+/**
+ * struct ion_new_alloc_data - metadata to/from userspace allocation v2
+ * @len:		size of the allocation
+ * @align:		required alignment of the allocation
+ * @usage_id:		usage id of the heap to allocate from. Fallbacks
+ *			will be tried if the id maps to a fallback list
+ * @flags:		flags passed to the heap
+ * @handle:		populated with a cookie used to refer to this
+ *			allocation
+ * @fd:			populated instead of @handle when
+ *			ION_FLAG_NO_HANDLE is passed
+ */
+struct ion_new_alloc_data {
+	__u64 len;
+	__u64 align;
+	__u32 usage_id;
+	__u32 flags;
+	/*
+	 * With ION_FLAG_NO_HANDLE, only the fd is returned for callers
+	 * who want to skip the separate share step.
+	 */
+	union {
+		__u32 fd;
+		__u32 handle;
+	};
+	/*
+	 * Allocation arguments have a history of 'creep', so leave more
+	 * than enough reserved bits for future expansion.
+	 */
+	__u32 reserved0;
+	__u32 reserved1;
+	__u32 reserved2;
+};
+
+/**
+ * struct ion_usage_id_map - metadata to create a new usage id mapping
+ * @usage_ids:	userspace allocated array of existing usage ids
+ * @cnt:	number of ids to be mapped
+ * @new_id:	populated with the new usage id on success
+ */
+struct ion_usage_id_map {
+	/* Array of usage ids provided by user */
+	__u64 usage_ids;
+	__u32 cnt;
+
+	/* Returned on success */
+	__u32 new_id;
+	/* Fields for future growth */
+	__u32 reserved0;
+	__u32 reserved1;
+};
+
+/**
+ * struct ion_usage_cnt - metadata for the count of heaps
+ * @cnt:	returned count of heaps currently present
+ */
+struct ion_usage_cnt {
+	__u32 cnt; /* cnt returned */
+	__u32 reserved;
+};
+
+/**
+ * struct ion_heap_data - data about a heap
+ * @name:	first 32 characters of the heap name
+ * @type:	heap type
+ * @usage_id:	usage id for the heap
+ */
+struct ion_heap_data {
+	char name[32];
+	__u32 type;
+	__u32 usage_id;
+	__u32 reserved0;
+	__u32 reserved1;
+	__u32 reserved2;
+};
+
+/**
+ * struct ion_heap_query - collection of data about all heaps
+ * @cnt:	total number of heaps to be copied
+ * @heaps:	buffer to copy heap data into
+ */
+struct ion_heap_query {
+	__u32 cnt; /* Total number of heaps to be copied */
+	__u64 heaps; /* buffer to be populated */
+	__u32 reserved0;
+	__u32 reserved1;
+	__u32 reserved2;
+};
+
+
 #define ION_IOC_MAGIC		'I'
 
 /**
@@ -216,5 +318,38 @@ struct ion_abi_version {
 #define ION_IOC_ABI_VERSION    _IOR(ION_IOC_MAGIC, 8, \
 					struct ion_abi_version)
 
+/**
+ * DOC: ION_IOC_ALLOC2 - allocate memory via new API
+ *
+ * Same concept as ION_IOC_ALLOC except with an updated ABI. Can return an
+ * fd directly instead of a handle when ION_FLAG_NO_HANDLE is passed.
+ */
+#define ION_IOC_ALLOC2           _IOWR(ION_IOC_MAGIC, 9, \
+					struct ion_new_alloc_data)
+
+/**
+ * DOC: ION_IOC_ID_MAP - remap an array of heap IDs
+ *
+ * Takes an ion_usage_id_map structure populated with an ordered list of
+ * fallback usage ids. Returns a new usage id for use in allocation.
+ */
+#define ION_IOC_ID_MAP         _IOWR(ION_IOC_MAGIC, 10, \
+					struct ion_usage_id_map)
+/**
+ * DOC: ION_IOC_USAGE_CNT - return the number of usage ids
+ *
+ * Takes an ion_usage_cnt structure and returns the total number of usage
+ * ids available.
+ */
+#define ION_IOC_USAGE_CNT      _IOR(ION_IOC_MAGIC, 11, \
+					struct ion_usage_cnt)
+/**
+ * DOC: ION_IOC_HEAP_QUERY - information about available heaps
+ *
+ * Takes an ion_heap_query structure and populates information about
+ * available Ion heaps.
+ */
+#define ION_IOC_HEAP_QUERY     _IOWR(ION_IOC_MAGIC, 12, \
+					struct ion_heap_query)
 
 #endif /* _UAPI_LINUX_ION_H */
-- 
2.5.5


