/*
 *
 * drivers/staging/android/ion/ion.c
 *
 * Copyright (C) 2011 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/device.h>
#include <linux/err.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/miscdevice.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/rbtree.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/dma-buf.h>
#include <linux/idr.h>

#include "ion.h"
#include "ion_priv.h"
#include "compat_ion.h"

/**
 * struct ion_device - the metadata of the ion device node
 * @dev:		the actual misc device
 * @buffers:		an rb tree of all the existing buffers
 * @buffer_lock:	lock protecting the tree of buffers
 * @lock:		rwsem protecting the tree of heaps and clients
 * @heaps:		list of all the heaps in the system
 * @custom_ioctl:	hook for device-specific ioctls (ION_IOC_CUSTOM)
 * @clients:		an rb tree of all the clients created against this device
 * @debug_root:		root dentry of the ion debugfs hierarchy
 * @heaps_debug_root:	debugfs directory holding one entry per heap
 * @clients_debug_root:	debugfs directory holding one entry per client
 */
struct ion_device {
	struct miscdevice dev;
	struct rb_root buffers;
	struct mutex buffer_lock;
	struct rw_semaphore lock;
	struct plist_head heaps;
	long (*custom_ioctl)(struct ion_client *client, unsigned int cmd,
			     unsigned long arg);
	struct rb_root clients;
	struct dentry *debug_root;
	struct dentry *heaps_debug_root;
	struct dentry *clients_debug_root;
};

/**
 * struct ion_client - a process/hw block local address space
 * @node:		node in the tree of all clients
 * @dev:		backpointer to ion device
 * @handles:		an rb tree of all the handles in this client
 * @idr:		an idr space for allocating handle ids
 * @lock:		lock protecting the tree of handles
 * @name:		used for debugging
 * @display_name:	used for debugging (unique version of @name)
 * @display_serial:	used for debugging (to make display_name unique)
 * @task:		used for debugging
 * @pid:		pid of the task that created the client, used for debugging
 * @debug_root:		this client's entry under the clients debugfs directory
 *
 * A client represents a list of buffers this client may access.
 * The mutex stored here is used to protect both the handles tree
 * and the handles themselves; it must be held while modifying either.
 */
struct ion_client {
	struct rb_node node;
	struct ion_device *dev;
	struct rb_root handles;
	struct idr idr;
	struct mutex lock;
	const char *name;
	char *display_name;
	int display_serial;
	struct task_struct *task;
	pid_t pid;
	struct dentry *debug_root;
};

/**
 * struct ion_handle - a client local reference to a buffer
 * @ref:		reference count
 * @client:		back pointer to the client the buffer resides in
 * @buffer:		pointer to the buffer
 * @node:		node in the client's handle rbtree
 * @kmap_cnt:		count of times this client has mapped to kernel
 * @id:			client-unique id allocated by client->idr
 *
 * Modifications to node and kmap_cnt must be protected by the lock in
 * the client.  Other fields are never changed after initialization.
 */
struct ion_handle {
	struct kref ref;
	struct ion_client *client;
	struct ion_buffer *buffer;
	struct rb_node node;
	unsigned int kmap_cnt;
	int id;
};

bool ion_buffer_fault_user_mappings(struct ion_buffer *buffer)
{
	return (buffer->flags & ION_FLAG_CACHED) &&
		!(buffer->flags & ION_FLAG_CACHED_NEEDS_SYNC);
}

bool ion_buffer_cached(struct ion_buffer *buffer)
{
	return !!(buffer->flags & ION_FLAG_CACHED);
}

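/*
 * For buffers whose user mappings are set up lazily via the fault handler,
 * buffer->pages[] holds one struct page pointer per PAGE_SIZE chunk of the
 * buffer.  Page pointers are always at least word aligned, so the low bit of
 * each entry is borrowed as a per-page "dirty" flag: it is set when the page
 * is faulted into a userspace mapping and cleared once the page has been
 * synced for the device.  The helpers below hide that tagging.
 */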
static inline struct page *ion_buffer_page(struct page *page)
{
	return (struct page *)((unsigned long)page & ~(1UL));
}

static inline bool ion_buffer_page_is_dirty(struct page *page)
{
	return !!((unsigned long)page & 1UL);
}

static inline void ion_buffer_page_dirty(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) | 1UL);
}

static inline void ion_buffer_page_clean(struct page **page)
{
	*page = (struct page *)((unsigned long)(*page) & ~(1UL));
}

/* this function should only be called while dev->buffer_lock is held */
static void ion_buffer_add(struct ion_device *dev,
			   struct ion_buffer *buffer)
{
	struct rb_node **p = &dev->buffers.rb_node;
	struct rb_node *parent = NULL;
	struct ion_buffer *entry;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_buffer, node);

		if (buffer < entry) {
			p = &(*p)->rb_left;
		} else if (buffer > entry) {
			p = &(*p)->rb_right;
		} else {
			pr_err("%s: buffer already found.\n", __func__);
			BUG();
		}
	}

	rb_link_node(&buffer->node, parent, p);
	rb_insert_color(&buffer->node, &dev->buffers);
}

/* this function should only be called while dev->lock is held */
static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
				     struct ion_device *dev,
				     unsigned long len,
				     unsigned long align,
				     unsigned long flags)
{
	struct ion_buffer *buffer;
	struct sg_table *table;
	struct scatterlist *sg;
	int i, ret;

	buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
	if (!buffer)
		return ERR_PTR(-ENOMEM);

	buffer->heap = heap;
	buffer->flags = flags;
	kref_init(&buffer->ref);

	ret = heap->ops->allocate(heap, buffer, len, align, flags);

	if (ret) {
		if (!(heap->flags & ION_HEAP_FLAG_DEFER_FREE))
			goto err2;

		ion_heap_freelist_drain(heap, 0);
		ret = heap->ops->allocate(heap, buffer, len, align,
					  flags);
		if (ret)
			goto err2;
	}

	buffer->dev = dev;
	buffer->size = len;

	table = heap->ops->map_dma(heap, buffer);
	if (WARN_ONCE(table == NULL,
			"heap->ops->map_dma should return ERR_PTR on error"))
		table = ERR_PTR(-EINVAL);
	if (IS_ERR(table)) {
		heap->ops->free(buffer);
		kfree(buffer);
		return ERR_CAST(table);
	}
	buffer->sg_table = table;
	if (ion_buffer_fault_user_mappings(buffer)) {
		int num_pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
		struct scatterlist *sg;
		int i, j, k = 0;

		buffer->pages = vmalloc(sizeof(struct page *) * num_pages);
		if (!buffer->pages) {
			ret = -ENOMEM;
			goto err;
		}

		for_each_sg(table->sgl, sg, table->nents, i) {
			struct page *page = sg_page(sg);

			for (j = 0; j < sg->length / PAGE_SIZE; j++)
				buffer->pages[k++] = page++;
		}
	}

	INIT_LIST_HEAD(&buffer->vmas);
	mutex_init(&buffer->lock);
	/*
	 * this will set up dma addresses for the sglist -- it is not
	 * technically correct as per the dma api -- a specific
	 * device isn't really taking ownership here.  However, in practice on
	 * our systems the only dma_address space is physical addresses.
	 * Additionally, we can't afford the overhead of invalidating every
	 * allocation via dma_map_sg.  The implicit contract here is that
	 * memory coming from the heaps is ready for dma, ie if it has a
	 * cached mapping that mapping has been invalidated
	 */
	for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
		sg_dma_address(sg) = sg_phys(sg);
	mutex_lock(&dev->buffer_lock);
	ion_buffer_add(dev, buffer);
	mutex_unlock(&dev->buffer_lock);
	return buffer;

err:
	heap->ops->unmap_dma(heap, buffer);
	heap->ops->free(buffer);
	vfree(buffer->pages);
err2:
	kfree(buffer);
	return ERR_PTR(ret);
}

void ion_buffer_destroy(struct ion_buffer *buffer)
{
	if (WARN_ON(buffer->kmap_cnt > 0))
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
	buffer->heap->ops->unmap_dma(buffer->heap, buffer);
	buffer->heap->ops->free(buffer);
	vfree(buffer->pages);
	kfree(buffer);
}

static void _ion_buffer_destroy(struct kref *kref)
{
	struct ion_buffer *buffer = container_of(kref, struct ion_buffer, ref);
	struct ion_heap *heap = buffer->heap;
	struct ion_device *dev = buffer->dev;

	mutex_lock(&dev->buffer_lock);
	rb_erase(&buffer->node, &dev->buffers);
	mutex_unlock(&dev->buffer_lock);

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_freelist_add(heap, buffer);
	else
		ion_buffer_destroy(buffer);
}

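/*
 * Buffer lifetime is driven by buffer->ref.  A reference is held by each
 * handle that wraps the buffer and by each exported dma-buf; when the last
 * reference is dropped the buffer is either freed immediately or handed to
 * the heap's deferred-free list (see _ion_buffer_destroy above).
 */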
static void ion_buffer_get(struct ion_buffer *buffer)
{
	kref_get(&buffer->ref);
}

static int ion_buffer_put(struct ion_buffer *buffer)
{
	return kref_put(&buffer->ref, _ion_buffer_destroy);
}

static void ion_buffer_add_to_handle(struct ion_buffer *buffer)
{
	mutex_lock(&buffer->lock);
	buffer->handle_count++;
	mutex_unlock(&buffer->lock);
}

static void ion_buffer_remove_from_handle(struct ion_buffer *buffer)
{
	/*
	 * when a buffer is removed from a handle, if it is not in
	 * any other handles, copy the taskcomm and the pid of the
	 * process it's being removed from into the buffer.  At this
	 * point there will be no way to track what processes this buffer is
	 * being used by, since it only exists as a dma_buf file descriptor.
	 * The taskcomm and pid can provide a debug hint as to where this fd
	 * is in the system.
	 */
	mutex_lock(&buffer->lock);
	buffer->handle_count--;
	BUG_ON(buffer->handle_count < 0);
	if (!buffer->handle_count) {
		struct task_struct *task;

		task = current->group_leader;
		get_task_comm(buffer->task_comm, task);
		buffer->pid = task_pid_nr(task);
	}
	mutex_unlock(&buffer->lock);
}

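/*
 * A handle is a client-local, refcounted wrapper around a buffer.  It is
 * indexed twice: by buffer in client->handles (so importing an already known
 * buffer can reuse the existing handle) and by id in client->idr (the id is
 * what userspace sees).  Creating a handle takes a buffer reference and bumps
 * the buffer's handle_count for the debug accounting above.
 */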
static struct ion_handle *ion_handle_create(struct ion_client *client,
				     struct ion_buffer *buffer)
{
	struct ion_handle *handle;

	handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
	if (!handle)
		return ERR_PTR(-ENOMEM);
	kref_init(&handle->ref);
	RB_CLEAR_NODE(&handle->node);
	handle->client = client;
	ion_buffer_get(buffer);
	ion_buffer_add_to_handle(buffer);
	handle->buffer = buffer;

	return handle;
}

static void ion_handle_kmap_put(struct ion_handle *);

static void ion_handle_destroy(struct kref *kref)
{
	struct ion_handle *handle = container_of(kref, struct ion_handle, ref);
	struct ion_client *client = handle->client;
	struct ion_buffer *buffer = handle->buffer;

	mutex_lock(&buffer->lock);
	while (handle->kmap_cnt)
		ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);

	idr_remove(&client->idr, handle->id);
	if (!RB_EMPTY_NODE(&handle->node))
		rb_erase(&handle->node, &client->handles);

	ion_buffer_remove_from_handle(buffer);
	ion_buffer_put(buffer);

	kfree(handle);
}

struct ion_buffer *ion_handle_buffer(struct ion_handle *handle)
{
	return handle->buffer;
}

static void ion_handle_get(struct ion_handle *handle)
{
	kref_get(&handle->ref);
}

static int ion_handle_put(struct ion_handle *handle)
{
	struct ion_client *client = handle->client;
	int ret;

	mutex_lock(&client->lock);
	ret = kref_put(&handle->ref, ion_handle_destroy);
	mutex_unlock(&client->lock);

	return ret;
}

static struct ion_handle *ion_handle_lookup(struct ion_client *client,
					    struct ion_buffer *buffer)
{
	struct rb_node *n = client->handles.rb_node;

	while (n) {
		struct ion_handle *entry = rb_entry(n, struct ion_handle, node);

		if (buffer < entry->buffer)
			n = n->rb_left;
		else if (buffer > entry->buffer)
			n = n->rb_right;
		else
			return entry;
	}
	return ERR_PTR(-EINVAL);
}

static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
						int id)
{
	struct ion_handle *handle;

	mutex_lock(&client->lock);
	handle = idr_find(&client->idr, id);
	if (handle)
		ion_handle_get(handle);
	mutex_unlock(&client->lock);

	return handle ? handle : ERR_PTR(-EINVAL);
}

static bool ion_handle_validate(struct ion_client *client,
				struct ion_handle *handle)
{
	WARN_ON(!mutex_is_locked(&client->lock));
	return idr_find(&client->idr, handle->id) == handle;
}

static int ion_handle_add(struct ion_client *client, struct ion_handle *handle)
{
	int id;
	struct rb_node **p = &client->handles.rb_node;
	struct rb_node *parent = NULL;
	struct ion_handle *entry;

	id = idr_alloc(&client->idr, handle, 1, 0, GFP_KERNEL);
	if (id < 0)
		return id;

	handle->id = id;

	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_handle, node);

		if (handle->buffer < entry->buffer)
			p = &(*p)->rb_left;
		else if (handle->buffer > entry->buffer)
			p = &(*p)->rb_right;
		else
			WARN(1, "%s: buffer already found.", __func__);
	}

	rb_link_node(&handle->node, parent, p);
	rb_insert_color(&handle->node, &client->handles);

	return 0;
}

struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
			     size_t align, unsigned int heap_id_mask,
			     unsigned int flags)
{
	struct ion_handle *handle;
	struct ion_device *dev = client->dev;
	struct ion_buffer *buffer = NULL;
	struct ion_heap *heap;
	int ret;

	pr_debug("%s: len %zu align %zu heap_id_mask %u flags %x\n", __func__,
		 len, align, heap_id_mask, flags);
	/*
	 * traverse the list of heaps available in this system in priority
	 * order.  If the heap type is supported by the client, and matches the
	 * request of the caller, allocate from it.  Repeat until allocate has
	 * succeeded or all heaps have been tried.
	 */
	len = PAGE_ALIGN(len);

	if (!len)
		return ERR_PTR(-EINVAL);

	down_read(&dev->lock);
	plist_for_each_entry(heap, &dev->heaps, node) {
		/* if the caller didn't specify this heap id */
		if (!((1 << heap->id) & heap_id_mask))
			continue;
		buffer = ion_buffer_create(heap, dev, len, align, flags);
		if (!IS_ERR(buffer))
			break;
	}
	up_read(&dev->lock);

	if (buffer == NULL)
		return ERR_PTR(-ENODEV);

	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	handle = ion_handle_create(client, buffer);

	/*
	 * ion_buffer_create will create a buffer with a ref_cnt of 1,
	 * and ion_handle_create will take a second reference, drop one here
	 */
	ion_buffer_put(buffer);

	if (IS_ERR(handle))
		return handle;

	mutex_lock(&client->lock);
	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

	return handle;
}
EXPORT_SYMBOL(ion_alloc);
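
/*
 * Example of a typical in-kernel user (illustrative sketch only -- the ion
 * device, heap mask, flags and error handling depend on the platform and
 * caller):
 *
 *	struct ion_client *client;
 *	struct ion_handle *handle;
 *	void *vaddr;
 *
 *	client = ion_client_create(idev, "my-driver");
 *	handle = ion_alloc(client, SZ_1M, 0, ION_HEAP_SYSTEM_MASK,
 *			   ION_FLAG_CACHED);
 *	vaddr = ion_map_kernel(client, handle);
 *	...
 *	ion_unmap_kernel(client, handle);
 *	ion_free(client, handle);
 *	ion_client_destroy(client);
 */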

void ion_free(struct ion_client *client, struct ion_handle *handle)
{
	bool valid_handle;

	BUG_ON(client != handle->client);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);

	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to free.\n", __func__);
		mutex_unlock(&client->lock);
		return;
	}
	mutex_unlock(&client->lock);
	ion_handle_put(handle);
}
EXPORT_SYMBOL(ion_free);

int ion_phys(struct ion_client *client, struct ion_handle *handle,
	     ion_phys_addr_t *addr, size_t *len)
{
	struct ion_buffer *buffer;
	int ret;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		mutex_unlock(&client->lock);
		return -EINVAL;
	}

	buffer = handle->buffer;

	if (!buffer->heap->ops->phys) {
		pr_err("%s: ion_phys is not implemented by this heap (name=%s, type=%d).\n",
			__func__, buffer->heap->name, buffer->heap->type);
		mutex_unlock(&client->lock);
		return -ENODEV;
	}
	mutex_unlock(&client->lock);
	ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
	return ret;
}
EXPORT_SYMBOL(ion_phys);

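/*
 * Kernel mappings are refcounted at two levels: handle->kmap_cnt tracks how
 * many times this client has mapped the buffer, and buffer->kmap_cnt how many
 * handles (across all clients) currently hold a kernel mapping.  The heap's
 * map_kernel/unmap_kernel ops are only called on the 0 <-> 1 transitions of
 * the buffer count, so repeated maps are cheap and the mapping is only torn
 * down when the last user unmaps.
 */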
static void *ion_buffer_kmap_get(struct ion_buffer *buffer)
{
	void *vaddr;

	if (buffer->kmap_cnt) {
		buffer->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = buffer->heap->ops->map_kernel(buffer->heap, buffer);
	if (WARN_ONCE(vaddr == NULL,
			"heap->ops->map_kernel should return ERR_PTR on error"))
		return ERR_PTR(-EINVAL);
	if (IS_ERR(vaddr))
		return vaddr;
	buffer->vaddr = vaddr;
	buffer->kmap_cnt++;
	return vaddr;
}

static void *ion_handle_kmap_get(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;
	void *vaddr;

	if (handle->kmap_cnt) {
		handle->kmap_cnt++;
		return buffer->vaddr;
	}
	vaddr = ion_buffer_kmap_get(buffer);
	if (IS_ERR(vaddr))
		return vaddr;
	handle->kmap_cnt++;
	return vaddr;
}

static void ion_buffer_kmap_put(struct ion_buffer *buffer)
{
	buffer->kmap_cnt--;
	if (!buffer->kmap_cnt) {
		buffer->heap->ops->unmap_kernel(buffer->heap, buffer);
		buffer->vaddr = NULL;
	}
}

static void ion_handle_kmap_put(struct ion_handle *handle)
{
	struct ion_buffer *buffer = handle->buffer;

	if (!handle->kmap_cnt) {
		WARN(1, "%s: Double unmap detected! bailing...\n", __func__);
		return;
	}
	handle->kmap_cnt--;
	if (!handle->kmap_cnt)
		ion_buffer_kmap_put(buffer);
}

void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	void *vaddr;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_kernel.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}

	buffer = handle->buffer;

	if (!handle->buffer->heap->ops->map_kernel) {
		pr_err("%s: map_kernel is not implemented by this heap.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-ENODEV);
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_handle_kmap_get(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
	return vaddr;
}
EXPORT_SYMBOL(ion_map_kernel);

void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
	struct ion_buffer *buffer;

	mutex_lock(&client->lock);
	buffer = handle->buffer;
	mutex_lock(&buffer->lock);
	ion_handle_kmap_put(handle);
	mutex_unlock(&buffer->lock);
	mutex_unlock(&client->lock);
}
EXPORT_SYMBOL(ion_unmap_kernel);

static int ion_debug_client_show(struct seq_file *s, void *unused)
{
	struct ion_client *client = s->private;
	struct rb_node *n;
	size_t sizes[ION_NUM_HEAP_IDS] = {0};
	const char *names[ION_NUM_HEAP_IDS] = {NULL};
	int i;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		unsigned int id = handle->buffer->heap->id;

		if (!names[id])
			names[id] = handle->buffer->heap->name;
		sizes[id] += handle->buffer->size;
	}
	mutex_unlock(&client->lock);

	seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
	for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
		if (!names[i])
			continue;
		seq_printf(s, "%16.16s: %16zu\n", names[i], sizes[i]);
	}
	return 0;
}

static int ion_debug_client_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_client_show, inode->i_private);
}

static const struct file_operations debug_client_fops = {
	.open = ion_debug_client_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int ion_get_client_serial(const struct rb_root *root,
					const unsigned char *name)
{
	int serial = -1;
	struct rb_node *node;

	for (node = rb_first(root); node; node = rb_next(node)) {
		struct ion_client *client = rb_entry(node, struct ion_client,
						node);

		if (strcmp(client->name, name))
			continue;
		serial = max(serial, client->display_serial);
	}
	return serial + 1;
}

struct ion_client *ion_client_create(struct ion_device *dev,
				     const char *name)
{
	struct ion_client *client;
	struct task_struct *task;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct ion_client *entry;
	pid_t pid;

	if (!name) {
		pr_err("%s: Name cannot be null\n", __func__);
		return ERR_PTR(-EINVAL);
	}

	get_task_struct(current->group_leader);
	task_lock(current->group_leader);
	pid = task_pid_nr(current->group_leader);
	/*
	 * don't bother to store task struct for kernel threads,
	 * they can't be killed anyway
	 */
	if (current->group_leader->flags & PF_KTHREAD) {
		put_task_struct(current->group_leader);
		task = NULL;
	} else {
		task = current->group_leader;
	}
	task_unlock(current->group_leader);

	client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
	if (!client)
		goto err_put_task_struct;

	client->dev = dev;
	client->handles = RB_ROOT;
	idr_init(&client->idr);
	mutex_init(&client->lock);
	client->task = task;
	client->pid = pid;
	client->name = kstrdup(name, GFP_KERNEL);
	if (!client->name)
		goto err_free_client;

	down_write(&dev->lock);
	client->display_serial = ion_get_client_serial(&dev->clients, name);
	client->display_name = kasprintf(
		GFP_KERNEL, "%s-%d", name, client->display_serial);
	if (!client->display_name) {
		up_write(&dev->lock);
		goto err_free_client_name;
	}
	p = &dev->clients.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct ion_client, node);

		if (client < entry)
			p = &(*p)->rb_left;
		else if (client > entry)
			p = &(*p)->rb_right;
	}
	rb_link_node(&client->node, parent, p);
	rb_insert_color(&client->node, &dev->clients);

	client->debug_root = debugfs_create_file(client->display_name, 0664,
						dev->clients_debug_root,
						client, &debug_client_fops);
	if (!client->debug_root) {
		char buf[256], *path;

		path = dentry_path(dev->clients_debug_root, buf, 256);
		pr_err("Failed to create client debugfs at %s/%s\n",
			path, client->display_name);
	}

	up_write(&dev->lock);

	return client;

err_free_client_name:
	kfree(client->name);
err_free_client:
	kfree(client);
err_put_task_struct:
	if (task)
		put_task_struct(current->group_leader);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(ion_client_create);

void ion_client_destroy(struct ion_client *client)
{
	struct ion_device *dev = client->dev;
	struct rb_node *n;

	pr_debug("%s: %d\n", __func__, __LINE__);
	while ((n = rb_first(&client->handles))) {
		struct ion_handle *handle = rb_entry(n, struct ion_handle,
						     node);
		ion_handle_destroy(&handle->ref);
	}

	idr_destroy(&client->idr);

	down_write(&dev->lock);
	if (client->task)
		put_task_struct(client->task);
	rb_erase(&client->node, &dev->clients);
	debugfs_remove_recursive(client->debug_root);
	up_write(&dev->lock);

	kfree(client->display_name);
	kfree(client->name);
	kfree(client);
}
EXPORT_SYMBOL(ion_client_destroy);

struct sg_table *ion_sg_table(struct ion_client *client,
			      struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct sg_table *table;

	mutex_lock(&client->lock);
	if (!ion_handle_validate(client, handle)) {
		pr_err("%s: invalid handle passed to map_dma.\n",
		       __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	table = buffer->sg_table;
	mutex_unlock(&client->lock);
	return table;
}
EXPORT_SYMBOL(ion_sg_table);

static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction direction);

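/*
 * dma-buf attachment callbacks.  Ion keeps a single sg_table per buffer
 * (built at allocation time), so map_dma_buf just hands that table out after
 * syncing any CPU-dirtied pages for the device; unmap is therefore a no-op.
 */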
static struct sg_table *ion_map_dma_buf(struct dma_buf_attachment *attachment,
					enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attachment->dmabuf;
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_sync_for_device(buffer, attachment->dev, direction);
	return buffer->sg_table;
}

static void ion_unmap_dma_buf(struct dma_buf_attachment *attachment,
			      struct sg_table *table,
			      enum dma_data_direction direction)
{
}

void ion_pages_sync_for_device(struct device *dev, struct page *page,
		size_t size, enum dma_data_direction dir)
{
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, size, 0);
	/*
	 * This is not correct - sg_dma_address needs a dma_addr_t that is valid
	 * for the targeted device, but this works on the currently targeted
	 * hardware.
	 */
	sg_dma_address(&sg) = page_to_phys(page);
	dma_sync_sg_for_device(dev, &sg, 1, dir);
}

struct ion_vma_list {
	struct list_head list;
	struct vm_area_struct *vma;
};

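/*
 * Push CPU writes out before the buffer is used by a device: sync every page
 * that was dirtied through a faulting user mapping, then zap the user
 * mappings so the next CPU access faults again and re-marks its page dirty.
 */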
static void ion_buffer_sync_for_device(struct ion_buffer *buffer,
				       struct device *dev,
				       enum dma_data_direction dir)
{
	struct ion_vma_list *vma_list;
	int pages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	int i;

	pr_debug("%s: syncing for device %s\n", __func__,
		 dev ? dev_name(dev) : "null");

	if (!ion_buffer_fault_user_mappings(buffer))
		return;

	mutex_lock(&buffer->lock);
	for (i = 0; i < pages; i++) {
		struct page *page = buffer->pages[i];

		if (ion_buffer_page_is_dirty(page))
			ion_pages_sync_for_device(dev, ion_buffer_page(page),
							PAGE_SIZE, dir);

		ion_buffer_page_clean(buffer->pages + i);
	}
	list_for_each_entry(vma_list, &buffer->vmas, list) {
		struct vm_area_struct *vma = vma_list->vma;

		zap_page_range(vma, vma->vm_start, vma->vm_end - vma->vm_start,
			       NULL);
	}
	mutex_unlock(&buffer->lock);
}

static int ion_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	unsigned long pfn;
	int ret;

	mutex_lock(&buffer->lock);
	ion_buffer_page_dirty(buffer->pages + vmf->pgoff);
	BUG_ON(!buffer->pages || !buffer->pages[vmf->pgoff]);

	pfn = page_to_pfn(ion_buffer_page(buffer->pages[vmf->pgoff]));
	ret = vm_insert_pfn(vma, (unsigned long)vmf->virtual_address, pfn);
	mutex_unlock(&buffer->lock);
	if (ret)
		return VM_FAULT_ERROR;

	return VM_FAULT_NOPAGE;
}

static void ion_vm_open(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list;

	vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
	if (!vma_list)
		return;
	vma_list->vma = vma;
	mutex_lock(&buffer->lock);
	list_add(&vma_list->list, &buffer->vmas);
	mutex_unlock(&buffer->lock);
	pr_debug("%s: adding %p\n", __func__, vma);
}

static void ion_vm_close(struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = vma->vm_private_data;
	struct ion_vma_list *vma_list, *tmp;

	pr_debug("%s\n", __func__);
	mutex_lock(&buffer->lock);
	list_for_each_entry_safe(vma_list, tmp, &buffer->vmas, list) {
		if (vma_list->vma != vma)
			continue;
		list_del(&vma_list->list);
		kfree(vma_list);
		pr_debug("%s: deleting %p\n", __func__, vma);
		break;
	}
	mutex_unlock(&buffer->lock);
}

static struct vm_operations_struct ion_vma_ops = {
	.open = ion_vm_open,
	.close = ion_vm_close,
	.fault = ion_vm_fault,
};

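/*
 * Two mmap strategies: buffers that need fault-time cache tracking get an
 * empty PFN mapping backed by ion_vm_fault above, everything else is mapped
 * up front by the heap's map_user op (write-combined unless ION_FLAG_CACHED
 * was set at allocation time).
 */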
static int ion_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
	struct ion_buffer *buffer = dmabuf->priv;
	int ret = 0;

	if (!buffer->heap->ops->map_user) {
		pr_err("%s: this heap does not define a method for mapping to userspace\n",
			__func__);
		return -EINVAL;
	}

	if (ion_buffer_fault_user_mappings(buffer)) {
		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND |
							VM_DONTDUMP;
		vma->vm_private_data = buffer;
		vma->vm_ops = &ion_vma_ops;
		ion_vm_open(vma);
		return 0;
	}

	if (!(buffer->flags & ION_FLAG_CACHED))
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

	mutex_lock(&buffer->lock);
	/* now map it to userspace */
	ret = buffer->heap->ops->map_user(buffer->heap, buffer, vma);
	mutex_unlock(&buffer->lock);

	if (ret)
		pr_err("%s: failure mapping buffer to userspace\n",
		       __func__);

	return ret;
}

static void ion_dma_buf_release(struct dma_buf *dmabuf)
{
	struct ion_buffer *buffer = dmabuf->priv;

	ion_buffer_put(buffer);
}

static void *ion_dma_buf_kmap(struct dma_buf *dmabuf, unsigned long offset)
{
	struct ion_buffer *buffer = dmabuf->priv;

	return buffer->vaddr + offset * PAGE_SIZE;
}

static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
			       void *ptr)
{
}

static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
					size_t len,
					enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;
	void *vaddr;

	if (!buffer->heap->ops->map_kernel) {
		pr_err("%s: map kernel is not implemented by this heap.\n",
		       __func__);
		return -ENODEV;
	}

	mutex_lock(&buffer->lock);
	vaddr = ion_buffer_kmap_get(buffer);
	mutex_unlock(&buffer->lock);
	return PTR_ERR_OR_ZERO(vaddr);
}

static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
				       size_t len,
				       enum dma_data_direction direction)
{
	struct ion_buffer *buffer = dmabuf->priv;

	mutex_lock(&buffer->lock);
	ion_buffer_kmap_put(buffer);
	mutex_unlock(&buffer->lock);
}

static struct dma_buf_ops dma_buf_ops = {
	.map_dma_buf = ion_map_dma_buf,
	.unmap_dma_buf = ion_unmap_dma_buf,
	.mmap = ion_mmap,
	.release = ion_dma_buf_release,
	.begin_cpu_access = ion_dma_buf_begin_cpu_access,
	.end_cpu_access = ion_dma_buf_end_cpu_access,
	.kmap_atomic = ion_dma_buf_kmap,
	.kunmap_atomic = ion_dma_buf_kunmap,
	.kmap = ion_dma_buf_kmap,
	.kunmap = ion_dma_buf_kunmap,
};

struct dma_buf *ion_share_dma_buf(struct ion_client *client,
						struct ion_handle *handle)
{
	struct ion_buffer *buffer;
	struct dma_buf *dmabuf;
	bool valid_handle;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	mutex_lock(&client->lock);
	valid_handle = ion_handle_validate(client, handle);
	if (!valid_handle) {
		WARN(1, "%s: invalid handle passed to share.\n", __func__);
		mutex_unlock(&client->lock);
		return ERR_PTR(-EINVAL);
	}
	buffer = handle->buffer;
	ion_buffer_get(buffer);
	mutex_unlock(&client->lock);

	exp_info.ops = &dma_buf_ops;
	exp_info.size = buffer->size;
	exp_info.flags = O_RDWR;
	exp_info.priv = buffer;

	dmabuf = dma_buf_export(&exp_info);
	if (IS_ERR(dmabuf)) {
		ion_buffer_put(buffer);
		return dmabuf;
	}

	return dmabuf;
}
EXPORT_SYMBOL(ion_share_dma_buf);

int ion_share_dma_buf_fd(struct ion_client *client, struct ion_handle *handle)
{
	struct dma_buf *dmabuf;
	int fd;

	dmabuf = ion_share_dma_buf(client, handle);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	fd = dma_buf_fd(dmabuf, O_CLOEXEC);
	if (fd < 0)
		dma_buf_put(dmabuf);

	return fd;
}
EXPORT_SYMBOL(ion_share_dma_buf_fd);

struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;
	struct ion_handle *handle;
	int ret;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return ERR_CAST(dmabuf);
	/* if this memory came from ion */

	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not import dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return ERR_PTR(-EINVAL);
	}
	buffer = dmabuf->priv;

	mutex_lock(&client->lock);
	/* if a handle exists for this buffer just take a reference to it */
	handle = ion_handle_lookup(client, buffer);
	if (!IS_ERR(handle)) {
		ion_handle_get(handle);
		mutex_unlock(&client->lock);
		goto end;
	}

	handle = ion_handle_create(client, buffer);
	if (IS_ERR(handle)) {
		mutex_unlock(&client->lock);
		goto end;
	}

	ret = ion_handle_add(client, handle);
	mutex_unlock(&client->lock);
	if (ret) {
		ion_handle_put(handle);
		handle = ERR_PTR(ret);
	}

end:
	dma_buf_put(dmabuf);
	return handle;
}
EXPORT_SYMBOL(ion_import_dma_buf);

static int ion_sync_for_device(struct ion_client *client, int fd)
{
	struct dma_buf *dmabuf;
	struct ion_buffer *buffer;

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	/* if this memory came from ion */
	if (dmabuf->ops != &dma_buf_ops) {
		pr_err("%s: can not sync dmabuf from another exporter\n",
		       __func__);
		dma_buf_put(dmabuf);
		return -EINVAL;
	}
	buffer = dmabuf->priv;

	dma_sync_sg_for_device(NULL, buffer->sg_table->sgl,
			       buffer->sg_table->nents, DMA_BIDIRECTIONAL);
	dma_buf_put(dmabuf);
	return 0;
}

/* fix up the cases where the ioctl direction bits are incorrect */
static unsigned int ion_ioctl_dir(unsigned int cmd)
{
	switch (cmd) {
	case ION_IOC_SYNC:
	case ION_IOC_FREE:
	case ION_IOC_CUSTOM:
		return _IOC_WRITE;
	default:
		return _IOC_DIR(cmd);
	}
}

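/*
 * Single entry point for the ion character device: the argument struct is
 * copied in for _IOC_WRITE commands, the command is dispatched, and the
 * (possibly updated) struct is copied back out for _IOC_READ commands.  A
 * handle allocated by ION_IOC_ALLOC is remembered in cleanup_handle so it
 * can be released if the final copy_to_user fails.
 */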
static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct ion_client *client = filp->private_data;
	struct ion_device *dev = client->dev;
	struct ion_handle *cleanup_handle = NULL;
	int ret = 0;
	unsigned int dir;

	union {
		struct ion_fd_data fd;
		struct ion_allocation_data allocation;
		struct ion_handle_data handle;
		struct ion_custom_data custom;
	} data;

	dir = ion_ioctl_dir(cmd);

	if (_IOC_SIZE(cmd) > sizeof(data))
		return -EINVAL;

	if (dir & _IOC_WRITE)
		if (copy_from_user(&data, (void __user *)arg, _IOC_SIZE(cmd)))
			return -EFAULT;

	switch (cmd) {
	case ION_IOC_ALLOC:
	{
		struct ion_handle *handle;

		handle = ion_alloc(client, data.allocation.len,
						data.allocation.align,
						data.allocation.heap_id_mask,
						data.allocation.flags);
		if (IS_ERR(handle))
			return PTR_ERR(handle);

		data.allocation.handle = handle->id;

		cleanup_handle = handle;
		break;
	}
	case ION_IOC_FREE:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		ion_free(client, handle);
		ion_handle_put(handle);
		break;
	}
	case ION_IOC_SHARE:
	case ION_IOC_MAP:
	{
		struct ion_handle *handle;

		handle = ion_handle_get_by_id(client, data.handle.handle);
		if (IS_ERR(handle))
			return PTR_ERR(handle);
		data.fd.fd = ion_share_dma_buf_fd(client, handle);
		ion_handle_put(handle);
		if (data.fd.fd < 0)
			ret = data.fd.fd;
		break;
	}
	case ION_IOC_IMPORT:
	{
		struct ion_handle *handle;

		handle = ion_import_dma_buf(client, data.fd.fd);
		if (IS_ERR(handle))
			ret = PTR_ERR(handle);
		else
			data.handle.handle = handle->id;
		break;
	}
	case ION_IOC_SYNC:
	{
		ret = ion_sync_for_device(client, data.fd.fd);
		break;
	}
	case ION_IOC_CUSTOM:
	{
		if (!dev->custom_ioctl)
			return -ENOTTY;
		ret = dev->custom_ioctl(client, data.custom.cmd,
						data.custom.arg);
		break;
	}
	default:
		return -ENOTTY;
	}

	if (dir & _IOC_READ) {
		if (copy_to_user((void __user *)arg, &data, _IOC_SIZE(cmd))) {
			if (cleanup_handle)
				ion_free(client, cleanup_handle);
			return -EFAULT;
		}
	}
	return ret;
}

static int ion_release(struct inode *inode, struct file *file)
{
	struct ion_client *client = file->private_data;

	pr_debug("%s: %d\n", __func__, __LINE__);
	ion_client_destroy(client);
	return 0;
}

static int ion_open(struct inode *inode, struct file *file)
{
	struct miscdevice *miscdev = file->private_data;
	struct ion_device *dev = container_of(miscdev, struct ion_device, dev);
	struct ion_client *client;
	char debug_name[64];

	pr_debug("%s: %d\n", __func__, __LINE__);
	snprintf(debug_name, 64, "%u", task_pid_nr(current->group_leader));
	client = ion_client_create(dev, debug_name);
	if (IS_ERR(client))
		return PTR_ERR(client);
	file->private_data = client;

	return 0;
}

static const struct file_operations ion_fops = {
	.owner          = THIS_MODULE,
	.open           = ion_open,
	.release        = ion_release,
	.unlocked_ioctl = ion_ioctl,
	.compat_ioctl   = compat_ion_ioctl,
};

static size_t ion_debug_heap_total(struct ion_client *client,
				   unsigned int id)
{
	size_t size = 0;
	struct rb_node *n;

	mutex_lock(&client->lock);
	for (n = rb_first(&client->handles); n; n = rb_next(n)) {
		struct ion_handle *handle = rb_entry(n,
						     struct ion_handle,
						     node);
		if (handle->buffer->heap->id == id)
			size += handle->buffer->size;
	}
	mutex_unlock(&client->lock);
	return size;
}

static int ion_debug_heap_show(struct seq_file *s, void *unused)
{
	struct ion_heap *heap = s->private;
	struct ion_device *dev = heap->dev;
	struct rb_node *n;
	size_t total_size = 0;
	size_t total_orphaned_size = 0;

	seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
	seq_puts(s, "----------------------------------------------------\n");

	for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
		struct ion_client *client = rb_entry(n, struct ion_client,
						     node);
		size_t size = ion_debug_heap_total(client, heap->id);

		if (!size)
			continue;
		if (client->task) {
			char task_comm[TASK_COMM_LEN];

			get_task_comm(task_comm, client->task);
			seq_printf(s, "%16s %16u %16zu\n", task_comm,
				   client->pid, size);
		} else {
			seq_printf(s, "%16s %16u %16zu\n", client->name,
				   client->pid, size);
		}
	}
	seq_puts(s, "----------------------------------------------------\n");
	seq_puts(s, "orphaned allocations (info is from last known client):\n");
	mutex_lock(&dev->buffer_lock);
	for (n = rb_first(&dev->buffers); n; n = rb_next(n)) {
		struct ion_buffer *buffer = rb_entry(n, struct ion_buffer,
						     node);
		if (buffer->heap->id != heap->id)
			continue;
		total_size += buffer->size;
		if (!buffer->handle_count) {
			seq_printf(s, "%16s %16u %16zu %d %d\n",
				   buffer->task_comm, buffer->pid,
				   buffer->size, buffer->kmap_cnt,
				   atomic_read(&buffer->ref.refcount));
			total_orphaned_size += buffer->size;
		}
	}
	mutex_unlock(&dev->buffer_lock);
	seq_puts(s, "----------------------------------------------------\n");
	seq_printf(s, "%16s %16zu\n", "total orphaned",
		   total_orphaned_size);
	seq_printf(s, "%16s %16zu\n", "total ", total_size);
	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		seq_printf(s, "%16s %16zu\n", "deferred free",
				heap->free_list_size);
	seq_puts(s, "----------------------------------------------------\n");

	if (heap->debug_show)
		heap->debug_show(heap, s, unused);

	return 0;
}

static int ion_debug_heap_open(struct inode *inode, struct file *file)
{
	return single_open(file, ion_debug_heap_show, inode->i_private);
}

static const struct file_operations debug_heap_fops = {
	.open = ion_debug_heap_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

#ifdef DEBUG_HEAP_SHRINKER
static int debug_shrink_set(void *data, u64 val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	if (!val)
		return 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	sc.nr_to_scan = objs;

	heap->shrinker.shrink(&heap->shrinker, &sc);
	return 0;
}

static int debug_shrink_get(void *data, u64 *val)
{
	struct ion_heap *heap = data;
	struct shrink_control sc;
	int objs;

	sc.gfp_mask = -1;
	sc.nr_to_scan = 0;

	objs = heap->shrinker.shrink(&heap->shrinker, &sc);
	*val = objs;
	return 0;
}

DEFINE_SIMPLE_ATTRIBUTE(debug_shrink_fops, debug_shrink_get,
			debug_shrink_set, "%llu\n");
#endif

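/**
 * ion_device_add_heap - register a heap with an ion device
 * @dev:	the ion device
 * @heap:	the heap to add
 *
 * Initializes the heap's deferred-free and shrinker machinery as requested
 * by its flags and ops, inserts it into the device's priority-ordered heap
 * list and creates its debugfs entry.
 */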
void ion_device_add_heap(struct ion_device *dev, struct ion_heap *heap)
{
	struct dentry *debug_file;

	if (!heap->ops->allocate || !heap->ops->free || !heap->ops->map_dma ||
	    !heap->ops->unmap_dma)
		pr_err("%s: can not add heap with invalid ops struct.\n",
		       __func__);

	spin_lock_init(&heap->free_lock);
	heap->free_list_size = 0;

	if (heap->flags & ION_HEAP_FLAG_DEFER_FREE)
		ion_heap_init_deferred_free(heap);

	if ((heap->flags & ION_HEAP_FLAG_DEFER_FREE) || heap->ops->shrink)
		ion_heap_init_shrinker(heap);

	heap->dev = dev;
	down_write(&dev->lock);
	/*
	 * use negative heap->id to reverse the priority -- when traversing
	 * the list later attempt higher id numbers first
	 */
	plist_node_init(&heap->node, -heap->id);
	plist_add(&heap->node, &dev->heaps);
	debug_file = debugfs_create_file(heap->name, 0664,
					dev->heaps_debug_root, heap,
					&debug_heap_fops);

	if (!debug_file) {
		char buf[256], *path;

		path = dentry_path(dev->heaps_debug_root, buf, 256);
		pr_err("Failed to create heap debugfs at %s/%s\n",
			path, heap->name);
	}

#ifdef DEBUG_HEAP_SHRINKER
	if (heap->shrinker.shrink) {
		char debug_name[64];

		snprintf(debug_name, 64, "%s_shrink", heap->name);
		debug_file = debugfs_create_file(
			debug_name, 0644, dev->heaps_debug_root, heap,
			&debug_shrink_fops);
		if (!debug_file) {
			char buf[256], *path;

			path = dentry_path(dev->heaps_debug_root, buf, 256);
			pr_err("Failed to create heap shrinker debugfs at %s/%s\n",
				path, debug_name);
		}
	}
#endif
	up_write(&dev->lock);
}

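/*
 * Typical bring-up using ion_device_create() and ion_device_add_heap()
 * (illustrative sketch only -- heap data normally comes from platform data
 * or device tree, and error handling is omitted):
 *
 *	struct ion_device *idev = ion_device_create(NULL);
 *	struct ion_heap *heap = ion_heap_create(&pdata->heaps[i]);
 *
 *	heap->name = pdata->heaps[i].name;
 *	ion_device_add_heap(idev, heap);
 */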
struct ion_device *ion_device_create(long (*custom_ioctl)
				     (struct ion_client *client,
				      unsigned int cmd,
				      unsigned long arg))
{
	struct ion_device *idev;
	int ret;

	idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
	if (!idev)
		return ERR_PTR(-ENOMEM);

	idev->dev.minor = MISC_DYNAMIC_MINOR;
	idev->dev.name = "ion";
	idev->dev.fops = &ion_fops;
	idev->dev.parent = NULL;
	ret = misc_register(&idev->dev);
	if (ret) {
		pr_err("ion: failed to register misc device.\n");
		kfree(idev);
		return ERR_PTR(ret);
	}

	idev->debug_root = debugfs_create_dir("ion", NULL);
	if (!idev->debug_root) {
		pr_err("ion: failed to create debugfs root directory.\n");
		goto debugfs_done;
	}
	idev->heaps_debug_root = debugfs_create_dir("heaps", idev->debug_root);
	if (!idev->heaps_debug_root) {
		pr_err("ion: failed to create debugfs heaps directory.\n");
		goto debugfs_done;
	}
	idev->clients_debug_root = debugfs_create_dir("clients",
						idev->debug_root);
	if (!idev->clients_debug_root)
		pr_err("ion: failed to create debugfs clients directory.\n");

debugfs_done:

	idev->custom_ioctl = custom_ioctl;
	idev->buffers = RB_ROOT;
	mutex_init(&idev->buffer_lock);
	init_rwsem(&idev->lock);
	plist_head_init(&idev->heaps);
	idev->clients = RB_ROOT;
	return idev;
}

void ion_device_destroy(struct ion_device *dev)
{
	misc_deregister(&dev->dev);
	debugfs_remove_recursive(dev->debug_root);
	/* XXX need to free the heaps and clients ? */
	kfree(dev);
}

void __init ion_reserve(struct ion_platform_data *data)
{
	int i;

	for (i = 0; i < data->nr; i++) {
		if (data->heaps[i].size == 0)
			continue;

		if (data->heaps[i].base == 0) {
			phys_addr_t paddr;

			paddr = memblock_alloc_base(data->heaps[i].size,
						    data->heaps[i].align,
						    MEMBLOCK_ALLOC_ANYWHERE);
			if (!paddr) {
				pr_err("%s: error allocating memblock for heap %d\n",
					__func__, i);
				continue;
			}
			data->heaps[i].base = paddr;
		} else {
			int ret = memblock_reserve(data->heaps[i].base,
					       data->heaps[i].size);
			if (ret)
				pr_err("memblock reserve of %zx@%lx failed\n",
				       data->heaps[i].size,
				       data->heaps[i].base);
		}
		pr_info("%s: %s reserved base %lx size %zu\n", __func__,
			data->heaps[i].name,
			data->heaps[i].base,
			data->heaps[i].size);
	}
}