/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2017 Google, Inc.
 */

#ifndef _LINUX_BINDER_ALLOC_H
#define _LINUX_BINDER_ALLOC_H

#include <linux/rbtree.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/rtmutex.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/list_lru.h>
#include <uapi/linux/android/binder.h>

extern struct list_lru binder_alloc_lru;
struct binder_transaction;

/**
 * struct binder_buffer - buffer used for binder transactions
 * @entry:              entry in alloc->buffers list
 * @rb_node:            node for allocated_buffers/free_buffers rb trees
 * @free:               %true if buffer is free
 * @allow_user_free:    %true if user is allowed to free buffer
 * @async_transaction:  %true if buffer is in use for an async txn
 * @debug_id:           unique ID for debugging
 * @transaction:        pointer to associated struct binder_transaction
 * @target_node:        struct binder_node associated with this buffer
 * @data_size:          size of @transaction data
 * @offsets_size:       size of array of offsets
 * @extra_buffers_size: size of space for other objects (like sg lists)
 * @user_data:          user pointer to base of buffer space
 *
 * Bookkeeping structure for binder transaction buffers
 */
struct binder_buffer {
        struct list_head entry; /* free and allocated entries by address */
        struct rb_node rb_node; /* free entry by size or allocated entry */
                                /* by address */
        unsigned free:1;
        unsigned allow_user_free:1;
        unsigned async_transaction:1;
        unsigned debug_id:29;

        struct binder_transaction *transaction;

        struct binder_node *target_node;
        size_t data_size;
        size_t offsets_size;
        size_t extra_buffers_size;
        void __user *user_data;
};

/**
 * struct binder_lru_page - page object used for binder shrinker
 * @lru:      entry in binder_alloc_lru
 * @page_ptr: pointer to physical page in mmap'd space
 * @alloc:    binder_alloc owning this page entry
 */
struct binder_lru_page {
        struct list_head lru;
        struct page *page_ptr;
        struct binder_alloc *alloc;
};

/**
 * struct binder_alloc - per-binder proc state for binder allocator
 * @mutex:              protects binder_alloc fields
 * @vma:                vm_area_struct passed to mmap_handler
 *                      (invariant after mmap)
 * @vma_vm_mm:          copy of vma->vm_mm (invariant after mmap)
 * @buffer:             user base address of per-proc address space
 *                      mapped via mmap
 * @buffers:            list of all buffers for this proc
 * @free_buffers:       rb tree of buffers available for allocation
 *                      sorted by size
 * @allocated_buffers:  rb tree of allocated buffers sorted by address
 * @free_async_space:   VA space available for async buffers. This is
 *                      initialized at mmap time to 1/2 the full VA space
 * @pages:              array of binder_lru_page
 * @buffer_size:        size of address space specified via mmap
 * @pid:                pid for associated binder_proc (invariant after init)
 * @pages_high:         high watermark of offset in @pages
 *
 * Bookkeeping structure for per-proc address space management for binder
 * buffers. The address space is used both for user-visible buffers and for
 * the struct binder_buffer objects that track them.
 */
struct binder_alloc {
        struct mutex mutex;
        struct vm_area_struct *vma;
        struct mm_struct *vma_vm_mm;
        void __user *buffer;
        struct list_head buffers;
        struct rb_root free_buffers;
        struct rb_root allocated_buffers;
        size_t free_async_space;
        struct binder_lru_page *pages;
        size_t buffer_size;
        uint32_t buffer_free;
        int pid;
        size_t pages_high;
};
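
/*
 * Illustrative lifecycle sketch (not part of the original header): one
 * plausible ordering of the binder_alloc entry points declared below, as
 * seen from a hypothetical caller. The example_*() names are invented for
 * illustration only.
 *
 *	// binder_alloc_shrinker_init() is a one-time, module-level call;
 *	// per-process state is set up roughly like this:
 *	static int example_open(struct binder_alloc *alloc)
 *	{
 *		binder_alloc_init(alloc);	// mutex, buffer list, pid
 *		return 0;
 *	}
 *
 *	static int example_mmap(struct binder_alloc *alloc,
 *				struct vm_area_struct *vma)
 *	{
 *		// records the VA range described by vma, allocates the
 *		// pages[] array and seeds free_async_space
 *		return binder_alloc_mmap_handler(alloc, vma);
 *	}
 *
 *	static void example_release(struct binder_alloc *alloc)
 *	{
 *		binder_alloc_vma_close(alloc);		// when the vma goes away
 *		binder_alloc_deferred_release(alloc);	// at final proc teardown
 *	}
 */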

#ifdef CONFIG_ANDROID_BINDER_IPC_SELFTEST
void binder_selftest_alloc(struct binder_alloc *alloc);
#else
static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
#endif
enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
                                       spinlock_t *lock, void *cb_arg);
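
/*
 * binder_alloc_free_page() has the list_lru isolate-callback signature. A
 * minimal sketch of how a shrinker "scan" hook might drive it over
 * binder_alloc_lru; example_shrink_scan() is an invented name and the exact
 * wiring in binder_alloc.c may differ:
 *
 *	static unsigned long example_shrink_scan(struct shrinker *shrink,
 *						 struct shrink_control *sc)
 *	{
 *		return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page,
 *				     NULL, sc->nr_to_scan);
 *	}
 */
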
extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                                  size_t data_size,
                                                  size_t offsets_size,
                                                  size_t extra_buffers_size,
                                                  int is_async);
extern void binder_alloc_init(struct binder_alloc *alloc);
extern int binder_alloc_shrinker_init(void);
extern void binder_alloc_vma_close(struct binder_alloc *alloc);
extern struct binder_buffer *
binder_alloc_prepare_to_free(struct binder_alloc *alloc,
                             uintptr_t user_ptr);
extern void binder_alloc_free_buf(struct binder_alloc *alloc,
                                  struct binder_buffer *buffer);
extern int binder_alloc_mmap_handler(struct binder_alloc *alloc,
                                     struct vm_area_struct *vma);
extern void binder_alloc_deferred_release(struct binder_alloc *alloc);
extern int binder_alloc_get_allocated_count(struct binder_alloc *alloc);
extern void binder_alloc_print_allocated(struct seq_file *m,
                                         struct binder_alloc *alloc);
void binder_alloc_print_pages(struct seq_file *m,
                              struct binder_alloc *alloc);

/**
 * binder_alloc_get_free_async_space() - get free space available for async
 * @alloc:      binder_alloc for this proc
 *
 * Return:      the bytes remaining in the address space for async transactions
 */
static inline size_t
binder_alloc_get_free_async_space(struct binder_alloc *alloc)
{
        size_t free_async_space;

        mutex_lock(&alloc->mutex);
        free_async_space = alloc->free_async_space;
        mutex_unlock(&alloc->mutex);
        return free_async_space;
}

unsigned long
binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc,
                                 struct binder_buffer *buffer,
                                 binder_size_t buffer_offset,
                                 const void __user *from,
                                 size_t bytes);

int binder_alloc_copy_to_buffer(struct binder_alloc *alloc,
                                struct binder_buffer *buffer,
                                binder_size_t buffer_offset,
                                void *src,
                                size_t bytes);

int binder_alloc_copy_from_buffer(struct binder_alloc *alloc,
                                  void *dest,
                                  struct binder_buffer *buffer,
                                  binder_size_t buffer_offset,
                                  size_t bytes);
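
/*
 * Illustrative allocation/copy sketch (not taken from the driver sources):
 * one plausible way to fill a transaction buffer from userspace and release
 * it again. example_fill() and its parameters are invented; error handling
 * is abbreviated and assumes binder_alloc_new_buf() reports failure via
 * ERR_PTR().
 *
 *	static struct binder_buffer *
 *	example_fill(struct binder_alloc *alloc, const void __user *txn_data,
 *		     size_t data_size, size_t offsets_size, int is_async)
 *	{
 *		struct binder_buffer *buf;
 *
 *		// no sg lists or other extra objects: extra_buffers_size == 0
 *		buf = binder_alloc_new_buf(alloc, data_size, offsets_size,
 *					   0, is_async);
 *		if (IS_ERR(buf))
 *			return buf;
 *
 *		// a nonzero return means the copy from userspace faulted
 *		if (binder_alloc_copy_user_to_buffer(alloc, buf, 0,
 *						     txn_data, data_size)) {
 *			binder_alloc_free_buf(alloc, buf);
 *			return ERR_PTR(-EFAULT);
 *		}
 *		return buf;
 *	}
 *
 * Freeing on behalf of userspace would pair binder_alloc_prepare_to_free(),
 * which resolves a user pointer back to its buffer, with
 * binder_alloc_free_buf().
 */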

#endif /* _LINUX_BINDER_ALLOC_H */