This source file includes the following definitions:
- cpm_muram_init
- cpm_muram_alloc_common
- cpm_muram_alloc
- cpm_muram_free
- cpm_muram_alloc_fixed
- cpm_muram_addr
- cpm_muram_offset
- cpm_muram_dma
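
For orientation, here is a minimal, hypothetical usage sketch of this API. The size (64), alignment (32), and the wrapper function itself are illustrative assumptions, not taken from this file; the calls themselves are the ones defined below.

#include <linux/err.h>
#include <soc/fsl/qe/qe.h>

/* Hypothetical caller: allocate a 64-byte, 32-byte-aligned region of
 * multi-user RAM, derive its CPU and DMA views, then release it.
 */
static int example_use_muram(void)
{
	unsigned long off;
	void __iomem *va;
	dma_addr_t da;

	off = cpm_muram_alloc(64, 32);	/* offset into the muram area */
	if (IS_ERR_VALUE(off))
		return -ENOMEM;

	va = cpm_muram_addr(off);	/* CPU (virtual) address */
	da = cpm_muram_dma(va);		/* bus address for the CPM/QE engine */

	/* ... program buffer descriptors through va, hand da to hardware ... */

	cpm_muram_free(off);
	return 0;
}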
#include <linux/genalloc.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of_device.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <soc/fsl/qe/qe.h>

static struct gen_pool *muram_pool;
static spinlock_t cpm_muram_lock;
static u8 __iomem *muram_vbase;
static phys_addr_t muram_pbase;

struct muram_block {
	struct list_head head;
	unsigned long start;
	int size;
};

static LIST_HEAD(muram_block_list);

#define OF_MAX_ADDR_CELLS 4
/*
 * gen_pool_alloc() returns 0 on failure, but 0 is also a valid muram
 * offset, so every pool address is biased by GENPOOL_OFFSET and the
 * bias is subtracted again before an offset is handed back to callers.
 */
#define GENPOOL_OFFSET (4096 * 8)

/* Initialize the multi-user ram (muram) pool from the device tree. */
int cpm_muram_init(void)
{
	struct device_node *np;
	struct resource r;
	u32 zero[OF_MAX_ADDR_CELLS] = {};
	resource_size_t max = 0;
	int i = 0;
	int ret = 0;

	if (muram_pbase)
		return 0;

	spin_lock_init(&cpm_muram_lock);
	np = of_find_compatible_node(NULL, NULL, "fsl,cpm-muram-data");
	if (!np) {
		/* try legacy bindings */
		np = of_find_node_by_name(NULL, "data-only");
		if (!np) {
			pr_err("Cannot find CPM muram data node\n");
			ret = -ENODEV;
			goto out_muram;
		}
	}

	muram_pool = gen_pool_create(0, -1);
	if (!muram_pool) {
		pr_err("Cannot allocate memory pool for CPM/QE muram\n");
		ret = -ENOMEM;
		goto out_muram;
	}
	muram_pbase = of_translate_address(np, zero);
	if (muram_pbase == (phys_addr_t)OF_BAD_ADDR) {
		pr_err("Cannot translate zero through CPM muram node\n");
		ret = -ENODEV;
		goto out_pool;
	}

	while (of_address_to_resource(np, i++, &r) == 0) {
		if (r.end > max)
			max = r.end;
		ret = gen_pool_add(muram_pool, r.start - muram_pbase +
				   GENPOOL_OFFSET, resource_size(&r), -1);
		if (ret) {
			pr_err("QE: couldn't add muram to pool!\n");
			goto out_pool;
		}
	}

	muram_vbase = ioremap(muram_pbase, max - muram_pbase + 1);
	if (!muram_vbase) {
		pr_err("Cannot map QE muram\n");
		ret = -ENOMEM;
		goto out_pool;
	}
	goto out_muram;
out_pool:
	gen_pool_destroy(muram_pool);
out_muram:
	of_node_put(np);
	return ret;
}

/*
 * cpm_muram_alloc_common - cpm_muram_alloc common code
 * @size: number of bytes to allocate
 * @algo: algorithm for gen_pool allocation
 * @data: data for gen_pool's algorithm
 *
 * This function returns an offset into the muram area, or
 * (unsigned long)-ENOMEM on failure.
 * Must be called with cpm_muram_lock held.
 */
static unsigned long cpm_muram_alloc_common(unsigned long size,
					    genpool_algo_t algo, void *data)
{
	struct muram_block *entry;
	unsigned long start;

	if (!muram_pool && cpm_muram_init())
		goto out2;

	start = gen_pool_alloc_algo(muram_pool, size, algo, data);
	if (!start)
		goto out2;
	start = start - GENPOOL_OFFSET;
	memset_io(cpm_muram_addr(start), 0, size);
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		goto out1;
	entry->start = start;
	entry->size = size;
	list_add(&entry->head, &muram_block_list);

	return start;
out1:
	/* pool addresses carry the GENPOOL_OFFSET bias; restore it to free */
	gen_pool_free(muram_pool, start + GENPOOL_OFFSET, size);
out2:
	return (unsigned long)-ENOMEM;
}

/**
 * cpm_muram_alloc - allocate the requested size worth of multi-user ram
 * @size: number of bytes to allocate
 * @align: requested alignment, in bytes
 *
 * This function returns an offset into the muram area, or
 * (unsigned long)-ENOMEM on failure.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
unsigned long cpm_muram_alloc(unsigned long size, unsigned long align)
{
	unsigned long start;
	unsigned long flags;
	struct genpool_data_align muram_pool_data;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	muram_pool_data.align = align;
	start = cpm_muram_alloc_common(size, gen_pool_first_fit_align,
				       &muram_pool_data);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc);

/**
 * cpm_muram_free - free a chunk of multi-user ram
 * @offset: The beginning of the chunk as returned by cpm_muram_alloc().
 *
 * Returns the size of the freed block, or 0 if no block was found at
 * @offset.
 */
int cpm_muram_free(unsigned long offset)
{
	unsigned long flags;
	int size;
	struct muram_block *tmp;

	size = 0;
	spin_lock_irqsave(&cpm_muram_lock, flags);
	list_for_each_entry(tmp, &muram_block_list, head) {
		if (tmp->start == offset) {
			size = tmp->size;
			list_del(&tmp->head);
			kfree(tmp);
			break;
		}
	}
	/* only return the region to the pool if it was actually tracked */
	if (size)
		gen_pool_free(muram_pool, offset + GENPOOL_OFFSET, size);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return size;
}
EXPORT_SYMBOL(cpm_muram_free);

/**
 * cpm_muram_alloc_fixed - reserve a specific region of multi-user ram
 * @offset: offset of the muram region to reserve
 * @size: number of bytes to reserve
 *
 * This function returns @offset on success, or (unsigned long)-ENOMEM
 * on failure.
 * Use cpm_muram_addr() to get the virtual address of the area.
 * Use cpm_muram_free() to free the allocation.
 */
unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size)
{
	unsigned long start;
	unsigned long flags;
	struct genpool_data_fixed muram_pool_data_fixed;

	spin_lock_irqsave(&cpm_muram_lock, flags);
	muram_pool_data_fixed.offset = offset + GENPOOL_OFFSET;
	start = cpm_muram_alloc_common(size, gen_pool_fixed_alloc,
				       &muram_pool_data_fixed);
	spin_unlock_irqrestore(&cpm_muram_lock, flags);
	return start;
}
EXPORT_SYMBOL(cpm_muram_alloc_fixed);

/**
 * cpm_muram_addr - turn a muram offset into a virtual address
 * @offset: muram offset to convert
 */
void __iomem *cpm_muram_addr(unsigned long offset)
{
	return muram_vbase + offset;
}
EXPORT_SYMBOL(cpm_muram_addr);

/**
 * cpm_muram_offset - turn a muram virtual address into a muram offset
 * @addr: muram virtual address to convert
 */
unsigned long cpm_muram_offset(void __iomem *addr)
{
	return addr - (void __iomem *)muram_vbase;
}
EXPORT_SYMBOL(cpm_muram_offset);

/**
 * cpm_muram_dma - turn a muram virtual address into a DMA address
 * @addr: virtual address from a cpm_muram_addr() call
 */
dma_addr_t cpm_muram_dma(void __iomem *addr)
{
	return muram_pbase + ((u8 __iomem *)addr - muram_vbase);
}
EXPORT_SYMBOL(cpm_muram_dma);
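
Taken together, the three conversion helpers form consistent round trips for any offset handed out by cpm_muram_alloc(). The check below is a hypothetical sketch, not part of this file; it assumes it is compiled in this translation unit so that muram_pbase is visible.

#include <linux/bug.h>

/* Illustrative consistency check for the conversion helpers. */
static void muram_roundtrip_check(unsigned long off)
{
	void __iomem *va = cpm_muram_addr(off);

	/* cpm_muram_offset() inverts cpm_muram_addr() */
	WARN_ON(cpm_muram_offset(va) != off);
	/* the DMA view is the physical base plus the same offset */
	WARN_ON(cpm_muram_dma(va) != muram_pbase + off);
}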