This source file includes following definitions.
- tifm_7xx1_dummy_eject
- tifm_7xx1_eject
- tifm_7xx1_isr
- tifm_7xx1_toggle_sock_power
- tifm_7xx1_sock_power_off
- tifm_7xx1_sock_addr
- tifm_7xx1_switch_media
- tifm_7xx1_suspend
- tifm_7xx1_resume
- tifm_7xx1_dummy_has_ms_pif
- tifm_7xx1_has_ms_pif
- tifm_7xx1_probe
- tifm_7xx1_remove
1
2
3
4
5
6
7
8 #include <linux/tifm.h>
9 #include <linux/dma-mapping.h>
10 #include <linux/module.h>
11
#define DRIVER_NAME "tifm_7xx1"
#define DRIVER_VERSION "0.8"

/*
 * FM_*_INTERRUPT_ENABLE / FM_INTERRUPT_STATUS bit layout, as used below:
 * bit 31 is the global interrupt enable, the low bits select sockets
 * directly, bits 8+ carry per-socket card events and bits 16+ per-socket
 * FIFO events (see the (irq_status >> cnt) tests in the ISR).
 */
#define TIFM_IRQ_ENABLE 0x80000000
#define TIFM_IRQ_SOCKMASK(x) (x)
#define TIFM_IRQ_CARDMASK(x) ((x) << 8)
#define TIFM_IRQ_FIFOMASK(x) ((x) << 16)
#define TIFM_IRQ_SETALL 0xffffffff
20
/*
 * No-op ->eject callback.  Installed by tifm_7xx1_remove() so that any
 * late eject request arriving while the adapter is being torn down is
 * silently ignored.
 */
static void tifm_7xx1_dummy_eject(struct tifm_adapter *fm,
				  struct tifm_dev *sock)
{
}
25
26 static void tifm_7xx1_eject(struct tifm_adapter *fm, struct tifm_dev *sock)
27 {
28 unsigned long flags;
29
30 spin_lock_irqsave(&fm->lock, flags);
31 fm->socket_change_set |= 1 << sock->socket_id;
32 tifm_queue_work(&fm->media_switcher);
33 spin_unlock_irqrestore(&fm->lock, flags);
34 }
35
/*
 * Shared interrupt handler.  Dispatches per-socket FIFO/card events,
 * acknowledges the status register, and then either wakes a waiter
 * (the resume handshake via fm->finish_me), re-enables the global
 * interrupt, or kicks the media switcher when sockets changed.
 */
static irqreturn_t tifm_7xx1_isr(int irq, void *dev_id)
{
	struct tifm_adapter *fm = dev_id;
	struct tifm_dev *sock;
	unsigned int irq_status, cnt;

	spin_lock(&fm->lock);
	irq_status = readl(fm->addr + FM_INTERRUPT_STATUS);
	/* 0: not our interrupt (shared line); ~0: hardware has gone away. */
	if (irq_status == 0 || irq_status == (~0)) {
		spin_unlock(&fm->lock);
		return IRQ_NONE;
	}

	if (irq_status & TIFM_IRQ_ENABLE) {
		/* Mask the global enable until the event is fully handled. */
		writel(TIFM_IRQ_ENABLE, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);

		for (cnt = 0; cnt < fm->num_sockets; cnt++) {
			sock = fm->sockets[cnt];
			if (sock) {
				if ((irq_status >> cnt) & TIFM_IRQ_FIFOMASK(1))
					sock->data_event(sock);
				if ((irq_status >> cnt) & TIFM_IRQ_CARDMASK(1))
					sock->card_event(sock);
			}
		}

		/* The low status bits flag sockets whose media changed. */
		fm->socket_change_set |= irq_status
					 & ((1 << fm->num_sockets) - 1);
	}
	/* Write back the status to acknowledge the handled interrupts. */
	writel(irq_status, fm->addr + FM_INTERRUPT_STATUS);

	if (fm->finish_me)
		complete_all(fm->finish_me);
	else if (!fm->socket_change_set)
		writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
	else
		tifm_queue_work(&fm->media_switcher);

	spin_unlock(&fm->lock);
	return IRQ_HANDLED;
}
77
/*
 * Power-cycle one socket and identify the inserted media.
 *
 * Returns the 3-bit media type read from SOCK_PRESENT_STATE bits 4-6
 * (e.g. TIFM_TYPE_XD), or 0 when the socket is empty.
 */
static unsigned char tifm_7xx1_toggle_sock_power(char __iomem *sock_addr)
{
	unsigned int s_state;
	int cnt;

	/* NOTE(review): 0x0e00 presumably powers down/resets the socket -
	 * magic value, confirm against the TI controller datasheet. */
	writel(0x0e00, sock_addr + SOCK_CONTROL);

	/* Wait with exponential backoff (16..256 ms) for power to drop. */
	for (cnt = 16; cnt <= 256; cnt <<= 1) {
		if (!(TIFM_SOCK_STATE_POWERED
		      & readl(sock_addr + SOCK_PRESENT_STATE)))
			break;

		msleep(cnt);
	}

	s_state = readl(sock_addr + SOCK_PRESENT_STATE);
	if (!(TIFM_SOCK_STATE_OCCUPIED & s_state))
		return 0; /* socket is empty */

	/* Turn the socket LED on while the card is being powered up. */
	writel(readl(sock_addr + SOCK_CONTROL) | TIFM_CTRL_LED,
	       sock_addr + SOCK_CONTROL);

	/* xD cards get an extra settling delay before power-on. */
	if (((readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7)
	    == TIFM_TYPE_XD)
		msleep(40);

	/* NOTE(review): 0x0c00 is another undocumented control value;
	 * the power bits are carried over from the present-state read. */
	writel((s_state & TIFM_CTRL_POWER_MASK) | 0x0c00,
	       sock_addr + SOCK_CONTROL);

	/* Wait (again with backoff) for the powered indication to appear. */
	msleep(20);
	for (cnt = 16; cnt <= 256; cnt <<= 1) {
		if ((TIFM_SOCK_STATE_POWERED
		     & readl(sock_addr + SOCK_PRESENT_STATE)))
			break;

		msleep(cnt);
	}

	/* Power-up done - LED off. */
	writel(readl(sock_addr + SOCK_CONTROL) & (~TIFM_CTRL_LED),
	       sock_addr + SOCK_CONTROL);

	return (readl(sock_addr + SOCK_PRESENT_STATE) >> 4) & 7;
}
122
123 inline static void tifm_7xx1_sock_power_off(char __iomem *sock_addr)
124 {
125 writel((~TIFM_CTRL_POWER_MASK) & readl(sock_addr + SOCK_CONTROL),
126 sock_addr + SOCK_CONTROL);
127 }
128
129 inline static char __iomem *
130 tifm_7xx1_sock_addr(char __iomem *base_addr, unsigned int sock_num)
131 {
132 return base_addr + ((sock_num + 1) << 10);
133 }
134
/*
 * Workqueue handler that (re)enumerates every socket flagged in
 * fm->socket_change_set: unregisters a stale device if one is present,
 * power-cycles the socket, and registers a new device for whatever
 * media is found afterwards.
 */
static void tifm_7xx1_switch_media(struct work_struct *work)
{
	struct tifm_adapter *fm = container_of(work, struct tifm_adapter,
					       media_switcher);
	struct tifm_dev *sock;
	char __iomem *sock_addr;
	unsigned long flags;
	unsigned char media_id;
	unsigned int socket_change_set, cnt;

	spin_lock_irqsave(&fm->lock, flags);
	/* Claim the pending change set; later events re-queue this work. */
	socket_change_set = fm->socket_change_set;
	fm->socket_change_set = 0;

	dev_dbg(fm->dev.parent, "checking media set %x\n",
		socket_change_set);

	if (!socket_change_set) {
		spin_unlock_irqrestore(&fm->lock, flags);
		return;
	}

	for (cnt = 0; cnt < fm->num_sockets; cnt++) {
		if (!(socket_change_set & (1 << cnt)))
			continue;
		sock = fm->sockets[cnt];
		if (sock) {
			printk(KERN_INFO
			       "%s : demand removing card from socket %u:%u\n",
			       dev_name(&fm->dev), fm->id, cnt);
			fm->sockets[cnt] = NULL;
			sock_addr = sock->addr;
			/* device_unregister() may sleep - drop the lock. */
			spin_unlock_irqrestore(&fm->lock, flags);
			device_unregister(&sock->dev);
			spin_lock_irqsave(&fm->lock, flags);
			tifm_7xx1_sock_power_off(sock_addr);
			writel(0x0e00, sock_addr + SOCK_CONTROL);
		}

		spin_unlock_irqrestore(&fm->lock, flags);

		/* Power-cycle the socket and probe the new media type. */
		media_id = tifm_7xx1_toggle_sock_power(
			tifm_7xx1_sock_addr(fm->addr, cnt));

		/* NOTE(review): media_id == 0 means "socket empty";
		 * presumably tifm_alloc_device() rejects it - confirm. */
		sock = tifm_alloc_device(fm, cnt, media_id);
		if (sock) {
			sock->addr = tifm_7xx1_sock_addr(fm->addr, cnt);

			if (!device_register(&sock->dev)) {
				spin_lock_irqsave(&fm->lock, flags);
				if (!fm->sockets[cnt]) {
					fm->sockets[cnt] = sock;
					/* Ownership moved to the adapter. */
					sock = NULL;
				}
				spin_unlock_irqrestore(&fm->lock, flags);
			}
			/* Registration failed or the slot was re-filled. */
			if (sock)
				tifm_free_device(&sock->dev);
		}
		spin_lock_irqsave(&fm->lock, flags);
	}

	/* Re-arm card/FIFO interrupts for the serviced sockets and then
	 * the global interrupt enable. */
	writel(TIFM_IRQ_FIFOMASK(socket_change_set)
	       | TIFM_IRQ_CARDMASK(socket_change_set),
	       fm->addr + FM_CLEAR_INTERRUPT_ENABLE);

	writel(TIFM_IRQ_FIFOMASK(socket_change_set)
	       | TIFM_IRQ_CARDMASK(socket_change_set),
	       fm->addr + FM_SET_INTERRUPT_ENABLE);

	writel(TIFM_IRQ_ENABLE, fm->addr + FM_SET_INTERRUPT_ENABLE);
	spin_unlock_irqrestore(&fm->lock, flags);
}
209
210 #ifdef CONFIG_PM
211
/*
 * Legacy PCI suspend callback: power down every occupied socket, then
 * save PCI state and put the controller into the target sleep state.
 */
static int tifm_7xx1_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct tifm_adapter *fm = pci_get_drvdata(dev);
	int cnt;

	dev_dbg(&dev->dev, "suspending host\n");

	/* Cut card power before the controller itself goes to sleep. */
	for (cnt = 0; cnt < fm->num_sockets; cnt++) {
		if (fm->sockets[cnt])
			tifm_7xx1_sock_power_off(fm->sockets[cnt]->addr);
	}

	pci_save_state(dev);
	/* Wakeup is explicitly disabled for this device. */
	pci_enable_wake(dev, pci_choose_state(dev, state), 0);
	pci_disable_device(dev);
	pci_set_power_state(dev, pci_choose_state(dev, state));
	return 0;
}
230
/*
 * Legacy PCI resume callback: restore the controller, repower all
 * sockets, and compare the media found with what was there before the
 * suspend.  Matching ("good") sockets are drained via an ISR completion
 * handshake; changed ("bad") ones are handed to the media switcher for
 * a full re-probe.
 */
static int tifm_7xx1_resume(struct pci_dev *dev)
{
	struct tifm_adapter *fm = pci_get_drvdata(dev);
	int rc;
	unsigned long timeout;
	unsigned int good_sockets = 0, bad_sockets = 0;
	unsigned long flags;
	/* Media type detected in each socket after repowering. */
	unsigned char new_ids[4];
	DECLARE_COMPLETION_ONSTACK(finish_resume);

	if (WARN_ON(fm->num_sockets > ARRAY_SIZE(new_ids)))
		return -ENXIO;

	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	rc = pci_enable_device(dev);
	if (rc)
		return rc;
	pci_set_master(dev);

	dev_dbg(&dev->dev, "resuming host\n");

	/* Repower every socket and record what media came back. */
	for (rc = 0; rc < fm->num_sockets; rc++)
		new_ids[rc] = tifm_7xx1_toggle_sock_power(
			tifm_7xx1_sock_addr(fm->addr, rc));
	spin_lock_irqsave(&fm->lock, flags);
	/* "good" sockets still hold the same media type as before the
	 * suspend; "bad" ones changed and need a full re-probe. */
	for (rc = 0; rc < fm->num_sockets; rc++) {
		if (fm->sockets[rc]) {
			if (fm->sockets[rc]->type == new_ids[rc])
				good_sockets |= 1 << rc;
			else
				bad_sockets |= 1 << rc;
		}
	}

	writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
	       fm->addr + FM_SET_INTERRUPT_ENABLE);
	dev_dbg(&dev->dev, "change sets on resume: good %x, bad %x\n",
		good_sockets, bad_sockets);

	fm->socket_change_set = 0;
	if (good_sockets) {
		/* Let the ISR complete us once the socket interrupts
		 * have fired (1 second timeout). */
		fm->finish_me = &finish_resume;
		spin_unlock_irqrestore(&fm->lock, flags);
		timeout = wait_for_completion_timeout(&finish_resume, HZ);
		dev_dbg(&dev->dev, "wait returned %lu\n", timeout);
		writel(TIFM_IRQ_FIFOMASK(good_sockets)
		       | TIFM_IRQ_CARDMASK(good_sockets),
		       fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
		writel(TIFM_IRQ_FIFOMASK(good_sockets)
		       | TIFM_IRQ_CARDMASK(good_sockets),
		       fm->addr + FM_SET_INTERRUPT_ENABLE);
		spin_lock_irqsave(&fm->lock, flags);
		fm->finish_me = NULL;
		/* Drop surviving good sockets from the change set. */
		fm->socket_change_set ^= good_sockets & fm->socket_change_set;
	}

	fm->socket_change_set |= bad_sockets;
	if (fm->socket_change_set)
		tifm_queue_work(&fm->media_switcher);

	spin_unlock_irqrestore(&fm->lock, flags);
	writel(TIFM_IRQ_ENABLE,
	       fm->addr + FM_SET_INTERRUPT_ENABLE);

	return 0;
}
299
300 #else
301
302 #define tifm_7xx1_suspend NULL
303 #define tifm_7xx1_resume NULL
304
305 #endif
306
/*
 * No-op ->has_ms_pif callback.  Installed by tifm_7xx1_remove() so
 * queries during adapter teardown always report "not present".
 */
static int tifm_7xx1_dummy_has_ms_pif(struct tifm_adapter *fm,
				      struct tifm_dev *sock)
{
	return 0;
}
312
313 static int tifm_7xx1_has_ms_pif(struct tifm_adapter *fm, struct tifm_dev *sock)
314 {
315 if (((fm->num_sockets == 4) && (sock->socket_id == 2))
316 || ((fm->num_sockets == 2) && (sock->socket_id == 0)))
317 return 1;
318
319 return 0;
320 }
321
322 static int tifm_7xx1_probe(struct pci_dev *dev,
323 const struct pci_device_id *dev_id)
324 {
325 struct tifm_adapter *fm;
326 int pci_dev_busy = 0;
327 int rc;
328
329 rc = pci_set_dma_mask(dev, DMA_BIT_MASK(32));
330 if (rc)
331 return rc;
332
333 rc = pci_enable_device(dev);
334 if (rc)
335 return rc;
336
337 pci_set_master(dev);
338
339 rc = pci_request_regions(dev, DRIVER_NAME);
340 if (rc) {
341 pci_dev_busy = 1;
342 goto err_out;
343 }
344
345 pci_intx(dev, 1);
346
347 fm = tifm_alloc_adapter(dev->device == PCI_DEVICE_ID_TI_XX21_XX11_FM
348 ? 4 : 2, &dev->dev);
349 if (!fm) {
350 rc = -ENOMEM;
351 goto err_out_int;
352 }
353
354 INIT_WORK(&fm->media_switcher, tifm_7xx1_switch_media);
355 fm->eject = tifm_7xx1_eject;
356 fm->has_ms_pif = tifm_7xx1_has_ms_pif;
357 pci_set_drvdata(dev, fm);
358
359 fm->addr = pci_ioremap_bar(dev, 0);
360 if (!fm->addr) {
361 rc = -ENODEV;
362 goto err_out_free;
363 }
364
365 rc = request_irq(dev->irq, tifm_7xx1_isr, IRQF_SHARED, DRIVER_NAME, fm);
366 if (rc)
367 goto err_out_unmap;
368
369 rc = tifm_add_adapter(fm);
370 if (rc)
371 goto err_out_irq;
372
373 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
374 fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
375 writel(TIFM_IRQ_ENABLE | TIFM_IRQ_SOCKMASK((1 << fm->num_sockets) - 1),
376 fm->addr + FM_SET_INTERRUPT_ENABLE);
377 return 0;
378
379 err_out_irq:
380 free_irq(dev->irq, fm);
381 err_out_unmap:
382 iounmap(fm->addr);
383 err_out_free:
384 tifm_free_adapter(fm);
385 err_out_int:
386 pci_intx(dev, 0);
387 pci_release_regions(dev);
388 err_out:
389 if (!pci_dev_busy)
390 pci_disable_device(dev);
391 return rc;
392 }
393
/*
 * Teardown: neutralize the adapter callbacks, mask and free the IRQ,
 * unregister the sockets, power everything down and release the PCI
 * resources - the reverse of tifm_7xx1_probe().
 */
static void tifm_7xx1_remove(struct pci_dev *dev)
{
	struct tifm_adapter *fm = pci_get_drvdata(dev);
	int cnt;

	/* From here on, eject/has_ms_pif requests become no-ops. */
	fm->eject = tifm_7xx1_dummy_eject;
	fm->has_ms_pif = tifm_7xx1_dummy_has_ms_pif;
	/* Mask all interrupt sources before releasing the handler. */
	writel(TIFM_IRQ_SETALL, fm->addr + FM_CLEAR_INTERRUPT_ENABLE);
	free_irq(dev->irq, fm);

	tifm_remove_adapter(fm);

	for (cnt = 0; cnt < fm->num_sockets; cnt++)
		tifm_7xx1_sock_power_off(tifm_7xx1_sock_addr(fm->addr, cnt));

	iounmap(fm->addr);
	pci_intx(dev, 0);
	pci_release_regions(dev);

	pci_disable_device(dev);
	tifm_free_adapter(fm);
}
416
417 static const struct pci_device_id tifm_7xx1_pci_tbl[] = {
418 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX21_XX11_FM, PCI_ANY_ID,
419 PCI_ANY_ID, 0, 0, 0 },
420 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX12_FM, PCI_ANY_ID,
421 PCI_ANY_ID, 0, 0, 0 },
422 { PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_XX20_FM, PCI_ANY_ID,
423 PCI_ANY_ID, 0, 0, 0 },
424 { }
425 };
426
static struct pci_driver tifm_7xx1_driver = {
	.name = DRIVER_NAME,
	.id_table = tifm_7xx1_pci_tbl,
	.probe = tifm_7xx1_probe,
	.remove = tifm_7xx1_remove,
	/* Legacy PCI PM callbacks; defined to NULL when !CONFIG_PM. */
	.suspend = tifm_7xx1_suspend,
	.resume = tifm_7xx1_resume,
};

/* Generates module init/exit that register/unregister the driver. */
module_pci_driver(tifm_7xx1_driver);
MODULE_AUTHOR("Alex Dubov");
MODULE_DESCRIPTION("TI FlashMedia host driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, tifm_7xx1_pci_tbl);
MODULE_VERSION(DRIVER_VERSION);