This source file includes the following definitions:
- hdq_reg_in
- hdq_reg_out
- hdq_reg_merge
- hdq_disable_interrupt
- hdq_wait_for_flag
- hdq_write_byte
- hdq_isr
- omap_w1_search_bus
- _omap_hdq_reset
- omap_hdq_break
- hdq_read_byte
- omap_hdq_get
- omap_hdq_put
- omap_w1_triplet
- omap_w1_reset_bus
- omap_w1_read_byte
- omap_w1_write_byte
- omap_hdq_probe
- omap_hdq_remove
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/w1.h>

#define MOD_NAME "OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_SINGLE		BIT(7)
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	BIT(6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	BIT(5)
#define OMAP_HDQ_CTRL_STATUS_GO			BIT(4)
#define OMAP_HDQ_CTRL_STATUS_PRESENCE		BIT(3)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	BIT(2)
#define OMAP_HDQ_CTRL_STATUS_DIR		BIT(1)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		BIT(2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		BIT(1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		BIT(0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		BIT(1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		BIT(0)
#define OMAP_HDQ_SYSCONFIG_NOIDLE		0x0
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		BIT(0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);

static int w1_id;
module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection in HDQ mode");

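/*
 * Per-controller driver state. hdq_mutex serialises bus transactions and
 * use-count updates; hdq_spinlock protects hdq_irqstatus, which the ISR
 * fills with the latest INT_STATUS value. init_trans tracks whether a
 * write/read transaction is in flight so the w1 callbacks can balance
 * omap_hdq_get()/omap_hdq_put(). mode selects HDQ (0) or 1-Wire (1)
 * protocol timing. rrw is initialised in probe but not otherwise used in
 * this file.
 */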
struct hdq_data {
	struct device *dev;
	void __iomem *hdq_base;

	struct mutex hdq_mutex;
	int hdq_usecount;
	u8 hdq_irqstatus;

	spinlock_t hdq_spinlock;

	int init_trans;
	int rrw;

	int mode;
};

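/* raw register accessors and a read-modify-write helper */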
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
			       u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

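/*
 * Keep only the bits in @mask at @offset; the single caller passes
 * ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK to clear the interrupt mask bit.
 */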
static void hdq_disable_interrupt(struct hdq_data *hdq_data, u32 offset,
				  u32 mask)
{
	u32 ie;

	ie = readl(hdq_data->hdq_base + offset);
	writel(ie & mask, hdq_data->hdq_base + offset);
}

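/*
 * Poll the register at @offset until @flag is set or cleared (selected by
 * @flag_set), giving up after OMAP_HDQ_TIMEOUT. The last value read is
 * returned through @status.
 */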
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
			     u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag to clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag to be set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

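/*
 * Transmit one byte: latch it into TX_DATA, set GO, then wait for the
 * TXCOMPLETE interrupt and for the GO bit to clear. *status is filled with
 * the interrupt status captured by the ISR.
 */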
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear any pending interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* the ISR will load it with the new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit to start the transmit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		      OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
				 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;

	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for"
			" TXCOMPLETE/RXCOMPLETE, %x", *status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
				OMAP_HDQ_CTRL_STATUS_GO,
				OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting GO bit"
			" return to zero, %x", tmp_status);
	}

out:
	return ret;
}

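/* interrupt handler: latch INT_STATUS and wake up any waiting transaction */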
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
	    (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
	     | OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up the sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

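/*
 * HDQ cannot enumerate slaves, so report a single device whose ROM id is
 * built from the w1_id module parameter (default 1) plus its CRC8.
 */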
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
			       u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * The top byte of the 64-bit ROM id is the CRC8 computed over the
	 * lower 56 bits.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}

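/* soft-reset the controller and restore the control/sysconfig settings */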
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_SOFTRESET);

	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for the soft reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
			tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
			    hdq_data->mode);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

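/*
 * Send a break (initialization) pulse: set the INIT and GO bits, wait for
 * the TIMEOUT interrupt that ends the pulse, then check that a slave
 * signalled presence.
 */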
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear any pending interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* the ISR will load it with the new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bits */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
				 hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;

	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
			tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* check whether a slave answered with a presence pulse */
	if (!(hdq_reg_in(hdq_data, OMAP_HDQ_CTRL_STATUS) &
	      OMAP_HDQ_CTRL_STATUS_PRESENCE)) {
		dev_dbg(hdq_data->dev, "Presence bit not set\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for both the INIT and GO bits to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
				OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
				OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
				&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits"
			" return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

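/*
 * Read one byte from RX_DATA. If the RXCOMPLETE interrupt has not already
 * fired (back to back with a preceding write), start a read by setting the
 * DIR and GO bits and wait for it.
 */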
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);

		/* wait for the RXCOMPLETE bit */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				   OMAP_HDQ_TIMEOUT);

		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			      OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready; read it out */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

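/*
 * Claim the controller for another user (up to OMAP_HDQ_MAX_USER). The
 * first user powers the block up via runtime PM and makes sure it is out
 * of reset.
 */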
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {

			pm_runtime_get_sync(hdq_data->dev);

			/* make sure the module is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
			      OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the use count on failure */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ/1W mode and enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					    OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					    OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK |
					    hdq_data->mode);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					    OMAP_HDQ_SYSCONFIG_NOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

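/* drop one user; the last user lets the block idle via runtime PM */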
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
		    OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to decrement use count"
			" when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount)
			pm_runtime_put_sync(hdq_data->dev);
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

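/*
 * 1-Wire search triplet in single-bit mode: read the id bit and its
 * complement, decide the search direction, then write the direction bit.
 * The return value packs the bits read and the direction written.
 */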
static u8 omap_w1_triplet(void *_hdq, u8 bdir)
{
	u8 id_bit, comp_bit;
	int err;
	u8 ret = 0x3;
	struct hdq_data *hdq_data = _hdq;
	u8 ctrl = OMAP_HDQ_CTRL_STATUS_SINGLE | OMAP_HDQ_CTRL_STATUS_GO |
		  OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK;
	u8 mask = ctrl | OMAP_HDQ_CTRL_STATUS_DIR;

	omap_hdq_get(_hdq);

	err = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (err < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		goto rtn;
	}

	hdq_data->hdq_irqstatus = 0;
	/* read the id bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	id_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	hdq_data->hdq_irqstatus = 0;
	/* read the complement bit */
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS,
		      ctrl | OMAP_HDQ_CTRL_STATUS_DIR, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "RX wait elapsed\n");
		goto out;
	}
	comp_bit = (hdq_reg_in(_hdq, OMAP_HDQ_RX_DATA) & 0x01);

	if (id_bit && comp_bit) {
		ret = 0x03;
		goto out;
	}
	if (!id_bit && !comp_bit) {
		/* both bits are valid, take the direction given */
		ret = bdir ? 0x04 : 0;
	} else {
		/* only one bit is valid, take that direction */
		bdir = id_bit;
		ret = id_bit ? 0x05 : 0x02;
	}

	/* write the direction bit */
	hdq_reg_out(_hdq, OMAP_HDQ_TX_DATA, bdir);
	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, ctrl, mask);
	err = wait_event_timeout(hdq_wait_queue,
				 (hdq_data->hdq_irqstatus
				  & OMAP_HDQ_INT_STATUS_TXCOMPLETE),
				 OMAP_HDQ_TIMEOUT);
	if (err == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		goto out;
	}

	hdq_reg_merge(_hdq, OMAP_HDQ_CTRL_STATUS, 0,
		      OMAP_HDQ_CTRL_STATUS_SINGLE);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	omap_hdq_put(_hdq);
	return ret;
}

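/* w1 reset_bus callback: issue a break pulse */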
static u8 omap_w1_reset_bus(void *_hdq)
{
	omap_hdq_get(_hdq);
	omap_hdq_break(_hdq);
	omap_hdq_put(_hdq);
	return 0;
}

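/*
 * w1 read_byte callback. The use count taken by the preceding write is
 * dropped once the read completes; on failure the transaction state is
 * cleared and -1 is returned (truncated by the u8 return type).
 */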
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	/* first read to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	hdq_disable_interrupt(hdq_data, OMAP_HDQ_CTRL_STATUS,
			      ~OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

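/*
 * w1 write_byte callback. The first write of a transaction claims the
 * controller; a second write releases it again. A SKIP ROM command is
 * preceded by a break pulse.
 */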
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* first write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	/* reset the slave with a break pulse before issuing SKIP ROM */
	if (byte == W1_SKIP_ROM)
		omap_hdq_break(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status);
		return;
	}

	/* second write, data transferred: release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}
}

static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
};

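/*
 * Probe: map the registers, reset the block, hook up the interrupt and
 * register a w1 bus master. The "ti,mode" DT property selects HDQ
 * (default) or 1-Wire mode.
 */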
static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	int ret, irq;
	u8 rev;
	const char *mode;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	hdq_data->hdq_base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	hdq_data->hdq_usecount = 0;
	hdq_data->rrw = 0;
	mutex_init(&hdq_data->hdq_mutex);

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "pm_runtime_get_sync failed\n");
		goto err_w1;
	}

	ret = _omap_hdq_reset(hdq_data);
	if (ret) {
		dev_dbg(&pdev->dev, "reset failed\n");
		goto err_irq;
	}

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		 (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_dbg(&pdev->dev, "Failed to get IRQ: %d\n", irq);
		ret = irq;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

	omap_hdq_break(hdq_data);

	pm_runtime_put_sync(&pdev->dev);

	ret = of_property_read_string(pdev->dev.of_node, "ti,mode", &mode);
	if (ret < 0 || !strcmp(mode, "hdq")) {
		hdq_data->mode = 0;
		omap_w1_master.search = omap_w1_search_bus;
	} else {
		hdq_data->mode = 1;
		omap_w1_master.triplet = omap_w1_triplet;
	}

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	pm_runtime_disable(&pdev->dev);

	w1_remove_master_device(&omap_w1_master);

	return 0;
}

static const struct of_device_id omap_hdq_dt_ids[] = {
	{ .compatible = "ti,omap3-1w" },
	{ .compatible = "ti,am4372-hdq" },
	{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);

static struct platform_driver omap_hdq_driver = {
	.probe	= omap_hdq_probe,
	.remove	= omap_hdq_remove,
	.driver	= {
		.name	= "omap_hdq",
		.of_match_table = omap_hdq_dt_ids,
	},
};
module_platform_driver(omap_hdq_driver);

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ-1W driver Library");
MODULE_LICENSE("GPL");