/*
 * drivers/w1/masters/omap_hdq.c
 *
 * Copyright (C) 2007,2012 Texas Instruments, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 *
 */
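/*
 * w1 bus master driver for the HDQ/1-Wire controller found on OMAP SoCs.
 * The controller transfers a single byte per GO command; this driver layers
 * the w1_bus_master read/write/reset callbacks on top of that interface.
 */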
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>

#include "../w1.h"
#include "../w1_int.h"

#define	MOD_NAME	"OMAP_HDQ:"

#define OMAP_HDQ_REVISION			0x00
#define OMAP_HDQ_TX_DATA			0x04
#define OMAP_HDQ_RX_DATA			0x08
#define OMAP_HDQ_CTRL_STATUS			0x0c
#define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK	(1<<6)
#define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE	(1<<5)
#define OMAP_HDQ_CTRL_STATUS_GO			(1<<4)
#define OMAP_HDQ_CTRL_STATUS_INITIALIZATION	(1<<2)
#define OMAP_HDQ_CTRL_STATUS_DIR		(1<<1)
#define OMAP_HDQ_CTRL_STATUS_MODE		(1<<0)
#define OMAP_HDQ_INT_STATUS			0x10
#define OMAP_HDQ_INT_STATUS_TXCOMPLETE		(1<<2)
#define OMAP_HDQ_INT_STATUS_RXCOMPLETE		(1<<1)
#define OMAP_HDQ_INT_STATUS_TIMEOUT		(1<<0)
#define OMAP_HDQ_SYSCONFIG			0x14
#define OMAP_HDQ_SYSCONFIG_SOFTRESET		(1<<1)
#define OMAP_HDQ_SYSCONFIG_AUTOIDLE		(1<<0)
#define OMAP_HDQ_SYSSTATUS			0x18
#define OMAP_HDQ_SYSSTATUS_RESETDONE		(1<<0)

#define OMAP_HDQ_FLAG_CLEAR			0
#define OMAP_HDQ_FLAG_SET			1
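/* longest wait for a controller event: HZ/5 jiffies, i.e. 200 ms */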
#define OMAP_HDQ_TIMEOUT			(HZ/5)

#define OMAP_HDQ_MAX_USER			4

static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue);
static int w1_id;

struct hdq_data {
	struct device		*dev;
	void __iomem		*hdq_base;
	/* lock status update */
	struct mutex		hdq_mutex;
	int			hdq_usecount;
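	/* interrupt status, latched by the ISR and cleared before each transfer */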
	u8			hdq_irqstatus;
	/* device lock */
	spinlock_t		hdq_spinlock;
	/*
	 * Used to control the call to omap_hdq_get and omap_hdq_put.
	 * HDQ Protocol: Write the CMD|REG_address first, followed by
	 * the data write or read.
	 */
	int			init_trans;
};

static int omap_hdq_probe(struct platform_device *pdev);
static int omap_hdq_remove(struct platform_device *pdev);

static const struct of_device_id omap_hdq_dt_ids[] = {
	{ .compatible = "ti,omap3-1w" },
	{}
};
MODULE_DEVICE_TABLE(of, omap_hdq_dt_ids);

static struct platform_driver omap_hdq_driver = {
	.probe =	omap_hdq_probe,
	.remove =	omap_hdq_remove,
	.driver =	{
		.name =	"omap_hdq",
		.of_match_table = omap_hdq_dt_ids,
	},
};

static u8 omap_w1_read_byte(void *_hdq);
static void omap_w1_write_byte(void *_hdq, u8 byte);
static u8 omap_w1_reset_bus(void *_hdq);
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found);

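/* w1 bus master operations implemented on top of the HDQ controller */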
static struct w1_bus_master omap_w1_master = {
	.read_byte	= omap_w1_read_byte,
	.write_byte	= omap_w1_write_byte,
	.reset_bus	= omap_w1_reset_bus,
	.search		= omap_w1_search_bus,
};

/* HDQ register I/O routines */
static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset)
{
	return __raw_readl(hdq_data->hdq_base + offset);
}

static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val)
{
	__raw_writel(val, hdq_data->hdq_base + offset);
}

static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset,
			u8 val, u8 mask)
{
	u8 new_val = (__raw_readl(hdq_data->hdq_base + offset) & ~mask)
			| (val & mask);
	__raw_writel(new_val, hdq_data->hdq_base + offset);

	return new_val;
}

/*
 * Wait for one or more bits in the flag to change.
 * OMAP_HDQ_FLAG_SET: wait until any bit in the flag is set.
 * OMAP_HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared.
 * Return 0 on success and -ETIMEDOUT in the case of timeout.
 */
static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset,
		u8 flag, u8 flag_set, u8 *status)
{
	int ret = 0;
	unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT;

	if (flag_set == OMAP_HDQ_FLAG_CLEAR) {
		/* wait for the flag clear */
		while (((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (*status & flag)
			ret = -ETIMEDOUT;
	} else if (flag_set == OMAP_HDQ_FLAG_SET) {
		/* wait for the flag set */
		while (!((*status = hdq_reg_in(hdq_data, offset)) & flag)
			&& time_before(jiffies, timeout)) {
			schedule_timeout_uninterruptible(1);
		}
		if (!(*status & flag))
			ret = -ETIMEDOUT;
	} else
		return -EINVAL;

	return ret;
}

/* write out a byte and fill *status with HDQ_INT_STATUS */
static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status)
{
	int ret;
	u8 tmp_status;
	unsigned long irqflags;

	*status = 0;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val);

	/* set the GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
	/* wait for the TXCOMPLETE bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "TX wait elapsed\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	*status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TXCOMPLETE, %x",
			*status);
		ret = -ETIMEDOUT;
		goto out;
	}

	/* wait for the GO bit to return to zero */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_FLAG_CLEAR, &tmp_status);
	if (ret) {
		dev_dbg(hdq_data->dev, "timeout waiting for GO bit"
			" to return to zero, %x", tmp_status);
	}

out:
	return ret;
}

/* HDQ Interrupt service routine */
static irqreturn_t hdq_isr(int irq, void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	unsigned long irqflags;

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);
	dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus);

	if (hdq_data->hdq_irqstatus &
		(OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE
		| OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		/* wake up sleeping process */
		wake_up(&hdq_wait_queue);
	}

	return IRQ_HANDLED;
}

/* HDQ Mode: always return success */
static u8 omap_w1_reset_bus(void *_hdq)
{
	return 0;
}


/* W1 search callback function */
static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev,
		u8 search_type, w1_slave_found_callback slave_found)
{
	u64 module_id, rn_le, cs, id;

	if (w1_id)
		module_id = w1_id;
	else
		module_id = 0x1;

	rn_le = cpu_to_le64(module_id);
	/*
	 * HDQ might not truly obey the 1-wire spec.
	 * So calculate CRC based on module parameter.
	 */
	cs = w1_calc_crc8((u8 *)&rn_le, 7);
	id = (cs << 56) | module_id;

	slave_found(master_dev, id);
}

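/* Soft-reset the controller, then re-enable clocks, interrupts and autoidle */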
static int _omap_hdq_reset(struct hdq_data *hdq_data)
{
	int ret;
	u8 tmp_status;

	hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET);
	/*
	 * Select HDQ mode & enable clocks.
	 * It is observed that INT flags can't be cleared via a read and GO/INIT
	 * won't return to zero if interrupt is disabled. So we always enable
	 * interrupt.
	 */
	hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
		OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);

	/* wait for reset to complete */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS,
		OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x",
				tmp_status);
	else {
		hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
			OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
		hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
			OMAP_HDQ_SYSCONFIG_AUTOIDLE);
	}

	return ret;
}

/* Issue break pulse to the device */
static int omap_hdq_break(struct hdq_data *hdq_data)
{
	int ret = 0;
	u8 tmp_status;
	unsigned long irqflags;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		ret = -EINTR;
		goto rtn;
	}

	spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags);
	/* clear interrupt flags via a dummy read */
	hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
	/* ISR loads it with new INT_STATUS */
	hdq_data->hdq_irqstatus = 0;
	spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags);

	/* set the INIT and GO bit */
	hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
		OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO,
		OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
		OMAP_HDQ_CTRL_STATUS_GO);

	/* wait for the TIMEOUT bit */
	ret = wait_event_timeout(hdq_wait_queue,
		hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT);
	if (ret == 0) {
		dev_dbg(hdq_data->dev, "break wait elapsed\n");
		ret = -EINTR;
		goto out;
	}

	tmp_status = hdq_data->hdq_irqstatus;
	/* check irqstatus */
	if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) {
		dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x",
				tmp_status);
		ret = -ETIMEDOUT;
		goto out;
	}
	/*
	 * Wait for both INIT and GO bits to return to zero.
	 * Zero wait time is expected in interrupt mode.
	 */
	ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_INITIALIZATION |
			OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR,
			&tmp_status);
	if (ret)
		dev_dbg(hdq_data->dev, "timeout waiting for INIT and GO bits"
			" to return to zero, %x", tmp_status);

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

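/* Read one byte: start an RX cycle unless RXCOMPLETE is already pending */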
static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val)
{
	int ret = 0;
	u8 status;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (!hdq_data->hdq_usecount) {
		ret = -EINVAL;
		goto out;
	}

	if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO,
			OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO);
		/*
		 * The RX comes immediately after TX.
		 */
		wait_event_timeout(hdq_wait_queue,
				   (hdq_data->hdq_irqstatus
				    & OMAP_HDQ_INT_STATUS_RXCOMPLETE),
				   OMAP_HDQ_TIMEOUT);

		hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0,
			OMAP_HDQ_CTRL_STATUS_DIR);
		status = hdq_data->hdq_irqstatus;
		/* check irqstatus */
		if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) {
			dev_dbg(hdq_data->dev, "timeout waiting for"
				" RXCOMPLETE, %x", status);
			ret = -ETIMEDOUT;
			goto out;
		}
	}
	/* the data is ready. Read it in! */
	*val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA);
out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Enable clocks and set the controller to HDQ mode */
static int omap_hdq_get(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		ret = -EINTR;
		goto rtn;
	}

	if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to exceed the max use count");
		ret = -EINVAL;
		goto out;
	} else {
		hdq_data->hdq_usecount++;
		try_module_get(THIS_MODULE);
		if (1 == hdq_data->hdq_usecount) {

			pm_runtime_get_sync(hdq_data->dev);

			/* make sure HDQ is out of reset */
			if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) &
				OMAP_HDQ_SYSSTATUS_RESETDONE)) {
				ret = _omap_hdq_reset(hdq_data);
				if (ret)
					/* back up the count */
					hdq_data->hdq_usecount--;
			} else {
				/* select HDQ mode & enable clocks */
				hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS,
					OMAP_HDQ_CTRL_STATUS_CLOCKENABLE |
					OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK);
				hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG,
					OMAP_HDQ_SYSCONFIG_AUTOIDLE);
				hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS);
			}
		}
	}

out:
	mutex_unlock(&hdq_data->hdq_mutex);
rtn:
	return ret;
}

/* Disable clocks to the module */
static int omap_hdq_put(struct hdq_data *hdq_data)
{
	int ret = 0;

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0)
		return -EINTR;

	if (0 == hdq_data->hdq_usecount) {
		dev_dbg(hdq_data->dev, "attempt to decrement use count"
			" when it is zero");
		ret = -EINVAL;
	} else {
		hdq_data->hdq_usecount--;
		module_put(THIS_MODULE);
		if (0 == hdq_data->hdq_usecount)
			pm_runtime_put_sync(hdq_data->dev);
	}
	mutex_unlock(&hdq_data->hdq_mutex);

	return ret;
}

/* Read a byte of data from the device */
static u8 omap_w1_read_byte(void *_hdq)
{
	struct hdq_data *hdq_data = _hdq;
	u8 val = 0;
	int ret;

	ret = hdq_read_byte(hdq_data, &val);
	if (ret) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
		return -1;
	}

	/* Write followed by a read, release the module */
	if (hdq_data->init_trans) {
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return -EINTR;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
		omap_hdq_put(hdq_data);
	}

	return val;
}

/* Write a byte of data to the device */
static void omap_w1_write_byte(void *_hdq, u8 byte)
{
	struct hdq_data *hdq_data = _hdq;
	int ret;
	u8 status;

	/* First write to initialize the transfer */
	if (hdq_data->init_trans == 0)
		omap_hdq_get(hdq_data);

	ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
		return;
	}
	hdq_data->init_trans++;
	mutex_unlock(&hdq_data->hdq_mutex);

	ret = hdq_write_byte(hdq_data, byte, &status);
	if (ret < 0) {
		dev_dbg(hdq_data->dev, "TX failure: Ctrl status %x\n", status);
		return;
	}

	/* Second write, data transferred. Release the module */
	if (hdq_data->init_trans > 1) {
		omap_hdq_put(hdq_data);
		ret = mutex_lock_interruptible(&hdq_data->hdq_mutex);
		if (ret < 0) {
			dev_dbg(hdq_data->dev, "Could not acquire mutex\n");
			return;
		}
		hdq_data->init_trans = 0;
		mutex_unlock(&hdq_data->hdq_mutex);
	}
}

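/* Map the controller, request its IRQ and register a w1 bus master */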
static int omap_hdq_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct hdq_data *hdq_data;
	struct resource *res;
	int ret, irq;
	u8 rev;

	hdq_data = devm_kzalloc(dev, sizeof(*hdq_data), GFP_KERNEL);
	if (!hdq_data) {
		dev_dbg(&pdev->dev, "unable to allocate memory\n");
		return -ENOMEM;
	}

	hdq_data->dev = dev;
	platform_set_drvdata(pdev, hdq_data);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hdq_data->hdq_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(hdq_data->hdq_base))
		return PTR_ERR(hdq_data->hdq_base);

	hdq_data->hdq_usecount = 0;
	mutex_init(&hdq_data->hdq_mutex);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_sync(&pdev->dev);

	rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION);
	dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. Driver in %s mode\n",
		(rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt");

	spin_lock_init(&hdq_data->hdq_spinlock);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = -ENXIO;
		goto err_irq;
	}

	ret = devm_request_irq(dev, irq, hdq_isr, 0, "omap_hdq", hdq_data);
	if (ret < 0) {
		dev_dbg(&pdev->dev, "could not request irq\n");
		goto err_irq;
	}

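	/* issue an initial break pulse to reset any slave on the bus */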
	omap_hdq_break(hdq_data);

	pm_runtime_put_sync(&pdev->dev);

	omap_w1_master.data = hdq_data;

	ret = w1_add_master_device(&omap_w1_master);
	if (ret) {
		dev_dbg(&pdev->dev, "Failure in registering w1 master\n");
		goto err_w1;
	}

	return 0;

err_irq:
	pm_runtime_put_sync(&pdev->dev);
err_w1:
	pm_runtime_disable(&pdev->dev);

	return ret;
}

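/* Refuse to unbind while the controller still has active users */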
static int omap_hdq_remove(struct platform_device *pdev)
{
	struct hdq_data *hdq_data = platform_get_drvdata(pdev);

	mutex_lock(&hdq_data->hdq_mutex);

	if (hdq_data->hdq_usecount) {
		dev_dbg(&pdev->dev, "removed when use count is not zero\n");
		mutex_unlock(&hdq_data->hdq_mutex);
		return -EBUSY;
	}

	mutex_unlock(&hdq_data->hdq_mutex);

	/* unregister the w1 master added in probe */
	w1_remove_master_device(&omap_w1_master);

	/* remove module dependency */
	pm_runtime_disable(&pdev->dev);

	return 0;
}

module_platform_driver(omap_hdq_driver);

module_param(w1_id, int, S_IRUSR);
MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection");

MODULE_AUTHOR("Texas Instruments");
MODULE_DESCRIPTION("HDQ driver Library");
MODULE_LICENSE("GPL");