This source file includes the following definitions:
- nvec_register_notifier
- nvec_unregister_notifier
- nvec_status_notifier
- nvec_msg_alloc
- nvec_msg_free
- nvec_msg_is_event
- nvec_msg_size
- nvec_gpio_set_value
- nvec_write_async
- nvec_write_sync
- nvec_toggle_global_events
- nvec_event_mask
- nvec_request_master
- parse_msg
- nvec_dispatch
- nvec_tx_completed
- nvec_rx_completed
- nvec_invalid_flags
- nvec_tx_set
- nvec_interrupt
- tegra_init_i2c_slave
- nvec_disable_i2c_slave
- nvec_power_off
- tegra_nvec_probe
- tegra_nvec_remove
- nvec_suspend
- nvec_resume
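/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Author: Marc Dietrich <marvin24@gmx.de> (see MODULE_AUTHOR below)
 * License: GPL (see MODULE_LICENSE below)
 */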
12
13 #include <linux/kernel.h>
14 #include <linux/module.h>
15 #include <linux/atomic.h>
16 #include <linux/clk.h>
17 #include <linux/completion.h>
18 #include <linux/delay.h>
19 #include <linux/err.h>
20 #include <linux/gpio/consumer.h>
21 #include <linux/interrupt.h>
22 #include <linux/io.h>
23 #include <linux/irq.h>
24 #include <linux/of.h>
25 #include <linux/list.h>
26 #include <linux/mfd/core.h>
27 #include <linux/mutex.h>
28 #include <linux/notifier.h>
29 #include <linux/slab.h>
30 #include <linux/spinlock.h>
31 #include <linux/workqueue.h>
32
33 #include "nvec.h"
34
35 #define I2C_CNFG 0x00
36 #define I2C_CNFG_PACKET_MODE_EN BIT(10)
37 #define I2C_CNFG_NEW_MASTER_SFM BIT(11)
38 #define I2C_CNFG_DEBOUNCE_CNT_SHIFT 12
39
40 #define I2C_SL_CNFG 0x20
41 #define I2C_SL_NEWSL BIT(2)
42 #define I2C_SL_NACK BIT(1)
43 #define I2C_SL_RESP BIT(0)
44 #define I2C_SL_IRQ BIT(3)
45 #define END_TRANS BIT(4)
46 #define RCVD BIT(2)
47 #define RNW BIT(1)
48
49 #define I2C_SL_RCVD 0x24
50 #define I2C_SL_STATUS 0x28
51 #define I2C_SL_ADDR1 0x2c
52 #define I2C_SL_ADDR2 0x30
53 #define I2C_SL_DELAY_COUNT 0x3c
54
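/*
 * enum nvec_msg_category - buffer categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: message received from the EC
 * @NVEC_MSG_TX: message to be sent to the EC
 */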
60 enum nvec_msg_category {
61 NVEC_MSG_RX,
62 NVEC_MSG_TX,
63 };
64
65 enum nvec_sleep_subcmds {
66 GLOBAL_EVENTS,
67 AP_PWR_DOWN,
68 AP_SUSPEND,
69 };
70
71 #define CNF_EVENT_REPORTING 0x01
72 #define GET_FIRMWARE_VERSION 0x15
73 #define LID_SWITCH BIT(1)
74 #define PWR_BUTTON BIT(15)
75
76 static struct nvec_chip *nvec_power_handle;
77
78 static const struct mfd_cell nvec_devices[] = {
79 {
80 .name = "nvec-kbd",
81 },
82 {
83 .name = "nvec-mouse",
84 },
85 {
86 .name = "nvec-power",
87 .id = 0,
88 },
89 {
90 .name = "nvec-power",
91 .id = 1,
92 },
93 {
94 .name = "nvec-paz00",
95 },
96 };
97
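/*
 * nvec_register_notifier - register a notifier for incoming EC messages
 * @nvec: nvec handle
 * @nb: notifier block to register
 * @events: unused
 *
 * The notifier is called for every received message that is not the
 * response to a pending nvec_write_sync() request.
 */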
107 int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
108 unsigned int events)
109 {
110 return atomic_notifier_chain_register(&nvec->notifier_list, nb);
111 }
112 EXPORT_SYMBOL_GPL(nvec_register_notifier);
113
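/*
 * nvec_unregister_notifier - remove a notifier registered with
 * nvec_register_notifier()
 */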
122 int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
123 {
124 return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
125 }
126 EXPORT_SYMBOL_GPL(nvec_unregister_notifier);
127
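/*
 * nvec_status_notifier - catch-all handler for control (NVEC_CNTL) messages
 *
 * Warns about the unhandled message and hex-dumps its payload.
 */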
134 static int nvec_status_notifier(struct notifier_block *nb,
135 unsigned long event_type, void *data)
136 {
137 struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
138 nvec_status_notifier);
139 unsigned char *msg = data;
140
141 if (event_type != NVEC_CNTL)
142 return NOTIFY_DONE;
143
144 dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
145 print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
146 msg, msg[1] + 2, true);
147
148 return NOTIFY_OK;
149 }
150
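/*
 * nvec_msg_alloc - allocate a message buffer from the static pool
 * @nvec: nvec handle
 * @category: NVEC_MSG_TX or NVEC_MSG_RX
 *
 * Claims the first free slot with an atomic exchange on its 'used' flag.
 * TX allocations start at a quarter of the pool, leaving the first slots
 * reserved for RX buffers allocated from interrupt context. Returns NULL
 * when the pool is exhausted.
 */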
165 static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
166 enum nvec_msg_category category)
167 {
168 int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;
169
170 for (; i < NVEC_POOL_SIZE; i++) {
171 if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
172 dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
173 return &nvec->msg_pool[i];
174 }
175 }
176
177 dev_err(nvec->dev, "could not allocate %s buffer\n",
178 (category == NVEC_MSG_TX) ? "TX" : "RX");
179
180 return NULL;
181 }
182
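/*
 * nvec_msg_free - mark a message buffer as unused again
 *
 * Also works for the static tx_scratch buffer; only the debug message is
 * skipped in that case.
 */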
190 void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
191 {
192 if (msg != &nvec->tx_scratch)
193 dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
194 atomic_set(&msg->used, 0);
195 }
196 EXPORT_SYMBOL_GPL(nvec_msg_free);
197
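/* nvec_msg_is_event - true if bit 7 of the first byte marks the message as an event */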
202 static bool nvec_msg_is_event(struct nvec_msg *msg)
203 {
204 return msg->data[0] >> 7;
205 }
206
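/*
 * nvec_msg_size - return the full size of a message in bytes
 *
 * Two- and three-byte events encode their size in bits 5-6 of the first
 * byte; all other messages carry a payload length in the second byte,
 * plus two bytes for type and length.
 */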
213 static size_t nvec_msg_size(struct nvec_msg *msg)
214 {
215 bool is_event = nvec_msg_is_event(msg);
216 int event_length = (msg->data[0] & 0x60) >> 5;
217
218
219 if (!is_event || event_length == NVEC_VAR_SIZE)
220 return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
221 else if (event_length == NVEC_2BYTES)
222 return 2;
223 else if (event_length == NVEC_3BYTES)
224 return 3;
225 return 0;
226 }
227
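/*
 * nvec_gpio_set_value - drive the EC request GPIO and log the transition
 *
 * The line is pulled low to ask the EC to start a master transfer and
 * released (high) again once the transfer is under way.
 */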
235 static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
236 {
237 dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
238 gpiod_get_value(nvec->gpiod), value);
239 gpiod_set_value(nvec->gpiod, value);
240 }
241
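/*
 * nvec_write_async - queue a message for asynchronous transfer to the EC
 * @nvec: nvec handle
 * @data: message payload, starting with the command byte
 * @size: length of @data
 *
 * Copies @data into a TX buffer prefixed with its length, appends it to
 * the TX list and schedules the TX worker. Returns 0 on success or
 * -ENOMEM if no buffer is available. The buffer is freed by the worker
 * once the transfer has completed.
 */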
254 int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
255 short size)
256 {
257 struct nvec_msg *msg;
258 unsigned long flags;
259
260 msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
261
262 if (!msg)
263 return -ENOMEM;
264
265 msg->data[0] = size;
266 memcpy(msg->data + 1, data, size);
267 msg->size = size + 1;
268
269 spin_lock_irqsave(&nvec->tx_lock, flags);
270 list_add_tail(&msg->node, &nvec->tx_data);
271 spin_unlock_irqrestore(&nvec->tx_lock, flags);
272
273 schedule_work(&nvec->tx_work);
274
275 return 0;
276 }
277 EXPORT_SYMBOL(nvec_write_async);
278
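/*
 * nvec_write_sync - write a message to the EC and wait for its response
 * @nvec: nvec handle
 * @data: message payload, starting with the command byte
 * @size: length of @data
 * @msg: on success, set to the response; the caller must release it with
 *       nvec_msg_free()
 *
 * Only one synchronous write may be in flight at a time (serialized by
 * sync_write_mutex). Returns 0 on success, -ENOMEM if no TX buffer could
 * be allocated, or -ETIMEDOUT if no matching response arrives within two
 * seconds.
 */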
296 int nvec_write_sync(struct nvec_chip *nvec,
297 const unsigned char *data, short size,
298 struct nvec_msg **msg)
299 {
300 mutex_lock(&nvec->sync_write_mutex);
301
302 *msg = NULL;
303 nvec->sync_write_pending = (data[1] << 8) + data[0];
304
305 if (nvec_write_async(nvec, data, size) < 0) {
306 mutex_unlock(&nvec->sync_write_mutex);
307 return -ENOMEM;
308 }
309
310 dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
311 nvec->sync_write_pending);
312 if (!(wait_for_completion_timeout(&nvec->sync_write,
313 msecs_to_jiffies(2000)))) {
314 dev_warn(nvec->dev,
315 "timeout waiting for sync write to complete\n");
316 mutex_unlock(&nvec->sync_write_mutex);
317 return -ETIMEDOUT;
318 }
319
320 dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");
321
322 *msg = nvec->last_sync_msg;
323
324 mutex_unlock(&nvec->sync_write_mutex);
325
326 return 0;
327 }
328 EXPORT_SYMBOL(nvec_write_sync);
329
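/*
 * nvec_toggle_global_events - enable or disable event reporting by the EC
 * @state: true to enable, false to disable
 */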
337 static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
338 {
339 unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };
340
341 nvec_write_async(nvec, global_events, 3);
342 }
343
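/*
 * nvec_event_mask - fill the event mask bytes of an "enable event reporting"
 * request
 * @ev: 7-byte request buffer
 * @mask: event bits to enable
 *
 * The EC expects the 32-bit mask split over bytes 3-6 with the upper half
 * (bits 16-31) first, followed by the lower half (bits 0-15).
 */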
357 static void nvec_event_mask(char *ev, u32 mask)
358 {
359 ev[3] = mask >> 16 & 0xff;
360 ev[4] = mask >> 24 & 0xff;
361 ev[5] = mask >> 0 & 0xff;
362 ev[6] = mask >> 8 & 0xff;
363 }
364
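/*
 * nvec_request_master - TX work handler
 *
 * For each queued TX message, pull the request GPIO low so the EC starts
 * a master transfer, wait up to five seconds for it to complete, then
 * unlink and free the message. On timeout the request line is released
 * and the message stays queued.
 */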
373 static void nvec_request_master(struct work_struct *work)
374 {
375 struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
376 unsigned long flags;
377 long err;
378 struct nvec_msg *msg;
379
380 spin_lock_irqsave(&nvec->tx_lock, flags);
381 while (!list_empty(&nvec->tx_data)) {
382 msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
383 spin_unlock_irqrestore(&nvec->tx_lock, flags);
384 nvec_gpio_set_value(nvec, 0);
385 err = wait_for_completion_interruptible_timeout(
386 &nvec->ec_transfer, msecs_to_jiffies(5000));
387
388 if (err == 0) {
389 dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
390 nvec_gpio_set_value(nvec, 1);
391 msg->pos = 0;
392 }
393
394 spin_lock_irqsave(&nvec->tx_lock, flags);
395
396 if (err > 0) {
397 list_del_init(&msg->node);
398 nvec_msg_free(nvec, msg);
399 }
400 }
401 spin_unlock_irqrestore(&nvec->tx_lock, flags);
402 }
403
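/*
 * parse_msg - dispatch a received message to the notifier chain
 *
 * Non-event messages with a non-zero byte 3 are logged as EC errors and
 * dropped. System events are additionally hex-dumped. Everything else is
 * passed to the notifier chain, keyed on the masked type (data[0] & 0x8f).
 */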
412 static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
413 {
414 if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
415 dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
416 return -EINVAL;
417 }
418
419 if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
420 print_hex_dump(KERN_WARNING, "ec system event ",
421 DUMP_PREFIX_NONE, 16, 1, msg->data,
422 msg->data[1] + 2, true);
423
424 atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
425 msg->data);
426
427 return 0;
428 }
429
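/*
 * nvec_dispatch - RX work handler
 *
 * Takes received messages off the RX list. A message matching a pending
 * nvec_write_sync() request completes that request; all others go through
 * parse_msg() and are freed afterwards.
 */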
437 static void nvec_dispatch(struct work_struct *work)
438 {
439 struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
440 unsigned long flags;
441 struct nvec_msg *msg;
442
443 spin_lock_irqsave(&nvec->rx_lock, flags);
444 while (!list_empty(&nvec->rx_data)) {
445 msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
446 list_del_init(&msg->node);
447 spin_unlock_irqrestore(&nvec->rx_lock, flags);
448
449 if (nvec->sync_write_pending ==
450 (msg->data[2] << 8) + msg->data[0]) {
451 dev_dbg(nvec->dev, "sync write completed!\n");
452 nvec->sync_write_pending = 0;
453 nvec->last_sync_msg = msg;
454 complete(&nvec->sync_write);
455 } else {
456 parse_msg(nvec, msg);
457 nvec_msg_free(nvec, msg);
458 }
459 spin_lock_irqsave(&nvec->rx_lock, flags);
460 }
461 spin_unlock_irqrestore(&nvec->rx_lock, flags);
462 }
463
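/*
 * nvec_tx_completed - the EC ended a read of our TX message
 *
 * If the transfer stopped before the whole message was sent, rewind the
 * buffer and re-assert the request line so it is sent again.
 */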
470 static void nvec_tx_completed(struct nvec_chip *nvec)
471 {
472
473 if (nvec->tx->pos != nvec->tx->size) {
474 dev_err(nvec->dev, "premature END_TRANS, resending\n");
475 nvec->tx->pos = 0;
476 nvec_gpio_set_value(nvec, 0);
477 } else {
478 nvec->state = 0;
479 }
480 }
481
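/*
 * nvec_rx_completed - the EC finished writing a message to us
 *
 * Complete messages are queued for nvec_dispatch(); incomplete ones are
 * dropped with an error. Non-event messages also complete the ongoing EC
 * transfer.
 */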
488 static void nvec_rx_completed(struct nvec_chip *nvec)
489 {
490 if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
491 dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
492 (uint)nvec_msg_size(nvec->rx),
493 (uint)nvec->rx->pos);
494
495 nvec_msg_free(nvec, nvec->rx);
496 nvec->state = 0;
497
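/* an incomplete battery message still completes the pending EC transfer */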
499 if (nvec->rx->data[0] == NVEC_BAT)
500 complete(&nvec->ec_transfer);
501
502 return;
503 }
504
505 spin_lock(&nvec->rx_lock);
506
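/* queue the completed message for the dispatch worker */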
511 list_add_tail(&nvec->rx->node, &nvec->rx_data);
512
513 spin_unlock(&nvec->rx_lock);
514
515 nvec->state = 0;
516
517 if (!nvec_msg_is_event(nvec->rx))
518 complete(&nvec->ec_transfer);
519
520 schedule_work(&nvec->rx_work);
521 }
522
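/*
 * nvec_invalid_flags - log an unexpected i2c status word
 * @reset: if true, reset the state machine to idle
 */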
529 static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
530 bool reset)
531 {
532 dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
533 status, nvec->state);
534 if (reset)
535 nvec->state = 0;
536 }
537
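/*
 * nvec_tx_set - pick the next TX message for the EC to read
 *
 * If the TX list is unexpectedly empty, a three-byte no-op message from
 * the scratch buffer is sent instead.
 */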
546 static void nvec_tx_set(struct nvec_chip *nvec)
547 {
548 spin_lock(&nvec->tx_lock);
549 if (list_empty(&nvec->tx_data)) {
550 dev_err(nvec->dev, "empty tx - sending no-op\n");
551 memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
552 nvec->tx_scratch.size = 3;
553 nvec->tx_scratch.pos = 0;
554 nvec->tx = &nvec->tx_scratch;
555 list_add_tail(&nvec->tx->node, &nvec->tx_data);
556 } else {
557 nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
558 node);
559 nvec->tx->pos = 0;
560 }
561 spin_unlock(&nvec->tx_lock);
562
563 dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
564 (uint)nvec->tx->size, nvec->tx->data[1]);
565 }
566
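/*
 * nvec_interrupt - i2c slave interrupt handler
 *
 * Transfers are tracked by a small state machine:
 * 0 - idle, waiting for the EC to address us
 * 1 - address matched, waiting for the first byte of the message
 * 2 - first byte received, decide between EC read and EC write
 * 3 - the EC reads our queued TX message
 * 4 - the EC writes the remainder of its message to us
 */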
576 static irqreturn_t nvec_interrupt(int irq, void *dev)
577 {
578 unsigned long status;
579 unsigned int received = 0;
580 unsigned char to_send = 0xff;
581 const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
582 struct nvec_chip *nvec = dev;
583 unsigned int state = nvec->state;
584
585 status = readl(nvec->base + I2C_SL_STATUS);
586
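/* reject status words that carry unknown bits but none of the expected ones */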
588 if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
589 dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
590 return IRQ_HANDLED;
591 }
592 if ((status & I2C_SL_IRQ) == 0) {
593 dev_err(nvec->dev, "Spurious IRQ\n");
594 return IRQ_HANDLED;
595 }
596
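/* latch the received byte when the EC is writing to us (RNW clear) */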
598 if ((status & RNW) == 0) {
599 received = readl(nvec->base + I2C_SL_RCVD);
600 if (status & RCVD)
601 writel(0, nvec->base + I2C_SL_RCVD);
602 }
603
604 if (status == (I2C_SL_IRQ | RCVD))
605 nvec->state = 0;
606
607 switch (nvec->state) {
608 case 0:
609 if (status != (I2C_SL_IRQ | RCVD))
610 nvec_invalid_flags(nvec, status, false);
611 break;
612 case 1:
613 if (status != I2C_SL_IRQ) {
614 nvec_invalid_flags(nvec, status, true);
615 } else {
616 nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
617
618 if (unlikely(!nvec->rx)) {
619 nvec->state = 0;
620 break;
621 }
622 nvec->rx->data[0] = received;
623 nvec->rx->pos = 1;
624 nvec->state = 2;
625 }
626 break;
627 case 2:
628 if (status == (I2C_SL_IRQ | RNW | RCVD)) {
629 udelay(33);
630 if (nvec->rx->data[0] != 0x01) {
631 dev_err(nvec->dev,
632 "Read without prior read command\n");
633 nvec->state = 0;
634 break;
635 }
636 nvec_msg_free(nvec, nvec->rx);
637 nvec->state = 3;
638 nvec_tx_set(nvec);
639 to_send = nvec->tx->data[0];
640 nvec->tx->pos = 1;
641 } else if (status == (I2C_SL_IRQ)) {
642 nvec->rx->data[1] = received;
643 nvec->rx->pos = 2;
644 nvec->state = 4;
645 } else {
646 nvec_invalid_flags(nvec, status, true);
647 }
648 break;
649 case 3:
650 if (status & END_TRANS) {
651 nvec_tx_completed(nvec);
652 } else if ((status & RNW) == 0 || (status & RCVD)) {
653 nvec_invalid_flags(nvec, status, true);
654 } else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
655 to_send = nvec->tx->data[nvec->tx->pos++];
656 } else {
657 dev_err(nvec->dev,
658 "tx buffer underflow on %p (%u > %u)\n",
659 nvec->tx,
660 (uint)(nvec->tx ? nvec->tx->pos : 0),
661 (uint)(nvec->tx ? nvec->tx->size : 0));
662 nvec->state = 0;
663 }
664 break;
665 case 4:
666 if ((status & (END_TRANS | RNW)) == END_TRANS)
667 nvec_rx_completed(nvec);
668 else if (status & (RNW | RCVD))
669 nvec_invalid_flags(nvec, status, true);
670 else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
671 nvec->rx->data[nvec->rx->pos++] = received;
672 else
673 dev_err(nvec->dev,
674 "RX buffer overflow on %p: Trying to write byte %u of %u\n",
675 nvec->rx, nvec->rx ? nvec->rx->pos : 0,
676 NVEC_MSG_SIZE);
677 break;
678 default:
679 nvec->state = 0;
680 }
681
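/* an address byte in write direction starts a new transfer */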
683 if ((status & (RCVD | RNW)) == RCVD) {
684 if (received != nvec->i2c_addr)
685 dev_err(nvec->dev,
686 "received address 0x%02x, expected 0x%02x\n",
687 received, nvec->i2c_addr);
688 nvec->state = 1;
689 }
690
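/* master read in progress: hand the next byte to the hardware */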
692 if ((status & (RNW | END_TRANS)) == RNW)
693 writel(to_send, nvec->base + I2C_SL_RCVD);
694
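/* the EC has started reading our message: release the request GPIO */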
696 if (status == (I2C_SL_IRQ | RNW | RCVD))
697 nvec_gpio_set_value(nvec, 1);
698
699 dev_dbg(nvec->dev,
700 "Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
701 (status & RNW) == 0 ? "received" : "R=",
702 received,
703 (status & (RNW | END_TRANS)) ? "sent" : "S=",
704 to_send,
705 state,
706 status & END_TRANS ? " END_TRANS" : "",
707 status & RCVD ? " RCVD" : "",
708 status & RNW ? " RNW" : "");
709
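/* short settle delay before leaving the interrupt handler */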
716 udelay(100);
717
718 return IRQ_HANDLED;
719 }
720
721 static void tegra_init_i2c_slave(struct nvec_chip *nvec)
722 {
723 u32 val;
724
725 clk_prepare_enable(nvec->i2c_clk);
726
727 reset_control_assert(nvec->rst);
728 udelay(2);
729 reset_control_deassert(nvec->rst);
730
731 val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
732 (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
733 writel(val, nvec->base + I2C_CNFG);
734
735 clk_set_rate(nvec->i2c_clk, 8 * 80000);
736
737 writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
738 writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);
739
740 writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
741 writel(0, nvec->base + I2C_SL_ADDR2);
742
743 enable_irq(nvec->irq);
744 }
745
746 #ifdef CONFIG_PM_SLEEP
747 static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
748 {
749 disable_irq(nvec->irq);
750 writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
751 clk_disable_unprepare(nvec->i2c_clk);
752 }
753 #endif
754
755 static void nvec_power_off(void)
756 {
757 char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };
758
759 nvec_toggle_global_events(nvec_power_handle, false);
760 nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
761 }
762
763 static int tegra_nvec_probe(struct platform_device *pdev)
764 {
765 int err, ret;
766 struct clk *i2c_clk;
767 struct device *dev = &pdev->dev;
768 struct nvec_chip *nvec;
769 struct nvec_msg *msg;
770 void __iomem *base;
771 char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
772 unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
773 enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };
774
775 if (!dev->of_node) {
776 dev_err(dev, "must be instantiated using device tree\n");
777 return -ENODEV;
778 }
779
780 nvec = devm_kzalloc(dev, sizeof(struct nvec_chip), GFP_KERNEL);
781 if (!nvec)
782 return -ENOMEM;
783
784 platform_set_drvdata(pdev, nvec);
785 nvec->dev = dev;
786
787 if (of_property_read_u32(dev->of_node, "slave-addr", &nvec->i2c_addr)) {
788 dev_err(dev, "no i2c address specified\n");
789 return -ENODEV;
790 }
791
792 base = devm_platform_ioremap_resource(pdev, 0);
793 if (IS_ERR(base))
794 return PTR_ERR(base);
795
796 nvec->irq = platform_get_irq(pdev, 0);
797 if (nvec->irq < 0)
798 return -ENODEV;
799
800 i2c_clk = devm_clk_get(dev, "div-clk");
801 if (IS_ERR(i2c_clk)) {
802 dev_err(dev, "failed to get controller clock\n");
803 return -ENODEV;
804 }
805
806 nvec->rst = devm_reset_control_get_exclusive(dev, "i2c");
807 if (IS_ERR(nvec->rst)) {
808 dev_err(dev, "failed to get controller reset\n");
809 return PTR_ERR(nvec->rst);
810 }
811
812 nvec->base = base;
813 nvec->i2c_clk = i2c_clk;
814 nvec->rx = &nvec->msg_pool[0];
815
816 ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);
817
818 init_completion(&nvec->sync_write);
819 init_completion(&nvec->ec_transfer);
820 mutex_init(&nvec->sync_write_mutex);
821 spin_lock_init(&nvec->tx_lock);
822 spin_lock_init(&nvec->rx_lock);
823 INIT_LIST_HEAD(&nvec->rx_data);
824 INIT_LIST_HEAD(&nvec->tx_data);
825 INIT_WORK(&nvec->rx_work, nvec_dispatch);
826 INIT_WORK(&nvec->tx_work, nvec_request_master);
827
828 nvec->gpiod = devm_gpiod_get(dev, "request", GPIOD_OUT_HIGH);
829 if (IS_ERR(nvec->gpiod)) {
830 dev_err(dev, "couldn't request gpio\n");
831 return PTR_ERR(nvec->gpiod);
832 }
833
834 err = devm_request_irq(dev, nvec->irq, nvec_interrupt, 0,
835 "nvec", nvec);
836 if (err) {
837 dev_err(dev, "couldn't request irq\n");
838 return -ENODEV;
839 }
840 disable_irq(nvec->irq);
841
842 tegra_init_i2c_slave(nvec);
843
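/* enable EC event reporting */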
845 nvec_toggle_global_events(nvec, true);
846
847 nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
848 nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);
849
850 nvec_power_handle = nvec;
851 pm_power_off = nvec_power_off;
852
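/* query the EC firmware version */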
854 err = nvec_write_sync(nvec, get_firmware_version, 2, &msg);
855
856 if (!err) {
857 dev_warn(dev,
858 "ec firmware version %02x.%02x.%02x / %02x\n",
859 msg->data[4], msg->data[5],
860 msg->data[6], msg->data[7]);
861
862 nvec_msg_free(nvec, msg);
863 }
864
865 ret = mfd_add_devices(dev, 0, nvec_devices,
866 ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
867 if (ret)
868 dev_err(dev, "error adding subdevices\n");
869
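/* unmute the speakers */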
871 nvec_write_async(nvec, unmute_speakers, 4);
872
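/* enable lid switch events */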
874 nvec_event_mask(enable_event, LID_SWITCH);
875 nvec_write_async(nvec, enable_event, 7);
876
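/* enable power button events */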
878 nvec_event_mask(enable_event, PWR_BUTTON);
879 nvec_write_async(nvec, enable_event, 7);
880
881 return 0;
882 }
883
884 static int tegra_nvec_remove(struct platform_device *pdev)
885 {
886 struct nvec_chip *nvec = platform_get_drvdata(pdev);
887
888 nvec_toggle_global_events(nvec, false);
889 mfd_remove_devices(nvec->dev);
890 nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
891 cancel_work_sync(&nvec->rx_work);
892 cancel_work_sync(&nvec->tx_work);
893
894 pm_power_off = NULL;
895
896 return 0;
897 }
898
899 #ifdef CONFIG_PM_SLEEP
900 static int nvec_suspend(struct device *dev)
901 {
902 int err;
903 struct nvec_chip *nvec = dev_get_drvdata(dev);
904 struct nvec_msg *msg;
905 char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };
906
907 dev_dbg(nvec->dev, "suspending\n");
908
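/* disable event reporting before announcing the suspend to the EC */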
910 nvec_toggle_global_events(nvec, false);
911
912 err = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend), &msg);
913 if (!err)
914 nvec_msg_free(nvec, msg);
915
916 nvec_disable_i2c_slave(nvec);
917
918 return 0;
919 }
920
921 static int nvec_resume(struct device *dev)
922 {
923 struct nvec_chip *nvec = dev_get_drvdata(dev);
924
925 dev_dbg(nvec->dev, "resuming\n");
926 tegra_init_i2c_slave(nvec);
927 nvec_toggle_global_events(nvec, true);
928
929 return 0;
930 }
931 #endif
932
933 static SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);
934
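/* device tree match table */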
936 static const struct of_device_id nvidia_nvec_of_match[] = {
937 { .compatible = "nvidia,nvec", },
938 {},
939 };
940 MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);
941
942 static struct platform_driver nvec_device_driver = {
943 .probe = tegra_nvec_probe,
944 .remove = tegra_nvec_remove,
945 .driver = {
946 .name = "nvec",
947 .pm = &nvec_pm_ops,
948 .of_match_table = nvidia_nvec_of_match,
949 }
950 };
951
952 module_platform_driver(nvec_device_driver);
953
954 MODULE_ALIAS("platform:nvec");
955 MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
956 MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
957 MODULE_LICENSE("GPL");