/*
 * NVEC: NVIDIA compliant embedded controller interface
 *
 * Copyright (C) 2011 The AC100 Kernel Team <ac100@lists.lauchpad.net>
 *
 * Authors:  Pierre-Hugues Husson <phhusson@free.fr>
 *           Ilya Petrov <ilya.muromec@gmail.com>
 *           Marc Dietrich <marvin24@gmx.de>
 *           Julian Andres Klode <jak@jak-linux.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 */

/* #define DEBUG */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/atomic.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/list.h>
#include <linux/mfd/core.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

#include "nvec.h"

#define I2C_CNFG			0x00
#define I2C_CNFG_PACKET_MODE_EN		(1<<10)
#define I2C_CNFG_NEW_MASTER_SFM		(1<<11)
#define I2C_CNFG_DEBOUNCE_CNT_SHIFT	12

#define I2C_SL_CNFG		0x20
#define I2C_SL_NEWSL		(1<<2)
#define I2C_SL_NACK		(1<<1)
#define I2C_SL_RESP		(1<<0)
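
/* flags checked against the I2C_SL_STATUS value in the interrupt handler */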
#define I2C_SL_IRQ		(1<<3)
#define END_TRANS		(1<<4)
#define RCVD			(1<<2)
#define RNW			(1<<1)

#define I2C_SL_RCVD		0x24
#define I2C_SL_STATUS		0x28
#define I2C_SL_ADDR1		0x2c
#define I2C_SL_ADDR2		0x30
#define I2C_SL_DELAY_COUNT	0x3c

/**
 * enum nvec_msg_category - Message categories for nvec_msg_alloc()
 * @NVEC_MSG_RX: The message is an incoming message (from EC)
 * @NVEC_MSG_TX: The message is an outgoing message (to EC)
 */
enum nvec_msg_category {
	NVEC_MSG_RX,
	NVEC_MSG_TX,
};

enum nvec_sleep_subcmds {
	GLOBAL_EVENTS,
	AP_PWR_DOWN,
	AP_SUSPEND,
};

#define CNF_EVENT_REPORTING 0x01
#define GET_FIRMWARE_VERSION 0x15
#define LID_SWITCH BIT(1)
#define PWR_BUTTON BIT(15)

static struct nvec_chip *nvec_power_handle;

static const struct mfd_cell nvec_devices[] = {
	{
		.name = "nvec-kbd",
	},
	{
		.name = "nvec-mouse",
	},
	{
		.name = "nvec-power",
		.id = 0,
	},
	{
		.name = "nvec-power",
		.id = 1,
	},
	{
		.name = "nvec-paz00",
	},
};

/**
 * nvec_register_notifier - Register a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to register
 * @events: Unused
 *
 * Registers a notifier with @nvec. The notifier will be added to an atomic
 * notifier chain that is called for all received messages except those that
 * correspond to a request initiated by nvec_write_sync().
 */
int nvec_register_notifier(struct nvec_chip *nvec, struct notifier_block *nb,
			   unsigned int events)
{
	return atomic_notifier_chain_register(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_register_notifier);
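
/*
 * Illustrative sketch only (the callback and notifier block names below are
 * made up): a sub-driver typically registers a notifier from its probe
 * routine and inspects the raw message bytes in the callback.
 *
 *	static int my_notifier_call(struct notifier_block *nb,
 *				    unsigned long event_type, void *data)
 *	{
 *		unsigned char *msg = data;
 *
 *		if (event_type != NVEC_CNTL)
 *			return NOTIFY_DONE;
 *
 *		(handle the payload in msg[2] .. msg[msg[1] + 1] here)
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_notifier_call,
 *	};
 *
 *	nvec_register_notifier(nvec, &my_nb, 0);
 */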

/**
 * nvec_unregister_notifier - Unregister a notifier with nvec
 * @nvec: A &struct nvec_chip
 * @nb: The notifier block to unregister
 *
 * Unregisters a notifier with @nvec. The notifier will be removed from the
 * atomic notifier chain.
 */
int nvec_unregister_notifier(struct nvec_chip *nvec, struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&nvec->notifier_list, nb);
}
EXPORT_SYMBOL_GPL(nvec_unregister_notifier);

/**
 * nvec_status_notifier - The final notifier
 * @nb: The notifier block
 * @event_type: The type of the received message
 * @data: The message bytes
 *
 * Prints a message about control events not handled in the notifier
 * chain.
 */
static int nvec_status_notifier(struct notifier_block *nb,
				unsigned long event_type, void *data)
{
	struct nvec_chip *nvec = container_of(nb, struct nvec_chip,
						nvec_status_notifier);
	unsigned char *msg = (unsigned char *)data;

	if (event_type != NVEC_CNTL)
		return NOTIFY_DONE;

	dev_warn(nvec->dev, "unhandled msg type %ld\n", event_type);
	print_hex_dump(KERN_WARNING, "payload: ", DUMP_PREFIX_NONE, 16, 1,
		msg, msg[1] + 2, true);

	return NOTIFY_OK;
}

/**
 * nvec_msg_alloc - Allocate a message from the pool
 * @nvec: A &struct nvec_chip
 * @category: Pool category, see &enum nvec_msg_category
 *
 * Allocate a single &struct nvec_msg object from the message pool of
 * @nvec. The result shall be passed to nvec_msg_free() if no longer
 * used.
 *
 * Outgoing messages are placed in the upper 75% of the pool, keeping the
 * lower 25% available for RX buffers only. The reason is to prevent a
 * situation where all buffers are full and a message is thus endlessly
 * retried because the response could never be processed.
 */
static struct nvec_msg *nvec_msg_alloc(struct nvec_chip *nvec,
				       enum nvec_msg_category category)
{
	int i = (category == NVEC_MSG_TX) ? (NVEC_POOL_SIZE / 4) : 0;

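	/*
	 * Claim the first free slot; atomic_xchg() keeps this safe against
	 * concurrent allocations from interrupt context.
	 */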
	for (; i < NVEC_POOL_SIZE; i++) {
		if (atomic_xchg(&nvec->msg_pool[i].used, 1) == 0) {
			dev_vdbg(nvec->dev, "INFO: Allocate %i\n", i);
			return &nvec->msg_pool[i];
		}
	}

	dev_err(nvec->dev, "could not allocate %s buffer\n",
		(category == NVEC_MSG_TX) ? "TX" : "RX");

	return NULL;
}

/**
 * nvec_msg_free - Free a message back to the pool
 * @nvec: A &struct nvec_chip
 * @msg:  A message (must be allocated by nvec_msg_alloc() and belong to @nvec)
 *
 * Free the given message
 */
void nvec_msg_free(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if (msg != &nvec->tx_scratch)
		dev_vdbg(nvec->dev, "INFO: Free %ti\n", msg - nvec->msg_pool);
	atomic_set(&msg->used, 0);
}
EXPORT_SYMBOL_GPL(nvec_msg_free);

/**
 * nvec_msg_is_event - Return %true if @msg is an event
 * @msg: A message
 */
static bool nvec_msg_is_event(struct nvec_msg *msg)
{
	return msg->data[0] >> 7;
}

/**
 * nvec_msg_size - Get the size of a message
 * @msg: The message to get the size for
 *
 * This only works for received messages, not for outgoing messages.
 */
static size_t nvec_msg_size(struct nvec_msg *msg)
{
	bool is_event = nvec_msg_is_event(msg);
	int event_length = (msg->data[0] & 0x60) >> 5;

	/* for variable size, payload size in byte 1 + count (1) + cmd (1) */
	if (!is_event || event_length == NVEC_VAR_SIZE)
		return (msg->pos || msg->size) ? (msg->data[1] + 2) : 0;
	else if (event_length == NVEC_2BYTES)
		return 2;
	else if (event_length == NVEC_3BYTES)
		return 3;
	return 0;
}

/**
 * nvec_gpio_set_value - Set the GPIO value
 * @nvec: A &struct nvec_chip
 * @value: The value to write (0 or 1)
 *
 * Like gpio_set_value(), but generating debugging information
 */
static void nvec_gpio_set_value(struct nvec_chip *nvec, int value)
{
	dev_dbg(nvec->dev, "GPIO changed from %u to %u\n",
		gpio_get_value(nvec->gpio), value);
	gpio_set_value(nvec->gpio, value);
}

/**
 * nvec_write_async - Asynchronously write a message to NVEC
 * @nvec: An nvec_chip instance
 * @data: The message data, starting with the request type
 * @size: The size of @data
 *
 * Queue a single message to be transferred to the embedded controller
 * and return immediately.
 *
 * Returns: 0 on success, a negative error code on failure. If a failure
 * occurred, the nvec driver may print an error.
 */
int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
			short size)
{
	struct nvec_msg *msg;
	unsigned long flags;

	msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);

	if (msg == NULL)
		return -ENOMEM;

	msg->data[0] = size;
	memcpy(msg->data + 1, data, size);
	msg->size = size + 1;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	list_add_tail(&msg->node, &nvec->tx_data);
	spin_unlock_irqrestore(&nvec->tx_lock, flags);

	schedule_work(&nvec->tx_work);

	return 0;
}
EXPORT_SYMBOL(nvec_write_async);
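
/*
 * Illustrative sketch only (the error handling shown is made up; the command
 * bytes match nvec_toggle_global_events() below): queue a request and return
 * without waiting for the EC to answer.
 *
 *	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, true };
 *
 *	if (nvec_write_async(nvec, global_events, 3) < 0)
 *		dev_warn(nvec->dev, "could not queue EC request\n");
 */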

/**
 * nvec_write_sync - Write a message to nvec and read the response
 * @nvec: An &struct nvec_chip
 * @data: The data to write
 * @size: The size of @data
 *
 * This is similar to nvec_write_async(), but waits for the
 * request to be answered before returning. This function
 * uses a mutex and can thus not be called from e.g.
 * interrupt handlers.
 *
 * Returns: A pointer to the response message on success,
 * %NULL on failure. Free with nvec_msg_free() once no longer
 * used.
 */
struct nvec_msg *nvec_write_sync(struct nvec_chip *nvec,
		const unsigned char *data, short size)
{
	struct nvec_msg *msg;

	mutex_lock(&nvec->sync_write_mutex);

	nvec->sync_write_pending = (data[1] << 8) + data[0];

	if (nvec_write_async(nvec, data, size) < 0) {
		mutex_unlock(&nvec->sync_write_mutex);
		return NULL;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: 0x%04x\n",
					nvec->sync_write_pending);
	if (!(wait_for_completion_timeout(&nvec->sync_write,
				msecs_to_jiffies(2000)))) {
		dev_warn(nvec->dev, "timeout waiting for sync write to complete\n");
		mutex_unlock(&nvec->sync_write_mutex);
		return NULL;
	}

	dev_dbg(nvec->dev, "nvec_sync_write: pong!\n");

	msg = nvec->last_sync_msg;

	mutex_unlock(&nvec->sync_write_mutex);

	return msg;
}
EXPORT_SYMBOL(nvec_write_sync);
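
/*
 * Illustrative sketch only, mirroring the firmware version query in
 * tegra_nvec_probe(): send a request, check for a NULL return (timeout or
 * allocation failure) and free the response when done with it.
 *
 *	char get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION };
 *	struct nvec_msg *msg;
 *
 *	msg = nvec_write_sync(nvec, get_firmware_version, 2);
 *	if (msg) {
 *		(use msg->data here)
 *		nvec_msg_free(nvec, msg);
 *	}
 */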

/**
 * nvec_toggle_global_events - enables or disables global event reporting
 * @nvec: nvec handle
 * @state: true for enable, false for disable
 *
 * This switches on/off global event reports by the embedded controller.
 */
static void nvec_toggle_global_events(struct nvec_chip *nvec, bool state)
{
	unsigned char global_events[] = { NVEC_SLEEP, GLOBAL_EVENTS, state };

	nvec_write_async(nvec, global_events, 3);
}

/**
 * nvec_event_mask - fill the command string with an event bitfield
 * @ev: points to the event command string
 * @mask: bits to insert into the event mask
 *
 * The configure event command expects a 32 bit bitfield which describes
 * which events to enable. The bitfield has the following structure
 * (from highest byte to lowest):
 *	system state bits 7-0
 *	system state bits 15-8
 *	oem system state bits 7-0
 *	oem system state bits 15-8
 */
static void nvec_event_mask(char *ev, u32 mask)
{
	ev[3] = mask >> 16 & 0xff;
	ev[4] = mask >> 24 & 0xff;
	ev[5] = mask >> 0  & 0xff;
	ev[6] = mask >> 8  & 0xff;
}
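
/*
 * Worked example: for mask == LID_SWITCH (bit 1) this yields
 * ev[3] = 0x00, ev[4] = 0x00, ev[5] = 0x02, ev[6] = 0x00, while
 * PWR_BUTTON (bit 15) ends up as ev[6] = 0x80 with the rest zero.
 */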

/**
 * nvec_request_master - Process outgoing messages
 * @work: A &struct work_struct (the tx_work member of &struct nvec_chip)
 *
 * Processes all outgoing requests by sending the request and awaiting the
 * response, then continuing with the next request. Once a request has a
 * matching response, it will be freed and removed from the list.
 */
static void nvec_request_master(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, tx_work);
	unsigned long flags;
	long err;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->tx_lock, flags);
	while (!list_empty(&nvec->tx_data)) {
		msg = list_first_entry(&nvec->tx_data, struct nvec_msg, node);
		spin_unlock_irqrestore(&nvec->tx_lock, flags);
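		/*
		 * Pulling the request line low asks the EC to start a
		 * transfer; the interrupt handler releases it again once
		 * the first byte has been sent.
		 */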
		nvec_gpio_set_value(nvec, 0);
		err = wait_for_completion_interruptible_timeout(
				&nvec->ec_transfer, msecs_to_jiffies(5000));

		if (err == 0) {
			dev_warn(nvec->dev, "timeout waiting for ec transfer\n");
			nvec_gpio_set_value(nvec, 1);
			msg->pos = 0;
		}

		spin_lock_irqsave(&nvec->tx_lock, flags);

		if (err > 0) {
			list_del_init(&msg->node);
			nvec_msg_free(nvec, msg);
		}
	}
	spin_unlock_irqrestore(&nvec->tx_lock, flags);
}

/**
 * parse_msg - Print some information and call the notifiers on an RX message
 * @nvec: A &struct nvec_chip
 * @msg: A message received by @nvec
 *
 * Parse some pieces of the message and then call the chain of notifiers
 * registered via nvec_register_notifier().
 */
static int parse_msg(struct nvec_chip *nvec, struct nvec_msg *msg)
{
	if ((msg->data[0] & 1 << 7) == 0 && msg->data[3]) {
		dev_err(nvec->dev, "ec responded %*ph\n", 4, msg->data);
		return -EINVAL;
	}

	if ((msg->data[0] >> 7) == 1 && (msg->data[0] & 0x0f) == 5)
		print_hex_dump(KERN_WARNING, "ec system event ",
				DUMP_PREFIX_NONE, 16, 1, msg->data,
				msg->data[1] + 2, true);

	atomic_notifier_call_chain(&nvec->notifier_list, msg->data[0] & 0x8f,
				   msg->data);

	return 0;
}

/**
 * nvec_dispatch - Process messages received from the EC
 * @work: A &struct work_struct (the rx_work member of &struct nvec_chip)
 *
 * Process messages previously received from the EC and put into the RX
 * queue of the &struct nvec_chip instance associated with @work.
 */
static void nvec_dispatch(struct work_struct *work)
{
	struct nvec_chip *nvec = container_of(work, struct nvec_chip, rx_work);
	unsigned long flags;
	struct nvec_msg *msg;

	spin_lock_irqsave(&nvec->rx_lock, flags);
	while (!list_empty(&nvec->rx_data)) {
		msg = list_first_entry(&nvec->rx_data, struct nvec_msg, node);
		list_del_init(&msg->node);
		spin_unlock_irqrestore(&nvec->rx_lock, flags);

		if (nvec->sync_write_pending ==
		      (msg->data[2] << 8) + msg->data[0]) {
			dev_dbg(nvec->dev, "sync write completed!\n");
			nvec->sync_write_pending = 0;
			nvec->last_sync_msg = msg;
			complete(&nvec->sync_write);
		} else {
			parse_msg(nvec, msg);
			nvec_msg_free(nvec, msg);
		}
		spin_lock_irqsave(&nvec->rx_lock, flags);
	}
	spin_unlock_irqrestore(&nvec->rx_lock, flags);
}

/**
 * nvec_tx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on a TX transfer.
 */
static void nvec_tx_completed(struct nvec_chip *nvec)
{
	/* We got an END_TRANS, let's skip this, maybe there's an event */
	if (nvec->tx->pos != nvec->tx->size) {
		dev_err(nvec->dev, "premature END_TRANS, resending\n");
		nvec->tx->pos = 0;
		nvec_gpio_set_value(nvec, 0);
	} else {
		nvec->state = 0;
	}
}

/**
 * nvec_rx_completed - Complete the current transfer
 * @nvec: A &struct nvec_chip
 *
 * This is called when we have received an END_TRANS on an RX transfer.
 */
static void nvec_rx_completed(struct nvec_chip *nvec)
{
	if (nvec->rx->pos != nvec_msg_size(nvec->rx)) {
		dev_err(nvec->dev, "RX incomplete: Expected %u bytes, got %u\n",
			   (uint) nvec_msg_size(nvec->rx),
			   (uint) nvec->rx->pos);

		/* Battery quirk - Often incomplete, and likes to crash */
		if (nvec->rx->data[0] == NVEC_BAT)
			complete(&nvec->ec_transfer);

		nvec_msg_free(nvec, nvec->rx);
		nvec->state = 0;

		return;
	}

	spin_lock(&nvec->rx_lock);

	/*
	 * Add the received data to the work list and move the ring buffer
	 * pointer to the next entry.
	 */
	list_add_tail(&nvec->rx->node, &nvec->rx_data);

	spin_unlock(&nvec->rx_lock);

	nvec->state = 0;

	if (!nvec_msg_is_event(nvec->rx))
		complete(&nvec->ec_transfer);

	schedule_work(&nvec->rx_work);
}

/**
 * nvec_invalid_flags - Send an error message about invalid flags and jump
 * @nvec: The nvec device
 * @status: The status flags
 * @reset: Whether we shall jump to state 0.
 */
static void nvec_invalid_flags(struct nvec_chip *nvec, unsigned int status,
			       bool reset)
{
	dev_err(nvec->dev, "unexpected status flags 0x%02x during state %i\n",
		status, nvec->state);
	if (reset)
		nvec->state = 0;
}

/**
 * nvec_tx_set - Set the message to transfer (nvec->tx)
 * @nvec: A &struct nvec_chip
 *
 * Gets the first entry from the tx_data list of @nvec and sets the
 * tx member to it. If the tx_data list is empty, this uses the
 * tx_scratch message to send a no operation message.
 */
static void nvec_tx_set(struct nvec_chip *nvec)
{
	spin_lock(&nvec->tx_lock);
	if (list_empty(&nvec->tx_data)) {
		dev_err(nvec->dev, "empty tx - sending no-op\n");
		memcpy(nvec->tx_scratch.data, "\x02\x07\x02", 3);
		nvec->tx_scratch.size = 3;
		nvec->tx_scratch.pos = 0;
		nvec->tx = &nvec->tx_scratch;
		list_add_tail(&nvec->tx->node, &nvec->tx_data);
	} else {
		nvec->tx = list_first_entry(&nvec->tx_data, struct nvec_msg,
					    node);
		nvec->tx->pos = 0;
	}
	spin_unlock(&nvec->tx_lock);

	dev_dbg(nvec->dev, "Sending message of length %u, command 0x%x\n",
		(uint)nvec->tx->size, nvec->tx->data[1]);
}

/**
 * nvec_interrupt - Interrupt handler
 * @irq: The IRQ
 * @dev: The nvec device
 *
 * Interrupt handler that fills our RX buffers and empties our TX
 * buffers. This uses a finite state machine with ridiculous amounts
 * of error checking, in order to be fairly reliable.
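 *
 * State machine overview (the state numbers used below):
 *	0: idle, waiting for a new transfer to start
 *	1: our slave address was received, expecting the command byte
 *	2: first byte after the command; decides between read and write
 *	3: the EC reads from us, we transmit the queued TX message
 *	4: the EC writes to us, we collect the bytes into the RX message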
 */
static irqreturn_t nvec_interrupt(int irq, void *dev)
{
	unsigned long status;
	unsigned int received = 0;
	unsigned char to_send = 0xff;
	const unsigned long irq_mask = I2C_SL_IRQ | END_TRANS | RCVD | RNW;
	struct nvec_chip *nvec = dev;
	unsigned int state = nvec->state;

	status = readl(nvec->base + I2C_SL_STATUS);

	/* Filter out some errors */
	if ((status & irq_mask) == 0 && (status & ~irq_mask) != 0) {
		dev_err(nvec->dev, "unexpected irq mask %lx\n", status);
		return IRQ_HANDLED;
	}
	if ((status & I2C_SL_IRQ) == 0) {
		dev_err(nvec->dev, "Spurious IRQ\n");
		return IRQ_HANDLED;
	}

	/* The EC did not request a read, so it sent us something; read it */
	if ((status & RNW) == 0) {
		received = readl(nvec->base + I2C_SL_RCVD);
		if (status & RCVD)
			writel(0, nvec->base + I2C_SL_RCVD);
	}

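	/* a bare transfer start (address byte only) resets the state machine */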
	if (status == (I2C_SL_IRQ | RCVD))
		nvec->state = 0;

	switch (nvec->state) {
	case 0:		/* Verify that it's a transfer start, the rest later */
		if (status != (I2C_SL_IRQ | RCVD))
			nvec_invalid_flags(nvec, status, false);
		break;
	case 1:		/* command byte */
		if (status != I2C_SL_IRQ) {
			nvec_invalid_flags(nvec, status, true);
		} else {
			nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
			/* Should not happen in a normal world */
			if (unlikely(nvec->rx == NULL)) {
				nvec->state = 0;
				break;
			}
			nvec->rx->data[0] = received;
			nvec->rx->pos = 1;
			nvec->state = 2;
		}
		break;
	case 2:		/* first byte after command */
		if (status == (I2C_SL_IRQ | RNW | RCVD)) {
			udelay(33);
			if (nvec->rx->data[0] != 0x01) {
				dev_err(nvec->dev,
					"Read without prior read command\n");
				nvec->state = 0;
				break;
			}
			nvec_msg_free(nvec, nvec->rx);
			nvec->state = 3;
			nvec_tx_set(nvec);
			BUG_ON(nvec->tx->size < 1);
			to_send = nvec->tx->data[0];
			nvec->tx->pos = 1;
		} else if (status == (I2C_SL_IRQ)) {
			BUG_ON(nvec->rx == NULL);
			nvec->rx->data[1] = received;
			nvec->rx->pos = 2;
			nvec->state = 4;
		} else {
			nvec_invalid_flags(nvec, status, true);
		}
		break;
	case 3:		/* EC does a block read, we transmit data */
		if (status & END_TRANS) {
			nvec_tx_completed(nvec);
		} else if ((status & RNW) == 0 || (status & RCVD)) {
			nvec_invalid_flags(nvec, status, true);
		} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
			to_send = nvec->tx->data[nvec->tx->pos++];
		} else {
			dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
				nvec->tx,
				(uint) (nvec->tx ? nvec->tx->pos : 0),
				(uint) (nvec->tx ? nvec->tx->size : 0));
			nvec->state = 0;
		}
		break;
	case 4:		/* EC does some write, we read the data */
		if ((status & (END_TRANS | RNW)) == END_TRANS)
			nvec_rx_completed(nvec);
		else if (status & (RNW | RCVD))
			nvec_invalid_flags(nvec, status, true);
		else if (nvec->rx && nvec->rx->pos < NVEC_MSG_SIZE)
			nvec->rx->data[nvec->rx->pos++] = received;
		else
			dev_err(nvec->dev,
				"RX buffer overflow on %p: Trying to write byte %u of %u\n",
				nvec->rx, nvec->rx ? nvec->rx->pos : 0,
				NVEC_MSG_SIZE);
		break;
	default:
		nvec->state = 0;
	}

	/* If we are told that a new transfer starts, verify it */
	if ((status & (RCVD | RNW)) == RCVD) {
		if (received != nvec->i2c_addr)
			dev_err(nvec->dev,
			"received address 0x%02x, expected 0x%02x\n",
			received, nvec->i2c_addr);
		nvec->state = 1;
	}

	/* Send data if requested, but not on end of transmission */
	if ((status & (RNW | END_TRANS)) == RNW)
		writel(to_send, nvec->base + I2C_SL_RCVD);

	/* If we have sent the first byte */
	if (status == (I2C_SL_IRQ | RNW | RCVD))
		nvec_gpio_set_value(nvec, 1);

	dev_dbg(nvec->dev,
		"Handled: %s 0x%02x, %s 0x%02x in state %u [%s%s%s]\n",
		(status & RNW) == 0 ? "received" : "R=",
		received,
		(status & (RNW | END_TRANS)) ? "sent" : "S=",
		to_send,
		state,
		status & END_TRANS ? " END_TRANS" : "",
		status & RCVD ? " RCVD" : "",
		status & RNW ? " RNW" : "");

	/*
	 * TODO: A correct fix needs to be found for this.
	 *
	 * We experience fewer incomplete messages with this delay than
	 * without it, but we don't know why. Help is appreciated.
	 */
	udelay(100);

	return IRQ_HANDLED;
}

static void tegra_init_i2c_slave(struct nvec_chip *nvec)
{
	u32 val;

	clk_prepare_enable(nvec->i2c_clk);

	reset_control_assert(nvec->rst);
	udelay(2);
	reset_control_deassert(nvec->rst);

	val = I2C_CNFG_NEW_MASTER_SFM | I2C_CNFG_PACKET_MODE_EN |
	    (0x2 << I2C_CNFG_DEBOUNCE_CNT_SHIFT);
	writel(val, nvec->base + I2C_CNFG);

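	/*
	 * Controller clock rate; presumably eight times the (roughly 80 kHz)
	 * slave bus clock, although the original code does not say why.
	 */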
	clk_set_rate(nvec->i2c_clk, 8 * 80000);

	writel(I2C_SL_NEWSL, nvec->base + I2C_SL_CNFG);
	writel(0x1E, nvec->base + I2C_SL_DELAY_COUNT);

	writel(nvec->i2c_addr >> 1, nvec->base + I2C_SL_ADDR1);
	writel(0, nvec->base + I2C_SL_ADDR2);

	enable_irq(nvec->irq);
}

#ifdef CONFIG_PM_SLEEP
static void nvec_disable_i2c_slave(struct nvec_chip *nvec)
{
	disable_irq(nvec->irq);
	writel(I2C_SL_NEWSL | I2C_SL_NACK, nvec->base + I2C_SL_CNFG);
	clk_disable_unprepare(nvec->i2c_clk);
}
#endif

static void nvec_power_off(void)
{
	char ap_pwr_down[] = { NVEC_SLEEP, AP_PWR_DOWN };

	nvec_toggle_global_events(nvec_power_handle, false);
	nvec_write_async(nvec_power_handle, ap_pwr_down, 2);
}

/*
 *  Parse common device tree data
 */
static int nvec_i2c_parse_dt_pdata(struct nvec_chip *nvec)
{
	nvec->gpio = of_get_named_gpio(nvec->dev->of_node, "request-gpios", 0);

	if (nvec->gpio < 0) {
		dev_err(nvec->dev, "no gpio specified");
		return -ENODEV;
	}

	if (of_property_read_u32(nvec->dev->of_node, "slave-addr",
				&nvec->i2c_addr)) {
		dev_err(nvec->dev, "no i2c address specified");
		return -ENODEV;
	}

	return 0;
}

static int tegra_nvec_probe(struct platform_device *pdev)
{
	int err, ret;
	struct clk *i2c_clk;
	struct nvec_chip *nvec;
	struct nvec_msg *msg;
	struct resource *res;
	void __iomem *base;
	char	get_firmware_version[] = { NVEC_CNTL, GET_FIRMWARE_VERSION },
		unmute_speakers[] = { NVEC_OEM0, 0x10, 0x59, 0x95 },
		enable_event[7] = { NVEC_SYS, CNF_EVENT_REPORTING, true };
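	/*
	 * enable_event holds the command byte, the subcommand, the global
	 * enable flag and four mask bytes that nvec_event_mask() fills in
	 * later, hence the fixed size of seven.
	 */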

	if (!pdev->dev.of_node) {
		dev_err(&pdev->dev, "must be instantiated using device tree\n");
		return -ENODEV;
	}

	nvec = devm_kzalloc(&pdev->dev, sizeof(struct nvec_chip), GFP_KERNEL);
	if (!nvec)
		return -ENOMEM;

	platform_set_drvdata(pdev, nvec);
	nvec->dev = &pdev->dev;

	err = nvec_i2c_parse_dt_pdata(nvec);
	if (err < 0)
		return err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	nvec->irq = platform_get_irq(pdev, 0);
	if (nvec->irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return -ENODEV;
	}

	i2c_clk = devm_clk_get(&pdev->dev, "div-clk");
	if (IS_ERR(i2c_clk)) {
		dev_err(nvec->dev, "failed to get controller clock\n");
		return -ENODEV;
	}

	nvec->rst = devm_reset_control_get(&pdev->dev, "i2c");
	if (IS_ERR(nvec->rst)) {
		dev_err(nvec->dev, "failed to get controller reset\n");
		return PTR_ERR(nvec->rst);
	}

	nvec->base = base;
	nvec->i2c_clk = i2c_clk;
	nvec->rx = &nvec->msg_pool[0];

	ATOMIC_INIT_NOTIFIER_HEAD(&nvec->notifier_list);

	init_completion(&nvec->sync_write);
	init_completion(&nvec->ec_transfer);
	mutex_init(&nvec->sync_write_mutex);
	spin_lock_init(&nvec->tx_lock);
	spin_lock_init(&nvec->rx_lock);
	INIT_LIST_HEAD(&nvec->rx_data);
	INIT_LIST_HEAD(&nvec->tx_data);
	INIT_WORK(&nvec->rx_work, nvec_dispatch);
	INIT_WORK(&nvec->tx_work, nvec_request_master);

	err = devm_gpio_request_one(&pdev->dev, nvec->gpio, GPIOF_OUT_INIT_HIGH,
					"nvec gpio");
	if (err < 0) {
		dev_err(nvec->dev, "couldn't request gpio\n");
		return -ENODEV;
	}

	err = devm_request_irq(&pdev->dev, nvec->irq, nvec_interrupt, 0,
				"nvec", nvec);
	if (err) {
		dev_err(nvec->dev, "couldn't request irq\n");
		return -ENODEV;
	}
	disable_irq(nvec->irq);

	tegra_init_i2c_slave(nvec);

	/* enable event reporting */
	nvec_toggle_global_events(nvec, true);

	nvec->nvec_status_notifier.notifier_call = nvec_status_notifier;
	nvec_register_notifier(nvec, &nvec->nvec_status_notifier, 0);

	nvec_power_handle = nvec;
	pm_power_off = nvec_power_off;

	/* Get Firmware Version */
	msg = nvec_write_sync(nvec, get_firmware_version, 2);

	if (msg) {
		dev_warn(nvec->dev, "ec firmware version %02x.%02x.%02x / %02x\n",
			msg->data[4], msg->data[5], msg->data[6], msg->data[7]);

		nvec_msg_free(nvec, msg);
	}

	ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
			      ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
	if (ret)
		dev_err(nvec->dev, "error adding subdevices\n");

	/* unmute speakers? */
	nvec_write_async(nvec, unmute_speakers, 4);

	/* enable lid switch event */
	nvec_event_mask(enable_event, LID_SWITCH);
	nvec_write_async(nvec, enable_event, 7);

	/* enable power button event */
	nvec_event_mask(enable_event, PWR_BUTTON);
	nvec_write_async(nvec, enable_event, 7);

	return 0;
}

static int tegra_nvec_remove(struct platform_device *pdev)
{
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	nvec_toggle_global_events(nvec, false);
	mfd_remove_devices(nvec->dev);
	nvec_unregister_notifier(nvec, &nvec->nvec_status_notifier);
	cancel_work_sync(&nvec->rx_work);
	cancel_work_sync(&nvec->tx_work);
	/* FIXME: needs to check whether nvec is responsible for power off */
	pm_power_off = NULL;

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int nvec_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);
	struct nvec_msg *msg;
	char ap_suspend[] = { NVEC_SLEEP, AP_SUSPEND };

	dev_dbg(nvec->dev, "suspending\n");

	/* keep these requests synchronous or you'll break suspend */
	nvec_toggle_global_events(nvec, false);

	msg = nvec_write_sync(nvec, ap_suspend, sizeof(ap_suspend));
	if (msg)
		nvec_msg_free(nvec, msg);

	nvec_disable_i2c_slave(nvec);

	return 0;
}

static int nvec_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct nvec_chip *nvec = platform_get_drvdata(pdev);

	dev_dbg(nvec->dev, "resuming\n");
	tegra_init_i2c_slave(nvec);
	nvec_toggle_global_events(nvec, true);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(nvec_pm_ops, nvec_suspend, nvec_resume);

/* Match table for of_platform binding */
static const struct of_device_id nvidia_nvec_of_match[] = {
	{ .compatible = "nvidia,nvec", },
	{},
};
MODULE_DEVICE_TABLE(of, nvidia_nvec_of_match);

static struct platform_driver nvec_device_driver = {
	.probe   = tegra_nvec_probe,
	.remove  = tegra_nvec_remove,
	.driver  = {
		.name = "nvec",
		.pm = &nvec_pm_ops,
		.of_match_table = nvidia_nvec_of_match,
	}
};

module_platform_driver(nvec_device_driver);

MODULE_ALIAS("platform:nvec");
MODULE_DESCRIPTION("NVIDIA compliant embedded controller interface");
MODULE_AUTHOR("Marc Dietrich <marvin24@gmx.de>");
MODULE_LICENSE("GPL");