/*
 * drivers/pci/pcie/aer/aerdrv_core.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file implements the core part of PCI Express AER.  When a PCI Express
 * error is delivered, an error message is collected and printed to the
 * console, then an error recovery procedure is executed following the
 * PCI error recovery rules.
 *
 * Copyright (C) 2006 Intel Corp.
 *	Tom Long Nguyen (tom.l.nguyen@intel.com)
 *	Zhang Yanmin (yanmin.zhang@intel.com)
 *
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include "aerdrv.h"

static bool forceload;
static bool nosourceid;
module_param(forceload, bool, 0);
module_param(nosourceid, bool, 0);

#define	PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)

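/**
 * pci_enable_pcie_error_reporting - enable generation of AER error messages
 * @dev: the PCI Express device
 *
 * Set the correctable, non-fatal, fatal and unsupported request reporting
 * enable bits in the Device Control register, provided the device has an
 * AER capability and the platform firmware has not retained control of AER
 * (firmware-first mode).
 */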
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
		return -EIO;

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);

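/**
 * pci_disable_pcie_error_reporting - disable generation of AER error messages
 * @dev: the PCI Express device
 *
 * Clear the AER reporting enable bits in the Device Control register, unless
 * the platform firmware has retained control of AER (firmware-first mode).
 */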
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
					  PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);

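/**
 * pci_cleanup_aer_uncorrect_error_status - clear uncorrectable error status
 * @dev: the PCI Express device
 *
 * Clear any bits currently set in the device's AER Uncorrectable Error
 * Status register (the register is RW1C, so the value read is written back).
 */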
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos;
	u32 status;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -EIO;

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	if (status)
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);

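/**
 * pci_cleanup_aer_error_status_regs - clear all AER error status registers
 * @dev: the PCI Express device
 *
 * Clear the Correctable and Uncorrectable Error Status registers and, for
 * Root Ports, the Root Error Status register, by writing back whatever bits
 * are currently set.
 */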
int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
	int pos;
	u32 status;
	int port_type;

	if (!pci_is_pcie(dev))
		return -ENODEV;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -EIO;

	port_type = pci_pcie_type(dev);
	if (port_type == PCI_EXP_TYPE_ROOT_PORT) {
		pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &status);
		pci_write_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, status);
	}

	pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS, status);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}

/**
 * add_error_device - list device to be handled
 * @e_info: pointer to error info
 * @dev: pointer to pci_dev to be added
 */
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
	if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
		e_info->dev[e_info->error_dev_num] = dev;
		e_info->error_dev_num++;
		return 0;
	}
	return -ENOSPC;
}

/**
 * is_error_source - check whether the device is the source of the reported error
 * @dev: pointer to pci_dev to be checked
 * @e_info: pointer to reported error info
 */
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
	int pos;
	u32 status, mask;
	u16 reg16;

	/*
	 * When the bus ID is equal to 0, it might be a bad ID
	 * reported by the Root Port.
	 */
	if (!nosourceid && (PCI_BUS_NUM(e_info->id) != 0)) {
		/* Device ID match? */
		if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
			return true;

		/* If there is only a single error, the ID mismatch is definitive */
		if (!e_info->multi_error_valid)
			return false;
	}

	/*
	 * When either
	 *      1) nosourceid is set;
	 *      2) the bus ID is equal to 0 (some ports might lose the
	 *              bus ID of the error source);
	 *      3) there are multiple errors and the prior ID comparison failed;
	 * we check the AER status registers to find a possible reporter.
	 */
	if (atomic_read(&dev->enable_cnt) == 0)
		return false;

	/* Check if AER is enabled */
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
	if (!(reg16 & PCI_EXP_AER_FLAGS))
		return false;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return false;

	/* Check if the error is recorded */
	if (e_info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
	} else {
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	}
	if (status & ~mask)
		return true;

	return false;
}

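/*
 * pci_walk_bus() callback: if @dev is a possible source of the error
 * described by @data (a struct aer_err_info), add it to the list of error
 * devices.  Returning nonzero stops the bus walk.
 */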
static int find_device_iter(struct pci_dev *dev, void *data)
{
	struct aer_err_info *e_info = (struct aer_err_info *)data;

	if (is_error_source(dev, e_info)) {
		/* List this device */
		if (add_error_device(e_info, dev)) {
			/* We cannot handle more... Stop iteration */
			/* TODO: Should print error message here? */
			return 1;
		}

		/* If there is only a single error, stop iteration */
		if (!e_info->multi_error_valid)
			return 1;
	}
	return 0;
}

/**
 * find_source_device - search through the device hierarchy for the source device
 * @parent: pointer to the Root Port pci_dev data structure
 * @e_info: detailed error information, such as the ID
 *
 * Return true if found.
 *
 * Invoked by DPC when an error is detected at the Root Port.
 * The caller of this function must set the id, severity, and
 * multi_error_valid fields of the struct aer_err_info pointed to by @e_info
 * properly.  This function fills in e_info->error_dev_num and e_info->dev[],
 * based on the given information.
 */
static bool find_source_device(struct pci_dev *parent,
		struct aer_err_info *e_info)
{
	struct pci_dev *dev = parent;
	int result;

	/* Must reset in this function */
	e_info->error_dev_num = 0;

	/* Is the Root Port an agent that sends the error message? */
	result = find_device_iter(dev, e_info);
	if (result)
		return true;

	pci_walk_bus(parent->subordinate, find_device_iter, e_info);

	if (!e_info->error_dev_num) {
		dev_printk(KERN_DEBUG, &parent->dev,
				"can't find device of ID%04x\n",
				e_info->id);
		return false;
	}
	return true;
}

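/*
 * pci_walk_bus() callback: set the channel state on @dev and merge the
 * driver's ->error_detected() vote into the broadcast result.  A device with
 * no driver or no error_detected callback votes NO_AER_DRIVER (endpoints)
 * or NONE (bridges).
 */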
static int report_error_detected(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	dev->error_state = result_data->state;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->error_detected) {
		if (result_data->state == pci_channel_io_frozen &&
			!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) {
			/*
			 * In case of fatal recovery, if one of the
			 * downstream devices has no driver, we might
			 * be unable to recover because a later insmod
			 * of a driver for this device would be unaware
			 * of its HW state.
			 */
			dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
				   dev->driver ?
				   "no AER-aware driver" : "no driver");
		}

		/*
		 * If there's any device in the subtree that does not
		 * have an error_detected callback, returning
		 * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
		 * the subsequent mmio_enabled/slot_reset/resume
		 * callbacks of "any" device in the subtree. All the
		 * devices in the subtree are left in the error state
		 * without recovery.
		 */

		if (!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
		else
			vote = PCI_ERS_RESULT_NONE;
	} else {
		err_handler = dev->driver->err_handler;
		vote = err_handler->error_detected(dev, result_data->state);
	}

	result_data->result = merge_result(result_data->result, vote);
	device_unlock(&dev->dev);
	return 0;
}

static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->mmio_enabled)
		goto out;

	err_handler = dev->driver->err_handler;
	vote = err_handler->mmio_enabled(dev);
	result_data->result = merge_result(result_data->result, vote);
out:
	device_unlock(&dev->dev);
	return 0;
}

static int report_slot_reset(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->slot_reset)
		goto out;

	err_handler = dev->driver->err_handler;
	vote = err_handler->slot_reset(dev);
	result_data->result = merge_result(result_data->result, vote);
out:
	device_unlock(&dev->dev);
	return 0;
}

static int report_resume(struct pci_dev *dev, void *data)
{
	const struct pci_error_handlers *err_handler;

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_normal;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->resume)
		goto out;

	err_handler = dev->driver->err_handler;
	err_handler->resume(dev);
out:
	device_unlock(&dev->dev);
	return 0;
}

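/*
 * For reference, a driver opts into the recovery callbacks broadcast below
 * by filling in a struct pci_error_handlers and pointing its pci_driver's
 * err_handler field at it.  A minimal sketch (the my_* names are
 * hypothetical, not part of this driver):
 *
 *	static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
 *						  enum pci_channel_state state)
 *	{
 *		// Stop I/O; ask for a reset if the channel is frozen (fatal).
 *		return state == pci_channel_io_frozen ?
 *			PCI_ERS_RESULT_NEED_RESET : PCI_ERS_RESULT_CAN_RECOVER;
 *	}
 *
 *	static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
 *	{
 *		// Re-initialize the device after the link/slot reset.
 *		return PCI_ERS_RESULT_RECOVERED;
 *	}
 *
 *	static void my_resume(struct pci_dev *pdev)
 *	{
 *		// Restart normal I/O.
 *	}
 *
 *	static const struct pci_error_handlers my_err_handler = {
 *		.error_detected	= my_error_detected,
 *		.slot_reset	= my_slot_reset,
 *		.resume		= my_resume,
 *	};
 *
 *	static struct pci_driver my_driver = {
 *		...
 *		.err_handler	= &my_err_handler,
 *	};
 */
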
/**
 * broadcast_error_message - handle message broadcast to downstream drivers
 * @dev: pointer to the device from which the message is broadcast down the hierarchy
 * @state: error state
 * @error_mesg: message to print
 * @cb: callback to be broadcast
 *
 * Invoked during the error recovery process.  Once invoked, the error
 * severity is broadcast to all downstream drivers in the hierarchy in
 * question.
 */
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
	enum pci_channel_state state,
	char *error_mesg,
	int (*cb)(struct pci_dev *, void *))
{
	struct aer_broadcast_data result_data;

	dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
	result_data.state = state;
	if (cb == report_error_detected)
		result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
	else
		result_data.result = PCI_ERS_RESULT_RECOVERED;

	if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
		/*
		 * If the error is reported by a bridge, we think this error
		 * is related to the downstream link of the bridge, so we
		 * do error recovery on all subordinates of the bridge instead
		 * of the bridge itself, and clear the error status of the
		 * bridge.
		 */
		if (cb == report_error_detected)
			dev->error_state = state;
		pci_walk_bus(dev->subordinate, cb, &result_data);
		if (cb == report_resume) {
			pci_cleanup_aer_uncorrect_error_status(dev);
			dev->error_state = pci_channel_io_normal;
		}
	} else {
		/*
		 * If the error is reported by an endpoint, we think this
		 * error is related to the upstream link of the endpoint.
		 */
		pci_walk_bus(dev->bus, cb, &result_data);
	}

	return result_data.result;
}

/**
 * default_reset_link - default reset function
 * @dev: pointer to pci_dev data structure
 *
 * Invoked when performing link reset on a Downstream Port or a
 * Root Port with no aer driver.
 */
static pci_ers_result_t default_reset_link(struct pci_dev *dev)
{
	pci_reset_bridge_secondary_bus(dev);
	dev_printk(KERN_DEBUG, &dev->dev, "downstream link has been reset\n");
	return PCI_ERS_RESULT_RECOVERED;
}

static int find_aer_service_iter(struct device *device, void *data)
{
	struct pcie_port_service_driver *service_driver, **drv;

	drv = (struct pcie_port_service_driver **) data;

	if (device->bus == &pcie_port_bus_type && device->driver) {
		service_driver = to_service_driver(device->driver);
		if (service_driver->service == PCIE_PORT_SERVICE_AER) {
			*drv = service_driver;
			return 1;
		}
	}

	return 0;
}

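/*
 * Return the AER port service driver bound to one of @dev's child service
 * devices, or NULL if there isn't one.
 */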
static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
{
	struct pcie_port_service_driver *drv = NULL;

	device_for_each_child(&dev->dev, &drv, find_aer_service_iter);

	return drv;
}

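/**
 * reset_link - reset the link upstream of the reporting device
 * @dev: pointer to the pci_dev that reported the error
 *
 * Pick the port whose downstream link must be reset (the reporting bridge
 * itself, or the bridge above it otherwise), then use that port's AER
 * service driver's reset_link method if one is registered, falling back to
 * default_reset_link() when the port has a secondary link.
 */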
static pci_ers_result_t reset_link(struct pci_dev *dev)
{
	struct pci_dev *udev;
	pci_ers_result_t status;
	struct pcie_port_service_driver *driver;

	if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
		/* Reset this port for all subordinates */
		udev = dev;
	} else {
		/* Reset the upstream component (likely downstream port) */
		udev = dev->bus->self;
	}

	/* Try the AER driver of the component first */
	driver = find_aer_service(udev);

	if (driver && driver->reset_link) {
		status = driver->reset_link(udev);
	} else if (udev->has_secondary_link) {
		status = default_reset_link(udev);
	} else {
		dev_printk(KERN_DEBUG, &dev->dev,
			"no link-reset support at upstream device %s\n",
			pci_name(udev));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (status != PCI_ERS_RESULT_RECOVERED) {
		dev_printk(KERN_DEBUG, &dev->dev,
			"link reset at upstream device %s failed\n",
			pci_name(udev));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return status;
}

/**
 * do_recovery - handle the nonfatal/fatal error recovery process
 * @dev: pointer to the pci_dev data structure of the agent detecting an error
 * @severity: error severity type
 *
 * Invoked when an error is nonfatal/fatal.  Broadcast the error-detected
 * message to all downstream drivers within the hierarchy in question and
 * walk them through the recovery steps.
 */
static void do_recovery(struct pci_dev *dev, int severity)
{
	pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
	enum pci_channel_state state;

	if (severity == AER_FATAL)
		state = pci_channel_io_frozen;
	else
		state = pci_channel_io_normal;

	status = broadcast_error_message(dev,
			state,
			"error_detected",
			report_error_detected);

	if (severity == AER_FATAL) {
		result = reset_link(dev);
		if (result != PCI_ERS_RESULT_RECOVERED)
			goto failed;
	}

	if (status == PCI_ERS_RESULT_CAN_RECOVER)
		status = broadcast_error_message(dev,
				state,
				"mmio_enabled",
				report_mmio_enabled);

	if (status == PCI_ERS_RESULT_NEED_RESET) {
		/*
		 * TODO: Should call platform-specific
		 * functions to reset slot before calling
		 * drivers' slot_reset callbacks?
		 */
		status = broadcast_error_message(dev,
				state,
				"slot_reset",
				report_slot_reset);
	}

	if (status != PCI_ERS_RESULT_RECOVERED)
		goto failed;

	broadcast_error_message(dev,
				state,
				"resume",
				report_resume);

	dev_info(&dev->dev, "AER: Device recovery successful\n");
	return;

failed:
	/* TODO: Should kernel panic here? */
	dev_info(&dev->dev, "AER: Device recovery failed\n");
}

/**
 * handle_error_source - handle logging error into an event log
 * @aerdev: pointer to the pcie_device data structure of the root port
 * @dev: pointer to the pci_dev data structure of the error source device
 * @info: comprehensive error information
 *
 * Invoked when an error is detected by the Root Port.
 */
static void handle_error_source(struct pcie_device *aerdev,
	struct pci_dev *dev,
	struct aer_err_info *info)
{
	int pos;

	if (info->severity == AER_CORRECTABLE) {
		/*
		 * A correctable error does not need software intervention,
		 * so there is no need to go through the error recovery
		 * process; just clear the logged status bits.
		 */
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
		if (pos)
			pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
					info->status);
	} else
		do_recovery(dev, info->severity);
}

#ifdef CONFIG_ACPI_APEI_PCIEAER
static void aer_recover_work_func(struct work_struct *work);

#define AER_RECOVER_RING_ORDER		4
#define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)

struct aer_recover_entry {
	u8	bus;
	u8	devfn;
	u16	domain;
	int	severity;
	struct aer_capability_regs *regs;
};

static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
		    AER_RECOVER_RING_SIZE);
/*
 * Mutual exclusion for writers of aer_recover_ring; the reader side doesn't
 * need a lock, because there is only one reader and no lock is needed
 * between the reader and writer.
 */
static DEFINE_SPINLOCK(aer_recover_ring_lock);
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);

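/**
 * aer_recover_queue - queue an AER event reported by the platform (APEI)
 * @domain: PCI domain (segment) of the error source
 * @bus: bus number of the error source
 * @devfn: device/function number of the error source
 * @severity: AER severity (AER_CORRECTABLE, AER_NONFATAL or AER_FATAL)
 * @aer_regs: snapshot of the source device's AER capability registers
 *
 * Push the event into aer_recover_ring and schedule aer_recover_work so
 * that recovery runs in process context.
 */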
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
		       int severity, struct aer_capability_regs *aer_regs)
{
	unsigned long flags;
	struct aer_recover_entry entry = {
		.bus		= bus,
		.devfn		= devfn,
		.domain		= domain,
		.severity	= severity,
		.regs		= aer_regs,
	};

	spin_lock_irqsave(&aer_recover_ring_lock, flags);
	if (kfifo_put(&aer_recover_ring, entry))
		schedule_work(&aer_recover_work);
	else
		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);
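
/*
 * A platform error handler (e.g. the APEI/GHES code) that has decoded a
 * PCIe error record might hand it off roughly like this (the variable
 * names below are illustrative only):
 *
 *	aer_recover_queue(segment, bus, PCI_DEVFN(device, function),
 *			  aer_severity, aer_capability_regs);
 */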

static void aer_recover_work_func(struct work_struct *work)
{
	struct aer_recover_entry entry;
	struct pci_dev *pdev;

	while (kfifo_get(&aer_recover_ring, &entry)) {
		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
						   entry.devfn);
		if (!pdev) {
			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
			       entry.domain, entry.bus,
			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
			continue;
		}
		cper_print_aer(pdev, entry.severity, entry.regs);
		do_recovery(pdev, entry.severity);
		pci_dev_put(pdev);
	}
}
#endif

/**
 * get_device_error_info - read the error status from dev and store it in info
 * @dev: pointer to the device expected to have an error record
 * @info: pointer to the structure to store the error record in
 *
 * Return 1 on success, 0 on error.
 *
 * Note that @info is reused among all error devices. Clear fields properly.
 */
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
	int pos, temp;

	/* Must reset in this function */
	info->status = 0;
	info->tlp_header_valid = 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

	/* The device might not support AER */
	if (!pos)
		return 1;

	if (info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;
	} else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
		info->severity == AER_NONFATAL) {

		/* The link is still healthy for I/O reads */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;

		/* Get the First Error Pointer */
		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
		info->first_error = PCI_ERR_CAP_FEP(temp);

		if (info->status & AER_LOG_TLP_MASKS) {
			info->tlp_header_valid = 1;
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
		}
	}

	return 1;
}

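/**
 * aer_process_err_devices - handle all devices listed in @e_info
 * @p_device: pointer to the Root Port's AER service device
 * @e_info: error information, including the list of error source devices
 */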
static inline void aer_process_err_devices(struct pcie_device *p_device,
			struct aer_err_info *e_info)
{
	int i;

	/*
	 * Report all errors before handling them, so as not to lose
	 * records due to a reset etc.
	 */
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (get_device_error_info(e_info->dev[i], e_info))
			aer_print_error(e_info->dev[i], e_info);
	}
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (get_device_error_info(e_info->dev[i], e_info))
			handle_error_source(p_device, e_info->dev[i], e_info);
	}
}

/**
 * aer_isr_one_error - consume an error detected by the root port
 * @p_device: pointer to the error root port service device
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct pcie_device *p_device,
		struct aer_err_source *e_src)
{
	struct aer_err_info *e_info;

	/* struct aer_err_info might be big, so we allocate it with slab */
	e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
	if (!e_info) {
		dev_printk(KERN_DEBUG, &p_device->port->dev,
			"Can't allocate mem when processing AER errors\n");
		return;
	}

	/*
	 * There is a possibility that both a correctable error and an
	 * uncorrectable error are logged. Report the correctable error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info->id = ERR_COR_ID(e_src->id);
		e_info->severity = AER_CORRECTABLE;

		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info->id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info->severity = AER_FATAL;
		else
			e_info->severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	kfree(e_info);
}


/**
 * get_e_source - retrieve an error source
 * @rpc: pointer to the root port which holds an error
 * @e_src: pointer to store the retrieved error source
 *
 * Return 1 if an error source is retrieved, otherwise 0.
 *
 * Invoked by the DPC handler to consume an error.
 */
static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
{
	unsigned long flags;

	/* Lock access to the Root error producer/consumer index */
	spin_lock_irqsave(&rpc->e_lock, flags);
	if (rpc->prod_idx == rpc->cons_idx) {
		spin_unlock_irqrestore(&rpc->e_lock, flags);
		return 0;
	}

	*e_src = rpc->e_sources[rpc->cons_idx];
	rpc->cons_idx++;
	if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
		rpc->cons_idx = 0;
	spin_unlock_irqrestore(&rpc->e_lock, flags);

	return 1;
}

/**
 * aer_isr - consume errors detected by the root port
 * @work: definition of this work item
 *
 * Invoked, as the DPC (bottom half), when the root port records a newly
 * detected error.
 */
void aer_isr(struct work_struct *work)
{
	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
	struct pcie_device *p_device = rpc->rpd;
	struct aer_err_source uninitialized_var(e_src);

	mutex_lock(&rpc->rpc_mutex);
	while (get_e_source(rpc, &e_src))
		aer_isr_one_error(p_device, &e_src);
	mutex_unlock(&rpc->rpc_mutex);
}

/**
 * aer_init - provide AER initialization
 * @dev: pointer to the AER pcie device
 *
 * Invoked when the AER service driver is loaded.
 */
int aer_init(struct pcie_device *dev)
{
	if (forceload) {
		dev_printk(KERN_DEBUG, &dev->device,
			   "aerdrv forceload requested.\n");
		pcie_aer_force_firmware_first(dev->port, 0);
	}
	return 0;
}