This source file includes the following definitions:
- xen_mce_chrdev_open
- xen_mce_chrdev_release
- xen_mce_chrdev_read
- xen_mce_chrdev_poll
- xen_mce_chrdev_ioctl
- xen_mce_log
- convert_log
- mc_queue_handle
- xen_mce_work_fn
- xen_mce_interrupt
- bind_virq_for_mce
- xen_late_init_mcelog
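/*
 * Xen machine check (MCE) log driver: running in the initial domain (dom0),
 * it fetches machine check error records from the hypervisor and exposes
 * them to userspace through the /dev/mcelog character device.
 */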
#define pr_fmt(fmt) "xen_mcelog: " fmt

#include <linux/init.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/capability.h>
#include <linux/poll.h>
#include <linux/sched.h>

#include <xen/interface/xen.h>
#include <xen/events.h>
#include <xen/interface/vcpu.h>
#include <xen/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

static struct mc_info g_mi;
static struct mcinfo_logical_cpu *g_physinfo;
static uint32_t ncpus;

static DEFINE_MUTEX(mcelog_lock);

static struct xen_mce_log xen_mcelog = {
	.signature = XEN_MCE_LOG_SIGNATURE,
	.len = XEN_MCE_LOG_LEN,
	.recordlen = sizeof(struct xen_mce),
};

static DEFINE_SPINLOCK(xen_mce_chrdev_state_lock);
static int xen_mce_chrdev_open_count;
static int xen_mce_chrdev_open_exclu;

static DECLARE_WAIT_QUEUE_HEAD(xen_mce_chrdev_wait);

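/* open /dev/mcelog; an O_EXCL opener excludes all other openers */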
static int xen_mce_chrdev_open(struct inode *inode, struct file *file)
{
	spin_lock(&xen_mce_chrdev_state_lock);

	if (xen_mce_chrdev_open_exclu ||
	    (xen_mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
		spin_unlock(&xen_mce_chrdev_state_lock);

		return -EBUSY;
	}

	if (file->f_flags & O_EXCL)
		xen_mce_chrdev_open_exclu = 1;
	xen_mce_chrdev_open_count++;

	spin_unlock(&xen_mce_chrdev_state_lock);

	return nonseekable_open(inode, file);
}

static int xen_mce_chrdev_release(struct inode *inode, struct file *file)
{
	spin_lock(&xen_mce_chrdev_state_lock);

	xen_mce_chrdev_open_count--;
	xen_mce_chrdev_open_exclu = 0;

	spin_unlock(&xen_mce_chrdev_state_lock);

	return 0;
}

static ssize_t xen_mce_chrdev_read(struct file *filp, char __user *ubuf,
				   size_t usize, loff_t *off)
{
	char __user *buf = ubuf;
	unsigned num;
	int i, err;

	mutex_lock(&mcelog_lock);

	num = xen_mcelog.next;

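	/* only full reads of the whole log buffer are supported */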
	err = -EINVAL;
	if (*off != 0 || usize < XEN_MCE_LOG_LEN*sizeof(struct xen_mce))
		goto out;

	err = 0;
	for (i = 0; i < num; i++) {
		struct xen_mce *m = &xen_mcelog.entry[i];

		err |= copy_to_user(buf, m, sizeof(*m));
		buf += sizeof(*m);
	}

	memset(xen_mcelog.entry, 0, num * sizeof(struct xen_mce));
	xen_mcelog.next = 0;

	if (err)
		err = -EFAULT;

out:
	mutex_unlock(&mcelog_lock);

	return err ? err : buf - ubuf;
}

static __poll_t xen_mce_chrdev_poll(struct file *file, poll_table *wait)
{
	poll_wait(file, &xen_mce_chrdev_wait, wait);

	if (xen_mcelog.next)
		return EPOLLIN | EPOLLRDNORM;

	return 0;
}

static long xen_mce_chrdev_ioctl(struct file *f, unsigned int cmd,
				 unsigned long arg)
{
	int __user *p = (int __user *)arg;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	switch (cmd) {
	case MCE_GET_RECORD_LEN:
		return put_user(sizeof(struct xen_mce), p);
	case MCE_GET_LOG_LEN:
		return put_user(XEN_MCE_LOG_LEN, p);
	case MCE_GETCLEAR_FLAGS: {
		unsigned flags;

		do {
			flags = xen_mcelog.flags;
		} while (cmpxchg(&xen_mcelog.flags, flags, 0) != flags);

		return put_user(flags, p);
	}
	default:
		return -ENOTTY;
	}
}

static const struct file_operations xen_mce_chrdev_ops = {
	.open = xen_mce_chrdev_open,
	.release = xen_mce_chrdev_release,
	.read = xen_mce_chrdev_read,
	.poll = xen_mce_chrdev_poll,
	.unlocked_ioctl = xen_mce_chrdev_ioctl,
	.llseek = no_llseek,
};

static struct miscdevice xen_mce_chrdev_device = {
	MISC_MCELOG_MINOR,
	"mcelog",
	&xen_mce_chrdev_ops,
};

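/* append one record to the software log; caller must hold mcelog_lock */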
static void xen_mce_log(struct xen_mce *mce)
{
	unsigned entry;

	entry = xen_mcelog.next;

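	/* if the buffer is full, flag the overflow and drop this entry */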
	if (entry >= XEN_MCE_LOG_LEN) {
		set_bit(XEN_MCE_OVERFLOW,
			(unsigned long *)&xen_mcelog.flags);
		return;
	}

	memcpy(xen_mcelog.entry + entry, mce, sizeof(struct xen_mce));

	xen_mcelog.next++;
}

static int convert_log(struct mc_info *mi)
{
	struct mcinfo_common *mic;
	struct mcinfo_global *mc_global;
	struct mcinfo_bank *mc_bank;
	struct xen_mce m;
	uint32_t i;

	mic = NULL;
	x86_mcinfo_lookup(&mic, mi, MC_TYPE_GLOBAL);
	if (unlikely(!mic)) {
		pr_warn("Failed to find global error info\n");
		return -ENODEV;
	}

	memset(&m, 0, sizeof(struct xen_mce));

	mc_global = (struct mcinfo_global *)mic;
	m.mcgstatus = mc_global->mc_gstatus;
	m.apicid = mc_global->mc_apicid;

	for (i = 0; i < ncpus; i++)
		if (g_physinfo[i].mc_apicid == m.apicid)
			break;
	if (unlikely(i == ncpus)) {
		pr_warn("Failed to match cpu with apicid %d\n", m.apicid);
		return -ENODEV;
	}

	m.socketid = g_physinfo[i].mc_chipid;
	m.cpu = m.extcpu = g_physinfo[i].mc_cpunr;
	m.cpuvendor = (__u8)g_physinfo[i].mc_vendor;
	m.mcgcap = g_physinfo[i].mc_msrvalues[__MC_MSR_MCGCAP].value;

	mic = NULL;
	x86_mcinfo_lookup(&mic, mi, MC_TYPE_BANK);
	if (unlikely(!mic)) {
		pr_warn("Failed to find bank error info\n");
		return -ENODEV;
	}

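	/* walk the remaining mcinfo records and log each bank entry */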
	do {
		if ((!mic) || (mic->size == 0) ||
		    (mic->type != MC_TYPE_GLOBAL &&
		     mic->type != MC_TYPE_BANK &&
		     mic->type != MC_TYPE_EXTENDED &&
		     mic->type != MC_TYPE_RECOVERY))
			break;

		if (mic->type == MC_TYPE_BANK) {
			mc_bank = (struct mcinfo_bank *)mic;
			m.misc = mc_bank->mc_misc;
			m.status = mc_bank->mc_status;
			m.addr = mc_bank->mc_addr;
			m.tsc = mc_bank->mc_tsc;
			m.bank = mc_bank->mc_bank;
			m.finished = 1;

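			/* log this bank record */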
			xen_mce_log(&m);
		}
		mic = x86_mcinfo_next(mic);
	} while (1);

	return 0;
}

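/*
 * Drain one hypervisor error queue (urgent or non-urgent): fetch each
 * record, convert it into the software log, then ack it back to Xen.
 */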
static int mc_queue_handle(uint32_t flags)
{
	struct xen_mc mc_op;
	int ret = 0;

	mc_op.cmd = XEN_MC_fetch;
	set_xen_guest_handle(mc_op.u.mc_fetch.data, &g_mi);
	do {
		mc_op.u.mc_fetch.flags = flags;
		ret = HYPERVISOR_mca(&mc_op);
		if (ret) {
			pr_err("Failed to fetch %surgent error log\n",
			       flags == XEN_MC_URGENT ? "" : "non");
			break;
		}

		if (mc_op.u.mc_fetch.flags & XEN_MC_NODATA ||
		    mc_op.u.mc_fetch.flags & XEN_MC_FETCHFAILED)
			break;
		else {
			ret = convert_log(&g_mi);
			if (ret)
				pr_warn("Failed to convert this error log, continue acking it anyway\n");

			mc_op.u.mc_fetch.flags = flags | XEN_MC_ACK;
			ret = HYPERVISOR_mca(&mc_op);
			if (ret) {
				pr_err("Failed to ack previous error log\n");
				break;
			}
		}
	} while (1);

	return ret;
}

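/* deferred work scheduled from the MCA virq handler */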
static void xen_mce_work_fn(struct work_struct *work)
{
	int err;

	mutex_lock(&mcelog_lock);

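	/* urgent mc_info queue first */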
	err = mc_queue_handle(XEN_MC_URGENT);
	if (err)
		pr_err("Failed to handle urgent mc_info queue, continue handling nonurgent mc_info queue anyway\n");

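	/* then the non-urgent mc_info queue */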
	err = mc_queue_handle(XEN_MC_NONURGENT);
	if (err)
		pr_err("Failed to handle nonurgent mc_info queue\n");

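	/* wake up processes polling /dev/mcelog */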
	wake_up_interruptible(&xen_mce_chrdev_wait);

	mutex_unlock(&mcelog_lock);
}
static DECLARE_WORK(xen_mce_work, xen_mce_work_fn);

static irqreturn_t xen_mce_interrupt(int irq, void *dev_id)
{
	schedule_work(&xen_mce_work);
	return IRQ_HANDLED;
}

static int bind_virq_for_mce(void)
{
	int ret;
	struct xen_mc mc_op;

	memset(&mc_op, 0, sizeof(struct xen_mc));

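	/* query the number of physical CPUs */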
	mc_op.cmd = XEN_MC_physcpuinfo;
	set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
	ret = HYPERVISOR_mca(&mc_op);
	if (ret) {
		pr_err("Failed to get CPU numbers\n");
		return ret;
	}

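	/* fetch each physical CPU's info, kept for apicid matching in convert_log() */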
	ncpus = mc_op.u.mc_physcpuinfo.ncpus;
	g_physinfo = kcalloc(ncpus, sizeof(struct mcinfo_logical_cpu),
			     GFP_KERNEL);
	if (!g_physinfo)
		return -ENOMEM;
	set_xen_guest_handle(mc_op.u.mc_physcpuinfo.info, g_physinfo);
	ret = HYPERVISOR_mca(&mc_op);
	if (ret) {
		pr_err("Failed to get CPU info\n");
		kfree(g_physinfo);
		return ret;
	}

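	/* bind the MCA virq so Xen can notify us when new error records arrive */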
	ret = bind_virq_to_irqhandler(VIRQ_MCA, 0,
				      xen_mce_interrupt, 0, "mce", NULL);
	if (ret < 0) {
		pr_err("Failed to bind virq\n");
		kfree(g_physinfo);
		return ret;
	}

	return 0;
}

static int __init xen_late_init_mcelog(void)
{
	int ret;

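	/* only the initial domain (dom0) can fetch physical error logs */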
	if (!xen_initial_domain())
		return -ENODEV;

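	/* register the /dev/mcelog character device */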
	ret = misc_register(&xen_mce_chrdev_device);
	if (ret)
		return ret;

	ret = bind_virq_for_mce();
	if (ret)
		goto deregister;

	pr_info("/dev/mcelog registered by Xen\n");

	return 0;

deregister:
	misc_deregister(&xen_mce_chrdev_device);
	return ret;
}
device_initcall(xen_late_init_mcelog);