drivers/mtd/mtdoops.c


DEFINITIONS

This source file includes the following definitions:
  1. mark_page_used
  2. mark_page_unused
  3. page_is_used
  4. mtdoops_erase_block
  5. mtdoops_inc_counter
  6. mtdoops_workfunc_erase
  7. mtdoops_write
  8. mtdoops_workfunc_write
  9. find_next_position
  10. mtdoops_do_dump
  11. mtdoops_notify_add
  12. mtdoops_notify_remove
  13. mtdoops_init
  14. mtdoops_exit

// SPDX-License-Identifier: GPL-2.0-only
/*
 * MTD Oops/Panic logger
 *
 * Copyright © 2007 Nokia Corporation. All rights reserved.
 *
 * Author: Richard Purdie <rpurdie@openedhand.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/mtd/mtd.h>
#include <linux/kmsg_dump.h>

/* Maximum MTD partition size */
#define MTDOOPS_MAX_MTD_SIZE (8 * 1024 * 1024)

#define MTDOOPS_KERNMSG_MAGIC 0x5d005d00
#define MTDOOPS_HEADER_SIZE   8

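/*
 * Each record occupies one record_size slot on the flash device.  The slot
 * starts with an 8-byte header - a 32-bit sequence counter followed by
 * MTDOOPS_KERNMSG_MAGIC - and the remainder holds the kmsg dump text,
 * padded with 0xff so that unused space reads back like erased flash.
 */
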
static unsigned long record_size = 4096;
module_param(record_size, ulong, 0400);
MODULE_PARM_DESC(record_size,
                "record size for MTD OOPS pages in bytes (default 4096)");

static char mtddev[80];
module_param_string(mtddev, mtddev, 80, 0400);
MODULE_PARM_DESC(mtddev,
                "name or index number of the MTD device to use");

static int dump_oops = 1;
module_param(dump_oops, int, 0600);
MODULE_PARM_DESC(dump_oops,
                "set to 1 to dump oopses, 0 to only dump panics (default 1)");

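/*
 * Example usage (the partition is only an example; point mtddev at a
 * dedicated MTD partition on your board):
 *
 *   modprobe mtdoops mtddev=mtd4 record_size=4096 dump_oops=1
 *
 * or, when built in, via the kernel command line:
 *
 *   mtdoops.mtddev=mtd4 mtdoops.record_size=4096
 */
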
static struct mtdoops_context {
        struct kmsg_dumper dump;

        int mtd_index;
        struct work_struct work_erase;
        struct work_struct work_write;
        struct mtd_info *mtd;
        int oops_pages;
        int nextpage;
        int nextcount;
        unsigned long *oops_page_used;

        void *oops_buf;
} oops_cxt;

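/*
 * oops_page_used tracks, one bit per record slot, which slots already hold
 * data.  A set bit means the slot must be erased before it can be reused;
 * find_next_position() seeds the bitmap when the device is attached.
 */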
static void mark_page_used(struct mtdoops_context *cxt, int page)
{
        set_bit(page, cxt->oops_page_used);
}

static void mark_page_unused(struct mtdoops_context *cxt, int page)
{
        clear_bit(page, cxt->oops_page_used);
}

static int page_is_used(struct mtdoops_context *cxt, int page)
{
        return test_bit(page, cxt->oops_page_used);
}

static int mtdoops_erase_block(struct mtdoops_context *cxt, int offset)
{
        struct mtd_info *mtd = cxt->mtd;
        u32 start_page_offset = mtd_div_by_eb(offset, mtd) * mtd->erasesize;
        u32 start_page = start_page_offset / record_size;
        u32 erase_pages = mtd->erasesize / record_size;
        struct erase_info erase;
        int ret;
        int page;

        erase.addr = offset;
        erase.len = mtd->erasesize;

        ret = mtd_erase(mtd, &erase);
        if (ret) {
                printk(KERN_WARNING "mtdoops: erase of region [0x%llx, 0x%llx] on \"%s\" failed\n",
                       (unsigned long long)erase.addr,
                       (unsigned long long)erase.len, mtddev);
                return ret;
        }

        /* Mark pages as unused */
        for (page = start_page; page < start_page + erase_pages; page++)
                mark_page_unused(cxt, page);

        return 0;
}

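/*
 * Advance to the next record slot, wrapping both the slot index and the
 * 32-bit sequence counter (0xffffffff is reserved to mean "unused").  If
 * the new slot still holds an old record, hand off to the erase worker.
 */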
static void mtdoops_inc_counter(struct mtdoops_context *cxt)
{
        cxt->nextpage++;
        if (cxt->nextpage >= cxt->oops_pages)
                cxt->nextpage = 0;
        cxt->nextcount++;
        if (cxt->nextcount == 0xffffffff)
                cxt->nextcount = 0;

        if (page_is_used(cxt, cxt->nextpage)) {
                schedule_work(&cxt->work_erase);
                return;
        }

        printk(KERN_DEBUG "mtdoops: ready %d, %d (no erase)\n",
               cxt->nextpage, cxt->nextcount);
}

/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
        struct mtdoops_context *cxt =
                        container_of(work, struct mtdoops_context, work_erase);
        struct mtd_info *mtd = cxt->mtd;
        int i = 0, j, ret, mod;

        /* We were unregistered */
        if (!mtd)
                return;

        mod = (cxt->nextpage * record_size) % mtd->erasesize;
        if (mod != 0) {
                cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
                if (cxt->nextpage >= cxt->oops_pages)
                        cxt->nextpage = 0;
        }

        while ((ret = mtd_block_isbad(mtd, cxt->nextpage * record_size)) > 0) {
badblock:
                printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
                       cxt->nextpage * record_size);
                i++;
                cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
                if (cxt->nextpage >= cxt->oops_pages)
                        cxt->nextpage = 0;
                if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
                        printk(KERN_ERR "mtdoops: all blocks bad!\n");
                        return;
                }
        }

        if (ret < 0) {
                printk(KERN_ERR "mtdoops: mtd_block_isbad failed, aborting\n");
                return;
        }

        for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
                ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);

        if (ret >= 0) {
                printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
                       cxt->nextpage, cxt->nextcount);
                return;
        }

        if (ret == -EIO) {
                ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
                if (ret < 0 && ret != -EOPNOTSUPP) {
                        printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
                        return;
                }
        }
        goto badblock;
}

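/*
 * Write the buffered record to flash.  The panic path must use the MTD
 * driver's panic_write() because the normal write path may sleep or rely
 * on interrupts that are no longer available after a panic.
 */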
static void mtdoops_write(struct mtdoops_context *cxt, int panic)
{
        struct mtd_info *mtd = cxt->mtd;
        size_t retlen;
        u32 *hdr;
        int ret;

        /* Add mtdoops header to the buffer */
        hdr = cxt->oops_buf;
        hdr[0] = cxt->nextcount;
        hdr[1] = MTDOOPS_KERNMSG_MAGIC;

        if (panic) {
                ret = mtd_panic_write(mtd, cxt->nextpage * record_size,
                                      record_size, &retlen, cxt->oops_buf);
                if (ret == -EOPNOTSUPP) {
                        printk(KERN_ERR "mtdoops: Cannot write from panic without panic_write\n");
                        return;
                }
        } else
                ret = mtd_write(mtd, cxt->nextpage * record_size,
                                record_size, &retlen, cxt->oops_buf);

        if (retlen != record_size || ret < 0)
                printk(KERN_ERR "mtdoops: write failure at %ld (%td of %ld written), error %d\n",
                       cxt->nextpage * record_size, retlen, record_size, ret);
        mark_page_used(cxt, cxt->nextpage);
        memset(cxt->oops_buf, 0xff, record_size);

        mtdoops_inc_counter(cxt);
}

static void mtdoops_workfunc_write(struct work_struct *work)
{
        struct mtdoops_context *cxt =
                        container_of(work, struct mtdoops_context, work_write);

        mtdoops_write(cxt, 0);
}

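/*
 * Scan every record slot to pick up where a previous boot left off: the
 * slot carrying the highest sequence counter becomes the starting point.
 * The 0x40000000/0xc0000000 comparisons treat counters that are very far
 * apart as a sign that the 32-bit counter wrapped, so a small counter can
 * still beat a very large one.
 */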
static void find_next_position(struct mtdoops_context *cxt)
{
        struct mtd_info *mtd = cxt->mtd;
        int ret, page, maxpos = 0;
        u32 count[2], maxcount = 0xffffffff;
        size_t retlen;

        for (page = 0; page < cxt->oops_pages; page++) {
                if (mtd_block_isbad(mtd, page * record_size))
                        continue;
                /* Assume the page is used */
                mark_page_used(cxt, page);
                ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
                               &retlen, (u_char *)&count[0]);
                if (retlen != MTDOOPS_HEADER_SIZE ||
                                (ret < 0 && !mtd_is_bitflip(ret))) {
                        printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
                               page * record_size, retlen,
                               MTDOOPS_HEADER_SIZE, ret);
                        continue;
                }

                if (count[0] == 0xffffffff && count[1] == 0xffffffff)
                        mark_page_unused(cxt, page);
                if (count[0] == 0xffffffff || count[1] != MTDOOPS_KERNMSG_MAGIC)
                        continue;
                if (maxcount == 0xffffffff) {
                        maxcount = count[0];
                        maxpos = page;
                } else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
                        maxcount = count[0];
                        maxpos = page;
                } else if (count[0] > maxcount && count[0] < 0xc0000000) {
                        maxcount = count[0];
                        maxpos = page;
                } else if (count[0] > maxcount && count[0] > 0xc0000000
                                        && maxcount > 0x80000000) {
                        maxcount = count[0];
                        maxpos = page;
                }
        }
        if (maxcount == 0xffffffff) {
                cxt->nextpage = cxt->oops_pages - 1;
                cxt->nextcount = 0;
        } else {
                cxt->nextpage = maxpos;
                cxt->nextcount = maxcount;
        }

        mtdoops_inc_counter(cxt);
}

static void mtdoops_do_dump(struct kmsg_dumper *dumper,
                            enum kmsg_dump_reason reason)
{
        struct mtdoops_context *cxt = container_of(dumper,
                        struct mtdoops_context, dump);

        /* Only dump oopses if dump_oops is set */
        if (reason == KMSG_DUMP_OOPS && !dump_oops)
                return;

        kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
                             record_size - MTDOOPS_HEADER_SIZE, NULL);

        if (reason != KMSG_DUMP_OOPS) {
                /* Panics must be written immediately */
                mtdoops_write(cxt, 1);
        } else {
                /* For other cases, schedule work to write it "nicely" */
                schedule_work(&cxt->work_write);
        }
}

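/*
 * Saved records survive a reboot and can be pulled off the partition from
 * user space, for example (the device node is only an example):
 *
 *   dd if=/dev/mtd4ro bs=4096 | strings
 *
 * mtdoops itself never re-reads old records beyond the header that
 * find_next_position() inspects.
 */
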
static void mtdoops_notify_add(struct mtd_info *mtd)
{
        struct mtdoops_context *cxt = &oops_cxt;
        u64 mtdoops_pages = div_u64(mtd->size, record_size);
        int err;

        if (!strcmp(mtd->name, mtddev))
                cxt->mtd_index = mtd->index;

        if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
                return;

        if (mtd->size < mtd->erasesize * 2) {
                printk(KERN_ERR "mtdoops: MTD partition %d not big enough for mtdoops\n",
                       mtd->index);
                return;
        }
        if (mtd->erasesize < record_size) {
                printk(KERN_ERR "mtdoops: eraseblock size of MTD partition %d too small\n",
                       mtd->index);
                return;
        }
        if (mtd->size > MTDOOPS_MAX_MTD_SIZE) {
                printk(KERN_ERR "mtdoops: mtd%d is too large (limit is %d MiB)\n",
                       mtd->index, MTDOOPS_MAX_MTD_SIZE / 1024 / 1024);
                return;
        }

        /* oops_page_used is a bit field */
        cxt->oops_page_used =
                vmalloc(array_size(sizeof(unsigned long),
                                   DIV_ROUND_UP(mtdoops_pages,
                                                BITS_PER_LONG)));
        if (!cxt->oops_page_used) {
                printk(KERN_ERR "mtdoops: could not allocate page array\n");
                return;
        }

        cxt->dump.max_reason = KMSG_DUMP_OOPS;
        cxt->dump.dump = mtdoops_do_dump;
        err = kmsg_dump_register(&cxt->dump);
        if (err) {
                printk(KERN_ERR "mtdoops: registering kmsg dumper failed, error %d\n", err);
                vfree(cxt->oops_page_used);
                cxt->oops_page_used = NULL;
                return;
        }

        cxt->mtd = mtd;
        cxt->oops_pages = (int)mtd->size / record_size;
        find_next_position(cxt);
        printk(KERN_INFO "mtdoops: Attached to MTD device %d\n", mtd->index);
}

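/*
 * Detach: unhook the kmsg dumper first so no new dumps arrive, then drop
 * the device pointer and flush any erase/write work still in flight.
 */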
static void mtdoops_notify_remove(struct mtd_info *mtd)
{
        struct mtdoops_context *cxt = &oops_cxt;

        if (mtd->index != cxt->mtd_index || cxt->mtd_index < 0)
                return;

        if (kmsg_dump_unregister(&cxt->dump) < 0)
                printk(KERN_WARNING "mtdoops: could not unregister kmsg_dumper\n");

        cxt->mtd = NULL;
        flush_work(&cxt->work_erase);
        flush_work(&cxt->work_write);
}

static struct mtd_notifier mtdoops_notifier = {
        .add    = mtdoops_notify_add,
        .remove = mtdoops_notify_remove,
};

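/*
 * Module init only validates the parameters and registers an MTD user
 * notifier; the actual attach happens from mtdoops_notify_add() once the
 * requested device (by name or index) shows up.
 */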
static int __init mtdoops_init(void)
{
        struct mtdoops_context *cxt = &oops_cxt;
        int mtd_index;
        char *endp;

        if (strlen(mtddev) == 0) {
                printk(KERN_ERR "mtdoops: mtd device (mtddev=name/number) must be supplied\n");
                return -EINVAL;
        }
        if ((record_size & 4095) != 0) {
                printk(KERN_ERR "mtdoops: record_size must be a multiple of 4096\n");
                return -EINVAL;
        }
        if (record_size < 4096) {
                printk(KERN_ERR "mtdoops: record_size must be at least 4096 bytes\n");
                return -EINVAL;
        }

        /* Setup the MTD device to use */
        cxt->mtd_index = -1;
        mtd_index = simple_strtoul(mtddev, &endp, 0);
        if (*endp == '\0')
                cxt->mtd_index = mtd_index;

        cxt->oops_buf = vmalloc(record_size);
        if (!cxt->oops_buf) {
                printk(KERN_ERR "mtdoops: failed to allocate buffer workspace\n");
                return -ENOMEM;
        }
        memset(cxt->oops_buf, 0xff, record_size);

        INIT_WORK(&cxt->work_erase, mtdoops_workfunc_erase);
        INIT_WORK(&cxt->work_write, mtdoops_workfunc_write);

        register_mtd_user(&mtdoops_notifier);
        return 0;
}

static void __exit mtdoops_exit(void)
{
        struct mtdoops_context *cxt = &oops_cxt;

        unregister_mtd_user(&mtdoops_notifier);
        vfree(cxt->oops_buf);
        vfree(cxt->oops_page_used);
}

module_init(mtdoops_init);
module_exit(mtdoops_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Richard Purdie <rpurdie@openedhand.com>");
MODULE_DESCRIPTION("MTD Oops/Panic console logger/driver");
