root/kernel/printk/printk_safe.c


DEFINITIONS

This source file includes the following definitions.
  1. queue_flush_work
  2. printk_safe_log_store
  3. printk_safe_flush_line
  4. printk_safe_flush_buffer
  5. report_message_lost
  6. __printk_safe_flush
  7. printk_safe_flush
  8. printk_safe_flush_on_panic
  9. vprintk_nmi
  10. printk_nmi_enter
  11. printk_nmi_exit
  12. printk_nmi_direct_enter
  13. printk_nmi_direct_exit
  14. vprintk_nmi
  15. vprintk_safe
  16. __printk_safe_enter
  17. __printk_safe_exit
  18. vprintk_func
  19. printk_safe_init

   1 // SPDX-License-Identifier: GPL-2.0-or-later
   2 /*
   3  * printk_safe.c - Safe printk for printk-deadlock-prone contexts
   4  */
   5 
   6 #include <linux/preempt.h>
   7 #include <linux/spinlock.h>
   8 #include <linux/debug_locks.h>
   9 #include <linux/smp.h>
  10 #include <linux/cpumask.h>
  11 #include <linux/irq_work.h>
  12 #include <linux/printk.h>
  13 
  14 #include "internal.h"
  15 
  16 /*
  17  * printk() cannot take logbuf_lock in NMI context. Instead,
  18  * it uses an alternative implementation that temporarily stores
  19  * the strings into a per-CPU buffer. The content of the buffer
  20  * is later flushed into the main ring buffer via IRQ work.
  21  *
  22  * The alternative implementation is chosen transparently
  23  * by examining the current printk() context mask stored in the
  24  * @printk_context per-CPU variable.
  25  *
  26  * The implementation allows flushing the strings also from another CPU.
  27  * There are situations when we want to make sure that all buffers
  28  * have been handled or when IRQs are blocked.
  29  */
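/*
 * Illustration (a simplified sketch, not part of the file): the context is
 * marked by the callers before printk() is invoked. For NMIs this is
 * expected to happen in the NMI entry/exit path (e.g. via nmi_enter() and
 * nmi_exit() in some kernel versions):
 *
 *	printk_nmi_enter();
 *	printk("...\n");	// stored into the per-CPU nmi_print_seq buffer
 *	printk_nmi_exit();	// the buffer is flushed later via irq_work
 */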
  30 
  31 #define SAFE_LOG_BUF_LEN ((1 << CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT) -     \
  32                                 sizeof(atomic_t) -                      \
  33                                 sizeof(atomic_t) -                      \
  34                                 sizeof(struct irq_work))
  35 
  36 struct printk_safe_seq_buf {
  37         atomic_t                len;    /* length of written data */
  38         atomic_t                message_lost;
  39         struct irq_work         work;   /* IRQ work that flushes the buffer */
  40         unsigned char           buffer[SAFE_LOG_BUF_LEN];
  41 };
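/*
 * A worked size example (purely illustrative; both sizes are configuration
 * and architecture dependent): SAFE_LOG_BUF_LEN is chosen so that the whole
 * struct printk_safe_seq_buf fits into 2^CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT
 * bytes. Assuming a shift of 13, sizeof(atomic_t) == 4 and
 * sizeof(struct irq_work) == 24, the text buffer would be
 * 8192 - 4 - 4 - 24 = 8160 bytes.
 */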
  42 
  43 static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
  44 static DEFINE_PER_CPU(int, printk_context);
  45 
  46 #ifdef CONFIG_PRINTK_NMI
  47 static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
  48 #endif
  49 
  50 /* Get flushed in a safer context. */
  51 static void queue_flush_work(struct printk_safe_seq_buf *s)
  52 {
  53         if (printk_percpu_data_ready())
  54                 irq_work_queue(&s->work);
  55 }
  56 
  57 /*
  58  * Add a message to the per-CPU context-dependent buffer. NMI and printk-safe
  59  * have dedicated buffers, because otherwise printk-safe preempted by
  60  * NMI-printk would have overwritten the NMI messages.
  61  *
  62  * The messages are flushed from irq work (or from panic()), possibly
  63  * from another CPU, concurrently with printk_safe_log_store(). Should this
  64  * happen, printk_safe_log_store() will notice the buffer->len mismatch
  65  * and repeat the write.
  66  */
  67 static __printf(2, 0) int printk_safe_log_store(struct printk_safe_seq_buf *s,
  68                                                 const char *fmt, va_list args)
  69 {
  70         int add;
  71         size_t len;
  72         va_list ap;
  73 
  74 again:
  75         len = atomic_read(&s->len);
  76 
  77         /* The trailing '\0' is not counted into len. */
  78         if (len >= sizeof(s->buffer) - 1) {
  79                 atomic_inc(&s->message_lost);
  80                 queue_flush_work(s);
  81                 return 0;
  82         }
  83 
  84         /*
  85          * Make sure that all old data have been read before the buffer
  86          * was reset. This is not needed when we just append data.
  87          */
  88         if (!len)
  89                 smp_rmb();
  90 
  91         va_copy(ap, args);
  92         add = vscnprintf(s->buffer + len, sizeof(s->buffer) - len, fmt, ap);
  93         va_end(ap);
  94         if (!add)
  95                 return 0;
  96 
  97         /*
  98          * Do it once again if the buffer has been flushed in the meantime.
  99          * Note that atomic_cmpxchg() is an implicit memory barrier that
 100          * makes sure that the data were written before updating s->len.
 101          */
 102         if (atomic_cmpxchg(&s->len, len, len + add) != len)
 103                 goto again;
 104 
 105         queue_flush_work(s);
 106         return add;
 107 }
 108 
 109 static inline void printk_safe_flush_line(const char *text, int len)
 110 {
 111         /*
 112  * Avoid any console driver calls from here, because we may be
 113          * in NMI or printk_safe context (when in panic). The messages
 114          * must go only into the ring buffer at this stage.  Consoles will
 115          * get explicitly called later when a crashdump is not generated.
 116          */
 117         printk_deferred("%.*s", len, text);
 118 }
 119 
 120 /* printk a part of the temporary buffer line by line */
 121 static int printk_safe_flush_buffer(const char *start, size_t len)
 122 {
 123         const char *c, *end;
 124         bool header;
 125 
 126         c = start;
 127         end = start + len;
 128         header = true;
 129 
 130         /* Print line by line. */
 131         while (c < end) {
 132                 if (*c == '\n') {
 133                         printk_safe_flush_line(start, c - start + 1);
 134                         start = ++c;
 135                         header = true;
 136                         continue;
 137                 }
 138 
 139                 /* Handle continuation lines or a missing newline. */
 140                 if ((c + 1 < end) && printk_get_level(c)) {
 141                         if (header) {
 142                                 c = printk_skip_level(c);
 143                                 continue;
 144                         }
 145 
 146                         printk_safe_flush_line(start, c - start);
 147                         start = c++;
 148                         header = true;
 149                         continue;
 150                 }
 151 
 152                 header = false;
 153                 c++;
 154         }
 155 
 156         /* Check if there was a partial line. Ignore pure header. */
 157         if (start < end && !header) {
 158                 static const char newline[] = KERN_CONT "\n";
 159 
 160                 printk_safe_flush_line(start, end - start);
 161                 printk_safe_flush_line(newline, strlen(newline));
 162         }
 163 
 164         return len;
 165 }
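/*
 * Example of the splitting above (illustration only): a buffer holding
 * "first\nsecond\npartial" is flushed as the lines "first\n" and
 * "second\n", followed by "partial" plus an explicit KERN_CONT "\n"
 * for the trailing partial line.
 */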
 166 
 167 static void report_message_lost(struct printk_safe_seq_buf *s)
 168 {
 169         int lost = atomic_xchg(&s->message_lost, 0);
 170 
 171         if (lost)
 172                 printk_deferred("Lost %d message(s)!\n", lost);
 173 }
 174 
 175 /*
 176  * Flush data from the associated per-CPU buffer. The function
 177  * can be called either via IRQ work or independently.
 178  */
 179 static void __printk_safe_flush(struct irq_work *work)
 180 {
 181         static raw_spinlock_t read_lock =
 182                 __RAW_SPIN_LOCK_INITIALIZER(read_lock);
 183         struct printk_safe_seq_buf *s =
 184                 container_of(work, struct printk_safe_seq_buf, work);
 185         unsigned long flags;
 186         size_t len;
 187         int i;
 188 
 189         /*
 190          * The lock has two functions. First, one reader has to flush all
 191  * available messages to make the lockless synchronization with
 192          * writers easier. Second, we do not want to mix messages from
 193          * different CPUs. This is especially important when printing
 194          * a backtrace.
 195          */
 196         raw_spin_lock_irqsave(&read_lock, flags);
 197 
 198         i = 0;
 199 more:
 200         len = atomic_read(&s->len);
 201 
 202         /*
 203          * This is just a paranoid check that nobody has manipulated
 204          * the buffer in an unexpected way. If we printed something then
 205          * @len must only increase. Also it should never overflow the
 206          * buffer size.
 207          */
 208         if ((i && i >= len) || len > sizeof(s->buffer)) {
 209                 const char *msg = "printk_safe_flush: internal error\n";
 210 
 211                 printk_safe_flush_line(msg, strlen(msg));
 212                 len = 0;
 213         }
 214 
 215         if (!len)
 216                 goto out; /* Someone else has already flushed the buffer. */
 217 
 218         /* Make sure that the data has been written up to @len */
 219         smp_rmb();
 220         i += printk_safe_flush_buffer(s->buffer + i, len - i);
 221 
 222         /*
 223          * Check that nothing has been added in the meantime and truncate
 224          * the buffer. Note that atomic_cmpxchg() is an implicit memory
 225          * barrier that makes sure that the data were copied before
 226          * updating s->len.
 227          */
 228         if (atomic_cmpxchg(&s->len, len, 0) != len)
 229                 goto more;
 230 
 231 out:
 232         report_message_lost(s);
 233         raw_spin_unlock_irqrestore(&read_lock, flags);
 234 }
 235 
 236 /**
 237  * printk_safe_flush - flush all per-cpu nmi and printk-safe buffers.
 238  *
 239  * The buffers are flushed automatically via IRQ work. This function
 240  * is useful only when someone wants to be sure that all buffers have
 241  * been flushed at some point.
 242  */
 243 void printk_safe_flush(void)
 244 {
 245         int cpu;
 246 
 247         for_each_possible_cpu(cpu) {
 248 #ifdef CONFIG_PRINTK_NMI
 249                 __printk_safe_flush(&per_cpu(nmi_print_seq, cpu).work);
 250 #endif
 251                 __printk_safe_flush(&per_cpu(safe_print_seq, cpu).work);
 252         }
 253 }
 254 
 255 /**
 256  * printk_safe_flush_on_panic - flush all per-cpu nmi and printk-safe buffers
 257  *      when the system goes down.
 258  *
 259  * Similar to printk_safe_flush() but it can be called even in NMI context when
 260  * the system goes down. It makes a best effort to get the NMI messages into
 261  * the main ring buffer.
 262  *
 263  * Note that it tries harder (re-initializing a locked logbuf_lock) only when
 264  * a single CPU is online.
 264  */
 265 void printk_safe_flush_on_panic(void)
 266 {
 267         /*
 268          * Make sure that we can access the main ring buffer.
 269          * Do not risk a double release when more CPUs are up.
 270          */
 271         if (raw_spin_is_locked(&logbuf_lock)) {
 272                 if (num_online_cpus() > 1)
 273                         return;
 274 
 275                 debug_locks_off();
 276                 raw_spin_lock_init(&logbuf_lock);
 277         }
 278 
 279         printk_safe_flush();
 280 }
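/*
 * Rough sketch of the intended caller (panic() in kernel/panic.c; the exact
 * sequence is version dependent and only assumed here): the flush happens
 * after the other CPUs have been stopped and before the kernel message dump:
 *
 *	smp_send_stop();
 *	...
 *	printk_safe_flush_on_panic();
 *	kmsg_dump(KMSG_DUMP_PANIC);
 */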
 281 
 282 #ifdef CONFIG_PRINTK_NMI
 283 /*
 284  * Safe printk() for NMI context. It uses a per-CPU buffer to
 285  * store the message. NMIs are not nested, so there is always only
 286  * one writer running. But the buffer might get flushed from another
 287  * CPU, so we need to be careful.
 288  */
 289 static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
 290 {
 291         struct printk_safe_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
 292 
 293         return printk_safe_log_store(s, fmt, args);
 294 }
 295 
 296 void notrace printk_nmi_enter(void)
 297 {
 298         this_cpu_or(printk_context, PRINTK_NMI_CONTEXT_MASK);
 299 }
 300 
 301 void notrace printk_nmi_exit(void)
 302 {
 303         this_cpu_and(printk_context, ~PRINTK_NMI_CONTEXT_MASK);
 304 }
 305 
 306 /*
 307  * Marks a code section that might produce many messages in NMI context
 308  * where the risk of losing them is more critical than an eventual
 309  * reordering.
 310  *
 311  * It has an effect only when called in NMI context. Then printk()
 312  * will try to store the messages into the main logbuf directly
 313  * and use the per-CPU buffers only as a fallback when the lock
 314  * is not available.
 315  */
 316 void printk_nmi_direct_enter(void)
 317 {
 318         if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
 319                 this_cpu_or(printk_context, PRINTK_NMI_DIRECT_CONTEXT_MASK);
 320 }
 321 
 322 void printk_nmi_direct_exit(void)
 323 {
 324         this_cpu_and(printk_context, ~PRINTK_NMI_DIRECT_CONTEXT_MASK);
 325 }
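/*
 * Illustrative use (modelled on the NMI backtrace code; treat the exact
 * call site as an assumption): a long dump in NMI context prefers the main
 * log buffer so that nothing is lost to a small per-CPU buffer:
 *
 *	printk_nmi_direct_enter();
 *	dump_stack();
 *	printk_nmi_direct_exit();
 */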
 326 
 327 #else
 328 
 329 static __printf(1, 0) int vprintk_nmi(const char *fmt, va_list args)
 330 {
 331         return 0;
 332 }
 333 
 334 #endif /* CONFIG_PRINTK_NMI */
 335 
 336 /*
 337  * Lock-less printk(), to avoid deadlocks should printk() recurse
 338  * into itself. It uses a per-CPU buffer to store the message, just like
 339  * the NMI variant.
 340  */
 341 static __printf(1, 0) int vprintk_safe(const char *fmt, va_list args)
 342 {
 343         struct printk_safe_seq_buf *s = this_cpu_ptr(&safe_print_seq);
 344 
 345         return printk_safe_log_store(s, fmt, args);
 346 }
 347 
 348 /* Can be preempted by NMI. */
 349 void __printk_safe_enter(void)
 350 {
 351         this_cpu_inc(printk_context);
 352 }
 353 
 354 /* Can be preempted by NMI. */
 355 void __printk_safe_exit(void)
 356 {
 357         this_cpu_dec(printk_context);
 358 }
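/*
 * These two are normally used through wrapper macros in
 * kernel/printk/internal.h, roughly (simplified sketch):
 *
 *	printk_safe_enter_irqsave(flags);	// local_irq_save() + __printk_safe_enter()
 *	raw_spin_lock(&logbuf_lock);
 *	...				// any printk() here lands in safe_print_seq
 *	raw_spin_unlock(&logbuf_lock);
 *	printk_safe_exit_irqrestore(flags);	// __printk_safe_exit() + local_irq_restore()
 */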
 359 
 360 __printf(1, 0) int vprintk_func(const char *fmt, va_list args)
 361 {
 362         /*
 363          * Try to use the main logbuf even in NMI. But avoid calling console
 364          * drivers that might have their own locks.
 365          */
 366         if ((this_cpu_read(printk_context) & PRINTK_NMI_DIRECT_CONTEXT_MASK) &&
 367             raw_spin_trylock(&logbuf_lock)) {
 368                 int len;
 369 
 370                 len = vprintk_store(0, LOGLEVEL_DEFAULT, NULL, 0, fmt, args);
 371                 raw_spin_unlock(&logbuf_lock);
 372                 defer_console_output();
 373                 return len;
 374         }
 375 
 376         /* Use extra buffer in NMI when logbuf_lock is taken or in safe mode. */
 377         if (this_cpu_read(printk_context) & PRINTK_NMI_CONTEXT_MASK)
 378                 return vprintk_nmi(fmt, args);
 379 
 380         /* Use extra buffer to prevent a recursion deadlock in safe mode. */
 381         if (this_cpu_read(printk_context) & PRINTK_SAFE_CONTEXT_MASK)
 382                 return vprintk_safe(fmt, args);
 383 
 384         /* No obstacles. */
 385         return vprintk_default(fmt, args);
 386 }
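/*
 * For context, a simplified sketch of the main caller in
 * kernel/printk/printk.c (details may differ between versions):
 *
 *	asmlinkage __visible int printk(const char *fmt, ...)
 *	{
 *		va_list args;
 *		int r;
 *
 *		va_start(args, fmt);
 *		r = vprintk_func(fmt, args);
 *		va_end(args);
 *
 *		return r;
 *	}
 */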
 387 
 388 void __init printk_safe_init(void)
 389 {
 390         int cpu;
 391 
 392         for_each_possible_cpu(cpu) {
 393                 struct printk_safe_seq_buf *s;
 394 
 395                 s = &per_cpu(safe_print_seq, cpu);
 396                 init_irq_work(&s->work, __printk_safe_flush);
 397 
 398 #ifdef CONFIG_PRINTK_NMI
 399                 s = &per_cpu(nmi_print_seq, cpu);
 400                 init_irq_work(&s->work, __printk_safe_flush);
 401 #endif
 402         }
 403 
 404         /* Flush pending messages whose IRQ work has not been scheduled yet. */
 405         printk_safe_flush();
 406 }
