include/linux/u64_stats_sync.h


DEFINITIONS

This source file includes the following definitions.
  1. u64_stats_init
  2. u64_stats_update_begin
  3. u64_stats_update_end
  4. u64_stats_update_begin_irqsave
  5. u64_stats_update_end_irqrestore
  6. __u64_stats_fetch_begin
  7. u64_stats_fetch_begin
  8. __u64_stats_fetch_retry
  9. u64_stats_fetch_retry
  10. u64_stats_fetch_begin_irq
  11. u64_stats_fetch_retry_irq

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_U64_STATS_SYNC_H
#define _LINUX_U64_STATS_SYNC_H

/*
 * To properly implement 64-bit network statistics on 32-bit and 64-bit hosts,
 * we provide a synchronization point that is a no-op on 64-bit or UP kernels.
 *
 * Key points :
 * 1) Use a seqcount on 32-bit SMP, with low overhead.
 * 2) The whole thing is a no-op on 64-bit arches or UP kernels.
 * 3) The write side must ensure mutual exclusion, or one seqcount update
 *    could be lost, thus blocking readers forever.
 *    If this synchronization point is not a mutex, but a spinlock,
 *    spin_lock_bh() or local_bh_disable() :
 * 3.1) The write side must not sleep.
 * 3.2) The write side must not allow preemption.
 * 3.3) If applicable, interrupts must be disabled.
 *
 * 4) If a reader fetches several counters, there is no guarantee the whole
 *    set is consistent (remember point 1: this is a no-op on 64-bit arches
 *    anyway).
 *
 * 5) Readers are allowed to sleep or be preempted/interrupted : they perform
 *    pure reads. But if they have to fetch many values, it is better to not
 *    allow preemption/interrupts, to avoid many retries.
 *
 * 6) If the counter might be written from an interrupt, readers should block
 *    interrupts. (On UP there is no seqcount_t protection, so a reader
 *    allowing interrupts could read partial values.)
 *
 * 7) For irq and softirq use, readers can use the u64_stats_fetch_begin_irq()
 *    and u64_stats_fetch_retry_irq() helpers.
 *
 * Usage :
 *
 * A stats producer (writer) should use the following template, provided it
 * already has exclusive access to the counters (a lock is already taken, or
 * per-cpu data is used in a non-preemptible context) :
 *
 *   spin_lock_bh(...) or other synchronization to get exclusive access
 *   ...
 *   u64_stats_update_begin(&stats->syncp);
 *   stats->bytes64 += len; // non atomic operation
 *   stats->packets64++;    // non atomic operation
 *   u64_stats_update_end(&stats->syncp);
 *
 * A consumer (reader) should use the following template to get a consistent
 * snapshot of each variable (but no consistency guarantee across several of
 * them) :
 *
 * u64 tbytes, tpackets;
 * unsigned int start;
 *
 * do {
 *         start = u64_stats_fetch_begin(&stats->syncp);
 *         tbytes = stats->bytes64; // non atomic operation
 *         tpackets = stats->packets64; // non atomic operation
 * } while (u64_stats_fetch_retry(&stats->syncp, start));
 *
 * See drivers/net/loopback.c for an example of use with per_cpu containers,
 * in a BH-disabled context.
 */
#include <linux/seqlock.h>

struct u64_stats_sync {
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        seqcount_t      seq;
#endif
};

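/*
 * Example (a minimal sketch; the structure and field names below are
 * illustrative, not part of this API): a driver typically embeds a
 * u64_stats_sync next to the counters it protects, often in a per-cpu
 * structure:
 *
 *   struct my_pcpu_stats {
 *           u64                     rx_packets;
 *           u64                     rx_bytes;
 *           struct u64_stats_sync   syncp;
 *   };
 */
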
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        seqcount_init(&syncp->seq);
#endif
}
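
/*
 * Example (a minimal sketch, reusing the illustrative per-cpu structure
 * above): every u64_stats_sync must be initialized once before first use:
 *
 *   int cpu;
 *
 *   for_each_possible_cpu(cpu)
 *           u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);
 */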

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        write_seqcount_end(&syncp->seq);
#endif
}
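
/*
 * Example (a minimal sketch; names are illustrative): a per-cpu writer
 * running in a non-preemptible, BH-disabled context (e.g. NAPI poll)
 * already has exclusive access to its slot, so no extra lock is needed:
 *
 *   struct my_pcpu_stats *s = this_cpu_ptr(stats);
 *
 *   u64_stats_update_begin(&s->syncp);
 *   s->rx_packets++;
 *   s->rx_bytes += len;
 *   u64_stats_update_end(&s->syncp);
 */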

/*
 * Variant for writers whose counters may also be updated from an interrupt
 * on the same CPU: on 32-bit SMP, irqs are disabled around the seqcount
 * update, so an irq arriving mid-update cannot nest a second write section
 * and leave readers spinning.
 */
static inline unsigned long
u64_stats_update_begin_irqsave(struct u64_stats_sync *syncp)
{
        unsigned long flags = 0;

#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        local_irq_save(flags);
        write_seqcount_begin(&syncp->seq);
#endif
        return flags;
}

static inline void
u64_stats_update_end_irqrestore(struct u64_stats_sync *syncp,
                                unsigned long flags)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        write_seqcount_end(&syncp->seq);
        local_irq_restore(flags);
#endif
}
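
/*
 * Example (a minimal sketch; names are illustrative): a writer shared
 * between task and hard-irq context:
 *
 *   unsigned long flags;
 *
 *   flags = u64_stats_update_begin_irqsave(&s->syncp);
 *   s->tx_errors++;
 *   u64_stats_update_end_irqrestore(&s->syncp, flags);
 */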

static inline unsigned int __u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        return read_seqcount_begin(&syncp->seq);
#else
        return 0;
#endif
}

/*
 * On 32-bit UP there is no seqcount: the reader instead disables preemption
 * so a writer cannot run in the middle of the fetch and expose a torn
 * 64-bit value.
 */
static inline unsigned int u64_stats_fetch_begin(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
        preempt_disable();
#endif
        return __u64_stats_fetch_begin(syncp);
}

static inline bool __u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
                                           unsigned int start)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
        return read_seqcount_retry(&syncp->seq, start);
#else
        return false;
#endif
}

static inline bool u64_stats_fetch_retry(const struct u64_stats_sync *syncp,
                                         unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
        preempt_enable();
#endif
        return __u64_stats_fetch_retry(syncp, start);
}

/*
 * In case irq handlers can update u64 counters, readers can use the
 * following helpers:
 * - 32-bit SMP arches use seqcount protection, which is irq safe.
 * - 32-bit UP must disable irqs.
 * - 64-bit arches can read u64 values atomically, which is irq safe.
 */
static inline unsigned int u64_stats_fetch_begin_irq(const struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
        local_irq_disable();
#endif
        return __u64_stats_fetch_begin(syncp);
}

static inline bool u64_stats_fetch_retry_irq(const struct u64_stats_sync *syncp,
                                             unsigned int start)
{
#if BITS_PER_LONG == 32 && !defined(CONFIG_SMP)
        local_irq_enable();
#endif
        return __u64_stats_fetch_retry(syncp, start);
}
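
/*
 * Example (a minimal sketch; names are illustrative): a reader whose
 * counters may also be updated from hard-irq context:
 *
 *   u64 terrors;
 *   unsigned int start;
 *
 *   do {
 *           start = u64_stats_fetch_begin_irq(&s->syncp);
 *           terrors = s->tx_errors;
 *   } while (u64_stats_fetch_retry_irq(&s->syncp, start));
 */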

#endif /* _LINUX_U64_STATS_SYNC_H */
