This source file includes the following definitions:
- _mix_pool_bytes
- __mix_pool_bytes
- mix_pool_bytes
- fast_mix
- process_random_ready_list
- credit_entropy_bits
- credit_entropy_bits_safe
- parse_trust_cpu
- crng_initialize
- do_numa_crng_init
- numa_crng_init
- numa_crng_init
- crng_fast_load
- crng_slow_load
- crng_reseed
- _extract_crng
- extract_crng
- _crng_backtrack_protect
- crng_backtrack_protect
- extract_crng_user
- add_device_randomness
- add_timer_randomness
- add_input_randomness
- add_interrupt_bench
- get_reg
- add_interrupt_randomness
- add_disk_randomness
- xfer_secondary_pool
- _xfer_secondary_pool
- push_to_pool
- account
- extract_buf
- _extract_entropy
- extract_entropy
- extract_entropy_user
- _warn_unseeded_randomness
- _get_random_bytes
- get_random_bytes
- entropy_timer
- try_to_generate_entropy
- wait_for_random_bytes
- rng_is_initialized
- add_random_ready_callback
- del_random_ready_callback
- get_random_bytes_arch
- init_std_data
- rand_initialize
- rand_initialize_disk
- _random_read
- random_read
- urandom_read
- random_poll
- write_pool
- random_write
- random_ioctl
- random_fasync
- SYSCALL_DEFINE3
- proc_do_uuid
- proc_do_entropy
- get_random_u64
- get_random_u32
- invalidate_batched_entropy
- randomize_page
- add_hwgenerator_randomness
- add_bootloader_randomness
#include <linux/utsname.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/fcntl.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/interrupt.h>
#include <linux/mm.h>
#include <linux/nodemask.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
#include <linux/ptrace.h>
#include <linux/workqueue.h>
#include <linux/irq.h>
#include <linux/ratelimit.h>
#include <linux/syscalls.h>
#include <linux/completion.h>
#include <linux/uuid.h>
#include <crypto/chacha.h>

#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/io.h>

#define CREATE_TRACE_POINTS
#include <trace/events/random.h>

#define INPUT_POOL_SHIFT	12
#define INPUT_POOL_WORDS	(1 << (INPUT_POOL_SHIFT-5))
#define OUTPUT_POOL_SHIFT	10
#define OUTPUT_POOL_WORDS	(1 << (OUTPUT_POOL_SHIFT-5))
#define SEC_XFER_SIZE		512
#define EXTRACT_SIZE		10

#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))

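/*
 * To allow fractional bits to be tracked, the entropy_count field is
 * denominated in units of 1/8th bits.
 */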
#define ENTROPY_SHIFT 3
#define ENTROPY_BITS(r) ((r)->entropy_count >> ENTROPY_SHIFT)

/*
 * The minimum number of bits of entropy before we wake up a read on
 * /dev/random.  Should be enough to do a significant reseed.
 */
static int random_read_wakeup_bits = 64;

/*
 * If the entropy count falls under this number of bits, then we
 * should wake up processes which are selecting or polling on write
 * access to /dev/random.
 */
static int random_write_wakeup_bits = 28 * OUTPUT_POOL_WORDS;

/*
 * The pool is stirred with a primitive polynomial of degree poolwords
 * over GF(2), i.e. x^poolwords + x^tap1 + x^tap2 + x^tap3 + x^tap4 +
 * x^tap5 + 1.  The taps below were chosen to be evenly spaced except
 * for the last tap, which is 1 to get the twisting happening as fast
 * as possible.
 */
static const struct poolinfo {
	int poolbitshift, poolwords, poolbytes, poolfracbits;
#define S(x) ilog2(x)+5, (x), (x)*4, (x) << (ENTROPY_SHIFT+5)
	int tap1, tap2, tap3, tap4, tap5;
} poolinfo_table[] = {
	/* x^128 + x^104 + x^76 + x^51 + x^25 + x + 1 */
	{ S(128),	104,	76,	51,	25,	1 },
	/* x^32 + x^26 + x^19 + x^14 + x^7 + x + 1 */
	{ S(32),	26,	19,	14,	7,	1 },
#if 0
	/* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 */
	{ S(2048),	1638,	1231,	819,	411,	1 },
	/* x^1024 + x^817 + x^615 + x^412 + x^204 + x + 1 */
	{ S(1024),	817,	615,	412,	204,	1 },
	/* x^1024 + x^819 + x^616 + x^410 + x^207 + x^2 + 1 */
	{ S(1024),	819,	616,	410,	207,	2 },
	/* x^512 + x^411 + x^308 + x^208 + x^104 + x + 1 */
	{ S(512),	411,	308,	208,	104,	1 },
	/* x^512 + x^409 + x^307 + x^206 + x^102 + x^2 + 1 */
	{ S(512),	409,	307,	206,	102,	2 },
	/* x^512 + x^409 + x^309 + x^205 + x^103 + x^2 + 1 */
	{ S(512),	409,	309,	205,	103,	2 },
	/* x^256 + x^205 + x^155 + x^101 + x^52 + x + 1 */
	{ S(256),	205,	155,	101,	52,	1 },
	/* x^128 + x^103 + x^78 + x^51 + x^27 + x^2 + 1 */
	{ S(128),	103,	78,	51,	27,	2 },
	/* x^64 + x^52 + x^39 + x^26 + x^14 + x + 1 */
	{ S(64),	52,	39,	26,	14,	1 },
#endif
};

static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;

static DEFINE_SPINLOCK(random_ready_list_lock);
static LIST_HEAD(random_ready_list);

struct crng_state {
	__u32		state[16];
	unsigned long	init_time;
	spinlock_t	lock;
};

static struct crng_state primary_crng = {
	.lock = __SPIN_LOCK_UNLOCKED(primary_crng.lock),
};

/*
 * crng_init =  0 --> Uninitialized
 *		1 --> Initialized
 *		2 --> Initialized from input_pool
 *
 * crng_init is protected by primary_crng->lock, and only increases
 * its value (from 0->1->2).
 */
static int crng_init = 0;
#define crng_ready() (likely(crng_init > 1))
static int crng_init_cnt = 0;
static unsigned long crng_global_init_time = 0;
#define CRNG_INIT_CNT_THRESH (2*CHACHA_KEY_SIZE)
static void _extract_crng(struct crng_state *crng, __u8 out[CHACHA_BLOCK_SIZE]);
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used);
static void process_random_ready_list(void);
static void _get_random_bytes(void *buf, int nbytes);

static struct ratelimit_state unseeded_warning =
	RATELIMIT_STATE_INIT("warn_unseeded_randomness", HZ, 3);
static struct ratelimit_state urandom_warning =
	RATELIMIT_STATE_INIT("warn_urandom_randomness", HZ, 3);

static int ratelimit_disable __read_mostly;

module_param_named(ratelimit_disable, ratelimit_disable, int, 0644);
MODULE_PARM_DESC(ratelimit_disable, "Disable random ratelimit suppression");

struct entropy_store;
struct entropy_store {
	/* read-only data: */
	const struct poolinfo *poolinfo;
	__u32 *pool;
	const char *name;
	struct entropy_store *pull;
	struct work_struct push_work;

	/* read-write data: */
	unsigned long last_pulled;
	spinlock_t lock;
	unsigned short add_ptr;
	unsigned short input_rotate;
	int entropy_count;
	unsigned int initialized:1;
	unsigned int last_data_init:1;
	__u8 last_data[EXTRACT_SIZE];
};

static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int rsvd);
static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips);

static void crng_reseed(struct crng_state *crng, struct entropy_store *r);
static void push_to_pool(struct work_struct *work);
static __u32 input_pool_data[INPUT_POOL_WORDS] __latent_entropy;
static __u32 blocking_pool_data[OUTPUT_POOL_WORDS] __latent_entropy;

static struct entropy_store input_pool = {
	.poolinfo = &poolinfo_table[0],
	.name = "input",
	.lock = __SPIN_LOCK_UNLOCKED(input_pool.lock),
	.pool = input_pool_data
};

static struct entropy_store blocking_pool = {
	.poolinfo = &poolinfo_table[1],
	.name = "blocking",
	.pull = &input_pool,
	.lock = __SPIN_LOCK_UNLOCKED(blocking_pool.lock),
	.pool = blocking_pool_data,
	.push_work = __WORK_INITIALIZER(blocking_pool.push_work,
					push_to_pool),
};

static __u32 const twist_table[8] = {
	0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
	0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

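/*
 * This function adds bytes into the entropy "pool".  It does not
 * update the entropy estimate; the caller should call
 * credit_entropy_bits if that is appropriate.
 *
 * The pool is stirred with a polynomial of degree poolwords over
 * GF(2), using the taps from poolinfo, and each result word is run
 * through a CRC-like twist (twist_table) so that input bits diffuse
 * quickly across the whole pool.
 */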
static void _mix_pool_bytes(struct entropy_store *r, const void *in,
			    int nbytes)
{
	unsigned long i, tap1, tap2, tap3, tap4, tap5;
	int input_rotate;
	int wordmask = r->poolinfo->poolwords - 1;
	const char *bytes = in;
	__u32 w;

	tap1 = r->poolinfo->tap1;
	tap2 = r->poolinfo->tap2;
	tap3 = r->poolinfo->tap3;
	tap4 = r->poolinfo->tap4;
	tap5 = r->poolinfo->tap5;

	input_rotate = r->input_rotate;
	i = r->add_ptr;

	/* mix one byte at a time to simplify size handling and churn faster */
	while (nbytes--) {
		w = rol32(*bytes++, input_rotate);
		i = (i - 1) & wordmask;

		/* XOR in the various taps */
		w ^= r->pool[i];
		w ^= r->pool[(i + tap1) & wordmask];
		w ^= r->pool[(i + tap2) & wordmask];
		w ^= r->pool[(i + tap3) & wordmask];
		w ^= r->pool[(i + tap4) & wordmask];
		w ^= r->pool[(i + tap5) & wordmask];

		/* Mix the result back in with a twist on the way in */
		r->pool[i] = (w >> 3) ^ twist_table[w & 7];

		/*
		 * Normally, we add 7 bits of rotation to the pool.
		 * At the beginning of the pool, add an extra 7 bits
		 * rotation, so that successive passes spread the
		 * input bits across the pool evenly.
		 */
		input_rotate = (input_rotate + (i ? 7 : 14)) & 31;
	}

	r->input_rotate = input_rotate;
	r->add_ptr = i;
}

static void __mix_pool_bytes(struct entropy_store *r, const void *in,
			     int nbytes)
{
	trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
	_mix_pool_bytes(r, in, nbytes);
}

static void mix_pool_bytes(struct entropy_store *r, const void *in,
			   int nbytes)
{
	unsigned long flags;

	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
	spin_lock_irqsave(&r->lock, flags);
	_mix_pool_bytes(r, in, nbytes);
	spin_unlock_irqrestore(&r->lock, flags);
}

struct fast_pool {
	__u32		pool[4];
	unsigned long	last;
	unsigned short	reg_idx;
	unsigned char	count;
};

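/*
 * This is a fast mixing routine used by the interrupt randomness
 * collector.  It's hardcoded for an 128 bit pool and assumes that any
 * locks that might be needed are taken by the caller.
 */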
static void fast_mix(struct fast_pool *f)
{
	__u32 a = f->pool[0],	b = f->pool[1];
	__u32 c = f->pool[2],	d = f->pool[3];

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 6);	d = rol32(d, 27);
	d ^= a;			b ^= c;

	a += b;			c += d;
	b = rol32(b, 16);	d = rol32(d, 14);
	d ^= a;			b ^= c;

	f->pool[0] = a;  f->pool[1] = b;
	f->pool[2] = c;  f->pool[3] = d;
	f->count++;
}

static void process_random_ready_list(void)
{
	unsigned long flags;
	struct random_ready_callback *rdy, *tmp;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	list_for_each_entry_safe(rdy, tmp, &random_ready_list, list) {
		struct module *owner = rdy->owner;

		list_del_init(&rdy->list);
		rdy->func(rdy);
		module_put(owner);
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);
}

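/*
 * Credit (or debit) the entropy store with n bits of entropy.  The
 * value of nbits is taken at face value here; use
 * credit_entropy_bits_safe() for values that come from userspace.
 */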
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
	int entropy_count, orig, has_initialized = 0;
	const int pool_size = r->poolinfo->poolfracbits;
	int nfrac = nbits << ENTROPY_SHIFT;

	if (!nbits)
		return;

retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	if (nfrac < 0) {
		/* Debit */
		entropy_count += nfrac;
	} else {
		/*
		 * Credit: we have to account for the possibility of
		 * overwriting already present entropy.  Even in the
		 * ideal case of pure Shannon entropy, new contributions
		 * approach the full value asymptotically:
		 *
		 * entropy <- entropy + (pool_size - entropy) *
		 *	(1 - exp(-add_entropy/pool_size))
		 *
		 * For add_entropy <= pool_size/2 then
		 * (1 - exp(-add_entropy/pool_size)) >=
		 *    (add_entropy/pool_size)*0.7869...
		 * so we can approximate the exponential with
		 * 3/4*add_entropy/pool_size and still be on the
		 * safe side by adding at most pool_size/2 at a time.
		 *
		 * The use of pool_size-2 in the while statement is to
		 * prevent rounding artifacts from making the loop
		 * arbitrarily long; this limits the loop to
		 * log2(pool_size)*2 turns no matter how large nbits is.
		 */
		int pnfrac = nfrac;
		const int s = r->poolinfo->poolbitshift + ENTROPY_SHIFT + 2;
		/* The +2 corresponds to the /4 in the denominator */

		do {
			unsigned int anfrac = min(pnfrac, pool_size/2);
			unsigned int add =
				((pool_size - entropy_count)*anfrac*3) >> s;

			entropy_count += add;
			pnfrac -= anfrac;
		} while (unlikely(entropy_count < pool_size-2 && pnfrac));
	}

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy/overflow: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	} else if (entropy_count > pool_size)
		entropy_count = pool_size;
	if ((r == &blocking_pool) && !r->initialized &&
	    (entropy_count >> ENTROPY_SHIFT) > 128)
		has_initialized = 1;
	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	if (has_initialized) {
		r->initialized = 1;
		wake_up_interruptible(&random_read_wait);
		kill_fasync(&fasync, SIGIO, POLL_IN);
	}

	trace_credit_entropy_bits(r->name, nbits,
				  entropy_count >> ENTROPY_SHIFT, _RET_IP_);

	if (r == &input_pool) {
		int entropy_bits = entropy_count >> ENTROPY_SHIFT;
		struct entropy_store *other = &blocking_pool;

		if (crng_init < 2) {
			if (entropy_bits < 128)
				return;
			crng_reseed(&primary_crng, r);
			entropy_bits = r->entropy_count >> ENTROPY_SHIFT;
		}

		/* initialize the blocking pool if necessary */
		if (entropy_bits >= random_read_wakeup_bits &&
		    !other->initialized) {
			schedule_work(&other->push_work);
			return;
		}

		/* should we wake readers? */
		if (entropy_bits >= random_read_wakeup_bits &&
		    wq_has_sleeper(&random_read_wait)) {
			wake_up_interruptible(&random_read_wait);
			kill_fasync(&fasync, SIGIO, POLL_IN);
		}

		/*
		 * If the input pool is getting full, and the blocking
		 * pool has room, send some entropy to the blocking pool.
		 */
		if (!work_pending(&other->push_work) &&
		    (ENTROPY_BITS(r) > 6 * r->poolinfo->poolbytes) &&
		    (ENTROPY_BITS(other) <= 6 * other->poolinfo->poolbytes))
			schedule_work(&other->push_work);
	}
}

static int credit_entropy_bits_safe(struct entropy_store *r, int nbits)
{
	const int nbits_max = r->poolinfo->poolwords * 32;

	if (nbits < 0)
		return -EINVAL;

	/* Cap the value to avoid overflows */
	nbits = min(nbits, nbits_max);

	credit_entropy_bits(r, nbits);
	return 0;
}

/*********************************************************************
 *
 * CRNG using CHACHA20
 *
 *********************************************************************/

#define CRNG_RESEED_INTERVAL (300*HZ)

static DECLARE_WAIT_QUEUE_HEAD(crng_init_wait);

#ifdef CONFIG_NUMA
/*
 * Hack to deal with crazy userspace progams when they are all trying
 * to access /dev/urandom in parallel.  The programs are almost
 * certainly doing something terribly wrong, but we'll work around
 * their brain damage.
 */
static struct crng_state **crng_node_pool __read_mostly;
#endif
852
853 static void invalidate_batched_entropy(void);
854 static void numa_crng_init(void);
855
856 static bool trust_cpu __ro_after_init = IS_ENABLED(CONFIG_RANDOM_TRUST_CPU);
857 static int __init parse_trust_cpu(char *arg)
858 {
859 return kstrtobool(arg, &trust_cpu);
860 }
861 early_param("random.trust_cpu", parse_trust_cpu);
862
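/*
 * Initialize a ChaCha20 state: the first four words are the "expand
 * 32-byte k" constant, the key words are seeded from the input pool
 * (for the primary crng) or from the primary crng itself, and every
 * key/counter/nonce word is additionally XORed with an architectural
 * RNG output where one is available.  If the CPU's RNG is trusted,
 * that alone is enough to declare the primary crng fully seeded.
 */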
static void crng_initialize(struct crng_state *crng)
{
	int		i;
	int		arch_init = 1;
	unsigned long	rv;

	memcpy(&crng->state[0], "expand 32-byte k", 16);
	if (crng == &primary_crng)
		_extract_entropy(&input_pool, &crng->state[4],
				 sizeof(__u32) * 12, 0);
	else
		_get_random_bytes(&crng->state[4], sizeof(__u32) * 12);
	for (i = 4; i < 16; i++) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv)) {
			rv = random_get_entropy();
			arch_init = 0;
		}
		crng->state[i] ^= rv;
	}
	if (trust_cpu && arch_init && crng == &primary_crng) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		pr_notice("random: crng done (trusting CPU's manufacturer)\n");
	}
	crng->init_time = jiffies - CRNG_RESEED_INTERVAL - 1;
}

#ifdef CONFIG_NUMA
static void do_numa_crng_init(struct work_struct *work)
{
	int i;
	struct crng_state *crng;
	struct crng_state **pool;

	pool = kcalloc(nr_node_ids, sizeof(*pool), GFP_KERNEL|__GFP_NOFAIL);
	for_each_online_node(i) {
		crng = kmalloc_node(sizeof(struct crng_state),
				    GFP_KERNEL | __GFP_NOFAIL, i);
		spin_lock_init(&crng->lock);
		crng_initialize(crng);
		pool[i] = crng;
	}
	mb();
	if (cmpxchg(&crng_node_pool, NULL, pool)) {
		for_each_node(i)
			kfree(pool[i]);
		kfree(pool);
	}
}

static DECLARE_WORK(numa_crng_init_work, do_numa_crng_init);

static void numa_crng_init(void)
{
	schedule_work(&numa_crng_init_work);
}
#else
static void numa_crng_init(void) {}
#endif

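/*
 * crng_fast_load() can be called by code in the interrupt service
 * path when an interrupt comes in.  It only trylocks, so it never
 * blocks; it XORs the incoming bytes over the ChaCha key words and
 * bumps crng_init to 1 once CRNG_INIT_CNT_THRESH bytes have arrived.
 */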
static int crng_fast_load(const char *cp, size_t len)
{
	unsigned long flags;
	char *p;

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	p = (unsigned char *) &primary_crng.state[4];
	while (len > 0 && crng_init_cnt < CRNG_INIT_CNT_THRESH) {
		p[crng_init_cnt % CHACHA_KEY_SIZE] ^= *cp;
		cp++; crng_init_cnt++; len--;
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	if (crng_init_cnt >= CRNG_INIT_CNT_THRESH) {
		invalidate_batched_entropy();
		crng_init = 1;
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: fast init done\n");
	}
	return 1;
}

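/*
 * crng_slow_load() is called by add_device_randomness, which has two
 * attributes.  (1) We can't trust the buffer passed to it is
 * guaranteed to be unpredictable (so it might not have any entropy at
 * all), and (2) it doesn't have the performance constraints of
 * crng_fast_load().
 *
 * So we do something more comprehensive which is guaranteed to touch
 * all of the primary_crng's state, and which uses a LFSR with a
 * period of 255 as part of the mixing algorithm.  Finally, we do
 * *not* advance crng_init_cnt since the buffer we may get may be
 * something like a fixed DMI table (for example), which might very
 * well be unique to the machine, but is otherwise unpredictable.
 */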
static int crng_slow_load(const char *cp, size_t len)
{
	unsigned long		flags;
	static unsigned char	lfsr = 1;
	unsigned char		tmp;
	unsigned		i, max = CHACHA_KEY_SIZE;
	const char *		src_buf = cp;
	char *			dest_buf = (char *) &primary_crng.state[4];

	if (!spin_trylock_irqsave(&primary_crng.lock, flags))
		return 0;
	if (crng_init != 0) {
		spin_unlock_irqrestore(&primary_crng.lock, flags);
		return 0;
	}
	if (len > max)
		max = len;

	for (i = 0; i < max ; i++) {
		tmp = lfsr;
		lfsr >>= 1;
		if (tmp & 1)
			lfsr ^= 0xE1;
		tmp = dest_buf[i % CHACHA_KEY_SIZE];
		dest_buf[i % CHACHA_KEY_SIZE] ^= src_buf[i % len] ^ lfsr;
		lfsr += (tmp << 3) | (tmp >> 5);
	}
	spin_unlock_irqrestore(&primary_crng.lock, flags);
	return 1;
}

static void crng_reseed(struct crng_state *crng, struct entropy_store *r)
{
	unsigned long	flags;
	int		i, num;
	union {
		__u8	block[CHACHA_BLOCK_SIZE];
		__u32	key[8];
	} buf;

	if (r) {
		num = extract_entropy(r, &buf, 32, 16, 0);
		if (num == 0)
			return;
	} else {
		_extract_crng(&primary_crng, buf.block);
		_crng_backtrack_protect(&primary_crng, buf.block,
					CHACHA_KEY_SIZE);
	}
	spin_lock_irqsave(&crng->lock, flags);
	for (i = 0; i < 8; i++) {
		unsigned long	rv;
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		crng->state[i+4] ^= buf.key[i] ^ rv;
	}
	memzero_explicit(&buf, sizeof(buf));
	crng->init_time = jiffies;
	spin_unlock_irqrestore(&crng->lock, flags);
	if (crng == &primary_crng && crng_init < 2) {
		invalidate_batched_entropy();
		numa_crng_init();
		crng_init = 2;
		process_random_ready_list();
		wake_up_interruptible(&crng_init_wait);
		pr_notice("random: crng init done\n");
		if (unseeded_warning.missed) {
			pr_notice("random: %d get_random_xx warning(s) missed "
				  "due to ratelimiting\n",
				  unseeded_warning.missed);
			unseeded_warning.missed = 0;
		}
		if (urandom_warning.missed) {
			pr_notice("random: %d urandom warning(s) missed "
				  "due to ratelimiting\n",
				  urandom_warning.missed);
			urandom_warning.missed = 0;
		}
	}
}

static void _extract_crng(struct crng_state *crng,
			  __u8 out[CHACHA_BLOCK_SIZE])
{
	unsigned long v, flags;

	if (crng_ready() &&
	    (time_after(crng_global_init_time, crng->init_time) ||
	     time_after(jiffies, crng->init_time + CRNG_RESEED_INTERVAL)))
		crng_reseed(crng, crng == &primary_crng ? &input_pool : NULL);
	spin_lock_irqsave(&crng->lock, flags);
	if (arch_get_random_long(&v))
		crng->state[14] ^= v;
	chacha20_block(&crng->state[0], out);
	if (crng->state[12] == 0)
		crng->state[13]++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void extract_crng(__u8 out[CHACHA_BLOCK_SIZE])
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_extract_crng(crng, out);
}

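/*
 * Use the leftover bytes from the CRNG block output (if there is
 * enough) to mutate the CRNG key to provide backtracking protection.
 */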
static void _crng_backtrack_protect(struct crng_state *crng,
				    __u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	unsigned long	flags;
	__u32		*s, *d;
	int		i;

	used = round_up(used, sizeof(__u32));
	if (used + CHACHA_KEY_SIZE > CHACHA_BLOCK_SIZE) {
		extract_crng(tmp);
		used = 0;
	}
	spin_lock_irqsave(&crng->lock, flags);
	s = (__u32 *) &tmp[used];
	d = &crng->state[4];
	for (i=0; i < 8; i++)
		*d++ ^= *s++;
	spin_unlock_irqrestore(&crng->lock, flags);
}

static void crng_backtrack_protect(__u8 tmp[CHACHA_BLOCK_SIZE], int used)
{
	struct crng_state *crng = NULL;

#ifdef CONFIG_NUMA
	if (crng_node_pool)
		crng = crng_node_pool[numa_node_id()];
	if (crng == NULL)
#endif
		crng = &primary_crng;
	_crng_backtrack_protect(crng, tmp, used);
}

static ssize_t extract_crng_user(void __user *buf, size_t nbytes)
{
	ssize_t ret = 0, i = CHACHA_BLOCK_SIZE;
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);
	int large_request = (nbytes > 256);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_crng(tmp);
		i = min_t(int, nbytes, CHACHA_BLOCK_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}
	crng_backtrack_protect(tmp, i);

	/* Wipe data just written to memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

/*********************************************************************
 *
 * Entropy input management
 *
 *********************************************************************/

/* There is one of these per entropy source */
struct timer_rand_state {
	cycles_t last_time;
	long last_delta, last_delta2;
};

#define INIT_TIMER_RAND_STATE { INITIAL_JIFFIES, };

/*
 * Add device- or boot-specific data to the input pool to help
 * initialize it.
 *
 * None of this adds any entropy; it is meant to avoid the problem of
 * the entropy pool having similar initial state across largely
 * identical devices.
 */
void add_device_randomness(const void *buf, unsigned int size)
{
	unsigned long time = random_get_entropy() ^ jiffies;
	unsigned long flags;

	if (!crng_ready() && size)
		crng_slow_load(buf, size);

	trace_add_device_randomness(size, _RET_IP_);
	spin_lock_irqsave(&input_pool.lock, flags);
	_mix_pool_bytes(&input_pool, buf, size);
	_mix_pool_bytes(&input_pool, &time, sizeof(time));
	spin_unlock_irqrestore(&input_pool.lock, flags);
}
EXPORT_SYMBOL(add_device_randomness);

static struct timer_rand_state input_timer_state = INIT_TIMER_RAND_STATE;

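/*
 * This function adds entropy to the entropy "pool" by using timing
 * delays.  It uses the timer_rand_state structure to make an estimate
 * of how many bits of entropy this call has added to the pool.
 *
 * The number "num" is also added to the pool - it should somehow describe
 * the type of event which just happened.  This is currently 0-255 for
 * keyboard scan codes, and 256 upwards for interrupts.
 */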
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
	struct entropy_store	*r;
	struct {
		long jiffies;
		unsigned cycles;
		unsigned num;
	} sample;
	long delta, delta2, delta3;

	sample.jiffies = jiffies;
	sample.cycles = random_get_entropy();
	sample.num = num;
	r = &input_pool;
	mix_pool_bytes(r, &sample, sizeof(sample));

	/*
	 * Calculate number of bits of randomness we probably added.
	 * We take into account the first, second and third-order
	 * deltas in order to make our estimate.
	 */
	delta = sample.jiffies - state->last_time;
	state->last_time = sample.jiffies;

	delta2 = delta - state->last_delta;
	state->last_delta = delta;

	delta3 = delta2 - state->last_delta2;
	state->last_delta2 = delta2;

	if (delta < 0)
		delta = -delta;
	if (delta2 < 0)
		delta2 = -delta2;
	if (delta3 < 0)
		delta3 = -delta3;
	if (delta > delta2)
		delta = delta2;
	if (delta > delta3)
		delta = delta3;

	/*
	 * delta is now minimum absolute delta.  Round down by one bit
	 * on general principles, and cap the entropy estimate at 11
	 * bits.
	 */
	credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
}

void add_input_randomness(unsigned int type, unsigned int code,
			  unsigned int value)
{
	static unsigned char last_value;

	/* ignore autorepeat and the like */
	if (value == last_value)
		return;

	last_value = value;
	add_timer_randomness(&input_timer_state,
			     (type << 4) ^ code ^ (code >> 4) ^ value);
	trace_add_input_randomness(ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_input_randomness);

static DEFINE_PER_CPU(struct fast_pool, irq_randomness);

#ifdef ADD_INTERRUPT_BENCH
static unsigned long avg_cycles, avg_deviation;

#define AVG_SHIFT 8	/* Exponential average factor k=1/256 */
#define FIXED_1_2 (1 << (AVG_SHIFT-1))

static void add_interrupt_bench(cycles_t start)
{
	long delta = random_get_entropy() - start;

	/* Use a weighted moving average */
	delta = delta - ((avg_cycles + FIXED_1_2) >> AVG_SHIFT);
	avg_cycles += delta;
	/* And average deviation */
	delta = abs(delta) - ((avg_deviation + FIXED_1_2) >> AVG_SHIFT);
	avg_deviation += delta;
}
#else
#define add_interrupt_bench(x)
#endif

static __u32 get_reg(struct fast_pool *f, struct pt_regs *regs)
{
	__u32 *ptr = (__u32 *) regs;
	unsigned int idx;

	if (regs == NULL)
		return 0;
	idx = READ_ONCE(f->reg_idx);
	if (idx >= sizeof(struct pt_regs) / sizeof(__u32))
		idx = 0;
	ptr += idx++;
	WRITE_ONCE(f->reg_idx, idx);
	return *ptr;
}

void add_interrupt_randomness(int irq, int irq_flags)
{
	struct entropy_store	*r;
	struct fast_pool	*fast_pool = this_cpu_ptr(&irq_randomness);
	struct pt_regs		*regs = get_irq_regs();
	unsigned long		now = jiffies;
	cycles_t		cycles = random_get_entropy();
	__u32			c_high, j_high;
	__u64			ip;
	unsigned long		seed;
	int			credit = 0;

	if (cycles == 0)
		cycles = get_reg(fast_pool, regs);
	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
	fast_pool->pool[1] ^= now ^ c_high;
	ip = regs ? instruction_pointer(regs) : _RET_IP_;
	fast_pool->pool[2] ^= ip;
	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
		get_reg(fast_pool, regs);

	fast_mix(fast_pool);
	add_interrupt_bench(cycles);

	if (unlikely(crng_init == 0)) {
		if ((fast_pool->count >= 64) &&
		    crng_fast_load((char *) fast_pool->pool,
				   sizeof(fast_pool->pool))) {
			fast_pool->count = 0;
			fast_pool->last = now;
		}
		return;
	}

	if ((fast_pool->count < 64) &&
	    !time_after(now, fast_pool->last + HZ))
		return;

	r = &input_pool;
	if (!spin_trylock(&r->lock))
		return;

	fast_pool->last = now;
	__mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool));

	/*
	 * If we have architectural seed generator, produce a seed and
	 * add it to the pool.  For the sake of paranoia don't let the
	 * architectural seed generator dominate the input from the
	 * interrupt noise.
	 */
	if (arch_get_random_seed_long(&seed)) {
		__mix_pool_bytes(r, &seed, sizeof(seed));
		credit = 1;
	}
	spin_unlock(&r->lock);

	fast_pool->count = 0;

	/* award one bit for the contents of the fast pool */
	credit_entropy_bits(r, credit + 1);
}
EXPORT_SYMBOL_GPL(add_interrupt_randomness);

#ifdef CONFIG_BLOCK
void add_disk_randomness(struct gendisk *disk)
{
	if (!disk || !disk->random)
		return;
	/* first major is 1, so we get >= 0x200 here */
	add_timer_randomness(disk->random, 0x100 + disk_devt(disk));
	trace_add_disk_randomness(disk_devt(disk), ENTROPY_BITS(&input_pool));
}
EXPORT_SYMBOL_GPL(add_disk_randomness);
#endif

/*********************************************************************
 *
 * Entropy extraction routines
 *
 *********************************************************************/

/*
 * This utility inline function is responsible for transferring
 * entropy from the primary pool to the secondary extraction pool.
 * We make sure we pull enough for a 'catastrophic reseed'.
 */
static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes);
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	if (!r->pull ||
	    r->entropy_count >= (nbytes << (ENTROPY_SHIFT + 3)) ||
	    r->entropy_count > r->poolinfo->poolfracbits)
		return;

	_xfer_secondary_pool(r, nbytes);
}

static void _xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
	__u32	tmp[OUTPUT_POOL_WORDS];

	int bytes = nbytes;

	/* pull at least as much as a wakeup */
	bytes = max_t(int, bytes, random_read_wakeup_bits / 8);
	/* but never more than the buffer size */
	bytes = min_t(int, bytes, sizeof(tmp));

	trace_xfer_secondary_pool(r->name, bytes * 8, nbytes * 8,
				  ENTROPY_BITS(r), ENTROPY_BITS(r->pull));
	bytes = extract_entropy(r->pull, tmp, bytes,
				random_read_wakeup_bits / 8, 0);
	mix_pool_bytes(r, tmp, bytes);
	credit_entropy_bits(r, bytes*8);
}

/*
 * Used as a workqueue function so that when the input pool is getting
 * full, we can "spill over" some entropy to the output pools.  That
 * way the output pools can store some of the excess entropy instead
 * of letting it go to waste.
 */
static void push_to_pool(struct work_struct *work)
{
	struct entropy_store *r = container_of(work, struct entropy_store,
					       push_work);
	BUG_ON(!r);
	_xfer_secondary_pool(r, random_read_wakeup_bits/8);
	trace_push_to_pool(r->name, r->entropy_count >> ENTROPY_SHIFT,
			   r->pull->entropy_count >> ENTROPY_SHIFT);
}

/*
 * This function decides how many bytes to actually take from the
 * given pool, and also debits the entropy count accordingly.
 */
static size_t account(struct entropy_store *r, size_t nbytes, int min,
		      int reserved)
{
	int entropy_count, orig, have_bytes;
	size_t ibytes, nfrac;

	BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);

	/* Can we pull enough? */
retry:
	entropy_count = orig = READ_ONCE(r->entropy_count);
	ibytes = nbytes;
	/* never pull more than available */
	have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);

	if ((have_bytes -= reserved) < 0)
		have_bytes = 0;
	ibytes = min_t(size_t, ibytes, have_bytes);
	if (ibytes < min)
		ibytes = 0;

	if (unlikely(entropy_count < 0)) {
		pr_warn("random: negative entropy count: pool %s count %d\n",
			r->name, entropy_count);
		WARN_ON(1);
		entropy_count = 0;
	}
	nfrac = ibytes << (ENTROPY_SHIFT + 3);
	if ((size_t) entropy_count > nfrac)
		entropy_count -= nfrac;
	else
		entropy_count = 0;

	if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
		goto retry;

	trace_debit_entropy(r->name, 8 * ibytes);
	if (ibytes &&
	    (r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
		wake_up_interruptible(&random_write_wait);
		kill_fasync(&fasync, SIGIO, POLL_OUT);
	}

	return ibytes;
}

/*
 * This function does the actual extraction for extract_entropy and
 * extract_entropy_user.
 *
 * Note: we assume that .poolwords is a multiple of 16 words.
 */
static void extract_buf(struct entropy_store *r, __u8 *out)
{
	int i;
	union {
		__u32 w[5];
		unsigned long l[LONGS(20)];
	} hash;
	__u32 workspace[SHA_WORKSPACE_WORDS];
	unsigned long flags;

	/*
	 * If we have an architectural hardware random number
	 * generator, use it for SHA's initial vector
	 */
	sha_init(hash.w);
	for (i = 0; i < LONGS(20); i++) {
		unsigned long v;
		if (!arch_get_random_long(&v))
			break;
		hash.l[i] = v;
	}

	/* Generate a hash across the pool, 16 words (512 bits) at a time */
	spin_lock_irqsave(&r->lock, flags);
	for (i = 0; i < r->poolinfo->poolwords; i += 16)
		sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);

	/*
	 * We mix the hash back into the pool to prevent backtracking
	 * attacks (where the attacker knows the state of the pool
	 * plus the current outputs, and attempts to find previous
	 * outputs), unless the hash function can be inverted.  By
	 * mixing at least a SHA1 worth of hash data back, we make
	 * brute-forcing the feedback as hard as brute-forcing the
	 * hash.
	 */
	__mix_pool_bytes(r, hash.w, sizeof(hash.w));
	spin_unlock_irqrestore(&r->lock, flags);

	memzero_explicit(workspace, sizeof(workspace));

	/*
	 * In case the hash function has some recognizable output
	 * pattern, we fold it in half.  Thus, we always feed back
	 * twice as much data as we output.
	 */
	hash.w[0] ^= hash.w[3];
	hash.w[1] ^= hash.w[4];
	hash.w[2] ^= rol32(hash.w[2], 16);

	memcpy(out, &hash, EXTRACT_SIZE);
	memzero_explicit(&hash, sizeof(hash));
}

static ssize_t _extract_entropy(struct entropy_store *r, void *buf,
				size_t nbytes, int fips)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	while (nbytes) {
		extract_buf(r, tmp);

		if (fips) {
			spin_lock_irqsave(&r->lock, flags);
			if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
				panic("Hardware RNG duplicated output!\n");
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
			spin_unlock_irqrestore(&r->lock, flags);
		}
		i = min_t(int, nbytes, EXTRACT_SIZE);
		memcpy(buf, tmp, i);
		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a buffer.
 *
 * The min parameter specifies the minimum amount we can pull before
 * failing to avoid races that defeat catastrophic reseeding while the
 * reserved parameter indicates how much entropy we must leave in the
 * pool after each pull to avoid starving other readers.
 */
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
			       size_t nbytes, int min, int reserved)
{
	__u8 tmp[EXTRACT_SIZE];
	unsigned long flags;

	/* if last_data isn't primed, we need EXTRACT_SIZE extra bytes */
	if (fips_enabled) {
		spin_lock_irqsave(&r->lock, flags);
		if (!r->last_data_init) {
			r->last_data_init = 1;
			spin_unlock_irqrestore(&r->lock, flags);
			trace_extract_entropy(r->name, EXTRACT_SIZE,
					      ENTROPY_BITS(r), _RET_IP_);
			xfer_secondary_pool(r, EXTRACT_SIZE);
			extract_buf(r, tmp);
			spin_lock_irqsave(&r->lock, flags);
			memcpy(r->last_data, tmp, EXTRACT_SIZE);
		}
		spin_unlock_irqrestore(&r->lock, flags);
	}

	trace_extract_entropy(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, min, reserved);

	return _extract_entropy(r, buf, nbytes, fips_enabled);
}

/*
 * This function extracts randomness from the "entropy pool", and
 * returns it in a userspace buffer.
 */
static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
				    size_t nbytes)
{
	ssize_t ret = 0, i;
	__u8 tmp[EXTRACT_SIZE];
	int large_request = (nbytes > 256);

	trace_extract_entropy_user(r->name, nbytes, ENTROPY_BITS(r), _RET_IP_);
	if (!r->initialized && r->pull) {
		xfer_secondary_pool(r, ENTROPY_BITS(r->pull)/8);
		if (!r->initialized)
			return 0;
	}
	xfer_secondary_pool(r, nbytes);
	nbytes = account(r, nbytes, 0, 0);

	while (nbytes) {
		if (large_request && need_resched()) {
			if (signal_pending(current)) {
				if (ret == 0)
					ret = -ERESTARTSYS;
				break;
			}
			schedule();
		}

		extract_buf(r, tmp);
		i = min_t(int, nbytes, EXTRACT_SIZE);
		if (copy_to_user(buf, tmp, i)) {
			ret = -EFAULT;
			break;
		}

		nbytes -= i;
		buf += i;
		ret += i;
	}

	/* Wipe data just returned from memory */
	memzero_explicit(tmp, sizeof(tmp));

	return ret;
}

#define warn_unseeded_randomness(previous) \
	_warn_unseeded_randomness(__func__, (void *) _RET_IP_, (previous))

static void _warn_unseeded_randomness(const char *func_name, void *caller,
				      void **previous)
{
#ifdef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	const bool print_once = false;
#else
	static bool print_once __read_mostly;
#endif

	if (print_once ||
	    crng_ready() ||
	    (previous && (caller == READ_ONCE(*previous))))
		return;
	WRITE_ONCE(*previous, caller);
#ifndef CONFIG_WARN_ALL_UNSEEDED_RANDOM
	print_once = true;
#endif
	if (__ratelimit(&unseeded_warning))
		printk_deferred(KERN_NOTICE "random: %s called from %pS "
				"with crng_init=%d\n", func_name, caller,
				crng_init);
}
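/*
 * This function is the exported kernel interface.  It returns some
 * number of good random numbers, suitable for key generation, seeding
 * TCP sequence numbers, etc.  It does not rely on the hardware random
 * number generator.  For random bytes direct from the hardware RNG
 * (when available), use get_random_bytes_arch().  In order to ensure
 * that the randomness provided by this function is okay, the function
 * wait_for_random_bytes() should be called and return 0 at least once
 * at any point prior.
 */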
static void _get_random_bytes(void *buf, int nbytes)
{
	__u8 tmp[CHACHA_BLOCK_SIZE] __aligned(4);

	trace_get_random_bytes(nbytes, _RET_IP_);

	while (nbytes >= CHACHA_BLOCK_SIZE) {
		extract_crng(buf);
		buf += CHACHA_BLOCK_SIZE;
		nbytes -= CHACHA_BLOCK_SIZE;
	}

	if (nbytes > 0) {
		extract_crng(tmp);
		memcpy(buf, tmp, nbytes);
		crng_backtrack_protect(tmp, nbytes);
	} else
		crng_backtrack_protect(tmp, CHACHA_BLOCK_SIZE);
	memzero_explicit(tmp, sizeof(tmp));
}

void get_random_bytes(void *buf, int nbytes)
{
	static void *previous;

	warn_unseeded_randomness(&previous);
	_get_random_bytes(buf, nbytes);
}
EXPORT_SYMBOL(get_random_bytes);
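
/*
 * Illustrative sketch (not part of this file): a typical caller that
 * needs key material waits for the CRNG and then pulls bytes, e.g.:
 *
 *	u8 key[32];
 *
 *	if (wait_for_random_bytes() == 0)
 *		get_random_bytes(key, sizeof(key));
 *
 * Both functions are defined in this file; the "key" buffer here is
 * only a hypothetical example.
 */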

/*
 * Each time the timer fires, we expect that we got an unpredictable
 * jump in the cycle counter.  Even if the timer is running on another
 * CPU, the timer activity will be touching the stack of the CPU that
 * is generating entropy.
 *
 * Note that we don't re-arm the timer in the timer itself - we are
 * happy to be scheduled away, since that just makes the load more
 * complex, but we do not want the timer to keep ticking unless the
 * entropy loop is running.  So the re-arming always happens in the
 * entropy loop itself.
 */
static void entropy_timer(struct timer_list *t)
{
	credit_entropy_bits(&input_pool, 1);
}

/*
 * If we have an actual cycle counter, see if we can
 * generate enough entropy with timing noise
 */
static void try_to_generate_entropy(void)
{
	struct {
		unsigned long now;
		struct timer_list timer;
	} stack;

	stack.now = random_get_entropy();

	/* Slow counter - or none. Don't even bother */
	if (stack.now == random_get_entropy())
		return;

	timer_setup_on_stack(&stack.timer, entropy_timer, 0);
	while (!crng_ready()) {
		if (!timer_pending(&stack.timer))
			mod_timer(&stack.timer, jiffies+1);
		mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
		schedule();
		stack.now = random_get_entropy();
	}

	del_timer_sync(&stack.timer);
	destroy_timer_on_stack(&stack.timer);
	mix_pool_bytes(&input_pool, &stack.now, sizeof(stack.now));
}

/*
 * Wait for the urandom pool to be seeded and thus guaranteed to supply
 * cryptographically secure random numbers.  This applies to: the
 * /dev/urandom device, the get_random_bytes function, and the
 * get_random_{u32,u64,int,long} family of functions.  Using any of
 * these functions without first calling this function forfeits the
 * guarantee of security.
 *
 * Returns: 0 if the urandom pool has been seeded.
 *          -ERESTARTSYS if the function was interrupted by a signal.
 */
int wait_for_random_bytes(void)
{
	if (likely(crng_ready()))
		return 0;

	do {
		int ret;
		ret = wait_event_interruptible_timeout(crng_init_wait, crng_ready(), HZ);
		if (ret)
			return ret > 0 ? 0 : ret;

		try_to_generate_entropy();
	} while (!crng_ready());

	return 0;
}
EXPORT_SYMBOL(wait_for_random_bytes);

/*
 * Returns whether or not the urandom pool has been seeded and thus
 * guaranteed to supply cryptographically secure random numbers.  This
 * applies to: the /dev/urandom device, the get_random_bytes function,
 * and the get_random_{u32,u64,int,long} family of functions.
 */
bool rng_is_initialized(void)
{
	return crng_ready();
}
EXPORT_SYMBOL(rng_is_initialized);

/*
 * Add a callback function that will be invoked when the nonblocking
 * pool is initialised.
 *
 * returns: 0 if callback is successfully added
 *	    -EALREADY if pool is already initialised (callback not called)
 *	    -ENOENT if module for callback is not alive
 */
int add_random_ready_callback(struct random_ready_callback *rdy)
{
	struct module *owner;
	unsigned long flags;
	int err = -EALREADY;

	if (crng_ready())
		return err;

	owner = rdy->owner;
	if (!try_module_get(owner))
		return -ENOENT;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (crng_ready())
		goto out;

	owner = NULL;

	list_add(&rdy->list, &random_ready_list);
	err = 0;

out:
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);

	return err;
}
EXPORT_SYMBOL(add_random_ready_callback);

/*
 * Delete a previously registered readiness callback function.
 */
void del_random_ready_callback(struct random_ready_callback *rdy)
{
	unsigned long flags;
	struct module *owner = NULL;

	spin_lock_irqsave(&random_ready_list_lock, flags);
	if (!list_empty(&rdy->list)) {
		list_del_init(&rdy->list);
		owner = rdy->owner;
	}
	spin_unlock_irqrestore(&random_ready_list_lock, flags);

	module_put(owner);
}
EXPORT_SYMBOL(del_random_ready_callback);

/*
 * This function will use the architecture-specific hardware random
 * number generator if it is available.  The arch-specific hw RNG will
 * almost certainly be faster than what we can do in software, but it
 * is impossible to verify that it is implemented securely (as
 * opposed, to, say, the AES encryption of a counter using a key known
 * by the NSA).  So it's useful if we need the speed, but only if we're
 * willing to trust the hardware manufacturer not to have put in a
 * back door.
 *
 * Return number of bytes filled in.
 */
int __must_check get_random_bytes_arch(void *buf, int nbytes)
{
	int left = nbytes;
	char *p = buf;

	trace_get_random_bytes_arch(left, _RET_IP_);
	while (left) {
		unsigned long v;
		int chunk = min_t(int, left, sizeof(unsigned long));

		if (!arch_get_random_long(&v))
			break;

		memcpy(p, &v, chunk);
		p += chunk;
		left -= chunk;
	}

	return nbytes - left;
}
EXPORT_SYMBOL(get_random_bytes_arch);

/*
 * init_std_data - initialize pool with system data
 *
 * @r: pool to initialize
 *
 * This function clears the pool's entropy count and mixes some system
 * data into the pool to prepare it for use.  The pool is not cleared
 * as that can only decrease the entropy in the pool.
 */
static void __init init_std_data(struct entropy_store *r)
{
	int i;
	ktime_t now = ktime_get_real();
	unsigned long rv;

	r->last_pulled = jiffies;
	mix_pool_bytes(r, &now, sizeof(now));
	for (i = r->poolinfo->poolbytes; i > 0; i -= sizeof(rv)) {
		if (!arch_get_random_seed_long(&rv) &&
		    !arch_get_random_long(&rv))
			rv = random_get_entropy();
		mix_pool_bytes(r, &rv, sizeof(rv));
	}
	mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
}

/*
 * Note that setup_arch() may call add_device_randomness()
 * long before we get here.  This allows seeding of the pools
 * with some platform dependent data very early in the boot
 * process.  But it limits our options here.  We must use
 * statically allocated structures that already have all
 * initializations complete at compile time.  We should also
 * take care not to overwrite the precious per platform data
 * we were given.
 */
int __init rand_initialize(void)
{
	init_std_data(&input_pool);
	init_std_data(&blocking_pool);
	crng_initialize(&primary_crng);
	crng_global_init_time = jiffies;
	if (ratelimit_disable) {
		urandom_warning.interval = 0;
		unseeded_warning.interval = 0;
	}
	return 0;
}

#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
	struct timer_rand_state *state;

	/*
	 * If kzalloc returns null, we just won't use that entropy
	 * source.
	 */
	state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
	if (state) {
		state->last_time = INITIAL_JIFFIES;
		disk->random = state;
	}
}
#endif

static ssize_t
_random_read(int nonblock, char __user *buf, size_t nbytes)
{
	ssize_t n;

	if (nbytes == 0)
		return 0;

	nbytes = min_t(size_t, nbytes, SEC_XFER_SIZE);
	while (1) {
		n = extract_entropy_user(&blocking_pool, buf, nbytes);
		if (n < 0)
			return n;
		trace_random_read(n*8, (nbytes-n)*8,
				  ENTROPY_BITS(&blocking_pool),
				  ENTROPY_BITS(&input_pool));
		if (n > 0)
			return n;

		/* Pool is (near) empty.  Maybe wait and retry. */
		if (nonblock)
			return -EAGAIN;

		wait_event_interruptible(random_read_wait,
		    blocking_pool.initialized &&
		    (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits));
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
}

static ssize_t
random_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	return _random_read(file->f_flags & O_NONBLOCK, buf, nbytes);
}

static ssize_t
urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
{
	unsigned long flags;
	static int maxwarn = 10;
	int ret;

	if (!crng_ready() && maxwarn > 0) {
		maxwarn--;
		if (__ratelimit(&urandom_warning))
			printk(KERN_NOTICE "random: %s: uninitialized "
			       "urandom read (%zd bytes read)\n",
			       current->comm, nbytes);
		spin_lock_irqsave(&primary_crng.lock, flags);
		crng_init_cnt = 0;
		spin_unlock_irqrestore(&primary_crng.lock, flags);
	}
	nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
	ret = extract_crng_user(buf, nbytes);
	trace_urandom_read(8 * nbytes, 0, ENTROPY_BITS(&input_pool));
	return ret;
}

static __poll_t
random_poll(struct file *file, poll_table * wait)
{
	__poll_t mask;

	poll_wait(file, &random_read_wait, wait);
	poll_wait(file, &random_write_wait, wait);
	mask = 0;
	if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
		mask |= EPOLLIN | EPOLLRDNORM;
	if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
		mask |= EPOLLOUT | EPOLLWRNORM;
	return mask;
}

static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
	size_t bytes;
	__u32 t, buf[16];
	const char __user *p = buffer;

	while (count > 0) {
		int b, i = 0;

		bytes = min(count, sizeof(buf));
		if (copy_from_user(&buf, p, bytes))
			return -EFAULT;

		for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
			if (!arch_get_random_int(&t))
				break;
			buf[i] ^= t;
		}

		count -= bytes;
		p += bytes;

		mix_pool_bytes(r, buf, bytes);
		cond_resched();
	}

	return 0;
}

static ssize_t random_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *ppos)
{
	size_t ret;

	ret = write_pool(&input_pool, buffer, count);
	if (ret)
		return ret;

	return (ssize_t)count;
}

static long random_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
{
	int size, ent_count;
	int __user *p = (int __user *)arg;
	int retval;

	switch (cmd) {
	case RNDGETENTCNT:
		/* inherently racy, no point locking */
		ent_count = ENTROPY_BITS(&input_pool);
		if (put_user(ent_count, p))
			return -EFAULT;
		return 0;
	case RNDADDTOENTCNT:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p))
			return -EFAULT;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDADDENTROPY:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (get_user(ent_count, p++))
			return -EFAULT;
		if (ent_count < 0)
			return -EINVAL;
		if (get_user(size, p++))
			return -EFAULT;
		retval = write_pool(&input_pool, (const char __user *)p,
				    size);
		if (retval < 0)
			return retval;
		return credit_entropy_bits_safe(&input_pool, ent_count);
	case RNDZAPENTCNT:
	case RNDCLEARPOOL:
		/*
		 * Clear the entropy pool counters.  We no longer clear
		 * the entropy pool, as that's silly.
		 */
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		input_pool.entropy_count = 0;
		blocking_pool.entropy_count = 0;
		return 0;
	case RNDRESEEDCRNG:
		if (!capable(CAP_SYS_ADMIN))
			return -EPERM;
		if (crng_init < 2)
			return -ENODATA;
		crng_reseed(&primary_crng, NULL);
		crng_global_init_time = jiffies - 1;
		return 0;
	default:
		return -EINVAL;
	}
}

static int random_fasync(int fd, struct file *filp, int on)
{
	return fasync_helper(fd, filp, on, &fasync);
}

const struct file_operations random_fops = {
	.read  = random_read,
	.write = random_write,
	.poll  = random_poll,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

const struct file_operations urandom_fops = {
	.read  = urandom_read,
	.write = random_write,
	.unlocked_ioctl = random_ioctl,
	.fasync = random_fasync,
	.llseek = noop_llseek,
};

SYSCALL_DEFINE3(getrandom, char __user *, buf, size_t, count,
		unsigned int, flags)
{
	int ret;

	if (flags & ~(GRND_NONBLOCK|GRND_RANDOM))
		return -EINVAL;

	if (count > INT_MAX)
		count = INT_MAX;

	if (flags & GRND_RANDOM)
		return _random_read(flags & GRND_NONBLOCK, buf, count);

	if (!crng_ready()) {
		if (flags & GRND_NONBLOCK)
			return -EAGAIN;
		ret = wait_for_random_bytes();
		if (unlikely(ret))
			return ret;
	}
	return urandom_read(NULL, buf, count, NULL);
}
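
/*
 * Illustrative userspace sketch (not part of this file): the syscall
 * above backs the getrandom(2) wrapper exposed by libc, e.g.:
 *
 *	unsigned char seed[16];
 *
 *	if (getrandom(seed, sizeof(seed), 0) == sizeof(seed))
 *		... use seed ...
 *
 * With flags == 0 the call blocks until the CRNG is ready, which is
 * exactly the wait_for_random_bytes() path above.
 */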

/********************************************************************
 *
 * Sysctl interface
 *
 ********************************************************************/

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static int min_read_thresh = 8, min_write_thresh;
static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
static int max_write_thresh = INPUT_POOL_WORDS * 32;
static int random_min_urandom_seed = 60;
static char sysctl_bootid[16];

/*
 * This function is used to return both the bootid UUID, and random
 * UUID.  The difference is in whether table->data is NULL; if it is,
 * then a new UUID is generated and returned to the user.
 *
 * If the user accesses this via the proc interface, the UUID will be
 * returned as an ASCII string in the standard UUID format; if via the
 * sysctl system call, as 16 bytes of binary data.
 */
static int proc_do_uuid(struct ctl_table *table, int write,
			void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	unsigned char buf[64], tmp_uuid[16], *uuid;

	uuid = table->data;
	if (!uuid) {
		uuid = tmp_uuid;
		generate_random_uuid(uuid);
	} else {
		static DEFINE_SPINLOCK(bootid_spinlock);

		spin_lock(&bootid_spinlock);
		if (!uuid[8])
			generate_random_uuid(uuid);
		spin_unlock(&bootid_spinlock);
	}

	sprintf(buf, "%pU", uuid);

	fake_table.data = buf;
	fake_table.maxlen = sizeof(buf);

	return proc_dostring(&fake_table, write, buffer, lenp, ppos);
}

/*
 * Return entropy available scaled to integral bits
 */
static int proc_do_entropy(struct ctl_table *table, int write,
			   void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct ctl_table fake_table;
	int entropy_count;

	entropy_count = *(int *)table->data >> ENTROPY_SHIFT;

	fake_table.data = &entropy_count;
	fake_table.maxlen = sizeof(entropy_count);

	return proc_dointvec(&fake_table, write, buffer, lenp, ppos);
}

static int sysctl_poolsize = INPUT_POOL_WORDS * 32;
extern struct ctl_table random_table[];
struct ctl_table random_table[] = {
	{
		.procname	= "poolsize",
		.data		= &sysctl_poolsize,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "entropy_avail",
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_do_entropy,
		.data		= &input_pool.entropy_count,
	},
	{
		.procname	= "read_wakeup_threshold",
		.data		= &random_read_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_read_thresh,
		.extra2		= &max_read_thresh,
	},
	{
		.procname	= "write_wakeup_threshold",
		.data		= &random_write_wakeup_bits,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &min_write_thresh,
		.extra2		= &max_write_thresh,
	},
	{
		.procname	= "urandom_min_reseed_secs",
		.data		= &random_min_urandom_seed,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "boot_id",
		.data		= &sysctl_bootid,
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
	{
		.procname	= "uuid",
		.maxlen		= 16,
		.mode		= 0444,
		.proc_handler	= proc_do_uuid,
	},
#ifdef ADD_INTERRUPT_BENCH
	{
		.procname	= "add_interrupt_avg_cycles",
		.data		= &avg_cycles,
		.maxlen		= sizeof(avg_cycles),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
	{
		.procname	= "add_interrupt_avg_deviation",
		.data		= &avg_deviation,
		.maxlen		= sizeof(avg_deviation),
		.mode		= 0444,
		.proc_handler	= proc_doulongvec_minmax,
	},
#endif
	{ }
};
#endif	/* CONFIG_SYSCTL */

struct batched_entropy {
	union {
		u64 entropy_u64[CHACHA_BLOCK_SIZE / sizeof(u64)];
		u32 entropy_u32[CHACHA_BLOCK_SIZE / sizeof(u32)];
	};
	unsigned int position;
	spinlock_t batch_lock;
};

/*
 * Get a random word for internal kernel use only.  The quality of the
 * random number is as good as /dev/urandom, but there is no backtrack
 * protection, with the goal of being quite fast and not depleting
 * entropy.  In order to ensure that the randomness provided by this
 * function is okay, the function wait_for_random_bytes() should be
 * called and return 0 at least once at any point prior.
 */
static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u64) = {
	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u64.lock),
};

u64 get_random_u64(void)
{
	u64 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u64);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u64) == 0) {
		extract_crng((u8 *)batch->entropy_u64);
		batch->position = 0;
	}
	ret = batch->entropy_u64[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u64);

static DEFINE_PER_CPU(struct batched_entropy, batched_entropy_u32) = {
	.batch_lock	= __SPIN_LOCK_UNLOCKED(batched_entropy_u32.lock),
};
u32 get_random_u32(void)
{
	u32 ret;
	unsigned long flags;
	struct batched_entropy *batch;
	static void *previous;

	warn_unseeded_randomness(&previous);

	batch = raw_cpu_ptr(&batched_entropy_u32);
	spin_lock_irqsave(&batch->batch_lock, flags);
	if (batch->position % ARRAY_SIZE(batch->entropy_u32) == 0) {
		extract_crng((u8 *)batch->entropy_u32);
		batch->position = 0;
	}
	ret = batch->entropy_u32[batch->position++];
	spin_unlock_irqrestore(&batch->batch_lock, flags);
	return ret;
}
EXPORT_SYMBOL(get_random_u32);
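
/*
 * Illustrative sketch (not part of this file): these batched helpers
 * are the cheap way to get small random values in the kernel, e.g. a
 * random index into a hypothetical table:
 *
 *	u32 idx = get_random_u32() % nr_entries;
 *
 * Note the modulo introduces a slight bias unless nr_entries is a
 * power of two.
 */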

/*
 * It's important to invalidate all potential batched entropy that might
 * be stored before the crng is initialized, which we can do lazily by
 * simply resetting the counter to zero so that it's re-extracted on the
 * next usage.
 */
static void invalidate_batched_entropy(void)
{
	int cpu;
	unsigned long flags;

	for_each_possible_cpu (cpu) {
		struct batched_entropy *batched_entropy;

		batched_entropy = per_cpu_ptr(&batched_entropy_u32, cpu);
		spin_lock_irqsave(&batched_entropy->batch_lock, flags);
		batched_entropy->position = 0;
		spin_unlock(&batched_entropy->batch_lock);

		batched_entropy = per_cpu_ptr(&batched_entropy_u64, cpu);
		spin_lock(&batched_entropy->batch_lock);
		batched_entropy->position = 0;
		spin_unlock_irqrestore(&batched_entropy->batch_lock, flags);
	}
}

/**
 * randomize_page - Generate a random, page aligned address
 * @start:	The smallest acceptable address the caller will take.
 * @range:	The size of the area, starting at @start, within which the
 *		random address must fall.
 *
 * If @start + @range would overflow, @range is capped.
 *
 * NOTE: Historical use of randomize_range, which this replaces, presumed
 * that @start was already page aligned.  We now align it regardless.
 *
 * Return: A page aligned address within [start, start + range).  On error,
 * @start is returned.
 */
unsigned long
randomize_page(unsigned long start, unsigned long range)
{
	if (!PAGE_ALIGNED(start)) {
		range -= PAGE_ALIGN(start) - start;
		start = PAGE_ALIGN(start);
	}

	if (start > ULONG_MAX - range)
		range = ULONG_MAX - start;

	range >>= PAGE_SHIFT;

	if (range == 0)
		return start;

	return start + (get_random_long() % range << PAGE_SHIFT);
}

/*
 * Interface for in-kernel drivers of true hardware RNGs.
 * Those devices may produce endless random bits and will be throttled
 * when our pool is full.
 */
void add_hwgenerator_randomness(const char *buffer, size_t count,
				size_t entropy)
{
	struct entropy_store *poolp = &input_pool;

	if (unlikely(crng_init == 0)) {
		crng_fast_load(buffer, count);
		return;
	}

	/*
	 * Suspend writing if we're above the trickle threshold.
	 * We'll be woken up again once below random_write_wakeup_bits,
	 * or when the calling thread is about to terminate.
	 */
	wait_event_interruptible(random_write_wait, kthread_should_stop() ||
			ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
	mix_pool_bytes(poolp, buffer, count);
	credit_entropy_bits(poolp, entropy);
}
EXPORT_SYMBOL_GPL(add_hwgenerator_randomness);

/*
 * Handle a random seed passed in by the bootloader.  If the seed is
 * trustworthy, it is credited as if it came from a hardware RNG;
 * otherwise it is treated as device data.  The decision is controlled
 * by CONFIG_RANDOM_TRUST_BOOTLOADER.
 */
void add_bootloader_randomness(const void *buf, unsigned int size)
{
	if (IS_ENABLED(CONFIG_RANDOM_TRUST_BOOTLOADER))
		add_hwgenerator_randomness(buf, size, size * 8);
	else
		add_device_randomness(buf, size);
}
EXPORT_SYMBOL_GPL(add_bootloader_randomness);