This source file includes the following definitions:
- __firmware_loading_timeout
- __fw_fallback_set_timeout
- fw_fallback_set_cache_timeout
- fw_fallback_set_default_timeout
- firmware_loading_timeout
- fw_sysfs_done
- fw_sysfs_loading
- fw_sysfs_wait_timeout
- to_fw_sysfs
- __fw_load_abort
- fw_load_abort
- kill_pending_fw_fallback_reqs
- timeout_show
- timeout_store
- fw_dev_release
- do_firmware_uevent
- firmware_uevent
- register_sysfs_loader
- unregister_sysfs_loader
- firmware_loading_show
- firmware_loading_store
- firmware_rw_data
- firmware_rw
- firmware_data_read
- fw_realloc_pages
- firmware_data_write
- fw_create_instance
- fw_load_sysfs_fallback
- fw_load_from_user_helper
- fw_force_sysfs_fallback
- fw_run_sysfs_fallback
- firmware_fallback_sysfs
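
The 'loading' and 'data' attributes implemented below are consumed from
userspace: the loader writes 1 to 'loading', streams the image into 'data',
then writes 0 to conclude (or -1 to abort). A minimal userspace sketch of that
protocol follows; it is not part of this file, the device name "mydev" and the
image path are assumptions, and error handling is abbreviated (writing 'data'
additionally requires CAP_SYS_RAWIO).

#include <fcntl.h>
#include <unistd.h>

static int write_str(const char *path, const char *val, size_t len)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, len) != (ssize_t)len) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	static const char loading[] = "/sys/class/firmware/mydev/loading";
	static const char data[] = "/sys/class/firmware/mydev/data";
	char buf[4096];
	ssize_t n;
	int in, out;

	in = open("/lib/firmware/example.bin", O_RDONLY);
	if (in < 0)
		return 1;

	write_str(loading, "1", 1);	/* firmware_loading_store(): start the load */
	out = open(data, O_WRONLY);	/* firmware_data_write() grows the paged buffer */
	while ((n = read(in, buf, sizeof(buf))) > 0)
		write(out, buf, n);
	close(out);
	close(in);
	/* "0" hands the image to the driver; "-1" aborts the pending request */
	return write_str(loading, n < 0 ? "-1" : "0", n < 0 ? 2 : 1) ? 1 : 0;
}

The kernel-side implementation of this interface follows.
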
#include <linux/types.h>
#include <linux/kconfig.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/highmem.h>
#include <linux/umh.h>
#include <linux/sysctl.h>
#include <linux/vmalloc.h>

#include "fallback.h"
#include "firmware.h"

extern struct firmware_fallback_config fw_fallback_config;

/* getter for the sysfs fallback loading timeout, in seconds */
static inline int __firmware_loading_timeout(void)
{
	return fw_fallback_config.loading_timeout;
}

/* setter for the sysfs fallback loading timeout, in seconds */
static void __fw_fallback_set_timeout(int timeout)
{
	fw_fallback_config.loading_timeout = timeout;
}

/*
 * Use a short loading timeout while caching device firmware: these images
 * have already been loaded successfully at least once, so 10 seconds should
 * be enough.
 */
void fw_fallback_set_cache_timeout(void)
{
	fw_fallback_config.old_timeout = __firmware_loading_timeout();
	__fw_fallback_set_timeout(10);
}

/* Restore the timeout that was in effect before the firmware cache ran */
void fw_fallback_set_default_timeout(void)
{
	__fw_fallback_set_timeout(fw_fallback_config.old_timeout);
}

static long firmware_loading_timeout(void)
{
	return __firmware_loading_timeout() > 0 ?
		__firmware_loading_timeout() * HZ : MAX_JIFFY_OFFSET;
}

static inline bool fw_sysfs_done(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_DONE);
}

static inline bool fw_sysfs_loading(struct fw_priv *fw_priv)
{
	return __fw_state_check(fw_priv, FW_STATUS_LOADING);
}

static inline int fw_sysfs_wait_timeout(struct fw_priv *fw_priv, long timeout)
{
	return __fw_state_wait_common(fw_priv, timeout);
}

struct fw_sysfs {
	bool nowait;
	struct device dev;
	struct fw_priv *fw_priv;
	struct firmware *fw;
};

static struct fw_sysfs *to_fw_sysfs(struct device *dev)
{
	return container_of(dev, struct fw_sysfs, dev);
}

static void __fw_load_abort(struct fw_priv *fw_priv)
{
	/*
	 * There is a small window in which user can write to 'loading'
	 * between loading done and disappearance of 'loading'.
	 */
	if (fw_sysfs_done(fw_priv))
		return;

	list_del_init(&fw_priv->pending_list);
	fw_state_aborted(fw_priv);
}

static void fw_load_abort(struct fw_sysfs *fw_sysfs)
{
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	__fw_load_abort(fw_priv);
}

static LIST_HEAD(pending_fw_head);

void kill_pending_fw_fallback_reqs(bool only_kill_custom)
{
	struct fw_priv *fw_priv;
	struct fw_priv *next;

	mutex_lock(&fw_lock);
	list_for_each_entry_safe(fw_priv, next, &pending_fw_head,
				 pending_list) {
		if (!fw_priv->need_uevent || !only_kill_custom)
			__fw_load_abort(fw_priv);
	}
	mutex_unlock(&fw_lock);
}

static ssize_t timeout_show(struct class *class, struct class_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%d\n", __firmware_loading_timeout());
}

/**
 * timeout_store() - set number of seconds to wait for firmware
 * @class: device class pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for timeout value
 * @count: number of bytes in @buf
 *
 * Sets the number of seconds to wait for the firmware. Once this expires an
 * error will be returned to the driver and no firmware will be provided.
 *
 * Note: zero means 'wait forever'.
 */
static ssize_t timeout_store(struct class *class, struct class_attribute *attr,
			     const char *buf, size_t count)
{
	int tmp_loading_timeout = simple_strtol(buf, NULL, 10);

	if (tmp_loading_timeout < 0)
		tmp_loading_timeout = 0;

	__fw_fallback_set_timeout(tmp_loading_timeout);

	return count;
}
static CLASS_ATTR_RW(timeout);

static struct attribute *firmware_class_attrs[] = {
	&class_attr_timeout.attr,
	NULL,
};
ATTRIBUTE_GROUPS(firmware_class);

static void fw_dev_release(struct device *dev)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);

	kfree(fw_sysfs);
}

static int do_firmware_uevent(struct fw_sysfs *fw_sysfs, struct kobj_uevent_env *env)
{
	if (add_uevent_var(env, "FIRMWARE=%s", fw_sysfs->fw_priv->fw_name))
		return -ENOMEM;
	if (add_uevent_var(env, "TIMEOUT=%i", __firmware_loading_timeout()))
		return -ENOMEM;
	if (add_uevent_var(env, "ASYNC=%d", fw_sysfs->nowait))
		return -ENOMEM;

	return 0;
}

static int firmware_uevent(struct device *dev, struct kobj_uevent_env *env)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int err = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		err = do_firmware_uevent(fw_sysfs, env);
	mutex_unlock(&fw_lock);
	return err;
}

static struct class firmware_class = {
	.name		= "firmware",
	.class_groups	= firmware_class_groups,
	.dev_uevent	= firmware_uevent,
	.dev_release	= fw_dev_release,
};

int register_sysfs_loader(void)
{
	return class_register(&firmware_class);
}

void unregister_sysfs_loader(void)
{
	class_unregister(&firmware_class);
}

static ssize_t firmware_loading_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	int loading = 0;

	mutex_lock(&fw_lock);
	if (fw_sysfs->fw_priv)
		loading = fw_sysfs_loading(fw_sysfs->fw_priv);
	mutex_unlock(&fw_lock);

	return sprintf(buf, "%d\n", loading);
}

/**
 * firmware_loading_store() - set value in the 'loading' control file
 * @dev: device pointer
 * @attr: device attribute pointer
 * @buf: buffer to scan for loading control value
 * @count: number of bytes in @buf
 *
 * The relevant values are:
 *
 *   1: Start a load, discarding any previous partial load.
 *   0: Conclude the load and hand the data to the driver code.
 *  -1: Conclude the load with an error and discard any written data.
 */
static ssize_t firmware_loading_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t count)
{
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t written = count;
	int loading = simple_strtol(buf, NULL, 10);

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (fw_state_is_aborted(fw_priv))
		goto out;

	switch (loading) {
	case 1:
		/* discarding any previous partial load */
		if (!fw_sysfs_done(fw_priv)) {
			fw_free_paged_buf(fw_priv);
			fw_state_start(fw_priv);
		}
		break;
	case 0:
		if (fw_sysfs_loading(fw_priv)) {
			int rc;

			/*
			 * Several loading requests may be pending on
			 * one same firmware buf, so let all requests
			 * see the mapped 'buf->data' once the loading
			 * is completed.
			 */
			rc = fw_map_paged_buf(fw_priv);
			if (rc)
				dev_err(dev, "%s: map pages failed\n",
					__func__);
			else
				rc = security_kernel_post_read_file(NULL,
						fw_priv->data, fw_priv->size,
						READING_FIRMWARE);

			/*
			 * Same logic as fw_load_abort, only the DONE bit
			 * is ignored and we set ABORT only on failure.
			 */
			list_del_init(&fw_priv->pending_list);
			if (rc) {
				fw_state_aborted(fw_priv);
				written = rc;
			} else {
				fw_state_done(fw_priv);
			}
			break;
		}
		/* fallthrough */
	default:
		dev_err(dev, "%s: unexpected value (%d)\n", __func__, loading);
		/* fallthrough */
	case -1:
		fw_load_abort(fw_sysfs);
		break;
	}
out:
	mutex_unlock(&fw_lock);
	return written;
}

static DEVICE_ATTR(loading, 0644, firmware_loading_show, firmware_loading_store);

static void firmware_rw_data(struct fw_priv *fw_priv, char *buffer,
			     loff_t offset, size_t count, bool read)
{
	if (read)
		memcpy(buffer, fw_priv->data + offset, count);
	else
		memcpy(fw_priv->data + offset, buffer, count);
}

static void firmware_rw(struct fw_priv *fw_priv, char *buffer,
			loff_t offset, size_t count, bool read)
{
	while (count) {
		void *page_data;
		int page_nr = offset >> PAGE_SHIFT;
		int page_ofs = offset & (PAGE_SIZE - 1);
		int page_cnt = min_t(size_t, PAGE_SIZE - page_ofs, count);

		page_data = kmap(fw_priv->pages[page_nr]);

		if (read)
			memcpy(buffer, page_data + page_ofs, page_cnt);
		else
			memcpy(page_data + page_ofs, buffer, page_cnt);

		kunmap(fw_priv->pages[page_nr]);
		buffer += page_cnt;
		offset += page_cnt;
		count -= page_cnt;
	}
}

static ssize_t firmware_data_read(struct file *filp, struct kobject *kobj,
				  struct bin_attribute *bin_attr,
				  char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t ret_count;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		ret_count = -ENODEV;
		goto out;
	}
	if (offset > fw_priv->size) {
		ret_count = 0;
		goto out;
	}
	if (count > fw_priv->size - offset)
		count = fw_priv->size - offset;

	ret_count = count;

	if (fw_priv->data)
		firmware_rw_data(fw_priv, buffer, offset, count, true);
	else
		firmware_rw(fw_priv, buffer, offset, count, true);

out:
	mutex_unlock(&fw_lock);
	return ret_count;
}

static int fw_realloc_pages(struct fw_sysfs *fw_sysfs, int min_size)
{
	int err;

	err = fw_grow_paged_buf(fw_sysfs->fw_priv,
				PAGE_ALIGN(min_size) >> PAGE_SHIFT);
	if (err)
		fw_load_abort(fw_sysfs);
	return err;
}

/**
 * firmware_data_write() - write method for firmware
 * @filp: open sysfs file
 * @kobj: kobject for the device
 * @bin_attr: bin_attr structure
 * @buffer: buffer being written
 * @offset: buffer offset for write in total data store area
 * @count: buffer size
 *
 * Data written to the 'data' attribute will be later handed to
 * the driver as a firmware image.
 */
static ssize_t firmware_data_write(struct file *filp, struct kobject *kobj,
				   struct bin_attribute *bin_attr,
				   char *buffer, loff_t offset, size_t count)
{
	struct device *dev = kobj_to_dev(kobj);
	struct fw_sysfs *fw_sysfs = to_fw_sysfs(dev);
	struct fw_priv *fw_priv;
	ssize_t retval;

	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;

	mutex_lock(&fw_lock);
	fw_priv = fw_sysfs->fw_priv;
	if (!fw_priv || fw_sysfs_done(fw_priv)) {
		retval = -ENODEV;
		goto out;
	}

	if (fw_priv->data) {
		if (offset + count > fw_priv->allocated_size) {
			retval = -ENOMEM;
			goto out;
		}
		firmware_rw_data(fw_priv, buffer, offset, count, false);
		retval = count;
	} else {
		retval = fw_realloc_pages(fw_sysfs, offset + count);
		if (retval)
			goto out;

		retval = count;
		firmware_rw(fw_priv, buffer, offset, count, false);
	}

	fw_priv->size = max_t(size_t, offset + count, fw_priv->size);
out:
	mutex_unlock(&fw_lock);
	return retval;
}

static struct bin_attribute firmware_attr_data = {
	.attr = { .name = "data", .mode = 0644 },
	.size = 0,
	.read = firmware_data_read,
	.write = firmware_data_write,
};

static struct attribute *fw_dev_attrs[] = {
	&dev_attr_loading.attr,
	NULL
};

static struct bin_attribute *fw_dev_bin_attrs[] = {
	&firmware_attr_data,
	NULL
};

static const struct attribute_group fw_dev_attr_group = {
	.attrs = fw_dev_attrs,
	.bin_attrs = fw_dev_bin_attrs,
};

static const struct attribute_group *fw_dev_attr_groups[] = {
	&fw_dev_attr_group,
	NULL
};

static struct fw_sysfs *
fw_create_instance(struct firmware *firmware, const char *fw_name,
		   struct device *device, enum fw_opt opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	struct device *f_dev;

	fw_sysfs = kzalloc(sizeof(*fw_sysfs), GFP_KERNEL);
	if (!fw_sysfs) {
		fw_sysfs = ERR_PTR(-ENOMEM);
		goto exit;
	}

	fw_sysfs->nowait = !!(opt_flags & FW_OPT_NOWAIT);
	fw_sysfs->fw = firmware;
	f_dev = &fw_sysfs->dev;

	device_initialize(f_dev);
	dev_set_name(f_dev, "%s", fw_name);
	f_dev->parent = device;
	f_dev->class = &firmware_class;
	f_dev->groups = fw_dev_attr_groups;
exit:
	return fw_sysfs;
}

/**
 * fw_load_sysfs_fallback() - load a firmware via the sysfs fallback mechanism
 * @fw_sysfs: firmware sysfs information for the firmware to load
 * @opt_flags: flags of options, FW_OPT_*
 * @timeout: timeout to wait for the load
 *
 * In charge of constructing a sysfs fallback interface for firmware loading.
 */
static int fw_load_sysfs_fallback(struct fw_sysfs *fw_sysfs,
				  enum fw_opt opt_flags, long timeout)
{
	int retval = 0;
	struct device *f_dev = &fw_sysfs->dev;
	struct fw_priv *fw_priv = fw_sysfs->fw_priv;

	/* fall back on userspace loading */
	if (!fw_priv->data)
		fw_priv->is_paged_buf = true;

	dev_set_uevent_suppress(f_dev, true);

	retval = device_add(f_dev);
	if (retval) {
		dev_err(f_dev, "%s: device_register failed\n", __func__);
		goto err_put_dev;
	}

	mutex_lock(&fw_lock);
	list_add(&fw_priv->pending_list, &pending_fw_head);
	mutex_unlock(&fw_lock);

	if (opt_flags & FW_OPT_UEVENT) {
		fw_priv->need_uevent = true;
		dev_set_uevent_suppress(f_dev, false);
		dev_dbg(f_dev, "firmware: requesting %s\n", fw_priv->fw_name);
		kobject_uevent(&fw_sysfs->dev.kobj, KOBJ_ADD);
	} else {
		timeout = MAX_JIFFY_OFFSET;
	}

	retval = fw_sysfs_wait_timeout(fw_priv, timeout);
	if (retval < 0 && retval != -ENOENT) {
		mutex_lock(&fw_lock);
		fw_load_abort(fw_sysfs);
		mutex_unlock(&fw_lock);
	}

	if (fw_state_is_aborted(fw_priv)) {
		if (retval == -ERESTARTSYS)
			retval = -EINTR;
		else
			retval = -EAGAIN;
	} else if (fw_priv->is_paged_buf && !fw_priv->data)
		retval = -ENOMEM;

	device_del(f_dev);
err_put_dev:
	put_device(f_dev);
	return retval;
}

static int fw_load_from_user_helper(struct firmware *firmware,
				    const char *name, struct device *device,
				    enum fw_opt opt_flags)
{
	struct fw_sysfs *fw_sysfs;
	long timeout;
	int ret;

	timeout = firmware_loading_timeout();
	if (opt_flags & FW_OPT_NOWAIT) {
		timeout = usermodehelper_read_lock_wait(timeout);
		if (!timeout) {
			dev_dbg(device, "firmware: %s loading timed out\n",
				name);
			return -EBUSY;
		}
	} else {
		ret = usermodehelper_read_trylock();
		if (WARN_ON(ret)) {
			dev_err(device, "firmware: %s will not be loaded\n",
				name);
			return ret;
		}
	}

	fw_sysfs = fw_create_instance(firmware, name, device, opt_flags);
	if (IS_ERR(fw_sysfs)) {
		ret = PTR_ERR(fw_sysfs);
		goto out_unlock;
	}

	fw_sysfs->fw_priv = firmware->priv;
	ret = fw_load_sysfs_fallback(fw_sysfs, opt_flags, timeout);

	if (!ret)
		ret = assign_fw(firmware, device, opt_flags);

out_unlock:
	usermodehelper_read_unlock();

	return ret;
}

static bool fw_force_sysfs_fallback(enum fw_opt opt_flags)
{
	if (fw_fallback_config.force_sysfs_fallback)
		return true;
	if (!(opt_flags & FW_OPT_USERHELPER))
		return false;
	return true;
}

static bool fw_run_sysfs_fallback(enum fw_opt opt_flags)
{
	int ret;

	if (fw_fallback_config.ignore_sysfs_fallback) {
		pr_info_once("Ignoring firmware sysfs fallback due to sysctl knob\n");
		return false;
	}

	if ((opt_flags & FW_OPT_NOFALLBACK))
		return false;

	/* Also permit LSMs and IMA to fail the firmware sysfs fallback */
	ret = security_kernel_load_data(LOADING_FIRMWARE);
	if (ret < 0)
		return false;

	return fw_force_sysfs_fallback(opt_flags);
}

/**
 * firmware_fallback_sysfs() - use the fallback mechanism to find firmware
 * @fw: pointer to firmware image
 * @name: name of firmware file to look for
 * @device: device for which firmware is being loaded
 * @opt_flags: options to control firmware loading behaviour, FW_OPT_*
 * @ret: return value from the direct lookup which triggered this fallback
 *
 * This function is called when direct lookup for the firmware has failed. It
 * exposes a sysfs loading interface so that userspace can feed the firmware
 * image in. The fallback can be disabled system-wide through the
 * firmware_config sysctl knobs and skipped per request with FW_OPT_NOFALLBACK.
 */
int firmware_fallback_sysfs(struct firmware *fw, const char *name,
			    struct device *device,
			    enum fw_opt opt_flags,
			    int ret)
{
	if (!fw_run_sysfs_fallback(opt_flags))
		return ret;

	if (!(opt_flags & FW_OPT_NO_WARN))
		dev_warn(device, "Falling back to sysfs fallback for: %s\n",
			 name);
	else
		dev_dbg(device, "Falling back to sysfs fallback for: %s\n",
			name);
	return fw_load_from_user_helper(fw, name, device, opt_flags);
}
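
For context, this fallback only runs after the direct filesystem lookup in the
firmware loader core has failed, most commonly for asynchronous requests made
with uevents enabled. Below is a sketch of a hypothetical driver issuing such
a request; the names my_fw_cont and my_request_fw, and the image name
example.bin, are made up for illustration, and the surrounding driver
boilerplate is omitted.

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/gfp.h>
#include <linux/module.h>

/* Hypothetical completion callback: runs once the load (or fallback) is over. */
static void my_fw_cont(const struct firmware *fw, void *context)
{
	struct device *dev = context;

	if (!fw) {
		dev_err(dev, "example.bin not provided, giving up\n");
		return;
	}
	dev_info(dev, "loaded %zu bytes of firmware\n", fw->size);
	/* ... program the device with fw->data here ... */
	release_firmware(fw);
}

/* Hypothetical probe helper: kicks off an async request with uevents enabled. */
static int my_request_fw(struct device *dev)
{
	return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
				       "example.bin", dev, GFP_KERNEL,
				       dev, my_fw_cont);
}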