Lines matching refs: rfkill

49 struct rfkill {  struct
77 #define to_rfkill(d) container_of(d, struct rfkill, dev) argument
127 static void rfkill_led_trigger_event(struct rfkill *rfkill) in rfkill_led_trigger_event() argument
131 if (!rfkill->registered) in rfkill_led_trigger_event()
134 trigger = &rfkill->led_trigger; in rfkill_led_trigger_event()
136 if (rfkill->state & RFKILL_BLOCK_ANY) in rfkill_led_trigger_event()
144 struct rfkill *rfkill; in rfkill_led_trigger_activate() local
146 rfkill = container_of(led->trigger, struct rfkill, led_trigger); in rfkill_led_trigger_activate()
148 rfkill_led_trigger_event(rfkill); in rfkill_led_trigger_activate()
151 const char *rfkill_get_led_trigger_name(struct rfkill *rfkill) in rfkill_get_led_trigger_name() argument
153 return rfkill->led_trigger.name; in rfkill_get_led_trigger_name()
157 void rfkill_set_led_trigger_name(struct rfkill *rfkill, const char *name) in rfkill_set_led_trigger_name() argument
159 BUG_ON(!rfkill); in rfkill_set_led_trigger_name()
161 rfkill->ledtrigname = name; in rfkill_set_led_trigger_name()
165 static int rfkill_led_trigger_register(struct rfkill *rfkill) in rfkill_led_trigger_register() argument
167 rfkill->led_trigger.name = rfkill->ledtrigname in rfkill_led_trigger_register()
168 ? : dev_name(&rfkill->dev); in rfkill_led_trigger_register()
169 rfkill->led_trigger.activate = rfkill_led_trigger_activate; in rfkill_led_trigger_register()
170 return led_trigger_register(&rfkill->led_trigger); in rfkill_led_trigger_register()
173 static void rfkill_led_trigger_unregister(struct rfkill *rfkill) in rfkill_led_trigger_unregister() argument
175 led_trigger_unregister(&rfkill->led_trigger); in rfkill_led_trigger_unregister()
178 static void rfkill_led_trigger_event(struct rfkill *rfkill) in rfkill_led_trigger_event() argument
182 static inline int rfkill_led_trigger_register(struct rfkill *rfkill) in rfkill_led_trigger_register() argument
187 static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill) in rfkill_led_trigger_unregister() argument
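
The block above is rfkill's LED-trigger plumbing: each device gets a LED trigger named after itself unless the driver supplies its own name, and rfkill_led_trigger_event() fires the trigger whenever any block bit changes. A minimal driver-side sketch, assuming a hypothetical LED trigger name; the name must be set before rfkill_register(), and the string is not copied, so it has to stay valid:

#include <linux/rfkill.h>

/*
 * Hypothetical sketch: give the LED trigger a stable name before
 * rfkill_register().  Without this the trigger name defaults to
 * dev_name(&rfkill->dev), e.g. "rfkill0".  With !CONFIG_RFKILL_LEDS
 * the call is a no-op stub.
 */
static void example_name_led_trigger(struct rfkill *rfkill)
{
	rfkill_set_led_trigger_name(rfkill, "example-wifi-led");
}

A platform LED can then bind to that trigger by name (its default_trigger) and follow the radio's blocked/unblocked state.
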
192 static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill, in rfkill_fill_event() argument
197 ev->idx = rfkill->idx; in rfkill_fill_event()
198 ev->type = rfkill->type; in rfkill_fill_event()
201 spin_lock_irqsave(&rfkill->lock, flags); in rfkill_fill_event()
202 ev->hard = !!(rfkill->state & RFKILL_BLOCK_HW); in rfkill_fill_event()
203 ev->soft = !!(rfkill->state & (RFKILL_BLOCK_SW | in rfkill_fill_event()
205 spin_unlock_irqrestore(&rfkill->lock, flags); in rfkill_fill_event()
208 static void rfkill_send_events(struct rfkill *rfkill, enum rfkill_operation op) in rfkill_send_events() argument
217 rfkill_fill_event(&ev->ev, rfkill, op); in rfkill_send_events()
225 static void rfkill_event(struct rfkill *rfkill) in rfkill_event() argument
227 if (!rfkill->registered) in rfkill_event()
230 kobject_uevent(&rfkill->dev.kobj, KOBJ_CHANGE); in rfkill_event()
233 rfkill_send_events(rfkill, RFKILL_OP_CHANGE); in rfkill_event()
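
rfkill_fill_event() packs idx, type, op and the hard/soft bits into the userspace struct rfkill_event, and rfkill_send_events() queues a copy of that event for every open /dev/rfkill file. A userspace sketch of the reader side, a minimal version of what rfkill-style tools do; it assumes the 8-byte struct rfkill_event from <linux/rfkill.h>:

/* Userspace sketch: dump rfkill events as they arrive on /dev/rfkill. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event ev;
	int fd = open("/dev/rfkill", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/rfkill");
		return 1;
	}

	/* The kernel first replays one RFKILL_OP_ADD per existing device,
	 * then blocks until state changes arrive. */
	while (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("idx=%u type=%u op=%u soft=%u hard=%u\n",
		       ev.idx, ev.type, ev.op, ev.soft, ev.hard);

	close(fd);
	return 0;
}
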
236 static bool __rfkill_set_hw_state(struct rfkill *rfkill, in __rfkill_set_hw_state() argument
242 BUG_ON(!rfkill); in __rfkill_set_hw_state()
244 spin_lock_irqsave(&rfkill->lock, flags); in __rfkill_set_hw_state()
245 prev = !!(rfkill->state & RFKILL_BLOCK_HW); in __rfkill_set_hw_state()
247 rfkill->state |= RFKILL_BLOCK_HW; in __rfkill_set_hw_state()
249 rfkill->state &= ~RFKILL_BLOCK_HW; in __rfkill_set_hw_state()
251 any = !!(rfkill->state & RFKILL_BLOCK_ANY); in __rfkill_set_hw_state()
252 spin_unlock_irqrestore(&rfkill->lock, flags); in __rfkill_set_hw_state()
254 rfkill_led_trigger_event(rfkill); in __rfkill_set_hw_state()
268 static void rfkill_set_block(struct rfkill *rfkill, bool blocked) in rfkill_set_block() argument
274 if (unlikely(rfkill->dev.power.power_state.event & PM_EVENT_SLEEP)) in rfkill_set_block()
282 if (rfkill->ops->query) in rfkill_set_block()
283 rfkill->ops->query(rfkill, rfkill->data); in rfkill_set_block()
285 spin_lock_irqsave(&rfkill->lock, flags); in rfkill_set_block()
286 prev = rfkill->state & RFKILL_BLOCK_SW; in rfkill_set_block()
288 if (rfkill->state & RFKILL_BLOCK_SW) in rfkill_set_block()
289 rfkill->state |= RFKILL_BLOCK_SW_PREV; in rfkill_set_block()
291 rfkill->state &= ~RFKILL_BLOCK_SW_PREV; in rfkill_set_block()
294 rfkill->state |= RFKILL_BLOCK_SW; in rfkill_set_block()
296 rfkill->state &= ~RFKILL_BLOCK_SW; in rfkill_set_block()
298 rfkill->state |= RFKILL_BLOCK_SW_SETCALL; in rfkill_set_block()
299 spin_unlock_irqrestore(&rfkill->lock, flags); in rfkill_set_block()
301 err = rfkill->ops->set_block(rfkill->data, blocked); in rfkill_set_block()
303 spin_lock_irqsave(&rfkill->lock, flags); in rfkill_set_block()
310 if (rfkill->state & RFKILL_BLOCK_SW_PREV) in rfkill_set_block()
311 rfkill->state |= RFKILL_BLOCK_SW; in rfkill_set_block()
313 rfkill->state &= ~RFKILL_BLOCK_SW; in rfkill_set_block()
315 rfkill->state &= ~RFKILL_BLOCK_SW_SETCALL; in rfkill_set_block()
316 rfkill->state &= ~RFKILL_BLOCK_SW_PREV; in rfkill_set_block()
317 curr = rfkill->state & RFKILL_BLOCK_SW; in rfkill_set_block()
318 spin_unlock_irqrestore(&rfkill->lock, flags); in rfkill_set_block()
320 rfkill_led_trigger_event(rfkill); in rfkill_set_block()
323 rfkill_event(rfkill); in rfkill_set_block()
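
rfkill_set_block() is the single funnel through which soft-block requests reach the driver: it skips the hardware call while the device is suspended, optionally refreshes state via ops->query(), marks RFKILL_BLOCK_SW_SETCALL around the ops->set_block() call, and restores the previous soft state (kept in RFKILL_BLOCK_SW_PREV) if that call fails. A hedged sketch of the driver side of this contract; the MMIO block and register offset are assumptions:

#include <linux/io.h>
#include <linux/rfkill.h>

/* hypothetical per-device data passed to rfkill_alloc() as ops_data */
struct example_radio {
	void __iomem *regs;
};

#define EXAMPLE_REG_RADIO_EN	0x10	/* hypothetical register offset */

/*
 * ops->set_block(): invoked by rfkill_set_block() with the ops_data
 * pointer.  A non-zero return makes the core roll the soft-block bit
 * back to its previous value instead of committing the new one.
 */
static int example_set_block(void *data, bool blocked)
{
	struct example_radio *radio = data;

	writel(blocked ? 0 : 1, radio->regs + EXAMPLE_REG_RADIO_EN);
	return 0;
}

static const struct rfkill_ops example_rfkill_ops = {
	.set_block = example_set_block,
};
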
342 struct rfkill *rfkill; in __rfkill_switch_all() local
353 list_for_each_entry(rfkill, &rfkill_list, node) { in __rfkill_switch_all()
354 if (rfkill->type != type && type != RFKILL_TYPE_ALL) in __rfkill_switch_all()
357 rfkill_set_block(rfkill, blocked); in __rfkill_switch_all()
395 struct rfkill *rfkill; in rfkill_epo() local
404 list_for_each_entry(rfkill, &rfkill_list, node) in rfkill_epo()
405 rfkill_set_block(rfkill, true); in rfkill_epo()
481 bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked) in rfkill_set_hw_state() argument
485 ret = __rfkill_set_hw_state(rfkill, blocked, &change); in rfkill_set_hw_state()
487 if (!rfkill->registered) in rfkill_set_hw_state()
491 schedule_work(&rfkill->uevent_work); in rfkill_set_hw_state()
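
rfkill_set_hw_state() is the exported wrapper around the helper above: it updates the HW bit, returns the combined blocked state (hardware or software), and schedules the uevent work when the bit actually changed on a registered device. A hedged sketch of reporting a GPIO-backed kill switch from a threaded interrupt handler; the GPIO line and the tx_allowed flag are assumptions:

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/rfkill.h>

struct example_radio {
	struct rfkill *rfkill;
	struct gpio_desc *kill_gpio;	/* hypothetical kill-switch input */
	bool tx_allowed;
};

/* Threaded IRQ handler for the kill-switch line (both edges). */
static irqreturn_t example_kill_switch_irq(int irq, void *dev_id)
{
	struct example_radio *radio = dev_id;
	bool blocked = gpiod_get_value_cansleep(radio->kill_gpio);

	/* The return value is the combined HW/SW block state to enforce. */
	radio->tx_allowed = !rfkill_set_hw_state(radio->rfkill, blocked);

	return IRQ_HANDLED;
}
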
497 static void __rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) in __rfkill_set_sw_state() argument
502 if (rfkill->state & RFKILL_BLOCK_SW_SETCALL) in __rfkill_set_sw_state()
506 rfkill->state |= bit; in __rfkill_set_sw_state()
508 rfkill->state &= ~bit; in __rfkill_set_sw_state()
511 bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked) in rfkill_set_sw_state() argument
516 BUG_ON(!rfkill); in rfkill_set_sw_state()
518 spin_lock_irqsave(&rfkill->lock, flags); in rfkill_set_sw_state()
519 prev = !!(rfkill->state & RFKILL_BLOCK_SW); in rfkill_set_sw_state()
520 __rfkill_set_sw_state(rfkill, blocked); in rfkill_set_sw_state()
521 hwblock = !!(rfkill->state & RFKILL_BLOCK_HW); in rfkill_set_sw_state()
523 spin_unlock_irqrestore(&rfkill->lock, flags); in rfkill_set_sw_state()
525 if (!rfkill->registered) in rfkill_set_sw_state()
529 schedule_work(&rfkill->uevent_work); in rfkill_set_sw_state()
531 rfkill_led_trigger_event(rfkill); in rfkill_set_sw_state()
537 void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked) in rfkill_init_sw_state() argument
541 BUG_ON(!rfkill); in rfkill_init_sw_state()
542 BUG_ON(rfkill->registered); in rfkill_init_sw_state()
544 spin_lock_irqsave(&rfkill->lock, flags); in rfkill_init_sw_state()
545 __rfkill_set_sw_state(rfkill, blocked); in rfkill_init_sw_state()
546 rfkill->persistent = true; in rfkill_init_sw_state()
547 spin_unlock_irqrestore(&rfkill->lock, flags); in rfkill_init_sw_state()
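
rfkill_set_sw_state() and rfkill_init_sw_state() are the driver-facing mirrors for the software bit: the first reports a soft-block change the firmware made on its own (and is deliberately a no-op while a set_block() call is in flight, hence the RFKILL_BLOCK_SW_SETCALL check above), the second seeds the state before registration and marks the device persistent. A hedged probe/event sketch:

#include <linux/rfkill.h>

/*
 * Hypothetical probe fragment: seed the soft-block state reported by
 * firmware before rfkill_register(), so the core treats the device as
 * persistent instead of applying the global default to it.
 */
static void example_seed_rfkill_state(struct rfkill *rfkill, bool fw_blocked)
{
	rfkill_init_sw_state(rfkill, fw_blocked);	/* pre-registration only */
}

/* Hypothetical firmware-event handler, after registration. */
static void example_fw_toggled_radio(struct rfkill *rfkill, bool now_blocked)
{
	/* Returns true if the radio is still blocked (SW or HW). */
	rfkill_set_sw_state(rfkill, now_blocked);
}
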
551 void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw) in rfkill_set_states() argument
556 BUG_ON(!rfkill); in rfkill_set_states()
558 spin_lock_irqsave(&rfkill->lock, flags); in rfkill_set_states()
564 swprev = !!(rfkill->state & RFKILL_BLOCK_SW); in rfkill_set_states()
565 hwprev = !!(rfkill->state & RFKILL_BLOCK_HW); in rfkill_set_states()
566 __rfkill_set_sw_state(rfkill, sw); in rfkill_set_states()
568 rfkill->state |= RFKILL_BLOCK_HW; in rfkill_set_states()
570 rfkill->state &= ~RFKILL_BLOCK_HW; in rfkill_set_states()
572 spin_unlock_irqrestore(&rfkill->lock, flags); in rfkill_set_states()
574 if (!rfkill->registered) { in rfkill_set_states()
575 rfkill->persistent = true; in rfkill_set_states()
578 schedule_work(&rfkill->uevent_work); in rfkill_set_states()
580 rfkill_led_trigger_event(rfkill); in rfkill_set_states()
588 struct rfkill *rfkill = to_rfkill(dev); in name_show() local
590 return sprintf(buf, "%s\n", rfkill->name); in name_show()
623 struct rfkill *rfkill = to_rfkill(dev); in type_show() local
625 return sprintf(buf, "%s\n", rfkill_get_type_str(rfkill->type)); in type_show()
632 struct rfkill *rfkill = to_rfkill(dev); in index_show() local
634 return sprintf(buf, "%d\n", rfkill->idx); in index_show()
641 struct rfkill *rfkill = to_rfkill(dev); in persistent_show() local
643 return sprintf(buf, "%d\n", rfkill->persistent); in persistent_show()
650 struct rfkill *rfkill = to_rfkill(dev); in hard_show() local
652 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_HW) ? 1 : 0 ); in hard_show()
659 struct rfkill *rfkill = to_rfkill(dev); in soft_show() local
661 return sprintf(buf, "%d\n", (rfkill->state & RFKILL_BLOCK_SW) ? 1 : 0 ); in soft_show()
667 struct rfkill *rfkill = to_rfkill(dev); in soft_store() local
682 rfkill_set_block(rfkill, state); in soft_store()
702 struct rfkill *rfkill = to_rfkill(dev); in state_show() local
704 return sprintf(buf, "%d\n", user_state_from_blocked(rfkill->state)); in state_show()
710 struct rfkill *rfkill = to_rfkill(dev); in state_store() local
726 rfkill_set_block(rfkill, state == RFKILL_USER_STATE_SOFT_BLOCKED); in state_store()
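
The *_show()/*_store() handlers above back the sysfs ABI under /sys/class/rfkill/rfkill<N>/: name, type, index, persistent, hard and soft, plus the older combined state file. A tiny userspace sketch reading the soft flag; the rfkill0 index is an assumption:

/* Userspace sketch: read the soft-block flag of a hypothetical rfkill0. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/class/rfkill/rfkill0/soft", "r");
	int soft;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &soft) == 1)
		printf("soft blocked: %d\n", soft);	/* 1 = blocked */
	fclose(f);
	return 0;
}
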
755 struct rfkill *rfkill = to_rfkill(dev); in rfkill_release() local
757 kfree(rfkill); in rfkill_release()
762 struct rfkill *rfkill = to_rfkill(dev); in rfkill_dev_uevent() local
767 error = add_uevent_var(env, "RFKILL_NAME=%s", rfkill->name); in rfkill_dev_uevent()
771 rfkill_get_type_str(rfkill->type)); in rfkill_dev_uevent()
774 spin_lock_irqsave(&rfkill->lock, flags); in rfkill_dev_uevent()
775 state = rfkill->state; in rfkill_dev_uevent()
776 spin_unlock_irqrestore(&rfkill->lock, flags); in rfkill_dev_uevent()
782 void rfkill_pause_polling(struct rfkill *rfkill) in rfkill_pause_polling() argument
784 BUG_ON(!rfkill); in rfkill_pause_polling()
786 if (!rfkill->ops->poll) in rfkill_pause_polling()
789 cancel_delayed_work_sync(&rfkill->poll_work); in rfkill_pause_polling()
793 void rfkill_resume_polling(struct rfkill *rfkill) in rfkill_resume_polling() argument
795 BUG_ON(!rfkill); in rfkill_resume_polling()
797 if (!rfkill->ops->poll) in rfkill_resume_polling()
801 &rfkill->poll_work, 0); in rfkill_resume_polling()
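
rfkill_pause_polling() and rfkill_resume_polling() simply cancel and re-arm the poll_work for drivers that supply a poll() op; the suspend/resume handlers below call the same pair automatically. A hedged sketch of a driver bracketing an operation during which polling the hardware would interfere; the firmware-load helper is hypothetical:

#include <linux/rfkill.h>

struct example_radio {
	struct rfkill *rfkill;
	/* ... device specifics ... */
};

static int example_do_firmware_load(struct example_radio *radio);	/* hypothetical */

static int example_reload_firmware(struct example_radio *radio)
{
	int err;

	rfkill_pause_polling(radio->rfkill);	/* poll() will not run from here */
	err = example_do_firmware_load(radio);
	rfkill_resume_polling(radio->rfkill);	/* re-arms the delayed work */

	return err;
}
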
808 struct rfkill *rfkill = to_rfkill(dev); in rfkill_suspend() local
810 rfkill_pause_polling(rfkill); in rfkill_suspend()
817 struct rfkill *rfkill = to_rfkill(dev); in rfkill_resume() local
820 if (!rfkill->persistent) { in rfkill_resume()
821 cur = !!(rfkill->state & RFKILL_BLOCK_SW); in rfkill_resume()
822 rfkill_set_block(rfkill, cur); in rfkill_resume()
825 rfkill_resume_polling(rfkill); in rfkill_resume()
844 bool rfkill_blocked(struct rfkill *rfkill) in rfkill_blocked() argument
849 spin_lock_irqsave(&rfkill->lock, flags); in rfkill_blocked()
850 state = rfkill->state; in rfkill_blocked()
851 spin_unlock_irqrestore(&rfkill->lock, flags); in rfkill_blocked()
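
rfkill_blocked() gives drivers a locked snapshot of the combined block state without having to track it themselves. A small hedged sketch of a transmit-path check:

#include <linux/errno.h>
#include <linux/rfkill.h>

/* Hypothetical TX-enable path: refuse to power the radio up while blocked. */
static int example_start_tx(struct rfkill *rfkill)
{
	if (rfkill_blocked(rfkill))
		return -ERFKILL;	/* "Operation not possible due to RF-kill" */

	/* ... enable the transmitter ... */
	return 0;
}
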
858 struct rfkill * __must_check rfkill_alloc(const char *name, in rfkill_alloc()
864 struct rfkill *rfkill; in rfkill_alloc() local
879 rfkill = kzalloc(sizeof(*rfkill) + strlen(name) + 1, GFP_KERNEL); in rfkill_alloc()
880 if (!rfkill) in rfkill_alloc()
883 spin_lock_init(&rfkill->lock); in rfkill_alloc()
884 INIT_LIST_HEAD(&rfkill->node); in rfkill_alloc()
885 rfkill->type = type; in rfkill_alloc()
886 strcpy(rfkill->name, name); in rfkill_alloc()
887 rfkill->ops = ops; in rfkill_alloc()
888 rfkill->data = ops_data; in rfkill_alloc()
890 dev = &rfkill->dev; in rfkill_alloc()
895 return rfkill; in rfkill_alloc()
901 struct rfkill *rfkill; in rfkill_poll() local
903 rfkill = container_of(work, struct rfkill, poll_work.work); in rfkill_poll()
910 rfkill->ops->poll(rfkill, rfkill->data); in rfkill_poll()
913 &rfkill->poll_work, in rfkill_poll()
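
rfkill_poll() is the core's delayed work: it calls the driver's poll() op and then re-queues itself, for hardware whose kill switch cannot raise an interrupt. A hedged sketch of such a poll() implementation; the GPIO line is an assumption, and a real rfkill_ops still needs the mandatory set_block() alongside it:

#include <linux/gpio/consumer.h>
#include <linux/rfkill.h>

struct example_radio {
	struct gpio_desc *kill_gpio;	/* hypothetical, not interrupt-capable */
};

/* ops->poll(): called periodically from rfkill's delayed work. */
static void example_rfkill_poll(struct rfkill *rfkill, void *data)
{
	struct example_radio *radio = data;

	rfkill_set_hw_state(rfkill, gpiod_get_value_cansleep(radio->kill_gpio));
}
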
919 struct rfkill *rfkill; in rfkill_uevent_work() local
921 rfkill = container_of(work, struct rfkill, uevent_work); in rfkill_uevent_work()
924 rfkill_event(rfkill); in rfkill_uevent_work()
930 struct rfkill *rfkill; in rfkill_sync_work() local
933 rfkill = container_of(work, struct rfkill, sync_work); in rfkill_sync_work()
936 cur = rfkill_global_states[rfkill->type].cur; in rfkill_sync_work()
937 rfkill_set_block(rfkill, cur); in rfkill_sync_work()
941 int __must_check rfkill_register(struct rfkill *rfkill) in rfkill_register() argument
944 struct device *dev = &rfkill->dev; in rfkill_register()
947 BUG_ON(!rfkill); in rfkill_register()
951 if (rfkill->registered) { in rfkill_register()
956 rfkill->idx = rfkill_no; in rfkill_register()
960 list_add_tail(&rfkill->node, &rfkill_list); in rfkill_register()
966 error = rfkill_led_trigger_register(rfkill); in rfkill_register()
970 rfkill->registered = true; in rfkill_register()
972 INIT_DELAYED_WORK(&rfkill->poll_work, rfkill_poll); in rfkill_register()
973 INIT_WORK(&rfkill->uevent_work, rfkill_uevent_work); in rfkill_register()
974 INIT_WORK(&rfkill->sync_work, rfkill_sync_work); in rfkill_register()
976 if (rfkill->ops->poll) in rfkill_register()
978 &rfkill->poll_work, in rfkill_register()
981 if (!rfkill->persistent || rfkill_epo_lock_active) { in rfkill_register()
982 schedule_work(&rfkill->sync_work); in rfkill_register()
985 bool soft_blocked = !!(rfkill->state & RFKILL_BLOCK_SW); in rfkill_register()
988 __rfkill_switch_all(rfkill->type, soft_blocked); in rfkill_register()
992 rfkill_send_events(rfkill, RFKILL_OP_ADD); in rfkill_register()
998 device_del(&rfkill->dev); in rfkill_register()
1000 list_del_init(&rfkill->node); in rfkill_register()
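
rfkill_register() attaches the struct device, registers the LED trigger, arms the poll work when a poll() op exists, and either schedules sync_work to apply the global state or, for persistent devices, leaves the reported state in place; on failure it unwinds in the reverse order shown above. A hedged probe-path sketch tying rfkill_alloc() and rfkill_register() together; the names and the stub callback are assumptions:

#include <linux/device.h>
#include <linux/rfkill.h>

static int example_set_block(void *data, bool blocked)
{
	/* hypothetical: program the transmitter enable state here */
	return 0;
}

static const struct rfkill_ops example_rfkill_ops = {
	.set_block = example_set_block,	/* mandatory; poll()/query() are optional */
};

static struct rfkill *example_add_rfkill(struct device *parent, void *priv)
{
	struct rfkill *rfkill;
	int err;

	rfkill = rfkill_alloc("example-wlan", parent, RFKILL_TYPE_WLAN,
			      &example_rfkill_ops, priv);
	if (!rfkill)
		return NULL;

	err = rfkill_register(rfkill);
	if (err) {
		rfkill_destroy(rfkill);	/* never registered: just drop it */
		return NULL;
	}

	return rfkill;
}
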
1007 void rfkill_unregister(struct rfkill *rfkill) in rfkill_unregister() argument
1009 BUG_ON(!rfkill); in rfkill_unregister()
1011 if (rfkill->ops->poll) in rfkill_unregister()
1012 cancel_delayed_work_sync(&rfkill->poll_work); in rfkill_unregister()
1014 cancel_work_sync(&rfkill->uevent_work); in rfkill_unregister()
1015 cancel_work_sync(&rfkill->sync_work); in rfkill_unregister()
1017 rfkill->registered = false; in rfkill_unregister()
1019 device_del(&rfkill->dev); in rfkill_unregister()
1022 rfkill_send_events(rfkill, RFKILL_OP_DEL); in rfkill_unregister()
1023 list_del_init(&rfkill->node); in rfkill_unregister()
1026 rfkill_led_trigger_unregister(rfkill); in rfkill_unregister()
1030 void rfkill_destroy(struct rfkill *rfkill) in rfkill_destroy() argument
1032 if (rfkill) in rfkill_destroy()
1033 put_device(&rfkill->dev); in rfkill_destroy()
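
Teardown is the mirror image: rfkill_unregister() stops polling and the work items, sends RFKILL_OP_DEL, and detaches the device; rfkill_destroy() then drops the final device reference and is safe to call on NULL. A hedged remove-path sketch:

#include <linux/rfkill.h>

/* Hypothetical remove path: unregister first, then release the reference. */
static void example_remove_rfkill(struct rfkill *rfkill)
{
	if (!rfkill)
		return;

	rfkill_unregister(rfkill);
	rfkill_destroy(rfkill);
}
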
1040 struct rfkill *rfkill; in rfkill_fop_open() local
1058 list_for_each_entry(rfkill, &rfkill_list, node) { in rfkill_fop_open()
1062 rfkill_fill_event(&ev->ev, rfkill, RFKILL_OP_ADD); in rfkill_fop_open()
1143 struct rfkill *rfkill; in rfkill_fop_write() local
1177 list_for_each_entry(rfkill, &rfkill_list, node) { in rfkill_fop_write()
1178 if (rfkill->idx != ev.idx && ev.op != RFKILL_OP_CHANGE_ALL) in rfkill_fop_write()
1181 if (rfkill->type != ev.type && ev.type != RFKILL_TYPE_ALL) in rfkill_fop_write()
1184 rfkill_set_block(rfkill, ev.soft); in rfkill_fop_write()
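
rfkill_fop_write() accepts struct rfkill_event writes on /dev/rfkill and applies RFKILL_OP_CHANGE to a single index or RFKILL_OP_CHANGE_ALL to every device of a matching type, with RFKILL_TYPE_ALL matching everything. A userspace sketch soft-blocking all radios, similar in spirit to what an "rfkill block all" style tool does; error handling is kept minimal:

/* Userspace sketch: soft-block every rfkill device via /dev/rfkill. */
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/rfkill.h>

int main(void)
{
	struct rfkill_event ev;
	int fd = open("/dev/rfkill", O_RDWR);

	if (fd < 0)
		return 1;

	memset(&ev, 0, sizeof(ev));
	ev.op = RFKILL_OP_CHANGE_ALL;
	ev.type = RFKILL_TYPE_ALL;	/* match every radio type */
	ev.soft = 1;			/* request soft block */

	if (write(fd, &ev, sizeof(ev)) != sizeof(ev)) {
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}
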