Searched refs:desc (Results 1-200 of 2168), sorted by relevance


/linux-4.1.27/kernel/irq/

settings.h

35 irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) irq_settings_clr_and_set() argument
37 desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK); irq_settings_clr_and_set()
38 desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); irq_settings_clr_and_set()
41 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) irq_settings_is_per_cpu() argument
43 return desc->status_use_accessors & _IRQ_PER_CPU; irq_settings_is_per_cpu()
46 static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc) irq_settings_is_per_cpu_devid() argument
48 return desc->status_use_accessors & _IRQ_PER_CPU_DEVID; irq_settings_is_per_cpu_devid()
51 static inline void irq_settings_set_per_cpu(struct irq_desc *desc) irq_settings_set_per_cpu() argument
53 desc->status_use_accessors |= _IRQ_PER_CPU; irq_settings_set_per_cpu()
56 static inline void irq_settings_set_no_balancing(struct irq_desc *desc) irq_settings_set_no_balancing() argument
58 desc->status_use_accessors |= _IRQ_NO_BALANCING; irq_settings_set_no_balancing()
61 static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc) irq_settings_has_no_balance_set() argument
63 return desc->status_use_accessors & _IRQ_NO_BALANCING; irq_settings_has_no_balance_set()
66 static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc) irq_settings_get_trigger_mask() argument
68 return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK; irq_settings_get_trigger_mask()
72 irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask) irq_settings_set_trigger_mask() argument
74 desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK; irq_settings_set_trigger_mask()
75 desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK; irq_settings_set_trigger_mask()
78 static inline bool irq_settings_is_level(struct irq_desc *desc) irq_settings_is_level() argument
80 return desc->status_use_accessors & _IRQ_LEVEL; irq_settings_is_level()
83 static inline void irq_settings_clr_level(struct irq_desc *desc) irq_settings_clr_level() argument
85 desc->status_use_accessors &= ~_IRQ_LEVEL; irq_settings_clr_level()
88 static inline void irq_settings_set_level(struct irq_desc *desc) irq_settings_set_level() argument
90 desc->status_use_accessors |= _IRQ_LEVEL; irq_settings_set_level()
93 static inline bool irq_settings_can_request(struct irq_desc *desc) irq_settings_can_request() argument
95 return !(desc->status_use_accessors & _IRQ_NOREQUEST); irq_settings_can_request()
98 static inline void irq_settings_clr_norequest(struct irq_desc *desc) irq_settings_clr_norequest() argument
100 desc->status_use_accessors &= ~_IRQ_NOREQUEST; irq_settings_clr_norequest()
103 static inline void irq_settings_set_norequest(struct irq_desc *desc) irq_settings_set_norequest() argument
105 desc->status_use_accessors |= _IRQ_NOREQUEST; irq_settings_set_norequest()
108 static inline bool irq_settings_can_thread(struct irq_desc *desc) irq_settings_can_thread() argument
110 return !(desc->status_use_accessors & _IRQ_NOTHREAD); irq_settings_can_thread()
113 static inline void irq_settings_clr_nothread(struct irq_desc *desc) irq_settings_clr_nothread() argument
115 desc->status_use_accessors &= ~_IRQ_NOTHREAD; irq_settings_clr_nothread()
118 static inline void irq_settings_set_nothread(struct irq_desc *desc) irq_settings_set_nothread() argument
120 desc->status_use_accessors |= _IRQ_NOTHREAD; irq_settings_set_nothread()
123 static inline bool irq_settings_can_probe(struct irq_desc *desc) irq_settings_can_probe() argument
125 return !(desc->status_use_accessors & _IRQ_NOPROBE); irq_settings_can_probe()
128 static inline void irq_settings_clr_noprobe(struct irq_desc *desc) irq_settings_clr_noprobe() argument
130 desc->status_use_accessors &= ~_IRQ_NOPROBE; irq_settings_clr_noprobe()
133 static inline void irq_settings_set_noprobe(struct irq_desc *desc) irq_settings_set_noprobe() argument
135 desc->status_use_accessors |= _IRQ_NOPROBE; irq_settings_set_noprobe()
138 static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc) irq_settings_can_move_pcntxt() argument
140 return desc->status_use_accessors & _IRQ_MOVE_PCNTXT; irq_settings_can_move_pcntxt()
143 static inline bool irq_settings_can_autoenable(struct irq_desc *desc) irq_settings_can_autoenable() argument
145 return !(desc->status_use_accessors & _IRQ_NOAUTOEN); irq_settings_can_autoenable()
148 static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) irq_settings_is_nested_thread() argument
150 return desc->status_use_accessors & _IRQ_NESTED_THREAD; irq_settings_is_nested_thread()
153 static inline bool irq_settings_is_polled(struct irq_desc *desc) irq_settings_is_polled() argument
155 return desc->status_use_accessors & _IRQ_IS_POLLED; irq_settings_is_polled()
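
The settings.h helpers above are one-line accessors over a single flags word, desc->status_use_accessors. A minimal user-space sketch of the same clear-and-set pattern follows; the DEMO_* names and values are made up for illustration, not the kernel's _IRQ_* constants.

/* Sketch of the clear-and-set accessor pattern used in settings.h. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_PER_CPU     (1u << 0)
#define DEMO_LEVEL       (1u << 1)
#define DEMO_MODIFY_MASK (DEMO_PER_CPU | DEMO_LEVEL)

struct demo_desc { uint32_t status; };

/* mirrors irq_settings_clr_and_set(): clear first, then set */
static void demo_clr_and_set(struct demo_desc *d, uint32_t clr, uint32_t set)
{
	d->status &= ~(clr & DEMO_MODIFY_MASK);
	d->status |= (set & DEMO_MODIFY_MASK);
}

/* mirrors the one-line predicates such as irq_settings_is_level() */
static bool demo_is_level(const struct demo_desc *d)
{
	return d->status & DEMO_LEVEL;
}

int main(void)
{
	struct demo_desc d = { .status = DEMO_PER_CPU };

	demo_clr_and_set(&d, DEMO_PER_CPU, DEMO_LEVEL);
	printf("level: %d, status: 0x%x\n", demo_is_level(&d), (unsigned)d.status);
	return 0;
}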

debug.h

7 #define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
8 #define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
12 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) print_irq_desc() argument
14 printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n", print_irq_desc()
15 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); print_irq_desc()
16 printk("->handle_irq(): %p, ", desc->handle_irq); print_irq_desc()
17 print_symbol("%s\n", (unsigned long)desc->handle_irq); print_irq_desc()
18 printk("->irq_data.chip(): %p, ", desc->irq_data.chip); print_irq_desc()
19 print_symbol("%s\n", (unsigned long)desc->irq_data.chip); print_irq_desc()
20 printk("->action(): %p\n", desc->action); print_irq_desc()
21 if (desc->action) { print_irq_desc()
22 printk("->action->handler(): %p, ", desc->action->handler); print_irq_desc()
23 print_symbol("%s\n", (unsigned long)desc->action->handler); print_irq_desc()
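
The ___P()/___PS() macros above rely on the preprocessor's stringify operator: #f turns the flag argument into its own label, so each set bit prints under its source-level name. A stand-alone sketch of the trick, with illustrative flag names:

#include <stdio.h>

#define DEMO_PENDING (1u << 0)
#define DEMO_REPLAY  (1u << 1)

#define PRINT_IF_SET(state, f) \
	do { if ((state) & (f)) printf("%14s set\n", #f); } while (0)

int main(void)
{
	unsigned int istate = DEMO_PENDING;

	PRINT_IF_SET(istate, DEMO_PENDING); /* prints "  DEMO_PENDING set" */
	PRINT_IF_SET(istate, DEMO_REPLAY);  /* prints nothing */
	return 0;
}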

pm.c

17 bool irq_pm_check_wakeup(struct irq_desc *desc) irq_pm_check_wakeup() argument
19 if (irqd_is_wakeup_armed(&desc->irq_data)) { irq_pm_check_wakeup()
20 irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED); irq_pm_check_wakeup()
21 desc->istate |= IRQS_SUSPENDED | IRQS_PENDING; irq_pm_check_wakeup()
22 desc->depth++; irq_pm_check_wakeup()
23 irq_disable(desc); irq_pm_check_wakeup()
31 * Called from __setup_irq() with desc->lock held after @action has
34 void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) irq_pm_install_action() argument
36 desc->nr_actions++; irq_pm_install_action()
39 desc->force_resume_depth++; irq_pm_install_action()
41 WARN_ON_ONCE(desc->force_resume_depth && irq_pm_install_action()
42 desc->force_resume_depth != desc->nr_actions); irq_pm_install_action()
45 desc->no_suspend_depth++; irq_pm_install_action()
47 desc->cond_suspend_depth++; irq_pm_install_action()
49 WARN_ON_ONCE(desc->no_suspend_depth && irq_pm_install_action()
50 (desc->no_suspend_depth + irq_pm_install_action()
51 desc->cond_suspend_depth) != desc->nr_actions); irq_pm_install_action()
55 * Called from __free_irq() with desc->lock held after @action has
58 void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) irq_pm_remove_action() argument
60 desc->nr_actions--; irq_pm_remove_action()
63 desc->force_resume_depth--; irq_pm_remove_action()
66 desc->no_suspend_depth--; irq_pm_remove_action()
68 desc->cond_suspend_depth--; irq_pm_remove_action()
71 static bool suspend_device_irq(struct irq_desc *desc, int irq) suspend_device_irq() argument
73 if (!desc->action || desc->no_suspend_depth) suspend_device_irq()
76 if (irqd_is_wakeup_set(&desc->irq_data)) { suspend_device_irq()
77 irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED); suspend_device_irq()
87 desc->istate |= IRQS_SUSPENDED; suspend_device_irq()
88 __disable_irq(desc, irq); suspend_device_irq()
96 if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND) suspend_device_irq()
97 mask_irq(desc); suspend_device_irq()
119 struct irq_desc *desc; suspend_device_irqs() local
122 for_each_irq_desc(irq, desc) { for_each_irq_desc()
126 raw_spin_lock_irqsave(&desc->lock, flags); for_each_irq_desc()
127 sync = suspend_device_irq(desc, irq); for_each_irq_desc()
128 raw_spin_unlock_irqrestore(&desc->lock, flags); for_each_irq_desc()
136 static void resume_irq(struct irq_desc *desc, int irq) resume_irq() argument
138 irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED); resume_irq()
140 if (desc->istate & IRQS_SUSPENDED) resume_irq()
144 if (!desc->force_resume_depth) resume_irq()
148 desc->depth++; resume_irq()
150 desc->istate &= ~IRQS_SUSPENDED; resume_irq()
151 __enable_irq(desc, irq); resume_irq()
156 struct irq_desc *desc; resume_irqs() local
159 for_each_irq_desc(irq, desc) { for_each_irq_desc()
161 bool is_early = desc->action && for_each_irq_desc()
162 desc->action->flags & IRQF_EARLY_RESUME; for_each_irq_desc()
167 raw_spin_lock_irqsave(&desc->lock, flags); for_each_irq_desc()
168 resume_irq(desc, irq); for_each_irq_desc()
169 raw_spin_unlock_irqrestore(&desc->lock, flags); for_each_irq_desc()
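
irq_pm_install_action() above keeps per-descriptor counters and warns when actions on a shared line disagree about suspend behaviour. A user-space sketch of that bookkeeping and its consistency check (names are illustrative, and the fprintf stands in for the excerpt's WARN_ON_ONCE):

#include <stdbool.h>
#include <stdio.h>

struct demo_desc {
	int nr_actions;
	int no_suspend_depth;
	int cond_suspend_depth;
};

static void demo_install(struct demo_desc *d, bool no_suspend, bool cond_suspend)
{
	d->nr_actions++;
	if (no_suspend)
		d->no_suspend_depth++;
	else if (cond_suspend)
		d->cond_suspend_depth++;

	/* if any action opted out of suspend, all of them must have */
	if (d->no_suspend_depth &&
	    d->no_suspend_depth + d->cond_suspend_depth != d->nr_actions)
		fprintf(stderr, "inconsistent suspend flags on shared line\n");
}

int main(void)
{
	struct demo_desc d = { 0, 0, 0 };

	demo_install(&d, true, false);  /* fine: every action is no-suspend */
	demo_install(&d, false, false); /* now 1 of 2 - triggers the warning */
	return 0;
}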

chip.c

32 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); irq_set_chip() local
34 if (!desc) irq_set_chip()
40 desc->irq_data.chip = chip; irq_set_chip()
41 irq_put_desc_unlock(desc, flags); irq_set_chip()
59 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); irq_set_irq_type() local
62 if (!desc) irq_set_irq_type()
66 ret = __irq_set_trigger(desc, irq, type); irq_set_irq_type()
67 irq_put_desc_busunlock(desc, flags); irq_set_irq_type()
82 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); irq_set_handler_data() local
84 if (!desc) irq_set_handler_data()
86 desc->irq_data.handler_data = data; irq_set_handler_data()
87 irq_put_desc_unlock(desc, flags); irq_set_handler_data()
104 struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL); irq_set_msi_desc_off() local
106 if (!desc) irq_set_msi_desc_off()
108 desc->irq_data.msi_desc = entry; irq_set_msi_desc_off()
111 irq_put_desc_unlock(desc, flags); irq_set_msi_desc_off()
137 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); irq_set_chip_data() local
139 if (!desc) irq_set_chip_data()
141 desc->irq_data.chip_data = data; irq_set_chip_data()
142 irq_put_desc_unlock(desc, flags); irq_set_chip_data()
149 struct irq_desc *desc = irq_to_desc(irq); irq_get_irq_data() local
151 return desc ? &desc->irq_data : NULL; irq_get_irq_data()
155 static void irq_state_clr_disabled(struct irq_desc *desc) irq_state_clr_disabled() argument
157 irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); irq_state_clr_disabled()
160 static void irq_state_set_disabled(struct irq_desc *desc) irq_state_set_disabled() argument
162 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); irq_state_set_disabled()
165 static void irq_state_clr_masked(struct irq_desc *desc) irq_state_clr_masked() argument
167 irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); irq_state_clr_masked()
170 static void irq_state_set_masked(struct irq_desc *desc) irq_state_set_masked() argument
172 irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); irq_state_set_masked()
175 int irq_startup(struct irq_desc *desc, bool resend) irq_startup() argument
179 irq_state_clr_disabled(desc); irq_startup()
180 desc->depth = 0; irq_startup()
182 irq_domain_activate_irq(&desc->irq_data); irq_startup()
183 if (desc->irq_data.chip->irq_startup) { irq_startup()
184 ret = desc->irq_data.chip->irq_startup(&desc->irq_data); irq_startup()
185 irq_state_clr_masked(desc); irq_startup()
187 irq_enable(desc); irq_startup()
190 check_irq_resend(desc, desc->irq_data.irq); irq_startup()
194 void irq_shutdown(struct irq_desc *desc) irq_shutdown() argument
196 irq_state_set_disabled(desc); irq_shutdown()
197 desc->depth = 1; irq_shutdown()
198 if (desc->irq_data.chip->irq_shutdown) irq_shutdown()
199 desc->irq_data.chip->irq_shutdown(&desc->irq_data); irq_shutdown()
200 else if (desc->irq_data.chip->irq_disable) irq_shutdown()
201 desc->irq_data.chip->irq_disable(&desc->irq_data); irq_shutdown()
203 desc->irq_data.chip->irq_mask(&desc->irq_data); irq_shutdown()
204 irq_domain_deactivate_irq(&desc->irq_data); irq_shutdown()
205 irq_state_set_masked(desc); irq_shutdown()
208 void irq_enable(struct irq_desc *desc) irq_enable() argument
210 irq_state_clr_disabled(desc); irq_enable()
211 if (desc->irq_data.chip->irq_enable) irq_enable()
212 desc->irq_data.chip->irq_enable(&desc->irq_data); irq_enable()
214 desc->irq_data.chip->irq_unmask(&desc->irq_data); irq_enable()
215 irq_state_clr_masked(desc); irq_enable()
220 * @desc: irq descriptor which should be disabled
231 void irq_disable(struct irq_desc *desc) irq_disable() argument
233 irq_state_set_disabled(desc); irq_disable()
234 if (desc->irq_data.chip->irq_disable) { irq_disable()
235 desc->irq_data.chip->irq_disable(&desc->irq_data); irq_disable()
236 irq_state_set_masked(desc); irq_disable()
240 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu) irq_percpu_enable() argument
242 if (desc->irq_data.chip->irq_enable) irq_percpu_enable()
243 desc->irq_data.chip->irq_enable(&desc->irq_data); irq_percpu_enable()
245 desc->irq_data.chip->irq_unmask(&desc->irq_data); irq_percpu_enable()
246 cpumask_set_cpu(cpu, desc->percpu_enabled); irq_percpu_enable()
249 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu) irq_percpu_disable() argument
251 if (desc->irq_data.chip->irq_disable) irq_percpu_disable()
252 desc->irq_data.chip->irq_disable(&desc->irq_data); irq_percpu_disable()
254 desc->irq_data.chip->irq_mask(&desc->irq_data); irq_percpu_disable()
255 cpumask_clear_cpu(cpu, desc->percpu_enabled); irq_percpu_disable()
258 static inline void mask_ack_irq(struct irq_desc *desc) mask_ack_irq() argument
260 if (desc->irq_data.chip->irq_mask_ack) mask_ack_irq()
261 desc->irq_data.chip->irq_mask_ack(&desc->irq_data); mask_ack_irq()
263 desc->irq_data.chip->irq_mask(&desc->irq_data); mask_ack_irq()
264 if (desc->irq_data.chip->irq_ack) mask_ack_irq()
265 desc->irq_data.chip->irq_ack(&desc->irq_data); mask_ack_irq()
267 irq_state_set_masked(desc); mask_ack_irq()
270 void mask_irq(struct irq_desc *desc) mask_irq() argument
272 if (desc->irq_data.chip->irq_mask) { mask_irq()
273 desc->irq_data.chip->irq_mask(&desc->irq_data); mask_irq()
274 irq_state_set_masked(desc); mask_irq()
278 void unmask_irq(struct irq_desc *desc) unmask_irq() argument
280 if (desc->irq_data.chip->irq_unmask) { unmask_irq()
281 desc->irq_data.chip->irq_unmask(&desc->irq_data); unmask_irq()
282 irq_state_clr_masked(desc); unmask_irq()
286 void unmask_threaded_irq(struct irq_desc *desc) unmask_threaded_irq() argument
288 struct irq_chip *chip = desc->irq_data.chip; unmask_threaded_irq()
291 chip->irq_eoi(&desc->irq_data); unmask_threaded_irq()
294 chip->irq_unmask(&desc->irq_data); unmask_threaded_irq()
295 irq_state_clr_masked(desc); unmask_threaded_irq()
309 struct irq_desc *desc = irq_to_desc(irq); handle_nested_irq() local
315 raw_spin_lock_irq(&desc->lock); handle_nested_irq()
317 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); handle_nested_irq()
318 kstat_incr_irqs_this_cpu(irq, desc); handle_nested_irq()
320 action = desc->action; handle_nested_irq()
321 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) { handle_nested_irq()
322 desc->istate |= IRQS_PENDING; handle_nested_irq()
326 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); handle_nested_irq()
327 raw_spin_unlock_irq(&desc->lock); handle_nested_irq()
331 note_interrupt(irq, desc, action_ret); handle_nested_irq()
333 raw_spin_lock_irq(&desc->lock); handle_nested_irq()
334 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); handle_nested_irq()
337 raw_spin_unlock_irq(&desc->lock); handle_nested_irq()
341 static bool irq_check_poll(struct irq_desc *desc) irq_check_poll() argument
343 if (!(desc->istate & IRQS_POLL_INPROGRESS)) irq_check_poll()
345 return irq_wait_for_poll(desc); irq_check_poll()
348 static bool irq_may_run(struct irq_desc *desc) irq_may_run() argument
356 if (!irqd_has_set(&desc->irq_data, mask)) irq_may_run()
364 if (irq_pm_check_wakeup(desc)) irq_may_run()
370 return irq_check_poll(desc); irq_may_run()
376 * @desc: the interrupt description structure for this irq
386 handle_simple_irq(unsigned int irq, struct irq_desc *desc) handle_simple_irq() argument
388 raw_spin_lock(&desc->lock); handle_simple_irq()
390 if (!irq_may_run(desc)) handle_simple_irq()
393 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); handle_simple_irq()
394 kstat_incr_irqs_this_cpu(irq, desc); handle_simple_irq()
396 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { handle_simple_irq()
397 desc->istate |= IRQS_PENDING; handle_simple_irq()
401 handle_irq_event(desc); handle_simple_irq()
404 raw_spin_unlock(&desc->lock); handle_simple_irq()
412 static void cond_unmask_irq(struct irq_desc *desc) cond_unmask_irq() argument
421 if (!irqd_irq_disabled(&desc->irq_data) && cond_unmask_irq()
422 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) cond_unmask_irq()
423 unmask_irq(desc); cond_unmask_irq()
429 * @desc: the interrupt description structure for this irq
437 handle_level_irq(unsigned int irq, struct irq_desc *desc) handle_level_irq() argument
439 raw_spin_lock(&desc->lock); handle_level_irq()
440 mask_ack_irq(desc); handle_level_irq()
442 if (!irq_may_run(desc)) handle_level_irq()
445 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); handle_level_irq()
446 kstat_incr_irqs_this_cpu(irq, desc); handle_level_irq()
452 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { handle_level_irq()
453 desc->istate |= IRQS_PENDING; handle_level_irq()
457 handle_irq_event(desc); handle_level_irq()
459 cond_unmask_irq(desc); handle_level_irq()
462 raw_spin_unlock(&desc->lock); handle_level_irq()
467 static inline void preflow_handler(struct irq_desc *desc) preflow_handler() argument
469 if (desc->preflow_handler) preflow_handler()
470 desc->preflow_handler(&desc->irq_data); preflow_handler()
473 static inline void preflow_handler(struct irq_desc *desc) { } preflow_handler() argument
476 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip) cond_unmask_eoi_irq() argument
478 if (!(desc->istate & IRQS_ONESHOT)) { cond_unmask_eoi_irq()
479 chip->irq_eoi(&desc->irq_data); cond_unmask_eoi_irq()
488 if (!irqd_irq_disabled(&desc->irq_data) && cond_unmask_eoi_irq()
489 irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) { cond_unmask_eoi_irq()
490 chip->irq_eoi(&desc->irq_data); cond_unmask_eoi_irq()
491 unmask_irq(desc); cond_unmask_eoi_irq()
493 chip->irq_eoi(&desc->irq_data); cond_unmask_eoi_irq()
500 * @desc: the interrupt description structure for this irq
508 handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) handle_fasteoi_irq() argument
510 struct irq_chip *chip = desc->irq_data.chip; handle_fasteoi_irq()
512 raw_spin_lock(&desc->lock); handle_fasteoi_irq()
514 if (!irq_may_run(desc)) handle_fasteoi_irq()
517 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); handle_fasteoi_irq()
518 kstat_incr_irqs_this_cpu(irq, desc); handle_fasteoi_irq()
524 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { handle_fasteoi_irq()
525 desc->istate |= IRQS_PENDING; handle_fasteoi_irq()
526 mask_irq(desc); handle_fasteoi_irq()
530 if (desc->istate & IRQS_ONESHOT) handle_fasteoi_irq()
531 mask_irq(desc); handle_fasteoi_irq()
533 preflow_handler(desc); handle_fasteoi_irq()
534 handle_irq_event(desc); handle_fasteoi_irq()
536 cond_unmask_eoi_irq(desc, chip); handle_fasteoi_irq()
538 raw_spin_unlock(&desc->lock); handle_fasteoi_irq()
542 chip->irq_eoi(&desc->irq_data); handle_fasteoi_irq()
543 raw_spin_unlock(&desc->lock); handle_fasteoi_irq()
550 * @desc: the interrupt description structure for this irq
564 handle_edge_irq(unsigned int irq, struct irq_desc *desc) handle_edge_irq() argument
566 raw_spin_lock(&desc->lock); handle_edge_irq()
568 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); handle_edge_irq()
570 if (!irq_may_run(desc)) { handle_edge_irq()
571 desc->istate |= IRQS_PENDING; handle_edge_irq()
572 mask_ack_irq(desc); handle_edge_irq()
580 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { handle_edge_irq()
581 desc->istate |= IRQS_PENDING; handle_edge_irq()
582 mask_ack_irq(desc); handle_edge_irq()
586 kstat_incr_irqs_this_cpu(irq, desc); handle_edge_irq()
589 desc->irq_data.chip->irq_ack(&desc->irq_data); handle_edge_irq()
592 if (unlikely(!desc->action)) { handle_edge_irq()
593 mask_irq(desc); handle_edge_irq()
602 if (unlikely(desc->istate & IRQS_PENDING)) { handle_edge_irq()
603 if (!irqd_irq_disabled(&desc->irq_data) && handle_edge_irq()
604 irqd_irq_masked(&desc->irq_data)) handle_edge_irq()
605 unmask_irq(desc); handle_edge_irq()
608 handle_irq_event(desc); handle_edge_irq()
610 } while ((desc->istate & IRQS_PENDING) && handle_edge_irq()
611 !irqd_irq_disabled(&desc->irq_data)); handle_edge_irq()
614 raw_spin_unlock(&desc->lock); handle_edge_irq()
622 * @desc: the interrupt description structure for this irq
627 void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) handle_edge_eoi_irq() argument
629 struct irq_chip *chip = irq_desc_get_chip(desc); handle_edge_eoi_irq()
631 raw_spin_lock(&desc->lock); handle_edge_eoi_irq()
633 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); handle_edge_eoi_irq()
635 if (!irq_may_run(desc)) { handle_edge_eoi_irq()
636 desc->istate |= IRQS_PENDING; handle_edge_eoi_irq()
644 if (irqd_irq_disabled(&desc->irq_data) || !desc->action) { handle_edge_eoi_irq()
645 desc->istate |= IRQS_PENDING; handle_edge_eoi_irq()
649 kstat_incr_irqs_this_cpu(irq, desc); handle_edge_eoi_irq()
652 if (unlikely(!desc->action)) handle_edge_eoi_irq()
655 handle_irq_event(desc); handle_edge_eoi_irq()
657 } while ((desc->istate & IRQS_PENDING) && handle_edge_eoi_irq()
658 !irqd_irq_disabled(&desc->irq_data)); handle_edge_eoi_irq()
661 chip->irq_eoi(&desc->irq_data); handle_edge_eoi_irq()
662 raw_spin_unlock(&desc->lock); handle_edge_eoi_irq()
669 * @desc: the interrupt description structure for this irq
674 handle_percpu_irq(unsigned int irq, struct irq_desc *desc) handle_percpu_irq() argument
676 struct irq_chip *chip = irq_desc_get_chip(desc); handle_percpu_irq()
678 kstat_incr_irqs_this_cpu(irq, desc); handle_percpu_irq()
681 chip->irq_ack(&desc->irq_data); handle_percpu_irq()
683 handle_irq_event_percpu(desc, desc->action); handle_percpu_irq()
686 chip->irq_eoi(&desc->irq_data); handle_percpu_irq()
692 * @desc: the interrupt description structure for this irq
701 void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc) handle_percpu_devid_irq() argument
703 struct irq_chip *chip = irq_desc_get_chip(desc); handle_percpu_devid_irq()
704 struct irqaction *action = desc->action; handle_percpu_devid_irq()
708 kstat_incr_irqs_this_cpu(irq, desc); handle_percpu_devid_irq()
711 chip->irq_ack(&desc->irq_data); handle_percpu_devid_irq()
718 chip->irq_eoi(&desc->irq_data); handle_percpu_devid_irq()
726 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0); __irq_set_handler() local
728 if (!desc) __irq_set_handler()
734 struct irq_data *irq_data = &desc->irq_data; __irq_set_handler()
763 if (desc->irq_data.chip != &no_irq_chip) __irq_set_handler()
764 mask_ack_irq(desc); __irq_set_handler()
765 irq_state_set_disabled(desc); __irq_set_handler()
766 desc->depth = 1; __irq_set_handler()
768 desc->handle_irq = handle; __irq_set_handler()
769 desc->name = name; __irq_set_handler()
772 irq_settings_set_noprobe(desc); __irq_set_handler()
773 irq_settings_set_norequest(desc); __irq_set_handler()
774 irq_settings_set_nothread(desc); __irq_set_handler()
775 irq_startup(desc, true); __irq_set_handler()
778 irq_put_desc_busunlock(desc, flags); __irq_set_handler()
794 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); irq_modify_status() local
796 if (!desc) irq_modify_status()
798 irq_settings_clr_and_set(desc, clr, set); irq_modify_status()
800 irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU | irq_modify_status()
802 if (irq_settings_has_no_balance_set(desc)) irq_modify_status()
803 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); irq_modify_status()
804 if (irq_settings_is_per_cpu(desc)) irq_modify_status()
805 irqd_set(&desc->irq_data, IRQD_PER_CPU); irq_modify_status()
806 if (irq_settings_can_move_pcntxt(desc)) irq_modify_status()
807 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT); irq_modify_status()
808 if (irq_settings_is_level(desc)) irq_modify_status()
809 irqd_set(&desc->irq_data, IRQD_LEVEL); irq_modify_status()
811 irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc)); irq_modify_status()
813 irq_put_desc_unlock(desc, flags); irq_modify_status()
825 struct irq_desc *desc; irq_cpu_online() local
831 desc = irq_to_desc(irq); for_each_active_irq()
832 if (!desc) for_each_active_irq()
835 raw_spin_lock_irqsave(&desc->lock, flags); for_each_active_irq()
837 chip = irq_data_get_irq_chip(&desc->irq_data); for_each_active_irq()
840 !irqd_irq_disabled(&desc->irq_data))) for_each_active_irq()
841 chip->irq_cpu_online(&desc->irq_data); for_each_active_irq()
843 raw_spin_unlock_irqrestore(&desc->lock, flags); for_each_active_irq()
855 struct irq_desc *desc; irq_cpu_offline() local
861 desc = irq_to_desc(irq); for_each_active_irq()
862 if (!desc) for_each_active_irq()
865 raw_spin_lock_irqsave(&desc->lock, flags); for_each_active_irq()
867 chip = irq_data_get_irq_chip(&desc->irq_data); for_each_active_irq()
870 !irqd_irq_disabled(&desc->irq_data))) for_each_active_irq()
871 chip->irq_cpu_offline(&desc->irq_data); for_each_active_irq()
873 raw_spin_unlock_irqrestore(&desc->lock, flags); for_each_active_irq()
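
handle_level_irq() above follows a fixed shape: mask and ack first; if the line is disabled or has no action, remember the event as pending and stay masked; otherwise run the event and unmask only if nothing disabled the line meanwhile. A user-space sketch of that shape, with illustrative types rather than the kernel's:

#include <stdbool.h>
#include <stdio.h>

struct demo_irq {
	bool masked;
	bool disabled;
	bool pending;
};

static void demo_handle_level(struct demo_irq *irq, void (*action)(void))
{
	irq->masked = true;               /* mask_ack_irq() */

	if (!action || irq->disabled) {
		irq->pending = true;      /* stays masked until re-enabled */
		return;
	}

	action();                         /* handle_irq_event() */

	if (!irq->disabled && irq->masked)
		irq->masked = false;      /* cond_unmask_irq() */
}

static void demo_action(void) { puts("device serviced"); }

int main(void)
{
	struct demo_irq irq = { false, false, false };

	demo_handle_level(&irq, demo_action);
	printf("masked after handling: %d\n", irq.masked);
	return 0;
}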

autoprobe.c

33 struct irq_desc *desc; probe_irq_on() local
46 for_each_irq_desc_reverse(i, desc) { for_each_irq_desc_reverse()
47 raw_spin_lock_irq(&desc->lock); for_each_irq_desc_reverse()
48 if (!desc->action && irq_settings_can_probe(desc)) { for_each_irq_desc_reverse()
53 if (desc->irq_data.chip->irq_set_type) for_each_irq_desc_reverse()
54 desc->irq_data.chip->irq_set_type(&desc->irq_data, for_each_irq_desc_reverse()
56 irq_startup(desc, false); for_each_irq_desc_reverse()
58 raw_spin_unlock_irq(&desc->lock); for_each_irq_desc_reverse()
69 for_each_irq_desc_reverse(i, desc) { for_each_irq_desc_reverse()
70 raw_spin_lock_irq(&desc->lock); for_each_irq_desc_reverse()
71 if (!desc->action && irq_settings_can_probe(desc)) { for_each_irq_desc_reverse()
72 desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; for_each_irq_desc_reverse()
73 if (irq_startup(desc, false)) for_each_irq_desc_reverse()
74 desc->istate |= IRQS_PENDING; for_each_irq_desc_reverse()
76 raw_spin_unlock_irq(&desc->lock); for_each_irq_desc_reverse()
87 for_each_irq_desc(i, desc) { for_each_irq_desc()
88 raw_spin_lock_irq(&desc->lock); for_each_irq_desc()
90 if (desc->istate & IRQS_AUTODETECT) { for_each_irq_desc()
92 if (!(desc->istate & IRQS_WAITING)) { for_each_irq_desc()
93 desc->istate &= ~IRQS_AUTODETECT; for_each_irq_desc()
94 irq_shutdown(desc); for_each_irq_desc()
99 raw_spin_unlock_irq(&desc->lock); for_each_irq_desc()
121 struct irq_desc *desc; probe_irq_mask() local
124 for_each_irq_desc(i, desc) { for_each_irq_desc()
125 raw_spin_lock_irq(&desc->lock); for_each_irq_desc()
126 if (desc->istate & IRQS_AUTODETECT) { for_each_irq_desc()
127 if (i < 16 && !(desc->istate & IRQS_WAITING)) for_each_irq_desc()
130 desc->istate &= ~IRQS_AUTODETECT; for_each_irq_desc()
131 irq_shutdown(desc); for_each_irq_desc()
133 raw_spin_unlock_irq(&desc->lock); for_each_irq_desc()
161 struct irq_desc *desc; probe_irq_off() local
163 for_each_irq_desc(i, desc) { for_each_irq_desc()
164 raw_spin_lock_irq(&desc->lock); for_each_irq_desc()
166 if (desc->istate & IRQS_AUTODETECT) { for_each_irq_desc()
167 if (!(desc->istate & IRQS_WAITING)) { for_each_irq_desc()
172 desc->istate &= ~IRQS_AUTODETECT; for_each_irq_desc()
173 irq_shutdown(desc); for_each_irq_desc()
175 raw_spin_unlock_irq(&desc->lock); for_each_irq_desc()

irqdesc.c

39 static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) alloc_masks() argument
41 if (!zalloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node)) alloc_masks()
45 if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) { alloc_masks()
46 free_cpumask_var(desc->irq_data.affinity); alloc_masks()
53 static void desc_smp_init(struct irq_desc *desc, int node) desc_smp_init() argument
55 desc->irq_data.node = node; desc_smp_init()
56 cpumask_copy(desc->irq_data.affinity, irq_default_affinity); desc_smp_init()
58 cpumask_clear(desc->pending_mask); desc_smp_init()
62 static inline int desc_node(struct irq_desc *desc) desc_node() argument
64 return desc->irq_data.node; desc_node()
69 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; } desc_smp_init() argument
70 static inline void desc_smp_init(struct irq_desc *desc, int node) { } desc_node() argument
71 static inline int desc_node(struct irq_desc *desc) { return 0; } desc_node() argument
74 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node, desc_set_defaults() argument
79 desc->irq_data.irq = irq; desc_set_defaults()
80 desc->irq_data.chip = &no_irq_chip; desc_set_defaults()
81 desc->irq_data.chip_data = NULL; desc_set_defaults()
82 desc->irq_data.handler_data = NULL; desc_set_defaults()
83 desc->irq_data.msi_desc = NULL; desc_set_defaults()
84 irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS); desc_set_defaults()
85 irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); desc_set_defaults()
86 desc->handle_irq = handle_bad_irq; desc_set_defaults()
87 desc->depth = 1; desc_set_defaults()
88 desc->irq_count = 0; desc_set_defaults()
89 desc->irqs_unhandled = 0; desc_set_defaults()
90 desc->name = NULL; desc_set_defaults()
91 desc->owner = owner; desc_set_defaults()
93 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0; desc_set_defaults()
94 desc_smp_init(desc, node); desc_set_defaults()
107 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc) irq_insert_desc() argument
109 radix_tree_insert(&irq_desc_tree, irq, desc); irq_insert_desc()
124 static void free_masks(struct irq_desc *desc) free_masks() argument
127 free_cpumask_var(desc->pending_mask); free_masks()
129 free_cpumask_var(desc->irq_data.affinity); free_masks()
132 static inline void free_masks(struct irq_desc *desc) { } free_masks() argument
147 struct irq_desc *desc; alloc_desc() local
150 desc = kzalloc_node(sizeof(*desc), gfp, node); alloc_desc()
151 if (!desc) alloc_desc()
154 desc->kstat_irqs = alloc_percpu(unsigned int); alloc_desc()
155 if (!desc->kstat_irqs) alloc_desc()
158 if (alloc_masks(desc, gfp, node)) alloc_desc()
161 raw_spin_lock_init(&desc->lock); alloc_desc()
162 lockdep_set_class(&desc->lock, &irq_desc_lock_class); alloc_desc()
164 desc_set_defaults(irq, desc, node, owner); alloc_desc()
166 return desc; alloc_desc()
169 free_percpu(desc->kstat_irqs); alloc_desc()
171 kfree(desc); alloc_desc()
177 struct irq_desc *desc = irq_to_desc(irq); free_desc() local
179 unregister_irq_proc(irq, desc); free_desc()
191 free_masks(desc); free_desc()
192 free_percpu(desc->kstat_irqs); free_desc()
193 kfree(desc); free_desc()
199 struct irq_desc *desc; alloc_descs() local
203 desc = alloc_desc(start + i, node, owner); alloc_descs()
204 if (!desc) alloc_descs()
207 irq_insert_desc(start + i, desc); alloc_descs()
233 struct irq_desc *desc; early_irq_init() local
251 desc = alloc_desc(i, node, NULL); early_irq_init()
253 irq_insert_desc(i, desc); early_irq_init()
271 struct irq_desc *desc; early_irq_init() local
277 desc = irq_desc; early_irq_init()
281 desc[i].kstat_irqs = alloc_percpu(unsigned int); early_irq_init()
282 alloc_masks(&desc[i], GFP_KERNEL, node); early_irq_init()
283 raw_spin_lock_init(&desc[i].lock); early_irq_init()
284 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class); early_irq_init()
285 desc_set_defaults(i, &desc[i], node, NULL); early_irq_init()
298 struct irq_desc *desc = irq_to_desc(irq); free_desc() local
301 raw_spin_lock_irqsave(&desc->lock, flags); free_desc()
302 desc_set_defaults(irq, desc, desc_node(desc), NULL); free_desc()
303 raw_spin_unlock_irqrestore(&desc->lock, flags); free_desc()
312 struct irq_desc *desc = irq_to_desc(start + i); alloc_descs() local
314 desc->owner = owner; alloc_descs()
347 struct irq_desc *desc = irq_to_desc(irq); generic_handle_irq() local
349 if (!desc) generic_handle_irq()
351 generic_handle_irq_desc(irq, desc); generic_handle_irq()
542 struct irq_desc *desc = irq_to_desc(irq); __irq_get_desc_lock() local
544 if (desc) { __irq_get_desc_lock()
547 !irq_settings_is_per_cpu_devid(desc)) __irq_get_desc_lock()
551 irq_settings_is_per_cpu_devid(desc)) __irq_get_desc_lock()
556 chip_bus_lock(desc); __irq_get_desc_lock()
557 raw_spin_lock_irqsave(&desc->lock, *flags); __irq_get_desc_lock()
559 return desc; __irq_get_desc_lock()
562 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus) __irq_put_desc_unlock() argument
564 raw_spin_unlock_irqrestore(&desc->lock, flags); __irq_put_desc_unlock()
566 chip_bus_sync_unlock(desc); __irq_put_desc_unlock()
571 struct irq_desc *desc = irq_to_desc(irq); irq_set_percpu_devid() local
573 if (!desc) irq_set_percpu_devid()
576 if (desc->percpu_enabled) irq_set_percpu_devid()
579 desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL); irq_set_percpu_devid()
581 if (!desc->percpu_enabled) irq_set_percpu_devid()
604 struct irq_desc *desc = irq_to_desc(irq); kstat_irqs_cpu() local
606 return desc && desc->kstat_irqs ? kstat_irqs_cpu()
607 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0; kstat_irqs_cpu()
620 struct irq_desc *desc = irq_to_desc(irq); kstat_irqs() local
624 if (!desc || !desc->kstat_irqs) kstat_irqs()
627 sum += *per_cpu_ptr(desc->kstat_irqs, cpu); kstat_irqs()
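
alloc_desc() above uses the kernel's goto-unwind error-handling idiom: each failing allocation jumps to a label that releases exactly what was acquired before it, in reverse order. A user-space sketch of the same shape, with illustrative field names:

#include <stdlib.h>

struct demo_desc {
	unsigned int *kstat;
	unsigned long *mask;
};

static struct demo_desc *demo_alloc_desc(void)
{
	struct demo_desc *d = calloc(1, sizeof(*d));

	if (!d)
		return NULL;

	d->kstat = calloc(1, sizeof(*d->kstat));	/* ~alloc_percpu() */
	if (!d->kstat)
		goto err_desc;

	d->mask = calloc(1, sizeof(*d->mask));		/* ~alloc_masks() */
	if (!d->mask)
		goto err_kstat;

	return d;

err_kstat:
	free(d->kstat);
err_desc:
	free(d);
	return NULL;
}

int main(void)
{
	struct demo_desc *d = demo_alloc_desc();

	if (d) {
		free(d->mask);
		free(d->kstat);
		free(d);
	}
	return 0;
}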

resend.c

33 struct irq_desc *desc; resend_irqs() local
39 desc = irq_to_desc(irq); resend_irqs()
41 desc->handle_irq(irq, desc); resend_irqs()
54 * Is called with interrupts disabled and desc->lock held.
56 void check_irq_resend(struct irq_desc *desc, unsigned int irq) check_irq_resend() argument
64 if (irq_settings_is_level(desc)) { check_irq_resend()
65 desc->istate &= ~IRQS_PENDING; check_irq_resend()
68 if (desc->istate & IRQS_REPLAY) check_irq_resend()
70 if (desc->istate & IRQS_PENDING) { check_irq_resend()
71 desc->istate &= ~IRQS_PENDING; check_irq_resend()
72 desc->istate |= IRQS_REPLAY; check_irq_resend()
74 if (!desc->irq_data.chip->irq_retrigger || check_irq_resend()
75 !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) { check_irq_resend()
83 if (irq_settings_is_nested_thread(desc)) { check_irq_resend()
89 if (!desc->parent_irq) check_irq_resend()
91 irq = desc->parent_irq; check_irq_resend()
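
check_irq_resend() above only replays pending edge-triggered interrupts, and at most once (IRQS_REPLAY); level-triggered lines are re-sampled by hardware, so their pending bit is simply dropped. A condensed sketch of that decision ladder, with illustrative types rather than the kernel's istate machinery:

#include <stdbool.h>

struct demo_state {
	bool level;
	bool pending;
	bool replay;
};

static bool demo_needs_resend(struct demo_state *s)
{
	if (s->level) {
		s->pending = false;	/* hardware re-asserts the line */
		return false;
	}
	if (s->replay || !s->pending)
		return false;		/* nothing to do, or already queued */

	s->pending = false;
	s->replay = true;		/* hw retrigger, or software resend */
	return true;
}

int main(void)
{
	struct demo_state s = { false, true, false };

	return demo_needs_resend(&s) ? 0 : 1;
}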

handle.c

26 * @desc: description of the interrupt
30 void handle_bad_irq(unsigned int irq, struct irq_desc *desc) handle_bad_irq() argument
32 print_irq_desc(irq, desc); handle_bad_irq()
33 kstat_incr_irqs_this_cpu(irq, desc); handle_bad_irq()
55 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action) __irq_wake_thread() argument
82 * in threads_oneshot are serialized via desc->lock against __irq_wake_thread()
88 * spin_lock(desc->lock); __irq_wake_thread()
89 * desc->state |= IRQS_INPROGRESS; __irq_wake_thread()
90 * spin_unlock(desc->lock); __irq_wake_thread()
92 * desc->threads_oneshot |= mask; __irq_wake_thread()
93 * spin_lock(desc->lock); __irq_wake_thread()
94 * desc->state &= ~IRQS_INPROGRESS; __irq_wake_thread()
95 * spin_unlock(desc->lock); __irq_wake_thread()
100 * spin_lock(desc->lock); __irq_wake_thread()
101 * if (desc->state & IRQS_INPROGRESS) { __irq_wake_thread()
102 * spin_unlock(desc->lock); __irq_wake_thread()
103 * while(desc->state & IRQS_INPROGRESS) __irq_wake_thread()
108 * desc->threads_oneshot &= ~mask; __irq_wake_thread()
109 * spin_unlock(desc->lock); __irq_wake_thread()
112 * or we are waiting in the flow handler for desc->lock to be __irq_wake_thread()
114 * IRQTF_RUNTHREAD under desc->lock. If set it leaves __irq_wake_thread()
117 desc->threads_oneshot |= action->thread_mask; __irq_wake_thread()
128 atomic_inc(&desc->threads_active); __irq_wake_thread()
134 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action) handle_irq_event_percpu() argument
137 unsigned int flags = 0, irq = desc->irq_data.irq; handle_irq_event_percpu()
161 __irq_wake_thread(desc, action); handle_irq_event_percpu()
179 note_interrupt(irq, desc, retval); handle_irq_event_percpu()
183 irqreturn_t handle_irq_event(struct irq_desc *desc) handle_irq_event() argument
185 struct irqaction *action = desc->action; handle_irq_event()
188 desc->istate &= ~IRQS_PENDING; handle_irq_event()
189 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); handle_irq_event()
190 raw_spin_unlock(&desc->lock); handle_irq_event()
192 ret = handle_irq_event_percpu(desc, action); handle_irq_event()
194 raw_spin_lock(&desc->lock); handle_irq_event()
195 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); handle_irq_event()
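
handle_irq_event() above has a characteristic locking shape: mark the line in-progress under the descriptor lock, drop the lock while the actions run, then retake it to clear the flag. A pthread sketch of that shape follows; in the kernel the function is entered with desc->lock (a raw spinlock) already held, so the mutex here is only an illustration.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct demo_desc {
	pthread_mutex_t lock;
	bool in_progress;
};

static void demo_handle_irq_event(struct demo_desc *d, void (*action)(void))
{
	pthread_mutex_lock(&d->lock);
	d->in_progress = true;
	pthread_mutex_unlock(&d->lock);	/* actions run without the lock */

	action();

	pthread_mutex_lock(&d->lock);
	d->in_progress = false;
	pthread_mutex_unlock(&d->lock);
}

static void demo_action(void) { puts("action ran"); }

int main(void)
{
	struct demo_desc d = { PTHREAD_MUTEX_INITIALIZER, false };

	demo_handle_irq_event(&d, demo_action);
	return 0;
}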

proc.c

42 struct irq_desc *desc = irq_to_desc((long)m->private); show_irq_affinity() local
43 const struct cpumask *mask = desc->irq_data.affinity; show_irq_affinity()
46 if (irqd_is_setaffinity_pending(&desc->irq_data)) show_irq_affinity()
47 mask = desc->pending_mask; show_irq_affinity()
58 struct irq_desc *desc = irq_to_desc((long)m->private); irq_affinity_hint_proc_show() local
65 raw_spin_lock_irqsave(&desc->lock, flags); irq_affinity_hint_proc_show()
66 if (desc->affinity_hint) irq_affinity_hint_proc_show()
67 cpumask_copy(mask, desc->affinity_hint); irq_affinity_hint_proc_show()
68 raw_spin_unlock_irqrestore(&desc->lock, flags); irq_affinity_hint_proc_show()
243 struct irq_desc *desc = irq_to_desc((long) m->private); irq_node_proc_show() local
245 seq_printf(m, "%d\n", desc->irq_data.node); irq_node_proc_show()
264 struct irq_desc *desc = irq_to_desc((long) m->private); irq_spurious_proc_show() local
267 desc->irq_count, desc->irqs_unhandled, irq_spurious_proc_show()
268 jiffies_to_msecs(desc->last_unhandled)); irq_spurious_proc_show()
288 struct irq_desc *desc = irq_to_desc(irq); name_unique() local
293 raw_spin_lock_irqsave(&desc->lock, flags); name_unique()
294 for (action = desc->action ; action; action = action->next) { name_unique()
301 raw_spin_unlock_irqrestore(&desc->lock, flags); name_unique()
308 struct irq_desc *desc = irq_to_desc(irq); register_handler_proc() local
310 if (!desc->dir || action->dir || !action->name || register_handler_proc()
318 action->dir = proc_mkdir(name, desc->dir); register_handler_proc()
325 void register_irq_proc(unsigned int irq, struct irq_desc *desc) register_irq_proc() argument
330 if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip)) register_irq_proc()
340 if (desc->dir) register_irq_proc()
347 desc->dir = proc_mkdir(name, root_irq_dir); register_irq_proc()
348 if (!desc->dir) register_irq_proc()
353 proc_create_data("smp_affinity", 0644, desc->dir, register_irq_proc()
357 proc_create_data("affinity_hint", 0444, desc->dir, register_irq_proc()
361 proc_create_data("smp_affinity_list", 0644, desc->dir, register_irq_proc()
364 proc_create_data("node", 0444, desc->dir, register_irq_proc()
368 proc_create_data("spurious", 0444, desc->dir, register_irq_proc()
375 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) unregister_irq_proc() argument
379 if (!root_irq_dir || !desc->dir) unregister_irq_proc()
382 remove_proc_entry("smp_affinity", desc->dir); unregister_irq_proc()
383 remove_proc_entry("affinity_hint", desc->dir); unregister_irq_proc()
384 remove_proc_entry("smp_affinity_list", desc->dir); unregister_irq_proc()
385 remove_proc_entry("node", desc->dir); unregister_irq_proc()
387 remove_proc_entry("spurious", desc->dir); unregister_irq_proc()
412 struct irq_desc *desc; init_irq_proc() local
424 for_each_irq_desc(irq, desc) { for_each_irq_desc()
425 if (!desc) for_each_irq_desc()
428 register_irq_proc(irq, desc); for_each_irq_desc()
450 struct irq_desc *desc; show_interrupts() local
470 desc = irq_to_desc(i); show_interrupts()
471 if (!desc) show_interrupts()
474 raw_spin_lock_irqsave(&desc->lock, flags); show_interrupts()
477 action = desc->action; show_interrupts()
485 if (desc->irq_data.chip) { show_interrupts()
486 if (desc->irq_data.chip->irq_print_chip) show_interrupts()
487 desc->irq_data.chip->irq_print_chip(&desc->irq_data, p); show_interrupts()
488 else if (desc->irq_data.chip->name) show_interrupts()
489 seq_printf(p, " %8s", desc->irq_data.chip->name); show_interrupts()
495 if (desc->irq_data.domain) show_interrupts()
496 seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq); show_interrupts()
498 seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge"); show_interrupts()
500 if (desc->name) show_interrupts()
501 seq_printf(p, "-%-8s", desc->name); show_interrupts()
511 raw_spin_unlock_irqrestore(&desc->lock, flags); show_interrupts()
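
The files registered above appear under /proc/irq/<n>/. A minimal user-space reader for one of them; it assumes IRQ 0 exists on the running system, and any populated /proc/irq/<n>/ directory works the same way.

#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/proc/irq/0/smp_affinity", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("irq 0 affinity mask: %s", buf);
	fclose(f);
	return 0;
}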

internals.h

36 * Bit masks for desc->core_internal_state__do_not_mess_with_it
64 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
66 extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
67 extern void __enable_irq(struct irq_desc *desc, unsigned int irq);
69 extern int irq_startup(struct irq_desc *desc, bool resend);
70 extern void irq_shutdown(struct irq_desc *desc);
71 extern void irq_enable(struct irq_desc *desc);
72 extern void irq_disable(struct irq_desc *desc);
73 extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
74 extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
75 extern void mask_irq(struct irq_desc *desc);
76 extern void unmask_irq(struct irq_desc *desc);
77 extern void unmask_threaded_irq(struct irq_desc *desc);
89 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr); irq_unlock_sparse()
91 irqreturn_t handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action); irq_unlock_sparse()
92 irqreturn_t handle_irq_event(struct irq_desc *desc); irq_unlock_sparse()
95 void check_irq_resend(struct irq_desc *desc, unsigned int irq); irq_unlock_sparse()
96 bool irq_wait_for_poll(struct irq_desc *desc); irq_unlock_sparse()
97 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action); irq_unlock_sparse()
100 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc); irq_unlock_sparse()
101 extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc); irq_unlock_sparse()
105 static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { } unregister_irq_proc() argument
106 static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { } register_handler_proc() argument
115 extern void irq_set_thread_affinity(struct irq_desc *desc);
121 static inline void chip_bus_lock(struct irq_desc *desc) chip_bus_lock() argument
123 if (unlikely(desc->irq_data.chip->irq_bus_lock)) chip_bus_lock()
124 desc->irq_data.chip->irq_bus_lock(&desc->irq_data); chip_bus_lock()
127 static inline void chip_bus_sync_unlock(struct irq_desc *desc) chip_bus_sync_unlock() argument
129 if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock)) chip_bus_sync_unlock()
130 desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data); chip_bus_sync_unlock()
142 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
151 irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags) irq_put_desc_busunlock() argument
153 __irq_put_desc_unlock(desc, flags, true); irq_put_desc_busunlock()
163 irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags) irq_put_desc_unlock() argument
165 __irq_put_desc_unlock(desc, flags, false); irq_put_desc_unlock()
196 static inline void kstat_incr_irqs_this_cpu(unsigned int irq, struct irq_desc *desc) kstat_incr_irqs_this_cpu() argument
198 __this_cpu_inc(*desc->kstat_irqs); kstat_incr_irqs_this_cpu()
203 bool irq_pm_check_wakeup(struct irq_desc *desc);
204 void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
205 void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
207 static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; } irq_pm_check_wakeup() argument
209 irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { } irq_pm_install_action() argument
211 irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { } argument

manage.c

35 static void __synchronize_hardirq(struct irq_desc *desc) __synchronize_hardirq() argument
46 while (irqd_irq_inprogress(&desc->irq_data)) __synchronize_hardirq()
50 raw_spin_lock_irqsave(&desc->lock, flags); __synchronize_hardirq()
51 inprogress = irqd_irq_inprogress(&desc->irq_data); __synchronize_hardirq()
52 raw_spin_unlock_irqrestore(&desc->lock, flags); __synchronize_hardirq()
77 struct irq_desc *desc = irq_to_desc(irq); synchronize_hardirq() local
79 if (desc) { synchronize_hardirq()
80 __synchronize_hardirq(desc); synchronize_hardirq()
81 return !atomic_read(&desc->threads_active); synchronize_hardirq()
100 struct irq_desc *desc = irq_to_desc(irq); synchronize_irq() local
102 if (desc) { synchronize_irq()
103 __synchronize_hardirq(desc); synchronize_irq()
109 wait_event(desc->wait_for_threads, synchronize_irq()
110 !atomic_read(&desc->threads_active)); synchronize_irq()
125 struct irq_desc *desc = irq_to_desc(irq); irq_can_set_affinity() local
127 if (!desc || !irqd_can_balance(&desc->irq_data) || irq_can_set_affinity()
128 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) irq_can_set_affinity()
136 * @desc: irq descriptor which has affinity changed irq_set_thread_affinity()
140 * set_cpus_allowed_ptr() here as we hold desc->lock and this
143 void irq_set_thread_affinity(struct irq_desc *desc) irq_set_thread_affinity() argument
145 struct irqaction *action = desc->action; irq_set_thread_affinity()
164 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) irq_copy_pending() argument
166 cpumask_copy(desc->pending_mask, mask); irq_copy_pending()
169 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) irq_get_pending() argument
171 cpumask_copy(mask, desc->pending_mask); irq_get_pending()
177 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } irq_copy_pending() argument
179 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } irq_get_pending() argument
185 struct irq_desc *desc = irq_data_to_desc(data); irq_do_set_affinity() local
195 irq_set_thread_affinity(desc); irq_do_set_affinity()
206 struct irq_desc *desc = irq_data_to_desc(data); irq_set_affinity_locked() local
216 irq_copy_pending(desc, mask); irq_set_affinity_locked()
219 if (desc->affinity_notify) { irq_set_affinity_locked()
220 kref_get(&desc->affinity_notify->kref); irq_set_affinity_locked()
221 schedule_work(&desc->affinity_notify->work); irq_set_affinity_locked()
230 struct irq_desc *desc = irq_to_desc(irq); __irq_set_affinity() local
234 if (!desc) __irq_set_affinity()
237 raw_spin_lock_irqsave(&desc->lock, flags); __irq_set_affinity()
238 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); __irq_set_affinity()
239 raw_spin_unlock_irqrestore(&desc->lock, flags); __irq_set_affinity()
246 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); irq_set_affinity_hint() local
248 if (!desc) irq_set_affinity_hint()
250 desc->affinity_hint = m; irq_set_affinity_hint()
251 irq_put_desc_unlock(desc, flags); irq_set_affinity_hint()
263 struct irq_desc *desc = irq_to_desc(notify->irq); irq_affinity_notify() local
267 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) irq_affinity_notify()
270 raw_spin_lock_irqsave(&desc->lock, flags); irq_affinity_notify()
271 if (irq_move_pending(&desc->irq_data)) irq_affinity_notify()
272 irq_get_pending(cpumask, desc); irq_affinity_notify()
274 cpumask_copy(cpumask, desc->irq_data.affinity); irq_affinity_notify()
275 raw_spin_unlock_irqrestore(&desc->lock, flags); irq_affinity_notify()
298 struct irq_desc *desc = irq_to_desc(irq); irq_set_affinity_notifier() local
305 if (!desc) irq_set_affinity_notifier()
315 raw_spin_lock_irqsave(&desc->lock, flags); irq_set_affinity_notifier()
316 old_notify = desc->affinity_notify; irq_set_affinity_notifier()
317 desc->affinity_notify = notify; irq_set_affinity_notifier()
318 raw_spin_unlock_irqrestore(&desc->lock, flags); irq_set_affinity_notifier()
332 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) setup_affinity() argument
335 int node = desc->irq_data.node; setup_affinity()
345 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { setup_affinity()
346 if (cpumask_intersects(desc->irq_data.affinity, setup_affinity()
348 set = desc->irq_data.affinity; setup_affinity()
350 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); setup_affinity()
361 irq_do_set_affinity(&desc->irq_data, mask, false); setup_affinity()
377 struct irq_desc *desc = irq_to_desc(irq); irq_select_affinity_usr() local
381 raw_spin_lock_irqsave(&desc->lock, flags); irq_select_affinity_usr()
382 ret = setup_affinity(irq, desc, mask); irq_select_affinity_usr()
383 raw_spin_unlock_irqrestore(&desc->lock, flags); irq_select_affinity_usr()
389 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) setup_affinity() argument
395 void __disable_irq(struct irq_desc *desc, unsigned int irq) __disable_irq() argument
397 if (!desc->depth++) __disable_irq()
398 irq_disable(desc); __disable_irq()
404 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); __disable_irq_nosync() local
406 if (!desc) __disable_irq_nosync()
408 __disable_irq(desc, irq); __disable_irq_nosync()
409 irq_put_desc_busunlock(desc, flags); __disable_irq_nosync()
475 void __enable_irq(struct irq_desc *desc, unsigned int irq) __enable_irq() argument
477 switch (desc->depth) { __enable_irq()
483 if (desc->istate & IRQS_SUSPENDED) __enable_irq()
486 irq_settings_set_noprobe(desc); __enable_irq()
487 irq_enable(desc); __enable_irq()
488 check_irq_resend(desc, irq); __enable_irq()
492 desc->depth--; __enable_irq()
505 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
510 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); enable_irq() local
512 if (!desc) enable_irq()
514 if (WARN(!desc->irq_data.chip, enable_irq()
518 __enable_irq(desc, irq); enable_irq()
520 irq_put_desc_busunlock(desc, flags); enable_irq()
526 struct irq_desc *desc = irq_to_desc(irq); set_irq_wake_real() local
529 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) set_irq_wake_real()
532 if (desc->irq_data.chip->irq_set_wake) set_irq_wake_real()
533 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); set_irq_wake_real()
553 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); irq_set_irq_wake() local
556 if (!desc) irq_set_irq_wake()
563 if (desc->wake_depth++ == 0) { irq_set_irq_wake()
566 desc->wake_depth = 0; irq_set_irq_wake()
568 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); irq_set_irq_wake()
571 if (desc->wake_depth == 0) { irq_set_irq_wake()
573 } else if (--desc->wake_depth == 0) { irq_set_irq_wake()
576 desc->wake_depth = 1; irq_set_irq_wake()
578 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); irq_set_irq_wake()
581 irq_put_desc_busunlock(desc, flags); irq_set_irq_wake()
594 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); can_request_irq() local
597 if (!desc) can_request_irq()
600 if (irq_settings_can_request(desc)) { can_request_irq()
601 if (!desc->action || can_request_irq()
602 irqflags & desc->action->flags & IRQF_SHARED) can_request_irq()
605 irq_put_desc_unlock(desc, flags); can_request_irq()
609 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, __irq_set_trigger() argument
612 struct irq_chip *chip = desc->irq_data.chip; __irq_set_trigger()
628 if (!irqd_irq_masked(&desc->irq_data)) __irq_set_trigger()
629 mask_irq(desc); __irq_set_trigger()
630 if (!irqd_irq_disabled(&desc->irq_data)) __irq_set_trigger()
635 ret = chip->irq_set_type(&desc->irq_data, flags); __irq_set_trigger()
640 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); __irq_set_trigger()
641 irqd_set(&desc->irq_data, flags); __irq_set_trigger()
644 flags = irqd_get_trigger_type(&desc->irq_data); __irq_set_trigger()
645 irq_settings_set_trigger_mask(desc, flags); __irq_set_trigger()
646 irqd_clear(&desc->irq_data, IRQD_LEVEL); __irq_set_trigger()
647 irq_settings_clr_level(desc); __irq_set_trigger()
649 irq_settings_set_level(desc); __irq_set_trigger()
650 irqd_set(&desc->irq_data, IRQD_LEVEL); __irq_set_trigger()
660 unmask_irq(desc); __irq_set_trigger()
668 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); irq_set_parent() local
670 if (!desc) irq_set_parent()
673 desc->parent_irq = parent_irq; irq_set_parent()
675 irq_put_desc_unlock(desc, flags); irq_set_parent()
723 static void irq_finalize_oneshot(struct irq_desc *desc, irq_finalize_oneshot() argument
726 if (!(desc->istate & IRQS_ONESHOT)) irq_finalize_oneshot()
729 chip_bus_lock(desc); irq_finalize_oneshot()
730 raw_spin_lock_irq(&desc->lock); irq_finalize_oneshot()
742 * versus "desc->threads_onehsot |= action->thread_mask;" in irq_finalize_oneshot()
746 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { irq_finalize_oneshot()
747 raw_spin_unlock_irq(&desc->lock); irq_finalize_oneshot()
748 chip_bus_sync_unlock(desc); irq_finalize_oneshot()
761 desc->threads_oneshot &= ~action->thread_mask; irq_finalize_oneshot()
763 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && irq_finalize_oneshot()
764 irqd_irq_masked(&desc->irq_data)) irq_finalize_oneshot()
765 unmask_threaded_irq(desc); irq_finalize_oneshot()
768 raw_spin_unlock_irq(&desc->lock); irq_finalize_oneshot()
769 chip_bus_sync_unlock(desc); irq_finalize_oneshot()
777 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) irq_thread_check_affinity() argument
794 raw_spin_lock_irq(&desc->lock); irq_thread_check_affinity()
799 if (desc->irq_data.affinity) irq_thread_check_affinity()
800 cpumask_copy(mask, desc->irq_data.affinity); irq_thread_check_affinity()
803 raw_spin_unlock_irq(&desc->lock); irq_thread_check_affinity()
811 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } irq_thread_check_affinity() argument
821 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) irq_forced_thread_fn() argument
827 irq_finalize_oneshot(desc, action); irq_forced_thread_fn()
837 static irqreturn_t irq_thread_fn(struct irq_desc *desc, irq_thread_fn() argument
843 irq_finalize_oneshot(desc, action); irq_thread_fn()
847 static void wake_threads_waitq(struct irq_desc *desc) wake_threads_waitq() argument
849 if (atomic_dec_and_test(&desc->threads_active)) wake_threads_waitq()
850 wake_up(&desc->wait_for_threads); wake_threads_waitq()
856 struct irq_desc *desc; irq_thread_dtor() local
868 desc = irq_to_desc(action->irq); irq_thread_dtor()
871 * desc->threads_active and wake possible waiters. irq_thread_dtor()
874 wake_threads_waitq(desc); irq_thread_dtor()
876 /* Prevent a stale desc->threads_oneshot */ irq_thread_dtor()
877 irq_finalize_oneshot(desc, action); irq_thread_dtor()
887 struct irq_desc *desc = irq_to_desc(action->irq); irq_thread() local
888 irqreturn_t (*handler_fn)(struct irq_desc *desc, irq_thread()
900 irq_thread_check_affinity(desc, action); irq_thread()
905 irq_thread_check_affinity(desc, action); irq_thread()
907 action_ret = handler_fn(desc, action); irq_thread()
909 atomic_inc(&desc->threads_handled); irq_thread()
911 wake_threads_waitq(desc); irq_thread()
935 struct irq_desc *desc = irq_to_desc(irq); irq_wake_thread() local
939 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) irq_wake_thread()
942 raw_spin_lock_irqsave(&desc->lock, flags); irq_wake_thread()
943 for (action = desc->action; action; action = action->next) { irq_wake_thread()
946 __irq_wake_thread(desc, action); irq_wake_thread()
950 raw_spin_unlock_irqrestore(&desc->lock, flags); irq_wake_thread()
970 static int irq_request_resources(struct irq_desc *desc) irq_request_resources() argument
972 struct irq_data *d = &desc->irq_data; irq_request_resources()
978 static void irq_release_resources(struct irq_desc *desc) irq_release_resources() argument
980 struct irq_data *d = &desc->irq_data; irq_release_resources()
992 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) __setup_irq() argument
999 if (!desc) __setup_irq()
1002 if (desc->irq_data.chip == &no_irq_chip) __setup_irq()
1004 if (!try_module_get(desc->owner)) __setup_irq()
1011 nested = irq_settings_is_nested_thread(desc); __setup_irq()
1024 if (irq_settings_can_thread(desc)) __setup_irq()
1081 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) __setup_irq()
1087 raw_spin_lock_irqsave(&desc->lock, flags); __setup_irq()
1088 old_ptr = &desc->action; __setup_irq()
1138 * desc->threads_active to indicate that the __setup_irq()
1142 * line have completed desc->threads_active becomes __setup_irq()
1147 * interrupt handlers, then desc->threads_active is __setup_irq()
1159 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { __setup_irq()
1182 ret = irq_request_resources(desc); __setup_irq()
1185 new->name, irq, desc->irq_data.chip->name); __setup_irq()
1189 init_waitqueue_head(&desc->wait_for_threads); __setup_irq()
1193 ret = __irq_set_trigger(desc, irq, __setup_irq()
1200 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ __setup_irq()
1202 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); __setup_irq()
1205 irqd_set(&desc->irq_data, IRQD_PER_CPU); __setup_irq()
1206 irq_settings_set_per_cpu(desc); __setup_irq()
1210 desc->istate |= IRQS_ONESHOT; __setup_irq()
1212 if (irq_settings_can_autoenable(desc)) __setup_irq()
1213 irq_startup(desc, true); __setup_irq()
1216 desc->depth = 1; __setup_irq()
1220 irq_settings_set_no_balancing(desc); __setup_irq()
1221 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); __setup_irq()
1225 setup_affinity(irq, desc, mask); __setup_irq()
1229 unsigned int omsk = irq_settings_get_trigger_mask(desc); __setup_irq()
1240 irq_pm_install_action(desc, new); __setup_irq()
1243 desc->irq_count = 0; __setup_irq()
1244 desc->irqs_unhandled = 0; __setup_irq()
1250 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { __setup_irq()
1251 desc->istate &= ~IRQS_SPURIOUS_DISABLED; __setup_irq()
1252 __enable_irq(desc, irq); __setup_irq()
1255 raw_spin_unlock_irqrestore(&desc->lock, flags); __setup_irq()
1264 register_irq_proc(irq, desc); __setup_irq()
1282 raw_spin_unlock_irqrestore(&desc->lock, flags); __setup_irq()
1294 module_put(desc->owner); __setup_irq()
1308 struct irq_desc *desc = irq_to_desc(irq); setup_irq() local
1310 if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) setup_irq()
1312 chip_bus_lock(desc); setup_irq()
1313 retval = __setup_irq(irq, desc, act); setup_irq()
1314 chip_bus_sync_unlock(desc); setup_irq()
1326 struct irq_desc *desc = irq_to_desc(irq); __free_irq() local
1332 if (!desc) __free_irq()
1335 raw_spin_lock_irqsave(&desc->lock, flags); __free_irq()
1341 action_ptr = &desc->action; __free_irq()
1347 raw_spin_unlock_irqrestore(&desc->lock, flags); __free_irq()
1360 irq_pm_remove_action(desc, action); __free_irq()
1363 if (!desc->action) { __free_irq()
1364 irq_shutdown(desc); __free_irq()
1365 irq_release_resources(desc); __free_irq()
1370 if (WARN_ON_ONCE(desc->affinity_hint)) __free_irq()
1371 desc->affinity_hint = NULL; __free_irq()
1374 raw_spin_unlock_irqrestore(&desc->lock, flags); __free_irq()
1402 module_put(desc->owner); __free_irq()
1415 struct irq_desc *desc = irq_to_desc(irq); remove_irq() local
1417 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) remove_irq()
1438 struct irq_desc *desc = irq_to_desc(irq); free_irq() local
1440 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) free_irq()
1444 if (WARN_ON(desc->affinity_notify)) free_irq()
1445 desc->affinity_notify = NULL; free_irq()
1448 chip_bus_lock(desc); free_irq()
1450 chip_bus_sync_unlock(desc); free_irq()
1501 struct irq_desc *desc; request_threaded_irq() local
1518 desc = irq_to_desc(irq); request_threaded_irq()
1519 if (!desc) request_threaded_irq()
1522 if (!irq_settings_can_request(desc) || request_threaded_irq()
1523 WARN_ON(irq_settings_is_per_cpu_devid(desc))) request_threaded_irq()
1542 chip_bus_lock(desc); request_threaded_irq()
1543 retval = __setup_irq(irq, desc, action); request_threaded_irq()
1544 chip_bus_sync_unlock(desc); request_threaded_irq()
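
Note: the __setup_irq()/irq_thread() plumbing above is reached through request_threaded_irq(). A minimal usage sketch, assuming a hypothetical device "my_dev" (the handler names are illustrative, not from the listing):

	#include <linux/interrupt.h>

	static irqreturn_t my_quick_check(int irq, void *dev_id)
	{
		/* hard-IRQ context: ack the device, defer the real work */
		return IRQ_WAKE_THREAD;
	}

	static irqreturn_t my_thread_fn(int irq, void *dev_id)
	{
		/* runs in the irq_thread() kthread set up above; may sleep */
		return IRQ_HANDLED;
	}

	ret = request_threaded_irq(irq, my_quick_check, my_thread_fn,
				   IRQF_ONESHOT, "my-dev", my_dev);

IRQF_ONESHOT is what leads __setup_irq() to set IRQS_ONESHOT (line 1210 above), keeping the line masked until irq_finalize_oneshot() unmasks it.
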
1592 struct irq_desc *desc = irq_to_desc(irq); request_any_context_irq() local
1595 if (!desc) request_any_context_irq()
1598 if (irq_settings_is_nested_thread(desc)) { request_any_context_irq()
1613 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); enable_percpu_irq() local
1615 if (!desc) enable_percpu_irq()
1622 ret = __irq_set_trigger(desc, irq, type); enable_percpu_irq()
1630 irq_percpu_enable(desc, cpu); enable_percpu_irq()
1632 irq_put_desc_unlock(desc, flags); enable_percpu_irq()
1640 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); disable_percpu_irq() local
1642 if (!desc) disable_percpu_irq()
1645 irq_percpu_disable(desc, cpu); disable_percpu_irq()
1646 irq_put_desc_unlock(desc, flags); disable_percpu_irq()
1655 struct irq_desc *desc = irq_to_desc(irq); __free_percpu_irq() local
1661 if (!desc) __free_percpu_irq()
1664 raw_spin_lock_irqsave(&desc->lock, flags); __free_percpu_irq()
1666 action = desc->action; __free_percpu_irq()
1672 if (!cpumask_empty(desc->percpu_enabled)) { __free_percpu_irq()
1674 irq, cpumask_first(desc->percpu_enabled)); __free_percpu_irq()
1679 desc->action = NULL; __free_percpu_irq()
1681 raw_spin_unlock_irqrestore(&desc->lock, flags); __free_percpu_irq()
1685 module_put(desc->owner); __free_percpu_irq()
1689 raw_spin_unlock_irqrestore(&desc->lock, flags); __free_percpu_irq()
1702 struct irq_desc *desc = irq_to_desc(irq); remove_percpu_irq() local
1704 if (desc && irq_settings_is_per_cpu_devid(desc)) remove_percpu_irq()
1722 struct irq_desc *desc = irq_to_desc(irq); free_percpu_irq() local
1724 if (!desc || !irq_settings_is_per_cpu_devid(desc)) free_percpu_irq()
1727 chip_bus_lock(desc); free_percpu_irq()
1729 chip_bus_sync_unlock(desc); free_percpu_irq()
1741 struct irq_desc *desc = irq_to_desc(irq); setup_percpu_irq() local
1744 if (!desc || !irq_settings_is_per_cpu_devid(desc)) setup_percpu_irq()
1746 chip_bus_lock(desc); setup_percpu_irq()
1747 retval = __setup_irq(irq, desc, act); setup_percpu_irq()
1748 chip_bus_sync_unlock(desc); setup_percpu_irq()
1772 struct irq_desc *desc; request_percpu_irq() local
1778 desc = irq_to_desc(irq); request_percpu_irq()
1779 if (!desc || !irq_settings_can_request(desc) || request_percpu_irq()
1780 !irq_settings_is_per_cpu_devid(desc)) request_percpu_irq()
1792 chip_bus_lock(desc); request_percpu_irq()
1793 retval = __setup_irq(irq, desc, action); request_percpu_irq()
1794 chip_bus_sync_unlock(desc); request_percpu_irq()
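
Note: request_percpu_irq() above takes a per-CPU dev_id and, unlike a normal IRQ, is not started automatically; each CPU must enable the line itself via enable_percpu_irq(). A hedged sketch (the handler and the "my_evt" per-CPU allocation are illustrative):

	static irqreturn_t my_percpu_handler(int irq, void *dev_id)
	{
		/* dev_id is this CPU's slice of the per-CPU allocation */
		return IRQ_HANDLED;
	}

	my_evt = alloc_percpu(struct my_event);
	ret = request_percpu_irq(irq, my_percpu_handler, "my-timer", my_evt);

	/* later, on each CPU that should receive the interrupt: */
	enable_percpu_irq(irq, IRQ_TYPE_NONE);
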
1818 struct irq_desc *desc; irq_get_irqchip_state() local
1824 desc = irq_get_desc_buslock(irq, &flags, 0); irq_get_irqchip_state()
1825 if (!desc) irq_get_irqchip_state()
1828 data = irq_desc_get_irq_data(desc); irq_get_irqchip_state()
1844 irq_put_desc_busunlock(desc, flags); irq_get_irqchip_state()
1863 struct irq_desc *desc; irq_set_irqchip_state() local
1869 desc = irq_get_desc_buslock(irq, &flags, 0); irq_set_irqchip_state()
1870 if (!desc) irq_set_irqchip_state()
1873 data = irq_desc_get_irq_data(desc); irq_set_irqchip_state()
1889 irq_put_desc_busunlock(desc, flags); irq_set_irqchip_state()
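
Note: irq_get_irqchip_state()/irq_set_irqchip_state() expose chip-level state (pending, active, masked) through the buslock path above. A short sketch, with "virq" illustrative:

	bool pending;

	ret = irq_get_irqchip_state(virq, IRQCHIP_STATE_PENDING, &pending);
	if (!ret && pending)
		/* e.g. clear a stale pending bit before reusing the line */
		irq_set_irqchip_state(virq, IRQCHIP_STATE_PENDING, false);
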
H A Dmigration.c9 struct irq_desc *desc = irq_data_to_desc(idata); irq_move_masked_irq() local
12 if (likely(!irqd_is_setaffinity_pending(&desc->irq_data))) irq_move_masked_irq()
18 if (!irqd_can_balance(&desc->irq_data)) { irq_move_masked_irq()
23 irqd_clr_move_pending(&desc->irq_data); irq_move_masked_irq()
25 if (unlikely(cpumask_empty(desc->pending_mask))) irq_move_masked_irq()
31 assert_raw_spin_locked(&desc->lock); irq_move_masked_irq()
45 if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) irq_move_masked_irq()
46 irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false); irq_move_masked_irq()
48 cpumask_clear(desc->pending_mask); irq_move_masked_irq()
H A Dspurious.c38 bool irq_wait_for_poll(struct irq_desc *desc) irq_wait_for_poll() argument
42 smp_processor_id(), desc->irq_data.irq)) irq_wait_for_poll()
47 raw_spin_unlock(&desc->lock); irq_wait_for_poll()
48 while (irqd_irq_inprogress(&desc->irq_data)) irq_wait_for_poll()
50 raw_spin_lock(&desc->lock); irq_wait_for_poll()
51 } while (irqd_irq_inprogress(&desc->irq_data)); irq_wait_for_poll()
53 return !irqd_irq_disabled(&desc->irq_data) && desc->action; irq_wait_for_poll()
63 static int try_one_irq(int irq, struct irq_desc *desc, bool force) try_one_irq() argument
68 raw_spin_lock(&desc->lock); try_one_irq()
74 if (irq_settings_is_per_cpu(desc) || try_one_irq()
75 irq_settings_is_nested_thread(desc) || try_one_irq()
76 irq_settings_is_polled(desc)) try_one_irq()
83 if (irqd_irq_disabled(&desc->irq_data) && !force) try_one_irq()
90 action = desc->action; try_one_irq()
96 if (irqd_irq_inprogress(&desc->irq_data)) { try_one_irq()
101 desc->istate |= IRQS_PENDING; try_one_irq()
106 desc->istate |= IRQS_POLL_INPROGRESS; try_one_irq()
108 if (handle_irq_event(desc) == IRQ_HANDLED) try_one_irq()
111 action = desc->action; try_one_irq()
112 } while ((desc->istate & IRQS_PENDING) && action); try_one_irq()
113 desc->istate &= ~IRQS_POLL_INPROGRESS; try_one_irq()
115 raw_spin_unlock(&desc->lock); try_one_irq()
121 struct irq_desc *desc; misrouted_irq() local
129 for_each_irq_desc(i, desc) { for_each_irq_desc()
136 if (try_one_irq(i, desc, false)) for_each_irq_desc()
147 struct irq_desc *desc; poll_spurious_irqs() local
154 for_each_irq_desc(i, desc) { for_each_irq_desc()
161 state = desc->istate; for_each_irq_desc()
167 try_one_irq(i, desc, true); for_each_irq_desc()
192 __report_bad_irq(unsigned int irq, struct irq_desc *desc, __report_bad_irq() argument
209 * We need to take desc->lock here. note_interrupt() is called __report_bad_irq()
210 * w/o desc->lock held, but IRQ_PROGRESS set. We might race __report_bad_irq()
212 * desc->lock here. See synchronize_irq(). __report_bad_irq()
214 raw_spin_lock_irqsave(&desc->lock, flags); __report_bad_irq()
215 action = desc->action; __report_bad_irq()
224 raw_spin_unlock_irqrestore(&desc->lock, flags); __report_bad_irq()
228 report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) report_bad_irq() argument
234 __report_bad_irq(irq, desc, action_ret); report_bad_irq()
239 try_misrouted_irq(unsigned int irq, struct irq_desc *desc, try_misrouted_irq() argument
268 action = desc->action; try_misrouted_irq()
275 void note_interrupt(unsigned int irq, struct irq_desc *desc, note_interrupt() argument
278 if (desc->istate & IRQS_POLL_INPROGRESS || note_interrupt()
279 irq_settings_is_polled(desc)) note_interrupt()
283 report_bad_irq(irq, desc, action_ret); note_interrupt()
324 if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) { note_interrupt()
325 desc->threads_handled_last |= SPURIOUS_DEFERRED; note_interrupt()
340 handled = atomic_read(&desc->threads_handled); note_interrupt()
342 if (handled != desc->threads_handled_last) { note_interrupt()
352 desc->threads_handled_last = handled; note_interrupt()
383 desc->threads_handled_last &= ~SPURIOUS_DEFERRED; note_interrupt()
394 if (time_after(jiffies, desc->last_unhandled + HZ/10)) note_interrupt()
395 desc->irqs_unhandled = 1; note_interrupt()
397 desc->irqs_unhandled++; note_interrupt()
398 desc->last_unhandled = jiffies; note_interrupt()
401 if (unlikely(try_misrouted_irq(irq, desc, action_ret))) { note_interrupt()
404 desc->irqs_unhandled -= ok; note_interrupt()
407 desc->irq_count++; note_interrupt()
408 if (likely(desc->irq_count < 100000)) note_interrupt()
411 desc->irq_count = 0; note_interrupt()
412 if (unlikely(desc->irqs_unhandled > 99900)) { note_interrupt()
416 __report_bad_irq(irq, desc, action_ret); note_interrupt()
421 desc->istate |= IRQS_SPURIOUS_DISABLED; note_interrupt()
422 desc->depth++; note_interrupt()
423 irq_disable(desc); note_interrupt()
428 desc->irqs_unhandled = 0; note_interrupt()
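
Note: condensed, the "nobody cared" heuristic the note_interrupt() fragments above implement is roughly the following (a paraphrase of the control flow, not the literal code):

	desc->irq_count++;
	if (desc->irq_count >= 100000) {            /* sample window */
		if (desc->irqs_unhandled > 99900) { /* ~99.9% unhandled */
			__report_bad_irq(irq, desc, action_ret);
			desc->istate |= IRQS_SPURIOUS_DISABLED;
			desc->depth++;
			irq_disable(desc);          /* shut the line off */
		}
		desc->irq_count = 0;
		desc->irqs_unhandled = 0;
	}
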
H A Dmsi.c149 struct msi_desc *desc) msi_domain_ops_set_desc()
151 arg->desc = desc; msi_domain_ops_set_desc()
257 struct msi_desc *desc; msi_domain_alloc_irqs() local
266 for_each_msi_entry(desc, dev) { for_each_msi_entry()
267 ops->set_desc(&arg, desc); for_each_msi_entry()
273 virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used, for_each_msi_entry()
278 ret = ops->handle_error(domain, desc, ret); for_each_msi_entry()
284 for (i = 0; i < desc->nvec_used; i++) for_each_msi_entry()
285 irq_set_msi_desc_off(virq, i, desc); for_each_msi_entry()
291 for_each_msi_entry(desc, dev) { for_each_msi_entry()
292 if (desc->nvec_used == 1) for_each_msi_entry()
296 virq, virq + desc->nvec_used - 1); for_each_msi_entry()
310 struct msi_desc *desc; msi_domain_free_irqs() local
312 for_each_msi_entry(desc, dev) { for_each_msi_entry()
318 if (desc->irq) { for_each_msi_entry()
319 irq_domain_free_irqs(desc->irq, desc->nvec_used); for_each_msi_entry()
320 desc->irq = 0; for_each_msi_entry()
148 msi_domain_ops_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) msi_domain_ops_set_desc() argument
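
Note: the msi_domain_alloc_irqs() loop above is the generic entry point a bus driver calls once per device, allocating one virq range per MSI descriptor attached to dev; freeing is symmetric:

	ret = msi_domain_alloc_irqs(domain, dev, nvec);
	if (ret)
		return ret;
	/* ... use the allocated virqs via each desc->irq ... */
	msi_domain_free_irqs(domain, dev);
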
/linux-4.1.27/drivers/crypto/caam/
H A Ddesc_constr.h7 #include "desc.h"
16 #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
33 static inline int desc_len(u32 *desc) desc_len() argument
35 return *desc & HDR_DESCLEN_MASK; desc_len()
38 static inline int desc_bytes(void *desc) desc_bytes() argument
40 return desc_len(desc) * CAAM_CMD_SZ; desc_bytes()
43 static inline u32 *desc_end(u32 *desc) desc_end() argument
45 return desc + desc_len(desc); desc_end()
48 static inline void *sh_desc_pdb(u32 *desc) sh_desc_pdb() argument
50 return desc + 1; sh_desc_pdb()
53 static inline void init_desc(u32 *desc, u32 options) init_desc() argument
55 *desc = (options | HDR_ONE) + 1; init_desc()
58 static inline void init_sh_desc(u32 *desc, u32 options) init_sh_desc() argument
61 init_desc(desc, CMD_SHARED_DESC_HDR | options); init_sh_desc()
64 static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) init_sh_desc_pdb() argument
68 init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) | init_sh_desc_pdb()
72 static inline void init_job_desc(u32 *desc, u32 options) init_job_desc() argument
74 init_desc(desc, CMD_DESC_HDR | options); init_job_desc()
77 static inline void append_ptr(u32 *desc, dma_addr_t ptr) append_ptr() argument
79 dma_addr_t *offset = (dma_addr_t *)desc_end(desc); append_ptr()
83 (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ; append_ptr()
86 static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len, init_job_desc_shared() argument
90 init_job_desc(desc, HDR_SHARED | options | init_job_desc_shared()
92 append_ptr(desc, ptr); init_job_desc_shared()
95 static inline void append_data(u32 *desc, void *data, int len) append_data() argument
97 u32 *offset = desc_end(desc); append_data()
102 (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; append_data()
105 static inline void append_cmd(u32 *desc, u32 command) append_cmd() argument
107 u32 *cmd = desc_end(desc); append_cmd()
111 (*desc)++; append_cmd()
116 static inline void append_u64(u32 *desc, u64 data) append_u64() argument
118 u32 *offset = desc_end(desc); append_u64()
123 (*desc) += 2; append_u64()
127 static inline u32 *write_cmd(u32 *desc, u32 command) write_cmd() argument
129 *desc = command; write_cmd()
131 return desc + 1; write_cmd()
134 static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len, append_cmd_ptr() argument
137 append_cmd(desc, command | len); append_cmd_ptr()
138 append_ptr(desc, ptr); append_cmd_ptr()
142 static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr, append_cmd_ptr_extlen() argument
145 append_cmd(desc, command); append_cmd_ptr_extlen()
147 append_ptr(desc, ptr); append_cmd_ptr_extlen()
148 append_cmd(desc, len); append_cmd_ptr_extlen()
151 static inline void append_cmd_data(u32 *desc, void *data, int len, append_cmd_data() argument
154 append_cmd(desc, command | IMMEDIATE | len); append_cmd_data()
155 append_data(desc, data, len); append_cmd_data()
159 static inline u32 *append_##cmd(u32 *desc, u32 options) \
161 u32 *cmd = desc_end(desc); \
163 append_cmd(desc, CMD_##op | options); \
169 static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd) set_jump_tgt_here() argument
171 *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc)); set_jump_tgt_here()
174 static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd) set_move_tgt_here() argument
177 *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & set_move_tgt_here()
182 static inline void append_##cmd(u32 *desc, u32 options) \
185 append_cmd(desc, CMD_##op | options); \
190 static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
193 append_cmd(desc, CMD_##op | len | options); \
202 static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
206 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
213 static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len, append_store() argument
220 append_cmd(desc, CMD_STORE | options | len); append_store()
227 append_ptr(desc, ptr); append_store()
231 static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
237 append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \
239 append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
245 static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
249 append_cmd_data(desc, data, len, CMD_##op | options); \
255 static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
259 append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \
269 static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
274 append_##cmd##_extlen(desc, ptr, len, options); \
276 append_##cmd##_intlen(desc, ptr, len, options); \
286 static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
291 append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
292 append_data(desc, data, data_len); \
297 static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
301 append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
302 append_cmd(desc, immediate); \
310 #define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
311 append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
314 #define append_math_add(desc, dest, src0, src1, len) \
315 APPEND_MATH(ADD, desc, dest, src0, src1, len)
316 #define append_math_sub(desc, dest, src0, src1, len) \
317 APPEND_MATH(SUB, desc, dest, src0, src1, len)
318 #define append_math_add_c(desc, dest, src0, src1, len) \
319 APPEND_MATH(ADDC, desc, dest, src0, src1, len)
320 #define append_math_sub_b(desc, dest, src0, src1, len) \
321 APPEND_MATH(SUBB, desc, dest, src0, src1, len)
322 #define append_math_and(desc, dest, src0, src1, len) \
323 APPEND_MATH(AND, desc, dest, src0, src1, len)
324 #define append_math_or(desc, dest, src0, src1, len) \
325 APPEND_MATH(OR, desc, dest, src0, src1, len)
326 #define append_math_xor(desc, dest, src0, src1, len) \
327 APPEND_MATH(XOR, desc, dest, src0, src1, len)
328 #define append_math_lshift(desc, dest, src0, src1, len) \
329 APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
330 #define append_math_rshift(desc, dest, src0, src1, len) \
331 APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
332 #define append_math_ldshift(desc, dest, src0, src1, len) \
333 APPEND_MATH(SHLD, desc, dest, src0, src1, len)
336 #define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
338 APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
339 append_cmd(desc, data); \
342 #define append_math_add_imm_u32(desc, dest, src0, src1, data) \
343 APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
344 #define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
345 APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
346 #define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
347 APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
348 #define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
349 APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
350 #define append_math_and_imm_u32(desc, dest, src0, src1, data) \
351 APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
352 #define append_math_or_imm_u32(desc, dest, src0, src1, data) \
353 APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
354 #define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
355 APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
356 #define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
357 APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
358 #define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
359 APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
362 #define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \
365 APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \
368 append_u64(desc, data); \
370 append_u32(desc, data); \
373 #define append_math_add_imm_u64(desc, dest, src0, src1, data) \
374 APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data)
375 #define append_math_sub_imm_u64(desc, dest, src0, src1, data) \
376 APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data)
377 #define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \
378 APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data)
379 #define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \
380 APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data)
381 #define append_math_and_imm_u64(desc, dest, src0, src1, data) \
382 APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data)
383 #define append_math_or_imm_u64(desc, dest, src0, src1, data) \
384 APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data)
385 #define append_math_xor_imm_u64(desc, dest, src0, src1, data) \
386 APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data)
387 #define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \
388 APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
389 #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
390 APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
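
Note: taken together, the constructors above build a descriptor in place — init_job_desc() writes the header word, and each append_*() writes at desc_end() and bumps the length stored in the header. A sketch modeled on the gen_split_key() fragments below (the buffer size and option bits are illustrative):

	u32 *desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2,
			    GFP_KERNEL | GFP_DMA);

	init_job_desc(desc, 0);                 /* header; desc_len() == 1 */
	append_key(desc, key_dma, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
	append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT);
	append_fifo_store(desc, out_dma, out_len,
			  LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
	/* desc_bytes(desc) is the finished size to hand to the DMA API */
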
H A Dkey_gen.c13 void split_key_done(struct device *dev, u32 *desc, u32 err, split_key_done() argument
48 u32 *desc; gen_split_key() local
53 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); gen_split_key()
54 if (!desc) { gen_split_key()
73 init_job_desc(desc, 0); gen_split_key()
74 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); gen_split_key()
77 append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT); gen_split_key()
83 append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | gen_split_key()
90 append_fifo_store(desc, dma_addr_out, split_key_len, gen_split_key()
97 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); gen_split_key()
103 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); gen_split_key()
120 kfree(desc); gen_split_key()
H A Djr.h13 int caam_jr_enqueue(struct device *dev, u32 *desc,
14 void (*cbk)(struct device *dev, u32 *desc, u32 status,
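
Note: caam_jr_enqueue() is asynchronous; completion is reported through the cbk argument, as with split_key_done() above. A hedged sketch of the usual wait pattern (the result struct and callback name are illustrative):

	struct jr_result {
		struct completion completion;
		u32 err;
	};

	static void my_done(struct device *dev, u32 *desc, u32 err,
			    void *context)
	{
		struct jr_result *res = context;

		res->err = err;             /* CAAM status word, 0 on success */
		complete(&res->completion);
	}

	init_completion(&result.completion);
	ret = caam_jr_enqueue(jrdev, desc, my_done, &result);
	if (!ret) {
		wait_for_completion_interruptible(&result.completion);
		ret = result.err ? -EIO : 0;
	}
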
H A Dcaamalg.c33 * So, a job desc looks like:
114 static inline void append_dec_op1(u32 *desc, u32 type) append_dec_op1() argument
120 append_operation(desc, type | OP_ALG_AS_INITFINAL | append_dec_op1()
125 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); append_dec_op1()
126 append_operation(desc, type | OP_ALG_AS_INITFINAL | append_dec_op1()
128 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); append_dec_op1()
129 set_jump_tgt_here(desc, jump_cmd); append_dec_op1()
130 append_operation(desc, type | OP_ALG_AS_INITFINAL | append_dec_op1()
132 set_jump_tgt_here(desc, uncond_jump_cmd); append_dec_op1()
139 static inline void aead_append_src_dst(u32 *desc, u32 msg_type) aead_append_src_dst() argument
141 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); aead_append_src_dst()
142 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | aead_append_src_dst()
149 static inline void aead_append_ld_iv(u32 *desc, int ivsize, int ivoffset) aead_append_ld_iv() argument
151 append_seq_load(desc, ivsize, LDST_CLASS_1_CCB | aead_append_ld_iv()
154 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_CLASS2INFIFO | aead_append_ld_iv()
162 static inline void ablkcipher_append_src_dst(u32 *desc) ablkcipher_append_src_dst() argument
164 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ablkcipher_append_src_dst()
165 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ablkcipher_append_src_dst()
166 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | ablkcipher_append_src_dst()
168 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); ablkcipher_append_src_dst()
200 static void append_key_aead(u32 *desc, struct caam_ctx *ctx, append_key_aead() argument
215 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, append_key_aead()
218 append_key_as_imm(desc, (void *)ctx->key + append_key_aead()
222 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | append_key_aead()
224 append_key(desc, ctx->key_dma + ctx->split_key_pad_len, append_key_aead()
232 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | append_key_aead()
234 append_move(desc, append_key_aead()
242 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, init_sh_desc_key_aead() argument
248 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); init_sh_desc_key_aead()
251 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | init_sh_desc_key_aead()
254 append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); init_sh_desc_key_aead()
256 set_jump_tgt_here(desc, key_jump_cmd); init_sh_desc_key_aead()
266 u32 *desc; aead_null_set_sh_desc() local
277 desc = ctx->sh_desc_enc; aead_null_set_sh_desc()
279 init_sh_desc(desc, HDR_SHARE_SERIAL); aead_null_set_sh_desc()
282 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | aead_null_set_sh_desc()
285 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, aead_null_set_sh_desc()
289 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | aead_null_set_sh_desc()
291 set_jump_tgt_here(desc, key_jump_cmd); aead_null_set_sh_desc()
294 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); aead_null_set_sh_desc()
300 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); aead_null_set_sh_desc()
303 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | aead_null_set_sh_desc()
307 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); aead_null_set_sh_desc()
308 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); aead_null_set_sh_desc()
315 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | aead_null_set_sh_desc()
318 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | aead_null_set_sh_desc()
324 append_operation(desc, ctx->class2_alg_type | aead_null_set_sh_desc()
328 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); aead_null_set_sh_desc()
330 set_move_tgt_here(desc, read_move_cmd); aead_null_set_sh_desc()
331 set_move_tgt_here(desc, write_move_cmd); aead_null_set_sh_desc()
332 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); aead_null_set_sh_desc()
333 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | aead_null_set_sh_desc()
337 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | aead_null_set_sh_desc()
340 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, aead_null_set_sh_desc()
341 desc_bytes(desc), aead_null_set_sh_desc()
350 DUMP_PREFIX_ADDRESS, 16, 4, desc, aead_null_set_sh_desc()
351 desc_bytes(desc), 1); aead_null_set_sh_desc()
363 desc = ctx->sh_desc_dec; aead_null_set_sh_desc()
366 init_sh_desc(desc, HDR_SHARE_SERIAL); aead_null_set_sh_desc()
369 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | aead_null_set_sh_desc()
372 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, aead_null_set_sh_desc()
376 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | aead_null_set_sh_desc()
378 set_jump_tgt_here(desc, key_jump_cmd); aead_null_set_sh_desc()
381 append_operation(desc, ctx->class2_alg_type | aead_null_set_sh_desc()
385 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, aead_null_set_sh_desc()
388 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); aead_null_set_sh_desc()
389 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); aead_null_set_sh_desc()
392 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | aead_null_set_sh_desc()
396 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); aead_null_set_sh_desc()
397 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); aead_null_set_sh_desc()
404 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | aead_null_set_sh_desc()
407 write_move_cmd = append_move(desc, MOVE_SRC_MATH2 | aead_null_set_sh_desc()
413 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); aead_null_set_sh_desc()
419 jump_cmd = append_jump(desc, JUMP_TEST_ALL); aead_null_set_sh_desc()
420 set_jump_tgt_here(desc, jump_cmd); aead_null_set_sh_desc()
422 set_move_tgt_here(desc, read_move_cmd); aead_null_set_sh_desc()
423 set_move_tgt_here(desc, write_move_cmd); aead_null_set_sh_desc()
424 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); aead_null_set_sh_desc()
425 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | aead_null_set_sh_desc()
427 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); aead_null_set_sh_desc()
430 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | aead_null_set_sh_desc()
433 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, aead_null_set_sh_desc()
434 desc_bytes(desc), aead_null_set_sh_desc()
443 DUMP_PREFIX_ADDRESS, 16, 4, desc, aead_null_set_sh_desc()
444 desc_bytes(desc), 1); aead_null_set_sh_desc()
460 u32 *desc; aead_set_sh_desc() local
500 desc = ctx->sh_desc_enc; aead_set_sh_desc()
503 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); aead_set_sh_desc()
506 append_operation(desc, ctx->class2_alg_type | aead_set_sh_desc()
510 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); aead_set_sh_desc()
513 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); aead_set_sh_desc()
516 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); aead_set_sh_desc()
519 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | aead_set_sh_desc()
521 aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off); aead_set_sh_desc()
525 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | aead_set_sh_desc()
532 append_operation(desc, ctx->class1_alg_type | aead_set_sh_desc()
536 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); aead_set_sh_desc()
537 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); aead_set_sh_desc()
538 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); aead_set_sh_desc()
541 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | aead_set_sh_desc()
544 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, aead_set_sh_desc()
545 desc_bytes(desc), aead_set_sh_desc()
553 DUMP_PREFIX_ADDRESS, 16, 4, desc, aead_set_sh_desc()
554 desc_bytes(desc), 1); aead_set_sh_desc()
569 desc = ctx->sh_desc_dec; aead_set_sh_desc()
572 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); aead_set_sh_desc()
575 append_operation(desc, ctx->class2_alg_type | aead_set_sh_desc()
579 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, aead_set_sh_desc()
582 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); aead_set_sh_desc()
583 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); aead_set_sh_desc()
586 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | aead_set_sh_desc()
589 aead_append_ld_iv(desc, tfm->ivsize, ctx1_iv_off); aead_set_sh_desc()
593 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | aead_set_sh_desc()
601 append_operation(desc, ctx->class1_alg_type | aead_set_sh_desc()
604 append_dec_op1(desc, ctx->class1_alg_type); aead_set_sh_desc()
607 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); aead_set_sh_desc()
608 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); aead_set_sh_desc()
609 aead_append_src_dst(desc, FIFOLD_TYPE_MSG); aead_set_sh_desc()
612 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | aead_set_sh_desc()
615 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, aead_set_sh_desc()
616 desc_bytes(desc), aead_set_sh_desc()
624 DUMP_PREFIX_ADDRESS, 16, 4, desc, aead_set_sh_desc()
625 desc_bytes(desc), 1); aead_set_sh_desc()
640 desc = ctx->sh_desc_givenc; aead_set_sh_desc()
643 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); aead_set_sh_desc()
649 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | aead_set_sh_desc()
651 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); aead_set_sh_desc()
652 append_move(desc, MOVE_WAITCOMP | aead_set_sh_desc()
656 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); aead_set_sh_desc()
659 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | aead_set_sh_desc()
664 append_operation(desc, ctx->class2_alg_type | aead_set_sh_desc()
668 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); aead_set_sh_desc()
671 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); aead_set_sh_desc()
674 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | aead_set_sh_desc()
680 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | aead_set_sh_desc()
682 append_load_imm_u32(desc, tfm->ivsize, LDST_CLASS_2_CCB | aead_set_sh_desc()
687 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | aead_set_sh_desc()
694 append_operation(desc, ctx->class1_alg_type | aead_set_sh_desc()
698 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); aead_set_sh_desc()
701 append_seq_fifo_load(desc, tfm->ivsize, aead_set_sh_desc()
705 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); aead_set_sh_desc()
706 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); aead_set_sh_desc()
709 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | aead_set_sh_desc()
712 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, aead_set_sh_desc()
713 desc_bytes(desc), aead_set_sh_desc()
721 DUMP_PREFIX_ADDRESS, 16, 4, desc, aead_set_sh_desc()
722 desc_bytes(desc), 1); aead_set_sh_desc()
747 u32 *desc; gcm_set_sh_desc() local
761 desc = ctx->sh_desc_enc; gcm_set_sh_desc()
763 init_sh_desc(desc, HDR_SHARE_SERIAL); gcm_set_sh_desc()
766 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | gcm_set_sh_desc()
769 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, gcm_set_sh_desc()
772 append_key(desc, ctx->key_dma, ctx->enckeylen, gcm_set_sh_desc()
774 set_jump_tgt_here(desc, key_jump_cmd); gcm_set_sh_desc()
777 append_operation(desc, ctx->class1_alg_type | gcm_set_sh_desc()
781 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); gcm_set_sh_desc()
784 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); gcm_set_sh_desc()
787 append_math_sub(desc, REG1, REG2, REG3, CAAM_CMD_SZ); gcm_set_sh_desc()
790 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); gcm_set_sh_desc()
791 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
794 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | gcm_set_sh_desc()
798 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); gcm_set_sh_desc()
799 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
803 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
805 set_jump_tgt_here(desc, zero_assoc_jump_cmd1); gcm_set_sh_desc()
807 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); gcm_set_sh_desc()
810 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); gcm_set_sh_desc()
813 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
817 append_jump(desc, JUMP_TEST_ALL | 7); gcm_set_sh_desc()
820 set_jump_tgt_here(desc, zero_payload_jump_cmd); gcm_set_sh_desc()
823 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); gcm_set_sh_desc()
824 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
827 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | gcm_set_sh_desc()
831 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
835 append_jump(desc, JUMP_TEST_ALL | 2); gcm_set_sh_desc()
838 set_jump_tgt_here(desc, zero_assoc_jump_cmd2); gcm_set_sh_desc()
839 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | gcm_set_sh_desc()
844 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | gcm_set_sh_desc()
847 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, gcm_set_sh_desc()
848 desc_bytes(desc), gcm_set_sh_desc()
856 DUMP_PREFIX_ADDRESS, 16, 4, desc, gcm_set_sh_desc()
857 desc_bytes(desc), 1); gcm_set_sh_desc()
869 desc = ctx->sh_desc_dec; gcm_set_sh_desc()
871 init_sh_desc(desc, HDR_SHARE_SERIAL); gcm_set_sh_desc()
874 key_jump_cmd = append_jump(desc, JUMP_JSL | gcm_set_sh_desc()
878 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, gcm_set_sh_desc()
881 append_key(desc, ctx->key_dma, ctx->enckeylen, gcm_set_sh_desc()
883 set_jump_tgt_here(desc, key_jump_cmd); gcm_set_sh_desc()
886 append_operation(desc, ctx->class1_alg_type | gcm_set_sh_desc()
890 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, gcm_set_sh_desc()
894 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); gcm_set_sh_desc()
895 append_math_sub(desc, REG1, REG3, REG2, CAAM_CMD_SZ); gcm_set_sh_desc()
898 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | gcm_set_sh_desc()
902 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); gcm_set_sh_desc()
903 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
906 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); gcm_set_sh_desc()
908 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
911 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
913 set_jump_tgt_here(desc, zero_assoc_jump_cmd1); gcm_set_sh_desc()
915 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); gcm_set_sh_desc()
918 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); gcm_set_sh_desc()
921 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
925 append_jump(desc, JUMP_TEST_ALL | 4); gcm_set_sh_desc()
928 set_jump_tgt_here(desc, zero_payload_jump_cmd); gcm_set_sh_desc()
931 append_math_add(desc, VARSEQINLEN, ZERO, REG1, CAAM_CMD_SZ); gcm_set_sh_desc()
932 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
935 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
937 set_jump_tgt_here(desc, zero_assoc_jump_cmd2); gcm_set_sh_desc()
940 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | gcm_set_sh_desc()
943 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, gcm_set_sh_desc()
944 desc_bytes(desc), gcm_set_sh_desc()
952 DUMP_PREFIX_ADDRESS, 16, 4, desc, gcm_set_sh_desc()
953 desc_bytes(desc), 1); gcm_set_sh_desc()
976 u32 *desc; rfc4106_set_sh_desc() local
991 desc = ctx->sh_desc_enc; rfc4106_set_sh_desc()
993 init_sh_desc(desc, HDR_SHARE_SERIAL); rfc4106_set_sh_desc()
996 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | rfc4106_set_sh_desc()
999 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, rfc4106_set_sh_desc()
1002 append_key(desc, ctx->key_dma, ctx->enckeylen, rfc4106_set_sh_desc()
1004 set_jump_tgt_here(desc, key_jump_cmd); rfc4106_set_sh_desc()
1007 append_operation(desc, ctx->class1_alg_type | rfc4106_set_sh_desc()
1011 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); rfc4106_set_sh_desc()
1012 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1015 append_math_sub_imm_u32(desc, REG2, SEQINLEN, IMM, tfm->ivsize); rfc4106_set_sh_desc()
1018 append_math_sub(desc, VARSEQINLEN, REG2, REG3, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1021 append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), rfc4106_set_sh_desc()
1024 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | rfc4106_set_sh_desc()
1028 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4106_set_sh_desc()
1032 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1035 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); rfc4106_set_sh_desc()
1038 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4106_set_sh_desc()
1042 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | rfc4106_set_sh_desc()
1045 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, rfc4106_set_sh_desc()
1046 desc_bytes(desc), rfc4106_set_sh_desc()
1054 DUMP_PREFIX_ADDRESS, 16, 4, desc, rfc4106_set_sh_desc()
1055 desc_bytes(desc), 1); rfc4106_set_sh_desc()
1067 desc = ctx->sh_desc_dec; rfc4106_set_sh_desc()
1069 init_sh_desc(desc, HDR_SHARE_SERIAL); rfc4106_set_sh_desc()
1072 key_jump_cmd = append_jump(desc, JUMP_JSL | rfc4106_set_sh_desc()
1075 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, rfc4106_set_sh_desc()
1078 append_key(desc, ctx->key_dma, ctx->enckeylen, rfc4106_set_sh_desc()
1080 set_jump_tgt_here(desc, key_jump_cmd); rfc4106_set_sh_desc()
1083 append_operation(desc, ctx->class1_alg_type | rfc4106_set_sh_desc()
1087 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, rfc4106_set_sh_desc()
1091 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1092 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1095 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1098 append_fifo_load_as_imm(desc, (void *)(ctx->key + ctx->enckeylen), rfc4106_set_sh_desc()
1101 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_CLASS1 | rfc4106_set_sh_desc()
1105 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4106_set_sh_desc()
1109 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1112 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); rfc4106_set_sh_desc()
1115 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4106_set_sh_desc()
1119 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | rfc4106_set_sh_desc()
1122 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, rfc4106_set_sh_desc()
1123 desc_bytes(desc), rfc4106_set_sh_desc()
1131 DUMP_PREFIX_ADDRESS, 16, 4, desc, rfc4106_set_sh_desc()
1132 desc_bytes(desc), 1); rfc4106_set_sh_desc()
1146 desc = ctx->sh_desc_givenc; rfc4106_set_sh_desc()
1148 init_sh_desc(desc, HDR_SHARE_SERIAL); rfc4106_set_sh_desc()
1151 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | rfc4106_set_sh_desc()
1154 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, rfc4106_set_sh_desc()
1157 append_key(desc, ctx->key_dma, ctx->enckeylen, rfc4106_set_sh_desc()
1159 set_jump_tgt_here(desc, key_jump_cmd); rfc4106_set_sh_desc()
1165 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | rfc4106_set_sh_desc()
1167 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); rfc4106_set_sh_desc()
1168 move_cmd = append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_DESCBUF | rfc4106_set_sh_desc()
1170 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); rfc4106_set_sh_desc()
1173 write_iv_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_OUTFIFO | rfc4106_set_sh_desc()
1177 append_operation(desc, ctx->class1_alg_type | rfc4106_set_sh_desc()
1181 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); rfc4106_set_sh_desc()
1184 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1187 append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1190 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_IV | rfc4106_set_sh_desc()
1193 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); rfc4106_set_sh_desc()
1194 set_move_tgt_here(desc, move_cmd); rfc4106_set_sh_desc()
1195 set_move_tgt_here(desc, write_iv_cmd); rfc4106_set_sh_desc()
1197 append_cmd(desc, 0x00000000); rfc4106_set_sh_desc()
1198 append_cmd(desc, 0x00000000); rfc4106_set_sh_desc()
1202 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP); rfc4106_set_sh_desc()
1205 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4106_set_sh_desc()
1209 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1212 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); rfc4106_set_sh_desc()
1215 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4106_set_sh_desc()
1219 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | rfc4106_set_sh_desc()
1222 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, rfc4106_set_sh_desc()
1223 desc_bytes(desc), rfc4106_set_sh_desc()
1232 DUMP_PREFIX_ADDRESS, 16, 4, desc, rfc4106_set_sh_desc()
1233 desc_bytes(desc), 1); rfc4106_set_sh_desc()
1258 u32 *desc; rfc4543_set_sh_desc() local
1273 desc = ctx->sh_desc_enc; rfc4543_set_sh_desc()
1275 init_sh_desc(desc, HDR_SHARE_SERIAL); rfc4543_set_sh_desc()
1278 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | rfc4543_set_sh_desc()
1281 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, rfc4543_set_sh_desc()
1284 append_key(desc, ctx->key_dma, ctx->enckeylen, rfc4543_set_sh_desc()
1286 set_jump_tgt_here(desc, key_jump_cmd); rfc4543_set_sh_desc()
1289 append_operation(desc, ctx->class1_alg_type | rfc4543_set_sh_desc()
1293 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | rfc4543_set_sh_desc()
1297 append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | rfc4543_set_sh_desc()
1301 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1305 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1309 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); rfc4543_set_sh_desc()
1312 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1315 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | rfc4543_set_sh_desc()
1318 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); rfc4543_set_sh_desc()
1319 set_move_tgt_here(desc, write_iv_cmd); rfc4543_set_sh_desc()
1321 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1322 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1326 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4543_set_sh_desc()
1330 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1333 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1340 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | rfc4543_set_sh_desc()
1342 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1346 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | rfc4543_set_sh_desc()
1348 set_move_tgt_here(desc, write_aad_cmd); rfc4543_set_sh_desc()
1350 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1351 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1355 aead_append_src_dst(desc, FIFOLD_TYPE_AAD); rfc4543_set_sh_desc()
1357 set_move_tgt_here(desc, read_move_cmd); rfc4543_set_sh_desc()
1358 set_move_tgt_here(desc, write_move_cmd); rfc4543_set_sh_desc()
1359 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); rfc4543_set_sh_desc()
1361 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); rfc4543_set_sh_desc()
1364 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | rfc4543_set_sh_desc()
1367 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, rfc4543_set_sh_desc()
1368 desc_bytes(desc), rfc4543_set_sh_desc()
1376 DUMP_PREFIX_ADDRESS, 16, 4, desc, rfc4543_set_sh_desc()
1377 desc_bytes(desc), 1); rfc4543_set_sh_desc()
1389 desc = ctx->sh_desc_dec; rfc4543_set_sh_desc()
1391 init_sh_desc(desc, HDR_SHARE_SERIAL); rfc4543_set_sh_desc()
1394 key_jump_cmd = append_jump(desc, JUMP_JSL | rfc4543_set_sh_desc()
1397 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, rfc4543_set_sh_desc()
1400 append_key(desc, ctx->key_dma, ctx->enckeylen, rfc4543_set_sh_desc()
1402 set_jump_tgt_here(desc, key_jump_cmd); rfc4543_set_sh_desc()
1405 append_operation(desc, ctx->class1_alg_type | rfc4543_set_sh_desc()
1409 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_WORD_DECO_MATH1 | rfc4543_set_sh_desc()
1413 append_jump(desc, JUMP_TEST_ALL | JUMP_COND_CALM | rfc4543_set_sh_desc()
1417 append_math_sub_imm_u32(desc, REG3, SEQINLEN, IMM, ctx->authsize); rfc4543_set_sh_desc()
1420 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1424 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1428 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1429 append_math_sub(desc, VARSEQINLEN, REG3, REG2, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1436 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | rfc4543_set_sh_desc()
1438 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1442 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | rfc4543_set_sh_desc()
1445 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); rfc4543_set_sh_desc()
1446 set_move_tgt_here(desc, write_iv_cmd); rfc4543_set_sh_desc()
1448 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1449 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1453 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4543_set_sh_desc()
1457 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1460 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1463 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | rfc4543_set_sh_desc()
1465 set_move_tgt_here(desc, write_aad_cmd); rfc4543_set_sh_desc()
1467 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1468 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1472 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); rfc4543_set_sh_desc()
1475 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | rfc4543_set_sh_desc()
1478 set_move_tgt_here(desc, read_move_cmd); rfc4543_set_sh_desc()
1479 set_move_tgt_here(desc, write_move_cmd); rfc4543_set_sh_desc()
1480 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); rfc4543_set_sh_desc()
1482 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); rfc4543_set_sh_desc()
1483 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); rfc4543_set_sh_desc()
1486 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | rfc4543_set_sh_desc()
1489 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, rfc4543_set_sh_desc()
1490 desc_bytes(desc), rfc4543_set_sh_desc()
1498 DUMP_PREFIX_ADDRESS, 16, 4, desc, rfc4543_set_sh_desc()
1499 desc_bytes(desc), 1); rfc4543_set_sh_desc()
1512 desc = ctx->sh_desc_givenc; rfc4543_set_sh_desc()
1514 init_sh_desc(desc, HDR_SHARE_SERIAL); rfc4543_set_sh_desc()
1517 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | rfc4543_set_sh_desc()
1520 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, rfc4543_set_sh_desc()
1523 append_key(desc, ctx->key_dma, ctx->enckeylen, rfc4543_set_sh_desc()
1525 set_jump_tgt_here(desc, key_jump_cmd); rfc4543_set_sh_desc()
1531 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | rfc4543_set_sh_desc()
1533 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); rfc4543_set_sh_desc()
1535 append_move(desc, MOVE_SRC_INFIFO | MOVE_DEST_MATH1 | rfc4543_set_sh_desc()
1537 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); rfc4543_set_sh_desc()
1540 write_iv_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1544 write_aad_cmd = append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1548 append_move(desc, MOVE_SRC_MATH1 | MOVE_DEST_OUTFIFO | rfc4543_set_sh_desc()
1552 append_operation(desc, ctx->class1_alg_type | rfc4543_set_sh_desc()
1556 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); rfc4543_set_sh_desc()
1559 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG3, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1562 append_math_add(desc, VARSEQOUTLEN, REG3, REG0, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1569 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | rfc4543_set_sh_desc()
1571 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1575 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | rfc4543_set_sh_desc()
1578 append_data(desc, (void *)(ctx->key + ctx->enckeylen), 4); rfc4543_set_sh_desc()
1579 set_move_tgt_here(desc, write_iv_cmd); rfc4543_set_sh_desc()
1581 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1582 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1586 append_seq_fifo_load(desc, tfm->ivsize, FIFOLD_CLASS_SKIP); rfc4543_set_sh_desc()
1589 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4543_set_sh_desc()
1593 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1596 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | rfc4543_set_sh_desc()
1598 set_move_tgt_here(desc, write_aad_cmd); rfc4543_set_sh_desc()
1600 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1601 append_cmd(desc, 0x00000000); rfc4543_set_sh_desc()
1605 aead_append_src_dst(desc, FIFOLD_TYPE_AAD); rfc4543_set_sh_desc()
1607 set_move_tgt_here(desc, read_move_cmd); rfc4543_set_sh_desc()
1608 set_move_tgt_here(desc, write_move_cmd); rfc4543_set_sh_desc()
1609 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); rfc4543_set_sh_desc()
1611 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); rfc4543_set_sh_desc()
1614 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | rfc4543_set_sh_desc()
1617 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, rfc4543_set_sh_desc()
1618 desc_bytes(desc), rfc4543_set_sh_desc()
1627 DUMP_PREFIX_ADDRESS, 16, 4, desc, rfc4543_set_sh_desc()
1628 desc_bytes(desc), 1); rfc4543_set_sh_desc()
1836 u32 *desc; ablkcipher_setkey() local
1877 desc = ctx->sh_desc_enc; ablkcipher_setkey()
1878 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ablkcipher_setkey()
1880 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ablkcipher_setkey()
1884 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, ablkcipher_setkey()
1891 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | ablkcipher_setkey()
1893 append_move(desc, MOVE_WAITCOMP | ablkcipher_setkey()
1900 set_jump_tgt_here(desc, key_jump_cmd); ablkcipher_setkey()
1903 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT | ablkcipher_setkey()
1908 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | ablkcipher_setkey()
1915 append_operation(desc, ctx->class1_alg_type | ablkcipher_setkey()
1919 ablkcipher_append_src_dst(desc); ablkcipher_setkey()
1921 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, ablkcipher_setkey()
1922 desc_bytes(desc), ablkcipher_setkey()
1931 DUMP_PREFIX_ADDRESS, 16, 4, desc, ablkcipher_setkey()
1932 desc_bytes(desc), 1); ablkcipher_setkey()
1935 desc = ctx->sh_desc_dec; ablkcipher_setkey()
1937 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ablkcipher_setkey()
1939 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ablkcipher_setkey()
1943 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, ablkcipher_setkey()
1950 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | ablkcipher_setkey()
1952 append_move(desc, MOVE_WAITCOMP | ablkcipher_setkey()
1959 set_jump_tgt_here(desc, key_jump_cmd); ablkcipher_setkey()
1962 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT | ablkcipher_setkey()
1967 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | ablkcipher_setkey()
1975 append_operation(desc, ctx->class1_alg_type | ablkcipher_setkey()
1978 append_dec_op1(desc, ctx->class1_alg_type); ablkcipher_setkey()
1981 ablkcipher_append_src_dst(desc); ablkcipher_setkey()
1983 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, ablkcipher_setkey()
1984 desc_bytes(desc), ablkcipher_setkey()
1994 DUMP_PREFIX_ADDRESS, 16, 4, desc, ablkcipher_setkey()
1995 desc_bytes(desc), 1); ablkcipher_setkey()
1998 desc = ctx->sh_desc_givenc; ablkcipher_setkey()
2000 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ablkcipher_setkey()
2002 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ablkcipher_setkey()
2006 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, ablkcipher_setkey()
2013 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | ablkcipher_setkey()
2015 append_move(desc, MOVE_WAITCOMP | ablkcipher_setkey()
2021 set_jump_tgt_here(desc, key_jump_cmd); ablkcipher_setkey()
2027 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | ablkcipher_setkey()
2029 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); ablkcipher_setkey()
2030 append_move(desc, MOVE_WAITCOMP | ablkcipher_setkey()
2035 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); ablkcipher_setkey()
2038 append_seq_store(desc, crt->ivsize, ablkcipher_setkey()
2044 append_load_imm_u32(desc, (u32)1, LDST_IMM | ablkcipher_setkey()
2051 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP | ablkcipher_setkey()
2055 append_operation(desc, ctx->class1_alg_type | ablkcipher_setkey()
2059 ablkcipher_append_src_dst(desc); ablkcipher_setkey()
2061 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, ablkcipher_setkey()
2062 desc_bytes(desc), ablkcipher_setkey()
2071 DUMP_PREFIX_ADDRESS, 16, 4, desc, ablkcipher_setkey()
2072 desc_bytes(desc), 1); ablkcipher_setkey()
2087 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
2113 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
2182 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, aead_encrypt_done() argument
2195 edesc = (struct aead_edesc *)((char *)desc - aead_encrypt_done()
2221 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, aead_decrypt_done() argument
2234 edesc = (struct aead_edesc *)((char *)desc - aead_decrypt_done()
2277 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, ablkcipher_encrypt_done() argument
2289 edesc = (struct ablkcipher_edesc *)((char *)desc - ablkcipher_encrypt_done()
2310 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ablkcipher_decrypt_done() argument
2322 edesc = (struct ablkcipher_edesc *)((char *)desc - ablkcipher_decrypt_done()
2354 u32 *desc = edesc->hw_desc; init_aead_job() local
2383 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); init_aead_job()
2398 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, init_aead_job()
2420 append_seq_out_ptr(desc, dst_dma, req->cryptlen + authsize, init_aead_job()
2423 append_seq_out_ptr(desc, dst_dma, req->cryptlen - authsize, init_aead_job()
2439 u32 *desc = edesc->hw_desc; init_aead_giv_job() local
2467 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); init_aead_giv_job()
2480 append_seq_in_ptr(desc, src_dma, req->assoclen + ivsize + req->cryptlen, init_aead_giv_job()
2499 append_seq_out_ptr(desc, dst_dma, ivsize + req->cryptlen + authsize, init_aead_giv_job()
2513 u32 *desc = edesc->hw_desc; init_ablkcipher_job() local
2528 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); init_ablkcipher_job()
2538 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); init_ablkcipher_job()
2557 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); init_ablkcipher_job()
2570 u32 *desc = edesc->hw_desc; init_ablkcipher_giv_job() local
2585 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); init_ablkcipher_giv_job()
2595 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options); init_ablkcipher_giv_job()
2605 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options); init_ablkcipher_giv_job()
2693 /* allocate space for base edesc and hw desc commands, link tables */ aead_edesc_alloc()
2762 u32 *desc; aead_encrypt() local
2780 desc = edesc->hw_desc; aead_encrypt()
2781 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); aead_encrypt()
2799 u32 *desc; aead_decrypt() local
2823 desc = edesc->hw_desc; aead_decrypt()
2824 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); aead_decrypt()
2933 /* allocate space for base edesc and hw desc commands, link tables */ aead_giv_edesc_alloc()
3010 u32 *desc; aead_givencrypt() local
3035 desc = edesc->hw_desc; aead_givencrypt()
3036 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); aead_givencrypt()
3106 /* allocate space for base edesc and hw desc commands, link tables */ ablkcipher_edesc_alloc()
3161 u32 *desc; ablkcipher_encrypt() local
3178 desc = edesc->hw_desc; ablkcipher_encrypt()
3179 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); ablkcipher_encrypt()
3198 u32 *desc; ablkcipher_decrypt() local
3210 desc = edesc->hw_desc; ablkcipher_decrypt()
3217 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); ablkcipher_decrypt()
3285 /* allocate space for base edesc and hw desc commands, link tables */ ablkcipher_giv_edesc_alloc()
3342 u32 *desc; ablkcipher_givencrypt() local
3360 desc = edesc->hw_desc; ablkcipher_givencrypt()
3361 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); ablkcipher_givencrypt()
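Every ablkcipher_setkey() branch above repeats one lifecycle: init_sh_desc(), a run of append_*() construction commands, then dma_map_single() so the job ring can reference the finished shared descriptor. A minimal sketch of that pattern in kernel context — the caam_ctx fields and constants are taken from the hits above; the elided commands and error handling are assumptions:

static int example_build_sh_desc(struct device *jrdev, struct caam_ctx *ctx)
{
        u32 *desc = ctx->sh_desc_enc;

        init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
        /* key/IV loads elided; see the append_key_as_imm() and
         * append_seq_load() hits above for the full command sequence */
        append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL |
                         OP_ALG_ENCRYPT);
        ablkcipher_append_src_dst(desc);

        ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
                                              DMA_TO_DEVICE);
        if (dma_mapping_error(jrdev, ctx->sh_desc_enc_dma))
                return -ENOMEM;
        return 0;
}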
H A Dcaamhash.c42 * So, a job desc looks like:
140 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, map_seq_out_ptr_ctx() argument
151 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); map_seq_out_ptr_ctx()
157 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, map_seq_out_ptr_result() argument
163 append_seq_out_ptr(desc, dst_dma, digestsize, 0); map_seq_out_ptr_result()
211 static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, ctx_map_to_sec4_sg() argument
227 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) append_key_ahash() argument
229 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, append_key_ahash()
235 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) init_sh_desc_key_ahash() argument
239 init_sh_desc(desc, HDR_SHARE_SERIAL); init_sh_desc_key_ahash()
243 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | init_sh_desc_key_ahash()
246 append_key_ahash(desc, ctx); init_sh_desc_key_ahash()
248 set_jump_tgt_here(desc, key_jump_cmd); init_sh_desc_key_ahash()
252 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); init_sh_desc_key_ahash()
260 static inline void ahash_append_load_str(u32 *desc, int digestsize) ahash_append_load_str() argument
263 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ahash_append_load_str()
266 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | ahash_append_load_str()
270 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | ahash_append_load_str()
277 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, ahash_ctx_data_to_out() argument
281 init_sh_desc_key_ahash(desc, ctx); ahash_ctx_data_to_out()
284 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | ahash_ctx_data_to_out()
288 append_operation(desc, op | state | OP_ALG_ENCRYPT); ahash_ctx_data_to_out()
293 ahash_append_load_str(desc, digestsize); ahash_ctx_data_to_out()
297 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, ahash_data_to_out() argument
300 init_sh_desc_key_ahash(desc, ctx); ahash_data_to_out()
303 append_operation(desc, op | state | OP_ALG_ENCRYPT); ahash_data_to_out()
308 ahash_append_load_str(desc, digestsize); ahash_data_to_out()
317 u32 *desc; ahash_set_sh_desc() local
323 desc = ctx->sh_desc_update; ahash_set_sh_desc()
325 init_sh_desc(desc, HDR_SHARE_SERIAL); ahash_set_sh_desc()
328 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | ahash_set_sh_desc()
332 append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | ahash_set_sh_desc()
336 ahash_append_load_str(desc, ctx->ctx_len); ahash_set_sh_desc()
338 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), ahash_set_sh_desc()
347 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_set_sh_desc()
351 desc = ctx->sh_desc_update_first; ahash_set_sh_desc()
353 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, ahash_set_sh_desc()
356 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, ahash_set_sh_desc()
357 desc_bytes(desc), ahash_set_sh_desc()
366 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_set_sh_desc()
370 desc = ctx->sh_desc_fin; ahash_set_sh_desc()
372 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, ahash_set_sh_desc()
375 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), ahash_set_sh_desc()
383 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_set_sh_desc()
384 desc_bytes(desc), 1); ahash_set_sh_desc()
388 desc = ctx->sh_desc_finup; ahash_set_sh_desc()
390 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, ahash_set_sh_desc()
393 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), ahash_set_sh_desc()
401 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_set_sh_desc()
402 desc_bytes(desc), 1); ahash_set_sh_desc()
406 desc = ctx->sh_desc_digest; ahash_set_sh_desc()
408 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, ahash_set_sh_desc()
411 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, ahash_set_sh_desc()
412 desc_bytes(desc), ahash_set_sh_desc()
421 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_set_sh_desc()
422 desc_bytes(desc), 1); ahash_set_sh_desc()
441 u32 *desc; hash_digest_key() local
446 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); hash_digest_key()
447 if (!desc) { hash_digest_key()
452 init_job_desc(desc, 0); hash_digest_key()
458 kfree(desc); hash_digest_key()
466 kfree(desc); hash_digest_key()
471 append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | hash_digest_key()
473 append_seq_in_ptr(desc, src_dma, *keylen, 0); hash_digest_key()
474 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | hash_digest_key()
476 append_seq_out_ptr(desc, dst_dma, digestsize, 0); hash_digest_key()
477 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | hash_digest_key()
484 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); hash_digest_key()
490 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); hash_digest_key()
507 kfree(desc); hash_digest_key()
632 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, ahash_done() argument
646 edesc = (struct ahash_edesc *)((char *)desc - ahash_done()
667 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, ahash_done_bi() argument
681 edesc = (struct ahash_edesc *)((char *)desc - ahash_done_bi()
702 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, ahash_done_ctx_src() argument
716 edesc = (struct ahash_edesc *)((char *)desc - ahash_done_ctx_src()
737 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, ahash_done_ctx_dst() argument
751 edesc = (struct ahash_edesc *)((char *)desc - ahash_done_ctx_dst()
787 u32 *sh_desc = ctx->sh_desc_update, *desc; ahash_update_ctx() local
807 * allocate space for base edesc and hw desc commands, ahash_update_ctx()
824 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ahash_update_ctx()
850 desc = edesc->hw_desc; ahash_update_ctx()
851 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | ahash_update_ctx()
862 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + ahash_update_ctx()
865 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); ahash_update_ctx()
869 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_update_ctx()
870 desc_bytes(desc), 1); ahash_update_ctx()
873 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); ahash_update_ctx()
910 u32 *sh_desc = ctx->sh_desc_fin, *desc; ahash_final_ctx() local
921 /* allocate space for base edesc and hw desc commands, link tables */ ahash_final_ctx()
930 desc = edesc->hw_desc; ahash_final_ctx()
931 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); ahash_final_ctx()
938 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ahash_final_ctx()
955 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, ahash_final_ctx()
958 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_final_ctx()
967 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_final_ctx()
970 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); ahash_final_ctx()
993 u32 *sh_desc = ctx->sh_desc_finup, *desc; ahash_finup_ctx() local
1008 /* allocate space for base edesc and hw desc commands, link tables */ ahash_finup_ctx()
1017 desc = edesc->hw_desc; ahash_finup_ctx()
1018 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); ahash_finup_ctx()
1026 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ahash_finup_ctx()
1045 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + ahash_finup_ctx()
1048 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_finup_ctx()
1057 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_finup_ctx()
1060 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); ahash_finup_ctx()
1078 u32 *sh_desc = ctx->sh_desc_digest, *desc; ahash_digest() local
1094 /* allocate space for base edesc and hw desc commands, link tables */ ahash_digest()
1108 desc = edesc->hw_desc; ahash_digest()
1109 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); ahash_digest()
1125 append_seq_in_ptr(desc, src_dma, req->nbytes, options); ahash_digest()
1127 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_digest()
1136 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_digest()
1139 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); ahash_digest()
1161 u32 *sh_desc = ctx->sh_desc_digest, *desc; ahash_final_no_ctx() local
1168 /* allocate space for base edesc and hw desc commands, link tables */ ahash_final_no_ctx()
1178 desc = edesc->hw_desc; ahash_final_no_ctx()
1179 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); ahash_final_no_ctx()
1187 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); ahash_final_no_ctx()
1189 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_final_no_ctx()
1199 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_final_no_ctx()
1202 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); ahash_final_no_ctx()
1230 u32 *desc, *sh_desc = ctx->sh_desc_update_first; ahash_update_no_ctx() local
1246 * allocate space for base edesc and hw desc commands, ahash_update_no_ctx()
1276 desc = edesc->hw_desc; ahash_update_no_ctx()
1277 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | ahash_update_no_ctx()
1288 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); ahash_update_no_ctx()
1290 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); ahash_update_no_ctx()
1296 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_update_no_ctx()
1297 desc_bytes(desc), 1); ahash_update_no_ctx()
1300 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); ahash_update_no_ctx()
1341 u32 *sh_desc = ctx->sh_desc_digest, *desc; ahash_finup_no_ctx() local
1355 /* allocate space for base edesc and hw desc commands, link tables */ ahash_finup_no_ctx()
1364 desc = edesc->hw_desc; ahash_finup_no_ctx()
1365 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); ahash_finup_no_ctx()
1387 append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + ahash_finup_no_ctx()
1390 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_finup_no_ctx()
1399 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_finup_no_ctx()
1402 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); ahash_finup_no_ctx()
1426 u32 *sh_desc = ctx->sh_desc_update_first, *desc; ahash_update_first() local
1448 * allocate space for base edesc and hw desc commands, ahash_update_first()
1489 desc = edesc->hw_desc; ahash_update_first()
1490 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | ahash_update_first()
1493 append_seq_in_ptr(desc, src_dma, to_hash, options); ahash_update_first()
1495 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); ahash_update_first()
1501 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_update_first()
1502 desc_bytes(desc), 1); ahash_update_first()
1505 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, ahash_update_first()
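The per-request half is symmetric: each ahash_*() entry point above builds a job descriptor that defers to a prebuilt shared descriptor, chains the scatter/gather input, and enqueues it on the job ring. A sketch along the lines of ahash_final_ctx(), assuming desc_len() from desc_constr.h and the struct fields shown in the hits:

static int example_enqueue_fin(struct device *jrdev, struct caam_hash_ctx *ctx,
                               struct ahash_edesc *edesc,
                               struct ahash_request *req,
                               int buflen, int digestsize)
{
        u32 *desc = edesc->hw_desc;

        init_job_desc_shared(desc, ctx->sh_desc_fin_dma,
                             desc_len(ctx->sh_desc_fin),
                             HDR_SHARE_DEFER | HDR_REVERSE);
        append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen,
                          LDST_SGF);
        edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result,
                                                digestsize);
        /* status handling and unmapping happen in ahash_done_ctx_src() */
        return caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req);
}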
H A Dcaamrng.c20 * A job desc looks like this:
99 static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) rng_done() argument
103 bd = (struct buf_data *)((char *)desc - rng_done()
121 u32 *desc = bd->hw_desc; submit_job() local
126 err = caam_jr_enqueue(jrdev, desc, rng_done, ctx); submit_job()
191 u32 *desc = ctx->sh_desc; rng_create_sh_desc() local
193 init_sh_desc(desc, HDR_SHARE_SERIAL); rng_create_sh_desc()
196 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); rng_create_sh_desc()
199 append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG); rng_create_sh_desc()
202 append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE); rng_create_sh_desc()
204 ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), rng_create_sh_desc()
212 desc, desc_bytes(desc), 1); rng_create_sh_desc()
221 u32 *desc = bd->hw_desc; rng_create_job_desc() local
224 init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER | rng_create_job_desc()
233 append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); rng_create_job_desc()
235 print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, rng_create_job_desc()
236 desc, desc_bytes(desc), 1); rng_create_job_desc()
/linux-4.1.27/drivers/usb/class/
H A Dcdc-wdm.c115 struct wdm_device *desc; wdm_find_device() local
118 list_for_each_entry(desc, &wdm_device_list, device_list) wdm_find_device()
119 if (desc->intf == intf) wdm_find_device()
121 desc = NULL; wdm_find_device()
125 return desc; wdm_find_device()
130 struct wdm_device *desc; wdm_find_device_by_minor() local
133 list_for_each_entry(desc, &wdm_device_list, device_list) wdm_find_device_by_minor()
134 if (desc->intf->minor == minor) wdm_find_device_by_minor()
136 desc = NULL; wdm_find_device_by_minor()
140 return desc; wdm_find_device_by_minor()
146 struct wdm_device *desc; wdm_out_callback() local
147 desc = urb->context; wdm_out_callback()
148 spin_lock(&desc->iuspin); wdm_out_callback()
149 desc->werr = urb->status; wdm_out_callback()
150 spin_unlock(&desc->iuspin); wdm_out_callback()
151 kfree(desc->outbuf); wdm_out_callback()
152 desc->outbuf = NULL; wdm_out_callback()
153 clear_bit(WDM_IN_USE, &desc->flags); wdm_out_callback()
154 wake_up(&desc->wait); wdm_out_callback()
159 struct wdm_device *desc = urb->context; wdm_in_callback() local
163 spin_lock(&desc->iuspin); wdm_in_callback()
164 clear_bit(WDM_RESPONDING, &desc->flags); wdm_in_callback()
169 dev_dbg(&desc->intf->dev, wdm_in_callback()
173 dev_dbg(&desc->intf->dev, wdm_in_callback()
177 dev_dbg(&desc->intf->dev, wdm_in_callback()
181 dev_err(&desc->intf->dev, wdm_in_callback()
185 dev_err(&desc->intf->dev, wdm_in_callback()
191 desc->rerr = status; wdm_in_callback()
192 if (length + desc->length > desc->wMaxCommand) { wdm_in_callback()
194 set_bit(WDM_OVERFLOW, &desc->flags); wdm_in_callback()
197 if (!test_bit(WDM_OVERFLOW, &desc->flags)) { wdm_in_callback()
198 memmove(desc->ubuf + desc->length, desc->inbuf, length); wdm_in_callback()
199 desc->length += length; wdm_in_callback()
200 desc->reslength = length; wdm_in_callback()
204 wake_up(&desc->wait); wdm_in_callback()
206 set_bit(WDM_READ, &desc->flags); wdm_in_callback()
207 spin_unlock(&desc->iuspin); wdm_in_callback()
215 struct wdm_device *desc; wdm_int_callback() local
218 desc = urb->context; wdm_int_callback()
219 dr = (struct usb_cdc_notification *)desc->sbuf; wdm_int_callback()
228 set_bit(WDM_INT_STALL, &desc->flags); wdm_int_callback()
229 dev_err(&desc->intf->dev, "Stall on int endpoint\n"); wdm_int_callback()
232 dev_err(&desc->intf->dev, wdm_int_callback()
239 dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n", wdm_int_callback()
246 dev_dbg(&desc->intf->dev, wdm_int_callback()
253 dev_dbg(&desc->intf->dev, wdm_int_callback()
258 dev_dbg(&desc->intf->dev, "SPEED_CHANGE received (len %u)", wdm_int_callback()
262 clear_bit(WDM_POLL_RUNNING, &desc->flags); wdm_int_callback()
263 dev_err(&desc->intf->dev, wdm_int_callback()
271 spin_lock(&desc->iuspin); wdm_int_callback()
272 responding = test_and_set_bit(WDM_RESPONDING, &desc->flags); wdm_int_callback()
273 if (!desc->resp_count++ && !responding wdm_int_callback()
274 && !test_bit(WDM_DISCONNECTING, &desc->flags) wdm_int_callback()
275 && !test_bit(WDM_SUSPENDING, &desc->flags)) { wdm_int_callback()
276 rv = usb_submit_urb(desc->response, GFP_ATOMIC); wdm_int_callback()
277 dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d", wdm_int_callback()
280 spin_unlock(&desc->iuspin); wdm_int_callback()
282 clear_bit(WDM_RESPONDING, &desc->flags); wdm_int_callback()
287 rv = schedule_work(&desc->rxwork); wdm_int_callback()
289 dev_err(&desc->intf->dev, wdm_int_callback()
296 dev_err(&desc->intf->dev, wdm_int_callback()
302 static void kill_urbs(struct wdm_device *desc) kill_urbs() argument
305 usb_kill_urb(desc->command); kill_urbs()
306 usb_kill_urb(desc->validity); kill_urbs()
307 usb_kill_urb(desc->response); kill_urbs()
310 static void free_urbs(struct wdm_device *desc) free_urbs() argument
312 usb_free_urb(desc->validity); free_urbs()
313 usb_free_urb(desc->response); free_urbs()
314 usb_free_urb(desc->command); free_urbs()
317 static void cleanup(struct wdm_device *desc) cleanup() argument
319 kfree(desc->sbuf); cleanup()
320 kfree(desc->inbuf); cleanup()
321 kfree(desc->orq); cleanup()
322 kfree(desc->irq); cleanup()
323 kfree(desc->ubuf); cleanup()
324 free_urbs(desc); cleanup()
325 kfree(desc); cleanup()
333 struct wdm_device *desc = file->private_data; wdm_write() local
336 if (count > desc->wMaxCommand) wdm_write()
337 count = desc->wMaxCommand; wdm_write()
339 spin_lock_irq(&desc->iuspin); wdm_write()
340 we = desc->werr; wdm_write()
341 desc->werr = 0; wdm_write()
342 spin_unlock_irq(&desc->iuspin); wdm_write()
359 r = mutex_lock_interruptible(&desc->wlock); wdm_write()
364 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_write()
369 r = usb_autopm_get_interface(desc->intf); wdm_write()
376 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE, wdm_write()
377 &desc->flags)); wdm_write()
379 if (test_bit(WDM_IN_USE, &desc->flags)) wdm_write()
382 if (test_bit(WDM_RESETTING, &desc->flags)) wdm_write()
390 req = desc->orq; wdm_write()
392 desc->command, wdm_write()
393 interface_to_usbdev(desc->intf), wdm_write()
395 usb_sndctrlpipe(interface_to_usbdev(desc->intf), 0), wdm_write()
400 desc wdm_write()
407 req->wIndex = desc->inum; /* already converted */ wdm_write()
409 set_bit(WDM_IN_USE, &desc->flags); wdm_write()
410 desc->outbuf = buf; wdm_write()
412 rv = usb_submit_urb(desc->command, GFP_KERNEL); wdm_write()
414 desc->outbuf = NULL; wdm_write()
415 clear_bit(WDM_IN_USE, &desc->flags); wdm_write()
416 dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); wdm_write()
420 dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d", wdm_write()
424 usb_autopm_put_interface(desc->intf); wdm_write()
425 mutex_unlock(&desc->wlock); wdm_write()
430 usb_autopm_put_interface(desc->intf); wdm_write()
432 mutex_unlock(&desc->wlock); wdm_write()
442 * Called with desc->iuspin locked
444 static int clear_wdm_read_flag(struct wdm_device *desc) clear_wdm_read_flag() argument
448 clear_bit(WDM_READ, &desc->flags); clear_wdm_read_flag()
451 if (!desc->resp_count || !--desc->resp_count) clear_wdm_read_flag()
454 set_bit(WDM_RESPONDING, &desc->flags); clear_wdm_read_flag()
455 spin_unlock_irq(&desc->iuspin); clear_wdm_read_flag()
456 rv = usb_submit_urb(desc->response, GFP_KERNEL); clear_wdm_read_flag()
457 spin_lock_irq(&desc->iuspin); clear_wdm_read_flag()
459 dev_err(&desc->intf->dev, clear_wdm_read_flag()
463 clear_bit(WDM_RESPONDING, &desc->flags); clear_wdm_read_flag()
464 desc->resp_count = 0; clear_wdm_read_flag()
475 struct wdm_device *desc = file->private_data; wdm_read() local
478 rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */ wdm_read()
482 cntr = ACCESS_ONCE(desc->length); wdm_read()
484 desc->read = 0; wdm_read()
486 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_read()
490 if (test_bit(WDM_OVERFLOW, &desc->flags)) { wdm_read()
491 clear_bit(WDM_OVERFLOW, &desc->flags); wdm_read()
497 if (!test_bit(WDM_READ, &desc->flags)) { wdm_read()
503 rv = wait_event_interruptible(desc->wait, wdm_read()
504 test_bit(WDM_READ, &desc->flags)); wdm_read()
508 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_read()
512 if (test_bit(WDM_RESETTING, &desc->flags)) { wdm_read()
516 usb_mark_last_busy(interface_to_usbdev(desc->intf)); wdm_read()
522 spin_lock_irq(&desc->iuspin); wdm_read()
524 if (desc->rerr) { /* read completed, error happened */ wdm_read()
525 rv = usb_translate_errors(desc->rerr); wdm_read()
526 desc->rerr = 0; wdm_read()
527 spin_unlock_irq(&desc->iuspin); wdm_read()
534 if (!test_bit(WDM_READ, &desc->flags)) { /* lost race */ wdm_read()
535 spin_unlock_irq(&desc->iuspin); wdm_read()
539 if (!desc->reslength) { /* zero length read */ wdm_read()
540 dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__); wdm_read()
541 rv = clear_wdm_read_flag(desc); wdm_read()
542 spin_unlock_irq(&desc->iuspin); wdm_read()
547 cntr = desc->length; wdm_read()
548 spin_unlock_irq(&desc->iuspin); wdm_read()
553 rv = copy_to_user(buffer, desc->ubuf, cntr); wdm_read()
559 spin_lock_irq(&desc->iuspin); wdm_read()
561 for (i = 0; i < desc->length - cntr; i++) wdm_read()
562 desc->ubuf[i] = desc->ubuf[i + cntr]; wdm_read()
564 desc->length -= cntr; wdm_read()
566 if (!desc->length) wdm_read()
567 clear_wdm_read_flag(desc); wdm_read()
568 spin_unlock_irq(&desc->iuspin); wdm_read()
572 mutex_unlock(&desc->rlock); wdm_read()
578 struct wdm_device *desc = file->private_data; wdm_flush() local
580 wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags)); wdm_flush()
582 /* cannot dereference desc->intf if WDM_DISCONNECTING */ wdm_flush()
583 if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags)) wdm_flush()
584 dev_err(&desc->intf->dev, "Error in flush path: %d\n", wdm_flush()
585 desc->werr); wdm_flush()
587 return usb_translate_errors(desc->werr); wdm_flush()
592 struct wdm_device *desc = file->private_data; wdm_poll() local
596 spin_lock_irqsave(&desc->iuspin, flags); wdm_poll()
597 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_poll()
599 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_poll()
602 if (test_bit(WDM_READ, &desc->flags)) wdm_poll()
604 if (desc->rerr || desc->werr) wdm_poll()
606 if (!test_bit(WDM_IN_USE, &desc->flags)) wdm_poll()
608 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_poll()
610 poll_wait(file, &desc->wait, wait); wdm_poll()
621 struct wdm_device *desc; wdm_open() local
624 desc = wdm_find_device_by_minor(minor); wdm_open()
625 if (!desc) wdm_open()
628 intf = desc->intf; wdm_open()
629 if (test_bit(WDM_DISCONNECTING, &desc->flags)) wdm_open()
631 file->private_data = desc; wdm_open()
633 rv = usb_autopm_get_interface(desc->intf); wdm_open()
635 dev_err(&desc->intf->dev, "Error autopm - %d\n", rv); wdm_open()
639 /* using write lock to protect desc->count */ wdm_open()
640 mutex_lock(&desc->wlock); wdm_open()
641 if (!desc->count++) { wdm_open()
642 desc->werr = 0; wdm_open()
643 desc->rerr = 0; wdm_open()
644 rv = usb_submit_urb(desc->validity, GFP_KERNEL); wdm_open()
646 desc->count--; wdm_open()
647 dev_err(&desc->intf->dev, wdm_open()
654 mutex_unlock(&desc->wlock); wdm_open()
655 if (desc->count == 1) wdm_open()
656 desc->manage_power(intf, 1); wdm_open()
657 usb_autopm_put_interface(desc->intf); wdm_open()
665 struct wdm_device *desc = file->private_data; wdm_release() local
669 /* using write lock to protect desc->count */ wdm_release()
670 mutex_lock(&desc->wlock); wdm_release()
671 desc->count--; wdm_release()
672 mutex_unlock(&desc->wlock); wdm_release()
674 if (!desc->count) { wdm_release()
675 if (!test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_release()
676 dev_dbg(&desc->intf->dev, "wdm_release: cleanup"); wdm_release()
677 kill_urbs(desc); wdm_release()
678 spin_lock_irq(&desc->iuspin); wdm_release()
679 desc->resp_count = 0; wdm_release()
680 spin_unlock_irq(&desc->iuspin); wdm_release()
681 desc->manage_power(desc->intf, 0); wdm_release()
683 /* must avoid dev_printk here as desc->intf is invalid */ wdm_release()
685 cleanup(desc); wdm_release()
694 struct wdm_device *desc = file->private_data; wdm_ioctl() local
699 if (copy_to_user((void __user *)arg, &desc->wMaxCommand, sizeof(desc->wMaxCommand))) wdm_ioctl()
730 struct wdm_device *desc = container_of(work, struct wdm_device, rxwork); wdm_rxwork() local
735 spin_lock_irqsave(&desc->iuspin, flags); wdm_rxwork()
736 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_rxwork()
737 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_rxwork()
739 responding = test_and_set_bit(WDM_RESPONDING, &desc->flags); wdm_rxwork()
740 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_rxwork()
742 rv = usb_submit_urb(desc->response, GFP_KERNEL); wdm_rxwork()
744 spin_lock_irqsave(&desc->iuspin, flags); wdm_rxwork()
745 clear_bit(WDM_RESPONDING, &desc->flags); wdm_rxwork()
746 if (!test_bit(WDM_DISCONNECTING, &desc->flags)) wdm_rxwork()
747 schedule_work(&desc->rxwork); wdm_rxwork()
748 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_rxwork()
759 struct wdm_device *desc; wdm_create() local
761 desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL); wdm_create()
762 if (!desc) wdm_create()
764 INIT_LIST_HEAD(&desc->device_list); wdm_create()
765 mutex_init(&desc->rlock); wdm_create()
766 mutex_init(&desc->wlock); wdm_create()
767 spin_lock_init(&desc->iuspin); wdm_create()
768 init_waitqueue_head(&desc->wait); wdm_create()
769 desc->wMaxCommand = bufsize; wdm_create()
771 desc->inum = cpu_to_le16((u16)intf->cur_altsetting->desc.bInterfaceNumber); wdm_create()
772 desc->intf = intf; wdm_create()
773 INIT_WORK(&desc->rxwork, wdm_rxwork); wdm_create()
779 desc->wMaxPacketSize = usb_endpoint_maxp(ep); wdm_create()
781 desc->orq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); wdm_create()
782 if (!desc->orq) wdm_create()
784 desc->irq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); wdm_create()
785 if (!desc->irq) wdm_create()
788 desc->validity = usb_alloc_urb(0, GFP_KERNEL); wdm_create()
789 if (!desc->validity) wdm_create()
792 desc->response = usb_alloc_urb(0, GFP_KERNEL); wdm_create()
793 if (!desc->response) wdm_create()
796 desc->command = usb_alloc_urb(0, GFP_KERNEL); wdm_create()
797 if (!desc->command) wdm_create()
800 desc->ubuf = kmalloc(desc->wMaxCommand, GFP_KERNEL); wdm_create()
801 if (!desc->ubuf) wdm_create()
804 desc->sbuf = kmalloc(desc->wMaxPacketSize, GFP_KERNEL); wdm_create()
805 if (!desc->sbuf) wdm_create()
808 desc->inbuf = kmalloc(desc->wMaxCommand, GFP_KERNEL); wdm_create()
809 if (!desc->inbuf) wdm_create()
813 desc->validity, wdm_create()
816 desc->sbuf, wdm_create()
817 desc->wMaxPacketSize, wdm_create()
819 desc, wdm_create()
823 desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE); wdm_create()
824 desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; wdm_create()
825 desc->irq->wValue = 0; wdm_create()
826 desc->irq->wIndex = desc->inum; /* already converted */ wdm_create()
827 desc->irq->wLength = cpu_to_le16(desc->wMaxCommand); wdm_create()
830 desc->response, wdm_create()
833 usb_rcvctrlpipe(interface_to_usbdev(desc->intf), 0), wdm_create()
834 (unsigned char *)desc->irq, wdm_create()
835 desc->inbuf, wdm_create()
836 desc->wMaxCommand, wdm_create()
838 desc wdm_create()
841 desc->manage_power = manage_power; wdm_create()
844 list_add(&desc->device_list, &wdm_device_list); wdm_create()
856 list_del(&desc->device_list); wdm_create()
858 cleanup(desc); wdm_create()
912 if (iface->desc.bNumEndpoints != 1) wdm_probe()
914 ep = &iface->endpoint[0].desc; wdm_probe()
960 struct wdm_device *desc; wdm_disconnect() local
964 desc = wdm_find_device(intf); wdm_disconnect()
968 spin_lock_irqsave(&desc->iuspin, flags); wdm_disconnect()
969 set_bit(WDM_DISCONNECTING, &desc->flags); wdm_disconnect()
970 set_bit(WDM_READ, &desc->flags); wdm_disconnect()
972 clear_bit(WDM_IN_USE, &desc->flags); wdm_disconnect()
973 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_disconnect()
974 wake_up_all(&desc->wait); wdm_disconnect()
975 mutex_lock(&desc->rlock); wdm_disconnect()
976 mutex_lock(&desc->wlock); wdm_disconnect()
977 kill_urbs(desc); wdm_disconnect()
978 cancel_work_sync(&desc->rxwork); wdm_disconnect()
979 mutex_unlock(&desc->wlock); wdm_disconnect()
980 mutex_unlock(&desc->rlock); wdm_disconnect()
982 /* the desc->intf pointer used as list key is now invalid */ wdm_disconnect()
984 list_del(&desc->device_list); wdm_disconnect()
987 if (!desc->count) wdm_disconnect()
988 cleanup(desc); wdm_disconnect()
990 dev_dbg(&intf->dev, "%s: %d open files - postponing cleanup\n", __func__, desc->count); wdm_disconnect()
997 struct wdm_device *desc = wdm_find_device(intf); wdm_suspend() local
1000 dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); wdm_suspend()
1004 mutex_lock(&desc->rlock); wdm_suspend()
1005 mutex_lock(&desc->wlock); wdm_suspend()
1007 spin_lock_irq(&desc->iuspin); wdm_suspend()
1010 (test_bit(WDM_IN_USE, &desc->flags) wdm_suspend()
1011 || test_bit(WDM_RESPONDING, &desc->flags))) { wdm_suspend()
1012 spin_unlock_irq(&desc->iuspin); wdm_suspend()
1016 set_bit(WDM_SUSPENDING, &desc->flags); wdm_suspend()
1017 spin_unlock_irq(&desc->iuspin); wdm_suspend()
1019 kill_urbs(desc); wdm_suspend()
1020 cancel_work_sync(&desc->rxwork); wdm_suspend()
1023 mutex_unlock(&desc->wlock); wdm_suspend()
1024 mutex_unlock(&desc->rlock); wdm_suspend()
1031 static int recover_from_urb_loss(struct wdm_device *desc) recover_from_urb_loss() argument
1035 if (desc->count) { recover_from_urb_loss()
1036 rv = usb_submit_urb(desc->validity, GFP_NOIO); recover_from_urb_loss()
1038 dev_err(&desc->intf->dev, recover_from_urb_loss()
1047 struct wdm_device *desc = wdm_find_device(intf); wdm_resume() local
1050 dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor); wdm_resume()
1052 clear_bit(WDM_SUSPENDING, &desc->flags); wdm_resume()
1053 rv = recover_from_urb_loss(desc); wdm_resume()
1061 struct wdm_device *desc = wdm_find_device(intf); wdm_pre_reset() local
1069 spin_lock_irq(&desc->iuspin); wdm_pre_reset()
1070 set_bit(WDM_RESETTING, &desc->flags); /* inform read/write */ wdm_pre_reset()
1071 set_bit(WDM_READ, &desc->flags); /* unblock read */ wdm_pre_reset()
1072 clear_bit(WDM_IN_USE, &desc->flags); /* unblock write */ wdm_pre_reset()
1073 desc->rerr = -EINTR; wdm_pre_reset()
1074 spin_unlock_irq(&desc->iuspin); wdm_pre_reset()
1075 wake_up_all(&desc->wait); wdm_pre_reset()
1076 mutex_lock(&desc->rlock); wdm_pre_reset()
1077 mutex_lock(&desc->wlock); wdm_pre_reset()
1078 kill_urbs(desc); wdm_pre_reset()
1079 cancel_work_sync(&desc->rxwork); wdm_pre_reset()
1085 struct wdm_device *desc = wdm_find_device(intf); wdm_post_reset() local
1088 clear_bit(WDM_OVERFLOW, &desc->flags); wdm_post_reset()
1089 clear_bit(WDM_RESETTING, &desc->flags); wdm_post_reset()
1090 rv = recover_from_urb_loss(desc); wdm_post_reset()
1091 mutex_unlock(&desc->wlock); wdm_post_reset()
1092 mutex_unlock(&desc->rlock); wdm_post_reset()
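From userspace the driver above is an ordinary character device: write() submits an encapsulated command, poll() waits for wdm_in_callback() to set WDM_READ, and read() drains the buffered response. A self-contained sketch; the ioctl name comes from linux/usb/cdc-wdm.h, and the device path and AT command are only illustrative:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <linux/usb/cdc-wdm.h>

int main(void)
{
        char buf[512];
        __u16 maxcmd = 0;
        ssize_t n;
        struct pollfd pfd;
        int fd = open("/dev/cdc-wdm0", O_RDWR);

        if (fd < 0)
                return 1;
        ioctl(fd, IOCTL_WDM_MAX_COMMAND, &maxcmd); /* reads desc->wMaxCommand */
        write(fd, "AT\r", 3);                /* queued via wdm_write() */
        pfd.fd = fd;
        pfd.events = POLLIN;
        poll(&pfd, 1, 5000);                 /* wdm_poll() wakes us on WDM_READ */
        n = read(fd, buf, sizeof(buf));      /* drained by wdm_read() */
        if (n > 0)
                fwrite(buf, 1, n, stdout);
        close(fd);
        return 0;
}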
/linux-4.1.27/drivers/scsi/fnic/
H A Dfnic_res.h37 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); fnic_queue_wq_desc() local
39 wq_enet_desc_enc(desc, fnic_queue_wq_desc()
61 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); fnic_queue_wq_eth_desc() local
63 wq_enet_desc_enc(desc, fnic_queue_wq_eth_desc()
91 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_icmnd_16() local
93 desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */ fnic_queue_wq_copy_desc_icmnd_16()
94 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_icmnd_16()
95 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_icmnd_16()
96 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_icmnd_16()
98 desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */ fnic_queue_wq_copy_desc_icmnd_16()
99 desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */ fnic_queue_wq_copy_desc_icmnd_16()
100 desc->u.icmnd_16._resvd0[0] = 0; /* reserved */ fnic_queue_wq_copy_desc_icmnd_16()
101 desc->u.icmnd_16._resvd0[1] = 0; /* reserved */ fnic_queue_wq_copy_desc_icmnd_16()
102 desc->u.icmnd_16._resvd0[2] = 0; /* reserved */ fnic_queue_wq_copy_desc_icmnd_16()
103 desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */ fnic_queue_wq_copy_desc_icmnd_16()
104 desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */ fnic_queue_wq_copy_desc_icmnd_16()
105 desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */ fnic_queue_wq_copy_desc_icmnd_16()
106 desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */ fnic_queue_wq_copy_desc_icmnd_16()
107 desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/ fnic_queue_wq_copy_desc_icmnd_16()
108 desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */ fnic_queue_wq_copy_desc_icmnd_16()
109 desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */ fnic_queue_wq_copy_desc_icmnd_16()
110 desc->u.icmnd_16.flags = flags; /* command flags */ fnic_queue_wq_copy_desc_icmnd_16()
111 memset(desc->u.icmnd_16.scsi_cdb, 0, CDB_16); fnic_queue_wq_copy_desc_icmnd_16()
112 memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, cdb_len); /* SCSI CDB */ fnic_queue_wq_copy_desc_icmnd_16()
113 desc->u.icmnd_16.data_len = data_len; /* length of data expected */ fnic_queue_wq_copy_desc_icmnd_16()
114 memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */ fnic_queue_wq_copy_desc_icmnd_16()
115 desc->u.icmnd_16._resvd2 = 0; /* reserved */ fnic_queue_wq_copy_desc_icmnd_16()
116 hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */ fnic_queue_wq_copy_desc_icmnd_16()
117 desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */ fnic_queue_wq_copy_desc_icmnd_16()
118 desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. Alloc Timeout */ fnic_queue_wq_copy_desc_icmnd_16()
119 desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */ fnic_queue_wq_copy_desc_icmnd_16()
130 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_itmf() local
132 desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */ fnic_queue_wq_copy_desc_itmf()
133 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_itmf()
134 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_itmf()
135 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_itmf()
137 desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */ fnic_queue_wq_copy_desc_itmf()
138 desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */ fnic_queue_wq_copy_desc_itmf()
139 desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */ fnic_queue_wq_copy_desc_itmf()
140 desc->u.itmf._resvd = 0; fnic_queue_wq_copy_desc_itmf()
141 memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */ fnic_queue_wq_copy_desc_itmf()
142 desc->u.itmf._resvd1 = 0; fnic_queue_wq_copy_desc_itmf()
143 hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */ fnic_queue_wq_copy_desc_itmf()
144 desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */ fnic_queue_wq_copy_desc_itmf()
145 desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */ fnic_queue_wq_copy_desc_itmf()
154 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_flogi_reg() local
156 desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */ fnic_queue_wq_copy_desc_flogi_reg()
157 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_flogi_reg()
158 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_flogi_reg()
159 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_flogi_reg()
161 desc->u.flogi_reg.format = format; fnic_queue_wq_copy_desc_flogi_reg()
162 desc->u.flogi_reg._resvd = 0; fnic_queue_wq_copy_desc_flogi_reg()
163 hton24(desc->u.flogi_reg.s_id, s_id); fnic_queue_wq_copy_desc_flogi_reg()
164 memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN); fnic_queue_wq_copy_desc_flogi_reg()
174 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_fip_reg() local
176 desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */ fnic_queue_wq_copy_desc_fip_reg()
177 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_fip_reg()
178 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_fip_reg()
179 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_fip_reg()
181 desc->u.flogi_fip_reg._resvd0 = 0; fnic_queue_wq_copy_desc_fip_reg()
182 hton24(desc->u.flogi_fip_reg.s_id, s_id); fnic_queue_wq_copy_desc_fip_reg()
183 memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN); fnic_queue_wq_copy_desc_fip_reg()
184 desc->u.flogi_fip_reg._resvd1 = 0; fnic_queue_wq_copy_desc_fip_reg()
185 desc->u.flogi_fip_reg.r_a_tov = r_a_tov; fnic_queue_wq_copy_desc_fip_reg()
186 desc->u.flogi_fip_reg.e_d_tov = e_d_tov; fnic_queue_wq_copy_desc_fip_reg()
187 memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN); fnic_queue_wq_copy_desc_fip_reg()
188 desc->u.flogi_fip_reg._resvd2 = 0; fnic_queue_wq_copy_desc_fip_reg()
196 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_fw_reset() local
198 desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */ fnic_queue_wq_copy_desc_fw_reset()
199 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_fw_reset()
200 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_fw_reset()
201 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_fw_reset()
210 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_lunmap() local
212 desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */ fnic_queue_wq_copy_desc_lunmap()
213 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_lunmap()
214 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_lunmap()
215 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_lunmap()
217 desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */ fnic_queue_wq_copy_desc_lunmap()
218 desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */ fnic_queue_wq_copy_desc_lunmap()
227 struct rq_enet_desc *desc = vnic_rq_next_desc(rq); fnic_queue_rq_desc() local
229 rq_enet_desc_enc(desc, fnic_queue_rq_desc()
H A Dwq_enet_desc.h51 static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, wq_enet_desc_enc() argument
56 desc->address = cpu_to_le64(address); wq_enet_desc_enc()
57 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); wq_enet_desc_enc()
58 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << wq_enet_desc_enc()
60 desc->header_length_flags = cpu_to_le16( wq_enet_desc_enc()
67 desc->vlan_tag = cpu_to_le16(vlan_tag); wq_enet_desc_enc()
70 static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, wq_enet_desc_dec() argument
75 *address = le64_to_cpu(desc->address); wq_enet_desc_dec()
76 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; wq_enet_desc_dec()
77 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & wq_enet_desc_dec()
79 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> wq_enet_desc_dec()
81 *header_length = le16_to_cpu(desc->header_length_flags) & wq_enet_desc_dec()
83 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
85 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
87 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
89 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
91 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
93 *vlan_tag = le16_to_cpu(desc->vlan_tag); wq_enet_desc_dec()
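wq_enet_desc_enc()/wq_enet_desc_dec() above are a pure shift-and-mask codec over little-endian descriptor words. A self-contained kernel-style sketch of the same pattern, with an invented two-field layout standing in for the real WQ_ENET_* constants:

#include <linux/types.h>
#include <asm/byteorder.h>

#define EX_LEN_MASK     0x3fff  /* [13:0] length - layout invented */
#define EX_EOP_SHIFT    14      /* [14] end-of-packet flag */

static inline void ex_desc_enc(__le16 *w, u16 len, u8 eop)
{
        *w = cpu_to_le16((len & EX_LEN_MASK) | ((u16)eop << EX_EOP_SHIFT));
}

static inline void ex_desc_dec(__le16 w, u16 *len, u8 *eop)
{
        *len = le16_to_cpu(w) & EX_LEN_MASK;
        *eop = (u8)((le16_to_cpu(w) >> EX_EOP_SHIFT) & 1);
}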
H A Dcq_enet_desc.h31 static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, cq_enet_wq_desc_dec() argument
34 cq_desc_dec((struct cq_desc *)desc, type, cq_enet_wq_desc_dec()
93 static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, cq_enet_rq_desc_dec() argument
102 u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags); cq_enet_rq_desc_dec()
104 le16_to_cpu(desc->q_number_rss_type_flags); cq_enet_rq_desc_dec()
105 u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); cq_enet_rq_desc_dec()
107 cq_desc_dec((struct cq_desc *)desc, type, cq_enet_rq_desc_dec()
124 *rss_hash = le32_to_cpu(desc->rss_hash); cq_enet_rq_desc_dec()
133 *vlan = le16_to_cpu(desc->vlan); cq_enet_rq_desc_dec()
136 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & cq_enet_rq_desc_dec()
138 *fcoe_fc_crc_ok = (desc->flags & cq_enet_rq_desc_dec()
140 *fcoe_enc_error = (desc->flags & cq_enet_rq_desc_dec()
142 *fcoe_eof = (u8)((desc->checksum_fcoe >> cq_enet_rq_desc_dec()
151 *checksum = le16_to_cpu(desc->checksum_fcoe); cq_enet_rq_desc_dec()
155 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; cq_enet_rq_desc_dec()
156 *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; cq_enet_rq_desc_dec()
157 *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; cq_enet_rq_desc_dec()
159 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; cq_enet_rq_desc_dec()
160 *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; cq_enet_rq_desc_dec()
161 *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; cq_enet_rq_desc_dec()
163 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; cq_enet_rq_desc_dec()
164 *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0; cq_enet_rq_desc_dec()
H A Drq_enet_desc.h41 static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, rq_enet_desc_enc() argument
44 desc->address = cpu_to_le64(address); rq_enet_desc_enc()
45 desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) | rq_enet_desc_enc()
49 static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, rq_enet_desc_dec() argument
52 *address = le64_to_cpu(desc->address); rq_enet_desc_dec()
53 *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK; rq_enet_desc_dec()
54 *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) & rq_enet_desc_dec()
H A Dvnic_cq_copy.h27 struct fcpio_fw_req *desc), vnic_cq_copy_service()
31 struct fcpio_fw_req *desc; vnic_cq_copy_service() local
35 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + vnic_cq_copy_service()
37 fcpio_color_dec(desc, &color); vnic_cq_copy_service()
41 if ((*q_service)(cq->vdev, cq->index, desc)) vnic_cq_copy_service()
50 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + vnic_cq_copy_service()
52 fcpio_color_dec(desc, &color); vnic_cq_copy_service()
23 vnic_cq_copy_service( struct vnic_cq *cq, int (*q_service)(struct vnic_dev *vdev, unsigned int index, struct fcpio_fw_req *desc), unsigned int work_to_do) vnic_cq_copy_service() argument
H A Dcq_desc.h57 const struct cq_desc *desc = desc_arg; cq_desc_dec() local
58 const u8 type_color = desc->type_color; cq_desc_dec()
63 * Make sure color bit is read from desc *before* other fields cq_desc_dec()
64 * are read from desc. Hardware guarantees color bit is last cq_desc_dec()
73 *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; cq_desc_dec()
74 *completed_index = le16_to_cpu(desc->completed_index) & cq_desc_dec()
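The comment in cq_desc_dec() above is the crux of the completion-queue protocol: entry ownership is signalled by the color bit, which hardware writes last, so software must order the color check before reading the payload. A sketch of that check, assuming the CQ_DESC_COLOR_SHIFT constant from the same header:

static inline bool ex_cq_entry_ready(const struct cq_desc *desc, u8 my_color)
{
        u8 type_color = READ_ONCE(desc->type_color);

        if (((type_color >> CQ_DESC_COLOR_SHIFT) & 1) != my_color)
                return false;
        rmb();  /* color test must complete before the payload reads */
        return true;
}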
/linux-4.1.27/include/linux/
H A Dirqnr.h11 # define for_each_irq_desc(irq, desc) \
12 for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
13 irq++, desc = irq_to_desc(irq)) \
14 if (!desc) \
19 # define for_each_irq_desc_reverse(irq, desc) \
20 for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \
21 irq--, desc = irq_to_desc(irq)) \
22 if (!desc) \
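The trailing "if (!desc)" makes the iterator skip holes in the sparse descriptor array, so the loop body never sees NULL. Usage sketch (kernel context; the unlocked read of desc->action is racy but adequate for a debug printout):

static void ex_count_requested_irqs(void)
{
        struct irq_desc *desc;
        unsigned int irq, n = 0;

        for_each_irq_desc(irq, desc) {
                if (desc->action)  /* non-NULL once request_irq() succeeded */
                        n++;
        }
        pr_info("%u of %d irqs have handlers\n", n, nr_irqs);
}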
H A Dirqdesc.h96 static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) irq_desc_get_irq_data() argument
98 return &desc->irq_data; irq_desc_get_irq_data()
101 static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) irq_desc_get_chip() argument
103 return desc->irq_data.chip; irq_desc_get_chip()
106 static inline void *irq_desc_get_chip_data(struct irq_desc *desc) irq_desc_get_chip_data() argument
108 return desc->irq_data.chip_data; irq_desc_get_chip_data()
111 static inline void *irq_desc_get_handler_data(struct irq_desc *desc) irq_desc_get_handler_data() argument
113 return desc->irq_data.handler_data; irq_desc_get_handler_data()
116 static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) irq_desc_get_msi_desc() argument
118 return desc->irq_data.msi_desc; irq_desc_get_msi_desc()
127 static inline void generic_handle_irq_desc(unsigned int irq, struct irq_desc *desc) generic_handle_irq_desc() argument
129 desc->handle_irq(irq, desc); generic_handle_irq_desc()
154 struct irq_desc *desc = irq_to_desc(irq); irq_has_action() local
155 return desc->action != NULL; irq_has_action()
162 struct irq_desc *desc; __irq_set_handler_locked() local
164 desc = irq_to_desc(irq); __irq_set_handler_locked()
165 desc->handle_irq = handler; __irq_set_handler_locked()
173 struct irq_desc *desc; __irq_set_chip_handler_name_locked() local
175 desc = irq_to_desc(irq); __irq_set_chip_handler_name_locked()
176 irq_desc_get_irq_data(desc)->chip = chip; __irq_set_chip_handler_name_locked()
177 desc->handle_irq = handler; __irq_set_chip_handler_name_locked()
178 desc->name = name; __irq_set_chip_handler_name_locked()
183 struct irq_desc *desc; irq_balancing_disabled() local
185 desc = irq_to_desc(irq); irq_balancing_disabled()
186 return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; irq_balancing_disabled()
191 struct irq_desc *desc; irq_is_percpu() local
193 desc = irq_to_desc(irq); irq_is_percpu()
194 return desc->status_use_accessors & IRQ_PER_CPU; irq_is_percpu()
200 struct irq_desc *desc = irq_to_desc(irq); irq_set_lockdep_class() local
202 if (desc) irq_set_lockdep_class()
203 lockdep_set_class(&desc->lock, class); irq_set_lockdep_class()
210 struct irq_desc *desc; __irq_set_preflow_handler() local
212 desc = irq_to_desc(irq); __irq_set_preflow_handler()
213 desc->preflow_handler = handler; __irq_set_preflow_handler()
H A Delfnote.h9 * Each note has three parts: a name, a type and a desc. The name is
15 * "desc" field is the actual data. There are no constraints on the
16 * desc field's contents, though typically they're fairly small.
34 * desc data with appropriate padding. The 'desctype' argument is the
55 #define ELFNOTE(name, type, desc) \
57 desc ; \
64 * Elf{32,64}_Nhdr, but includes the name and desc data. The size and
65 * type of name and desc depend on the macro arguments. "name" must
66 * be a literal string, and "desc" must be passed by value. You may
71 #define _ELFNOTE(size, name, unique, type, desc) \
76 typeof(desc) _desc \
85 sizeof(desc), \
89 desc \
91 #define ELFNOTE(size, name, type, desc) \
92 _ELFNOTE(size, name, __LINE__, type, desc)
94 #define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
95 #define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
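Usage is a one-liner: the macro instantiates a static, correctly padded note record in a ".note.<name>" section. A hypothetical example using the C-level helper above (the vendor name, type, and desc value are all invented):

#include <linux/elfnote.h>

/* emits an Elf32_Nhdr-shaped record into section ".note.ExampleCo" */
ELFNOTE32("ExampleCo", 0x100, 0x12345678);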
H A Dirqhandler.h11 typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc);
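A handler matching this 4.1-era two-argument typedef, sketched as a chained demultiplexer built on the irq_desc accessors from irqdesc.h above; the hardware status read and child-IRQ lookup are elided:

#include <linux/irqchip/chained_irq.h>

static void ex_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);

        chained_irq_enter(chip, desc);
        /* read the mux status register, then for each pending child:
         * generic_handle_irq(child_irq); */
        chained_irq_exit(chip, desc);
}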
/linux-4.1.27/net/sunrpc/
H A Dsocklib.c22 * @desc: sk_buff copy helper
29 size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len) xdr_skb_read_bits() argument
31 if (len > desc->count) xdr_skb_read_bits()
32 len = desc->count; xdr_skb_read_bits()
33 if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len))) xdr_skb_read_bits()
35 desc->count -= len; xdr_skb_read_bits()
36 desc->offset += len; xdr_skb_read_bits()
43 * @desc: sk_buff copy helper
49 static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len) xdr_skb_read_and_csum_bits() argument
54 if (len > desc->count) xdr_skb_read_and_csum_bits()
55 len = desc->count; xdr_skb_read_and_csum_bits()
56 pos = desc->offset; xdr_skb_read_and_csum_bits()
57 csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); xdr_skb_read_and_csum_bits()
58 desc->csum = csum_block_add(desc->csum, csum2, pos); xdr_skb_read_and_csum_bits()
59 desc->count -= len; xdr_skb_read_and_csum_bits()
60 desc->offset += len; xdr_skb_read_and_csum_bits()
68 * @desc: sk_buff copy helper
72 ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor) xdr_partial_copy_from_skb() argument
82 ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); xdr_partial_copy_from_skb()
84 if (ret != len || !desc->count) xdr_partial_copy_from_skb()
122 ret = copy_actor(desc, kaddr + base, len); xdr_partial_copy_from_skb()
127 ret = copy_actor(desc, kaddr, len); xdr_partial_copy_from_skb()
132 if (ret != len || !desc->count) xdr_partial_copy_from_skb()
139 copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); xdr_partial_copy_from_skb()
155 struct xdr_skb_reader desc; csum_partial_copy_to_xdr() local
157 desc.skb = skb; csum_partial_copy_to_xdr()
158 desc.offset = sizeof(struct udphdr); csum_partial_copy_to_xdr()
159 desc.count = skb->len - desc.offset; csum_partial_copy_to_xdr()
164 desc.csum = csum_partial(skb->data, desc.offset, skb->csum); csum_partial_copy_to_xdr()
165 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0) csum_partial_copy_to_xdr()
167 if (desc.offset != skb->len) { csum_partial_copy_to_xdr()
169 csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); csum_partial_copy_to_xdr()
170 desc.csum = csum_block_add(desc.csum, csum2, desc.offset); csum_partial_copy_to_xdr()
172 if (desc.count) csum_partial_copy_to_xdr()
174 if (csum_fold(desc.csum)) csum_partial_copy_to_xdr()
181 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0) csum_partial_copy_to_xdr()
183 if (desc.count) csum_partial_copy_to_xdr()
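xdr_partial_copy_from_skb() is parameterized by an xdr_skb_read_actor, which is how the plain copy and the checksumming variant above share one traversal. A minimal custom actor that only advances the reader state — a skip, assuming the xdr_skb_reader fields shown in the hits:

static size_t ex_skb_skip_bits(struct xdr_skb_reader *desc, void *to,
                               size_t len)
{
        if (len > desc->count)
                len = desc->count;
        /* a real actor would fill "to"; this one just consumes input */
        desc->count -= len;
        desc->offset += len;
        return len;
}

/* plugged in as: xdr_partial_copy_from_skb(xdr, 0, &desc, ex_skb_skip_bits); */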
/linux-4.1.27/drivers/infiniband/core/
H A Dpacker.c54 * @desc:Array of structure field descriptions
55 * @desc_len:Number of entries in @desc
60 * controlled by the array of fields in @desc.
62 void ib_pack(const struct ib_field *desc, ib_pack() argument
70 if (desc[i].size_bits <= 32) { ib_pack()
76 shift = 32 - desc[i].offset_bits - desc[i].size_bits; ib_pack()
77 if (desc[i].struct_size_bytes) ib_pack()
78 val = value_read(desc[i].struct_offset_bytes, ib_pack()
79 desc[i].struct_size_bytes, ib_pack()
84 mask = cpu_to_be32(((1ull << desc[i].size_bits) - 1) << shift); ib_pack()
85 addr = (__be32 *) buf + desc[i].offset_words; ib_pack()
87 } else if (desc[i].size_bits <= 64) { ib_pack()
93 shift = 64 - desc[i].offset_bits - desc[i].size_bits; ib_pack()
94 if (desc[i].struct_size_bytes) ib_pack()
95 val = value_read(desc[i].struct_offset_bytes, ib_pack()
96 desc[i].struct_size_bytes, ib_pack()
101 mask = cpu_to_be64((~0ull >> (64 - desc[i].size_bits)) << shift); ib_pack()
102 addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words); ib_pack()
105 if (desc[i].offset_bits % 8 || ib_pack()
106 desc[i].size_bits % 8) { ib_pack()
109 desc[i].field_name, desc[i].size_bits); ib_pack()
112 if (desc[i].struct_size_bytes) ib_pack()
113 memcpy(buf + desc[i].offset_words * 4 + ib_pack()
114 desc[i].offset_bits / 8, ib_pack()
115 structure + desc[i].struct_offset_bytes, ib_pack()
116 desc[i].size_bits / 8); ib_pack()
118 memset(buf + desc[i].offset_words * 4 + ib_pack()
119 desc[i].offset_bits / 8, ib_pack()
121 desc[i].size_bits / 8); ib_pack()
141 * @desc:Array of structure field descriptions
142 * @desc_len:Number of entries in @desc
147 * controlled by the array of fields in @desc.
149 void ib_unpack(const struct ib_field *desc, ib_unpack() argument
157 if (!desc[i].struct_size_bytes) ib_unpack()
160 if (desc[i].size_bits <= 32) { ib_unpack()
166 shift = 32 - desc[i].offset_bits - desc[i].size_bits; ib_unpack()
167 mask = ((1ull << desc[i].size_bits) - 1) << shift; ib_unpack()
168 addr = (__be32 *) buf + desc[i].offset_words; ib_unpack()
170 value_write(desc[i].struct_offset_bytes, ib_unpack()
171 desc[i].struct_size_bytes, ib_unpack()
174 } else if (desc[i].size_bits <= 64) { ib_unpack()
180 shift = 64 - desc[i].offset_bits - desc[i].size_bits; ib_unpack()
181 mask = (~0ull >> (64 - desc[i].size_bits)) << shift; ib_unpack()
182 addr = (__be64 *) buf + desc[i].offset_words; ib_unpack()
184 value_write(desc[i].struct_offset_bytes, ib_unpack()
185 desc[i].struct_size_bytes, ib_unpack()
189 if (desc[i].offset_bits % 8 || ib_unpack()
190 desc[i].size_bits % 8) { ib_unpack()
193 desc[i].field_name, desc[i].size_bits); ib_unpack()
196 memcpy(structure + desc[i].struct_offset_bytes, ib_unpack()
197 buf + desc[i].offset_words * 4 + ib_unpack()
198 desc[i].offset_bits / 8, ib_unpack()
199 desc[i].size_bits / 8); ib_unpack()
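Driving ib_pack() takes a table of ib_field entries describing where each struct member lands in the wire buffer. A hedged one-field example: struct ex_hdr and the offsets are invented, while the ib_field member names follow the accesses above:

struct ex_hdr {
        u32 opcode;
};

static const struct ib_field ex_hdr_table[] = {
        { .struct_offset_bytes = offsetof(struct ex_hdr, opcode),
          .struct_size_bytes   = 4,
          .offset_words        = 0,  /* first 32-bit word of the buffer */
          .offset_bits         = 0,
          .size_bits           = 32,
          .field_name          = "opcode" },
};

static void ex_pack(struct ex_hdr *hdr, void *wire_buf)
{
        ib_pack(ex_hdr_table, ARRAY_SIZE(ex_hdr_table), hdr, wire_buf);
}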
/linux-4.1.27/arch/arm/mach-davinci/
H A Dmux.h18 #define MUX_CFG(soc, desc, muxreg, mode_offset, mode_mask, mux_mode, dbg)\
19 [soc##_##desc] = { \
20 .name = #desc, \
29 #define INT_CFG(soc, desc, mode_offset, mode_mask, mux_mode, dbg) \
30 [soc##_##desc] = { \
31 .name = #desc, \
40 #define EVT_CFG(soc, desc, mode_offset, mode_mask, mux_mode, dbg) \
41 [soc##_##desc] = { \
42 .name = #desc, \
/linux-4.1.27/drivers/regulator/
H A Dhelpers.c36 ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val); regulator_is_enabled_regmap()
40 val &= rdev->desc->enable_mask; regulator_is_enabled_regmap()
42 if (rdev->desc->enable_is_inverted) { regulator_is_enabled_regmap()
43 if (rdev->desc->enable_val) regulator_is_enabled_regmap()
44 return val != rdev->desc->enable_val; regulator_is_enabled_regmap()
47 if (rdev->desc->enable_val) regulator_is_enabled_regmap()
48 return val == rdev->desc->enable_val; regulator_is_enabled_regmap()
67 if (rdev->desc->enable_is_inverted) { regulator_enable_regmap()
68 val = rdev->desc->disable_val; regulator_enable_regmap()
70 val = rdev->desc->enable_val; regulator_enable_regmap()
72 val = rdev->desc->enable_mask; regulator_enable_regmap()
75 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, regulator_enable_regmap()
76 rdev->desc->enable_mask, val); regulator_enable_regmap()
93 if (rdev->desc->enable_is_inverted) { regulator_disable_regmap()
94 val = rdev->desc->enable_val; regulator_disable_regmap()
96 val = rdev->desc->enable_mask; regulator_disable_regmap()
98 val = rdev->desc->disable_val; regulator_disable_regmap()
101 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, regulator_disable_regmap()
102 rdev->desc->enable_mask, val); regulator_disable_regmap()
120 ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val); regulator_get_voltage_sel_regmap()
124 val &= rdev->desc->vsel_mask; regulator_get_voltage_sel_regmap()
125 val >>= ffs(rdev->desc->vsel_mask) - 1; regulator_get_voltage_sel_regmap()
145 sel <<= ffs(rdev->desc->vsel_mask) - 1; regulator_set_voltage_sel_regmap()
147 ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg, regulator_set_voltage_sel_regmap()
148 rdev->desc->vsel_mask, sel); regulator_set_voltage_sel_regmap()
152 if (rdev->desc->apply_bit) regulator_set_voltage_sel_regmap()
153 ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg, regulator_set_voltage_sel_regmap()
154 rdev->desc->apply_bit, regulator_set_voltage_sel_regmap()
155 rdev->desc->apply_bit); regulator_set_voltage_sel_regmap()
182 for (i = 0; i < rdev->desc->n_voltages; i++) { regulator_map_voltage_iterate()
183 ret = rdev->desc->ops->list_voltage(rdev, i); regulator_map_voltage_iterate()
215 for (i = 0; i < rdev->desc->n_voltages; i++) { regulator_map_voltage_ascend()
216 ret = rdev->desc->ops->list_voltage(rdev, i); regulator_map_voltage_ascend()
247 if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) { regulator_map_voltage_linear()
248 if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV) regulator_map_voltage_linear()
254 if (!rdev->desc->uV_step) { regulator_map_voltage_linear()
255 BUG_ON(!rdev->desc->uV_step); regulator_map_voltage_linear()
259 if (min_uV < rdev->desc->min_uV) regulator_map_voltage_linear()
260 min_uV = rdev->desc->min_uV; regulator_map_voltage_linear()
262 ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); regulator_map_voltage_linear()
266 ret += rdev->desc->linear_min_sel; regulator_map_voltage_linear()
269 voltage = rdev->desc->ops->list_voltage(rdev, ret); regulator_map_voltage_linear()
294 if (!rdev->desc->n_linear_ranges) { regulator_map_voltage_linear_range()
295 BUG_ON(!rdev->desc->n_linear_ranges); regulator_map_voltage_linear_range()
299 for (i = 0; i < rdev->desc->n_linear_ranges; i++) { regulator_map_voltage_linear_range()
302 range = &rdev->desc->linear_ranges[i]; regulator_map_voltage_linear_range()
327 if (i == rdev->desc->n_linear_ranges) regulator_map_voltage_linear_range()
331 voltage = rdev->desc->ops->list_voltage(rdev, ret); regulator_map_voltage_linear_range()
352 if (selector >= rdev->desc->n_voltages) regulator_list_voltage_linear()
354 if (selector < rdev->desc->linear_min_sel) regulator_list_voltage_linear()
357 selector -= rdev->desc->linear_min_sel; regulator_list_voltage_linear()
359 return rdev->desc->min_uV + (rdev->desc->uV_step * selector); regulator_list_voltage_linear()
379 if (!rdev->desc->n_linear_ranges) { regulator_list_voltage_linear_range()
380 BUG_ON(!rdev->desc->n_linear_ranges); regulator_list_voltage_linear_range()
384 for (i = 0; i < rdev->desc->n_linear_ranges; i++) { regulator_list_voltage_linear_range()
385 range = &rdev->desc->linear_ranges[i]; regulator_list_voltage_linear_range()
413 if (!rdev->desc->volt_table) { regulator_list_voltage_table()
414 BUG_ON(!rdev->desc->volt_table); regulator_list_voltage_table()
418 if (selector >= rdev->desc->n_voltages) regulator_list_voltage_table()
421 return rdev->desc->volt_table[selector]; regulator_list_voltage_table()
436 val = rdev->desc->bypass_val_on; regulator_set_bypass_regmap()
438 val = rdev->desc->bypass_mask; regulator_set_bypass_regmap()
440 val = rdev->desc->bypass_val_off; regulator_set_bypass_regmap()
443 return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg, regulator_set_bypass_regmap()
444 rdev->desc->bypass_mask, val); regulator_set_bypass_regmap()
459 ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val); regulator_get_bypass_regmap()
463 *enable = val & rdev->desc->bypass_mask; regulator_get_bypass_regmap()
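regulator_map_voltage_linear() and regulator_list_voltage_linear() above are inverses: the map rounds a requested voltage up to a selector, the list converts a selector back to microvolts, and both offset by linear_min_sel. A worked sketch using the 900 mV base, 50 mV step, and linear_min_sel = 1 that the Palmas LDOs below use:

	/* map 1200000 uV -> selector */
	sel = DIV_ROUND_UP(1200000 - 900000, 50000) + 1;	/* 6 + 1 = 7 */
	/* list selector 7 -> microvolts */
	uV  = 900000 + 50000 * (7 - 1);		/* 1200000: round trip OK */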
H A Dpalmas-regulator.c470 pmic->desc[id].enable_val = pmic->current_reg_mode[id]; palmas_set_mode_smps()
869 struct regulator_desc *desc; palmas_ldo_registration() local
883 desc = &pmic->desc[id]; palmas_ldo_registration()
884 desc->name = rinfo->name; palmas_ldo_registration()
885 desc->id = id; palmas_ldo_registration()
886 desc->type = REGULATOR_VOLTAGE; palmas_ldo_registration()
887 desc->owner = THIS_MODULE; palmas_ldo_registration()
890 desc->n_voltages = PALMAS_LDO_NUM_VOLTAGES; palmas_ldo_registration()
892 desc->ops = &palmas_ops_ext_control_ldo; palmas_ldo_registration()
894 desc->ops = &palmas_ops_ldo; palmas_ldo_registration()
895 desc->min_uV = 900000; palmas_ldo_registration()
896 desc->uV_step = 50000; palmas_ldo_registration()
897 desc->linear_min_sel = 1; palmas_ldo_registration()
898 desc->enable_time = 500; palmas_ldo_registration()
899 desc->vsel_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, palmas_ldo_registration()
901 desc->vsel_mask = PALMAS_LDO1_VOLTAGE_VSEL_MASK; palmas_ldo_registration()
902 desc->enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, palmas_ldo_registration()
904 desc->enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE; palmas_ldo_registration()
910 desc->min_uV = 450000; palmas_ldo_registration()
911 desc->uV_step = 25000; palmas_ldo_registration()
917 desc->enable_time = 2000; palmas_ldo_registration()
922 desc->n_voltages = 1; palmas_ldo_registration()
924 desc->ops = &palmas_ops_ext_control_extreg; palmas_ldo_registration()
926 desc->ops = &palmas_ops_extreg; palmas_ldo_registration()
927 desc->enable_reg = palmas_ldo_registration()
930 desc->enable_mask = PALMAS_REGEN1_CTRL_MODE_ACTIVE; palmas_ldo_registration()
938 desc->supply_name = rinfo->sname; palmas_ldo_registration()
941 rdev = devm_regulator_register(pmic->dev, desc, &config); palmas_ldo_registration()
981 struct regulator_desc *desc; tps65917_ldo_registration() local
995 desc = &pmic->desc[id]; tps65917_ldo_registration()
996 desc->name = rinfo->name; tps65917_ldo_registration()
997 desc->id = id; tps65917_ldo_registration()
998 desc->type = REGULATOR_VOLTAGE; tps65917_ldo_registration()
999 desc->owner = THIS_MODULE; tps65917_ldo_registration()
1002 desc->n_voltages = PALMAS_LDO_NUM_VOLTAGES; tps65917_ldo_registration()
1004 desc->ops = &palmas_ops_ext_control_ldo; tps65917_ldo_registration()
1006 desc->ops = &tps65917_ops_ldo; tps65917_ldo_registration()
1007 desc->min_uV = 900000; tps65917_ldo_registration()
1008 desc->uV_step = 50000; tps65917_ldo_registration()
1009 desc->linear_min_sel = 1; tps65917_ldo_registration()
1010 desc->enable_time = 500; tps65917_ldo_registration()
1011 desc->vsel_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, tps65917_ldo_registration()
1013 desc->vsel_mask = PALMAS_LDO1_VOLTAGE_VSEL_MASK; tps65917_ldo_registration()
1014 desc->enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, tps65917_ldo_registration()
1016 desc->enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE; tps65917_ldo_registration()
1021 desc->ramp_delay = 2500; tps65917_ldo_registration()
1023 desc->n_voltages = 1; tps65917_ldo_registration()
1025 desc->ops = &palmas_ops_ext_control_extreg; tps65917_ldo_registration()
1027 desc->ops = &palmas_ops_extreg; tps65917_ldo_registration()
1028 desc->enable_reg = tps65917_ldo_registration()
1031 desc->enable_mask = PALMAS_REGEN1_CTRL_MODE_ACTIVE; tps65917_ldo_registration()
1039 desc->supply_name = rinfo->sname; tps65917_ldo_registration()
1042 rdev = devm_regulator_register(pmic->dev, desc, &config); tps65917_ldo_registration()
1083 struct regulator_desc *desc; palmas_smps_registration() local
1123 desc = &pmic->desc[id]; palmas_smps_registration()
1136 desc->ramp_delay = palmas_smps_ramp_delay[reg & 0x3]; palmas_smps_registration()
1137 pmic->ramp_delay[id] = desc->ramp_delay; palmas_smps_registration()
1151 desc->name = rinfo->name; palmas_smps_registration()
1152 desc->id = id; palmas_smps_registration()
1157 desc->n_voltages = PALMAS_SMPS10_NUM_VOLTAGES; palmas_smps_registration()
1158 desc->ops = &palmas_ops_smps10; palmas_smps_registration()
1159 desc->vsel_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, palmas_smps_registration()
1161 desc->vsel_mask = SMPS10_VSEL; palmas_smps_registration()
1162 desc->enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, palmas_smps_registration()
1165 desc->enable_mask = SMPS10_SWITCH_EN; palmas_smps_registration()
1167 desc->enable_mask = SMPS10_BOOST_EN; palmas_smps_registration()
1168 desc->bypass_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, palmas_smps_registration()
1170 desc->bypass_mask = SMPS10_BYPASS_EN; palmas_smps_registration()
1171 desc->min_uV = 3750000; palmas_smps_registration()
1172 desc->uV_step = 1250000; palmas_smps_registration()
1182 desc->n_linear_ranges = 3; palmas_smps_registration()
1190 desc->linear_ranges = smps_high_ranges; palmas_smps_registration()
1192 desc->linear_ranges = smps_low_ranges; palmas_smps_registration()
1195 desc->ops = &palmas_ops_ext_control_smps; palmas_smps_registration()
1197 desc->ops = &palmas_ops_smps; palmas_smps_registration()
1198 desc->n_voltages = PALMAS_SMPS_NUM_VOLTAGES; palmas_smps_registration()
1199 desc->vsel_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, palmas_smps_registration()
1201 desc->vsel_mask = PALMAS_SMPS12_VOLTAGE_VSEL_MASK; palmas_smps_registration()
1211 desc->enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, palmas_smps_registration()
1213 desc->enable_mask = PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK; palmas_smps_registration()
1215 desc->enable_val = SMPS_CTRL_MODE_ON; palmas_smps_registration()
1218 desc->type = REGULATOR_VOLTAGE; palmas_smps_registration()
1219 desc->owner = THIS_MODULE; palmas_smps_registration()
1226 desc->supply_name = rinfo->sname; palmas_smps_registration()
1229 rdev = devm_regulator_register(pmic->dev, desc, &config); palmas_smps_registration()
1255 struct regulator_desc *desc; tps65917_smps_registration() local
1262 desc = &pmic->desc[id]; tps65917_smps_registration()
1263 desc->n_linear_ranges = 3; tps65917_smps_registration()
1279 desc->name = rinfo->name; tps65917_smps_registration()
1280 desc->id = id; tps65917_smps_registration()
1297 desc->linear_ranges = smps_high_ranges; tps65917_smps_registration()
1299 desc->linear_ranges = smps_low_ranges; tps65917_smps_registration()
1302 desc->ops = &tps65917_ops_ext_control_smps; tps65917_smps_registration()
1304 desc->ops = &tps65917_ops_smps; tps65917_smps_registration()
1305 desc->n_voltages = PALMAS_SMPS_NUM_VOLTAGES; tps65917_smps_registration()
1306 desc->vsel_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, tps65917_smps_registration()
1308 desc->vsel_mask = PALMAS_SMPS12_VOLTAGE_VSEL_MASK; tps65917_smps_registration()
1309 desc->ramp_delay = 2500; tps65917_smps_registration()
1318 desc->enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, tps65917_smps_registration()
1320 desc->enable_mask = PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK; tps65917_smps_registration()
1322 desc->enable_val = SMPS_CTRL_MODE_ON; tps65917_smps_registration()
1324 desc->type = REGULATOR_VOLTAGE; tps65917_smps_registration()
1325 desc->owner = THIS_MODULE; tps65917_smps_registration()
1332 desc->supply_name = rinfo->sname; tps65917_smps_registration()
1335 rdev = devm_regulator_register(pmic->dev, desc, &config); tps65917_smps_registration()
H A Dwm831x-ldo.c40 struct regulator_desc desc; member in struct:wm831x_ldo
252 ldo->desc.name = ldo->name; wm831x_gp_ldo_probe()
256 ldo->desc.supply_name = ldo->supply_name; wm831x_gp_ldo_probe()
258 ldo->desc.id = id; wm831x_gp_ldo_probe()
259 ldo->desc.type = REGULATOR_VOLTAGE; wm831x_gp_ldo_probe()
260 ldo->desc.n_voltages = 32; wm831x_gp_ldo_probe()
261 ldo->desc.ops = &wm831x_gp_ldo_ops; wm831x_gp_ldo_probe()
262 ldo->desc.owner = THIS_MODULE; wm831x_gp_ldo_probe()
263 ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL; wm831x_gp_ldo_probe()
264 ldo->desc.vsel_mask = WM831X_LDO1_ON_VSEL_MASK; wm831x_gp_ldo_probe()
265 ldo->desc.enable_reg = WM831X_LDO_ENABLE; wm831x_gp_ldo_probe()
266 ldo->desc.enable_mask = 1 << id; wm831x_gp_ldo_probe()
267 ldo->desc.bypass_reg = ldo->base; wm831x_gp_ldo_probe()
268 ldo->desc.bypass_mask = WM831X_LDO1_SWI; wm831x_gp_ldo_probe()
269 ldo->desc.linear_ranges = wm831x_gp_ldo_ranges; wm831x_gp_ldo_probe()
270 ldo->desc.n_linear_ranges = ARRAY_SIZE(wm831x_gp_ldo_ranges); wm831x_gp_ldo_probe()
278 ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc, wm831x_gp_ldo_probe()
461 ldo->desc.name = ldo->name; wm831x_aldo_probe()
465 ldo->desc.supply_name = ldo->supply_name; wm831x_aldo_probe()
467 ldo->desc.id = id; wm831x_aldo_probe()
468 ldo->desc.type = REGULATOR_VOLTAGE; wm831x_aldo_probe()
469 ldo->desc.n_voltages = 32; wm831x_aldo_probe()
470 ldo->desc.linear_ranges = wm831x_aldo_ranges; wm831x_aldo_probe()
471 ldo->desc.n_linear_ranges = ARRAY_SIZE(wm831x_aldo_ranges); wm831x_aldo_probe()
472 ldo->desc.ops = &wm831x_aldo_ops; wm831x_aldo_probe()
473 ldo->desc.owner = THIS_MODULE; wm831x_aldo_probe()
474 ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL; wm831x_aldo_probe()
475 ldo->desc.vsel_mask = WM831X_LDO7_ON_VSEL_MASK; wm831x_aldo_probe()
476 ldo->desc.enable_reg = WM831X_LDO_ENABLE; wm831x_aldo_probe()
477 ldo->desc.enable_mask = 1 << id; wm831x_aldo_probe()
478 ldo->desc.bypass_reg = ldo->base; wm831x_aldo_probe()
479 ldo->desc.bypass_mask = WM831X_LDO7_SWI; wm831x_aldo_probe()
487 ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc, wm831x_aldo_probe()
605 ldo->desc.name = ldo->name; wm831x_alive_ldo_probe()
609 ldo->desc.supply_name = ldo->supply_name; wm831x_alive_ldo_probe()
611 ldo->desc.id = id; wm831x_alive_ldo_probe()
612 ldo->desc.type = REGULATOR_VOLTAGE; wm831x_alive_ldo_probe()
613 ldo->desc.n_voltages = WM831X_ALIVE_LDO_MAX_SELECTOR + 1; wm831x_alive_ldo_probe()
614 ldo->desc.ops = &wm831x_alive_ldo_ops; wm831x_alive_ldo_probe()
615 ldo->desc.owner = THIS_MODULE; wm831x_alive_ldo_probe()
616 ldo->desc.vsel_reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL; wm831x_alive_ldo_probe()
617 ldo->desc.vsel_mask = WM831X_LDO11_ON_VSEL_MASK; wm831x_alive_ldo_probe()
618 ldo->desc.enable_reg = WM831X_LDO_ENABLE; wm831x_alive_ldo_probe()
619 ldo->desc.enable_mask = 1 << id; wm831x_alive_ldo_probe()
620 ldo->desc.min_uV = 800000; wm831x_alive_ldo_probe()
621 ldo->desc.uV_step = 50000; wm831x_alive_ldo_probe()
622 ldo->desc.enable_time = 1000; wm831x_alive_ldo_probe()
630 ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc, wm831x_alive_ldo_probe()
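The Palmas and WM831x probes above follow one recipe: populate a struct regulator_desc with ops, voltage range, and register/mask pairs, then hand it to devm_regulator_register(). A minimal hedged sketch of that recipe (the ops table, registers, and masks are placeholders):

	static const struct regulator_desc my_ldo_desc = {
		.name        = "my-ldo",
		.id          = 0,
		.type        = REGULATOR_VOLTAGE,
		.owner       = THIS_MODULE,
		.ops         = &my_ldo_ops,	/* hypothetical ops table */
		.n_voltages  = 32,
		.min_uV      = 800000,
		.uV_step     = 50000,
		.vsel_reg    = 0x10,		/* placeholder registers */
		.vsel_mask   = 0x1f,
		.enable_reg  = 0x11,
		.enable_mask = BIT(0),
	};

	/* in probe(), with config.dev/.regmap/.init_data filled in: */
	rdev = devm_regulator_register(dev, &my_ldo_desc, &config);
	if (IS_ERR(rdev))
		return PTR_ERR(rdev);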
H A Dqcom_rpm-regulator.c60 struct regulator_desc desc; member in struct:qcom_rpm_reg
454 .desc.linear_ranges = pldo_ranges,
455 .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges),
456 .desc.n_voltages = 161,
457 .desc.ops = &mV_ops,
464 .desc.linear_ranges = nldo_ranges,
465 .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges),
466 .desc.n_voltages = 64,
467 .desc.ops = &mV_ops,
474 .desc.linear_ranges = smps_ranges,
475 .desc.n_linear_ranges = ARRAY_SIZE(smps_ranges),
476 .desc.n_voltages = 154,
477 .desc.ops = &mV_ops,
484 .desc.linear_ranges = ncp_ranges,
485 .desc.n_linear_ranges = ARRAY_SIZE(ncp_ranges),
486 .desc.n_voltages = 32,
487 .desc.ops = &mV_ops,
492 .desc.ops = &switch_ops,
500 .desc.linear_ranges = pldo_ranges,
501 .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges),
502 .desc.n_voltages = 161,
503 .desc.ops = &mV_ops,
510 .desc.linear_ranges = nldo_ranges,
511 .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges),
512 .desc.n_voltages = 64,
513 .desc.ops = &mV_ops,
520 .desc.linear_ranges = ftsmps_ranges,
521 .desc.n_linear_ranges = ARRAY_SIZE(ftsmps_ranges),
522 .desc.n_voltages = 101,
523 .desc.ops = &mV_ops,
530 .desc.ops = &switch_ops,
538 .desc.linear_ranges = pldo_ranges,
539 .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges),
540 .desc.n_voltages = 161,
541 .desc.ops = &uV_ops,
548 .desc.linear_ranges = nldo_ranges,
549 .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges),
550 .desc.n_voltages = 64,
551 .desc.ops = &uV_ops,
558 .desc.linear_ranges = nldo1200_ranges,
559 .desc.n_linear_ranges = ARRAY_SIZE(nldo1200_ranges),
560 .desc.n_voltages = 124,
561 .desc.ops = &uV_ops,
568 .desc.linear_ranges = smps_ranges,
569 .desc.n_linear_ranges = ARRAY_SIZE(smps_ranges),
570 .desc.n_voltages = 154,
571 .desc.ops = &uV_ops,
578 .desc.linear_ranges = ftsmps_ranges,
579 .desc.n_linear_ranges = ARRAY_SIZE(ftsmps_ranges),
580 .desc.n_voltages = 101,
581 .desc.ops = &uV_ops,
588 .desc.linear_ranges = ncp_ranges,
589 .desc.n_linear_ranges = ARRAY_SIZE(ncp_ranges),
590 .desc.n_voltages = 32,
591 .desc.ops = &uV_ops,
596 .desc.ops = &switch_ops,
601 .desc.linear_ranges = smb208_ranges,
602 .desc.n_linear_ranges = ARRAY_SIZE(smb208_ranges),
603 .desc.n_voltages = 235,
604 .desc.ops = &uV_ops,
657 const struct regulator_desc *desc, rpm_reg_of_parse()
908 vreg->desc.id = -1; rpm_reg_probe()
909 vreg->desc.owner = THIS_MODULE; rpm_reg_probe()
910 vreg->desc.type = REGULATOR_VOLTAGE; rpm_reg_probe()
911 vreg->desc.name = reg->name; rpm_reg_probe()
912 vreg->desc.supply_name = reg->supply; rpm_reg_probe()
913 vreg->desc.of_match = reg->name; rpm_reg_probe()
914 vreg->desc.of_parse_cb = rpm_reg_of_parse; rpm_reg_probe()
918 rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config); rpm_reg_probe()
656 rpm_reg_of_parse(struct device_node *node, const struct regulator_desc *desc, struct regulator_config *config) rpm_reg_of_parse() argument
H A Ddb8500-prcmu.c33 info->desc.name); db8500_regulator_enable()
53 info->desc.name); db8500_regulator_disable()
72 " %i\n", info->desc.name, info->is_enabled); db8500_regulator_is_enabled()
150 info->desc.name); db8500_regulator_switch_enable()
156 info->desc.name); db8500_regulator_switch_enable()
174 info->desc.name); db8500_regulator_switch_disable()
180 info->desc.name); db8500_regulator_switch_disable()
198 info->desc.name, info->is_enabled); db8500_regulator_switch_is_enabled()
215 .desc = {
224 .desc = {
233 .desc = {
242 .desc = {
251 .desc = {
260 .desc = {
272 .desc = {
281 .desc = {
290 .desc = {
300 .desc = {
311 .desc = {
321 .desc = {
331 .desc = {
342 .desc = {
352 .desc = {
362 .desc = {
372 .desc = {
383 .desc = {
394 .desc = {
405 .desc = {
436 info->rdev = devm_regulator_register(&pdev->dev, &info->desc, &config); db8500_regulator_register()
440 info->desc.name, err); db8500_regulator_register()
445 "regulator-%s-probed\n", info->desc.name); db8500_regulator_register()
H A Dpbias-regulator.c40 struct regulator_desc desc; member in struct:pbias_regulator_data
154 drvdata[data_idx].desc.name = info->name; pbias_regulator_probe()
155 drvdata[data_idx].desc.owner = THIS_MODULE; pbias_regulator_probe()
156 drvdata[data_idx].desc.type = REGULATOR_VOLTAGE; pbias_regulator_probe()
157 drvdata[data_idx].desc.ops = &pbias_regulator_voltage_ops; pbias_regulator_probe()
158 drvdata[data_idx].desc.volt_table = pbias_volt_table; pbias_regulator_probe()
159 drvdata[data_idx].desc.n_voltages = 2; pbias_regulator_probe()
160 drvdata[data_idx].desc.enable_time = info->enable_time; pbias_regulator_probe()
161 drvdata[data_idx].desc.vsel_reg = res->start; pbias_regulator_probe()
162 drvdata[data_idx].desc.vsel_mask = info->vmode; pbias_regulator_probe()
163 drvdata[data_idx].desc.enable_reg = res->start; pbias_regulator_probe()
164 drvdata[data_idx].desc.enable_mask = info->enable_mask; pbias_regulator_probe()
165 drvdata[data_idx].desc.enable_val = info->enable; pbias_regulator_probe()
166 drvdata[data_idx].desc.disable_val = info->disable_val; pbias_regulator_probe()
173 &drvdata[data_idx].desc, &cfg); pbias_regulator_probe()
H A Dof_regulator.c29 const struct regulator_desc *desc) of_get_regulation_constraints()
86 if (desc && desc->of_map_mode) { of_get_regulation_constraints()
87 ret = desc->of_map_mode(pval); of_get_regulation_constraints()
119 if (desc && desc->of_map_mode) { of_get_regulation_constraints()
120 ret = desc->of_map_mode(pval); of_get_regulation_constraints()
153 * @desc: regulator description
161 const struct regulator_desc *desc) of_get_regulator_init_data()
172 of_get_regulation_constraints(node, &init_data, desc); of_get_regulator_init_data()
254 match->desc); for_each_child_of_node()
272 const struct regulator_desc *desc, regulator_of_get_init_data()
280 if (!dev->of_node || !desc->of_match) regulator_of_get_init_data()
283 if (desc->regulators_node) regulator_of_get_init_data()
285 desc->regulators_node); regulator_of_get_init_data()
291 desc->regulators_node); regulator_of_get_init_data()
300 if (strcmp(desc->of_match, name)) for_each_child_of_node()
303 init_data = of_get_regulator_init_data(dev, child, desc); for_each_child_of_node()
311 if (desc->of_parse_cb) { for_each_child_of_node()
312 if (desc->of_parse_cb(child, desc, config)) { for_each_child_of_node()
27 of_get_regulation_constraints(struct device_node *np, struct regulator_init_data **init_data, const struct regulator_desc *desc) of_get_regulation_constraints() argument
159 of_get_regulator_init_data(struct device *dev, struct device_node *node, const struct regulator_desc *desc) of_get_regulator_init_data() argument
271 regulator_of_get_init_data(struct device *dev, const struct regulator_desc *desc, struct regulator_config *config, struct device_node **node) regulator_of_get_init_data() argument
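regulator_of_get_init_data() above finds the child of desc->regulators_node whose name matches desc->of_match, parses its constraints, and lets desc->of_parse_cb veto or extend the result. A hedged sketch of a descriptor wired for that path (the property and node names are illustrative):

	static int my_of_parse(struct device_node *np,
			       const struct regulator_desc *desc,
			       struct regulator_config *config)
	{
		/* e.g. require a made-up vendor property */
		if (!of_get_property(np, "vendor,extra-prop", NULL))
			return -EINVAL;
		return 0;
	}

	static const struct regulator_desc my_desc = {
		.name            = "ldo1",
		.of_match        = of_match_ptr("ldo1"),
		.regulators_node = of_match_ptr("regulators"),
		.of_parse_cb     = my_of_parse,
		/* voltage and register fields as in the sketch above */
	};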
H A Dtps65023-regulator.c136 struct regulator_desc desc[TPS65023_NUM_REGULATOR]; member in struct:tps_pmic
241 tps->desc[i].name = info->name; tps_65023_probe()
242 tps->desc[i].id = i; tps_65023_probe()
243 tps->desc[i].n_voltages = info->table_len; tps_65023_probe()
244 tps->desc[i].volt_table = info->table; tps_65023_probe()
245 tps->desc[i].ops = (i > TPS65023_DCDC_3 ? tps_65023_probe()
247 tps->desc[i].type = REGULATOR_VOLTAGE; tps_65023_probe()
248 tps->desc[i].owner = THIS_MODULE; tps_65023_probe()
250 tps->desc[i].enable_reg = TPS65023_REG_REG_CTRL; tps_65023_probe()
253 tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL; tps_65023_probe()
254 tps->desc[i].vsel_mask = 0x07; tps_65023_probe()
255 tps->desc[i].enable_mask = 1 << 1; tps_65023_probe()
258 tps->desc[i].vsel_reg = TPS65023_REG_LDO_CTRL; tps_65023_probe()
259 tps->desc[i].vsel_mask = 0x70; tps_65023_probe()
260 tps->desc[i].enable_mask = 1 << 2; tps_65023_probe()
263 tps->desc[i].enable_mask = tps_65023_probe()
265 tps->desc[i].vsel_reg = TPS65023_REG_DEF_CORE; tps_65023_probe()
266 tps->desc[i].vsel_mask = info->table_len - 1; tps_65023_probe()
267 tps->desc[i].apply_reg = TPS65023_REG_CON_CTRL2; tps_65023_probe()
268 tps->desc[i].apply_bit = TPS65023_REG_CTRL2_GO; tps_65023_probe()
277 rdev = devm_regulator_register(&client->dev, &tps->desc[i], tps_65023_probe()
H A Dltc3589.c87 struct regulator_desc desc; member in struct:ltc3589_regulator
119 shift = ffs(rdev->desc->apply_bit) - 1; ltc3589_set_ramp_delay()
142 return regmap_update_bits(ltc3589->regmap, rdev->desc->vsel_reg + 1, ltc3589_set_suspend_voltage()
143 rdev->desc->vsel_mask, sel); ltc3589_set_suspend_voltage()
153 mask = rdev->desc->apply_bit << 1; ltc3589_set_suspend_mode()
158 mask |= rdev->desc->apply_bit; ltc3589_set_suspend_mode()
159 bit |= rdev->desc->apply_bit; ltc3589_set_suspend_mode()
201 .desc = { \
279 struct ltc3589_regulator *desc = &ltc3589->regulator_descs[i]; ltc3589_parse_regulators_dt() local
291 desc->r1 = vdiv[0]; ltc3589_parse_regulators_dt()
292 desc->r2 = vdiv[1]; ltc3589_parse_regulators_dt()
450 struct regulator_desc *desc = &rdesc->desc; ltc3589_apply_fb_voltage_divider() local
455 desc->min_uV = ltc3589_scale(desc->min_uV, rdesc->r1, rdesc->r2); ltc3589_apply_fb_voltage_divider()
456 desc->uV_step = ltc3589_scale(desc->uV_step, rdesc->r1, rdesc->r2); ltc3589_apply_fb_voltage_divider()
457 desc->fixed_uV = ltc3589_scale(desc->fixed_uV, rdesc->r1, rdesc->r2); ltc3589_apply_fb_voltage_divider()
479 descs[LTC3589_LDO3].desc.fixed_uV = 1800000; ltc3589_probe()
480 descs[LTC3589_LDO4].desc.volt_table = ltc3589_ldo4; ltc3589_probe()
482 descs[LTC3589_LDO3].desc.fixed_uV = 2800000; ltc3589_probe()
483 descs[LTC3589_LDO4].desc.volt_table = ltc3589_12_ldo4; ltc3589_probe()
499 struct regulator_desc *desc = &rdesc->desc; ltc3589_probe() local
513 ltc3589->regulators[i] = devm_regulator_register(dev, desc, ltc3589_probe()
518 desc->name, ret); ltc3589_probe()
H A Dvexpress.c27 struct regulator_desc desc; member in struct:vexpress_regulator
72 reg->desc.name = dev_name(&pdev->dev); vexpress_regulator_probe()
73 reg->desc.type = REGULATOR_VOLTAGE; vexpress_regulator_probe()
74 reg->desc.owner = THIS_MODULE; vexpress_regulator_probe()
75 reg->desc.continuous_voltage_range = true; vexpress_regulator_probe()
78 &reg->desc); vexpress_regulator_probe()
84 reg->desc.ops = &vexpress_regulator_ops; vexpress_regulator_probe()
86 reg->desc.ops = &vexpress_regulator_ops_ro; vexpress_regulator_probe()
93 reg->regdev = devm_regulator_register(&pdev->dev, &reg->desc, &config); vexpress_regulator_probe()
H A Dfixed.c35 struct regulator_desc desc; member in struct:fixed_voltage_data
43 * @desc: regulator description
51 const struct regulator_desc *desc) of_get_fixed_voltage_config()
62 config->init_data = of_get_regulator_init_data(dev, dev->of_node, desc); of_get_fixed_voltage_config()
124 &drvdata->desc); reg_fixed_voltage_probe()
134 drvdata->desc.name = devm_kstrdup(&pdev->dev, reg_fixed_voltage_probe()
137 if (drvdata->desc.name == NULL) { reg_fixed_voltage_probe()
141 drvdata->desc.type = REGULATOR_VOLTAGE; reg_fixed_voltage_probe()
142 drvdata->desc.owner = THIS_MODULE; reg_fixed_voltage_probe()
143 drvdata->desc.ops = &fixed_voltage_ops; reg_fixed_voltage_probe()
145 drvdata->desc.enable_time = config->startup_delay; reg_fixed_voltage_probe()
148 drvdata->desc.supply_name = devm_kstrdup(&pdev->dev, reg_fixed_voltage_probe()
151 if (!drvdata->desc.supply_name) { reg_fixed_voltage_probe()
159 drvdata->desc.n_voltages = 1; reg_fixed_voltage_probe()
161 drvdata->desc.fixed_uV = config->microvolts; reg_fixed_voltage_probe()
188 drvdata->dev = devm_regulator_register(&pdev->dev, &drvdata->desc, reg_fixed_voltage_probe()
198 dev_dbg(&pdev->dev, "%s supplying %duV\n", drvdata->desc.name, reg_fixed_voltage_probe()
199 drvdata->desc.fixed_uV); reg_fixed_voltage_probe()
50 of_get_fixed_voltage_config(struct device *dev, const struct regulator_desc *desc) of_get_fixed_voltage_config() argument
/linux-4.1.27/arch/arm/crypto/
H A Dsha1.h7 extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
10 extern int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
H A Dsha256_glue.h8 int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
11 int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
H A Dsha1-ce-glue.c30 static int sha1_ce_update(struct shash_desc *desc, const u8 *data, sha1_ce_update() argument
33 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_ce_update()
37 return sha1_update_arm(desc, data, len); sha1_ce_update()
40 sha1_base_do_update(desc, data, len, sha1_ce_transform); sha1_ce_update()
46 static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, sha1_ce_finup() argument
50 return sha1_finup_arm(desc, data, len, out); sha1_ce_finup()
54 sha1_base_do_update(desc, data, len, sha1_ce_transform); sha1_ce_finup()
55 sha1_base_do_finalize(desc, sha1_ce_transform); sha1_ce_finup()
58 return sha1_base_finish(desc, out); sha1_ce_finup()
61 static int sha1_ce_final(struct shash_desc *desc, u8 *out) sha1_ce_final() argument
63 return sha1_ce_finup(desc, NULL, 0, out); sha1_ce_final()
H A Dsha1_neon_glue.c37 static int sha1_neon_update(struct shash_desc *desc, const u8 *data, sha1_neon_update() argument
40 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_neon_update()
44 return sha1_update_arm(desc, data, len); sha1_neon_update()
47 sha1_base_do_update(desc, data, len, sha1_neon_update()
54 static int sha1_neon_finup(struct shash_desc *desc, const u8 *data, sha1_neon_finup() argument
58 return sha1_finup_arm(desc, data, len, out); sha1_neon_finup()
62 sha1_base_do_update(desc, data, len, sha1_neon_finup()
64 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_neon); sha1_neon_finup()
67 return sha1_base_finish(desc, out); sha1_neon_finup()
70 static int sha1_neon_final(struct shash_desc *desc, u8 *out) sha1_neon_final() argument
72 return sha1_neon_finup(desc, NULL, 0, out); sha1_neon_final()
H A Dsha2-ce-glue.c31 static int sha2_ce_update(struct shash_desc *desc, const u8 *data, sha2_ce_update() argument
34 struct sha256_state *sctx = shash_desc_ctx(desc); sha2_ce_update()
38 return crypto_sha256_arm_update(desc, data, len); sha2_ce_update()
41 sha256_base_do_update(desc, data, len, sha2_ce_update()
48 static int sha2_ce_finup(struct shash_desc *desc, const u8 *data, sha2_ce_finup() argument
52 return crypto_sha256_arm_finup(desc, data, len, out); sha2_ce_finup()
56 sha256_base_do_update(desc, data, len, sha2_ce_finup()
58 sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); sha2_ce_finup()
61 return sha256_base_finish(desc, out); sha2_ce_finup()
64 static int sha2_ce_final(struct shash_desc *desc, u8 *out) sha2_ce_final() argument
66 return sha2_ce_finup(desc, NULL, 0, out); sha2_ce_final()
H A Dsha256_neon_glue.c32 static int sha256_update(struct shash_desc *desc, const u8 *data, sha256_update() argument
35 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_update()
39 return crypto_sha256_arm_update(desc, data, len); sha256_update()
42 sha256_base_do_update(desc, data, len, sha256_update()
49 static int sha256_finup(struct shash_desc *desc, const u8 *data, sha256_finup() argument
53 return crypto_sha256_arm_finup(desc, data, len, out); sha256_finup()
57 sha256_base_do_update(desc, data, len, sha256_finup()
59 sha256_base_do_finalize(desc, sha256_finup()
63 return sha256_base_finish(desc, out); sha256_finup()
66 static int sha256_final(struct shash_desc *desc, u8 *out) sha256_final() argument
68 return sha256_finup(desc, NULL, 0, out); sha256_final()
H A Dsha1_glue.c33 int sha1_update_arm(struct shash_desc *desc, const u8 *data, sha1_update_arm() argument
39 return sha1_base_do_update(desc, data, len, sha1_update_arm()
44 static int sha1_final(struct shash_desc *desc, u8 *out) sha1_final() argument
46 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_block_data_order); sha1_final()
47 return sha1_base_finish(desc, out); sha1_final()
50 int sha1_finup_arm(struct shash_desc *desc, const u8 *data, sha1_finup_arm() argument
53 sha1_base_do_update(desc, data, len, sha1_finup_arm()
55 return sha1_final(desc, out); sha1_finup_arm()
H A Dsha256_glue.c36 int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, crypto_sha256_arm_update() argument
42 return sha256_base_do_update(desc, data, len, crypto_sha256_arm_update()
47 static int sha256_final(struct shash_desc *desc, u8 *out) sha256_final() argument
49 sha256_base_do_finalize(desc, sha256_final()
51 return sha256_base_finish(desc, out); sha256_final()
54 int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data, crypto_sha256_arm_finup() argument
57 sha256_base_do_update(desc, data, len, crypto_sha256_arm_finup()
59 return sha256_final(desc, out); crypto_sha256_arm_finup()
H A Dsha512_neon_glue.c79 static int sha512_neon_init(struct shash_desc *desc) sha512_neon_init() argument
81 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_init()
96 static int __sha512_neon_update(struct shash_desc *desc, const u8 *data, __sha512_neon_update() argument
99 struct sha512_state *sctx = shash_desc_ctx(desc); __sha512_neon_update()
126 static int sha512_neon_update(struct shash_desc *desc, const u8 *data, sha512_neon_update() argument
129 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_update()
144 res = crypto_sha512_update(desc, data, len); sha512_neon_update()
147 res = __sha512_neon_update(desc, data, len, partial); sha512_neon_update()
156 static int sha512_neon_final(struct shash_desc *desc, u8 *out) sha512_neon_final() argument
158 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_final()
173 crypto_sha512_update(desc, padding, padlen); sha512_neon_final()
174 crypto_sha512_update(desc, (const u8 *)&bits, sizeof(bits)); sha512_neon_final()
184 __sha512_neon_update(desc, padding, padlen, index); sha512_neon_final()
186 __sha512_neon_update(desc, (const u8 *)&bits, sha512_neon_final()
201 static int sha512_neon_export(struct shash_desc *desc, void *out) sha512_neon_export() argument
203 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_export()
210 static int sha512_neon_import(struct shash_desc *desc, const void *in) sha512_neon_import() argument
212 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_import()
219 static int sha384_neon_init(struct shash_desc *desc) sha384_neon_init() argument
221 struct sha512_state *sctx = shash_desc_ctx(desc); sha384_neon_init()
237 static int sha384_neon_final(struct shash_desc *desc, u8 *hash) sha384_neon_final() argument
241 sha512_neon_final(desc, D); sha384_neon_final()
H A Daes-ce-glue.c166 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
169 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_encrypt()
174 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ecb_encrypt()
176 err = blkcipher_walk_virt(desc, &walk); ecb_encrypt()
182 err = blkcipher_walk_done(desc, &walk, ecb_encrypt()
189 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
192 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_decrypt()
197 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ecb_decrypt()
199 err = blkcipher_walk_virt(desc, &walk); ecb_decrypt()
205 err = blkcipher_walk_done(desc, &walk, ecb_decrypt()
212 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
215 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_encrypt()
220 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_encrypt()
222 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
229 err = blkcipher_walk_done(desc, &walk, cbc_encrypt()
236 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
239 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_decrypt()
244 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_decrypt()
246 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
253 err = blkcipher_walk_done(desc, &walk, cbc_decrypt()
260 static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_encrypt() argument
263 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_encrypt()
267 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ctr_encrypt()
269 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); ctr_encrypt()
279 err = blkcipher_walk_done(desc, &walk, ctr_encrypt()
296 err = blkcipher_walk_done(desc, &walk, 0); ctr_encrypt()
303 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_encrypt() argument
306 struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt()
311 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; xts_encrypt()
313 err = blkcipher_walk_virt(desc, &walk); xts_encrypt()
320 err = blkcipher_walk_done(desc, &walk, xts_encrypt()
328 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_decrypt() argument
331 struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt()
336 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; xts_decrypt()
338 err = blkcipher_walk_virt(desc, &walk); xts_decrypt()
345 err = blkcipher_walk_done(desc, &walk, xts_decrypt()
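Every blkcipher hook above runs the same walk loop: blkcipher_walk_virt() maps the next span of the scatterlists, the cipher processes the full blocks, and blkcipher_walk_done() hands back any unprocessed tail. A hedged skeleton of that loop, with the per-block cipher call elided:

	struct blkcipher_walk walk;
	unsigned int nbytes;
	int err;

	blkcipher_walk_init(&walk, dst, src, total);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		/* process full blocks:
		 * walk.src.virt.addr -> walk.dst.virt.addr */
		err = blkcipher_walk_done(desc, &walk,
					  nbytes % AES_BLOCK_SIZE);
	}
	return err;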
H A Dghash-ce-glue.c46 static int ghash_init(struct shash_desc *desc) ghash_init() argument
48 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_init()
54 static int ghash_update(struct shash_desc *desc, const u8 *src, ghash_update() argument
57 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_update()
63 struct ghash_key *key = crypto_shash_ctx(desc->tfm); ghash_update()
89 static int ghash_final(struct shash_desc *desc, u8 *dst) ghash_final() argument
91 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_final()
95 struct ghash_key *key = crypto_shash_ctx(desc->tfm); ghash_final()
163 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_init() local
166 desc->tfm = child; ghash_async_init()
167 desc->flags = req->base.flags; ghash_async_init()
168 return crypto_shash_init(desc); ghash_async_init()
185 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_update() local
186 return shash_ahash_update(req, desc); ghash_async_update()
203 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_final() local
204 return crypto_shash_final(desc, req->result); ghash_async_final()
220 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_digest() local
223 desc->tfm = child; ghash_async_digest()
224 desc->flags = req->base.flags; ghash_async_digest()
225 return shash_ahash_digest(req, desc); ghash_async_digest()
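All of the glue drivers above implement the shash interface, where a struct shash_desc carries the transform pointer plus per-request state through init/update/final. A hedged sketch of a caller-side one-shot digest (error handling trimmed):

	struct crypto_shash *tfm = crypto_alloc_shash("sha256", 0, 0);
	SHASH_DESC_ON_STACK(desc, tfm);		/* desc + ctx on the stack */
	u8 out[SHA256_DIGEST_SIZE];

	desc->tfm = tfm;
	desc->flags = 0;			/* may not sleep */
	crypto_shash_digest(desc, data, len, out);
	crypto_free_shash(tfm);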
/linux-4.1.27/drivers/staging/skein/
H A Dskein_generic.c24 static int skein256_init(struct shash_desc *desc) skein256_init() argument
26 return skein_256_init((struct skein_256_ctx *) shash_desc_ctx(desc), skein256_init()
30 static int skein256_update(struct shash_desc *desc, const u8 *data, skein256_update() argument
33 return skein_256_update((struct skein_256_ctx *)shash_desc_ctx(desc), skein256_update()
37 static int skein256_final(struct shash_desc *desc, u8 *out) skein256_final() argument
39 return skein_256_final((struct skein_256_ctx *)shash_desc_ctx(desc), skein256_final()
43 static int skein256_export(struct shash_desc *desc, void *out) skein256_export() argument
45 struct skein_256_ctx *sctx = shash_desc_ctx(desc); skein256_export()
51 static int skein256_import(struct shash_desc *desc, const void *in) skein256_import() argument
53 struct skein_256_ctx *sctx = shash_desc_ctx(desc); skein256_import()
59 static int skein512_init(struct shash_desc *desc) skein512_init() argument
61 return skein_512_init((struct skein_512_ctx *)shash_desc_ctx(desc), skein512_init()
65 static int skein512_update(struct shash_desc *desc, const u8 *data, skein512_update() argument
68 return skein_512_update((struct skein_512_ctx *)shash_desc_ctx(desc), skein512_update()
72 static int skein512_final(struct shash_desc *desc, u8 *out) skein512_final() argument
74 return skein_512_final((struct skein_512_ctx *)shash_desc_ctx(desc), skein512_final()
78 static int skein512_export(struct shash_desc *desc, void *out) skein512_export() argument
80 struct skein_512_ctx *sctx = shash_desc_ctx(desc); skein512_export()
86 static int skein512_import(struct shash_desc *desc, const void *in) skein512_import() argument
88 struct skein_512_ctx *sctx = shash_desc_ctx(desc); skein512_import()
94 static int skein1024_init(struct shash_desc *desc) skein1024_init() argument
96 return skein_1024_init((struct skein_1024_ctx *)shash_desc_ctx(desc), skein1024_init()
100 static int skein1024_update(struct shash_desc *desc, const u8 *data, skein1024_update() argument
103 return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc), skein1024_update()
107 static int skein1024_final(struct shash_desc *desc, u8 *out) skein1024_final() argument
109 return skein_1024_final((struct skein_1024_ctx *)shash_desc_ctx(desc), skein1024_final()
113 static int skein1024_export(struct shash_desc *desc, void *out) skein1024_export() argument
115 struct skein_1024_ctx *sctx = shash_desc_ctx(desc); skein1024_export()
121 static int skein1024_import(struct shash_desc *desc, const void *in) skein1024_import() argument
123 struct skein_1024_ctx *sctx = shash_desc_ctx(desc); skein1024_import()
/linux-4.1.27/drivers/dma/hsu/
H A Dhsu.c60 struct hsu_dma_desc *desc = hsuc->desc; hsu_dma_chan_start() local
80 count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC; hsu_dma_chan_start()
82 hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr); hsu_dma_chan_start()
83 hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len); hsu_dma_chan_start()
89 desc->active++; hsu_dma_chan_start()
126 hsuc->desc = NULL; hsu_dma_start_transfer()
131 hsuc->desc = to_hsu_dma_desc(vdesc); hsu_dma_start_transfer()
152 struct hsu_dma_desc *desc; hsu_dma_irq() local
179 desc = hsuc->desc; hsu_dma_irq()
180 if (desc) { hsu_dma_irq()
182 desc->status = DMA_ERROR; hsu_dma_irq()
183 } else if (desc->active < desc->nents) { hsu_dma_irq()
186 vchan_cookie_complete(&desc->vdesc); hsu_dma_irq()
187 desc->status = DMA_COMPLETE; hsu_dma_irq()
199 struct hsu_dma_desc *desc; hsu_dma_alloc_desc() local
201 desc = kzalloc(sizeof(*desc), GFP_NOWAIT); hsu_dma_alloc_desc()
202 if (!desc) hsu_dma_alloc_desc()
205 desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT); hsu_dma_alloc_desc()
206 if (!desc->sg) { hsu_dma_alloc_desc()
207 kfree(desc); hsu_dma_alloc_desc()
211 return desc; hsu_dma_alloc_desc()
216 struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc); hsu_dma_desc_free() local
218 kfree(desc->sg); hsu_dma_desc_free()
219 kfree(desc); hsu_dma_desc_free()
228 struct hsu_dma_desc *desc; hsu_dma_prep_slave_sg() local
232 desc = hsu_dma_alloc_desc(sg_len); hsu_dma_prep_slave_sg()
233 if (!desc) hsu_dma_prep_slave_sg()
237 desc->sg[i].addr = sg_dma_address(sg); for_each_sg()
238 desc->sg[i].len = sg_dma_len(sg); for_each_sg()
241 desc->nents = sg_len;
242 desc->direction = direction;
243 /* desc->active = 0 by kzalloc */
244 desc->status = DMA_IN_PROGRESS;
246 return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
255 if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc) hsu_dma_issue_pending()
260 static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc) hsu_dma_desc_size() argument
265 for (i = desc->active; i < desc->nents; i++) hsu_dma_desc_size()
266 bytes += desc->sg[i].len; hsu_dma_desc_size()
273 struct hsu_dma_desc *desc = hsuc->desc; hsu_dma_active_desc_size() local
274 size_t bytes = hsu_dma_desc_size(desc); hsu_dma_active_desc_size()
279 i = desc->active % HSU_DMA_CHAN_NR_DESC; hsu_dma_active_desc_size()
303 if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) { hsu_dma_tx_status()
306 status = hsuc->desc->status; hsu_dma_tx_status()
354 if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) { hsu_dma_pause()
356 hsuc->desc->status = DMA_PAUSED; hsu_dma_pause()
369 if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) { hsu_dma_resume()
370 hsuc->desc->status = DMA_IN_PROGRESS; hsu_dma_resume()
387 if (hsuc->desc) { hsu_dma_terminate_all()
388 hsu_dma_desc_free(&hsuc->desc->vdesc); hsu_dma_terminate_all()
389 hsuc->desc = NULL; hsu_dma_terminate_all()
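hsu_dma_alloc_desc() and hsu_dma_desc_free() above show the two-step descriptor allocation used throughout this driver: the descriptor and its scatter-gather array are allocated separately, so the error path must unwind the first allocation. A hedged sketch of the pattern in isolation (the struct names are made up):

	struct my_sg   { dma_addr_t addr; unsigned int len; };
	struct my_desc { struct my_sg *sg; unsigned int nents; };

	static struct my_desc *my_alloc_desc(unsigned int nents)
	{
		struct my_desc *desc = kzalloc(sizeof(*desc), GFP_NOWAIT);

		if (!desc)
			return NULL;
		desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT);
		if (!desc->sg) {
			kfree(desc);	/* unwind the first allocation */
			return NULL;
		}
		desc->nents = nents;
		return desc;
	}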
/linux-4.1.27/drivers/staging/comedi/drivers/
H A Dcomedi_isadma.c28 * @desc: the ISA DMA cookie to program and enable
30 void comedi_isadma_program(struct comedi_isadma_desc *desc) comedi_isadma_program() argument
35 clear_dma_ff(desc->chan); comedi_isadma_program()
36 set_dma_mode(desc->chan, desc->mode); comedi_isadma_program()
37 set_dma_addr(desc->chan, desc->hw_addr); comedi_isadma_program()
38 set_dma_count(desc->chan, desc->size); comedi_isadma_program()
39 enable_dma(desc->chan); comedi_isadma_program()
111 struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma]; comedi_isadma_poll() local
117 clear_dma_ff(desc->chan); comedi_isadma_poll()
119 disable_dma(desc->chan); comedi_isadma_poll()
120 result = get_dma_residue(desc->chan); comedi_isadma_poll()
126 result1 = get_dma_residue(desc->chan); comedi_isadma_poll()
128 enable_dma(desc->chan); comedi_isadma_poll()
133 if (result >= desc->size || result == 0) comedi_isadma_poll()
136 return desc->size - result; comedi_isadma_poll()
142 * @desc: the ISA DMA cookie to set
145 void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir) comedi_isadma_set_mode() argument
147 desc->mode = (dma_dir == COMEDI_ISADMA_READ) ? DMA_MODE_READ comedi_isadma_set_mode()
169 struct comedi_isadma_desc *desc; comedi_isadma_alloc() local
180 desc = kcalloc(n_desc, sizeof(*desc), GFP_KERNEL); comedi_isadma_alloc()
181 if (!desc) comedi_isadma_alloc()
183 dma->desc = desc; comedi_isadma_alloc()
202 desc = &dma->desc[i]; comedi_isadma_alloc()
203 desc->chan = dma_chans[i]; comedi_isadma_alloc()
204 desc->maxsize = maxsize; comedi_isadma_alloc()
205 desc->virt_addr = dma_alloc_coherent(NULL, desc->maxsize, comedi_isadma_alloc()
206 &desc->hw_addr, comedi_isadma_alloc()
208 if (!desc->virt_addr) comedi_isadma_alloc()
210 comedi_isadma_set_mode(desc, dma_dir); comedi_isadma_alloc()
227 struct comedi_isadma_desc *desc; comedi_isadma_free() local
233 if (dma->desc) { comedi_isadma_free()
235 desc = &dma->desc[i]; comedi_isadma_free()
236 if (desc->virt_addr) comedi_isadma_free()
237 dma_free_coherent(NULL, desc->maxsize, comedi_isadma_free()
238 desc->virt_addr, comedi_isadma_free()
239 desc->hw_addr); comedi_isadma_free()
241 kfree(dma->desc); comedi_isadma_free()
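The comedi ISA DMA cookie above is driven in three steps: pick the direction once with comedi_isadma_set_mode(), arm the channel with comedi_isadma_program(), and read back progress with comedi_isadma_poll(); the labpc driver below does exactly this. A hedged sketch of the cycle:

	struct comedi_isadma_desc *desc = &devpriv->dma->desc[0];

	comedi_isadma_set_mode(desc, COMEDI_ISADMA_READ);
	desc->size = nbytes;		/* callers clamp to desc->maxsize */
	comedi_isadma_program(desc);
	/* ... later, e.g. from the interrupt handler ... */
	bytes_done = comedi_isadma_poll(devpriv->dma);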
H A Dni_labpc_isadma.c64 struct comedi_isadma_desc *desc = &devpriv->dma->desc[0]; labpc_setup_dma() local
69 desc->size = labpc_suggest_transfer_size(dev, s, desc->maxsize); labpc_setup_dma()
71 devpriv->count * sample_size < desc->size) labpc_setup_dma()
72 desc->size = devpriv->count * sample_size; labpc_setup_dma()
74 comedi_isadma_program(desc); labpc_setup_dma()
84 struct comedi_isadma_desc *desc = &devpriv->dma->desc[0]; labpc_drain_dma() local
88 unsigned int max_samples = comedi_bytes_to_samples(s, desc->size); labpc_drain_dma()
98 residue = comedi_isadma_disable(desc->chan); labpc_drain_dma()
118 desc->size = comedi_samples_to_bytes(s, leftover); labpc_drain_dma()
120 comedi_buf_write_samples(s, desc->virt_addr, nsamples); labpc_drain_dma()
127 struct comedi_isadma_desc *desc = &devpriv->dma->desc[0]; handle_isa_dma() local
131 if (desc->size) handle_isa_dma()
132 comedi_isadma_program(desc); handle_isa_dma()
/linux-4.1.27/drivers/misc/mic/card/
H A Dmic_virtio.h43 static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) mic_desc_size() argument
45 return sizeof(*desc) mic_desc_size()
46 + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig) mic_desc_size()
47 + ioread8(&desc->feature_len) * 2 mic_desc_size()
48 + ioread8(&desc->config_len); mic_desc_size()
52 mic_vq_config(struct mic_device_desc __iomem *desc) mic_vq_config() argument
54 return (struct mic_vqconfig __iomem *)(desc + 1); mic_vq_config()
58 mic_vq_features(struct mic_device_desc __iomem *desc) mic_vq_features() argument
60 return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq)); mic_vq_features()
64 mic_vq_configspace(struct mic_device_desc __iomem *desc) mic_vq_configspace() argument
66 return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2; mic_vq_configspace()
68 static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) mic_total_desc_size() argument
70 return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); mic_total_desc_size()
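mic_desc_size() above walks a packed layout: the device descriptor is followed directly by num_vq vqconfig entries, then feature_len * 2 bytes of feature bits (presumably one feature_len-sized section each for the host and guest sides), then config_len bytes of config space. As offsets, assuming that reading of the code:

	/* 0                         struct mic_device_desc        */
	/* sizeof(desc)              num_vq * struct mic_vqconfig  */
	/* + num_vq * sizeof(vqcfg)  2 * feature_len feature bytes */
	/* + 2 * feature_len         config_len config bytes       */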
/linux-4.1.27/drivers/gpio/
H A Dgpiolib-sysfs.c44 struct gpio_desc *desc = dev_get_drvdata(dev); gpio_direction_show() local
49 if (!test_bit(FLAG_EXPORT, &desc->flags)) { gpio_direction_show()
52 gpiod_get_direction(desc); gpio_direction_show()
54 test_bit(FLAG_IS_OUT, &desc->flags) gpio_direction_show()
65 struct gpio_desc *desc = dev_get_drvdata(dev); gpio_direction_store() local
70 if (!test_bit(FLAG_EXPORT, &desc->flags)) gpio_direction_store()
73 status = gpiod_direction_output_raw(desc, 1); gpio_direction_store()
75 status = gpiod_direction_output_raw(desc, 0); gpio_direction_store()
77 status = gpiod_direction_input(desc); gpio_direction_store()
91 struct gpio_desc *desc = dev_get_drvdata(dev); gpio_value_show() local
96 if (!test_bit(FLAG_EXPORT, &desc->flags)) gpio_value_show()
99 status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc)); gpio_value_show()
108 struct gpio_desc *desc = dev_get_drvdata(dev); gpio_value_store() local
113 if (!test_bit(FLAG_EXPORT, &desc->flags)) gpio_value_store()
115 else if (!test_bit(FLAG_IS_OUT, &desc->flags)) gpio_value_store()
122 gpiod_set_value_cansleep(desc, value); gpio_value_store()
142 static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev, gpio_setup_irq() argument
149 if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags) gpio_setup_irq()
152 irq = gpiod_to_irq(desc); gpio_setup_irq()
156 id = desc->flags >> ID_SHIFT; gpio_setup_irq()
161 desc->flags &= ~GPIO_TRIGGER_MASK; gpio_setup_irq()
164 gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); gpio_setup_irq()
171 irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? gpio_setup_irq()
174 irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? gpio_setup_irq()
189 desc->flags &= GPIO_FLAGS_MASK; gpio_setup_irq()
190 desc->flags |= (unsigned long)id << ID_SHIFT; gpio_setup_irq()
192 if (desc->flags >> ID_SHIFT != id) { gpio_setup_irq()
203 ret = gpiochip_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); gpio_setup_irq()
205 gpiod_warn(desc, "failed to flag the GPIO for IRQ\n"); gpio_setup_irq()
209 desc->flags |= gpio_flags; gpio_setup_irq()
214 desc->flags &= GPIO_FLAGS_MASK; gpio_setup_irq()
235 const struct gpio_desc *desc = dev_get_drvdata(dev); gpio_edge_show() local
240 if (!test_bit(FLAG_EXPORT, &desc->flags)) gpio_edge_show()
247 if ((desc->flags & GPIO_TRIGGER_MASK) gpio_edge_show()
262 struct gpio_desc *desc = dev_get_drvdata(dev); gpio_edge_store() local
274 if (!test_bit(FLAG_EXPORT, &desc->flags)) gpio_edge_store()
277 status = gpio_setup_irq(desc, dev, trigger_types[i].flags); gpio_edge_store()
289 static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev, sysfs_set_active_low() argument
294 if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value) sysfs_set_active_low()
298 set_bit(FLAG_ACTIVE_LOW, &desc->flags); sysfs_set_active_low()
300 clear_bit(FLAG_ACTIVE_LOW, &desc->flags); sysfs_set_active_low()
303 if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^ sysfs_set_active_low()
304 !!test_bit(FLAG_TRIG_FALL, &desc->flags))) { sysfs_set_active_low()
305 unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK; sysfs_set_active_low()
307 gpio_setup_irq(desc, dev, 0); sysfs_set_active_low()
308 status = gpio_setup_irq(desc, dev, trigger_flags); sysfs_set_active_low()
317 const struct gpio_desc *desc = dev_get_drvdata(dev); gpio_active_low_show() local
322 if (!test_bit(FLAG_EXPORT, &desc->flags)) gpio_active_low_show()
326 !!test_bit(FLAG_ACTIVE_LOW, &desc->flags)); gpio_active_low_show()
336 struct gpio_desc *desc = dev_get_drvdata(dev); gpio_active_low_store() local
341 if (!test_bit(FLAG_EXPORT, &desc->flags)) { gpio_active_low_store()
348 status = sysfs_set_active_low(desc, dev, value != 0); gpio_active_low_store()
363 struct gpio_desc *desc = dev_get_drvdata(dev); gpio_is_visible() local
365 bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags); gpio_is_visible()
371 if (gpiod_to_irq(desc) < 0) gpio_is_visible()
373 if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags)) gpio_is_visible()
451 struct gpio_desc *desc; export_store() local
458 desc = gpio_to_desc(gpio); export_store()
460 if (!desc) { export_store()
470 status = gpiod_request(desc, "sysfs"); export_store()
476 status = gpiod_export(desc, true); export_store()
478 gpiod_free(desc); export_store()
480 set_bit(FLAG_SYSFS, &desc->flags); export_store()
493 struct gpio_desc *desc; unexport_store() local
500 desc = gpio_to_desc(gpio); unexport_store()
502 if (!desc) { unexport_store()
513 if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) { unexport_store()
515 gpiod_free(desc); unexport_store()
552 int gpiod_export(struct gpio_desc *desc, bool direction_may_change) gpiod_export() argument
567 if (!desc) { gpiod_export()
572 chip = desc->chip; gpiod_export()
583 if (!test_bit(FLAG_REQUESTED, &desc->flags) || gpiod_export()
584 test_bit(FLAG_EXPORT, &desc->flags)) { gpiod_export()
586 gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n", gpiod_export()
588 test_bit(FLAG_REQUESTED, &desc->flags), gpiod_export()
589 test_bit(FLAG_EXPORT, &desc->flags)); gpiod_export()
594 if (desc->chip->direction_input && desc->chip->direction_output && gpiod_export()
596 set_bit(FLAG_SYSFS_DIR, &desc->flags); gpiod_export()
601 offset = gpio_chip_hwgpio(desc); gpiod_export()
602 if (desc->chip->names && desc->chip->names[offset]) gpiod_export()
603 ioname = desc->chip->names[offset]; gpiod_export()
605 dev = device_create_with_groups(&gpio_class, desc->chip->dev, gpiod_export()
606 MKDEV(0, 0), desc, gpio_groups, gpiod_export()
608 desc_to_gpio(desc)); gpiod_export()
614 set_bit(FLAG_EXPORT, &desc->flags); gpiod_export()
620 gpiod_dbg(desc, "%s: status %d\n", __func__, status); gpiod_export()
642 struct gpio_desc *desc) gpiod_export_link()
646 if (!desc) { gpiod_export_link()
653 if (test_bit(FLAG_EXPORT, &desc->flags)) { gpiod_export_link()
656 tdev = class_find_device(&gpio_class, NULL, desc, match_export); gpiod_export_link()
669 gpiod_dbg(desc, "%s: status %d\n", __func__, status); gpiod_export_link()
687 int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value) gpiod_sysfs_set_active_low() argument
692 if (!desc) { gpiod_sysfs_set_active_low()
699 if (test_bit(FLAG_EXPORT, &desc->flags)) { gpiod_sysfs_set_active_low()
700 dev = class_find_device(&gpio_class, NULL, desc, match_export); gpiod_sysfs_set_active_low()
707 status = sysfs_set_active_low(desc, dev, value); gpiod_sysfs_set_active_low()
713 gpiod_dbg(desc, "%s: status %d\n", __func__, status); gpiod_sysfs_set_active_low()
725 void gpiod_unexport(struct gpio_desc *desc) gpiod_unexport() argument
730 if (!desc) { gpiod_unexport()
737 if (test_bit(FLAG_EXPORT, &desc->flags)) { gpiod_unexport()
739 dev = class_find_device(&gpio_class, NULL, desc, match_export); gpiod_unexport()
741 gpio_setup_irq(desc, dev, 0); gpiod_unexport()
742 clear_bit(FLAG_SYSFS_DIR, &desc->flags); gpiod_unexport()
743 clear_bit(FLAG_EXPORT, &desc->flags); gpiod_unexport()
756 gpiod_dbg(desc, "%s: status %d\n", __func__, status); gpiod_unexport()
795 struct gpio_desc *desc; gpiochip_unexport() local
815 desc = &chip->desc[i]; gpiochip_unexport()
816 if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) gpiochip_unexport()
817 gpiod_free(desc); gpiochip_unexport()
641 gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc) gpiod_export_link() argument
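gpiod_export() above publishes an already-requested GPIO under /sys/class/gpio, gpiod_export_link() adds a device-local symlink to it, and gpiod_unexport() removes the entry. A hedged sketch of the usual driver sequence (the link name is illustrative):

	/* desc was obtained earlier, e.g. via gpiod_request() */
	ret = gpiod_export(desc, true);	/* allow direction changes */
	if (!ret)
		gpiod_export_link(&pdev->dev, "reset", desc);
	/* ... on teardown ... */
	gpiod_unexport(desc);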
H A Dgpiolib-legacy.c22 struct gpio_desc *desc; gpio_request_one() local
25 desc = gpio_to_desc(gpio); gpio_request_one()
28 if (!desc && gpio_is_valid(gpio)) gpio_request_one()
31 err = gpiod_request(desc, label); gpio_request_one()
36 set_bit(FLAG_OPEN_DRAIN, &desc->flags); gpio_request_one()
39 set_bit(FLAG_OPEN_SOURCE, &desc->flags); gpio_request_one()
42 set_bit(FLAG_ACTIVE_LOW, &desc->flags); gpio_request_one()
45 err = gpiod_direction_input(desc); gpio_request_one()
47 err = gpiod_direction_output_raw(desc, gpio_request_one()
54 err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE); gpio_request_one()
62 gpiod_free(desc); gpio_request_one()
69 struct gpio_desc *desc = gpio_to_desc(gpio); gpio_request() local
72 if (!desc && gpio_is_valid(gpio)) gpio_request()
75 return gpiod_request(desc, label); gpio_request()
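gpio_request_one() above folds request, flag handling, initial direction, and optional sysfs export into a single call. A hedged usage sketch with flags from the GPIOF_* family (GPIOF_EXPORT_CHANGEABLE appears in the excerpt; the GPIO number is a placeholder):

	err = gpio_request_one(42 /* placeholder */,
			       GPIOF_OUT_INIT_LOW | GPIOF_EXPORT_CHANGEABLE,
			       "status-led");
	if (err)
		return err;
	/* ... */
	gpio_free(42);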
H A Dgpiolib.h103 int gpiod_request(struct gpio_desc *desc, const char *label);
104 void gpiod_free(struct gpio_desc *desc);
105 int gpiod_hog(struct gpio_desc *desc, const char *name,
111 static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc) gpio_chip_hwgpio() argument
113 return desc - &desc->chip->desc[0]; gpio_chip_hwgpio()
118 #define gpiod_emerg(desc, fmt, ...) \
119 pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
121 #define gpiod_crit(desc, fmt, ...) \
122 pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
124 #define gpiod_err(desc, fmt, ...) \
125 pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
127 #define gpiod_warn(desc, fmt, ...) \
128 pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
130 #define gpiod_info(desc, fmt, ...) \
131 pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
133 #define gpiod_dbg(desc, fmt, ...) \
134 pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
H A Dgpiolib.c79 return &chip->desc[gpio - chip->base]; gpio_to_desc()
101 return &chip->desc[hwnum]; gpiochip_get_desc()
109 int desc_to_gpio(const struct gpio_desc *desc) desc_to_gpio() argument
111 return desc->chip->base + (desc - &desc->chip->desc[0]); desc_to_gpio()
118 * @desc: descriptor to return the chip of
120 struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) gpiod_to_chip() argument
122 return desc ? desc->chip : NULL; gpiod_to_chip()
152 * @desc: GPIO to get the direction of
158 int gpiod_get_direction(struct gpio_desc *desc) gpiod_get_direction() argument
164 chip = gpiod_to_chip(desc); gpiod_get_direction()
165 offset = gpio_chip_hwgpio(desc); gpiod_get_direction()
174 clear_bit(FLAG_IS_OUT, &desc->flags); gpiod_get_direction()
178 set_bit(FLAG_IS_OUT, &desc->flags); gpiod_get_direction()
269 struct gpio_desc *desc = &descs[id]; gpiochip_add() local
271 desc->chip = chip; gpiochip_add()
279 desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0; gpiochip_add()
282 chip->desc = descs; gpiochip_add()
310 chip->desc = NULL; gpiochip_add()
344 if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) gpiochip_remove()
348 chip->desc[id].chip = NULL; gpiochip_remove()
353 kfree(chip->desc); gpiochip_remove()
354 chip->desc = NULL; gpiochip_remove()
778 static int __gpiod_request(struct gpio_desc *desc, const char *label) __gpiod_request() argument
780 struct gpio_chip *chip = desc->chip; __gpiod_request()
790 if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) { __gpiod_request()
791 desc_set_label(desc, label ? : "?"); __gpiod_request()
801 status = chip->request(chip, gpio_chip_hwgpio(desc)); __gpiod_request()
805 desc_set_label(desc, NULL); __gpiod_request()
806 clear_bit(FLAG_REQUESTED, &desc->flags); __gpiod_request()
813 gpiod_get_direction(desc); __gpiod_request()
821 int gpiod_request(struct gpio_desc *desc, const char *label) gpiod_request() argument
826 if (!desc) { gpiod_request()
831 chip = desc->chip; gpiod_request()
836 status = __gpiod_request(desc, label); gpiod_request()
843 gpiod_dbg(desc, "%s: status %d\n", __func__, status); gpiod_request()
848 static bool __gpiod_free(struct gpio_desc *desc) __gpiod_free() argument
856 gpiod_unexport(desc); __gpiod_free()
860 chip = desc->chip; __gpiod_free()
861 if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) { __gpiod_free()
865 chip->free(chip, gpio_chip_hwgpio(desc)); __gpiod_free()
868 desc_set_label(desc, NULL); __gpiod_free()
869 clear_bit(FLAG_ACTIVE_LOW, &desc->flags); __gpiod_free()
870 clear_bit(FLAG_REQUESTED, &desc->flags); __gpiod_free()
871 clear_bit(FLAG_OPEN_DRAIN, &desc->flags); __gpiod_free()
872 clear_bit(FLAG_OPEN_SOURCE, &desc->flags); __gpiod_free()
873 clear_bit(FLAG_IS_HOGGED, &desc->flags); __gpiod_free()
881 void gpiod_free(struct gpio_desc *desc) gpiod_free() argument
883 if (desc && __gpiod_free(desc)) gpiod_free()
884 module_put(desc->chip->owner); gpiod_free()
904 struct gpio_desc *desc; gpiochip_is_requested() local
909 desc = &chip->desc[offset]; gpiochip_is_requested()
911 if (test_bit(FLAG_REQUESTED, &desc->flags) == 0) gpiochip_is_requested()
913 return desc->label; gpiochip_is_requested()
919 * @desc: GPIO descriptor to request
931 struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum); gpiochip_request_own_desc() local
934 if (IS_ERR(desc)) { gpiochip_request_own_desc()
936 return desc; gpiochip_request_own_desc()
939 err = __gpiod_request(desc, label); gpiochip_request_own_desc()
943 return desc; gpiochip_request_own_desc()
949 * @desc: GPIO descriptor to free
954 void gpiochip_free_own_desc(struct gpio_desc *desc) gpiochip_free_own_desc() argument
956 if (desc) gpiochip_free_own_desc()
957 __gpiod_free(desc); gpiochip_free_own_desc()
972 * @desc: GPIO to set to input
979 int gpiod_direction_input(struct gpio_desc *desc) gpiod_direction_input() argument
984 if (!desc || !desc->chip) { gpiod_direction_input()
989 chip = desc->chip; gpiod_direction_input()
991 gpiod_warn(desc, gpiod_direction_input()
997 status = chip->direction_input(chip, gpio_chip_hwgpio(desc)); gpiod_direction_input()
999 clear_bit(FLAG_IS_OUT, &desc->flags); gpiod_direction_input()
1001 trace_gpio_direction(desc_to_gpio(desc), 1, status); gpiod_direction_input()
1007 static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value) _gpiod_direction_output_raw() argument
1013 if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) { _gpiod_direction_output_raw()
1014 gpiod_err(desc, _gpiod_direction_output_raw()
1021 if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags)) _gpiod_direction_output_raw()
1022 return gpiod_direction_input(desc); _gpiod_direction_output_raw()
1025 if (!value && test_bit(FLAG_OPEN_SOURCE, &desc->flags)) _gpiod_direction_output_raw()
1026 return gpiod_direction_input(desc); _gpiod_direction_output_raw()
1028 chip = desc->chip; _gpiod_direction_output_raw()
1030 gpiod_warn(desc, _gpiod_direction_output_raw()
1036 status = chip->direction_output(chip, gpio_chip_hwgpio(desc), value); _gpiod_direction_output_raw()
1038 set_bit(FLAG_IS_OUT, &desc->flags); _gpiod_direction_output_raw()
1039 trace_gpio_value(desc_to_gpio(desc), 0, value); _gpiod_direction_output_raw()
1040 trace_gpio_direction(desc_to_gpio(desc), 0, status); _gpiod_direction_output_raw()
1046 * @desc: GPIO to set to output
1055 int gpiod_direction_output_raw(struct gpio_desc *desc, int value) gpiod_direction_output_raw() argument
1057 if (!desc || !desc->chip) { gpiod_direction_output_raw()
1061 return _gpiod_direction_output_raw(desc, value); gpiod_direction_output_raw()
1067 * @desc: GPIO to set to output
1077 int gpiod_direction_output(struct gpio_desc *desc, int value) gpiod_direction_output() argument
1079 if (!desc || !desc->chip) { gpiod_direction_output()
1083 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_direction_output()
1085 return _gpiod_direction_output_raw(desc, value); gpiod_direction_output()
1097 int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) gpiod_set_debounce() argument
1101 if (!desc || !desc->chip) { gpiod_set_debounce()
1106 chip = desc->chip; gpiod_set_debounce()
1108 gpiod_dbg(desc, gpiod_set_debounce()
1114 return chip->set_debounce(chip, gpio_chip_hwgpio(desc), debounce); gpiod_set_debounce()
1120 * @desc: the gpio descriptor to test
1124 int gpiod_is_active_low(const struct gpio_desc *desc) gpiod_is_active_low() argument
1126 return test_bit(FLAG_ACTIVE_LOW, &desc->flags); gpiod_is_active_low()
1152 static bool _gpiod_get_raw_value(const struct gpio_desc *desc) _gpiod_get_raw_value() argument
1158 chip = desc->chip; _gpiod_get_raw_value()
1159 offset = gpio_chip_hwgpio(desc); _gpiod_get_raw_value()
1161 trace_gpio_value(desc_to_gpio(desc), 1, value); _gpiod_get_raw_value()
1167 * @desc: gpio whose value will be returned
1175 int gpiod_get_raw_value(const struct gpio_desc *desc) gpiod_get_raw_value() argument
1177 if (!desc) gpiod_get_raw_value()
1180 WARN_ON(desc->chip->can_sleep); gpiod_get_raw_value()
1181 return _gpiod_get_raw_value(desc); gpiod_get_raw_value()
1187 * @desc: gpio whose value will be returned
1195 int gpiod_get_value(const struct gpio_desc *desc) gpiod_get_value() argument
1198 if (!desc) gpiod_get_value()
1201 WARN_ON(desc->chip->can_sleep); gpiod_get_value()
1203 value = _gpiod_get_raw_value(desc); gpiod_get_value()
1204 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_get_value()
1213 * @desc: gpio descriptor whose state needs to be set.
1216 static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value) _gpio_set_open_drain_value() argument
1219 struct gpio_chip *chip = desc->chip; _gpio_set_open_drain_value()
1220 int offset = gpio_chip_hwgpio(desc); _gpio_set_open_drain_value()
1225 clear_bit(FLAG_IS_OUT, &desc->flags); _gpio_set_open_drain_value()
1229 set_bit(FLAG_IS_OUT, &desc->flags); _gpio_set_open_drain_value()
1231 trace_gpio_direction(desc_to_gpio(desc), value, err); _gpio_set_open_drain_value()
1233 gpiod_err(desc, _gpio_set_open_drain_value()
1240 * @desc: gpio descriptor whose state needs to be set.
1243 static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value) _gpio_set_open_source_value() argument
1246 struct gpio_chip *chip = desc->chip; _gpio_set_open_source_value()
1247 int offset = gpio_chip_hwgpio(desc); _gpio_set_open_source_value()
1252 set_bit(FLAG_IS_OUT, &desc->flags); _gpio_set_open_source_value()
1256 clear_bit(FLAG_IS_OUT, &desc->flags); _gpio_set_open_source_value()
1258 trace_gpio_direction(desc_to_gpio(desc), !value, err); _gpio_set_open_source_value()
1260 gpiod_err(desc, _gpio_set_open_source_value()
1265 static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value) _gpiod_set_raw_value() argument
1269 chip = desc->chip; _gpiod_set_raw_value()
1270 trace_gpio_value(desc_to_gpio(desc), 0, value); _gpiod_set_raw_value()
1271 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) _gpiod_set_raw_value()
1272 _gpio_set_open_drain_value(desc, value); _gpiod_set_raw_value()
1273 else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) _gpiod_set_raw_value()
1274 _gpio_set_open_source_value(desc, value); _gpiod_set_raw_value()
1276 chip->set(chip, gpio_chip_hwgpio(desc), value); _gpiod_set_raw_value()
1328 struct gpio_desc *desc = desc_array[i]; gpiod_set_array_priv() local
1329 int hwgpio = gpio_chip_hwgpio(desc); gpiod_set_array_priv()
1332 if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_set_array_priv()
1334 trace_gpio_value(desc_to_gpio(desc), 0, value); gpiod_set_array_priv()
1339 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { gpiod_set_array_priv()
1340 _gpio_set_open_drain_value(desc, value); gpiod_set_array_priv()
1341 } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { gpiod_set_array_priv()
1342 _gpio_set_open_source_value(desc, value); gpiod_set_array_priv()
1363 * @desc: gpio whose value will be assigned
1372 void gpiod_set_raw_value(struct gpio_desc *desc, int value) gpiod_set_raw_value() argument
1374 if (!desc) gpiod_set_raw_value()
1377 WARN_ON(desc->chip->can_sleep); gpiod_set_raw_value()
1378 _gpiod_set_raw_value(desc, value); gpiod_set_raw_value()
1384 * @desc: gpio whose value will be assigned
1393 void gpiod_set_value(struct gpio_desc *desc, int value) gpiod_set_value() argument
1395 if (!desc) gpiod_set_value()
1398 WARN_ON(desc->chip->can_sleep); gpiod_set_value()
1399 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_set_value()
1401 _gpiod_set_raw_value(desc, value); gpiod_set_value()
1449 * @desc: gpio to check
1452 int gpiod_cansleep(const struct gpio_desc *desc) gpiod_cansleep() argument
1454 if (!desc) gpiod_cansleep()
1456 return desc->chip->can_sleep; gpiod_cansleep()
1462 * @desc: gpio whose IRQ will be returned (already requested)
1467 int gpiod_to_irq(const struct gpio_desc *desc) gpiod_to_irq() argument
1472 if (!desc) gpiod_to_irq()
1474 chip = desc->chip; gpiod_to_irq()
1475 offset = gpio_chip_hwgpio(desc); gpiod_to_irq()
1493 if (test_bit(FLAG_IS_OUT, &chip->desc[offset].flags)) { gpiochip_lock_as_irq()
1500 set_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); gpiochip_lock_as_irq()
1518 clear_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); gpiochip_unlock_as_irq()
1524 * @desc: gpio whose value will be returned
1531 int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) gpiod_get_raw_value_cansleep() argument
1534 if (!desc) gpiod_get_raw_value_cansleep()
1536 return _gpiod_get_raw_value(desc); gpiod_get_raw_value_cansleep()
1542 * @desc: gpio whose value will be returned
1549 int gpiod_get_value_cansleep(const struct gpio_desc *desc) gpiod_get_value_cansleep() argument
1554 if (!desc) gpiod_get_value_cansleep()
1557 value = _gpiod_get_raw_value(desc); gpiod_get_value_cansleep()
1558 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_get_value_cansleep()
1567 * @desc: gpio whose value will be assigned
1575 void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value) gpiod_set_raw_value_cansleep() argument
1578 if (!desc) gpiod_set_raw_value_cansleep()
1580 _gpiod_set_raw_value(desc, value); gpiod_set_raw_value_cansleep()
1586 * @desc: gpio whose value will be assigned
1594 void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) gpiod_set_value_cansleep() argument
1597 if (!desc) gpiod_set_value_cansleep()
1600 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_set_value_cansleep()
1602 _gpiod_set_raw_value(desc, value); gpiod_set_value_cansleep()
1669 struct gpio_desc *desc; of_find_gpio() local
1680 desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, of_find_gpio()
1682 if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER)) of_find_gpio()
1686 if (IS_ERR(desc)) of_find_gpio()
1687 return desc; of_find_gpio()
1692 return desc; of_find_gpio()
1701 struct gpio_desc *desc; acpi_find_gpio() local
1715 desc = acpi_get_gpiod_by_index(adev, propname, idx, &info); acpi_find_gpio()
1716 if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER)) acpi_find_gpio()
1721 if (IS_ERR(desc)) { acpi_find_gpio()
1722 desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info); acpi_find_gpio()
1723 if (IS_ERR(desc)) acpi_find_gpio()
1724 return desc; acpi_find_gpio()
1730 return desc; acpi_find_gpio()
1768 struct gpio_desc *desc = ERR_PTR(-ENOENT); gpiod_find() local
1774 return desc; gpiod_find()
1802 desc = gpiochip_get_desc(chip, p->chip_hwnum); gpiod_find()
1805 return desc; gpiod_find()
1808 return desc; gpiod_find()
1913 * @desc: gpio whose value will be assigned
1923 static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, gpiod_configure_flags() argument
1929 set_bit(FLAG_ACTIVE_LOW, &desc->flags); gpiod_configure_flags()
1931 set_bit(FLAG_OPEN_DRAIN, &desc->flags); gpiod_configure_flags()
1933 set_bit(FLAG_OPEN_SOURCE, &desc->flags); gpiod_configure_flags()
1943 status = gpiod_direction_output(desc, gpiod_configure_flags()
1946 status = gpiod_direction_input(desc); gpiod_configure_flags()
1970 struct gpio_desc *desc = NULL; __gpiod_get_index() local
1980 desc = of_find_gpio(dev, con_id, idx, &lookupflags); __gpiod_get_index()
1983 desc = acpi_find_gpio(dev, con_id, idx, &lookupflags); __gpiod_get_index()
1991 if (!desc || desc == ERR_PTR(-ENOENT)) { __gpiod_get_index()
1993 desc = gpiod_find(dev, con_id, idx, &lookupflags); __gpiod_get_index()
1996 if (IS_ERR(desc)) { __gpiod_get_index()
1998 return desc; __gpiod_get_index()
2001 status = gpiod_request(desc, con_id); __gpiod_get_index()
2005 status = gpiod_configure_flags(desc, con_id, lookupflags, flags); __gpiod_get_index()
2008 gpiod_put(desc); __gpiod_get_index()
2012 return desc; __gpiod_get_index()
2033 struct gpio_desc *desc = ERR_PTR(-ENODEV); fwnode_get_named_gpiod() local
2043 desc = of_get_named_gpiod_flags(of_node(fwnode), propname, 0, fwnode_get_named_gpiod()
2045 if (!IS_ERR(desc)) fwnode_get_named_gpiod()
2050 desc = acpi_get_gpiod_by_index(acpi_node(fwnode), propname, 0, fwnode_get_named_gpiod()
2052 if (!IS_ERR(desc)) fwnode_get_named_gpiod()
2056 if (IS_ERR(desc)) fwnode_get_named_gpiod()
2057 return desc; fwnode_get_named_gpiod()
2059 ret = gpiod_request(desc, NULL); fwnode_get_named_gpiod()
2065 set_bit(FLAG_ACTIVE_LOW, &desc->flags); fwnode_get_named_gpiod()
2067 return desc; fwnode_get_named_gpiod()
2088 struct gpio_desc *desc; __gpiod_get_index_optional() local
2090 desc = gpiod_get_index(dev, con_id, index, flags); __gpiod_get_index_optional()
2091 if (IS_ERR(desc)) { __gpiod_get_index_optional()
2092 if (PTR_ERR(desc) == -ENOENT) __gpiod_get_index_optional()
2096 return desc; __gpiod_get_index_optional()
2101 * gpiod_hog - Hog the specified GPIO desc given the provided flags
2102 * @desc: gpio whose value will be assigned
2108 int gpiod_hog(struct gpio_desc *desc, const char *name, gpiod_hog() argument
2116 chip = gpiod_to_chip(desc); gpiod_hog()
2117 hwnum = gpio_chip_hwgpio(desc); gpiod_hog()
2125 status = gpiod_configure_flags(desc, name, lflags, dflags); gpiod_hog()
2128 gpiochip_free_own_desc(desc); gpiod_hog()
2133 set_bit(FLAG_IS_HOGGED, &desc->flags); gpiod_hog()
2136 desc_to_gpio(desc), name, gpiod_hog()
2155 if (test_bit(FLAG_IS_HOGGED, &chip->desc[id].flags)) gpiochip_free_hogs()
2156 gpiochip_free_own_desc(&chip->desc[id]); gpiochip_free_hogs()
2176 struct gpio_desc *desc; gpiod_get_array() local
2184 descs = kzalloc(sizeof(*descs) + sizeof(descs->desc[0]) * count, gpiod_get_array()
2190 desc = gpiod_get_index(dev, con_id, descs->ndescs, flags); gpiod_get_array()
2191 if (IS_ERR(desc)) { gpiod_get_array()
2193 return ERR_CAST(desc); gpiod_get_array()
2195 descs->desc[descs->ndescs] = desc; gpiod_get_array()
2228 * @desc: GPIO descriptor to dispose of
2232 void gpiod_put(struct gpio_desc *desc) gpiod_put() argument
2234 gpiod_free(desc); gpiod_put()
2247 gpiod_put(descs->desc[i]); gpiod_put_array()
2259 struct gpio_desc *gdesc = &chip->desc[0]; gpiolib_dbg_show()
H A Ddevres.c26 struct gpio_desc **desc = res; devm_gpiod_release() local
28 gpiod_put(*desc); devm_gpiod_release()
105 struct gpio_desc *desc; __devm_gpiod_get_index() local
112 desc = gpiod_get_index(dev, con_id, idx, flags); __devm_gpiod_get_index()
113 if (IS_ERR(desc)) { __devm_gpiod_get_index()
115 return desc; __devm_gpiod_get_index()
118 *dr = desc; __devm_gpiod_get_index()
121 return desc; __devm_gpiod_get_index()
141 struct gpio_desc *desc; devm_get_gpiod_from_child() local
157 desc = fwnode_get_named_gpiod(child, prop_name); devm_get_gpiod_from_child()
158 if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER)) devm_get_gpiod_from_child()
161 if (IS_ERR(desc)) { devm_get_gpiod_from_child()
163 return desc; devm_get_gpiod_from_child()
166 *dr = desc; devm_get_gpiod_from_child()
169 return desc; devm_get_gpiod_from_child()
190 struct gpio_desc *desc; __devm_gpiod_get_index_optional() local
192 desc = devm_gpiod_get_index(dev, con_id, index, flags); __devm_gpiod_get_index_optional()
193 if (IS_ERR(desc)) { __devm_gpiod_get_index_optional()
194 if (PTR_ERR(desc) == -ENOENT) __devm_gpiod_get_index_optional()
198 return desc; __devm_gpiod_get_index_optional()
264 * @desc: GPIO descriptor to dispose of
270 void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) devm_gpiod_put() argument
273 &desc)); devm_gpiod_put()
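Note: the managed variants above tie the descriptor's lifetime to the device, so error and remove paths need no explicit gpiod_put(). A sketch, with a hypothetical "reset" connection ID:

#include <linux/gpio/consumer.h>

static int demo_probe(struct device *dev)
{
        struct gpio_desc *reset;

        reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_HIGH);
        if (IS_ERR(reset))
                return PTR_ERR(reset);
        if (reset)
                gpiod_set_value_cansleep(reset, 0);     /* deassert */
        return 0;
}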
H A Dgpio-ich.c105 struct ichx_desc *desc; /* Pointer to chipset-specific description */ member in struct:__anon3928
126 if (reg == GPIO_LVL && ichx_priv.desc->use_outlvl_cache) ichx_write_bit()
129 data = ICHX_READ(ichx_priv.desc->regs[reg][reg_nr], ichx_write_bit()
136 ICHX_WRITE(data, ichx_priv.desc->regs[reg][reg_nr], ichx_write_bit()
138 if (reg == GPIO_LVL && ichx_priv.desc->use_outlvl_cache) ichx_write_bit()
141 tmp = ICHX_READ(ichx_priv.desc->regs[reg][reg_nr], ichx_write_bit()
160 data = ICHX_READ(ichx_priv.desc->regs[reg][reg_nr], ichx_read_bit()
163 if (reg == GPIO_LVL && ichx_priv.desc->use_outlvl_cache) ichx_read_bit()
197 if (nr < 32 && ichx_priv.desc->have_blink) ichx_gpio_direction_output()
256 if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f))) ichx_gpio_request()
288 chip->request = ichx_priv.desc->request ? ichx_gpiolib_setup()
289 ichx_priv.desc->request : ichx_gpio_request; ichx_gpiolib_setup()
290 chip->get = ichx_priv.desc->get ? ichx_gpiolib_setup()
291 ichx_priv.desc->get : ichx_gpio_get; ichx_gpiolib_setup()
298 chip->ngpio = ichx_priv.desc->ngpio; ichx_gpiolib_setup()
395 for (i = 0; i < ARRAY_SIZE(ichx_priv.desc->regs[0]); i++) { ichx_gpio_request_regions()
399 res_base->start + ichx_priv.desc->regs[0][i], ichx_gpio_request_regions()
400 ichx_priv.desc->reglen[i], name)) ichx_gpio_request_regions()
410 release_region(res_base->start + ichx_priv.desc->regs[0][i], ichx_gpio_request_regions()
411 ichx_priv.desc->reglen[i]); ichx_gpio_request_regions()
420 for (i = 0; i < ARRAY_SIZE(ichx_priv.desc->regs[0]); i++) { ichx_gpio_release_regions()
423 release_region(res_base->start + ichx_priv.desc->regs[0][i], ichx_gpio_release_regions()
424 ichx_priv.desc->reglen[i]); ichx_gpio_release_regions()
441 ichx_priv.desc = &i3100_desc; ichx_gpio_probe()
444 ichx_priv.desc = &intel5_desc; ichx_gpio_probe()
447 ichx_priv.desc = &ich6_desc; ichx_gpio_probe()
450 ichx_priv.desc = &ich7_desc; ichx_gpio_probe()
453 ichx_priv.desc = &ich9_desc; ichx_gpio_probe()
456 ichx_priv.desc = &ich10_corp_desc; ichx_gpio_probe()
459 ichx_priv.desc = &ich10_cons_desc; ichx_gpio_probe()
462 ichx_priv.desc = &avoton_desc; ichx_gpio_probe()
483 if (!ichx_priv.desc->uses_gpe0) ichx_gpio_probe()
/linux-4.1.27/net/sunrpc/auth_gss/
H A Dgss_krb5_crypto.c63 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; krb5_encrypt() local
80 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); krb5_encrypt()
97 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; krb5_decrypt() local
113 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); krb5_decrypt()
122 struct hash_desc *desc = data; checksummer() local
124 return crypto_hash_update(desc, sg, sg->length); checksummer()
155 struct hash_desc desc; make_checksum_hmac_md5() local
188 desc.tfm = md5; make_checksum_hmac_md5()
189 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; make_checksum_hmac_md5()
191 err = crypto_hash_init(&desc); make_checksum_hmac_md5()
195 err = crypto_hash_update(&desc, sg, 4); make_checksum_hmac_md5()
200 err = crypto_hash_update(&desc, sg, hdrlen); make_checksum_hmac_md5()
204 checksummer, &desc); make_checksum_hmac_md5()
207 err = crypto_hash_final(&desc, checksumdata); make_checksum_hmac_md5()
211 desc.tfm = hmac_md5; make_checksum_hmac_md5()
212 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; make_checksum_hmac_md5()
214 err = crypto_hash_init(&desc); make_checksum_hmac_md5()
222 err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5), make_checksum_hmac_md5()
245 struct hash_desc desc; make_checksum() local
262 desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); make_checksum()
263 if (IS_ERR(desc.tfm)) make_checksum()
265 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; make_checksum()
267 checksumlen = crypto_hash_digestsize(desc.tfm); make_checksum()
270 err = crypto_hash_setkey(desc.tfm, cksumkey, make_checksum()
276 err = crypto_hash_init(&desc); make_checksum()
280 err = crypto_hash_update(&desc, sg, hdrlen); make_checksum()
284 checksummer, &desc); make_checksum()
287 err = crypto_hash_final(&desc, checksumdata); make_checksum()
310 crypto_free_hash(desc.tfm); make_checksum()
326 struct hash_desc desc; make_checksum_v2() local
343 desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, make_checksum_v2()
345 if (IS_ERR(desc.tfm)) make_checksum_v2()
347 checksumlen = crypto_hash_digestsize(desc.tfm); make_checksum_v2()
348 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; make_checksum_v2()
350 err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength); make_checksum_v2()
354 err = crypto_hash_init(&desc); make_checksum_v2()
358 checksummer, &desc); make_checksum_v2()
363 err = crypto_hash_update(&desc, sg, hdrlen); make_checksum_v2()
367 err = crypto_hash_final(&desc, checksumdata); make_checksum_v2()
384 crypto_free_hash(desc.tfm); make_checksum_v2()
390 struct blkcipher_desc desc; member in struct:encryptor_desc
403 struct encryptor_desc *desc = data; encryptor() local
404 struct xdr_buf *outbuf = desc->outbuf; encryptor()
406 int thislen = desc->fraglen + sg->length; encryptor()
412 BUG_ON(desc->fragno > 3); encryptor()
414 page_pos = desc->pos - outbuf->head[0].iov_len; encryptor()
418 in_page = desc->pages[i]; encryptor()
422 sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length, encryptor()
424 sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length, encryptor()
426 desc->fragno++; encryptor()
427 desc->fraglen += sg->length; encryptor()
428 desc->pos += sg->length; encryptor()
430 fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); encryptor()
436 sg_mark_end(&desc->infrags[desc->fragno - 1]); encryptor()
437 sg_mark_end(&desc->outfrags[desc->fragno - 1]); encryptor()
439 ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags, encryptor()
440 desc->infrags, thislen); encryptor()
444 sg_init_table(desc->infrags, 4); encryptor()
445 sg_init_table(desc->outfrags, 4); encryptor()
448 sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen, encryptor()
450 desc->infrags[0] = desc->outfrags[0]; encryptor()
451 sg_assign_page(&desc->infrags[0], in_page); encryptor()
452 desc->fragno = 1; encryptor()
453 desc->fraglen = fraglen; encryptor()
455 desc->fragno = 0; encryptor()
456 desc->fraglen = 0; encryptor()
466 struct encryptor_desc desc; gss_encrypt_xdr_buf() local
470 memset(desc.iv, 0, sizeof(desc.iv)); gss_encrypt_xdr_buf()
471 desc.desc.tfm = tfm; gss_encrypt_xdr_buf()
472 desc.desc.info = desc.iv; gss_encrypt_xdr_buf()
473 desc.desc.flags = 0; gss_encrypt_xdr_buf()
474 desc.pos = offset; gss_encrypt_xdr_buf()
475 desc.outbuf = buf; gss_encrypt_xdr_buf()
476 desc.pages = pages; gss_encrypt_xdr_buf()
477 desc.fragno = 0; gss_encrypt_xdr_buf()
478 desc.fraglen = 0; gss_encrypt_xdr_buf()
480 sg_init_table(desc.infrags, 4); gss_encrypt_xdr_buf()
481 sg_init_table(desc.outfrags, 4); gss_encrypt_xdr_buf()
483 ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc); gss_encrypt_xdr_buf()
489 struct blkcipher_desc desc; member in struct:decryptor_desc
498 struct decryptor_desc *desc = data; decryptor() local
499 int thislen = desc->fraglen + sg->length; decryptor()
504 BUG_ON(desc->fragno > 3); decryptor()
505 sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length, decryptor()
507 desc->fragno++; decryptor()
508 desc->fraglen += sg->length; decryptor()
510 fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); decryptor()
516 sg_mark_end(&desc->frags[desc->fragno - 1]); decryptor()
518 ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags, decryptor()
519 desc->frags, thislen); decryptor()
523 sg_init_table(desc->frags, 4); decryptor()
526 sg_set_page(&desc->frags[0], sg_page(sg), fraglen, decryptor()
528 desc->fragno = 1; decryptor()
529 desc->fraglen = fraglen; decryptor()
531 desc->fragno = 0; decryptor()
532 desc->fraglen = 0; decryptor()
541 struct decryptor_desc desc; gss_decrypt_xdr_buf() local
546 memset(desc.iv, 0, sizeof(desc.iv)); gss_decrypt_xdr_buf()
547 desc.desc.tfm = tfm; gss_decrypt_xdr_buf()
548 desc.desc.info = desc.iv; gss_decrypt_xdr_buf()
549 desc.desc.flags = 0; gss_decrypt_xdr_buf()
550 desc.fragno = 0; gss_decrypt_xdr_buf()
551 desc.fraglen = 0; gss_decrypt_xdr_buf()
553 sg_init_table(desc.frags, 4); gss_decrypt_xdr_buf()
555 return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); gss_decrypt_xdr_buf()
602 struct blkcipher_desc desc = { .tfm = cipher, .info = iv }; gss_krb5_cts_crypt() local
629 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); gss_krb5_cts_crypt()
631 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len); gss_krb5_cts_crypt()
654 struct encryptor_desc desc; gss_krb5_aes_encrypt() local
719 memset(desc.iv, 0, sizeof(desc.iv)); gss_krb5_aes_encrypt()
722 desc.pos = offset + GSS_KRB5_TOK_HDR_LEN; gss_krb5_aes_encrypt()
723 desc.fragno = 0; gss_krb5_aes_encrypt()
724 desc.fraglen = 0; gss_krb5_aes_encrypt()
725 desc.pages = pages; gss_krb5_aes_encrypt()
726 desc.outbuf = buf; gss_krb5_aes_encrypt()
727 desc.desc.info = desc.iv; gss_krb5_aes_encrypt()
728 desc.desc.flags = 0; gss_krb5_aes_encrypt()
729 desc.desc.tfm = aux_cipher; gss_krb5_aes_encrypt()
731 sg_init_table(desc.infrags, 4); gss_krb5_aes_encrypt()
732 sg_init_table(desc.outfrags, 4); gss_krb5_aes_encrypt()
735 cbcbytes, encryptor, &desc); gss_krb5_aes_encrypt()
743 desc.iv, pages, 1); gss_krb5_aes_encrypt()
771 struct decryptor_desc desc; gss_krb5_aes_decrypt() local
799 memset(desc.iv, 0, sizeof(desc.iv)); gss_krb5_aes_decrypt()
802 desc.fragno = 0; gss_krb5_aes_decrypt()
803 desc.fraglen = 0; gss_krb5_aes_decrypt()
804 desc.desc.info = desc.iv; gss_krb5_aes_decrypt()
805 desc.desc.flags = 0; gss_krb5_aes_decrypt()
806 desc.desc.tfm = aux_cipher; gss_krb5_aes_decrypt()
808 sg_init_table(desc.frags, 4); gss_krb5_aes_decrypt()
810 ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc); gss_krb5_aes_decrypt()
816 ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0); gss_krb5_aes_decrypt()
857 struct hash_desc desc; krb5_rc4_setup_seq_key() local
872 desc.tfm = hmac; krb5_rc4_setup_seq_key()
873 desc.flags = 0; krb5_rc4_setup_seq_key()
875 err = crypto_hash_init(&desc); krb5_rc4_setup_seq_key()
887 err = crypto_hash_digest(&desc, sg, 4, Kseq); krb5_rc4_setup_seq_key()
898 err = crypto_hash_digest(&desc, sg, 8, Kseq); krb5_rc4_setup_seq_key()
923 struct hash_desc desc; krb5_rc4_setup_enc_key() local
939 desc.tfm = hmac; krb5_rc4_setup_enc_key()
940 desc.flags = 0; krb5_rc4_setup_enc_key()
942 err = crypto_hash_init(&desc); krb5_rc4_setup_enc_key()
957 err = crypto_hash_digest(&desc, sg, 4, Kcrypt); krb5_rc4_setup_enc_key()
973 err = crypto_hash_digest(&desc, sg, 4, Kcrypt); krb5_rc4_setup_enc_key()
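Note: the hash_desc users above all follow the same (pre-4.6) synchronous hash pattern: allocate a tfm, then run init/update/final through a struct hash_desc. A self-contained sketch using plain "md5" (out must hold at least 16 bytes):

#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int demo_digest(const u8 *data, unsigned int len, u8 *out)
{
        struct hash_desc desc;
        struct scatterlist sg;
        int err;

        desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
        if (IS_ERR(desc.tfm))
                return PTR_ERR(desc.tfm);
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        sg_init_one(&sg, data, len);
        err = crypto_hash_init(&desc);
        if (!err)
                err = crypto_hash_update(&desc, &sg, len);
        if (!err)
                err = crypto_hash_final(&desc, out);

        crypto_free_hash(desc.tfm);
        return err;
}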
/linux-4.1.27/drivers/net/ethernet/stmicro/stmmac/
H A Dring_mode.c36 struct dma_desc *desc; stmmac_jumbo_frm() local
41 desc = (struct dma_desc *)(priv->dma_etx + entry); stmmac_jumbo_frm()
43 desc = priv->dma_tx + entry; stmmac_jumbo_frm()
54 desc->des2 = dma_map_single(priv->device, skb->data, stmmac_jumbo_frm()
56 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
59 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
60 desc->des3 = desc->des2 + BUF_SIZE_4KiB; stmmac_jumbo_frm()
61 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, stmmac_jumbo_frm()
68 desc = (struct dma_desc *)(priv->dma_etx + entry); stmmac_jumbo_frm()
70 desc = priv->dma_tx + entry; stmmac_jumbo_frm()
72 desc->des2 = dma_map_single(priv->device, skb->data + bmax, stmmac_jumbo_frm()
74 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
76 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
77 desc->des3 = desc->des2 + BUF_SIZE_4KiB; stmmac_jumbo_frm()
78 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, stmmac_jumbo_frm()
81 priv->hw->desc->set_tx_owner(desc); stmmac_jumbo_frm()
83 desc->des2 = dma_map_single(priv->device, skb->data, stmmac_jumbo_frm()
85 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
87 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
88 desc->des3 = desc->des2 + BUF_SIZE_4KiB; stmmac_jumbo_frm()
89 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, stmmac_jumbo_frm()
H A Dchain_mode.c36 struct dma_desc *desc = priv->dma_tx + entry; stmmac_jumbo_frm() local
48 desc->des2 = dma_map_single(priv->device, skb->data, stmmac_jumbo_frm()
50 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
52 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
53 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); stmmac_jumbo_frm()
58 desc = priv->dma_tx + entry; stmmac_jumbo_frm()
61 desc->des2 = dma_map_single(priv->device, stmmac_jumbo_frm()
64 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
66 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
67 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, stmmac_jumbo_frm()
69 priv->hw->desc->set_tx_owner(desc); stmmac_jumbo_frm()
73 desc->des2 = dma_map_single(priv->device, stmmac_jumbo_frm()
76 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
78 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
79 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, stmmac_jumbo_frm()
81 priv->hw->desc->set_tx_owner(desc); stmmac_jumbo_frm()
149 if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc) stmmac_clean_desc3()
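Note: both stmmac files above repeat the same mapping discipline: every dma_map_single() result is checked with dma_mapping_error() before it is written into a descriptor. The pattern, factored out as a sketch:

#include <linux/errno.h>
#include <linux/dma-mapping.h>

static int demo_map(struct device *dev, void *buf, size_t len,
                    dma_addr_t *out)
{
        dma_addr_t addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);

        if (dma_mapping_error(dev, addr))
                return -ENOMEM;
        *out = addr;    /* safe to hand to the hardware descriptor */
        return 0;
}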
/linux-4.1.27/drivers/staging/lustre/lustre/lov/
H A Dlproc_lov.c47 struct lov_desc *desc; lov_stripesize_seq_show() local
50 desc = &dev->u.lov.desc; lov_stripesize_seq_show()
51 seq_printf(m, "%llu\n", desc->ld_default_stripe_size); lov_stripesize_seq_show()
60 struct lov_desc *desc; lov_stripesize_seq_write() local
65 desc = &dev->u.lov.desc; lov_stripesize_seq_write()
71 desc->ld_default_stripe_size = val; lov_stripesize_seq_write()
79 struct lov_desc *desc; lov_stripeoffset_seq_show() local
82 desc = &dev->u.lov.desc; lov_stripeoffset_seq_show()
83 seq_printf(m, "%llu\n", desc->ld_default_stripe_offset); lov_stripeoffset_seq_show()
92 struct lov_desc *desc; lov_stripeoffset_seq_write() local
97 desc = &dev->u.lov.desc; lov_stripeoffset_seq_write()
102 desc->ld_default_stripe_offset = val; lov_stripeoffset_seq_write()
110 struct lov_desc *desc; lov_stripetype_seq_show() local
113 desc = &dev->u.lov.desc; lov_stripetype_seq_show()
114 seq_printf(m, "%u\n", desc->ld_pattern); lov_stripetype_seq_show()
123 struct lov_desc *desc; lov_stripetype_seq_write() local
127 desc = &dev->u.lov.desc; lov_stripetype_seq_write()
133 desc->ld_pattern = val; lov_stripetype_seq_write()
141 struct lov_desc *desc; lov_stripecount_seq_show() local
144 desc = &dev->u.lov.desc; lov_stripecount_seq_show()
145 seq_printf(m, "%d\n", (__s16)(desc->ld_default_stripe_count + 1) - 1); lov_stripecount_seq_show()
154 struct lov_desc *desc; lov_stripecount_seq_write() local
158 desc = &dev->u.lov.desc; lov_stripecount_seq_write()
164 desc->ld_default_stripe_count = val; lov_stripecount_seq_write()
172 struct lov_desc *desc; lov_numobd_seq_show() local
175 desc = &dev->u.lov.desc; lov_numobd_seq_show()
176 seq_printf(m, "%u\n", desc->ld_tgt_count); lov_numobd_seq_show()
184 struct lov_desc *desc; lov_activeobd_seq_show() local
187 desc = &dev->u.lov.desc; lov_activeobd_seq_show()
188 seq_printf(m, "%u\n", desc->ld_active_tgt_count); lov_activeobd_seq_show()
200 seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid); lov_desc_uuid_seq_show()
210 while (*pos < lov->desc.ld_tgt_count) { lov_tgt_seq_start()
227 while (++*pos < lov->desc.ld_tgt_count) { lov_tgt_seq_next()
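Note: the lprocfs handlers above are standard seq_file show/write pairs over fields of lov->desc. The show half of that pattern, with illustrative types rather than the real lov structures:

#include <linux/seq_file.h>

struct demo_dev { unsigned int stripe_count; };

static int demo_seq_show(struct seq_file *m, void *v)
{
        struct demo_dev *dev = m->private;  /* set at open time */

        seq_printf(m, "%u\n", dev->stripe_count);
        return 0;
}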
/linux-4.1.27/tools/perf/tests/
H A Dbuiltin-test.c18 const char *desc; member in struct:test
22 .desc = "vmlinux symtab matches kallsyms",
26 .desc = "detect open syscall event",
30 .desc = "detect open syscall event on all cpus",
34 .desc = "read samples using the mmap interface",
38 .desc = "parse events tests",
43 .desc = "x86 rdpmc test",
48 .desc = "Validate PERF_RECORD_* events & perf_sample fields",
52 .desc = "Test perf pmu format parsing",
56 .desc = "Test dso data read",
60 .desc = "Test dso data cache",
64 .desc = "Test dso data reopen",
68 .desc = "roundtrip evsel->name check",
72 .desc = "Check parsing of sched tracepoints fields",
76 .desc = "Generate and check syscalls:sys_enter_open event fields",
80 .desc = "struct perf_event_attr setup",
84 .desc = "Test matching and linking multiple hists",
88 .desc = "Try 'import perf' in python, checking link problems",
92 .desc = "Test breakpoint overflow signal handler",
96 .desc = "Test breakpoint overflow sampling",
100 .desc = "Test number of exit event of a simple workload",
104 .desc = "Test software clock events have valid period values",
109 .desc = "Test converting perf time to TSC",
114 .desc = "Test object code reading",
118 .desc = "Test sample parsing",
122 .desc = "Test using a dummy software event to keep tracking",
126 .desc = "Test parsing with no sample_id_all bit set",
132 .desc = "Test dwarf unwind",
138 .desc = "Test filtering hist entries",
142 .desc = "Test mmap thread lookup",
146 .desc = "Test thread mg sharing",
150 .desc = "Test output sorting of hist entries",
154 .desc = "Test cumulation of child hist entries",
158 .desc = "Test tracking with sched_switch",
162 .desc = "Filter fds with revents mask in a fdarray",
166 .desc = "Add fd to a fdarray, making it autogrow",
170 .desc = "Test kmod_path__parse function",
195 if (strstr(tests[curr].desc, argv[i])) perf_test__matches()
238 int len = strlen(tests[i].desc); __cmd_test()
252 pr_info("%2d: %-*s:", i, width, tests[curr].desc); __cmd_test()
261 pr_debug("---- end ----\n%s:", tests[curr].desc); __cmd_test()
287 if (argc > 1 && !strstr(tests[curr].desc, argv[1])) perf_test__list()
290 pr_info("%2d: %s\n", i, tests[curr].desc); perf_test__list()
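Note: perf's self-tests use a table-driven runner; each entry carries a .desc string matched against the command line and a function pointer. A stripped-down userspace sketch of the same structure (names invented):

#include <stdio.h>

struct test {
        const char *desc;
        int (*func)(void);
};

static int test_ok(void) { return 0; }

static struct test tests[] = {
        { .desc = "always passes", .func = test_ok },
        { .desc = NULL },       /* table terminator */
};

int main(void)
{
        for (int i = 0; tests[i].desc; i++)
                printf("%2d: %s: %s\n", i + 1, tests[i].desc,
                       tests[i].func() ? "FAILED" : "Ok");
        return 0;
}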
/linux-4.1.27/include/uapi/linux/usb/
H A Daudio.h159 __le16 wTotalLength; /* includes Unit and Terminal desc. */
257 static inline __u8 uac_mixer_unit_bNrChannels(struct uac_mixer_unit_descriptor *desc) uac_mixer_unit_bNrChannels() argument
259 return desc->baSourceID[desc->bNrInPins]; uac_mixer_unit_bNrChannels()
262 static inline __u32 uac_mixer_unit_wChannelConfig(struct uac_mixer_unit_descriptor *desc, uac_mixer_unit_wChannelConfig() argument
266 return (desc->baSourceID[desc->bNrInPins + 2] << 8) | uac_mixer_unit_wChannelConfig()
267 desc->baSourceID[desc->bNrInPins + 1]; uac_mixer_unit_wChannelConfig()
269 return (desc->baSourceID[desc->bNrInPins + 4] << 24) | uac_mixer_unit_wChannelConfig()
270 (desc->baSourceID[desc->bNrInPins + 3] << 16) | uac_mixer_unit_wChannelConfig()
271 (desc->baSourceID[desc->bNrInPins + 2] << 8) | uac_mixer_unit_wChannelConfig()
272 (desc->baSourceID[desc->bNrInPins + 1]); uac_mixer_unit_wChannelConfig()
275 static inline __u8 uac_mixer_unit_iChannelNames(struct uac_mixer_unit_descriptor *desc, uac_mixer_unit_iChannelNames() argument
279 desc->baSourceID[desc->bNrInPins + 3] : uac_mixer_unit_iChannelNames()
280 desc->baSourceID[desc->bNrInPins + 5]; uac_mixer_unit_iChannelNames()
283 static inline __u8 *uac_mixer_unit_bmControls(struct uac_mixer_unit_descriptor *desc, uac_mixer_unit_bmControls() argument
287 &desc->baSourceID[desc->bNrInPins + 4] : uac_mixer_unit_bmControls()
288 &desc->baSourceID[desc->bNrInPins + 6]; uac_mixer_unit_bmControls()
291 static inline __u8 uac_mixer_unit_iMixer(struct uac_mixer_unit_descriptor *desc) uac_mixer_unit_iMixer() argument
293 __u8 *raw = (__u8 *) desc; uac_mixer_unit_iMixer()
294 return raw[desc->bLength - 1]; uac_mixer_unit_iMixer()
307 static inline __u8 uac_selector_unit_iSelector(struct uac_selector_unit_descriptor *desc) uac_selector_unit_iSelector() argument
309 __u8 *raw = (__u8 *) desc; uac_selector_unit_iSelector()
310 return raw[desc->bLength - 1]; uac_selector_unit_iSelector()
324 static inline __u8 uac_feature_unit_iFeature(struct uac_feature_unit_descriptor *desc) uac_feature_unit_iFeature() argument
326 __u8 *raw = (__u8 *) desc; uac_feature_unit_iFeature()
327 return raw[desc->bLength - 1]; uac_feature_unit_iFeature()
341 static inline __u8 uac_processing_unit_bNrChannels(struct uac_processing_unit_descriptor *desc) uac_processing_unit_bNrChannels() argument
343 return desc->baSourceID[desc->bNrInPins]; uac_processing_unit_bNrChannels()
346 static inline __u32 uac_processing_unit_wChannelConfig(struct uac_processing_unit_descriptor *desc, uac_processing_unit_wChannelConfig() argument
350 return (desc->baSourceID[desc->bNrInPins + 2] << 8) | uac_processing_unit_wChannelConfig()
351 desc->baSourceID[desc->bNrInPins + 1]; uac_processing_unit_wChannelConfig()
353 return (desc->baSourceID[desc->bNrInPins + 4] << 24) | uac_processing_unit_wChannelConfig()
354 (desc->baSourceID[desc->bNrInPins + 3] << 16) | uac_processing_unit_wChannelConfig()
355 (desc->baSourceID[desc->bNrInPins + 2] << 8) | uac_processing_unit_wChannelConfig()
356 (desc->baSourceID[desc->bNrInPins + 1]); uac_processing_unit_wChannelConfig()
359 static inline __u8 uac_processing_unit_iChannelNames(struct uac_processing_unit_descriptor *desc, uac_processing_unit_iChannelNames() argument
363 desc->baSourceID[desc->bNrInPins + 3] : uac_processing_unit_iChannelNames()
364 desc->baSourceID[desc->bNrInPins + 5]; uac_processing_unit_iChannelNames()
367 static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc, uac_processing_unit_bControlSize() argument
371 desc->baSourceID[desc->bNrInPins + 4] : uac_processing_unit_bControlSize()
372 desc->baSourceID[desc->bNrInPins + 6]; uac_processing_unit_bControlSize()
375 static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc, uac_processing_unit_bmControls() argument
379 &desc->baSourceID[desc->bNrInPins + 5] : uac_processing_unit_bmControls()
380 &desc->baSourceID[desc->bNrInPins + 7]; uac_processing_unit_bmControls()
383 static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc, uac_processing_unit_iProcessing() argument
386 __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); uac_processing_unit_iProcessing()
387 return *(uac_processing_unit_bmControls(desc, protocol) uac_processing_unit_iProcessing()
391 static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc, uac_processing_unit_specific() argument
394 __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); uac_processing_unit_specific()
395 return uac_processing_unit_bmControls(desc, protocol) uac_processing_unit_specific()
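Note: the uac_* inline helpers above all index past the variable-length baSourceID[] array, because every field after it floats with bNrInPins. The idiom in isolation, with an illustrative struct rather than a real USB descriptor:

#include <linux/types.h>

struct demo_desc {
        __u8 bLength;
        __u8 bNrInPins;
        __u8 baSourceID[];      /* bNrInPins entries, then a fixed tail */
};

static inline __u8 demo_desc_bNrChannels(const struct demo_desc *desc)
{
        /* First byte after the variable-length source ID array. */
        return desc->baSourceID[desc->bNrInPins];
}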
/linux-4.1.27/drivers/net/ethernet/cisco/enic/
H A Dwq_enet_desc.h53 static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, wq_enet_desc_enc() argument
58 desc->address = cpu_to_le64(address); wq_enet_desc_enc()
59 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); wq_enet_desc_enc()
60 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << wq_enet_desc_enc()
62 desc->header_length_flags = cpu_to_le16( wq_enet_desc_enc()
69 desc->vlan_tag = cpu_to_le16(vlan_tag); wq_enet_desc_enc()
72 static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, wq_enet_desc_dec() argument
77 *address = le64_to_cpu(desc->address); wq_enet_desc_dec()
78 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; wq_enet_desc_dec()
79 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & wq_enet_desc_dec()
81 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> wq_enet_desc_dec()
83 *header_length = le16_to_cpu(desc->header_length_flags) & wq_enet_desc_dec()
85 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
87 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
89 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
91 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
93 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
95 *vlan_tag = le16_to_cpu(desc->vlan_tag); wq_enet_desc_dec()
H A Dcq_enet_desc.h33 static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, cq_enet_wq_desc_dec() argument
36 cq_desc_dec((struct cq_desc *)desc, type, cq_enet_wq_desc_dec()
104 static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, cq_enet_rq_desc_dec() argument
117 cq_desc_dec((struct cq_desc *)desc, type, cq_enet_rq_desc_dec()
120 completed_index_flags = le16_to_cpu(desc->completed_index_flags); cq_enet_rq_desc_dec()
122 le16_to_cpu(desc->q_number_rss_type_flags); cq_enet_rq_desc_dec()
123 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); cq_enet_rq_desc_dec()
139 *rss_hash = le32_to_cpu(desc->rss_hash); cq_enet_rq_desc_dec()
151 *vlan_tci = le16_to_cpu(desc->vlan); cq_enet_rq_desc_dec()
154 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & cq_enet_rq_desc_dec()
156 *fcoe_fc_crc_ok = (desc->flags & cq_enet_rq_desc_dec()
158 *fcoe_enc_error = (desc->flags & cq_enet_rq_desc_dec()
160 *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >> cq_enet_rq_desc_dec()
169 *checksum = le16_to_cpu(desc->checksum_fcoe); cq_enet_rq_desc_dec()
173 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; cq_enet_rq_desc_dec()
174 *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; cq_enet_rq_desc_dec()
175 *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; cq_enet_rq_desc_dec()
177 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; cq_enet_rq_desc_dec()
178 *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; cq_enet_rq_desc_dec()
179 *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; cq_enet_rq_desc_dec()
181 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; cq_enet_rq_desc_dec()
182 *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0; cq_enet_rq_desc_dec()
H A Drq_enet_desc.h43 static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, rq_enet_desc_enc() argument
46 desc->address = cpu_to_le64(address); rq_enet_desc_enc()
47 desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) | rq_enet_desc_enc()
51 static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, rq_enet_desc_dec() argument
54 *address = le64_to_cpu(desc->address); rq_enet_desc_dec()
55 *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK; rq_enet_desc_dec()
56 *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) & rq_enet_desc_dec()
H A Dcq_desc.h59 const struct cq_desc *desc = desc_arg; cq_desc_dec() local
60 const u8 type_color = desc->type_color; cq_desc_dec()
65 * Make sure color bit is read from desc *before* other fields cq_desc_dec()
66 * are read from desc. Hardware guarantees color bit is last cq_desc_dec()
75 *q_number = le16_to_cpu(desc->q_number) & CQ_DESC_Q_NUM_MASK; cq_desc_dec()
76 *completed_index = le16_to_cpu(desc->completed_index) & cq_desc_dec()
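Note: the enic headers above share one convention: hardware descriptors live in memory little-endian, so every field passes through cpu_to_leXX() on the way in and leXX_to_cpu() on the way out. A minimal encode/decode pair in the same style (struct is illustrative):

#include <linux/types.h>
#include <asm/byteorder.h>

struct demo_hw_desc {
        __le64 address;
        __le16 length;
};

static inline void demo_desc_enc(struct demo_hw_desc *desc,
                                 u64 address, u16 length)
{
        desc->address = cpu_to_le64(address);
        desc->length = cpu_to_le16(length);
}

static inline void demo_desc_dec(const struct demo_hw_desc *desc,
                                 u64 *address, u16 *length)
{
        *address = le64_to_cpu(desc->address);
        *length = le16_to_cpu(desc->length);
}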
/linux-4.1.27/drivers/dma/
H A Ddma-jz4780.c118 struct jz4780_dma_hwdesc *desc; member in struct:jz4780_dma_desc
134 struct jz4780_dma_desc *desc; member in struct:jz4780_dma_chan
187 struct jz4780_dma_desc *desc; jz4780_dma_desc_alloc() local
192 desc = kzalloc(sizeof(*desc), GFP_NOWAIT); jz4780_dma_desc_alloc()
193 if (!desc) jz4780_dma_desc_alloc()
196 desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT, jz4780_dma_desc_alloc()
197 &desc->desc_phys); jz4780_dma_desc_alloc()
198 if (!desc->desc) { jz4780_dma_desc_alloc()
199 kfree(desc); jz4780_dma_desc_alloc()
203 desc->count = count; jz4780_dma_desc_alloc()
204 desc->type = type; jz4780_dma_desc_alloc()
205 return desc; jz4780_dma_desc_alloc()
210 struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc); jz4780_dma_desc_free() local
213 dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys); jz4780_dma_desc_free()
214 kfree(desc); jz4780_dma_desc_free()
242 struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len, jz4780_dma_setup_hwdesc()
250 desc->dcm = JZ_DMA_DCM_SAI; jz4780_dma_setup_hwdesc()
251 desc->dsa = addr; jz4780_dma_setup_hwdesc()
252 desc->dta = config->dst_addr; jz4780_dma_setup_hwdesc()
253 desc->drt = jzchan->transfer_type; jz4780_dma_setup_hwdesc()
258 desc->dcm = JZ_DMA_DCM_DAI; jz4780_dma_setup_hwdesc()
259 desc->dsa = config->src_addr; jz4780_dma_setup_hwdesc()
260 desc->dta = addr; jz4780_dma_setup_hwdesc()
261 desc->drt = jzchan->transfer_type; jz4780_dma_setup_hwdesc()
288 desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT; jz4780_dma_setup_hwdesc()
289 desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT; jz4780_dma_setup_hwdesc()
290 desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT; jz4780_dma_setup_hwdesc()
292 desc->dtc = len >> ord; jz4780_dma_setup_hwdesc()
300 struct jz4780_dma_desc *desc; jz4780_dma_prep_slave_sg() local
304 desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE); jz4780_dma_prep_slave_sg()
305 if (!desc) jz4780_dma_prep_slave_sg()
309 err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], jz4780_dma_prep_slave_sg()
317 desc->desc[i].dcm |= JZ_DMA_DCM_TIE; jz4780_dma_prep_slave_sg()
321 desc->desc[i].dcm |= JZ_DMA_DCM_LINK; jz4780_dma_prep_slave_sg()
328 desc->desc[i].dtc |= jz4780_dma_prep_slave_sg()
329 (((i + 1) * sizeof(*desc->desc)) >> 4) << 24; jz4780_dma_prep_slave_sg()
333 return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); jz4780_dma_prep_slave_sg()
342 struct jz4780_dma_desc *desc; jz4780_dma_prep_dma_cyclic() local
351 desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC); jz4780_dma_prep_dma_cyclic()
352 if (!desc) jz4780_dma_prep_dma_cyclic()
356 err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr, jz4780_dma_prep_dma_cyclic()
369 desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK; jz4780_dma_prep_dma_cyclic()
378 desc->desc[i].dtc |= jz4780_dma_prep_dma_cyclic()
379 (((i + 1) * sizeof(*desc->desc)) >> 4) << 24; jz4780_dma_prep_dma_cyclic()
383 return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); jz4780_dma_prep_dma_cyclic()
391 struct jz4780_dma_desc *desc; jz4780_dma_prep_dma_memcpy() local
395 desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY); jz4780_dma_prep_dma_memcpy()
396 if (!desc) jz4780_dma_prep_dma_memcpy()
403 desc->desc[0].dsa = src; jz4780_dma_prep_dma_memcpy()
404 desc->desc[0].dta = dest; jz4780_dma_prep_dma_memcpy()
405 desc->desc[0].drt = JZ_DMA_DRT_AUTO; jz4780_dma_prep_dma_memcpy()
406 desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI | jz4780_dma_prep_dma_memcpy()
410 desc->desc[0].dtc = len >> ord; jz4780_dma_prep_dma_memcpy()
412 return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); jz4780_dma_prep_dma_memcpy()
422 if (!jzchan->desc) { jz4780_dma_begin()
429 jzchan->desc = to_jz4780_dma_desc(vdesc); jz4780_dma_begin()
432 if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) { jz4780_dma_begin()
447 for (i = 0; i < jzchan->desc->count; i++) jz4780_dma_begin()
448 jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK; jz4780_dma_begin()
457 (jzchan->curr_hwdesc + 1) % jzchan->desc->count; jz4780_dma_begin()
464 desc_phys = jzchan->desc->desc_phys + jz4780_dma_begin()
465 (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc)); jz4780_dma_begin()
481 if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc) jz4780_dma_issue_pending()
497 if (jzchan->desc) { jz4780_dma_terminate_all()
498 jz4780_dma_desc_free(&jzchan->desc->vdesc); jz4780_dma_terminate_all()
499 jzchan->desc = NULL; jz4780_dma_terminate_all()
524 struct jz4780_dma_desc *desc, unsigned int next_sg) jz4780_dma_desc_residue()
532 for (i = next_sg; i < desc->count; i++) jz4780_dma_desc_residue()
533 residue += desc->desc[i].dtc << jzchan->transfer_shift; jz4780_dma_desc_residue()
563 } else if (cookie == jzchan->desc->vdesc.tx.cookie) { jz4780_dma_tx_status()
564 txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc, jz4780_dma_tx_status()
565 (jzchan->curr_hwdesc + 1) % jzchan->desc->count); jz4780_dma_tx_status()
569 if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc jz4780_dma_tx_status()
570 && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) jz4780_dma_tx_status()
597 if (jzchan->desc) { jz4780_dma_chan_irq()
598 jzchan->desc->status = dcs; jz4780_dma_chan_irq()
601 if (jzchan->desc->type == DMA_CYCLIC) { jz4780_dma_chan_irq()
602 vchan_cyclic_callback(&jzchan->desc->vdesc); jz4780_dma_chan_irq()
604 vchan_cookie_complete(&jzchan->desc->vdesc); jz4780_dma_chan_irq()
605 jzchan->desc = NULL; jz4780_dma_chan_irq()
241 jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan, struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len, enum dma_transfer_direction direction) jz4780_dma_setup_hwdesc() argument
523 jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan, struct jz4780_dma_desc *desc, unsigned int next_sg) jz4780_dma_desc_residue() argument
H A Dtxx9dmac.c148 const struct txx9dmac_desc *desc) desc_read_CHAR()
150 return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR; desc_read_CHAR()
154 struct txx9dmac_desc *desc, dma_addr_t val) desc_write_CHAR()
157 desc->hwdesc.CHAR = val; desc_write_CHAR()
159 desc->hwdesc32.CHAR = val; desc_write_CHAR()
183 static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc) txx9dmac_last_child() argument
185 if (!list_empty(&desc->tx_list)) txx9dmac_last_child()
186 desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node); txx9dmac_last_child()
187 return desc; txx9dmac_last_child()
196 struct txx9dmac_desc *desc; txx9dmac_desc_alloc() local
198 desc = kzalloc(sizeof(*desc), flags); txx9dmac_desc_alloc()
199 if (!desc) txx9dmac_desc_alloc()
201 INIT_LIST_HEAD(&desc->tx_list); txx9dmac_desc_alloc()
202 dma_async_tx_descriptor_init(&desc->txd, &dc->chan); txx9dmac_desc_alloc()
203 desc->txd.tx_submit = txx9dmac_tx_submit; txx9dmac_desc_alloc()
205 desc->txd.flags = DMA_CTRL_ACK; txx9dmac_desc_alloc()
206 desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc, txx9dmac_desc_alloc()
208 return desc; txx9dmac_desc_alloc()
213 struct txx9dmac_desc *desc, *_desc; txx9dmac_desc_get() local
218 list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) { txx9dmac_desc_get()
219 if (async_tx_test_ack(&desc->txd)) { txx9dmac_desc_get()
220 list_del(&desc->desc_node); txx9dmac_desc_get()
221 ret = desc; txx9dmac_desc_get()
224 dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc); txx9dmac_desc_get()
245 struct txx9dmac_desc *desc) txx9dmac_sync_desc_for_cpu()
250 list_for_each_entry(child, &desc->tx_list, desc_node) txx9dmac_sync_desc_for_cpu()
255 desc->txd.phys, ddev->descsize, txx9dmac_sync_desc_for_cpu()
261 * `desc' must not be on any lists.
264 struct txx9dmac_desc *desc) txx9dmac_desc_put()
266 if (desc) { txx9dmac_desc_put()
269 txx9dmac_sync_desc_for_cpu(dc, desc); txx9dmac_desc_put()
272 list_for_each_entry(child, &desc->tx_list, desc_node) txx9dmac_desc_put()
274 "moving child desc %p to freelist\n", txx9dmac_desc_put()
276 list_splice_init(&desc->tx_list, &dc->free_list); txx9dmac_desc_put()
277 dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n", txx9dmac_desc_put()
278 desc); txx9dmac_desc_put()
279 list_add(&desc->desc_node, &dc->free_list); txx9dmac_desc_put()
404 struct txx9dmac_desc *desc) txx9dmac_descriptor_complete()
408 struct dma_async_tx_descriptor *txd = &desc->txd; txx9dmac_descriptor_complete()
411 txd->cookie, desc); txx9dmac_descriptor_complete() local
417 txx9dmac_sync_desc_for_cpu(dc, desc); txx9dmac_descriptor_complete()
418 list_splice_init(&desc->tx_list, &dc->free_list); txx9dmac_descriptor_complete()
419 list_move(&desc->desc_node, &dc->free_list); txx9dmac_descriptor_complete()
434 struct txx9dmac_desc *desc; txx9dmac_dequeue() local
439 desc = txx9dmac_first_queued(dc); txx9dmac_dequeue()
441 desc_write_CHAR(dc, prev, desc->txd.phys); txx9dmac_dequeue()
446 prev = txx9dmac_last_child(desc); txx9dmac_dequeue()
447 list_move_tail(&desc->desc_node, list); txx9dmac_dequeue()
449 if ((desc->txd.flags & DMA_PREP_INTERRUPT) && txx9dmac_dequeue()
457 struct txx9dmac_desc *desc, *_desc; txx9dmac_complete_all() local
470 list_for_each_entry_safe(desc, _desc, &list, desc_node) txx9dmac_complete_all()
471 txx9dmac_descriptor_complete(dc, desc); txx9dmac_complete_all()
475 struct txx9dmac_hwdesc *desc) txx9dmac_dump_desc()
480 " desc: ch%#llx s%#llx d%#llx c%#x\n", txx9dmac_dump_desc()
481 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR); txx9dmac_dump_desc()
484 " desc: ch%#llx s%#llx d%#llx c%#x" txx9dmac_dump_desc()
486 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR, txx9dmac_dump_desc()
487 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR); txx9dmac_dump_desc()
490 struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc; txx9dmac_dump_desc()
493 " desc: ch%#x s%#x d%#x c%#x\n", txx9dmac_dump_desc()
497 " desc: ch%#x s%#x d%#x c%#x" txx9dmac_dump_desc()
546 struct txx9dmac_desc *desc, *_desc; txx9dmac_scan_descriptors() local
571 list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) { txx9dmac_scan_descriptors()
572 if (desc_read_CHAR(dc, desc) == chain) { txx9dmac_scan_descriptors()
579 list_for_each_entry(child, &desc->tx_list, desc_node) txx9dmac_scan_descriptors()
591 txx9dmac_descriptor_complete(dc, desc); txx9dmac_scan_descriptors()
699 struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx); txx9dmac_tx_submit() local
707 desc->txd.cookie, desc); txx9dmac_tx_submit() local
709 list_add_tail(&desc->desc_node, &dc->queue); txx9dmac_tx_submit()
721 struct txx9dmac_desc *desc; txx9dmac_prep_dma_memcpy() local
755 desc = txx9dmac_desc_get(dc); txx9dmac_prep_dma_memcpy()
756 if (!desc) { txx9dmac_prep_dma_memcpy()
762 desc->hwdesc.SAR = src + offset; txx9dmac_prep_dma_memcpy()
763 desc->hwdesc.DAR = dest + offset; txx9dmac_prep_dma_memcpy()
764 desc->hwdesc.CNTR = xfer_count; txx9dmac_prep_dma_memcpy()
765 txx9dmac_desc_set_nosimple(ddev, desc, 8, 8, txx9dmac_prep_dma_memcpy()
768 desc->hwdesc32.SAR = src + offset; txx9dmac_prep_dma_memcpy()
769 desc->hwdesc32.DAR = dest + offset; txx9dmac_prep_dma_memcpy()
770 desc->hwdesc32.CNTR = xfer_count; txx9dmac_prep_dma_memcpy()
771 txx9dmac_desc_set_nosimple(ddev, desc, 4, 4, txx9dmac_prep_dma_memcpy()
783 first = desc; txx9dmac_prep_dma_memcpy()
785 desc_write_CHAR(dc, prev, desc->txd.phys); txx9dmac_prep_dma_memcpy()
789 list_add_tail(&desc->desc_node, &first->tx_list); txx9dmac_prep_dma_memcpy()
791 prev = desc; txx9dmac_prep_dma_memcpy()
835 struct txx9dmac_desc *desc; for_each_sg() local
839 desc = txx9dmac_desc_get(dc); for_each_sg()
840 if (!desc) { for_each_sg()
849 desc->hwdesc.SAR = mem; for_each_sg()
850 desc->hwdesc.DAR = ds->tx_reg; for_each_sg()
852 desc->hwdesc.SAR = ds->rx_reg; for_each_sg()
853 desc->hwdesc.DAR = mem; for_each_sg()
855 desc->hwdesc.CNTR = sg_dma_len(sg); for_each_sg()
858 desc->hwdesc32.SAR = mem; for_each_sg()
859 desc->hwdesc32.DAR = ds->tx_reg; for_each_sg()
861 desc->hwdesc32.SAR = ds->rx_reg; for_each_sg()
862 desc->hwdesc32.DAR = mem; for_each_sg()
864 desc->hwdesc32.CNTR = sg_dma_len(sg); for_each_sg()
873 txx9dmac_desc_set_nosimple(ddev, desc, sai, dai, for_each_sg()
877 first = desc; for_each_sg()
879 desc_write_CHAR(dc, prev, desc->txd.phys); for_each_sg()
884 list_add_tail(&desc->desc_node, &first->tx_list); for_each_sg()
886 prev = desc; for_each_sg()
907 struct txx9dmac_desc *desc, *_desc; txx9dmac_terminate_all() local
922 list_for_each_entry_safe(desc, _desc, &list, desc_node) txx9dmac_terminate_all()
923 txx9dmac_descriptor_complete(dc, desc); txx9dmac_terminate_all()
950 struct txx9dmac_desc *desc; txx9dmac_chain_dynamic() local
955 desc = list_entry(list.next, struct txx9dmac_desc, desc_node); txx9dmac_chain_dynamic()
956 desc_write_CHAR(dc, prev, desc->txd.phys); txx9dmac_chain_dynamic()
964 channel_write_CHAR(dc, desc->txd.phys); txx9dmac_chain_dynamic()
996 struct txx9dmac_desc *desc; txx9dmac_alloc_chan_resources() local
1031 desc = txx9dmac_desc_alloc(dc, GFP_KERNEL); txx9dmac_alloc_chan_resources()
1032 if (!desc) { txx9dmac_alloc_chan_resources()
1038 txx9dmac_desc_put(dc, desc); txx9dmac_alloc_chan_resources()
1055 struct txx9dmac_desc *desc, *_desc; txx9dmac_free_chan_resources() local
1071 list_for_each_entry_safe(desc, _desc, &list, desc_node) { txx9dmac_free_chan_resources()
1072 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); txx9dmac_free_chan_resources() local
1073 dma_unmap_single(chan2parent(chan), desc->txd.phys, txx9dmac_free_chan_resources()
1075 kfree(desc); txx9dmac_free_chan_resources()
147 desc_read_CHAR(const struct txx9dmac_chan *dc, const struct txx9dmac_desc *desc) desc_read_CHAR() argument
153 desc_write_CHAR(const struct txx9dmac_chan *dc, struct txx9dmac_desc *desc, dma_addr_t val) desc_write_CHAR() argument
244 txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) txx9dmac_sync_desc_for_cpu() argument
263 txx9dmac_desc_put(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) txx9dmac_desc_put() argument
403 txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) txx9dmac_descriptor_complete() argument
474 txx9dmac_dump_desc(struct txx9dmac_chan *dc, struct txx9dmac_hwdesc *desc) txx9dmac_dump_desc() argument
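
Note on the txx9dmac hits above: they orbit one list-walking pattern. txx9dmac_scan_descriptors() completes every active descriptor the hardware has already moved past and stops at the one whose CHAR still matches the running chain. A simplified sketch reusing the driver's own helpers (the real walk also inspects each descriptor's tx_list children before completing it):

	/* Simplified: complete finished descriptors, stop at the in-flight one. */
	static void scan_active_list(struct txx9dmac_chan *dc, dma_addr_t chain)
	{
		struct txx9dmac_desc *desc, *_desc;

		list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) {
			if (desc_read_CHAR(dc, desc) == chain)
				return;	/* hardware is still working on this one */
			txx9dmac_descriptor_complete(dc, desc);
		}
	}
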
H A Dpch_dma.c92 struct pch_dma_desc_regs desc[MAX_CHAN_NR]; member in struct:pch_dma_regs
331 static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) pdc_dostart() argument
340 pd_chan->chan.chan_id, desc->regs.dev_addr); pdc_dostart()
342 pd_chan->chan.chan_id, desc->regs.mem_addr); pdc_dostart()
344 pd_chan->chan.chan_id, desc->regs.size); pdc_dostart()
346 pd_chan->chan.chan_id, desc->regs.next); pdc_dostart()
348 if (list_empty(&desc->tx_list)) { pdc_dostart()
349 channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); pdc_dostart()
350 channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); pdc_dostart()
351 channel_writel(pd_chan, SIZE, desc->regs.size); pdc_dostart()
352 channel_writel(pd_chan, NEXT, desc->regs.next); pdc_dostart()
355 channel_writel(pd_chan, NEXT, desc->txd.phys); pdc_dostart()
361 struct pch_dma_desc *desc) pdc_chain_complete()
363 struct dma_async_tx_descriptor *txd = &desc->txd; pdc_chain_complete()
367 list_splice_init(&desc->tx_list, &pd_chan->free_list); pdc_chain_complete()
368 list_move(&desc->desc_node, &pd_chan->free_list); pdc_chain_complete()
376 struct pch_dma_desc *desc, *_d; pdc_complete_all() local
387 list_for_each_entry_safe(desc, _d, &list, desc_node) pdc_complete_all()
388 pdc_chain_complete(pd_chan, desc); pdc_complete_all()
423 struct pch_dma_desc *desc = to_pd_desc(txd); pd_tx_submit() local
431 list_add_tail(&desc->desc_node, &pd_chan->active_list); pd_tx_submit()
432 pdc_dostart(pd_chan, desc); pd_tx_submit()
434 list_add_tail(&desc->desc_node, &pd_chan->queue); pd_tx_submit()
443 struct pch_dma_desc *desc = NULL; pdc_alloc_desc() local
447 desc = pci_pool_alloc(pd->pool, flags, &addr); pdc_alloc_desc()
448 if (desc) { pdc_alloc_desc()
449 memset(desc, 0, sizeof(struct pch_dma_desc)); pdc_alloc_desc()
450 INIT_LIST_HEAD(&desc->tx_list); pdc_alloc_desc()
451 dma_async_tx_descriptor_init(&desc->txd, chan); pdc_alloc_desc()
452 desc->txd.tx_submit = pd_tx_submit; pdc_alloc_desc()
453 desc->txd.flags = DMA_CTRL_ACK; pdc_alloc_desc()
454 desc->txd.phys = addr; pdc_alloc_desc()
457 return desc; pdc_alloc_desc()
462 struct pch_dma_desc *desc, *_d; pdc_desc_get() local
467 list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { pdc_desc_get()
469 if (async_tx_test_ack(&desc->txd)) { pdc_desc_get()
470 list_del(&desc->desc_node); pdc_desc_get()
471 ret = desc; pdc_desc_get()
474 dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); pdc_desc_get()
487 "failed to alloc desc\n"); pdc_desc_get()
495 struct pch_dma_desc *desc) pdc_desc_put()
497 if (desc) { pdc_desc_put()
499 list_splice_init(&desc->tx_list, &pd_chan->free_list); pdc_desc_put()
500 list_add(&desc->desc_node, &pd_chan->free_list); pdc_desc_put()
508 struct pch_dma_desc *desc; pd_alloc_chan_resources() local
521 desc = pdc_alloc_desc(chan, GFP_KERNEL); pd_alloc_chan_resources()
523 if (!desc) { pd_alloc_chan_resources()
529 list_add_tail(&desc->desc_node, &tmp_list); pd_alloc_chan_resources()
547 struct pch_dma_desc *desc, *_d; pd_free_chan_resources() local
559 list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) pd_free_chan_resources()
560 pci_pool_free(pd->pool, desc, desc->txd.phys); pd_free_chan_resources()
591 struct pch_dma_desc *desc = NULL; pd_prep_slave_sg() local
612 desc = pdc_desc_get(pd_chan); for_each_sg()
614 if (!desc) for_each_sg()
617 desc->regs.dev_addr = reg; for_each_sg()
618 desc->regs.mem_addr = sg_dma_address(sg); for_each_sg()
619 desc->regs.size = sg_dma_len(sg); for_each_sg()
620 desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ; for_each_sg()
624 if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE) for_each_sg()
626 desc->regs.size |= DMA_DESC_WIDTH_1_BYTE; for_each_sg()
629 if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES) for_each_sg()
631 desc->regs.size |= DMA_DESC_WIDTH_2_BYTES; for_each_sg()
634 if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES) for_each_sg()
636 desc->regs.size |= DMA_DESC_WIDTH_4_BYTES; for_each_sg()
643 first = desc; for_each_sg()
645 prev->regs.next |= desc->txd.phys; for_each_sg()
646 list_add_tail(&desc->desc_node, &first->tx_list); for_each_sg()
649 prev = desc; for_each_sg()
653 desc->regs.next = DMA_DESC_END_WITH_IRQ;
655 desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;
658 desc->txd.flags = flags;
663 dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
671 struct pch_dma_desc *desc, *_d; pd_device_terminate_all() local
681 list_for_each_entry_safe(desc, _d, &list, desc_node) pd_device_terminate_all()
682 pdc_chain_complete(pd_chan, desc); pd_device_terminate_all()
909 pd_chan->membase = &regs->desc[i]; pch_dma_probe()
360 pdc_chain_complete(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc) pdc_chain_complete() argument
494 pdc_desc_put(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc) pdc_desc_put() argument
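
Note: pch_dma treats descriptors as a recycle-or-allocate pool. pdc_desc_get() scans free_list for a descriptor the client has ACKed and only falls back to pdc_alloc_desc() when nothing is reusable. A sketch of the recycle half, assuming the channel's lists are guarded by a pd_chan->lock spinlock (the lock field itself is not shown in the hits):

	/* Reuse an ACKed descriptor from the free list, or report none. */
	static struct pch_dma_desc *desc_try_reuse(struct pch_dma_chan *pd_chan)
	{
		struct pch_dma_desc *desc, *_d, *ret = NULL;

		spin_lock(&pd_chan->lock);
		list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) {
			if (async_tx_test_ack(&desc->txd)) {
				list_del(&desc->desc_node);
				ret = desc;
				break;
			}
		}
		spin_unlock(&pd_chan->lock);
		return ret;	/* NULL => caller allocates via pdc_alloc_desc() */
	}
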
H A Dimx-dma.c128 struct dma_async_tx_descriptor desc; member in struct:imxdma_desc
162 struct dma_async_tx_descriptor desc; member in struct:imxdma_channel
245 struct imxdma_desc *desc; imxdma_chan_is_doing_cyclic() local
248 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, imxdma_chan_is_doing_cyclic()
250 if (desc->type == IMXDMA_DESC_CYCLIC) imxdma_chan_is_doing_cyclic()
284 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); imxdma_sg_next()
313 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); imxdma_enable_hw()
434 struct imxdma_desc *desc; dma_irq_handle_channel() local
443 desc = list_first_entry(&imxdmac->ld_active, dma_irq_handle_channel()
448 if (desc->sg) { dma_irq_handle_channel()
450 desc->sg = sg_next(desc->sg); dma_irq_handle_channel()
452 if (desc->sg) { dma_irq_handle_channel()
453 imxdma_sg_next(desc); dma_irq_handle_channel()
516 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); imxdma_xfer_desc()
622 struct imxdma_desc *desc; imxdma_tasklet() local
632 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); imxdma_tasklet()
641 dma_cookie_complete(&desc->desc); imxdma_tasklet()
652 desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, imxdma_tasklet()
655 if (imxdma_xfer_desc(desc) < 0) imxdma_tasklet()
656 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", imxdma_tasklet()
662 if (desc->desc.callback) imxdma_tasklet()
663 desc->desc.callback(desc->desc.callback_param); imxdma_tasklet()
761 struct imxdma_desc *desc; imxdma_alloc_chan_resources() local
763 desc = kzalloc(sizeof(*desc), GFP_KERNEL); imxdma_alloc_chan_resources()
764 if (!desc) imxdma_alloc_chan_resources()
766 __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor)); imxdma_alloc_chan_resources()
767 dma_async_tx_descriptor_init(&desc->desc, chan); imxdma_alloc_chan_resources()
768 desc->desc.tx_submit = imxdma_tx_submit; imxdma_alloc_chan_resources()
770 desc->desc.flags = DMA_CTRL_ACK; imxdma_alloc_chan_resources()
771 desc->status = DMA_COMPLETE; imxdma_alloc_chan_resources()
773 list_add_tail(&desc->node, &imxdmac->ld_free); imxdma_alloc_chan_resources()
787 struct imxdma_desc *desc, *_desc; imxdma_free_chan_resources() local
798 list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) { imxdma_free_chan_resources()
799 kfree(desc); imxdma_free_chan_resources()
816 struct imxdma_desc *desc; imxdma_prep_slave_sg() local
822 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); imxdma_prep_slave_sg()
843 desc->type = IMXDMA_DESC_SLAVE_SG;
844 desc->sg = sgl;
845 desc->sgcount = sg_len;
846 desc->len = dma_length;
847 desc->direction = direction;
849 desc->src = imxdmac->per_address;
851 desc->dest = imxdmac->per_address;
853 desc->desc.callback = NULL;
854 desc->desc.callback_param = NULL;
856 return &desc->desc;
866 struct imxdma_desc *desc; imxdma_prep_dma_cyclic() local
877 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); imxdma_prep_dma_cyclic()
902 desc->type = IMXDMA_DESC_CYCLIC; imxdma_prep_dma_cyclic()
903 desc->sg = imxdmac->sg_list; imxdma_prep_dma_cyclic()
904 desc->sgcount = periods; imxdma_prep_dma_cyclic()
905 desc->len = IMX_DMA_LENGTH_LOOP; imxdma_prep_dma_cyclic()
906 desc->direction = direction; imxdma_prep_dma_cyclic()
908 desc->src = imxdmac->per_address; imxdma_prep_dma_cyclic()
910 desc->dest = imxdmac->per_address; imxdma_prep_dma_cyclic()
912 desc->desc.callback = NULL; imxdma_prep_dma_cyclic()
913 desc->desc.callback_param = NULL; imxdma_prep_dma_cyclic()
915 return &desc->desc; imxdma_prep_dma_cyclic()
924 struct imxdma_desc *desc; imxdma_prep_dma_memcpy() local
934 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); imxdma_prep_dma_memcpy()
936 desc->type = IMXDMA_DESC_MEMCPY; imxdma_prep_dma_memcpy()
937 desc->src = src; imxdma_prep_dma_memcpy()
938 desc->dest = dest; imxdma_prep_dma_memcpy()
939 desc->len = len; imxdma_prep_dma_memcpy()
940 desc->direction = DMA_MEM_TO_MEM; imxdma_prep_dma_memcpy()
941 desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; imxdma_prep_dma_memcpy()
942 desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; imxdma_prep_dma_memcpy()
943 desc->desc.callback = NULL; imxdma_prep_dma_memcpy()
944 desc->desc.callback_param = NULL; imxdma_prep_dma_memcpy()
946 return &desc->desc; imxdma_prep_dma_memcpy()
955 struct imxdma_desc *desc; imxdma_prep_dma_interleaved() local
971 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); imxdma_prep_dma_interleaved()
973 desc->type = IMXDMA_DESC_INTERLEAVED; imxdma_prep_dma_interleaved()
974 desc->src = xt->src_start; imxdma_prep_dma_interleaved()
975 desc->dest = xt->dst_start; imxdma_prep_dma_interleaved()
976 desc->x = xt->sgl[0].size; imxdma_prep_dma_interleaved()
977 desc->y = xt->numf; imxdma_prep_dma_interleaved()
978 desc->w = xt->sgl[0].icg + desc->x; imxdma_prep_dma_interleaved()
979 desc->len = desc->x * desc->y; imxdma_prep_dma_interleaved()
980 desc->direction = DMA_MEM_TO_MEM; imxdma_prep_dma_interleaved()
981 desc->config_port = IMX_DMA_MEMSIZE_32; imxdma_prep_dma_interleaved()
982 desc->config_mem = IMX_DMA_MEMSIZE_32; imxdma_prep_dma_interleaved()
984 desc->config_mem |= IMX_DMA_TYPE_2D; imxdma_prep_dma_interleaved()
986 desc->config_port |= IMX_DMA_TYPE_2D; imxdma_prep_dma_interleaved()
987 desc->desc.callback = NULL; imxdma_prep_dma_interleaved()
988 desc->desc.callback_param = NULL; imxdma_prep_dma_interleaved()
990 return &desc->desc; imxdma_prep_dma_interleaved()
997 struct imxdma_desc *desc; imxdma_issue_pending() local
1003 desc = list_first_entry(&imxdmac->ld_queue, imxdma_issue_pending()
1006 if (imxdma_xfer_desc(desc) < 0) { imxdma_issue_pending()
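
Note: unlike the pool drivers above, imx-dma never allocates in the prep path. Each imxdma_prep_*() dequeues the head of ld_free, which imxdma_alloc_chan_resources() pre-filled. A sketch of that dequeue; the emptiness guard is an assumption here, since the hits do not show the driver's full precondition checks:

	/* Prep-time descriptor fetch: take the head of the pre-allocated pool. */
	static struct imxdma_desc *take_free_desc(struct imxdma_channel *imxdmac)
	{
		if (list_empty(&imxdmac->ld_free))
			return NULL;	/* all descriptors busy: reject the prep */
		return list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
	}
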
H A Dep93xx_dma.c228 * @desc: head of the new active descriptor chain
230 * Sets @desc to be the head of the new active descriptor chain. This is the
237 struct ep93xx_dma_desc *desc) ep93xx_dma_set_active()
241 list_add_tail(&desc->node, &edmac->active); ep93xx_dma_set_active()
243 /* Flatten the @desc->tx_list chain into @edmac->active list */ ep93xx_dma_set_active()
244 while (!list_empty(&desc->tx_list)) { ep93xx_dma_set_active()
245 struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list, ep93xx_dma_set_active()
254 d->txd.callback = desc->txd.callback; ep93xx_dma_set_active()
255 d->txd.callback_param = desc->txd.callback_param; ep93xx_dma_set_active()
285 struct ep93xx_dma_desc *desc; ep93xx_dma_advance_active() local
292 desc = ep93xx_dma_get_active(edmac); ep93xx_dma_advance_active()
293 if (!desc) ep93xx_dma_advance_active()
300 return !desc->txd.cookie; ep93xx_dma_advance_active()
355 struct ep93xx_dma_desc *desc; m2p_fill_desc() local
358 desc = ep93xx_dma_get_active(edmac); m2p_fill_desc()
359 if (!desc) { m2p_fill_desc()
365 bus_addr = desc->src_addr; m2p_fill_desc()
367 bus_addr = desc->dst_addr; m2p_fill_desc()
370 writel(desc->size, edmac->regs + M2P_MAXCNT0); m2p_fill_desc()
373 writel(desc->size, edmac->regs + M2P_MAXCNT1); m2p_fill_desc()
401 struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); m2p_hw_interrupt() local
420 desc->txd.cookie, desc->src_addr, desc->dst_addr, m2p_hw_interrupt()
421 desc->size); m2p_hw_interrupt()
516 struct ep93xx_dma_desc *desc; m2m_fill_desc() local
518 desc = ep93xx_dma_get_active(edmac); m2m_fill_desc()
519 if (!desc) { m2m_fill_desc()
525 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); m2m_fill_desc()
526 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0); m2m_fill_desc()
527 writel(desc->size, edmac->regs + M2M_BCR0); m2m_fill_desc()
529 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1); m2m_fill_desc()
530 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1); m2m_fill_desc()
531 writel(desc->size, edmac->regs + M2M_BCR1); m2m_fill_desc()
593 struct ep93xx_dma_desc *desc; m2m_hw_interrupt() local
608 desc = ep93xx_dma_get_active(edmac); m2m_hw_interrupt()
609 last_done = !desc || desc->txd.cookie; m2m_hw_interrupt()
667 struct ep93xx_dma_desc *desc, *_desc; ep93xx_dma_desc_get() local
672 list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) { ep93xx_dma_desc_get()
673 if (async_tx_test_ack(&desc->txd)) { ep93xx_dma_desc_get()
674 list_del_init(&desc->node); ep93xx_dma_desc_get()
677 desc->src_addr = 0; ep93xx_dma_desc_get()
678 desc->dst_addr = 0; ep93xx_dma_desc_get()
679 desc->size = 0; ep93xx_dma_desc_get()
680 desc->complete = false; ep93xx_dma_desc_get()
681 desc->txd.cookie = 0; ep93xx_dma_desc_get()
682 desc->txd.callback = NULL; ep93xx_dma_desc_get()
683 desc->txd.callback_param = NULL; ep93xx_dma_desc_get()
685 ret = desc; ep93xx_dma_desc_get()
694 struct ep93xx_dma_desc *desc) ep93xx_dma_desc_put()
696 if (desc) { ep93xx_dma_desc_put()
700 list_splice_init(&desc->tx_list, &edmac->free_list); ep93xx_dma_desc_put()
701 list_add(&desc->node, &edmac->free_list); ep93xx_dma_desc_put()
739 struct ep93xx_dma_desc *desc, *d; ep93xx_dma_tasklet() local
750 desc = ep93xx_dma_get_active(edmac); ep93xx_dma_tasklet()
751 if (desc) { ep93xx_dma_tasklet()
752 if (desc->complete) { ep93xx_dma_tasklet()
755 dma_cookie_complete(&desc->txd); ep93xx_dma_tasklet()
758 callback = desc->txd.callback; ep93xx_dma_tasklet()
759 callback_param = desc->txd.callback_param; ep93xx_dma_tasklet()
767 list_for_each_entry_safe(desc, d, &list, node) { ep93xx_dma_tasklet()
768 dma_descriptor_unmap(&desc->txd); ep93xx_dma_tasklet()
769 ep93xx_dma_desc_put(edmac, desc); ep93xx_dma_tasklet()
779 struct ep93xx_dma_desc *desc; ep93xx_dma_interrupt() local
784 desc = ep93xx_dma_get_active(edmac); ep93xx_dma_interrupt()
785 if (!desc) { ep93xx_dma_interrupt()
794 desc->complete = true; ep93xx_dma_interrupt()
824 struct ep93xx_dma_desc *desc; ep93xx_dma_tx_submit() local
831 desc = container_of(tx, struct ep93xx_dma_desc, txd); ep93xx_dma_tx_submit()
839 ep93xx_dma_set_active(edmac, desc); ep93xx_dma_tx_submit()
842 list_add_tail(&desc->node, &edmac->queue); ep93xx_dma_tx_submit()
907 struct ep93xx_dma_desc *desc; ep93xx_dma_alloc_chan_resources() local
909 desc = kzalloc(sizeof(*desc), GFP_KERNEL); ep93xx_dma_alloc_chan_resources()
910 if (!desc) { ep93xx_dma_alloc_chan_resources()
915 INIT_LIST_HEAD(&desc->tx_list); ep93xx_dma_alloc_chan_resources()
917 dma_async_tx_descriptor_init(&desc->txd, chan); ep93xx_dma_alloc_chan_resources()
918 desc->txd.flags = DMA_CTRL_ACK; ep93xx_dma_alloc_chan_resources()
919 desc->txd.tx_submit = ep93xx_dma_tx_submit; ep93xx_dma_alloc_chan_resources()
921 ep93xx_dma_desc_put(edmac, desc); ep93xx_dma_alloc_chan_resources()
944 struct ep93xx_dma_desc *desc, *d; ep93xx_dma_free_chan_resources() local
959 list_for_each_entry_safe(desc, d, &list, node) ep93xx_dma_free_chan_resources()
960 kfree(desc); ep93xx_dma_free_chan_resources()
981 struct ep93xx_dma_desc *desc, *first; ep93xx_dma_prep_dma_memcpy() local
986 desc = ep93xx_dma_desc_get(edmac); ep93xx_dma_prep_dma_memcpy()
987 if (!desc) { ep93xx_dma_prep_dma_memcpy()
994 desc->src_addr = src + offset; ep93xx_dma_prep_dma_memcpy()
995 desc->dst_addr = dest + offset; ep93xx_dma_prep_dma_memcpy()
996 desc->size = bytes; ep93xx_dma_prep_dma_memcpy()
999 first = desc; ep93xx_dma_prep_dma_memcpy()
1001 list_add_tail(&desc->node, &first->tx_list); ep93xx_dma_prep_dma_memcpy()
1030 struct ep93xx_dma_desc *desc, *first; ep93xx_dma_prep_slave_sg() local
1056 desc = ep93xx_dma_desc_get(edmac); for_each_sg()
1057 if (!desc) { for_each_sg()
1063 desc->src_addr = sg_dma_address(sg); for_each_sg()
1064 desc->dst_addr = edmac->runtime_addr; for_each_sg()
1066 desc->src_addr = edmac->runtime_addr; for_each_sg()
1067 desc->dst_addr = sg_dma_address(sg); for_each_sg()
1069 desc->size = sg_len; for_each_sg()
1072 first = desc; for_each_sg()
1074 list_add_tail(&desc->node, &first->tx_list); for_each_sg()
1110 struct ep93xx_dma_desc *desc, *first; ep93xx_dma_prep_dma_cyclic() local
1134 desc = ep93xx_dma_desc_get(edmac); ep93xx_dma_prep_dma_cyclic()
1135 if (!desc) { ep93xx_dma_prep_dma_cyclic()
1141 desc->src_addr = dma_addr + offset; ep93xx_dma_prep_dma_cyclic()
1142 desc->dst_addr = edmac->runtime_addr; ep93xx_dma_prep_dma_cyclic()
1144 desc->src_addr = edmac->runtime_addr; ep93xx_dma_prep_dma_cyclic()
1145 desc->dst_addr = dma_addr + offset; ep93xx_dma_prep_dma_cyclic()
1148 desc->size = period_len; ep93xx_dma_prep_dma_cyclic()
1151 first = desc; ep93xx_dma_prep_dma_cyclic()
1153 list_add_tail(&desc->node, &first->tx_list); ep93xx_dma_prep_dma_cyclic()
1175 struct ep93xx_dma_desc *desc, *_d; ep93xx_dma_terminate_all() local
1192 list_for_each_entry_safe(desc, _d, &list, node) ep93xx_dma_terminate_all()
1193 ep93xx_dma_desc_put(edmac, desc); ep93xx_dma_terminate_all()
236 ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, struct ep93xx_dma_desc *desc) ep93xx_dma_set_active() argument
693 ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac, struct ep93xx_dma_desc *desc) ep93xx_dma_desc_put() argument
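
Note: ep93xx_dma_set_active() is worth reading as a unit. The head descriptor goes onto the active list, then its tx_list children are flattened in behind it, each inheriting the head's callback so that only the last fragment of the chain reports completion. The core of that flattening, assembled from the hits:

	list_add_tail(&desc->node, &edmac->active);
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
						struct ep93xx_dma_desc, node);

		/* copy the callback so the last fragment fires completion */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;
		list_move_tail(&d->node, &edmac->active);
	}
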
H A Dat_xdmac.c341 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); at_xdmac_start_xfer()
415 struct at_xdmac_desc *desc = txd_to_at_desc(tx); at_xdmac_tx_submit() local
423 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", at_xdmac_tx_submit()
424 __func__, atchan, desc); at_xdmac_tx_submit()
425 list_add_tail(&desc->xfer_node, &atchan->xfers_list); at_xdmac_tx_submit()
427 at_xdmac_start_xfer(atchan, desc); at_xdmac_tx_submit()
436 struct at_xdmac_desc *desc; at_xdmac_alloc_desc() local
440 desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); at_xdmac_alloc_desc()
441 if (desc) { at_xdmac_alloc_desc()
442 memset(desc, 0, sizeof(*desc)); at_xdmac_alloc_desc()
443 INIT_LIST_HEAD(&desc->descs_list); at_xdmac_alloc_desc()
444 dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); at_xdmac_alloc_desc()
445 desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; at_xdmac_alloc_desc()
446 desc->tx_dma_desc.phys = phys; at_xdmac_alloc_desc()
449 return desc; at_xdmac_alloc_desc()
455 struct at_xdmac_desc *desc; at_xdmac_get_desc() local
458 desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT); at_xdmac_get_desc()
460 desc = list_first_entry(&atchan->free_descs_list, at_xdmac_get_desc()
462 list_del(&desc->desc_node); at_xdmac_get_desc()
463 desc->active_xfer = false; at_xdmac_get_desc()
466 return desc; at_xdmac_get_desc()
624 struct at_xdmac_desc *desc = NULL; for_each_sg() local
636 desc = at_xdmac_get_desc(atchan); for_each_sg()
637 if (!desc) { for_each_sg()
646 desc->lld.mbr_sa = atchan->sconfig.src_addr; for_each_sg()
647 desc->lld.mbr_da = mem; for_each_sg()
649 desc->lld.mbr_sa = mem; for_each_sg()
650 desc->lld.mbr_da = atchan->sconfig.dst_addr; for_each_sg()
656 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ for_each_sg()
661 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | for_each_sg()
665 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); for_each_sg()
669 prev->lld.mbr_nda = desc->tx_dma_desc.phys; for_each_sg()
675 prev = desc; for_each_sg()
677 first = desc; for_each_sg()
679 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", for_each_sg()
680 __func__, desc, first); for_each_sg() local
681 list_add_tail(&desc->desc_node, &first->descs_list); for_each_sg()
726 struct at_xdmac_desc *desc = NULL; at_xdmac_prep_dma_cyclic() local
729 desc = at_xdmac_get_desc(atchan); at_xdmac_prep_dma_cyclic()
730 if (!desc) { at_xdmac_prep_dma_cyclic()
739 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n", at_xdmac_prep_dma_cyclic()
740 __func__, desc, &desc->tx_dma_desc.phys); at_xdmac_prep_dma_cyclic() local
743 desc->lld.mbr_sa = atchan->sconfig.src_addr; at_xdmac_prep_dma_cyclic()
744 desc->lld.mbr_da = buf_addr + i * period_len; at_xdmac_prep_dma_cyclic()
746 desc->lld.mbr_sa = buf_addr + i * period_len; at_xdmac_prep_dma_cyclic()
747 desc->lld.mbr_da = atchan->sconfig.dst_addr; at_xdmac_prep_dma_cyclic()
749 desc->lld.mbr_cfg = atchan->cfg; at_xdmac_prep_dma_cyclic()
750 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 at_xdmac_prep_dma_cyclic()
754 | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg); at_xdmac_prep_dma_cyclic()
758 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); at_xdmac_prep_dma_cyclic()
762 prev->lld.mbr_nda = desc->tx_dma_desc.phys; at_xdmac_prep_dma_cyclic()
768 prev = desc; at_xdmac_prep_dma_cyclic()
770 first = desc; at_xdmac_prep_dma_cyclic()
772 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", at_xdmac_prep_dma_cyclic()
773 __func__, desc, first); at_xdmac_prep_dma_cyclic() local
774 list_add_tail(&desc->desc_node, &first->descs_list); at_xdmac_prep_dma_cyclic()
840 struct at_xdmac_desc *desc = NULL; at_xdmac_prep_dma_memcpy() local
845 desc = at_xdmac_get_desc(atchan); at_xdmac_prep_dma_memcpy()
847 if (!desc) { at_xdmac_prep_dma_memcpy()
884 desc->lld.mbr_sa = src_addr; at_xdmac_prep_dma_memcpy()
885 desc->lld.mbr_da = dst_addr; at_xdmac_prep_dma_memcpy()
886 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 at_xdmac_prep_dma_memcpy()
891 desc->lld.mbr_cfg = chan_cc; at_xdmac_prep_dma_memcpy()
895 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); at_xdmac_prep_dma_memcpy()
899 prev->lld.mbr_nda = desc->tx_dma_desc.phys; at_xdmac_prep_dma_memcpy()
905 prev = desc; at_xdmac_prep_dma_memcpy()
907 first = desc; at_xdmac_prep_dma_memcpy()
909 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", at_xdmac_prep_dma_memcpy()
910 __func__, desc, first); at_xdmac_prep_dma_memcpy() local
911 list_add_tail(&desc->desc_node, &first->descs_list); at_xdmac_prep_dma_memcpy()
926 struct at_xdmac_desc *desc, *_desc; at_xdmac_tx_status() local
943 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); at_xdmac_tx_status()
949 if (!desc->active_xfer) { at_xdmac_tx_status()
950 dma_set_residue(txstate, desc->xfer_size); at_xdmac_tx_status()
954 residue = desc->xfer_size; at_xdmac_tx_status()
961 if ((desc->lld.mbr_cfg & mask) == value) { at_xdmac_tx_status()
1008 descs_list = &desc->descs_list; list_for_each_entry_safe()
1009 list_for_each_entry_safe(desc, _desc, descs_list, desc_node) { list_for_each_entry_safe()
1010 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); list_for_each_entry_safe()
1011 residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; list_for_each_entry_safe()
1012 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) list_for_each_entry_safe()
1020 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1021 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); local
1030 struct at_xdmac_desc *desc) at_xdmac_remove_xfer()
1032 dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); at_xdmac_remove_xfer()
1038 list_del(&desc->xfer_node); at_xdmac_remove_xfer()
1039 list_splice_init(&desc->descs_list, &atchan->free_descs_list); at_xdmac_remove_xfer()
1044 struct at_xdmac_desc *desc; at_xdmac_advance_work() local
1054 desc = list_first_entry(&atchan->xfers_list, at_xdmac_advance_work()
1057 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); at_xdmac_advance_work()
1058 if (!desc->active_xfer) at_xdmac_advance_work()
1059 at_xdmac_start_xfer(atchan, desc); at_xdmac_advance_work()
1067 struct at_xdmac_desc *desc; at_xdmac_handle_cyclic() local
1070 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); at_xdmac_handle_cyclic()
1071 txd = &desc->tx_dma_desc; at_xdmac_handle_cyclic()
1080 struct at_xdmac_desc *desc; at_xdmac_tasklet() local
1104 desc = list_first_entry(&atchan->xfers_list, at_xdmac_tasklet()
1107 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); at_xdmac_tasklet()
1108 BUG_ON(!desc->active_xfer); at_xdmac_tasklet()
1110 txd = &desc->tx_dma_desc; at_xdmac_tasklet()
1112 at_xdmac_remove_xfer(atchan, desc); at_xdmac_tasklet()
1253 struct at_xdmac_desc *desc, *_desc; at_xdmac_device_terminate_all() local
1266 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) at_xdmac_device_terminate_all()
1267 at_xdmac_remove_xfer(atchan, desc); at_xdmac_device_terminate_all()
1279 struct at_xdmac_desc *desc; at_xdmac_alloc_chan_resources() local
1300 desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC); at_xdmac_alloc_chan_resources()
1301 if (!desc) { at_xdmac_alloc_chan_resources()
1306 list_add_tail(&desc->desc_node, &atchan->free_descs_list); at_xdmac_alloc_chan_resources()
1322 struct at_xdmac_desc *desc, *_desc; at_xdmac_free_chan_resources() local
1324 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) { at_xdmac_free_chan_resources()
1325 dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc); at_xdmac_free_chan_resources() local
1326 list_del(&desc->desc_node); at_xdmac_free_chan_resources()
1327 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys); at_xdmac_free_chan_resources()
1029 at_xdmac_remove_xfer(struct at_xdmac_chan *atchan, struct at_xdmac_desc *desc) at_xdmac_remove_xfer() argument
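
Note: all three at_xdmac prep paths (slave_sg, cyclic, memcpy) end in the same chaining tail — link the new linked-list descriptor to its predecessor by physical address, remember the chain head, and collect every descriptor on the head's descs_list. Extracted as one fragment:

	if (prev)
		prev->lld.mbr_nda = desc->tx_dma_desc.phys;	/* hw next pointer */
	else
		first = desc;					/* chain head */
	prev = desc;
	list_add_tail(&desc->desc_node, &first->descs_list);
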
H A Dfsl_raid.c88 struct fsl_re_desc *desc; fsl_re_tx_submit() local
93 desc = to_fsl_re_dma_desc(tx); fsl_re_tx_submit()
98 list_add_tail(&desc->node, &re_chan->submit_q); fsl_re_tx_submit()
109 struct fsl_re_desc *desc, *_desc; fsl_re_issue_pending() local
118 list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) { fsl_re_issue_pending()
122 list_move_tail(&desc->node, &re_chan->active_q); fsl_re_issue_pending()
125 &desc->hwdesc, sizeof(struct fsl_re_hw_desc)); fsl_re_issue_pending()
135 static void fsl_re_desc_done(struct fsl_re_desc *desc) fsl_re_desc_done() argument
140 dma_cookie_complete(&desc->async_tx); fsl_re_desc_done()
142 callback = desc->async_tx.callback; fsl_re_desc_done()
143 callback_param = desc->async_tx.callback_param; fsl_re_desc_done()
147 dma_descriptor_unmap(&desc->async_tx); fsl_re_desc_done()
152 struct fsl_re_desc *desc, *_desc; fsl_re_cleanup_descs() local
156 list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) { fsl_re_cleanup_descs()
157 if (async_tx_test_ack(&desc->async_tx)) fsl_re_cleanup_descs()
158 list_move_tail(&desc->node, &re_chan->free_q); fsl_re_cleanup_descs()
168 struct fsl_re_desc *desc, *_desc; fsl_re_dequeue() local
183 list_for_each_entry_safe(desc, _desc, &re_chan->active_q, fsl_re_dequeue()
186 if (desc->hwdesc.lbea32 == hwdesc->lbea32 && fsl_re_dequeue()
187 desc->hwdesc.addr_low == hwdesc->addr_low) { fsl_re_dequeue()
194 fsl_re_desc_done(desc); fsl_re_dequeue()
195 list_move_tail(&desc->node, &re_chan->ack_q); fsl_re_dequeue()
260 struct fsl_re_desc *desc, fsl_re_init_desc()
263 desc->re_chan = re_chan; fsl_re_init_desc()
264 desc->async_tx.tx_submit = fsl_re_tx_submit; fsl_re_init_desc()
265 dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan); fsl_re_init_desc()
266 INIT_LIST_HEAD(&desc->node); fsl_re_init_desc()
268 desc->hwdesc.fmt32 = FSL_RE_FRAME_FORMAT << FSL_RE_HWDESC_FMT_SHIFT; fsl_re_init_desc()
269 desc->hwdesc.lbea32 = upper_32_bits(paddr); fsl_re_init_desc()
270 desc->hwdesc.addr_low = lower_32_bits(paddr); fsl_re_init_desc()
271 desc->cf_addr = cf; fsl_re_init_desc()
272 desc->cf_paddr = paddr; fsl_re_init_desc()
274 desc->cdb_addr = (void *)(cf + FSL_RE_CF_DESC_SIZE); fsl_re_init_desc()
275 desc->cdb_paddr = paddr + FSL_RE_CF_DESC_SIZE; fsl_re_init_desc()
277 return desc; fsl_re_init_desc()
283 struct fsl_re_desc *desc = NULL; fsl_re_chan_alloc_desc() local
292 /* take one desc from free_q */ fsl_re_chan_alloc_desc()
293 desc = list_first_entry(&re_chan->free_q, fsl_re_chan_alloc_desc()
295 list_del(&desc->node); fsl_re_chan_alloc_desc()
297 desc->async_tx.flags = flags; fsl_re_chan_alloc_desc()
301 if (!desc) { fsl_re_chan_alloc_desc()
302 desc = kzalloc(sizeof(*desc), GFP_NOWAIT); fsl_re_chan_alloc_desc()
303 if (!desc) fsl_re_chan_alloc_desc()
309 kfree(desc); fsl_re_chan_alloc_desc()
313 desc = fsl_re_init_desc(re_chan, desc, cf, paddr); fsl_re_chan_alloc_desc()
314 desc->async_tx.flags = flags; fsl_re_chan_alloc_desc()
321 return desc; fsl_re_chan_alloc_desc()
330 struct fsl_re_desc *desc; fsl_re_prep_dma_genq() local
345 desc = fsl_re_chan_alloc_desc(re_chan, flags); fsl_re_prep_dma_genq()
346 if (desc <= 0) fsl_re_prep_dma_genq()
360 xor = desc->cdb_addr; fsl_re_prep_dma_genq()
376 cf = desc->cf_addr; fsl_re_prep_dma_genq()
377 fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0); fsl_re_prep_dma_genq()
392 return &desc->async_tx; fsl_re_prep_dma_genq()
417 struct fsl_re_desc *desc; fsl_re_prep_dma_pq() local
449 desc = to_fsl_re_dma_desc(tx); fsl_re_prep_dma_pq()
469 desc = fsl_re_chan_alloc_desc(re_chan, flags); fsl_re_prep_dma_pq()
470 if (desc <= 0) fsl_re_prep_dma_pq()
480 pq = desc->cdb_addr; fsl_re_prep_dma_pq()
497 cf = desc->cf_addr; fsl_re_prep_dma_pq()
498 fill_cfd_frame(cf, 0, sizeof(struct fsl_re_pq_cdb), desc->cdb_paddr, 0); fsl_re_prep_dma_pq()
526 return &desc->async_tx; fsl_re_prep_dma_pq()
539 struct fsl_re_desc *desc; fsl_re_prep_dma_memcpy() local
553 desc = fsl_re_chan_alloc_desc(re_chan, flags); fsl_re_prep_dma_memcpy()
554 if (desc <= 0) fsl_re_prep_dma_memcpy()
563 move = desc->cdb_addr; fsl_re_prep_dma_memcpy()
567 cf = desc->cf_addr; fsl_re_prep_dma_memcpy()
568 fill_cfd_frame(cf, 0, sizeof(*move), desc->cdb_paddr, 0); fsl_re_prep_dma_memcpy()
578 return &desc->async_tx; fsl_re_prep_dma_memcpy()
584 struct fsl_re_desc *desc; fsl_re_alloc_chan_resources() local
591 desc = kzalloc(sizeof(*desc), GFP_KERNEL); fsl_re_alloc_chan_resources()
592 if (!desc) fsl_re_alloc_chan_resources()
598 kfree(desc); fsl_re_alloc_chan_resources()
602 INIT_LIST_HEAD(&desc->node); fsl_re_alloc_chan_resources()
603 fsl_re_init_desc(re_chan, desc, cf, paddr); fsl_re_alloc_chan_resources()
605 list_add_tail(&desc->node, &re_chan->free_q); fsl_re_alloc_chan_resources()
614 struct fsl_re_desc *desc; fsl_re_free_chan_resources() local
618 desc = list_first_entry(&re_chan->free_q, fsl_re_free_chan_resources()
622 list_del(&desc->node); fsl_re_free_chan_resources()
623 dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr, fsl_re_free_chan_resources()
624 desc->cf_paddr); fsl_re_free_chan_resources()
625 kfree(desc); fsl_re_free_chan_resources()
820 dev_err(dev, "No memory for fsl re_cf desc pool\n"); fsl_re_probe()
828 dev_err(dev, "No memory for fsl re_hw desc pool\n"); fsl_re_probe()
259 fsl_re_init_desc(struct fsl_re_chan *re_chan, struct fsl_re_desc *desc, void *cf, dma_addr_t paddr) fsl_re_init_desc() argument
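
Note: fsl_re_init_desc() shows how the RAID engine addresses its command frames — the 64-bit DMA address is split into two 32-bit hardware words, and fsl_re_dequeue() later matches completions by comparing both halves. The split and the match, side by side:

	/* enqueue: split the frame's DMA address into the hw descriptor */
	desc->hwdesc.lbea32   = upper_32_bits(paddr);
	desc->hwdesc.addr_low = lower_32_bits(paddr);

	/* dequeue: a completion belongs to 'desc' iff both halves match */
	if (desc->hwdesc.lbea32 == hwdesc->lbea32 &&
	    desc->hwdesc.addr_low == hwdesc->addr_low)
		fsl_re_desc_done(desc);
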
H A Ddma-jz4740.c122 struct jz4740_dma_desc *desc; member in struct:jz4740_dmaengine_chan
280 chan->desc = NULL; jz4740_dma_terminate_all()
299 if (!chan->desc) { jz4740_dma_start_transfer()
303 chan->desc = to_jz4740_dma_desc(vdesc); jz4740_dma_start_transfer()
307 if (chan->next_sg == chan->desc->num_sgs) jz4740_dma_start_transfer()
310 sg = &chan->desc->sg[chan->next_sg]; jz4740_dma_start_transfer()
312 if (chan->desc->direction == DMA_MEM_TO_DEV) { jz4740_dma_start_transfer()
341 if (chan->desc) { jz4740_dma_chan_irq()
342 if (chan->desc->cyclic) { jz4740_dma_chan_irq()
343 vchan_cyclic_callback(&chan->desc->vdesc); jz4740_dma_chan_irq()
345 if (chan->next_sg == chan->desc->num_sgs) { jz4740_dma_chan_irq()
346 list_del(&chan->desc->vdesc.node); jz4740_dma_chan_irq()
347 vchan_cookie_complete(&chan->desc->vdesc); jz4740_dma_chan_irq()
348 chan->desc = NULL; jz4740_dma_chan_irq()
384 if (vchan_issue_pending(&chan->vchan) && !chan->desc) jz4740_dma_issue_pending()
395 struct jz4740_dma_desc *desc; jz4740_dma_prep_slave_sg() local
399 desc = jz4740_dma_alloc_desc(sg_len); jz4740_dma_prep_slave_sg()
400 if (!desc) jz4740_dma_prep_slave_sg()
404 desc->sg[i].addr = sg_dma_address(sg); for_each_sg()
405 desc->sg[i].len = sg_dma_len(sg); for_each_sg()
408 desc->num_sgs = sg_len;
409 desc->direction = direction;
410 desc->cyclic = false;
412 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
421 struct jz4740_dma_desc *desc; jz4740_dma_prep_dma_cyclic() local
429 desc = jz4740_dma_alloc_desc(num_periods); jz4740_dma_prep_dma_cyclic()
430 if (!desc) jz4740_dma_prep_dma_cyclic()
434 desc->sg[i].addr = buf_addr; jz4740_dma_prep_dma_cyclic()
435 desc->sg[i].len = period_len; jz4740_dma_prep_dma_cyclic()
439 desc->num_sgs = num_periods; jz4740_dma_prep_dma_cyclic()
440 desc->direction = direction; jz4740_dma_prep_dma_cyclic()
441 desc->cyclic = true; jz4740_dma_prep_dma_cyclic()
443 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); jz4740_dma_prep_dma_cyclic()
447 struct jz4740_dma_desc *desc, unsigned int next_sg) jz4740_dma_desc_residue()
455 for (i = next_sg; i < desc->num_sgs; i++) jz4740_dma_desc_residue()
456 residue += desc->sg[i].len; jz4740_dma_desc_residue()
481 if (cookie == chan->desc->vdesc.tx.cookie) { jz4740_dma_tx_status()
482 state->residue = jz4740_dma_desc_residue(chan, chan->desc, jz4740_dma_tx_status()
446 jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan, struct jz4740_dma_desc *desc, unsigned int next_sg) jz4740_dma_desc_residue() argument
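
Note: jz4740_dma_desc_residue() computes residue the simple way — sum the lengths of every scatterlist segment not yet started. Any contribution from the segment currently in flight is outside what the hits show, so this sketch covers only the summation:

	static size_t jz4740_residue(struct jz4740_dma_desc *desc,
				     unsigned int next_sg)
	{
		size_t residue = 0;
		unsigned int i;

		for (i = next_sg; i < desc->num_sgs; i++)
			residue += desc->sg[i].len;
		return residue;
	}
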
H A Dpl330.c358 struct dma_pl330_desc *desc; member in struct:_pl330_req
412 /* Schedule desc completion */
510 /* The channel which currently holds this desc */
522 struct dma_pl330_desc *desc; member in struct:_xfer_spec
527 return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL; _queue_empty()
532 return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; _queue_full()
1029 struct dma_pl330_desc *desc; _trigger() local
1040 if (thrd->req[idx].desc != NULL) { _trigger()
1044 if (thrd->req[idx].desc != NULL) _trigger()
1058 desc = req->desc; _trigger()
1060 ns = desc->rqcfg.nonsecure ? 1 : 0; _trigger()
1120 struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg; _ldst_memtomem()
1146 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); _ldst_devtomem()
1147 off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri); _ldst_devtomem()
1149 off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); _ldst_devtomem()
1161 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); _ldst_memtodev()
1163 off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri); _ldst_memtodev()
1164 off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); _ldst_memtodev()
1175 switch (pxs->desc->rqtype) { _bursts()
1275 struct pl330_xfer *x = &pxs->desc->px; _setup_loops()
1292 struct pl330_xfer *x = &pxs->desc->px; _setup_xfer()
1323 x = &pxs->desc->px; _setup_req()
1376 struct dma_pl330_desc *desc) pl330_submit_req()
1393 if (desc->rqtype != DMA_MEM_TO_MEM && pl330_submit_req()
1394 desc->peri >= pl330->pcfg.num_peri) { pl330_submit_req()
1397 __func__, __LINE__, desc->peri); pl330_submit_req()
1410 desc->rqcfg.nonsecure = 0; pl330_submit_req()
1412 desc->rqcfg.nonsecure = 1; pl330_submit_req()
1414 ccr = _prepare_ccr(&desc->rqcfg); pl330_submit_req()
1416 idx = thrd->req[0].desc == NULL ? 0 : 1; pl330_submit_req()
1419 xs.desc = desc; pl330_submit_req()
1435 thrd->req[idx].desc = desc; pl330_submit_req()
1446 static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err) dma_pl330_rqcb() argument
1451 if (!desc) dma_pl330_rqcb()
1454 pch = desc->pchan; dma_pl330_rqcb()
1456 /* If desc aborted */ dma_pl330_rqcb()
1462 desc->status = DONE; dma_pl330_rqcb()
1509 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err); pl330_dotask()
1510 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err); pl330_dotask()
1513 thrd->req[0].desc = NULL; pl330_dotask()
1514 thrd->req[1].desc = NULL; pl330_dotask()
1594 descdone = thrd->req[active].desc; pl330_update()
1595 thrd->req[active].desc = NULL; pl330_update()
1672 thrd->req[0].desc = NULL; pl330_request_channel()
1673 thrd->req[1].desc = NULL; pl330_request_channel()
1707 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT); pl330_release_channel()
1708 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT); pl330_release_channel()
1771 thrd->req[0].desc = NULL; _reset_thread()
1777 thrd->req[1].desc = NULL; _reset_thread()
1944 struct dma_pl330_desc *desc; fill_queue() local
1947 list_for_each_entry(desc, &pch->work_list, node) { fill_queue()
1950 if (desc->status == BUSY) fill_queue()
1953 ret = pl330_submit_req(pch->thread, desc); fill_queue()
1955 desc->status = BUSY; fill_queue()
1961 desc->status = DONE; fill_queue()
1963 __func__, __LINE__, desc->txd.cookie); fill_queue()
1972 struct dma_pl330_desc *desc, *_dt; pl330_tasklet() local
1979 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) pl330_tasklet()
1980 if (desc->status == DONE) { pl330_tasklet()
1982 dma_cookie_complete(&desc->txd); pl330_tasklet()
1983 list_move_tail(&desc->node, &pch->completed_list); pl330_tasklet()
2005 desc = list_first_entry(&pch->completed_list, pl330_tasklet()
2008 callback = desc->txd.callback; pl330_tasklet()
2009 callback_param = desc->txd.callback_param; pl330_tasklet()
2012 desc->status = PREP; pl330_tasklet()
2013 list_move_tail(&desc->node, &pch->work_list); pl330_tasklet()
2021 desc->status = FREE; pl330_tasklet()
2022 list_move_tail(&desc->node, &pch->dmac->desc_pool); pl330_tasklet()
2025 dma_descriptor_unmap(&desc->txd); pl330_tasklet()
2125 struct dma_pl330_desc *desc; pl330_terminate_all() local
2136 pch->thread->req[0].desc = NULL; pl330_terminate_all()
2137 pch->thread->req[1].desc = NULL; pl330_terminate_all()
2140 /* Mark all desc done */ pl330_terminate_all()
2141 list_for_each_entry(desc, &pch->submitted_list, node) { pl330_terminate_all()
2142 desc->status = FREE; pl330_terminate_all()
2143 dma_cookie_complete(&desc->txd); pl330_terminate_all()
2146 list_for_each_entry(desc, &pch->work_list , node) { pl330_terminate_all()
2147 desc->status = FREE; pl330_terminate_all()
2148 dma_cookie_complete(&desc->txd); pl330_terminate_all()
2210 struct dma_pl330_desc *desc) pl330_get_current_xferred_count()
2219 if (desc->rqcfg.src_inc) { pl330_get_current_xferred_count()
2221 addr = desc->px.src_addr; pl330_get_current_xferred_count()
2224 addr = desc->px.dst_addr; pl330_get_current_xferred_count()
2237 struct dma_pl330_desc *desc, *running = NULL; pl330_tx_status() local
2252 running = pch->thread->req[pch->thread->req_running].desc; pl330_tx_status()
2255 list_for_each_entry(desc, &pch->work_list, node) { pl330_tx_status()
2256 if (desc->status == DONE) pl330_tx_status()
2257 transferred = desc->bytes_requested; pl330_tx_status()
2258 else if (running && desc == running) pl330_tx_status()
2260 pl330_get_current_xferred_count(pch, desc); pl330_tx_status()
2263 residual += desc->bytes_requested - transferred; pl330_tx_status()
2264 if (desc->txd.cookie == cookie) { pl330_tx_status()
2265 switch (desc->status) { pl330_tx_status()
2278 if (desc->last) pl330_tx_status()
2317 struct dma_pl330_desc *desc, *last = to_desc(tx); pl330_tx_submit() local
2326 desc = list_entry(last->node.next, struct dma_pl330_desc, node); pl330_tx_submit()
2328 desc->txd.callback = last->txd.callback; pl330_tx_submit()
2329 desc->txd.callback_param = last->txd.callback_param; pl330_tx_submit()
2331 desc->last = false; pl330_tx_submit()
2333 dma_cookie_assign(&desc->txd); pl330_tx_submit()
2335 list_move_tail(&desc->node, &pch->submitted_list); pl330_tx_submit()
2346 static inline void _init_desc(struct dma_pl330_desc *desc) _init_desc() argument
2348 desc->rqcfg.swap = SWAP_NO; _init_desc()
2349 desc->rqcfg.scctl = CCTRL0; _init_desc()
2350 desc->rqcfg.dcctl = CCTRL0; _init_desc()
2351 desc->txd.tx_submit = pl330_tx_submit; _init_desc()
2353 INIT_LIST_HEAD(&desc->node); _init_desc()
2359 struct dma_pl330_desc *desc; add_desc() local
2363 desc = kcalloc(count, sizeof(*desc), flg); add_desc()
2364 if (!desc) add_desc()
2370 _init_desc(&desc[i]); add_desc()
2371 list_add_tail(&desc[i].node, &pl330->desc_pool); add_desc()
2381 struct dma_pl330_desc *desc = NULL; pluck_desc() local
2387 desc = list_entry(pl330->desc_pool.next, pluck_desc()
2390 list_del_init(&desc->node); pluck_desc()
2392 desc->status = PREP; pluck_desc()
2393 desc->txd.callback = NULL; pluck_desc()
2398 return desc; pluck_desc()
2405 struct dma_pl330_desc *desc; pl330_get_desc() local
2407 /* Pluck one desc from the pool of DMAC */ pl330_get_desc()
2408 desc = pluck_desc(pl330); pl330_get_desc()
2411 if (!desc) { pl330_get_desc()
2416 desc = pluck_desc(pl330); pl330_get_desc()
2417 if (!desc) { pl330_get_desc()
2425 desc->pchan = pch; pl330_get_desc()
2426 desc->txd.cookie = 0; pl330_get_desc()
2427 async_tx_ack(&desc->txd); pl330_get_desc()
2429 desc->peri = peri_id ? pch->chan.chan_id : 0; pl330_get_desc()
2430 desc->rqcfg.pcfg = &pch->dmac->pcfg; pl330_get_desc()
2432 dma_async_tx_descriptor_init(&desc->txd, &pch->chan); pl330_get_desc()
2434 return desc; pl330_get_desc()
2449 struct dma_pl330_desc *desc = pl330_get_desc(pch); __pl330_prep_dma_memcpy() local
2451 if (!desc) { __pl330_prep_dma_memcpy()
2452 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", __pl330_prep_dma_memcpy()
2467 fill_px(&desc->px, dst, src, len); __pl330_prep_dma_memcpy()
2469 return desc; __pl330_prep_dma_memcpy()
2473 static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) get_burst_len() argument
2475 struct dma_pl330_chan *pch = desc->pchan; get_burst_len()
2481 burst_len >>= desc->rqcfg.brst_size; get_burst_len()
2488 if (!(len % (burst_len << desc->rqcfg.brst_size))) get_burst_len()
2501 struct dma_pl330_desc *desc = NULL, *first = NULL; pl330_prep_dma_cyclic() local
2518 desc = pl330_get_desc(pch); pl330_prep_dma_cyclic()
2519 if (!desc) { pl330_prep_dma_cyclic()
2520 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", pl330_prep_dma_cyclic()
2529 desc = list_entry(first->node.next, pl330_prep_dma_cyclic()
2531 list_move_tail(&desc->node, &pl330->desc_pool); pl330_prep_dma_cyclic()
2543 desc->rqcfg.src_inc = 1; pl330_prep_dma_cyclic()
2544 desc->rqcfg.dst_inc = 0; pl330_prep_dma_cyclic()
2549 desc->rqcfg.src_inc = 0; pl330_prep_dma_cyclic()
2550 desc->rqcfg.dst_inc = 1; pl330_prep_dma_cyclic()
2558 desc->rqtype = direction; pl330_prep_dma_cyclic()
2559 desc->rqcfg.brst_size = pch->burst_sz; pl330_prep_dma_cyclic()
2560 desc->rqcfg.brst_len = 1; pl330_prep_dma_cyclic()
2561 desc->bytes_requested = period_len; pl330_prep_dma_cyclic()
2562 fill_px(&desc->px, dst, src, period_len); pl330_prep_dma_cyclic()
2565 first = desc; pl330_prep_dma_cyclic()
2567 list_add_tail(&desc->node, &first->node); pl330_prep_dma_cyclic()
2572 if (!desc) pl330_prep_dma_cyclic()
2576 desc->txd.flags = flags; pl330_prep_dma_cyclic()
2578 return &desc->txd; pl330_prep_dma_cyclic()
2585 struct dma_pl330_desc *desc; pl330_prep_dma_memcpy() local
2593 desc = __pl330_prep_dma_memcpy(pch, dst, src, len); pl330_prep_dma_memcpy()
2594 if (!desc) pl330_prep_dma_memcpy()
2597 desc->rqcfg.src_inc = 1; pl330_prep_dma_memcpy()
2598 desc->rqcfg.dst_inc = 1; pl330_prep_dma_memcpy()
2599 desc->rqtype = DMA_MEM_TO_MEM; pl330_prep_dma_memcpy()
2612 desc->rqcfg.brst_size = 0; pl330_prep_dma_memcpy()
2613 while (burst != (1 << desc->rqcfg.brst_size)) pl330_prep_dma_memcpy()
2614 desc->rqcfg.brst_size++; pl330_prep_dma_memcpy()
2620 if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) pl330_prep_dma_memcpy()
2621 desc->rqcfg.brst_len = 1; pl330_prep_dma_memcpy()
2623 desc->rqcfg.brst_len = get_burst_len(desc, len); pl330_prep_dma_memcpy()
2624 desc->bytes_requested = len; pl330_prep_dma_memcpy()
2626 desc->txd.flags = flags; pl330_prep_dma_memcpy()
2628 return &desc->txd; pl330_prep_dma_memcpy()
2635 struct dma_pl330_desc *desc; __pl330_giveback_desc() local
2643 desc = list_entry(first->node.next, __pl330_giveback_desc()
2645 list_move_tail(&desc->node, &pl330->desc_pool); __pl330_giveback_desc()
2658 struct dma_pl330_desc *first, *desc = NULL; pl330_prep_slave_sg() local
2673 desc = pl330_get_desc(pch); for_each_sg()
2674 if (!desc) { for_each_sg()
2678 "%s:%d Unable to fetch desc\n", for_each_sg()
2686 first = desc; for_each_sg()
2688 list_add_tail(&desc->node, &first->node); for_each_sg()
2691 desc->rqcfg.src_inc = 1; for_each_sg()
2692 desc->rqcfg.dst_inc = 0; for_each_sg()
2693 fill_px(&desc->px, for_each_sg()
2696 desc->rqcfg.src_inc = 0; for_each_sg()
2697 desc->rqcfg.dst_inc = 1; for_each_sg()
2698 fill_px(&desc->px, for_each_sg()
2702 desc->rqcfg.brst_size = pch->burst_sz; for_each_sg()
2703 desc->rqcfg.brst_len = 1; for_each_sg()
2704 desc->rqtype = direction; for_each_sg()
2705 desc->bytes_requested = sg_dma_len(sg); for_each_sg()
2708 /* Return the last desc in the chain */
2709 desc->txd.flags = flg;
2710 return &desc->txd;
2830 dev_warn(&adev->dev, "unable to allocate desc\n"); pl330_probe()
1375 pl330_submit_req(struct pl330_thread *thrd, struct dma_pl330_desc *desc) pl330_submit_req() argument
2209 pl330_get_current_xferred_count(struct dma_pl330_chan *pch, struct dma_pl330_desc *desc) pl330_get_current_xferred_count() argument
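
Note: pl330 feeds each hardware thread through a two-deep request queue. A slot is free exactly when its desc pointer is NULL (_queue_empty()/_queue_full() above), and pl330_submit_req() picks slot 0 first, then slot 1. Spelled out as a helper:

	/* Pick a free request slot on the thread; both taken means busy. */
	static int pick_req_slot(struct pl330_thread *thrd)
	{
		if (thrd->req[0].desc == NULL)
			return 0;
		if (thrd->req[1].desc == NULL)
			return 1;
		return -EBUSY;	/* _queue_full(): both slots hold a descriptor */
	}
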
H A Dat_hdmac.c108 struct at_desc *desc = NULL; atc_alloc_descriptor() local
112 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); atc_alloc_descriptor()
113 if (desc) { atc_alloc_descriptor()
114 memset(desc, 0, sizeof(struct at_desc)); atc_alloc_descriptor()
115 INIT_LIST_HEAD(&desc->tx_list); atc_alloc_descriptor()
116 dma_async_tx_descriptor_init(&desc->txd, chan); atc_alloc_descriptor()
118 desc->txd.flags = DMA_CTRL_ACK; atc_alloc_descriptor()
119 desc->txd.tx_submit = atc_tx_submit; atc_alloc_descriptor()
120 desc->txd.phys = phys; atc_alloc_descriptor()
123 return desc; atc_alloc_descriptor()
132 struct at_desc *desc, *_desc; atc_desc_get() local
139 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { atc_desc_get()
141 if (async_tx_test_ack(&desc->txd)) { atc_desc_get()
142 list_del(&desc->desc_node); atc_desc_get()
143 ret = desc; atc_desc_get()
147 "desc %p not ACKed\n", desc); atc_desc_get()
172 * @desc: descriptor, at the head of a chain, to move to free list
174 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) atc_desc_put() argument
176 if (desc) { atc_desc_put()
181 list_for_each_entry(child, &desc->tx_list, desc_node) atc_desc_put()
183 "moving child desc %p to freelist\n", atc_desc_put()
185 list_splice_init(&desc->tx_list, &atchan->free_list); atc_desc_put()
187 "moving desc %p to freelist\n", desc); atc_desc_put()
188 list_add(&desc->desc_node, &atchan->free_list); atc_desc_put()
197 * @desc: descriptor to queue
202 struct at_desc *desc) atc_desc_chain()
205 *first = desc; atc_desc_chain()
208 (*prev)->lli.dscr = desc->txd.phys; atc_desc_chain()
210 list_add_tail(&desc->desc_node, atc_desc_chain()
213 *prev = desc; atc_desc_chain()
263 struct at_desc *desc, *_desc; atc_get_desc_by_cookie() local
265 list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) { atc_get_desc_by_cookie()
266 if (desc->txd.cookie == cookie) atc_get_desc_by_cookie()
267 return desc; atc_get_desc_by_cookie()
270 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { atc_get_desc_by_cookie()
271 if (desc->txd.cookie == cookie) atc_get_desc_by_cookie()
272 return desc; atc_get_desc_by_cookie()
284 * @desc: the descriptor containing the transfer width
287 struct at_desc *desc) atc_calc_bytes_left()
289 return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); atc_calc_bytes_left()
298 * @desc: the descriptor containing the transfer width
301 struct at_dma_chan *atchan, struct at_desc *desc) atc_calc_bytes_left_from_reg()
305 return atc_calc_bytes_left(current_len, ctrla, desc); atc_calc_bytes_left_from_reg()
317 struct at_desc *desc; atc_get_bytes_left() local
326 desc = atc_get_desc_by_cookie(atchan, cookie); atc_get_bytes_left()
327 if (desc == NULL) atc_get_bytes_left()
329 else if (desc != desc_first) atc_get_bytes_left()
330 return desc->total_len; atc_get_bytes_left()
356 list_for_each_entry(desc, &desc_first->tx_list, desc_node) { atc_get_bytes_left()
357 if (desc->lli.dscr == dscr) atc_get_bytes_left()
360 ret -= desc->len; atc_get_bytes_left()
369 if (!desc->lli.dscr) atc_get_bytes_left()
370 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc); atc_get_bytes_left()
382 * @desc: descriptor at the head of the chain we want do complete
386 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) atc_chain_complete() argument
388 struct dma_async_tx_descriptor *txd = &desc->txd; atc_chain_complete()
398 list_splice_init(&desc->tx_list, &atchan->free_list); atc_chain_complete()
400 list_move(&desc->desc_node, &atchan->free_list); atc_chain_complete()
431 struct at_desc *desc, *_desc; atc_complete_all() local
447 list_for_each_entry_safe(desc, _desc, &list, desc_node) atc_complete_all()
448 atc_chain_complete(atchan, desc); atc_complete_all()
605 * @desc: descriptor at the head of the transaction chain
613 struct at_desc *desc = txd_to_at_desc(tx); atc_tx_submit() local
623 desc->txd.cookie); atc_tx_submit()
624 atc_dostart(atchan, desc); atc_tx_submit()
625 list_add_tail(&desc->desc_node, &atchan->active_list); atc_tx_submit()
628 desc->txd.cookie); atc_tx_submit()
629 list_add_tail(&desc->desc_node, &atchan->queue); atc_tx_submit()
650 struct at_desc *desc = NULL; atc_prep_dma_memcpy() local
686 desc = atc_desc_get(atchan); atc_prep_dma_memcpy()
687 if (!desc) atc_prep_dma_memcpy()
690 desc->lli.saddr = src + offset; atc_prep_dma_memcpy()
691 desc->lli.daddr = dest + offset; atc_prep_dma_memcpy()
692 desc->lli.ctrla = ctrla | xfer_count; atc_prep_dma_memcpy()
693 desc->lli.ctrlb = ctrlb; atc_prep_dma_memcpy()
695 desc->txd.cookie = 0; atc_prep_dma_memcpy()
696 desc->len = xfer_count << src_width; atc_prep_dma_memcpy()
698 atc_desc_chain(&first, &prev, desc); atc_prep_dma_memcpy()
710 set_desc_eol(desc); atc_prep_dma_memcpy()
774 struct at_desc *desc; for_each_sg() local
778 desc = atc_desc_get(atchan); for_each_sg()
779 if (!desc) for_each_sg()
793 desc->lli.saddr = mem; for_each_sg()
794 desc->lli.daddr = reg; for_each_sg()
795 desc->lli.ctrla = ctrla for_each_sg()
798 desc->lli.ctrlb = ctrlb; for_each_sg()
799 desc->len = len; for_each_sg()
801 atc_desc_chain(&first, &prev, desc); for_each_sg()
815 struct at_desc *desc; for_each_sg() local
819 desc = atc_desc_get(atchan); for_each_sg()
820 if (!desc) for_each_sg()
834 desc->lli.saddr = reg; for_each_sg()
835 desc->lli.daddr = mem; for_each_sg()
836 desc->lli.ctrla = ctrla for_each_sg()
839 desc->lli.ctrlb = ctrlb; for_each_sg()
840 desc->len = len; for_each_sg()
842 atc_desc_chain(&first, &prev, desc); for_each_sg()
889 struct at_desc *desc = NULL; atc_prep_dma_sg() local
966 desc = atc_desc_get(atchan); atc_prep_dma_sg()
967 if (!desc) atc_prep_dma_sg()
970 desc->lli.saddr = src; atc_prep_dma_sg()
971 desc->lli.daddr = dst; atc_prep_dma_sg()
972 desc->lli.ctrla = ctrla | xfer_count; atc_prep_dma_sg()
973 desc->lli.ctrlb = ctrlb; atc_prep_dma_sg()
975 desc->txd.cookie = 0; atc_prep_dma_sg()
976 desc->len = len; atc_prep_dma_sg()
982 desc->tx_width = src_width; atc_prep_dma_sg()
984 atc_desc_chain(&first, &prev, desc); atc_prep_dma_sg()
1000 set_desc_eol(desc); atc_prep_dma_sg()
1036 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, atc_dma_cyclic_fill_desc() argument
1054 desc->lli.saddr = buf_addr + (period_len * period_index); atc_dma_cyclic_fill_desc()
1055 desc->lli.daddr = sconfig->dst_addr; atc_dma_cyclic_fill_desc()
1056 desc->lli.ctrla = ctrla; atc_dma_cyclic_fill_desc()
1057 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED atc_dma_cyclic_fill_desc()
1062 desc->len = period_len; atc_dma_cyclic_fill_desc()
1066 desc->lli.saddr = sconfig->src_addr; atc_dma_cyclic_fill_desc()
1067 desc->lli.daddr = buf_addr + (period_len * period_index); atc_dma_cyclic_fill_desc()
1068 desc->lli.ctrla = ctrla; atc_dma_cyclic_fill_desc()
1069 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR atc_dma_cyclic_fill_desc()
1074 desc->len = period_len; atc_dma_cyclic_fill_desc()
1138 struct at_desc *desc; atc_prep_dma_cyclic() local
1140 desc = atc_desc_get(atchan); atc_prep_dma_cyclic()
1141 if (!desc) atc_prep_dma_cyclic()
1144 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, atc_prep_dma_cyclic()
1148 atc_desc_chain(&first, &prev, desc); atc_prep_dma_cyclic()
1238 struct at_desc *desc, *_desc; atc_terminate_all() local
1265 list_for_each_entry_safe(desc, _desc, &list, desc_node) atc_terminate_all()
1266 atc_chain_complete(atchan, desc); atc_terminate_all()
1358 struct at_desc *desc; atc_alloc_chan_resources() local
1395 desc = atc_alloc_descriptor(chan, GFP_KERNEL); atc_alloc_chan_resources()
1396 if (!desc) { atc_alloc_chan_resources()
1401 list_add_tail(&desc->desc_node, &tmp_list); atc_alloc_chan_resources()
1428 struct at_desc *desc, *_desc; atc_free_chan_resources() local
1439 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { atc_free_chan_resources()
1440 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); atc_free_chan_resources() local
1441 list_del(&desc->desc_node); atc_free_chan_resources()
1443 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); atc_free_chan_resources()
201 atc_desc_chain(struct at_desc **first, struct at_desc **prev, struct at_desc *desc) atc_desc_chain() argument
286 atc_calc_bytes_left(int current_len, u32 ctrla, struct at_desc *desc) atc_calc_bytes_left() argument
300 atc_calc_bytes_left_from_reg(int current_len, struct at_dma_chan *atchan, struct at_desc *desc) atc_calc_bytes_left_from_reg() argument
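
Note: the at_hdmac residue math in atc_calc_bytes_left() is compact enough to miss. The BTSIZE field of CTRLA counts transfer-width units, not bytes, so the driver shifts the unit count left by the descriptor's stored width exponent to convert it to bytes before subtracting from the running length. As a worked fragment:

	/* width exponent: log2(bytes per transfer unit), stored in tx_width */
	static inline int bytes_left(int current_len, u32 ctrla,
				     struct at_desc *desc)
	{
		return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width);
	}
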
/linux-4.1.27/arch/mips/vr41xx/common/
H A Dicu.c157 struct irq_desc *desc = irq_to_desc(PIU_IRQ); vr41xx_enable_piuint() local
162 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_piuint()
164 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_piuint()
172 struct irq_desc *desc = irq_to_desc(PIU_IRQ); vr41xx_disable_piuint() local
177 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_piuint()
179 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_piuint()
187 struct irq_desc *desc = irq_to_desc(AIU_IRQ); vr41xx_enable_aiuint() local
192 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_aiuint()
194 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_aiuint()
202 struct irq_desc *desc = irq_to_desc(AIU_IRQ); vr41xx_disable_aiuint() local
207 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_aiuint()
209 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_aiuint()
217 struct irq_desc *desc = irq_to_desc(KIU_IRQ); vr41xx_enable_kiuint() local
222 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_kiuint()
224 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_kiuint()
232 struct irq_desc *desc = irq_to_desc(KIU_IRQ); vr41xx_disable_kiuint() local
237 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_kiuint()
239 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_kiuint()
247 struct irq_desc *desc = irq_to_desc(ETHERNET_IRQ); vr41xx_enable_macint() local
250 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_macint()
252 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_macint()
259 struct irq_desc *desc = irq_to_desc(ETHERNET_IRQ); vr41xx_disable_macint() local
262 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_macint()
264 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_macint()
271 struct irq_desc *desc = irq_to_desc(DSIU_IRQ); vr41xx_enable_dsiuint() local
274 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_dsiuint()
276 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_dsiuint()
283 struct irq_desc *desc = irq_to_desc(DSIU_IRQ); vr41xx_disable_dsiuint() local
286 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_dsiuint()
288 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_dsiuint()
295 struct irq_desc *desc = irq_to_desc(FIR_IRQ); vr41xx_enable_firint() local
298 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_firint()
300 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_firint()
307 struct irq_desc *desc = irq_to_desc(FIR_IRQ); vr41xx_disable_firint() local
310 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_firint()
312 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_firint()
319 struct irq_desc *desc = irq_to_desc(PCI_IRQ); vr41xx_enable_pciint() local
325 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_pciint()
327 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_pciint()
335 struct irq_desc *desc = irq_to_desc(PCI_IRQ); vr41xx_disable_pciint() local
341 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_pciint()
343 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_pciint()
351 struct irq_desc *desc = irq_to_desc(SCU_IRQ); vr41xx_enable_scuint() local
357 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_scuint()
359 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_scuint()
367 struct irq_desc *desc = irq_to_desc(SCU_IRQ); vr41xx_disable_scuint() local
373 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_scuint()
375 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_scuint()
383 struct irq_desc *desc = irq_to_desc(CSI_IRQ); vr41xx_enable_csiint() local
389 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_csiint()
391 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_csiint()
399 struct irq_desc *desc = irq_to_desc(CSI_IRQ); vr41xx_disable_csiint() local
405 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_csiint()
407 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_csiint()
415 struct irq_desc *desc = irq_to_desc(BCU_IRQ); vr41xx_enable_bcuint() local
421 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_bcuint()
423 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_bcuint()
431 struct irq_desc *desc = irq_to_desc(BCU_IRQ); vr41xx_disable_bcuint() local
437 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_bcuint()
439 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_bcuint()
479 struct irq_desc *desc = irq_to_desc(irq); set_sysint1_assign() local
485 raw_spin_lock_irq(&desc->lock); set_sysint1_assign()
524 raw_spin_unlock_irq(&desc->lock); set_sysint1_assign()
532 raw_spin_unlock_irq(&desc->lock); set_sysint1_assign()
539 struct irq_desc *desc = irq_to_desc(irq); set_sysint2_assign() local
545 raw_spin_lock_irq(&desc->lock); set_sysint2_assign()
592 raw_spin_unlock_irq(&desc->lock); set_sysint2_assign()
600 raw_spin_unlock_irq(&desc->lock); set_sysint2_assign()
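Every vr41xx_{enable,disable}_*int() hit above repeats one pattern: look up the irq_desc for a fixed interrupt, take its raw lock, and flip a bit in an ICU register. A condensed sketch of that pattern, with a stub standing in for the real ICU register accessors:

    #include <linux/irq.h>

    static void demo_icu_write(u16 mask)
    {
            /* stand-in for the real ICU register set/clear helpers */
    }

    static void demo_enable_unit_int(unsigned int irq, u16 mask)
    {
            struct irq_desc *desc = irq_to_desc(irq);
            unsigned long flags;

            raw_spin_lock_irqsave(&desc->lock, flags);
            demo_icu_write(mask);   /* register update happens under desc->lock */
            raw_spin_unlock_irqrestore(&desc->lock, flags);
    }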
/linux-4.1.27/lib/
H A Dcrc-t10dif.c27 } desc; crc_t10dif() local
33 desc.shash.tfm = crct10dif_tfm; crc_t10dif()
34 desc.shash.flags = 0; crc_t10dif()
35 *(__u16 *)desc.ctx = 0; crc_t10dif()
37 err = crypto_shash_update(&desc.shash, buffer, len); crc_t10dif()
40 return *(__u16 *)desc.ctx; crc_t10dif()
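crc_t10dif() above drives a pre-allocated shash tfm with an on-stack descriptor and reads the 16-bit CRC straight out of the desc context. A self-contained sketch of the same one-shot pattern, allocating the "crct10dif" tfm by name and using crypto_shash_digest() instead of poking desc.ctx directly:

    #include <crypto/hash.h>
    #include <linux/err.h>

    static u16 demo_t10dif(const u8 *buf, size_t len)
    {
            struct crypto_shash *tfm;
            u16 crc = 0;

            tfm = crypto_alloc_shash("crct10dif", 0, 0);
            if (IS_ERR(tfm))
                    return 0;       /* sketch only: real code propagates the error */

            {
                    SHASH_DESC_ON_STACK(desc, tfm);

                    desc->tfm = tfm;
                    desc->flags = 0;
                    crypto_shash_digest(desc, buf, len, (u8 *)&crc);
            }

            crypto_free_shash(tfm);
            return crc;
    }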
/linux-4.1.27/drivers/dma/sh/
H A Drcar-dmac.c130 * @lock: protects the channel CHCR register and the desc members
131 * @desc.free: list of free descriptors
132 * @desc.pending: list of pending descriptors (submitted with tx_submit)
133 * @desc.active: list of active descriptors (activated with issue_pending)
134 * @desc.done: list of completed descriptors
135 * @desc.wait: list of descriptors waiting for an ack
136 * @desc.running: the descriptor being processed (a member of the active list)
137 * @desc.chunks_free: list of free transfer chunk descriptors
138 * @desc.pages: list of pages used by allocated descriptors
164 } desc; member in struct:rcar_dmac_chan
319 struct rcar_dmac_desc *desc = chan->desc.running; rcar_dmac_chan_start_xfer() local
320 u32 chcr = desc->chcr; rcar_dmac_chan_start_xfer()
327 if (desc->hwdescs.use) { rcar_dmac_chan_start_xfer()
331 "chan%u: queue desc %p: %u@%pad\n", rcar_dmac_chan_start_xfer()
332 chan->index, desc, desc->nchunks, &desc->hwdescs.dma); rcar_dmac_chan_start_xfer()
336 desc->hwdescs.dma >> 32); rcar_dmac_chan_start_xfer()
339 (desc->hwdescs.dma & 0xfffffff0) | rcar_dmac_chan_start_xfer()
342 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) | rcar_dmac_chan_start_xfer()
352 chunk = list_first_entry(&desc->chunks, rcar_dmac_chan_start_xfer()
370 if (!desc->cyclic) rcar_dmac_chan_start_xfer()
376 else if (desc->async_tx.callback) rcar_dmac_chan_start_xfer()
385 struct rcar_dmac_xfer_chunk *chunk = desc->running; rcar_dmac_chan_start_xfer()
403 chunk->size >> desc->xfer_shift); rcar_dmac_chan_start_xfer()
436 struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx); rcar_dmac_tx_submit() local
445 chan->index, tx->cookie, desc); rcar_dmac_tx_submit()
447 list_add_tail(&desc->node, &chan->desc.pending); rcar_dmac_tx_submit()
448 desc->running = list_first_entry(&desc->chunks, rcar_dmac_tx_submit()
476 struct rcar_dmac_desc *desc = &page->descs[i]; rcar_dmac_desc_alloc() local
478 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); rcar_dmac_desc_alloc()
479 desc->async_tx.tx_submit = rcar_dmac_tx_submit; rcar_dmac_desc_alloc()
480 INIT_LIST_HEAD(&desc->chunks); rcar_dmac_desc_alloc()
482 list_add_tail(&desc->node, &list); rcar_dmac_desc_alloc()
486 list_splice_tail(&list, &chan->desc.free); rcar_dmac_desc_alloc()
487 list_add_tail(&page->node, &chan->desc.pages); rcar_dmac_desc_alloc()
496 * @desc: the descriptor
506 struct rcar_dmac_desc *desc) rcar_dmac_desc_put()
511 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); rcar_dmac_desc_put()
512 list_add_tail(&desc->node, &chan->desc.free); rcar_dmac_desc_put()
518 struct rcar_dmac_desc *desc, *_desc; rcar_dmac_desc_recycle_acked() local
528 list_splice_init(&chan->desc.wait, &list); rcar_dmac_desc_recycle_acked()
531 list_for_each_entry_safe(desc, _desc, &list, node) { rcar_dmac_desc_recycle_acked()
532 if (async_tx_test_ack(&desc->async_tx)) { rcar_dmac_desc_recycle_acked()
533 list_del(&desc->node); rcar_dmac_desc_recycle_acked()
534 rcar_dmac_desc_put(chan, desc); rcar_dmac_desc_recycle_acked()
543 list_splice(&list, &chan->desc.wait); rcar_dmac_desc_recycle_acked()
558 struct rcar_dmac_desc *desc; rcar_dmac_desc_get() local
566 while (list_empty(&chan->desc.free)) { rcar_dmac_desc_get()
580 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node); rcar_dmac_desc_get()
581 list_del(&desc->node); rcar_dmac_desc_get()
585 return desc; rcar_dmac_desc_get()
610 list_splice_tail(&list, &chan->desc.chunks_free); rcar_dmac_xfer_chunk_alloc()
611 list_add_tail(&page->node, &chan->desc.pages); rcar_dmac_xfer_chunk_alloc()
634 while (list_empty(&chan->desc.chunks_free)) { rcar_dmac_xfer_chunk_get()
648 chunk = list_first_entry(&chan->desc.chunks_free, rcar_dmac_xfer_chunk_get()
658 struct rcar_dmac_desc *desc, size_t size) rcar_dmac_realloc_hwdesc()
668 if (desc->hwdescs.size == size) rcar_dmac_realloc_hwdesc()
671 if (desc->hwdescs.mem) { rcar_dmac_realloc_hwdesc()
672 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size, rcar_dmac_realloc_hwdesc()
673 desc->hwdescs.mem, desc->hwdescs.dma); rcar_dmac_realloc_hwdesc()
674 desc->hwdescs.mem = NULL; rcar_dmac_realloc_hwdesc()
675 desc->hwdescs.size = 0; rcar_dmac_realloc_hwdesc()
681 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size, rcar_dmac_realloc_hwdesc()
682 &desc->hwdescs.dma, GFP_NOWAIT); rcar_dmac_realloc_hwdesc()
683 if (!desc->hwdescs.mem) rcar_dmac_realloc_hwdesc()
686 desc->hwdescs.size = size; rcar_dmac_realloc_hwdesc()
690 struct rcar_dmac_desc *desc) rcar_dmac_fill_hwdesc()
695 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc)); rcar_dmac_fill_hwdesc()
697 hwdesc = desc->hwdescs.mem; rcar_dmac_fill_hwdesc()
701 list_for_each_entry(chunk, &desc->chunks, node) { rcar_dmac_fill_hwdesc()
704 hwdesc->tcr = chunk->size >> desc->xfer_shift; rcar_dmac_fill_hwdesc()
726 struct rcar_dmac_desc *desc, *_desc; rcar_dmac_chan_reinit() local
733 list_splice_init(&chan->desc.pending, &descs); rcar_dmac_chan_reinit()
734 list_splice_init(&chan->desc.active, &descs); rcar_dmac_chan_reinit()
735 list_splice_init(&chan->desc.done, &descs); rcar_dmac_chan_reinit()
736 list_splice_init(&chan->desc.wait, &descs); rcar_dmac_chan_reinit()
738 chan->desc.running = NULL; rcar_dmac_chan_reinit()
742 list_for_each_entry_safe(desc, _desc, &descs, node) { rcar_dmac_chan_reinit()
743 list_del(&desc->node); rcar_dmac_chan_reinit()
744 rcar_dmac_desc_put(chan, desc); rcar_dmac_chan_reinit()
775 struct rcar_dmac_desc *desc) rcar_dmac_chan_configure_desc()
787 switch (desc->direction) { rcar_dmac_chan_configure_desc()
808 desc->xfer_shift = ilog2(xfer_size); rcar_dmac_chan_configure_desc()
809 desc->chcr = chcr | chcr_ts[desc->xfer_shift]; rcar_dmac_chan_configure_desc()
829 struct rcar_dmac_desc *desc; rcar_dmac_chan_prep_sg() local
837 desc = rcar_dmac_desc_get(chan); rcar_dmac_chan_prep_sg()
838 if (!desc) rcar_dmac_chan_prep_sg()
841 desc->async_tx.flags = dma_flags; rcar_dmac_chan_prep_sg()
842 desc->async_tx.cookie = -EBUSY; rcar_dmac_chan_prep_sg()
844 desc->cyclic = cyclic; rcar_dmac_chan_prep_sg()
845 desc->direction = dir; rcar_dmac_chan_prep_sg()
847 rcar_dmac_chan_configure_desc(chan, desc); rcar_dmac_chan_prep_sg()
849 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; rcar_dmac_chan_prep_sg()
885 rcar_dmac_desc_put(chan, desc); for_each_sg()
901 chan->index, chunk, desc, i, sg, size, len, for_each_sg()
910 list_add_tail(&chunk->node, &desc->chunks); for_each_sg()
915 desc->nchunks = nchunks;
916 desc->size = full_size;
928 desc->hwdescs.use = !highmem && nchunks > 1;
929 if (desc->hwdescs.use) {
930 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
931 desc->hwdescs.use = false;
934 return &desc->async_tx;
946 INIT_LIST_HEAD(&rchan->desc.chunks_free); rcar_dmac_alloc_chan_resources()
947 INIT_LIST_HEAD(&rchan->desc.pages); rcar_dmac_alloc_chan_resources()
966 struct rcar_dmac_desc *desc; rcar_dmac_free_chan_resources() local
982 list_splice_init(&rchan->desc.free, &list); rcar_dmac_free_chan_resources()
983 list_splice_init(&rchan->desc.pending, &list); rcar_dmac_free_chan_resources()
984 list_splice_init(&rchan->desc.active, &list); rcar_dmac_free_chan_resources()
985 list_splice_init(&rchan->desc.done, &list); rcar_dmac_free_chan_resources()
986 list_splice_init(&rchan->desc.wait, &list); rcar_dmac_free_chan_resources()
988 list_for_each_entry(desc, &list, node) rcar_dmac_free_chan_resources()
989 rcar_dmac_realloc_hwdesc(rchan, desc, 0); rcar_dmac_free_chan_resources()
991 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { rcar_dmac_free_chan_resources()
1049 struct dma_async_tx_descriptor *desc; rcar_dmac_prep_dma_cyclic() local
1092 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr, rcar_dmac_prep_dma_cyclic()
1096 return desc; rcar_dmac_prep_dma_cyclic()
1138 struct rcar_dmac_desc *desc = chan->desc.running; rcar_dmac_chan_get_residue() local
1144 if (!desc) rcar_dmac_chan_get_residue()
1152 if (cookie != desc->async_tx.cookie) rcar_dmac_chan_get_residue()
1153 return desc->size; rcar_dmac_chan_get_residue()
1161 if (desc->hwdescs.use) { rcar_dmac_chan_get_residue()
1164 WARN_ON(dptr >= desc->nchunks); rcar_dmac_chan_get_residue()
1166 running = desc->running; rcar_dmac_chan_get_residue()
1170 list_for_each_entry_reverse(chunk, &desc->chunks, node) { rcar_dmac_chan_get_residue()
1171 if (chunk == running || ++dptr == desc->nchunks) rcar_dmac_chan_get_residue()
1178 residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift; rcar_dmac_chan_get_residue()
1212 if (list_empty(&rchan->desc.pending)) rcar_dmac_issue_pending()
1216 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active); rcar_dmac_issue_pending()
1222 if (!rchan->desc.running) { rcar_dmac_issue_pending()
1223 struct rcar_dmac_desc *desc; rcar_dmac_issue_pending() local
1225 desc = list_first_entry(&rchan->desc.active, rcar_dmac_issue_pending()
1227 rchan->desc.running = desc; rcar_dmac_issue_pending()
1242 struct rcar_dmac_desc *desc = chan->desc.running; rcar_dmac_isr_desc_stage_end() local
1245 if (WARN_ON(!desc || !desc->cyclic)) { rcar_dmac_isr_desc_stage_end()
1264 struct rcar_dmac_desc *desc = chan->desc.running; rcar_dmac_isr_transfer_end() local
1267 if (WARN_ON_ONCE(!desc)) { rcar_dmac_isr_transfer_end()
1281 if (!desc->hwdescs.use) { rcar_dmac_isr_transfer_end()
1287 if (!list_is_last(&desc->running->node, &desc->chunks)) { rcar_dmac_isr_transfer_end()
1288 desc->running = list_next_entry(desc->running, node); rcar_dmac_isr_transfer_end()
1289 if (!desc->cyclic) rcar_dmac_isr_transfer_end()
1298 if (desc->cyclic) { rcar_dmac_isr_transfer_end()
1299 desc->running = rcar_dmac_isr_transfer_end()
1300 list_first_entry(&desc->chunks, rcar_dmac_isr_transfer_end()
1308 list_move_tail(&desc->node, &chan->desc.done); rcar_dmac_isr_transfer_end()
1311 if (!list_empty(&chan->desc.active)) rcar_dmac_isr_transfer_end()
1312 chan->desc.running = list_first_entry(&chan->desc.active, rcar_dmac_isr_transfer_end()
1316 chan->desc.running = NULL; rcar_dmac_isr_transfer_end()
1319 if (chan->desc.running) rcar_dmac_isr_transfer_end()
1353 struct rcar_dmac_desc *desc; rcar_dmac_isr_channel_thread() local
1358 if (chan->desc.running && chan->desc.running->cyclic) { rcar_dmac_isr_channel_thread()
1362 desc = chan->desc.running; rcar_dmac_isr_channel_thread()
1363 callback = desc->async_tx.callback; rcar_dmac_isr_channel_thread()
1364 callback_param = desc->async_tx.callback_param; rcar_dmac_isr_channel_thread()
1377 while (!list_empty(&chan->desc.done)) { rcar_dmac_isr_channel_thread()
1378 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, rcar_dmac_isr_channel_thread()
1380 dma_cookie_complete(&desc->async_tx); rcar_dmac_isr_channel_thread()
1381 list_del(&desc->node); rcar_dmac_isr_channel_thread()
1383 if (desc->async_tx.callback) { rcar_dmac_isr_channel_thread()
1390 desc->async_tx.callback(desc->async_tx.callback_param); rcar_dmac_isr_channel_thread()
1394 list_add_tail(&desc->node, &chan->desc.wait); rcar_dmac_isr_channel_thread()
1531 INIT_LIST_HEAD(&rchan->desc.free); rcar_dmac_chan_probe()
1532 INIT_LIST_HEAD(&rchan->desc.pending); rcar_dmac_chan_probe()
1533 INIT_LIST_HEAD(&rchan->desc.active); rcar_dmac_chan_probe()
1534 INIT_LIST_HEAD(&rchan->desc.done); rcar_dmac_chan_probe()
1535 INIT_LIST_HEAD(&rchan->desc.wait); rcar_dmac_chan_probe()
505 rcar_dmac_desc_put(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) rcar_dmac_desc_put() argument
657 rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc, size_t size) rcar_dmac_realloc_hwdesc() argument
689 rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) rcar_dmac_fill_hwdesc() argument
774 rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) rcar_dmac_chan_configure_desc() argument
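The rcar-dmac hits above trace one descriptor lifecycle: free -> pending (tx_submit) -> active (issue_pending) -> done (transfer-end interrupt) -> wait -> free again once the client acks. A reduced sketch of the recycle step (rcar_dmac_desc_recycle_acked() above), with illustrative channel/descriptor types and the locking omitted:

    #include <linux/dmaengine.h>
    #include <linux/list.h>

    struct demo_desc {
            struct dma_async_tx_descriptor async_tx;
            struct list_head node;
    };

    struct demo_chan {
            struct list_head wait_list;     /* completed, not yet acked */
            struct list_head free_list;     /* ready for reuse */
    };

    static void demo_recycle_acked(struct demo_chan *chan)
    {
            struct demo_desc *desc, *tmp;

            list_for_each_entry_safe(desc, tmp, &chan->wait_list, node) {
                    if (async_tx_test_ack(&desc->async_tx))
                            /* client consumed the cookie: safe to reuse */
                            list_move_tail(&desc->node, &chan->free_list);
            }
    }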
H A Dusb-dmac.c74 * @desc: the current descriptor
84 struct usb_dmac_desc *desc; member in struct:usb_dmac_chan
199 struct usb_dmac_desc *desc = chan->desc; usb_dmac_chan_start_sg() local
200 struct usb_dmac_sg *sg = desc->sg + index; usb_dmac_chan_start_sg()
205 if (desc->direction == DMA_DEV_TO_MEM) usb_dmac_chan_start_sg()
231 chan->desc = NULL; usb_dmac_chan_start_desc()
242 chan->desc = to_usb_dmac_desc(vd); usb_dmac_chan_start_desc()
243 chan->desc->sg_index = 0; usb_dmac_chan_start_desc()
269 struct usb_dmac_desc *desc; usb_dmac_desc_alloc() local
272 desc = kzalloc(sizeof(*desc) + sg_len * sizeof(desc->sg[0]), gfp); usb_dmac_desc_alloc()
273 if (!desc) usb_dmac_desc_alloc()
276 desc->sg_allocated_len = sg_len; usb_dmac_desc_alloc()
277 INIT_LIST_HEAD(&desc->node); usb_dmac_desc_alloc()
280 list_add_tail(&desc->node, &chan->desc_freed); usb_dmac_desc_alloc()
288 struct usb_dmac_desc *desc, *_desc; usb_dmac_desc_free() local
294 list_for_each_entry_safe(desc, _desc, &list, node) { usb_dmac_desc_free()
295 list_del(&desc->node); usb_dmac_desc_free()
296 kfree(desc); usb_dmac_desc_free()
304 struct usb_dmac_desc *desc = NULL; usb_dmac_desc_get() local
309 list_for_each_entry(desc, &chan->desc_freed, node) { usb_dmac_desc_get()
310 if (sg_len <= desc->sg_allocated_len) { usb_dmac_desc_get()
311 list_move_tail(&desc->node, &chan->desc_got); usb_dmac_desc_get()
313 return desc; usb_dmac_desc_get()
320 /* If a desc was allocated, it was added to the tail of the list */ usb_dmac_desc_get()
322 desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc, usb_dmac_desc_get()
324 list_move_tail(&desc->node, &chan->desc_got); usb_dmac_desc_get()
326 return desc; usb_dmac_desc_get()
333 struct usb_dmac_desc *desc) usb_dmac_desc_put()
338 list_move_tail(&desc->node, &chan->desc_freed); usb_dmac_desc_put()
424 struct usb_dmac_desc *desc; usb_dmac_prep_slave_sg() local
434 desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT); usb_dmac_prep_slave_sg()
435 if (!desc) usb_dmac_prep_slave_sg()
438 desc->direction = dir; usb_dmac_prep_slave_sg()
439 desc->sg_len = sg_len; for_each_sg()
441 desc->sg[i].mem_addr = sg_dma_address(sg); for_each_sg()
442 desc->sg[i].size = sg_dma_len(sg); for_each_sg()
445 return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags);
451 struct usb_dmac_desc *desc; usb_dmac_chan_terminate_all() local
459 if (uchan->desc) usb_dmac_chan_terminate_all()
460 uchan->desc = NULL; usb_dmac_chan_terminate_all()
462 list_for_each_entry(desc, &list, node) usb_dmac_chan_terminate_all()
463 list_move_tail(&desc->node, &uchan->desc_freed); usb_dmac_chan_terminate_all()
471 struct usb_dmac_desc *desc, usb_dmac_get_current_residue()
474 struct usb_dmac_sg *sg = desc->sg + sg_index; usb_dmac_get_current_residue()
482 if (desc->direction == DMA_DEV_TO_MEM) usb_dmac_get_current_residue()
493 struct usb_dmac_desc *desc; usb_dmac_chan_get_residue_if_complete() local
496 list_for_each_entry_reverse(desc, &chan->desc_freed, node) { usb_dmac_chan_get_residue_if_complete()
497 if (desc->done_cookie == cookie) { usb_dmac_chan_get_residue_if_complete()
498 residue = desc->residue; usb_dmac_chan_get_residue_if_complete()
511 struct usb_dmac_desc *desc = chan->desc; usb_dmac_chan_get_residue() local
514 if (!desc) { usb_dmac_chan_get_residue()
518 desc = to_usb_dmac_desc(vd); usb_dmac_chan_get_residue()
522 for (i = desc->sg_index + 1; i < desc->sg_len; i++) usb_dmac_chan_get_residue()
523 residue += desc->sg[i].size; usb_dmac_chan_get_residue()
526 residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index); usb_dmac_chan_get_residue()
563 if (vchan_issue_pending(&uchan->vc) && !uchan->desc) usb_dmac_issue_pending()
570 struct usb_dmac_desc *desc = to_usb_dmac_desc(vd); usb_dmac_virt_desc_free() local
573 usb_dmac_desc_put(chan, desc); usb_dmac_virt_desc_free()
582 struct usb_dmac_desc *desc = chan->desc; usb_dmac_isr_transfer_end() local
584 BUG_ON(!desc); usb_dmac_isr_transfer_end()
586 if (++desc->sg_index < desc->sg_len) { usb_dmac_isr_transfer_end()
587 usb_dmac_chan_start_sg(chan, desc->sg_index); usb_dmac_isr_transfer_end()
589 desc->residue = usb_dmac_get_current_residue(chan, desc, usb_dmac_isr_transfer_end()
590 desc->sg_index - 1); usb_dmac_isr_transfer_end()
591 desc->done_cookie = desc->vd.tx.cookie; usb_dmac_isr_transfer_end()
592 vchan_cookie_complete(&desc->vd); usb_dmac_isr_transfer_end()
594 /* Start the next transfer if this driver has another desc queued */ usb_dmac_isr_transfer_end()
332 usb_dmac_desc_put(struct usb_dmac_chan *chan, struct usb_dmac_desc *desc) usb_dmac_desc_put() argument
470 usb_dmac_get_current_residue(struct usb_dmac_chan *chan, struct usb_dmac_desc *desc, int sg_index) usb_dmac_get_current_residue() argument
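usb_dmac_desc_get() above implements a simple size-aware cache: reuse any freed descriptor whose inline sg array is large enough, otherwise allocate a fresh one. A sketch under simplified types (the real driver also holds a spinlock around the list walk):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct demo_usb_desc {
            unsigned int sg_allocated_len;
            struct list_head node;
            /* the real descriptor carries a flexible sg array here */
    };

    static struct demo_usb_desc *demo_desc_get(struct list_head *freed,
                                               struct list_head *got,
                                               unsigned int sg_len, gfp_t gfp)
    {
            struct demo_usb_desc *desc;

            list_for_each_entry(desc, freed, node) {
                    if (sg_len <= desc->sg_allocated_len) {
                            list_move_tail(&desc->node, got);
                            return desc;    /* big enough: reuse it */
                    }
            }

            desc = kzalloc(sizeof(*desc), gfp);
            if (!desc)
                    return NULL;
            desc->sg_allocated_len = sg_len;
            list_add_tail(&desc->node, got);
            return desc;
    }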
/linux-4.1.27/arch/arm64/crypto/
H A Dsha1-ce-glue.c35 static int sha1_ce_update(struct shash_desc *desc, const u8 *data, sha1_ce_update() argument
38 struct sha1_ce_state *sctx = shash_desc_ctx(desc); sha1_ce_update()
42 sha1_base_do_update(desc, data, len, sha1_ce_update()
49 static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, sha1_ce_finup() argument
52 struct sha1_ce_state *sctx = shash_desc_ctx(desc); sha1_ce_finup()
67 sha1_base_do_update(desc, data, len, sha1_ce_finup()
70 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); sha1_ce_finup()
72 return sha1_base_finish(desc, out); sha1_ce_finup()
75 static int sha1_ce_final(struct shash_desc *desc, u8 *out) sha1_ce_final() argument
77 struct sha1_ce_state *sctx = shash_desc_ctx(desc); sha1_ce_final()
81 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); sha1_ce_final()
83 return sha1_base_finish(desc, out); sha1_ce_final()
H A Dsha2-ce-glue.c35 static int sha256_ce_update(struct shash_desc *desc, const u8 *data, sha256_ce_update() argument
38 struct sha256_ce_state *sctx = shash_desc_ctx(desc); sha256_ce_update()
42 sha256_base_do_update(desc, data, len, sha256_ce_update()
49 static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, sha256_ce_finup() argument
52 struct sha256_ce_state *sctx = shash_desc_ctx(desc); sha256_ce_finup()
67 sha256_base_do_update(desc, data, len, sha256_ce_finup()
70 sha256_base_do_finalize(desc, sha256_ce_finup()
73 return sha256_base_finish(desc, out); sha256_ce_finup()
76 static int sha256_ce_final(struct shash_desc *desc, u8 *out) sha256_ce_final() argument
78 struct sha256_ce_state *sctx = shash_desc_ctx(desc); sha256_ce_final()
82 sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); sha256_ce_final()
84 return sha256_base_finish(desc, out); sha256_ce_final()
H A Daes-glue.c99 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
102 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_encrypt()
107 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ecb_encrypt()
109 err = blkcipher_walk_virt(desc, &walk); ecb_encrypt()
115 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); ecb_encrypt()
121 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
124 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_decrypt()
129 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ecb_decrypt()
131 err = blkcipher_walk_virt(desc, &walk); ecb_decrypt()
137 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); ecb_decrypt()
143 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
146 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_encrypt()
151 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_encrypt()
153 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
160 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); cbc_encrypt()
166 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
169 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_decrypt()
174 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_decrypt()
176 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
183 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); cbc_decrypt()
189 static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_encrypt() argument
192 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_encrypt()
197 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ctr_encrypt()
199 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); ctr_encrypt()
211 err = blkcipher_walk_done(desc, &walk, ctr_encrypt()
228 err = blkcipher_walk_done(desc, &walk, 0); ctr_encrypt()
235 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_encrypt() argument
238 struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt()
243 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; xts_encrypt()
245 err = blkcipher_walk_virt(desc, &walk); xts_encrypt()
252 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); xts_encrypt()
259 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_decrypt() argument
262 struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt()
267 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; xts_decrypt()
269 err = blkcipher_walk_virt(desc, &walk); xts_decrypt()
276 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); xts_decrypt()
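All of the ecb/cbc/ctr/xts hits above share one skeleton: initialize a blkcipher walk, process whole AES blocks per chunk, and hand the sub-block tail back to blkcipher_walk_done(). A sketch of that skeleton, with a stub standing in for the NEON/Crypto-Extensions block routine:

    #include <crypto/aes.h>
    #include <crypto/algapi.h>

    static void demo_cipher_blocks(u8 *dst, const u8 *src, unsigned int blocks)
    {
            /* stand-in for the accelerated AES block routine */
    }

    static int demo_ecb_encrypt(struct blkcipher_desc *desc,
                                struct scatterlist *dst,
                                struct scatterlist *src, unsigned int nbytes)
    {
            struct blkcipher_walk walk;
            int err;

            blkcipher_walk_init(&walk, dst, src, nbytes);
            err = blkcipher_walk_virt(desc, &walk);

            while (walk.nbytes) {
                    demo_cipher_blocks(walk.dst.virt.addr, walk.src.virt.addr,
                                       walk.nbytes / AES_BLOCK_SIZE);
                    /* report back whatever did not fill a whole block */
                    err = blkcipher_walk_done(desc, &walk,
                                              walk.nbytes % AES_BLOCK_SIZE);
            }
            return err;
    }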
H A Dcrc32-arm64.c100 static int chksum_init(struct shash_desc *desc) chksum_init() argument
102 struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); chksum_init()
103 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_init()
128 static int chksum_update(struct shash_desc *desc, const u8 *data, chksum_update() argument
131 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_update()
137 static int chksumc_update(struct shash_desc *desc, const u8 *data, chksumc_update() argument
140 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksumc_update()
146 static int chksum_final(struct shash_desc *desc, u8 *out) chksum_final() argument
148 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_final()
154 static int chksumc_final(struct shash_desc *desc, u8 *out) chksumc_final() argument
156 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksumc_final()
174 static int chksum_finup(struct shash_desc *desc, const u8 *data, chksum_finup() argument
177 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_finup()
182 static int chksumc_finup(struct shash_desc *desc, const u8 *data, chksumc_finup() argument
185 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksumc_finup()
190 static int chksum_digest(struct shash_desc *desc, const u8 *data, chksum_digest() argument
193 struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); chksum_digest()
198 static int chksumc_digest(struct shash_desc *desc, const u8 *data, chksumc_digest() argument
201 struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); chksumc_digest()
/linux-4.1.27/include/net/
H A Dpsnap.h5 register_snap_client(const unsigned char *desc,
/linux-4.1.27/arch/arm/include/asm/mach/
H A Dirq.h26 #define do_bad_IRQ(irq,desc) \
28 raw_spin_lock(&desc->lock); \
29 handle_bad_irq(irq, desc); \
30 raw_spin_unlock(&desc->lock); \
/linux-4.1.27/tools/perf/bench/
H A Dmem-memcpy-arch.h4 #define MEMCPY_FN(fn, name, desc) \
H A Dmem-memset-arch.h4 #define MEMSET_FN(fn, name, desc) \
/linux-4.1.27/arch/s390/kernel/
H A Dirq.c35 char *desc; member in struct:irq_class
60 {.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"},
61 {.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"},
62 {.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"},
63 {.irq = IRQEXT_TMR, .name = "TMR", .desc = "[EXT] CPU Timer"},
64 {.irq = IRQEXT_TLA, .name = "TAL", .desc = "[EXT] Timing Alert"},
65 {.irq = IRQEXT_PFL, .name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
66 {.irq = IRQEXT_DSD, .name = "DSD", .desc = "[EXT] DASD Diag"},
67 {.irq = IRQEXT_VRT, .name = "VRT", .desc = "[EXT] Virtio"},
68 {.irq = IRQEXT_SCP, .name = "SCP", .desc = "[EXT] Service Call"},
69 {.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"},
70 {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
71 {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
72 {.irq = IRQEXT_CMR, .name = "CMR", .desc = "[EXT] CPU-Measurement: RI"},
73 {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
74 {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
75 {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
76 {.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
77 {.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
78 {.irq = IRQIO_C70, .name = "C70", .desc = "[I/O] 3270"},
79 {.irq = IRQIO_TAP, .name = "TAP", .desc = "[I/O] Tape"},
80 {.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
81 {.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
82 {.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
83 {.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"},
84 {.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},
85 {.irq = IRQIO_CSC, .name = "CSC", .desc = "[I/O] CHSC Subchannel"},
86 {.irq = IRQIO_PCI, .name = "PCI", .desc = "[I/O] PCI Interrupt" },
87 {.irq = IRQIO_MSI, .name = "MSI", .desc = "[I/O] MSI Interrupt" },
88 {.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
89 {.irq = IRQIO_VAI, .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
90 {.irq = NMI_NMI, .name = "NMI", .desc = "[NMI] Machine Check"},
91 {.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
147 if (irqclass_sub_desc[index].desc) show_interrupts()
148 seq_printf(p, " %s", irqclass_sub_desc[index].desc); show_interrupts()
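The table above pairs each interrupt class with a short name and an optional long description; show_interrupts() prints the description only when one is present. A sketch of that table-driven /proc/interrupts output, with made-up entries:

    #include <linux/kernel.h>
    #include <linux/seq_file.h>

    struct demo_irq_class {
            int irq;
            char *name;
            char *desc;     /* optional long description, may be NULL */
    };

    static const struct demo_irq_class demo_classes[] = {
            { .irq = 1, .name = "CLK", .desc = "[EXT] Clock Comparator" },
            { .irq = 2, .name = "EXC" },            /* no long description */
    };

    static void demo_show_all(struct seq_file *p)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(demo_classes); i++) {
                    seq_printf(p, "%s", demo_classes[i].name);
                    if (demo_classes[i].desc)
                            seq_printf(p, " %s", demo_classes[i].desc);
                    seq_putc(p, '\n');
            }
    }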
/linux-4.1.27/drivers/net/wireless/ti/wl1251/
H A Drx.c35 struct wl1251_rx_descriptor *desc) wl1251_rx_header()
43 wl1251_mem_read(wl, rx_packet_ring_addr, desc, sizeof(*desc)); wl1251_rx_header()
47 struct wl1251_rx_descriptor *desc, wl1251_rx_status()
57 status->mactime = desc->timestamp; wl1251_rx_status()
73 status->signal = desc->rssi; wl1251_rx_status()
79 wl->noise = desc->rssi - desc->snr / 2; wl1251_rx_status()
81 status->freq = ieee80211_channel_to_frequency(desc->channel, wl1251_rx_status()
86 if (!wl->monitor_present && (desc->flags & RX_DESC_ENCRYPTION_MASK)) { wl1251_rx_status()
89 if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL))) wl1251_rx_status()
92 if (unlikely(desc->flags & RX_DESC_MIC_FAIL)) wl1251_rx_status()
96 if (unlikely(!(desc->flags & RX_DESC_VALID_FCS))) wl1251_rx_status()
99 switch (desc->rate) { wl1251_rx_status()
134 if (desc->rate == RATE_1MBPS) { wl1251_rx_status()
135 if (!(desc->mod_pre & OFDM_RATE_BIT)) wl1251_rx_status()
143 if (desc->mod_pre & SHORT_PREAMBLE_BIT) wl1251_rx_status()
148 struct wl1251_rx_descriptor *desc) wl1251_rx_body()
156 length = WL1251_RX_ALIGN(desc->length - PLCP_HEADER_LENGTH); wl1251_rx_body()
157 curr_id = (desc->flags & RX_DESC_SEQNUM_MASK) >> RX_DESC_PACKETID_SHIFT; wl1251_rx_body()
183 skb_trim(skb, desc->length - PLCP_HEADER_LENGTH); wl1251_rx_body()
190 wl1251_rx_status(wl, desc, &status, beacon); wl1251_rx_body()
34 wl1251_rx_header(struct wl1251 *wl, struct wl1251_rx_descriptor *desc) wl1251_rx_header() argument
46 wl1251_rx_status(struct wl1251 *wl, struct wl1251_rx_descriptor *desc, struct ieee80211_rx_status *status, u8 beacon) wl1251_rx_status() argument
147 wl1251_rx_body(struct wl1251 *wl, struct wl1251_rx_descriptor *desc) wl1251_rx_body() argument
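wl1251_rx_status() above is a straight translation from the hardware rx descriptor into mac80211's ieee80211_rx_status (timestamp, RSSI, channel, crypto flags). A trimmed sketch of that translation with an illustrative descriptor layout:

    #include <linux/string.h>
    #include <net/mac80211.h>

    struct demo_rx_desc {           /* illustrative subset of the hw layout */
            u32 timestamp;
            s8 rssi;
            u8 channel;
    };

    static void demo_fill_status(const struct demo_rx_desc *d,
                                 struct ieee80211_rx_status *status)
    {
            memset(status, 0, sizeof(*status));
            status->mactime = d->timestamp;
            status->signal = d->rssi;
            status->freq = ieee80211_channel_to_frequency(d->channel,
                                                          IEEE80211_BAND_2GHZ);
    }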
/linux-4.1.27/drivers/sh/intc/
H A Dhandle.c18 static intc_enum __init intc_grp_id(struct intc_desc *desc, intc_grp_id() argument
21 struct intc_group *g = desc->hw.groups; intc_grp_id()
24 for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) { intc_grp_id()
25 g = desc->hw.groups + i; intc_grp_id()
38 static unsigned int __init _intc_mask_data(struct intc_desc *desc, _intc_mask_data() argument
44 struct intc_mask_reg *mr = desc->hw.mask_regs; _intc_mask_data()
48 while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) { _intc_mask_data()
49 mr = desc->hw.mask_regs + *reg_idx; _intc_mask_data()
89 intc_get_mask_handle(struct intc_desc *desc, struct intc_desc_int *d, intc_get_mask_handle() argument
96 ret = _intc_mask_data(desc, d, enum_id, &i, &j); intc_get_mask_handle()
101 return intc_get_mask_handle(desc, d, intc_grp_id(desc, enum_id), 0); intc_get_mask_handle()
106 static unsigned int __init _intc_prio_data(struct intc_desc *desc, _intc_prio_data() argument
112 struct intc_prio_reg *pr = desc->hw.prio_regs; _intc_prio_data()
116 while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) { _intc_prio_data()
117 pr = desc->hw.prio_regs + *reg_idx; _intc_prio_data()
158 intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d, intc_get_prio_handle() argument
165 ret = _intc_prio_data(desc, d, enum_id, &i, &j); intc_get_prio_handle()
170 return intc_get_prio_handle(desc, d, intc_grp_id(desc, enum_id), 0); intc_get_prio_handle()
175 static unsigned int intc_ack_data(struct intc_desc *desc, intc_ack_data() argument
178 struct intc_mask_reg *mr = desc->hw.ack_regs; intc_ack_data()
182 for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) { intc_ack_data()
183 mr = desc->hw.ack_regs + i; intc_ack_data()
231 void __init intc_enable_disable_enum(struct intc_desc *desc, intc_enable_disable_enum() argument
240 data = _intc_mask_data(desc, d, enum_id, &i, &j); intc_enable_disable_enum()
249 data = _intc_prio_data(desc, d, enum_id, &i, &j); intc_enable_disable_enum()
258 intc_get_sense_handle(struct intc_desc *desc, struct intc_desc_int *d, intc_get_sense_handle() argument
261 struct intc_sense_reg *sr = desc->hw.sense_regs; intc_get_sense_handle()
264 for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) { intc_get_sense_handle()
265 sr = desc->hw.sense_regs + i; intc_get_sense_handle()
287 void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc, intc_set_ack_handle() argument
295 if (!desc->hw.ack_regs) intc_set_ack_handle()
299 ack_handle[irq] = intc_ack_data(desc, d, id); intc_set_ack_handle()
H A Dbalancing.c40 static unsigned int intc_dist_data(struct intc_desc *desc, intc_dist_data() argument
44 struct intc_mask_reg *mr = desc->hw.mask_regs; intc_dist_data()
48 for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) { intc_dist_data()
49 mr = desc->hw.mask_regs + i; intc_dist_data()
83 void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc, intc_set_dist_handle() argument
91 if (!desc->hw.mask_regs) intc_set_dist_handle()
95 dist_handle[irq] = intc_dist_data(desc, d, id); intc_set_dist_handle()
H A Dvirq-debugfs.c27 struct intc_desc_int *desc = entry->desc; intc_irq_xlate_debug() local
29 if (!desc) intc_irq_xlate_debug()
34 seq_printf(m, "%-15s\n", desc->chip.name); intc_irq_xlate_debug()
H A Dcore.c68 static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc) intc_redirect_irq() argument
73 static void __init intc_register_irq(struct intc_desc *desc, intc_register_irq() argument
95 data[0] = intc_get_mask_handle(desc, d, enum_id, 0); intc_register_irq()
96 data[1] = intc_get_prio_handle(desc, d, enum_id, 0); intc_register_irq()
106 data[0] = data[0] ? data[0] : intc_get_mask_handle(desc, d, enum_id, 1); intc_register_irq()
107 data[1] = data[1] ? data[1] : intc_get_prio_handle(desc, d, enum_id, 1); intc_register_irq()
148 data[0] = intc_get_sense_handle(desc, d, enum_id); intc_register_irq()
158 intc_set_ack_handle(irq, desc, d, enum_id); intc_register_irq()
159 intc_set_dist_handle(irq, desc, d, enum_id); intc_register_irq()
182 int __init register_intc_controller(struct intc_desc *desc) register_intc_controller() argument
185 struct intc_hw_desc *hw = &desc->hw; register_intc_controller()
190 desc->name, hw->nr_vectors); register_intc_controller()
204 if (desc->num_resources) { register_intc_controller()
205 d->nr_windows = desc->num_resources; register_intc_controller()
212 res = desc->resource + k; register_intc_controller()
290 d->chip.name = desc->name; register_intc_controller()
299 if (desc->force_disable) register_intc_controller()
300 intc_enable_disable_enum(desc, d, desc->force_disable, 0); register_intc_controller()
303 if (desc->force_enable) register_intc_controller()
304 intc_enable_disable_enum(desc, d, desc->force_enable, 0); register_intc_controller()
334 intc_register_irq(desc, d, vect->enum_id, irq); register_intc_controller()
374 intc_subgroup_init(desc, d); register_intc_controller()
377 if (desc->force_enable) register_intc_controller()
378 intc_enable_disable_enum(desc, d, desc->force_enable, 1); register_intc_controller()
380 d->skip_suspend = desc->skip_syscore_suspend; register_intc_controller()
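All of the intc handle lookups above (_intc_mask_data(), _intc_prio_data(), intc_ack_data(), intc_dist_data()) walk fixed register tables from the intc_desc until an enum id matches, then pack the register index and bit position into a handle. A simplified sketch of that search, with stand-in types and a naive packing:

    struct demo_mask_reg {
            unsigned long addr;             /* register address (unused here) */
            unsigned int nr_ids;
            unsigned short enum_ids[16];    /* one entry per bit */
    };

    static unsigned int demo_mask_handle(const struct demo_mask_reg *regs,
                                         unsigned int nr_regs,
                                         unsigned short enum_id)
    {
            unsigned int i, j;

            for (i = 0; i < nr_regs; i++)
                    for (j = 0; j < regs[i].nr_ids; j++)
                            if (regs[i].enum_ids[j] == enum_id)
                                    return (i << 8) | j;    /* reg index + bit */

            return 0;       /* not found */
    }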
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
H A Dpers.c46 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc, ptlrpc_fill_bulk_md() argument
51 LASSERT(mdidx < desc->bd_md_max_brw); ptlrpc_fill_bulk_md()
52 LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); ptlrpc_fill_bulk_md()
57 md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV); ptlrpc_fill_bulk_md()
59 if (desc->bd_enc_iov) ptlrpc_fill_bulk_md()
60 md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV]; ptlrpc_fill_bulk_md()
62 md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV]; ptlrpc_fill_bulk_md()
65 void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page, ptlrpc_add_bulk_page() argument
68 lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count]; ptlrpc_add_bulk_page()
74 desc->bd_iov_count++; ptlrpc_add_bulk_page()
H A Dniobuf.c116 struct ptlrpc_bulk_desc *desc = req->rq_bulk; ptlrpc_register_bulk() local
129 /* NB no locking required until desc is on the network */ ptlrpc_register_bulk()
130 LASSERT(desc->bd_nob > 0); ptlrpc_register_bulk()
131 LASSERT(desc->bd_md_count == 0); ptlrpc_register_bulk()
132 LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT); ptlrpc_register_bulk()
133 LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); ptlrpc_register_bulk()
134 LASSERT(desc->bd_req != NULL); ptlrpc_register_bulk()
135 LASSERT(desc->bd_type == BULK_PUT_SINK || ptlrpc_register_bulk()
136 desc->bd_type == BULK_GET_SOURCE); ptlrpc_register_bulk()
140 desc->bd_nob_transferred = 0; ptlrpc_register_bulk()
142 LASSERT(desc->bd_nob_transferred == 0); ptlrpc_register_bulk()
144 desc->bd_failure = 0; ptlrpc_register_bulk()
146 peer = desc->bd_import->imp_connection->c_peer; ptlrpc_register_bulk()
148 LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback); ptlrpc_register_bulk()
149 LASSERT(desc->bd_cbid.cbid_arg == desc); ptlrpc_register_bulk()
158 xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1); ptlrpc_register_bulk()
159 LASSERTF(!(desc->bd_registered && ptlrpc_register_bulk()
161 xid != desc->bd_last_xid, ptlrpc_register_bulk()
163 desc->bd_registered, xid, desc->bd_last_xid); ptlrpc_register_bulk()
165 total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV; ptlrpc_register_bulk()
166 desc->bd_registered = 1; ptlrpc_register_bulk()
167 desc->bd_last_xid = xid; ptlrpc_register_bulk()
168 desc->bd_md_count = total_md; ptlrpc_register_bulk()
169 md.user_ptr = &desc->bd_cbid; ptlrpc_register_bulk()
175 ((desc->bd_type == BULK_GET_SOURCE) ? ptlrpc_register_bulk()
177 ptlrpc_fill_bulk_md(&md, desc, posted_md); ptlrpc_register_bulk()
179 rc = LNetMEAttach(desc->bd_portal, peer, xid, 0, ptlrpc_register_bulk()
183 desc->bd_import->imp_obd->obd_name, xid, ptlrpc_register_bulk()
190 &desc->bd_mds[posted_md]); ptlrpc_register_bulk()
193 desc->bd_import->imp_obd->obd_name, xid, ptlrpc_register_bulk()
203 spin_lock(&desc->bd_lock); ptlrpc_register_bulk()
204 desc->bd_md_count -= total_md - posted_md; ptlrpc_register_bulk()
205 spin_unlock(&desc->bd_lock); ptlrpc_register_bulk()
206 LASSERT(desc->bd_md_count >= 0); ptlrpc_register_bulk()
207 mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw); ptlrpc_register_bulk()
215 LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK), ptlrpc_register_bulk()
217 desc->bd_last_xid, req->rq_xid); ptlrpc_register_bulk()
219 spin_lock(&desc->bd_lock); ptlrpc_register_bulk()
221 if (desc->bd_md_count != total_md) ptlrpc_register_bulk()
223 desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer), ptlrpc_register_bulk()
224 total_md - desc->bd_md_count); ptlrpc_register_bulk()
225 spin_unlock(&desc->bd_lock); ptlrpc_register_bulk()
228 desc->bd_md_count, ptlrpc_register_bulk()
229 desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink", ptlrpc_register_bulk()
230 desc->bd_iov_count, desc->bd_nob, ptlrpc_register_bulk()
231 desc->bd_last_xid, req->rq_xid, desc->bd_portal); ptlrpc_register_bulk()
238 * Disconnect a bulk desc from the network. Idempotent. Not
245 struct ptlrpc_bulk_desc *desc = req->rq_bulk; ptlrpc_unregister_bulk() local
260 LASSERT(desc->bd_req == req); /* bd_req NULL until registered */ ptlrpc_unregister_bulk()
266 mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw); ptlrpc_unregister_bulk()
295 DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p", ptlrpc_unregister_bulk()
296 desc); ptlrpc_unregister_bulk()
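ptlrpc_register_bulk() above splits a bulk of bd_iov_count pages across ceil(bd_iov_count / LNET_MAX_IOV) LNet memory descriptors, each matched to one xid in a contiguous range. The rounding in miniature:

    #include <linux/kernel.h>

    /* same rounding as (iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV */
    static unsigned int demo_bulk_md_count(unsigned int iov_count,
                                           unsigned int max_iov)
    {
            return DIV_ROUND_UP(iov_count, max_iov);
    }

Assuming LNET_MAX_IOV is 256, a 300-page bulk posts two MDs: 256 pages on the first and 44 on the second.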
/linux-4.1.27/include/crypto/
H A Dsha256_base.h21 static inline int sha224_base_init(struct shash_desc *desc) sha224_base_init() argument
23 struct sha256_state *sctx = shash_desc_ctx(desc); sha224_base_init()
38 static inline int sha256_base_init(struct shash_desc *desc) sha256_base_init() argument
40 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_init()
55 static inline int sha256_base_do_update(struct shash_desc *desc, sha256_base_do_update() argument
60 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_do_update()
93 static inline int sha256_base_do_finalize(struct shash_desc *desc, sha256_base_do_finalize() argument
97 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_do_finalize()
116 static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) sha256_base_finish() argument
118 unsigned int digest_size = crypto_shash_digestsize(desc->tfm); sha256_base_finish()
119 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_finish()
H A Dsha512_base.h21 static inline int sha384_base_init(struct shash_desc *desc) sha384_base_init() argument
23 struct sha512_state *sctx = shash_desc_ctx(desc); sha384_base_init()
38 static inline int sha512_base_init(struct shash_desc *desc) sha512_base_init() argument
40 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_init()
55 static inline int sha512_base_do_update(struct shash_desc *desc, sha512_base_do_update() argument
60 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_do_update()
95 static inline int sha512_base_do_finalize(struct shash_desc *desc, sha512_base_do_finalize() argument
99 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_do_finalize()
119 static inline int sha512_base_finish(struct shash_desc *desc, u8 *out) sha512_base_finish() argument
121 unsigned int digest_size = crypto_shash_digestsize(desc->tfm); sha512_base_finish()
122 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_finish()
H A Dsha1_base.h20 static inline int sha1_base_init(struct shash_desc *desc) sha1_base_init() argument
22 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_init()
34 static inline int sha1_base_do_update(struct shash_desc *desc, sha1_base_do_update() argument
39 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_do_update()
72 static inline int sha1_base_do_finalize(struct shash_desc *desc, sha1_base_do_finalize() argument
76 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_do_finalize()
95 static inline int sha1_base_finish(struct shash_desc *desc, u8 *out) sha1_base_finish() argument
97 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_finish()
H A Dsha.h87 extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
90 extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
93 extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
96 extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
99 extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
102 extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
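The sha*_base helpers above exist so that an arch driver only supplies a block-transform function; the partial-block update/finalize/finish logic is generic. A sketch of the resulting glue, with a stub in place of the accelerated transform (compare sha256_ce_update()/sha256_ce_final() earlier in these results):

    #include <crypto/internal/hash.h>
    #include <crypto/sha.h>
    #include <crypto/sha256_base.h>

    static void demo_sha256_block(struct sha256_state *sst, u8 const *src,
                                  int blocks)
    {
            /* stand-in for the accelerated compression function */
    }

    static int demo_update(struct shash_desc *desc, const u8 *data,
                           unsigned int len)
    {
            return sha256_base_do_update(desc, data, len, demo_sha256_block);
    }

    static int demo_final(struct shash_desc *desc, u8 *out)
    {
            sha256_base_do_finalize(desc, demo_sha256_block);
            return sha256_base_finish(desc, out);
    }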
/linux-4.1.27/drivers/s390/kvm/
H A Dkvm_virtio.c42 struct kvm_device_desc *desc; member in struct:kvm_device
56 static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc) kvm_vq_config() argument
58 return (struct kvm_vqconfig *)(desc + 1); kvm_vq_config()
61 static u8 *kvm_vq_features(const struct kvm_device_desc *desc) kvm_vq_features() argument
63 return (u8 *)(kvm_vq_config(desc) + desc->num_vq); kvm_vq_features()
66 static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc) kvm_vq_configspace() argument
68 return kvm_vq_features(desc) + desc->feature_len * 2; kvm_vq_configspace()
72 * The total size of the config page used by this device (incl. desc)
74 static unsigned desc_size(const struct kvm_device_desc *desc) desc_size() argument
76 return sizeof(*desc) desc_size()
77 + desc->num_vq * sizeof(struct kvm_vqconfig) desc_size()
78 + desc->feature_len * 2 desc_size()
79 + desc->config_len; desc_size()
87 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; kvm_get_features() local
88 u8 *in_features = kvm_vq_features(desc); kvm_get_features()
90 for (i = 0; i < min(desc->feature_len * 8, 32); i++) kvm_get_features()
99 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; kvm_finalize_features() local
101 u8 *out_features = kvm_vq_features(desc) + desc->feature_len; kvm_finalize_features()
109 memset(out_features, 0, desc->feature_len); kvm_finalize_features()
110 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; kvm_finalize_features()
125 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; kvm_get() local
127 BUG_ON(offset + len > desc->config_len); kvm_get()
128 memcpy(buf, kvm_vq_configspace(desc) + offset, len); kvm_get()
134 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; kvm_set() local
136 BUG_ON(offset + len > desc->config_len); kvm_set()
137 memcpy(kvm_vq_configspace(desc) + offset, buf, len); kvm_set()
147 return to_kvmdev(vdev)->desc->status; kvm_get_status()
153 to_kvmdev(vdev)->desc->status = status; kvm_set_status()
155 (unsigned long) to_kvmdev(vdev)->desc); kvm_set_status()
166 (unsigned long) to_kvmdev(vdev)->desc); kvm_reset()
199 if (index >= kdev->desc->num_vq) kvm_find_vq()
205 config = kvm_vq_config(kdev->desc)+index; kvm_find_vq()
264 if (nvqs > kdev->desc->num_vq) kvm_find_vqs()
324 kdev->desc = d; add_kvm_device()
353 * match for a kvm device with a specific desc pointer
360 return kdev->desc == data; match_desc()
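The kvm_vq_config()/kvm_vq_features()/kvm_vq_configspace() helpers above are pure pointer arithmetic over one contiguous config page: descriptor, then the vqconfig array, then two feature bitmaps, then the config space. The size computation in miniature, with an illustrative vqconfig size:

    struct demo_dev_desc {
            unsigned char num_vq;
            unsigned char feature_len;
            unsigned char config_len;
    };

    #define DEMO_VQCONFIG_SIZE 24   /* illustrative sizeof(struct kvm_vqconfig) */

    static unsigned int demo_desc_size(const struct demo_dev_desc *d)
    {
            return sizeof(*d)
                    + d->num_vq * DEMO_VQCONFIG_SIZE    /* vqconfig array */
                    + d->feature_len * 2                /* host + guest feature bits */
                    + d->config_len;                    /* device config space */
    }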
/linux-4.1.27/fs/nfs/
H A Dpagelist.c46 nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc) nfs_pgio_current_mirror() argument
48 return nfs_pgio_has_mirroring(desc) ? nfs_pgio_current_mirror()
49 &desc->pg_mirrors[desc->pg_mirror_idx] : nfs_pgio_current_mirror()
50 &desc->pg_mirrors[0]; nfs_pgio_current_mirror()
54 void nfs_pgheader_init(struct nfs_pageio_descriptor *desc, nfs_pgheader_init() argument
58 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_pgheader_init()
62 hdr->inode = desc->pg_inode; nfs_pgheader_init()
66 hdr->dreq = desc->pg_dreq; nfs_pgheader_init()
67 hdr->layout_private = desc->pg_layout_private; nfs_pgheader_init()
69 hdr->completion_ops = desc->pg_completion_ops; nfs_pgheader_init()
73 hdr->pgio_mirror_idx = desc->pg_mirror_idx; nfs_pgheader_init()
487 * @desc: pointer to descriptor
488 * @prev: previous request in desc, or NULL
491 * Returns zero if @req can be coalesced into @desc, otherwise it returns
494 size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, nfs_generic_pg_test() argument
497 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_generic_pg_test()
665 * @desc: IO descriptor
668 static int nfs_pgio_error(struct nfs_pageio_descriptor *desc, nfs_pgio_error() argument
679 for (midx = 0; midx < desc->pg_mirror_count; midx++) { nfs_pgio_error()
680 mirror = &desc->pg_mirrors[midx]; nfs_pgio_error()
681 desc->pg_completion_ops->error_cleanup(&mirror->pg_list); nfs_pgio_error()
712 * @desc: pointer to descriptor
718 void nfs_pageio_init(struct nfs_pageio_descriptor *desc, nfs_pageio_init() argument
729 desc->pg_moreio = 0; nfs_pageio_init()
730 desc->pg_inode = inode; nfs_pageio_init()
731 desc->pg_ops = pg_ops; nfs_pageio_init()
732 desc->pg_completion_ops = compl_ops; nfs_pageio_init()
733 desc->pg_rw_ops = rw_ops; nfs_pageio_init()
734 desc->pg_ioflags = io_flags; nfs_pageio_init()
735 desc->pg_error = 0; nfs_pageio_init()
736 desc->pg_lseg = NULL; nfs_pageio_init()
737 desc->pg_dreq = NULL; nfs_pageio_init()
738 desc->pg_layout_private = NULL; nfs_pageio_init()
739 desc->pg_bsize = bsize; nfs_pageio_init()
741 desc->pg_mirror_count = 1; nfs_pageio_init()
742 desc->pg_mirror_idx = 0; nfs_pageio_init()
749 desc->pg_mirrors_dynamic = new; nfs_pageio_init()
750 desc->pg_mirrors = new; nfs_pageio_init()
753 nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize); nfs_pageio_init()
755 desc->pg_mirrors_dynamic = NULL; nfs_pageio_init()
756 desc->pg_mirrors = desc->pg_mirrors_static; nfs_pageio_init()
757 nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize); nfs_pageio_init()
791 int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, nfs_generic_pgio() argument
794 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_generic_pgio()
805 return nfs_pgio_error(desc, hdr); nfs_generic_pgio()
807 nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq); nfs_generic_pgio()
824 return nfs_pgio_error(desc, hdr); nfs_generic_pgio()
826 if ((desc->pg_ioflags & FLUSH_COND_STABLE) && nfs_generic_pgio()
827 (desc->pg_moreio || nfs_reqs_to_commit(&cinfo))) nfs_generic_pgio()
828 desc->pg_ioflags &= ~FLUSH_COND_STABLE; nfs_generic_pgio()
831 nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo); nfs_generic_pgio()
832 desc->pg_rpc_callops = &nfs_pgio_common_ops; nfs_generic_pgio()
837 static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) nfs_generic_pg_pgios() argument
843 mirror = nfs_pgio_current_mirror(desc); nfs_generic_pg_pgios()
845 hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); nfs_generic_pg_pgios()
849 desc->pg_completion_ops->error_cleanup(&mirror->pg_list); nfs_generic_pg_pgios()
852 nfs_pgheader_init(desc, hdr, nfs_pgio_header_free); nfs_generic_pg_pgios()
853 ret = nfs_generic_pgio(desc, hdr); nfs_generic_pg_pgios()
859 desc->pg_rpc_callops, nfs_generic_pg_pgios()
860 desc->pg_ioflags, 0); nfs_generic_pg_pgios()
968 * @desc: destination io descriptor
972 * existing list of pages 'desc'.
974 static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, nfs_pageio_do_add_request() argument
977 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_pageio_do_add_request()
984 if (desc->pg_ops->pg_init) nfs_pageio_do_add_request()
985 desc->pg_ops->pg_init(desc, req); nfs_pageio_do_add_request()
988 if (!nfs_can_coalesce_requests(prev, req, desc)) nfs_pageio_do_add_request()
999 static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) nfs_pageio_doio() argument
1001 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_pageio_doio()
1005 int error = desc->pg_ops->pg_doio(desc); nfs_pageio_doio()
1007 desc->pg_error = error; nfs_pageio_doio()
1019 * @desc: destination io descriptor
1026 * existing list of pages 'desc'.
1028 static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, __nfs_pageio_add_request() argument
1031 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); __nfs_pageio_add_request()
1045 if (!nfs_pageio_do_add_request(desc, subreq)) { __nfs_pageio_add_request()
1052 desc->pg_moreio = 1; __nfs_pageio_add_request()
1053 nfs_pageio_doio(desc); __nfs_pageio_add_request()
1054 if (desc->pg_error < 0) __nfs_pageio_add_request()
1087 desc->pg_error = PTR_ERR(subreq); __nfs_pageio_add_request()
1092 static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) nfs_do_recoalesce() argument
1094 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_do_recoalesce()
1104 desc->pg_moreio = 0; nfs_do_recoalesce()
1111 if (__nfs_pageio_add_request(desc, req)) nfs_do_recoalesce()
1113 if (desc->pg_error < 0) { nfs_do_recoalesce()
1124 static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc, nfs_pageio_add_request_mirror() argument
1130 ret = __nfs_pageio_add_request(desc, req); nfs_pageio_add_request_mirror()
1133 if (desc->pg_error < 0) nfs_pageio_add_request_mirror()
1135 ret = nfs_do_recoalesce(desc); nfs_pageio_add_request_mirror()
1141 int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, nfs_pageio_add_request() argument
1152 nfs_pageio_setup_mirroring(desc, req); nfs_pageio_add_request()
1154 for (midx = 0; midx < desc->pg_mirror_count; midx++) { nfs_pageio_add_request()
1179 if (nfs_pgio_has_mirroring(desc)) nfs_pageio_add_request()
1180 desc->pg_mirror_idx = midx; nfs_pageio_add_request()
1181 if (!nfs_pageio_add_request_mirror(desc, dupreq)) nfs_pageio_add_request()
1191 * @desc: pointer to io descriptor
1193 static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, nfs_pageio_complete_mirror() argument
1196 struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx]; nfs_pageio_complete_mirror()
1197 u32 restore_idx = desc->pg_mirror_idx; nfs_pageio_complete_mirror()
1199 if (nfs_pgio_has_mirroring(desc)) nfs_pageio_complete_mirror()
1200 desc->pg_mirror_idx = mirror_idx; nfs_pageio_complete_mirror()
1202 nfs_pageio_doio(desc); nfs_pageio_complete_mirror()
1205 if (!nfs_do_recoalesce(desc)) nfs_pageio_complete_mirror()
1208 desc->pg_mirror_idx = restore_idx; nfs_pageio_complete_mirror()
1214 * @desc - the pageio descriptor to add requests to
1216 * Try to move each request (nfs_page) from @hdr to @desc then attempt
1221 int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, nfs_pageio_resend() argument
1226 desc->pg_dreq = hdr->dreq; nfs_pageio_resend()
1231 if (!nfs_pageio_add_request(desc, req)) nfs_pageio_resend()
1234 nfs_pageio_complete(desc); nfs_pageio_resend()
1245 * @desc: pointer to io descriptor
1247 void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) nfs_pageio_complete() argument
1251 for (midx = 0; midx < desc->pg_mirror_count; midx++) nfs_pageio_complete()
1252 nfs_pageio_complete_mirror(desc, midx); nfs_pageio_complete()
1254 if (desc->pg_ops->pg_cleanup) nfs_pageio_complete()
1255 desc->pg_ops->pg_cleanup(desc); nfs_pageio_complete()
1256 nfs_pageio_cleanup_mirroring(desc); nfs_pageio_complete()
1261 * @desc: pointer to io descriptor
1268 * is not contiguous with the existing list of pages in 'desc'.
1270 void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) nfs_pageio_cond_complete() argument
1276 for (midx = 0; midx < desc->pg_mirror_count; midx++) { nfs_pageio_cond_complete()
1277 mirror = &desc->pg_mirrors[midx]; nfs_pageio_cond_complete()
1281 nfs_pageio_complete_mirror(desc, midx); nfs_pageio_cond_complete()
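The pagelist.c hits above all serve one calling sequence: nfs_pageio_init() sets up the descriptor, nfs_pageio_add_request() coalesces requests (flushing early via nfs_pageio_doio() when one will not coalesce), and nfs_pageio_complete() flushes the rest. A sketch of that sequence; the three ops pointers are placeholders for the real read/write ops tables:

    #include <linux/list.h>
    #include <linux/nfs_page.h>

    /* placeholders: real callers pass the read or write ops tables */
    static const struct nfs_pageio_ops *demo_pg_ops;
    static const struct nfs_pgio_completion_ops *demo_compl_ops;
    static const struct nfs_rw_ops *demo_rw_ops;

    static void demo_flush_requests(struct inode *inode, struct list_head *reqs)
    {
            struct nfs_pageio_descriptor desc;
            struct nfs_page *req, *tmp;

            nfs_pageio_init(&desc, inode, demo_pg_ops, demo_compl_ops,
                            demo_rw_ops, PAGE_SIZE, 0);

            list_for_each_entry_safe(req, tmp, reqs, wb_list) {
                    if (!nfs_pageio_add_request(&desc, req))
                            break;  /* desc.pg_error records why */
            }

            nfs_pageio_complete(&desc);
    }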
/linux-4.1.27/crypto/
H A Dshash.c74 static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, shash_update_unaligned() argument
77 struct crypto_shash *tfm = desc->tfm; shash_update_unaligned()
91 err = shash->update(desc, buf, unaligned_len); shash_update_unaligned()
95 shash->update(desc, data + unaligned_len, len - unaligned_len); shash_update_unaligned()
98 int crypto_shash_update(struct shash_desc *desc, const u8 *data, crypto_shash_update() argument
101 struct crypto_shash *tfm = desc->tfm; crypto_shash_update()
106 return shash_update_unaligned(desc, data, len); crypto_shash_update()
108 return shash->update(desc, data, len); crypto_shash_update()
112 static int shash_final_unaligned(struct shash_desc *desc, u8 *out) shash_final_unaligned() argument
114 struct crypto_shash *tfm = desc->tfm; shash_final_unaligned()
123 err = shash->final(desc, buf); shash_final_unaligned()
134 int crypto_shash_final(struct shash_desc *desc, u8 *out) crypto_shash_final() argument
136 struct crypto_shash *tfm = desc->tfm; crypto_shash_final()
141 return shash_final_unaligned(desc, out); crypto_shash_final()
143 return shash->final(desc, out); crypto_shash_final()
147 static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data, shash_finup_unaligned() argument
150 return crypto_shash_update(desc, data, len) ?: shash_finup_unaligned()
151 crypto_shash_final(desc, out); shash_finup_unaligned()
154 int crypto_shash_finup(struct shash_desc *desc, const u8 *data, crypto_shash_finup() argument
157 struct crypto_shash *tfm = desc->tfm; crypto_shash_finup()
162 return shash_finup_unaligned(desc, data, len, out); crypto_shash_finup()
164 return shash->finup(desc, data, len, out); crypto_shash_finup()
168 static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, shash_digest_unaligned() argument
171 return crypto_shash_init(desc) ?: shash_digest_unaligned()
172 crypto_shash_finup(desc, data, len, out); shash_digest_unaligned()
175 int crypto_shash_digest(struct shash_desc *desc, const u8 *data, crypto_shash_digest() argument
178 struct crypto_shash *tfm = desc->tfm; crypto_shash_digest()
183 return shash_digest_unaligned(desc, data, len, out); crypto_shash_digest()
185 return shash->digest(desc, data, len, out); crypto_shash_digest()
189 static int shash_default_export(struct shash_desc *desc, void *out) shash_default_export() argument
191 memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); shash_default_export()
195 static int shash_default_import(struct shash_desc *desc, const void *in) shash_default_import() argument
197 memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm)); shash_default_import()
212 struct shash_desc *desc = ahash_request_ctx(req); shash_async_init() local
214 desc->tfm = *ctx; shash_async_init()
215 desc->flags = req->base.flags; shash_async_init()
217 return crypto_shash_init(desc); shash_async_init()
220 int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) shash_ahash_update() argument
227 nbytes = crypto_shash_update(desc, walk.data, nbytes); shash_ahash_update()
243 int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) shash_ahash_finup() argument
250 return crypto_shash_final(desc, req->result); shash_ahash_finup()
254 crypto_shash_finup(desc, walk.data, nbytes, shash_ahash_finup()
256 crypto_shash_update(desc, walk.data, nbytes); shash_ahash_finup()
267 struct shash_desc *desc = ahash_request_ctx(req); shash_async_finup() local
269 desc->tfm = *ctx; shash_async_finup()
270 desc->flags = req->base.flags; shash_async_finup()
272 return shash_ahash_finup(req, desc); shash_async_finup()
275 int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) shash_ahash_digest() argument
286 err = crypto_shash_digest(desc, data + offset, nbytes, shash_ahash_digest()
289 crypto_yield(desc->flags); shash_ahash_digest()
291 err = crypto_shash_init(desc) ?: shash_ahash_digest()
292 shash_ahash_finup(req, desc); shash_ahash_digest()
301 struct shash_desc *desc = ahash_request_ctx(req); shash_async_digest() local
303 desc->tfm = *ctx; shash_async_digest()
304 desc->flags = req->base.flags; shash_async_digest()
306 return shash_ahash_digest(req, desc); shash_async_digest()
317 struct shash_desc *desc = ahash_request_ctx(req); shash_async_import() local
319 desc->tfm = *ctx; shash_async_import()
320 desc->flags = req->base.flags; shash_async_import()
322 return crypto_shash_import(desc, in); shash_async_import()
375 struct shash_desc *desc = *descp; shash_compat_setkey() local
377 return crypto_shash_setkey(desc->tfm, key, keylen); shash_compat_setkey()
383 struct shash_desc *desc = *descp; shash_compat_init() local
385 desc->flags = hdesc->flags; shash_compat_init()
387 return crypto_shash_init(desc); shash_compat_init()
394 struct shash_desc *desc = *descp; shash_compat_update() local
400 nbytes = crypto_shash_update(desc, walk.data, nbytes); shash_compat_update()
420 struct shash_desc *desc = *descp; shash_compat_digest() local
423 desc->flags = hdesc->flags; shash_compat_digest()
426 err = crypto_shash_digest(desc, data + offset, nbytes, out); shash_compat_digest()
428 crypto_yield(desc->flags); shash_compat_digest()
449 struct shash_desc *desc = *descp; crypto_exit_shash_ops_compat() local
451 crypto_free_shash(desc->tfm); crypto_exit_shash_ops_compat()
452 kzfree(desc); crypto_exit_shash_ops_compat()
462 struct shash_desc *desc; crypto_init_shash_ops_compat() local
473 desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash), crypto_init_shash_ops_compat()
475 if (!desc) { crypto_init_shash_ops_compat()
480 *descp = desc; crypto_init_shash_ops_compat()
481 desc->tfm = shash; crypto_init_shash_ops_compat()
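The crypto_shash_* entry points above make up the synchronous hash API. A minimal sketch of a one-shot digest, assuming the caller already allocated tfm via crypto_alloc_shash("sha1", 0, 0) and will free it with crypto_free_shash(); example_shash_digest is an invented name:

        static int example_shash_digest(struct crypto_shash *tfm,
                                        const u8 *data, unsigned int len, u8 *out)
        {
                SHASH_DESC_ON_STACK(desc, tfm);

                desc->tfm = tfm;
                desc->flags = 0;   /* set CRYPTO_TFM_REQ_MAY_SLEEP if sleeping is OK */

                /* one-shot init + update + final, as crypto_shash_digest() above */
                return crypto_shash_digest(desc, data, len, out);
        }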
H A Dsha1_generic.c41 int crypto_sha1_update(struct shash_desc *desc, const u8 *data, crypto_sha1_update() argument
44 return sha1_base_do_update(desc, data, len, sha1_generic_block_fn); crypto_sha1_update()
48 static int sha1_final(struct shash_desc *desc, u8 *out) sha1_final() argument
50 sha1_base_do_finalize(desc, sha1_generic_block_fn); sha1_final()
51 return sha1_base_finish(desc, out); sha1_final()
54 int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, crypto_sha1_finup() argument
57 sha1_base_do_update(desc, data, len, sha1_generic_block_fn); crypto_sha1_finup()
58 return sha1_final(desc, out); crypto_sha1_finup()
H A Dablk_helper.c58 struct blkcipher_desc desc; __ablk_encrypt() local
60 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); __ablk_encrypt()
61 desc.info = req->info; __ablk_encrypt()
62 desc.flags = 0; __ablk_encrypt()
64 return crypto_blkcipher_crt(desc.tfm)->encrypt( __ablk_encrypt()
65 &desc, req->dst, req->src, req->nbytes); __ablk_encrypt()
102 struct blkcipher_desc desc; ablk_decrypt() local
104 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); ablk_decrypt()
105 desc.info = req->info; ablk_decrypt()
106 desc.flags = 0; ablk_decrypt()
108 return crypto_blkcipher_crt(desc.tfm)->decrypt( ablk_decrypt()
109 &desc, req->dst, req->src, req->nbytes); ablk_decrypt()
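ablk_helper builds a temporary blkcipher_desc around the cryptd child; the same three-field setup is how ordinary callers drive the legacy synchronous blkcipher API. A minimal sketch with key setup elided and invented names:

        static int example_cbc_encrypt(struct crypto_blkcipher *tfm, u8 *iv,
                                       struct scatterlist *dst,
                                       struct scatterlist *src, unsigned int nbytes)
        {
                struct blkcipher_desc desc = {
                        .tfm   = tfm,  /* e.g. crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC) */
                        .info  = iv,   /* IV, updated in place as the walk advances */
                        .flags = CRYPTO_TFM_REQ_MAY_SLEEP,
                };

                return crypto_blkcipher_encrypt_iv(&desc, dst, src, nbytes);
        }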
H A Dcrc32.c76 static int crc32_init(struct shash_desc *desc) crc32_init() argument
78 u32 *mctx = crypto_shash_ctx(desc->tfm); crc32_init()
79 u32 *crcp = shash_desc_ctx(desc); crc32_init()
86 static int crc32_update(struct shash_desc *desc, const u8 *data, crc32_update() argument
89 u32 *crcp = shash_desc_ctx(desc); crc32_update()
103 static int crc32_finup(struct shash_desc *desc, const u8 *data, crc32_finup() argument
106 return __crc32_finup(shash_desc_ctx(desc), data, len, out); crc32_finup()
109 static int crc32_final(struct shash_desc *desc, u8 *out) crc32_final() argument
111 u32 *crcp = shash_desc_ctx(desc); crc32_final()
117 static int crc32_digest(struct shash_desc *desc, const u8 *data, crc32_digest() argument
120 return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len, crc32_digest()
H A Dcrct10dif_generic.c42 static int chksum_init(struct shash_desc *desc) chksum_init() argument
44 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_init()
51 static int chksum_update(struct shash_desc *desc, const u8 *data, chksum_update() argument
54 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_update()
60 static int chksum_final(struct shash_desc *desc, u8 *out) chksum_final() argument
62 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_final()
75 static int chksum_finup(struct shash_desc *desc, const u8 *data, chksum_finup() argument
78 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_finup()
83 static int chksum_digest(struct shash_desc *desc, const u8 *data, chksum_digest() argument
86 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_digest()
H A Dmd5.c50 static int md5_init(struct shash_desc *desc) md5_init() argument
52 struct md5_state *mctx = shash_desc_ctx(desc); md5_init()
63 static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) md5_update() argument
65 struct md5_state *mctx = shash_desc_ctx(desc); md5_update()
95 static int md5_final(struct shash_desc *desc, u8 *out) md5_final() argument
97 struct md5_state *mctx = shash_desc_ctx(desc); md5_final()
123 static int md5_export(struct shash_desc *desc, void *out) md5_export() argument
125 struct md5_state *ctx = shash_desc_ctx(desc); md5_export()
131 static int md5_import(struct shash_desc *desc, const void *in) md5_import() argument
133 struct md5_state *ctx = shash_desc_ctx(desc); md5_import()
H A Dhmac.c91 struct shash_desc *desc = shash_desc_ctx(pdesc); hmac_export() local
93 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; hmac_export()
95 return crypto_shash_export(desc, out); hmac_export()
100 struct shash_desc *desc = shash_desc_ctx(pdesc); hmac_import() local
103 desc->tfm = ctx->hash; hmac_import()
104 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; hmac_import()
106 return crypto_shash_import(desc, in); hmac_import()
117 struct shash_desc *desc = shash_desc_ctx(pdesc); hmac_update() local
119 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; hmac_update()
121 return crypto_shash_update(desc, data, nbytes); hmac_update()
130 struct shash_desc *desc = shash_desc_ctx(pdesc); hmac_final() local
132 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; hmac_final()
134 return crypto_shash_final(desc, out) ?: hmac_final()
135 crypto_shash_import(desc, opad) ?: hmac_final()
136 crypto_shash_finup(desc, out, ds, out); hmac_final()
147 struct shash_desc *desc = shash_desc_ctx(pdesc); hmac_finup() local
149 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; hmac_finup()
151 return crypto_shash_finup(desc, data, nbytes, out) ?: hmac_finup()
152 crypto_shash_import(desc, opad) ?: hmac_finup()
153 crypto_shash_finup(desc, out, ds, out); hmac_finup()
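hmac_final()/hmac_finup() above chain finup, import(opad), finup on the inner desc; consumers never see that and simply key the tfm. A minimal sketch of using the hmac template through the shash API, with invented names:

        static int example_hmac_sha1(const u8 *key, unsigned int keylen,
                                     const u8 *data, unsigned int len, u8 *out)
        {
                struct crypto_shash *tfm;
                int err;

                tfm = crypto_alloc_shash("hmac(sha1)", 0, 0);
                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                err = crypto_shash_setkey(tfm, key, keylen);
                if (!err) {
                        SHASH_DESC_ON_STACK(desc, tfm);

                        desc->tfm = tfm;
                        desc->flags = 0;
                        err = crypto_shash_digest(desc, data, len, out);
                }

                crypto_free_shash(tfm);
                return err;
        }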
H A Dcts.c74 struct blkcipher_desc *desc, cts_cbc_encrypt()
80 int bsize = crypto_blkcipher_blocksize(desc->tfm); cts_cbc_encrypt()
98 memcpy(iv, desc->info, bsize); cts_cbc_encrypt()
102 lcldesc.flags = desc->flags; cts_cbc_encrypt()
120 memcpy(desc->info, tmp2, bsize); cts_cbc_encrypt()
125 static int crypto_cts_encrypt(struct blkcipher_desc *desc, crypto_cts_encrypt() argument
129 struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); crypto_cts_encrypt()
130 int bsize = crypto_blkcipher_blocksize(desc->tfm); crypto_cts_encrypt()
137 lcldesc.info = desc->info; crypto_cts_encrypt()
138 lcldesc.flags = desc->flags; crypto_cts_encrypt()
143 err = cts_cbc_encrypt(ctx, desc, dst, src, 0, nbytes); crypto_cts_encrypt()
150 err = cts_cbc_encrypt(ctx, desc, dst, src, crypto_cts_encrypt()
160 struct blkcipher_desc *desc, cts_cbc_decrypt()
166 int bsize = crypto_blkcipher_blocksize(desc->tfm); cts_cbc_decrypt()
185 lcldesc.flags = desc->flags; cts_cbc_decrypt()
212 crypto_xor(d, desc->info, bsize); cts_cbc_decrypt()
216 memcpy(desc->info, s, bsize); cts_cbc_decrypt()
220 static int crypto_cts_decrypt(struct blkcipher_desc *desc, crypto_cts_decrypt() argument
224 struct crypto_cts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); crypto_cts_decrypt()
225 int bsize = crypto_blkcipher_blocksize(desc->tfm); crypto_cts_decrypt()
232 lcldesc.info = desc->info; crypto_cts_decrypt()
233 lcldesc.flags = desc->flags; crypto_cts_decrypt()
238 err = cts_cbc_decrypt(ctx, desc, dst, src, 0, nbytes); crypto_cts_decrypt()
245 err = cts_cbc_decrypt(ctx, desc, dst, src, crypto_cts_decrypt()
73 cts_cbc_encrypt(struct crypto_cts_ctx *ctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int offset, unsigned int nbytes) cts_cbc_encrypt() argument
159 cts_cbc_decrypt(struct crypto_cts_ctx *ctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int offset, unsigned int nbytes) cts_cbc_decrypt() argument
/linux-4.1.27/arch/arm/mach-imx/
H A Dmach-pcm037_eet.c72 .desc = "Wheel Manual",
78 .desc = "Wheel AF",
84 .desc = "Wheel View",
90 .desc = "Wheel Menu",
96 .desc = "Nav Pad Up",
102 .desc = "Nav Pad Right",
108 .desc = "Nav Pad Down",
114 .desc = "Nav Pad Left",
120 .desc = "Nav Pad Ok",
126 .desc = "Wheel Off",
132 .desc = "Focus Forward",
138 .desc = "Focus Backward",
144 .desc = "Release Half",
150 .desc = "Release Full",
/linux-4.1.27/arch/powerpc/include/asm/
H A Dqe_ic.h62 void (*low_handler)(unsigned int irq, struct irq_desc *desc),
63 void (*high_handler)(unsigned int irq, struct irq_desc *desc));
68 void (*low_handler)(unsigned int irq, struct irq_desc *desc), qe_ic_init()
69 void (*high_handler)(unsigned int irq, struct irq_desc *desc)) qe_ic_init()
82 struct irq_desc *desc) qe_ic_cascade_low_ipic()
84 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); qe_ic_cascade_low_ipic()
92 struct irq_desc *desc) qe_ic_cascade_high_ipic()
94 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); qe_ic_cascade_high_ipic()
102 struct irq_desc *desc) qe_ic_cascade_low_mpic()
104 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); qe_ic_cascade_low_mpic()
106 struct irq_chip *chip = irq_desc_get_chip(desc); qe_ic_cascade_low_mpic()
111 chip->irq_eoi(&desc->irq_data); qe_ic_cascade_low_mpic()
115 struct irq_desc *desc) qe_ic_cascade_high_mpic()
117 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); qe_ic_cascade_high_mpic()
119 struct irq_chip *chip = irq_desc_get_chip(desc); qe_ic_cascade_high_mpic()
124 chip->irq_eoi(&desc->irq_data); qe_ic_cascade_high_mpic()
128 struct irq_desc *desc) qe_ic_cascade_muxed_mpic()
130 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); qe_ic_cascade_muxed_mpic()
132 struct irq_chip *chip = irq_desc_get_chip(desc); qe_ic_cascade_muxed_mpic()
141 chip->irq_eoi(&desc->irq_data); qe_ic_cascade_muxed_mpic()
67 qe_ic_init(struct device_node *node, unsigned int flags, void (*low_handler)(unsigned int irq, struct irq_desc *desc), void (*high_handler)(unsigned int irq, struct irq_desc *desc)) qe_ic_init() argument
81 qe_ic_cascade_low_ipic(unsigned int irq, struct irq_desc *desc) qe_ic_cascade_low_ipic() argument
91 qe_ic_cascade_high_ipic(unsigned int irq, struct irq_desc *desc) qe_ic_cascade_high_ipic() argument
101 qe_ic_cascade_low_mpic(unsigned int irq, struct irq_desc *desc) qe_ic_cascade_low_mpic() argument
114 qe_ic_cascade_high_mpic(unsigned int irq, struct irq_desc *desc) qe_ic_cascade_high_mpic() argument
127 qe_ic_cascade_muxed_mpic(unsigned int irq, struct irq_desc *desc) qe_ic_cascade_muxed_mpic() argument
/linux-4.1.27/arch/x86/include/asm/
H A Ddesc.h11 static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info) fill_ldt() argument
13 desc->limit0 = info->limit & 0x0ffff; fill_ldt()
15 desc->base0 = (info->base_addr & 0x0000ffff); fill_ldt()
16 desc->base1 = (info->base_addr & 0x00ff0000) >> 16; fill_ldt()
18 desc->type = (info->read_exec_only ^ 1) << 1; fill_ldt()
19 desc->type |= info->contents << 2; fill_ldt()
21 desc->s = 1; fill_ldt()
22 desc->dpl = 0x3; fill_ldt()
23 desc->p = info->seg_not_present ^ 1; fill_ldt()
24 desc->limit = (info->limit & 0xf0000) >> 16; fill_ldt()
25 desc->avl = info->useable; fill_ldt()
26 desc->d = info->seg_32bit; fill_ldt()
27 desc->g = info->limit_in_pages; fill_ldt()
29 desc->base2 = (info->base_addr & 0xff000000) >> 24; fill_ldt()
34 desc->l = 0; fill_ldt()
83 const u32 *desc = ptr; desc_empty() local
85 return !(desc[0] | desc[1]); desc_empty()
104 #define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc)
105 #define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type)
124 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) native_write_ldt_entry() argument
126 memcpy(&ldt[entry], desc, 8); native_write_ldt_entry()
130 native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type) native_write_gdt_entry() argument
140 memcpy(&gdt[entry], desc, size); native_write_gdt_entry()
143 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, pack_descriptor() argument
147 desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); pack_descriptor()
148 desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | pack_descriptor()
151 desc->p = 1; pack_descriptor()
158 struct ldttss_desc64 *desc = d; set_tssldt_descriptor() local
160 memset(desc, 0, sizeof(*desc)); set_tssldt_descriptor()
162 desc->limit0 = size & 0xFFFF; set_tssldt_descriptor()
163 desc->base0 = PTR_LOW(addr); set_tssldt_descriptor()
164 desc->base1 = PTR_MIDDLE(addr) & 0xFF; set_tssldt_descriptor()
165 desc->type = type; set_tssldt_descriptor()
166 desc->p = 1; set_tssldt_descriptor()
167 desc->limit1 = (size >> 16) & 0xF; set_tssldt_descriptor()
168 desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF; set_tssldt_descriptor()
169 desc->base3 = PTR_HIGH(addr); set_tssldt_descriptor()
283 static inline unsigned long get_desc_base(const struct desc_struct *desc) get_desc_base() argument
285 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); get_desc_base()
288 static inline void set_desc_base(struct desc_struct *desc, unsigned long base) set_desc_base() argument
290 desc->base0 = base & 0xffff; set_desc_base()
291 desc->base1 = (base >> 16) & 0xff; set_desc_base()
292 desc->base2 = (base >> 24) & 0xff; set_desc_base()
295 static inline unsigned long get_desc_limit(const struct desc_struct *desc) get_desc_limit() argument
297 return desc->limit0 | (desc->limit << 16); get_desc_limit()
300 static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) set_desc_limit() argument
302 desc->limit0 = limit & 0xffff; set_desc_limit()
303 desc->limit = (limit >> 16) & 0xf; set_desc_limit()
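The accessors above split a 32-bit base and 20-bit limit across the descriptor's base0/base1/base2 and limit0/limit fields. A minimal sketch showing the values round-trip; the values and function name are arbitrary:

        static void example_pack_segment(struct desc_struct *d)
        {
                set_desc_base(d, 0x12345678);
                set_desc_limit(d, 0xabcde);

                BUG_ON(get_desc_base(d) != 0x12345678);
                BUG_ON(get_desc_limit(d) != 0xabcde);
        }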
/linux-4.1.27/arch/arm/mach-iop13xx/include/mach/
H A Dadma.h221 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, iop_desc_get_byte_count() argument
224 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_get_byte_count()
228 static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc, iop_desc_get_src_addr() argument
232 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_get_src_addr()
236 static inline u32 iop_desc_get_src_count(struct iop_adma_desc_slot *desc, iop_desc_get_src_count() argument
239 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_get_src_count()
244 iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags) iop_desc_init_memcpy() argument
246 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_memcpy()
260 iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags) iop_desc_init_memset() argument
262 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_memset()
278 iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_xor() argument
281 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_xor()
299 iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_zero_sum() argument
302 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_zero_sum()
321 iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_pq() argument
324 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_pq()
340 iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_pq_zero_sum() argument
343 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_pq_zero_sum()
360 static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc, iop_desc_set_byte_count() argument
364 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_byte_count()
369 iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) iop_desc_set_zero_sum_byte_count() argument
371 int slots_per_op = desc->slots_per_op; iop_desc_set_zero_sum_byte_count()
372 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; iop_desc_set_zero_sum_byte_count()
394 static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, iop_desc_set_dest_addr() argument
398 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_dest_addr()
404 iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr) iop_desc_set_pq_addr() argument
406 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_pq_addr()
413 static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc, iop_desc_set_memcpy_src_addr() argument
416 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_memcpy_src_addr()
421 static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc, iop_desc_set_xor_src_addr() argument
424 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; iop_desc_set_xor_src_addr()
425 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; iop_desc_set_xor_src_addr()
441 iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx, iop_desc_set_pq_src_addr() argument
444 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; iop_desc_set_pq_src_addr()
445 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; iop_desc_set_pq_src_addr()
464 iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, iop_desc_init_interrupt() argument
467 iop_desc_init_memcpy(desc, 1); iop_desc_init_interrupt()
468 iop_desc_set_byte_count(desc, chan, 0); iop_desc_init_interrupt()
469 iop_desc_set_dest_addr(desc, chan, 0); iop_desc_init_interrupt()
470 iop_desc_set_memcpy_src_addr(desc, 0); iop_desc_init_interrupt()
477 iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx, iop_desc_set_pq_zero_sum_addr() argument
480 iop_desc_set_xor_src_addr(desc, pq_idx, src[pq_idx]); iop_desc_set_pq_zero_sum_addr()
481 iop_desc_set_xor_src_addr(desc, pq_idx+1, src[pq_idx+1]); iop_desc_set_pq_zero_sum_addr()
484 static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, iop_desc_set_next_desc() argument
487 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_next_desc()
493 static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc) iop_desc_get_next_desc() argument
495 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_get_next_desc()
499 static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc) iop_desc_clear_next_desc() argument
501 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_clear_next_desc()
505 static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, iop_desc_set_block_fill_val() argument
508 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_block_fill_val()
513 iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) iop_desc_get_zero_result() argument
515 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_get_zero_result()
/linux-4.1.27/drivers/pinctrl/
H A Dpinmux.c35 const struct pinmux_ops *ops = pctldev->desc->pmxops; pinmux_check_ops()
87 struct pin_desc *desc; pin_request() local
88 const struct pinmux_ops *ops = pctldev->desc->pmxops; pin_request()
91 desc = pin_desc_get(pctldev, pin); pin_request()
92 if (desc == NULL) { pin_request()
100 pin, desc->name, owner); pin_request()
104 if (desc->gpio_owner) { pin_request()
107 desc->name, desc->gpio_owner, owner); pin_request()
111 desc->gpio_owner = owner; pin_request()
113 if (desc->mux_usecount && strcmp(desc->mux_owner, owner)) { pin_request()
116 desc->name, desc->mux_owner, owner); pin_request()
120 desc->mux_usecount++; pin_request()
121 if (desc->mux_usecount > 1) pin_request()
124 desc->mux_owner = owner; pin_request()
156 desc->gpio_owner = NULL; pin_request()
158 desc->mux_usecount--; pin_request()
159 if (!desc->mux_usecount) pin_request()
160 desc->mux_owner = NULL; pin_request()
185 const struct pinmux_ops *ops = pctldev->desc->pmxops; pin_free()
186 struct pin_desc *desc; pin_free() local
189 desc = pin_desc_get(pctldev, pin); pin_free()
190 if (desc == NULL) { pin_free()
200 if (WARN_ON(!desc->mux_usecount)) pin_free()
202 desc->mux_usecount--; pin_free()
203 if (desc->mux_usecount) pin_free()
217 owner = desc->gpio_owner; pin_free()
218 desc->gpio_owner = NULL; pin_free()
220 owner = desc->mux_owner; pin_free()
221 desc->mux_owner = NULL; pin_free()
222 desc->mux_setting = NULL; pin_free()
284 ops = pctldev->desc->pmxops; pinmux_gpio_direction()
297 const struct pinmux_ops *ops = pctldev->desc->pmxops; pinmux_func_name_to_selector()
321 const struct pinmux_ops *pmxops = pctldev->desc->pmxops; pinmux_map_to_setting()
392 const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; pinmux_enable_setting()
393 const struct pinmux_ops *ops = pctldev->desc->pmxops; pinmux_enable_setting()
398 struct pin_desc *desc; pinmux_enable_setting() local
423 desc = pin_desc_get(pctldev, pins[i]); pinmux_enable_setting()
424 pname = desc ? desc->name : "non-existing"; pinmux_enable_setting()
438 desc = pin_desc_get(pctldev, pins[i]); pinmux_enable_setting()
439 if (desc == NULL) { pinmux_enable_setting()
441 "could not get pin desc for pin %d\n", pinmux_enable_setting()
445 desc->mux_setting = &(setting->data.mux); pinmux_enable_setting()
458 desc = pin_desc_get(pctldev, pins[i]); pinmux_enable_setting()
459 if (desc) pinmux_enable_setting()
460 desc->mux_setting = NULL; pinmux_enable_setting()
473 const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; pinmux_disable_setting()
478 struct pin_desc *desc; pinmux_disable_setting() local
497 desc = pin_desc_get(pctldev, pins[i]); pinmux_disable_setting()
498 if (desc == NULL) { pinmux_disable_setting()
500 "could not get pin desc for pin %d\n", pinmux_disable_setting()
504 if (desc->mux_setting == &(setting->data.mux)) { pinmux_disable_setting()
505 desc->mux_setting = NULL; pinmux_disable_setting()
517 pins[i], desc->name, gname); pinmux_disable_setting()
528 const struct pinmux_ops *pmxops = pctldev->desc->pmxops; pinmux_functions_show()
567 const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; pinmux_pins_show()
568 const struct pinmux_ops *pmxops = pctldev->desc->pmxops; pinmux_pins_show()
580 for (i = 0; i < pctldev->desc->npins; i++) { pinmux_pins_show()
581 struct pin_desc *desc; pinmux_pins_show() local
584 pin = pctldev->desc->pins[i].number; pinmux_pins_show()
585 desc = pin_desc_get(pctldev, pin); pinmux_pins_show()
587 if (desc == NULL) pinmux_pins_show()
590 if (desc->mux_owner && pinmux_pins_show()
591 !strcmp(desc->mux_owner, pinctrl_dev_get_name(pctldev))) pinmux_pins_show()
595 desc->name ? desc->name : "unnamed", pinmux_pins_show()
596 desc->mux_owner ? desc->mux_owner pinmux_pins_show()
598 desc->gpio_owner ? desc->gpio_owner pinmux_pins_show()
602 if (desc->mux_setting) pinmux_pins_show()
605 desc->mux_setting->func), pinmux_pins_show()
607 desc->mux_setting->group)); pinmux_pins_show()
628 const struct pinmux_ops *pmxops = pctldev->desc->pmxops; pinmux_show_setting()
629 const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; pinmux_show_setting()
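Every pctldev->desc dereference above reads the pinctrl_desc a driver handed in at registration time. A minimal sketch of that registration side, assuming example_pctlops/example_pmxops are defined elsewhere; all names are invented:

        static const struct pinctrl_pin_desc example_pins[] = {
                PINCTRL_PIN(0, "EX_GPIO0"),
                PINCTRL_PIN(1, "EX_GPIO1"),
        };

        static struct pinctrl_desc example_pctl_desc = {
                .name    = "example-pinctrl",
                .pins    = example_pins,
                .npins   = ARRAY_SIZE(example_pins),
                .pctlops = &example_pctlops,   /* hypothetical pinctrl_ops */
                .pmxops  = &example_pmxops,    /* hypothetical pinmux_ops */
                .owner   = THIS_MODULE,
        };

        /* in probe(): pctldev = pinctrl_register(&example_pctl_desc, &pdev->dev, priv); */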
/linux-4.1.27/drivers/usb/core/
H A Dconfig.c50 struct usb_ss_ep_comp_descriptor *desc; usb_parse_ss_endpoint_companion() local
56 desc = (struct usb_ss_ep_comp_descriptor *) buffer; usb_parse_ss_endpoint_companion()
57 if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || usb_parse_ss_endpoint_companion()
62 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
73 if (usb_endpoint_xfer_isoc(&ep->desc) || usb_parse_ss_endpoint_companion()
74 usb_endpoint_xfer_int(&ep->desc)) usb_parse_ss_endpoint_companion()
76 ep->desc.wMaxPacketSize; usb_parse_ss_endpoint_companion()
80 memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE); usb_parse_ss_endpoint_companion()
83 if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) { usb_parse_ss_endpoint_companion()
86 "setting to zero\n", desc->bMaxBurst, usb_parse_ss_endpoint_companion()
87 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
89 } else if (desc->bMaxBurst > 15) { usb_parse_ss_endpoint_companion()
92 "setting to 15\n", desc->bMaxBurst, usb_parse_ss_endpoint_companion()
93 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
97 if ((usb_endpoint_xfer_control(&ep->desc) || usb_parse_ss_endpoint_companion()
98 usb_endpoint_xfer_int(&ep->desc)) && usb_parse_ss_endpoint_companion()
99 desc->bmAttributes != 0) { usb_parse_ss_endpoint_companion()
103 usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk", usb_parse_ss_endpoint_companion()
104 desc->bmAttributes, usb_parse_ss_endpoint_companion()
105 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
107 } else if (usb_endpoint_xfer_bulk(&ep->desc) && usb_parse_ss_endpoint_companion()
108 desc->bmAttributes > 16) { usb_parse_ss_endpoint_companion()
112 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
114 } else if (usb_endpoint_xfer_isoc(&ep->desc) && usb_parse_ss_endpoint_companion()
115 USB_SS_MULT(desc->bmAttributes) > 3) { usb_parse_ss_endpoint_companion()
119 USB_SS_MULT(desc->bmAttributes), usb_parse_ss_endpoint_companion()
120 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
124 if (usb_endpoint_xfer_isoc(&ep->desc)) usb_parse_ss_endpoint_companion()
125 max_tx = (desc->bMaxBurst + 1) * usb_parse_ss_endpoint_companion()
126 (USB_SS_MULT(desc->bmAttributes)) * usb_parse_ss_endpoint_companion()
127 usb_endpoint_maxp(&ep->desc); usb_parse_ss_endpoint_companion()
128 else if (usb_endpoint_xfer_int(&ep->desc)) usb_parse_ss_endpoint_companion()
129 max_tx = usb_endpoint_maxp(&ep->desc) * usb_parse_ss_endpoint_companion()
130 (desc->bMaxBurst + 1); usb_parse_ss_endpoint_companion()
133 if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { usb_parse_ss_endpoint_companion()
137 usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int", usb_parse_ss_endpoint_companion()
138 le16_to_cpu(desc->wBytesPerInterval), usb_parse_ss_endpoint_companion()
139 cfgno, inum, asnum, ep->desc.bEndpointAddress, usb_parse_ss_endpoint_companion()
178 if (ifp->desc.bNumEndpoints >= num_ep) usb_parse_endpoint()
181 endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; usb_parse_endpoint()
182 ++ifp->desc.bNumEndpoints; usb_parse_endpoint()
184 memcpy(&endpoint->desc, d, n); usb_parse_endpoint()
241 endpoint->desc.bInterval = n; usb_parse_endpoint()
253 endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT; usb_parse_endpoint()
254 endpoint->desc.bInterval = 1; usb_parse_endpoint()
255 if (usb_endpoint_maxp(&endpoint->desc) > 8) usb_parse_endpoint()
256 endpoint->desc.wMaxPacketSize = cpu_to_le16(8); usb_parse_endpoint()
268 maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; usb_parse_endpoint()
336 for (i = 0; i < config->desc.bNumInterfaces; ++i) { usb_parse_interface()
350 if (alt->desc.bAlternateSetting == asnum) { usb_parse_interface()
359 memcpy(&alt->desc, d, USB_DT_INTERFACE_SIZE); usb_parse_interface()
374 num_ep = num_ep_orig = alt->desc.bNumEndpoints; usb_parse_interface()
375 alt->desc.bNumEndpoints = 0; /* Use as a counter */ usb_parse_interface()
436 memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); usb_parse_configuration()
437 if (config->desc.bDescriptorType != USB_DT_CONFIG || usb_parse_configuration()
438 config->desc.bLength < USB_DT_CONFIG_SIZE || usb_parse_configuration()
439 config->desc.bLength > size) { usb_parse_configuration()
442 config->desc.bDescriptorType, config->desc.bLength); usb_parse_configuration()
445 cfgno = config->desc.bConfigurationValue; usb_parse_configuration()
447 buffer += config->desc.bLength; usb_parse_configuration()
448 size -= config->desc.bLength; usb_parse_configuration()
450 nintf = nintf_orig = config->desc.bNumInterfaces; usb_parse_configuration()
545 config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0); usb_parse_configuration()
553 config->desc.bNumInterfaces = nintf = n; usb_parse_configuration()
614 if (intfc->altsetting[n].desc. usb_parse_configuration()
649 for (i = 0; i < cf->desc.bNumInterfaces; i++) { usb_destroy_configuration()
673 struct usb_config_descriptor *desc; usb_get_configuration() local
698 desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL); usb_get_configuration()
699 if (!desc) usb_get_configuration()
707 desc, USB_DT_CONFIG_SIZE); usb_get_configuration()
723 length = max((int) le16_to_cpu(desc->wTotalLength), usb_get_configuration()
762 kfree(desc); usb_get_configuration()
773 kfree(dev->bos->desc); usb_release_bos_descriptor()
820 dev->bos->desc = (struct usb_bos_descriptor *)buffer; usb_get_bos_descriptor()
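config.c caches a struct usb_endpoint_descriptor in ep->desc for every parsed endpoint; drivers then walk those copies with the usb_endpoint_* helpers rather than reparsing raw descriptors. A minimal sketch of locating a bulk-IN endpoint; the function name is invented:

        static int example_find_bulk_in(struct usb_interface *intf)
        {
                struct usb_host_interface *alt = intf->cur_altsetting;
                int i;

                for (i = 0; i < alt->desc.bNumEndpoints; i++) {
                        struct usb_endpoint_descriptor *epd = &alt->endpoint[i].desc;

                        if (usb_endpoint_is_bulk_in(epd))
                                return epd->bEndpointAddress;
                }

                return -ENODEV;
        }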
H A Dendpoint.c19 struct usb_endpoint_descriptor *desc; member in struct:ep_device
40 return sprintf(buf, format_string, ep->desc->field); \
54 usb_endpoint_maxp(ep->desc) & 0x07ff); wMaxPacketSize_show()
64 switch (usb_endpoint_type(ep->desc)) { type_show()
90 in = (ep->desc->bEndpointAddress & USB_DIR_IN); interval_show()
92 switch (usb_endpoint_type(ep->desc)) { interval_show()
96 interval = ep->desc->bInterval; interval_show()
100 interval = 1 << (ep->desc->bInterval - 1); interval_show()
106 interval = ep->desc->bInterval; interval_show()
111 interval = 1 << (ep->desc->bInterval - 1); interval_show()
113 interval = ep->desc->bInterval; interval_show()
134 if (usb_endpoint_xfer_control(ep->desc)) direction_show()
136 else if (usb_endpoint_dir_in(ep->desc)) direction_show()
188 ep_dev->desc = &endpoint->desc; usb_create_ep_devs()
193 dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress); usb_create_ep_devs()
H A Ddevices.c182 const struct usb_endpoint_descriptor *desc) usb_dump_endpoint_descriptor()
190 dir = usb_endpoint_dir_in(desc) ? 'I' : 'O'; usb_dump_endpoint_descriptor()
193 switch (usb_endpoint_maxp(desc) & (0x03 << 11)) { usb_dump_endpoint_descriptor()
202 switch (usb_endpoint_type(desc)) { usb_dump_endpoint_descriptor()
206 interval = desc->bInterval; usb_dump_endpoint_descriptor()
213 interval = 1 << (desc->bInterval - 1); usb_dump_endpoint_descriptor()
218 interval = desc->bInterval; usb_dump_endpoint_descriptor()
225 interval = 1 << (desc->bInterval - 1); usb_dump_endpoint_descriptor()
227 interval = desc->bInterval; usb_dump_endpoint_descriptor()
241 start += sprintf(start, format_endpt, desc->bEndpointAddress, dir, usb_dump_endpoint_descriptor()
242 desc->bmAttributes, type, usb_dump_endpoint_descriptor()
243 (usb_endpoint_maxp(desc) & 0x07ff) * usb_dump_endpoint_descriptor()
254 const struct usb_interface_descriptor *desc; usb_dump_interface_descriptor() local
260 desc = &intfc->altsetting[setno].desc; usb_dump_interface_descriptor()
265 active = (desc == &iface->cur_altsetting->desc); usb_dump_interface_descriptor()
269 desc->bInterfaceNumber, usb_dump_interface_descriptor()
270 desc->bAlternateSetting, usb_dump_interface_descriptor()
271 desc->bNumEndpoints, usb_dump_interface_descriptor()
272 desc->bInterfaceClass, usb_dump_interface_descriptor()
273 class_decode(desc->bInterfaceClass), usb_dump_interface_descriptor()
274 desc->bInterfaceSubClass, usb_dump_interface_descriptor()
275 desc->bInterfaceProtocol, usb_dump_interface_descriptor()
284 const struct usb_host_interface *desc = &intfc->altsetting[setno]; usb_dump_interface() local
288 for (i = 0; i < desc->desc.bNumEndpoints; i++) { usb_dump_interface()
292 start, end, &desc->endpoint[i].desc); usb_dump_interface()
318 const struct usb_config_descriptor *desc, usb_dump_config_descriptor()
332 desc->bNumInterfaces, usb_dump_config_descriptor()
333 desc->bConfigurationValue, usb_dump_config_descriptor()
334 desc->bmAttributes, usb_dump_config_descriptor()
335 desc->bMaxPower * mul); usb_dump_config_descriptor()
350 return start + sprintf(start, "(null Cfg. desc.)\n"); usb_dump_config()
351 start = usb_dump_config_descriptor(start, end, &config->desc, active, usb_dump_config()
359 for (i = 0; i < config->desc.bNumInterfaces; i++) { usb_dump_config()
376 const struct usb_device_descriptor *desc) usb_dump_device_descriptor()
378 u16 bcdUSB = le16_to_cpu(desc->bcdUSB); usb_dump_device_descriptor()
379 u16 bcdDevice = le16_to_cpu(desc->bcdDevice); usb_dump_device_descriptor()
385 desc->bDeviceClass, usb_dump_device_descriptor()
386 class_decode(desc->bDeviceClass), usb_dump_device_descriptor()
387 desc->bDeviceSubClass, usb_dump_device_descriptor()
388 desc->bDeviceProtocol, usb_dump_device_descriptor()
389 desc->bMaxPacketSize0, usb_dump_device_descriptor()
390 desc->bNumConfigurations); usb_dump_device_descriptor()
394 le16_to_cpu(desc->idVendor), usb_dump_device_descriptor()
395 le16_to_cpu(desc->idProduct), usb_dump_device_descriptor()
455 const struct usb_hub_descriptor *desc) usb_dump_hub_descriptor()
458 unsigned char *ptr = (unsigned char *)desc; usb_dump_hub_descriptor()
181 usb_dump_endpoint_descriptor(int speed, char *start, char *end, const struct usb_endpoint_descriptor *desc) usb_dump_endpoint_descriptor() argument
317 usb_dump_config_descriptor(char *start, char *end, const struct usb_config_descriptor *desc, int active, int speed) usb_dump_config_descriptor() argument
375 usb_dump_device_descriptor(char *start, char *end, const struct usb_device_descriptor *desc) usb_dump_device_descriptor() argument
454 usb_dump_hub_descriptor(char *start, char *end, const struct usb_hub_descriptor *desc) usb_dump_hub_descriptor() argument
H A Dgeneric.c29 static int is_rndis(struct usb_interface_descriptor *desc) is_rndis() argument
31 return desc->bInterfaceClass == USB_CLASS_COMM is_rndis()
32 && desc->bInterfaceSubClass == 2 is_rndis()
33 && desc->bInterfaceProtocol == 0xff; is_rndis()
36 static int is_activesync(struct usb_interface_descriptor *desc) is_activesync() argument
38 return desc->bInterfaceClass == USB_CLASS_MISC is_activesync()
39 && desc->bInterfaceSubClass == 1 is_activesync()
40 && desc->bInterfaceProtocol == 1; is_activesync()
57 struct usb_interface_descriptor *desc = NULL; usb_choose_configuration() local
60 if (c->desc.bNumInterfaces > 0) usb_choose_configuration()
61 desc = &c->intf_cache[0]->altsetting->desc; usb_choose_configuration()
85 if (bus_powered && (c->desc.bmAttributes & usb_choose_configuration()
113 if (i == 0 && num_configs > 1 && desc && usb_choose_configuration()
114 (is_rndis(desc) || is_activesync(desc))) { usb_choose_configuration()
128 (desc && desc->bInterfaceClass != usb_choose_configuration()
146 i = best->desc.bConfigurationValue; usb_choose_configuration()
/linux-4.1.27/arch/x86/crypto/
H A Dglue_helper.c36 struct blkcipher_desc *desc, __glue_ecb_crypt_128bit()
39 void *ctx = crypto_blkcipher_ctx(desc->tfm); __glue_ecb_crypt_128bit()
45 err = blkcipher_walk_virt(desc, walk); __glue_ecb_crypt_128bit()
52 desc, fpu_enabled, nbytes); __glue_ecb_crypt_128bit()
74 err = blkcipher_walk_done(desc, walk, nbytes); __glue_ecb_crypt_128bit()
82 struct blkcipher_desc *desc, struct scatterlist *dst, glue_ecb_crypt_128bit()
88 return __glue_ecb_crypt_128bit(gctx, desc, &walk); glue_ecb_crypt_128bit()
93 struct blkcipher_desc *desc, __glue_cbc_encrypt_128bit()
96 void *ctx = crypto_blkcipher_ctx(desc->tfm); __glue_cbc_encrypt_128bit()
118 struct blkcipher_desc *desc, glue_cbc_encrypt_128bit()
126 err = blkcipher_walk_virt(desc, &walk); glue_cbc_encrypt_128bit()
129 nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk); glue_cbc_encrypt_128bit()
130 err = blkcipher_walk_done(desc, &walk, nbytes); glue_cbc_encrypt_128bit()
139 struct blkcipher_desc *desc, __glue_cbc_decrypt_128bit()
142 void *ctx = crypto_blkcipher_ctx(desc->tfm); __glue_cbc_decrypt_128bit()
192 struct blkcipher_desc *desc, glue_cbc_decrypt_128bit()
202 err = blkcipher_walk_virt(desc, &walk); glue_cbc_decrypt_128bit()
206 desc, fpu_enabled, nbytes); glue_cbc_decrypt_128bit()
207 nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); glue_cbc_decrypt_128bit()
208 err = blkcipher_walk_done(desc, &walk, nbytes); glue_cbc_decrypt_128bit()
217 struct blkcipher_desc *desc, glue_ctr_crypt_final_128bit()
220 void *ctx = crypto_blkcipher_ctx(desc->tfm); glue_ctr_crypt_final_128bit()
237 struct blkcipher_desc *desc, __glue_ctr_crypt_128bit()
241 void *ctx = crypto_blkcipher_ctx(desc->tfm); __glue_ctr_crypt_128bit()
276 struct blkcipher_desc *desc, struct scatterlist *dst, glue_ctr_crypt_128bit()
285 err = blkcipher_walk_virt_block(desc, &walk, bsize); glue_ctr_crypt_128bit()
289 desc, fpu_enabled, nbytes); glue_ctr_crypt_128bit()
290 nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); glue_ctr_crypt_128bit()
291 err = blkcipher_walk_done(desc, &walk, nbytes); glue_ctr_crypt_128bit()
298 gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); glue_ctr_crypt_128bit()
299 err = blkcipher_walk_done(desc, &walk, 0); glue_ctr_crypt_128bit()
308 struct blkcipher_desc *desc, __glue_xts_crypt_128bit()
344 struct blkcipher_desc *desc, struct scatterlist *dst, glue_xts_crypt_128bit()
356 err = blkcipher_walk_virt(desc, &walk); glue_xts_crypt_128bit()
363 desc, fpu_enabled, glue_xts_crypt_128bit()
370 nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk); glue_xts_crypt_128bit()
372 err = blkcipher_walk_done(desc, &walk, nbytes); glue_xts_crypt_128bit()
35 __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct blkcipher_walk *walk) __glue_ecb_crypt_128bit() argument
81 glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) glue_ecb_crypt_128bit() argument
92 __glue_cbc_encrypt_128bit(const common_glue_func_t fn, struct blkcipher_desc *desc, struct blkcipher_walk *walk) __glue_cbc_encrypt_128bit() argument
117 glue_cbc_encrypt_128bit(const common_glue_func_t fn, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) glue_cbc_encrypt_128bit() argument
138 __glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct blkcipher_walk *walk) __glue_cbc_decrypt_128bit() argument
191 glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) glue_cbc_decrypt_128bit() argument
216 glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr, struct blkcipher_desc *desc, struct blkcipher_walk *walk) glue_ctr_crypt_final_128bit() argument
236 __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct blkcipher_walk *walk) __glue_ctr_crypt_128bit() argument
275 glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) glue_ctr_crypt_128bit() argument
306 __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, void *ctx, struct blkcipher_desc *desc, struct blkcipher_walk *walk) __glue_xts_crypt_128bit() argument
343 glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes, void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src), void *tweak_ctx, void *crypt_ctx) glue_xts_crypt_128bit() argument
H A Dcast5_avx_glue.c60 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, ecb_crypt() argument
64 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_crypt()
72 err = blkcipher_walk_virt(desc, walk); ecb_crypt()
73 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ecb_crypt()
107 err = blkcipher_walk_done(desc, walk, nbytes); ecb_crypt()
114 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
120 return ecb_crypt(desc, &walk, true); ecb_encrypt()
123 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
129 return ecb_crypt(desc, &walk, false); ecb_decrypt()
132 static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, __cbc_encrypt() argument
135 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_encrypt()
156 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
163 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
166 nbytes = __cbc_encrypt(desc, &walk); cbc_encrypt()
167 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_encrypt()
173 static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, __cbc_decrypt() argument
176 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_decrypt()
228 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
236 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
237 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_decrypt()
241 nbytes = __cbc_decrypt(desc, &walk); cbc_decrypt()
242 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_decrypt()
249 static void ctr_crypt_final(struct blkcipher_desc *desc, ctr_crypt_final() argument
252 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_crypt_final()
266 static unsigned int __ctr_crypt(struct blkcipher_desc *desc, __ctr_crypt() argument
269 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ctr_crypt()
312 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_crypt() argument
320 err = blkcipher_walk_virt_block(desc, &walk, CAST5_BLOCK_SIZE); ctr_crypt()
321 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ctr_crypt()
325 nbytes = __ctr_crypt(desc, &walk); ctr_crypt()
326 err = blkcipher_walk_done(desc, &walk, nbytes); ctr_crypt()
332 ctr_crypt_final(desc, &walk); ctr_crypt()
333 err = blkcipher_walk_done(desc, &walk, 0); ctr_crypt()
H A Dcrct10dif-pclmul_glue.c48 static int chksum_init(struct shash_desc *desc) chksum_init() argument
50 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_init()
57 static int chksum_update(struct shash_desc *desc, const u8 *data, chksum_update() argument
60 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_update()
71 static int chksum_final(struct shash_desc *desc, u8 *out) chksum_final() argument
73 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_final()
91 static int chksum_finup(struct shash_desc *desc, const u8 *data, chksum_finup() argument
94 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_finup()
99 static int chksum_digest(struct shash_desc *desc, const u8 *data, chksum_digest() argument
102 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_digest()
H A Ddes3_ede_glue.c86 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, ecb_crypt() argument
93 err = blkcipher_walk_virt(desc, walk); ecb_crypt()
124 err = blkcipher_walk_done(desc, walk, nbytes); ecb_crypt()
130 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
133 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_encrypt()
137 return ecb_crypt(desc, &walk, ctx->enc_expkey); ecb_encrypt()
140 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
143 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_decrypt()
147 return ecb_crypt(desc, &walk, ctx->dec_expkey); ecb_decrypt()
150 static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, __cbc_encrypt() argument
153 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_encrypt()
174 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
181 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
184 nbytes = __cbc_encrypt(desc, &walk); cbc_encrypt()
185 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_encrypt()
191 static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, __cbc_decrypt() argument
194 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_decrypt()
253 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
260 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
263 nbytes = __cbc_decrypt(desc, &walk); cbc_decrypt()
264 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_decrypt()
286 static unsigned int __ctr_crypt(struct blkcipher_desc *desc, __ctr_crypt() argument
289 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ctr_crypt()
337 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_crypt() argument
344 err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE); ctr_crypt()
347 nbytes = __ctr_crypt(desc, &walk); ctr_crypt()
348 err = blkcipher_walk_done(desc, &walk, nbytes); ctr_crypt()
352 ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); ctr_crypt()
353 err = blkcipher_walk_done(desc, &walk, 0); ctr_crypt()
H A Dblowfish_glue.c80 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, ecb_crypt() argument
84 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_crypt()
89 err = blkcipher_walk_virt(desc, walk); ecb_crypt()
119 err = blkcipher_walk_done(desc, walk, nbytes); ecb_crypt()
125 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
131 return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way); ecb_encrypt()
134 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
140 return ecb_crypt(desc, &walk, blowfish_dec_blk, blowfish_dec_blk_4way); ecb_decrypt()
143 static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, __cbc_encrypt() argument
146 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_encrypt()
167 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
174 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
177 nbytes = __cbc_encrypt(desc, &walk); cbc_encrypt()
178 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_encrypt()
184 static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, __cbc_decrypt() argument
187 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_decrypt()
248 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
255 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
258 nbytes = __cbc_decrypt(desc, &walk); cbc_decrypt()
259 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_decrypt()
280 static unsigned int __ctr_crypt(struct blkcipher_desc *desc, __ctr_crypt() argument
283 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ctr_crypt()
336 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_crypt() argument
343 err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE); ctr_crypt()
346 nbytes = __ctr_crypt(desc, &walk); ctr_crypt()
347 err = blkcipher_walk_done(desc, &walk, nbytes); ctr_crypt()
351 ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); ctr_crypt()
352 err = blkcipher_walk_done(desc, &walk, 0); ctr_crypt()
H A Dghash-clmulni-intel_glue.c46 static int ghash_init(struct shash_desc *desc) ghash_init() argument
48 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ghash_init()
80 static int ghash_update(struct shash_desc *desc, ghash_update() argument
83 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ghash_update()
84 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); ghash_update()
134 static int ghash_final(struct shash_desc *desc, u8 *dst) ghash_final() argument
136 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ghash_final()
137 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); ghash_final()
177 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_init() local
180 desc->tfm = child; ghash_async_init()
181 desc->flags = req->base.flags; ghash_async_init()
182 return crypto_shash_init(desc); ghash_async_init()
199 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_update() local
200 return shash_ahash_update(req, desc); ghash_async_update()
217 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_final() local
218 return crypto_shash_final(desc, req->result); ghash_async_final()
234 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_digest() local
237 desc->tfm = child; ghash_async_digest()
238 desc->flags = req->base.flags; ghash_async_digest()
239 return shash_ahash_digest(req, desc); ghash_async_digest()
H A Dcamellia_aesni_avx_glue.c158 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
161 return glue_ecb_crypt_128bit(&camellia_enc, desc, dst, src, nbytes); ecb_encrypt()
164 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
167 return glue_ecb_crypt_128bit(&camellia_dec, desc, dst, src, nbytes); ecb_decrypt()
170 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
173 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(camellia_enc_blk), desc, cbc_encrypt()
177 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
180 return glue_cbc_decrypt_128bit(&camellia_dec_cbc, desc, dst, src, cbc_decrypt()
184 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_crypt() argument
187 return glue_ctr_crypt_128bit(&camellia_ctr, desc, dst, src, nbytes); ctr_crypt()
262 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, lrw_encrypt() argument
265 struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_encrypt()
281 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; lrw_encrypt()
282 ret = lrw_crypt(desc, dst, src, nbytes, &req); lrw_encrypt()
288 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, lrw_decrypt() argument
291 struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_decrypt()
307 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; lrw_decrypt()
308 ret = lrw_crypt(desc, dst, src, nbytes, &req); lrw_decrypt()
314 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_encrypt() argument
317 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt()
319 return glue_xts_crypt_128bit(&camellia_enc_xts, desc, dst, src, nbytes, xts_encrypt()
324 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_decrypt() argument
327 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt()
329 return glue_xts_crypt_128bit(&camellia_dec_xts, desc, dst, src, nbytes, xts_decrypt()
H A Dserpent_avx2_glue.c140 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
143 return glue_ecb_crypt_128bit(&serpent_enc, desc, dst, src, nbytes); ecb_encrypt()
146 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
149 return glue_ecb_crypt_128bit(&serpent_dec, desc, dst, src, nbytes); ecb_decrypt()
152 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
155 return glue_cbc_encrypt_128bit(GLUE_FUNC_CAST(__serpent_encrypt), desc, cbc_encrypt()
159 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
162 return glue_cbc_decrypt_128bit(&serpent_dec_cbc, desc, dst, src, cbc_decrypt()
166 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_crypt() argument
169 return glue_ctr_crypt_128bit(&serpent_ctr, desc, dst, src, nbytes); ctr_crypt()
236 static int lrw_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, lrw_encrypt() argument
239 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_encrypt()
255 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; lrw_encrypt()
256 ret = lrw_crypt(desc, dst, src, nbytes, &req); lrw_encrypt()
262 static int lrw_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, lrw_decrypt() argument
265 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_decrypt()
281 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; lrw_decrypt()
282 ret = lrw_crypt(desc, dst, src, nbytes, &req); lrw_decrypt()
288 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_encrypt() argument
291 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt()
293 return glue_xts_crypt_128bit(&serpent_enc_xts, desc, dst, src, nbytes, xts_encrypt()
298 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_decrypt() argument
301 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt()
303 return glue_xts_crypt_128bit(&serpent_dec_xts, desc, dst, src, nbytes, xts_decrypt()
/linux-4.1.27/security/apparmor/
H A Dcrypto.c38 } desc; aa_calc_profile_hash() local
49 desc.shash.tfm = apparmor_tfm; aa_calc_profile_hash()
50 desc.shash.flags = 0; aa_calc_profile_hash()
52 error = crypto_shash_init(&desc.shash); aa_calc_profile_hash()
55 error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4); aa_calc_profile_hash()
58 error = crypto_shash_update(&desc.shash, (u8 *) start, len); aa_calc_profile_hash()
61 error = crypto_shash_final(&desc.shash, profile->hash); aa_calc_profile_hash()
/linux-4.1.27/security/integrity/evm/
H A Devm_crypto.c40 struct shash_desc *desc; init_desc() local
75 desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm), init_desc()
77 if (!desc) init_desc()
80 desc->tfm = *tfm; init_desc()
81 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; init_desc()
83 rc = crypto_shash_init(desc); init_desc()
85 kfree(desc); init_desc()
88 return desc; init_desc()
97 static void hmac_add_misc(struct shash_desc *desc, struct inode *inode, hmac_add_misc() argument
114 crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc)); hmac_add_misc()
116 crypto_shash_update(desc, inode->i_sb->s_uuid, hmac_add_misc()
118 crypto_shash_final(desc, digest); hmac_add_misc()
135 struct shash_desc *desc; evm_calc_hmac_or_hash() local
144 desc = init_desc(type); evm_calc_hmac_or_hash()
145 if (IS_ERR(desc)) evm_calc_hmac_or_hash()
146 return PTR_ERR(desc); evm_calc_hmac_or_hash()
153 crypto_shash_update(desc, (const u8 *)req_xattr_value, evm_calc_hmac_or_hash()
168 crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size); evm_calc_hmac_or_hash()
170 hmac_add_misc(desc, inode, digest); evm_calc_hmac_or_hash()
174 kfree(desc); evm_calc_hmac_or_hash()
222 struct shash_desc *desc; evm_init_hmac() local
224 desc = init_desc(EVM_XATTR_HMAC); evm_init_hmac()
225 if (IS_ERR(desc)) { evm_init_hmac()
227 return PTR_ERR(desc); evm_init_hmac()
230 crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len); evm_init_hmac()
231 hmac_add_misc(desc, inode, hmac_val); evm_init_hmac()
232 kfree(desc); evm_init_hmac()
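init_desc() in the hits above sizes its allocation as sizeof(*desc) plus crypto_shash_descsize(tfm) because the transform's opaque per-request state lives immediately behind struct shash_desc. A hedged sketch of that allocation idiom (the function name is hypothetical):

    #include <crypto/hash.h>
    #include <linux/err.h>
    #include <linux/slab.h>

    /* Sketch: allocate a shash_desc sized for the tfm's private context. */
    static struct shash_desc *demo_init_desc(struct crypto_shash *tfm)
    {
            struct shash_desc *desc;
            int rc;

            desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                           GFP_KERNEL);
            if (!desc)
                    return ERR_PTR(-ENOMEM);

            desc->tfm = tfm;
            desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;  /* mirrors the EVM hit */

            rc = crypto_shash_init(desc);
            if (rc) {
                    kfree(desc);
                    return ERR_PTR(rc);
            }
            return desc;
    }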
/linux-4.1.27/include/linux/irqchip/
chained_irq.h 28 struct irq_desc *desc) chained_irq_enter()
35 chip->irq_mask_ack(&desc->irq_data); chained_irq_enter()
37 chip->irq_mask(&desc->irq_data); chained_irq_enter()
39 chip->irq_ack(&desc->irq_data); chained_irq_enter()
44 struct irq_desc *desc) chained_irq_exit()
47 chip->irq_eoi(&desc->irq_data); chained_irq_exit()
49 chip->irq_unmask(&desc->irq_data); chained_irq_exit()
27 chained_irq_enter(struct irq_chip *chip, struct irq_desc *desc) chained_irq_enter() argument
43 chained_irq_exit(struct irq_chip *chip, struct irq_desc *desc) chained_irq_exit() argument
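chained_irq_enter()/chained_irq_exit() bracket a demultiplexing flow handler so the parent interrupt is masked/acked on entry and eoi'd/unmasked on exit, using whichever callbacks the parent chip provides. A sketch with this kernel's two-argument flow-handler signature; the demo_mux state, status register, and domain lookup are assumptions:

    #include <linux/bitops.h>
    #include <linux/io.h>
    #include <linux/irq.h>
    #include <linux/irqdomain.h>
    #include <linux/irqchip/chained_irq.h>

    struct demo_mux {                       /* hypothetical demux state */
            void __iomem *base;
            struct irq_domain *domain;
    };

    #define DEMO_IRQ_STATUS 0x00            /* assumed register offset */

    static void demo_mux_irq_handler(unsigned int irq, struct irq_desc *desc)
    {
            struct irq_chip *chip = irq_desc_get_chip(desc);
            struct demo_mux *mux = irq_desc_get_handler_data(desc);
            unsigned long pending;
            int bit;

            chained_irq_enter(chip, desc);  /* mask+ack (or ack) the parent */

            pending = readl(mux->base + DEMO_IRQ_STATUS);
            for_each_set_bit(bit, &pending, 32)
                    generic_handle_irq(irq_find_mapping(mux->domain, bit));

            chained_irq_exit(chip, desc);   /* eoi/unmask the parent */
    }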
/linux-4.1.27/include/linux/regulator/
of_regulator.h 16 const struct regulator_desc *desc; member in struct:of_regulator_match
23 const struct regulator_desc *desc);
31 const struct regulator_desc *desc) of_get_regulator_init_data()
29 of_get_regulator_init_data(struct device *dev, struct device_node *node, const struct regulator_desc *desc) of_get_regulator_init_data() argument
/linux-4.1.27/arch/powerpc/crypto/
sha1.c 31 static int sha1_init(struct shash_desc *desc) sha1_init() argument
33 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_init()
42 static int sha1_update(struct shash_desc *desc, const u8 *data, sha1_update() argument
45 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_update()
79 static int sha1_final(struct shash_desc *desc, u8 *out) sha1_final() argument
81 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_final()
92 sha1_update(desc, padding, padlen); sha1_final()
95 sha1_update(desc, (const u8 *)&bits, sizeof(bits)); sha1_final()
107 static int sha1_export(struct shash_desc *desc, void *out) sha1_export() argument
109 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_export()
115 static int sha1_import(struct shash_desc *desc, const void *in) sha1_import() argument
117 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_import()
md5-glue.c 36 static int ppc_md5_init(struct shash_desc *desc) ppc_md5_init() argument
38 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_init()
49 static int ppc_md5_update(struct shash_desc *desc, const u8 *data, ppc_md5_update() argument
52 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_update()
81 static int ppc_md5_final(struct shash_desc *desc, u8 *out) ppc_md5_final() argument
83 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_final()
113 static int ppc_md5_export(struct shash_desc *desc, void *out) ppc_md5_export() argument
115 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_export()
121 static int ppc_md5_import(struct shash_desc *desc, const void *in) ppc_md5_import() argument
123 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_import()
aes-spe-glue.c 179 static int ppc_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_ecb_encrypt() argument
182 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_ecb_encrypt()
187 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_ecb_encrypt()
189 err = blkcipher_walk_virt(desc, &walk); ppc_ecb_encrypt()
201 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_ecb_encrypt()
207 static int ppc_ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_ecb_decrypt() argument
210 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_ecb_decrypt()
215 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_ecb_decrypt()
217 err = blkcipher_walk_virt(desc, &walk); ppc_ecb_decrypt()
229 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_ecb_decrypt()
235 static int ppc_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_cbc_encrypt() argument
238 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_cbc_encrypt()
243 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_cbc_encrypt()
245 err = blkcipher_walk_virt(desc, &walk); ppc_cbc_encrypt()
257 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_cbc_encrypt()
263 static int ppc_cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_cbc_decrypt() argument
266 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_cbc_decrypt()
271 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_cbc_decrypt()
273 err = blkcipher_walk_virt(desc, &walk); ppc_cbc_decrypt()
285 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_cbc_decrypt()
291 static int ppc_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_ctr_crypt() argument
294 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_ctr_crypt()
299 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_ctr_crypt()
301 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); ppc_ctr_crypt()
315 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_ctr_crypt()
321 static int ppc_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_xts_encrypt() argument
324 struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_xts_encrypt()
330 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_xts_encrypt()
332 err = blkcipher_walk_virt(desc, &walk); ppc_xts_encrypt()
346 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_xts_encrypt()
352 static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_xts_decrypt() argument
355 struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_xts_decrypt()
361 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_xts_decrypt()
363 err = blkcipher_walk_virt(desc, &walk); ppc_xts_decrypt()
377 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_xts_decrypt()
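The SPE glue routines above all share one shape: clear CRYPTO_TFM_REQ_MAY_SLEEP (the walk must not sleep while the SPE unit is claimed), then loop on walk.nbytes and report the unprocessed remainder back to blkcipher_walk_done(). A condensed sketch of that loop; the block size and the elided cipher call are placeholders:

    #include <crypto/algapi.h>

    #define DEMO_BLOCK_SIZE 16      /* AES block size, for illustration */

    static int demo_ecb_walk(struct blkcipher_desc *desc,
                             struct scatterlist *dst,
                             struct scatterlist *src, unsigned int nbytes)
    {
            struct blkcipher_walk walk;
            int err;

            desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; /* walk must not sleep */
            blkcipher_walk_init(&walk, dst, src, nbytes);
            err = blkcipher_walk_virt(desc, &walk);

            while ((nbytes = walk.nbytes)) {
                    unsigned int ubytes = nbytes % DEMO_BLOCK_SIZE;

                    /* placeholder: process (nbytes - ubytes) bytes from
                     * walk.src.virt.addr into walk.dst.virt.addr here */

                    err = blkcipher_walk_done(desc, &walk, ubytes);
            }
            return err;
    }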
/linux-4.1.27/drivers/usb/image/
microtek.c 212 static inline void mts_debug_dump(struct mts_desc* desc) { mts_debug_dump() argument
213 MTS_DEBUG("desc at 0x%x: toggle = %02x%02x\n", mts_debug_dump()
214 (int)desc, mts_debug_dump()
215 (int)desc->usb_dev->toggle[1],(int)desc->usb_dev->toggle[0] mts_debug_dump()
218 usb_sndbulkpipe(desc->usb_dev,desc->ep_out), mts_debug_dump()
219 usb_rcvbulkpipe(desc->usb_dev,desc->ep_response), mts_debug_dump()
220 usb_rcvbulkpipe(desc->usb_dev,desc->ep_image) mts_debug_dump()
317 static inline void mts_urb_abort(struct mts_desc* desc) { mts_urb_abort() argument
319 mts_debug_dump(desc); mts_urb_abort()
321 usb_kill_urb( desc->urb ); mts_urb_abort()
338 struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); mts_scsi_abort() local
342 mts_urb_abort(desc); mts_scsi_abort()
349 struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); mts_scsi_host_reset() local
353 mts_debug_dump(desc); mts_scsi_host_reset()
355 result = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf); mts_scsi_host_reset()
357 result = usb_reset_device(desc->usb_dev); mts_scsi_host_reset()
358 usb_unlock_device(desc->usb_dev); mts_scsi_host_reset()
529 mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc) mts_build_transfer_context() argument
536 desc->context.instance = desc; mts_build_transfer_context()
537 desc->context.srb = srb; mts_build_transfer_context()
538 desc->context.fragment = 0; mts_build_transfer_context()
541 desc->context.data = NULL; mts_build_transfer_context()
542 desc->context.data_length = 0; mts_build_transfer_context()
546 desc->context.data = sg_virt(&sg[0]); mts_build_transfer_context()
547 desc->context.data_length = sg[0].length; mts_build_transfer_context()
556 ) { pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_image); mts_build_transfer_context()
557 MTS_DEBUG( "transferring from desc->ep_image == %d\n", mts_build_transfer_context()
558 (int)desc->ep_image ); mts_build_transfer_context()
560 pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_response); mts_build_transfer_context()
561 MTS_DEBUG( "transferring from desc->ep_response == %d\n", mts_build_transfer_context()
562 (int)desc->ep_response); mts_build_transfer_context()
564 MTS_DEBUG("transferring to desc->ep_out == %d\n", mts_build_transfer_context()
565 (int)desc->ep_out); mts_build_transfer_context()
566 pipe = usb_sndbulkpipe(desc->usb_dev,desc->ep_out); mts_build_transfer_context()
568 desc->context.data_pipe = pipe; mts_build_transfer_context()
575 struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); mts_scsi_queuecommand_lck() local
581 mts_debug_dump(desc); mts_scsi_queuecommand_lck()
598 usb_fill_bulk_urb(desc->urb, mts_scsi_queuecommand_lck()
599 desc->usb_dev, mts_scsi_queuecommand_lck()
600 usb_sndbulkpipe(desc->usb_dev,desc->ep_out), mts_scsi_queuecommand_lck()
604 &desc->context mts_scsi_queuecommand_lck()
608 mts_build_transfer_context( srb, desc ); mts_scsi_queuecommand_lck()
609 desc->context.final_callback = callback; mts_scsi_queuecommand_lck()
612 res=usb_submit_urb(desc->urb, GFP_ATOMIC); mts_scsi_queuecommand_lck()
697 if ( altsetting->desc.bNumEndpoints != MTS_EP_TOTAL ) { mts_usb_probe()
699 (int)MTS_EP_TOTAL, (int)altsetting->desc.bNumEndpoints ); mts_usb_probe()
703 for( i = 0; i < altsetting->desc.bNumEndpoints; i++ ) { mts_usb_probe()
704 if ((altsetting->endpoint[i].desc.bmAttributes & mts_usb_probe()
708 (int)altsetting->endpoint[i].desc.bEndpointAddress ); mts_usb_probe()
710 if (altsetting->endpoint[i].desc.bEndpointAddress & mts_usb_probe()
713 = altsetting->endpoint[i].desc.bEndpointAddress & mts_usb_probe()
721 ep_out = altsetting->endpoint[i].desc.bEndpointAddress & mts_usb_probe()
796 struct mts_desc *desc = usb_get_intfdata(intf); mts_usb_disconnect() local
800 usb_kill_urb(desc->urb); mts_usb_disconnect()
801 scsi_remove_host(desc->host); mts_usb_disconnect()
803 scsi_host_put(desc->host); mts_usb_disconnect()
804 usb_free_urb(desc->urb); mts_usb_disconnect()
805 kfree(desc->context.scsi_status); mts_usb_disconnect()
806 kfree(desc); mts_usb_disconnect()
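The microtek driver above moves every SCSI fragment with one bulk URB. A minimal sketch of the usb_fill_bulk_urb()/usb_submit_urb() pairing it relies on; the endpoint argument and the empty completion body are assumptions:

    #include <linux/usb.h>

    static void demo_bulk_complete(struct urb *urb)
    {
            /* urb->status and urb->actual_length are valid here */
    }

    /* Sketch: send one bulk-out transfer; caller owns urb and buf lifetime. */
    static int demo_send_bulk(struct usb_device *udev, struct urb *urb,
                              void *buf, int len, int ep_out)
    {
            usb_fill_bulk_urb(urb, udev,
                              usb_sndbulkpipe(udev, ep_out),
                              buf, len, demo_bulk_complete, NULL);

            return usb_submit_urb(urb, GFP_ATOMIC);
    }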
/linux-4.1.27/arch/s390/crypto/
aes_s390.c 251 static int fallback_blk_dec(struct blkcipher_desc *desc, fallback_blk_dec() argument
257 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); fallback_blk_dec()
259 tfm = desc->tfm; fallback_blk_dec()
260 desc->tfm = sctx->fallback.blk; fallback_blk_dec()
262 ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); fallback_blk_dec()
264 desc->tfm = tfm; fallback_blk_dec()
268 static int fallback_blk_enc(struct blkcipher_desc *desc, fallback_blk_enc() argument
274 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); fallback_blk_enc()
276 tfm = desc->tfm; fallback_blk_enc()
277 desc->tfm = sctx->fallback.blk; fallback_blk_enc()
279 ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); fallback_blk_enc()
281 desc->tfm = tfm; fallback_blk_enc()
315 static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param, ecb_aes_crypt() argument
318 int ret = blkcipher_walk_virt(desc, walk); ecb_aes_crypt()
332 ret = blkcipher_walk_done(desc, walk, nbytes); ecb_aes_crypt()
338 static int ecb_aes_encrypt(struct blkcipher_desc *desc, ecb_aes_encrypt() argument
342 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ecb_aes_encrypt()
346 return fallback_blk_enc(desc, dst, src, nbytes); ecb_aes_encrypt()
349 return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); ecb_aes_encrypt()
352 static int ecb_aes_decrypt(struct blkcipher_desc *desc, ecb_aes_decrypt() argument
356 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ecb_aes_decrypt()
360 return fallback_blk_dec(desc, dst, src, nbytes); ecb_aes_decrypt()
363 return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk); ecb_aes_decrypt()
444 static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, cbc_aes_crypt() argument
447 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_crypt()
448 int ret = blkcipher_walk_virt(desc, walk); cbc_aes_crypt()
471 ret = blkcipher_walk_done(desc, walk, nbytes); cbc_aes_crypt()
479 static int cbc_aes_encrypt(struct blkcipher_desc *desc, cbc_aes_encrypt() argument
483 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_encrypt()
487 return fallback_blk_enc(desc, dst, src, nbytes); cbc_aes_encrypt()
490 return cbc_aes_crypt(desc, sctx->enc, &walk); cbc_aes_encrypt()
493 static int cbc_aes_decrypt(struct blkcipher_desc *desc, cbc_aes_decrypt() argument
497 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_decrypt()
501 return fallback_blk_dec(desc, dst, src, nbytes); cbc_aes_decrypt()
504 return cbc_aes_crypt(desc, sctx->dec, &walk); cbc_aes_decrypt()
550 static int xts_fallback_decrypt(struct blkcipher_desc *desc, xts_fallback_decrypt() argument
554 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); xts_fallback_decrypt()
558 tfm = desc->tfm; xts_fallback_decrypt()
559 desc->tfm = xts_ctx->fallback; xts_fallback_decrypt()
561 ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); xts_fallback_decrypt()
563 desc->tfm = tfm; xts_fallback_decrypt()
567 static int xts_fallback_encrypt(struct blkcipher_desc *desc, xts_fallback_encrypt() argument
571 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); xts_fallback_encrypt()
575 tfm = desc->tfm; xts_fallback_encrypt()
576 desc->tfm = xts_ctx->fallback; xts_fallback_encrypt()
578 ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); xts_fallback_encrypt()
580 desc->tfm = tfm; xts_fallback_encrypt()
616 static int xts_aes_crypt(struct blkcipher_desc *desc, long func, xts_aes_crypt() argument
621 int ret = blkcipher_walk_virt(desc, walk); xts_aes_crypt()
656 ret = blkcipher_walk_done(desc, walk, nbytes); xts_aes_crypt()
662 static int xts_aes_encrypt(struct blkcipher_desc *desc, xts_aes_encrypt() argument
666 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); xts_aes_encrypt()
670 return xts_fallback_encrypt(desc, dst, src, nbytes); xts_aes_encrypt()
673 return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk); xts_aes_encrypt()
676 static int xts_aes_decrypt(struct blkcipher_desc *desc, xts_aes_decrypt() argument
680 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); xts_aes_decrypt()
684 return xts_fallback_decrypt(desc, dst, src, nbytes); xts_aes_decrypt()
687 return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk); xts_aes_decrypt()
777 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, ctr_aes_crypt() argument
780 int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); ctr_aes_crypt()
815 ret = blkcipher_walk_done(desc, walk, nbytes); ctr_aes_crypt()
839 ret = blkcipher_walk_done(desc, walk, 0); ctr_aes_crypt()
846 static int ctr_aes_encrypt(struct blkcipher_desc *desc, ctr_aes_encrypt() argument
850 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ctr_aes_encrypt()
854 return ctr_aes_crypt(desc, sctx->enc, sctx, &walk); ctr_aes_encrypt()
857 static int ctr_aes_decrypt(struct blkcipher_desc *desc, ctr_aes_decrypt() argument
861 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ctr_aes_decrypt()
865 return ctr_aes_crypt(desc, sctx->dec, sctx, &walk); ctr_aes_decrypt()
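fallback_blk_enc()/fallback_blk_dec() above temporarily repoint desc->tfm at a software fallback (used when the CPACF hardware cannot handle the key size) and restore it afterwards. The idiom in isolation, as a sketch; allocating the fallback transform at setkey time is outside this snippet:

    #include <linux/crypto.h>

    static int demo_fallback_encrypt(struct blkcipher_desc *desc,
                                     struct crypto_blkcipher *fallback,
                                     struct scatterlist *dst,
                                     struct scatterlist *src,
                                     unsigned int nbytes)
    {
            struct crypto_blkcipher *hw_tfm = desc->tfm;
            int ret;

            desc->tfm = fallback;   /* redirect just this request */
            ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes);
            desc->tfm = hw_tfm;     /* restore the hardware tfm */

            return ret;
    }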
des_s390.c 85 static int ecb_desall_crypt(struct blkcipher_desc *desc, long func, ecb_desall_crypt() argument
88 int ret = blkcipher_walk_virt(desc, walk); ecb_desall_crypt()
102 ret = blkcipher_walk_done(desc, walk, nbytes); ecb_desall_crypt()
108 static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, cbc_desall_crypt() argument
111 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_desall_crypt()
112 int ret = blkcipher_walk_virt(desc, walk); cbc_desall_crypt()
135 ret = blkcipher_walk_done(desc, walk, nbytes); cbc_desall_crypt()
143 static int ecb_des_encrypt(struct blkcipher_desc *desc, ecb_des_encrypt() argument
147 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_des_encrypt()
151 return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk); ecb_des_encrypt()
154 static int ecb_des_decrypt(struct blkcipher_desc *desc, ecb_des_decrypt() argument
158 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_des_decrypt()
162 return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk); ecb_des_decrypt()
185 static int cbc_des_encrypt(struct blkcipher_desc *desc, cbc_des_encrypt() argument
192 return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk); cbc_des_encrypt()
195 static int cbc_des_decrypt(struct blkcipher_desc *desc, cbc_des_decrypt() argument
202 return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk); cbc_des_decrypt()
289 static int ecb_des3_encrypt(struct blkcipher_desc *desc, ecb_des3_encrypt() argument
293 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_des3_encrypt()
297 return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk); ecb_des3_encrypt()
300 static int ecb_des3_decrypt(struct blkcipher_desc *desc, ecb_des3_decrypt() argument
304 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_des3_decrypt()
308 return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk); ecb_des3_decrypt()
331 static int cbc_des3_encrypt(struct blkcipher_desc *desc, cbc_des3_encrypt() argument
338 return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk); cbc_des3_encrypt()
341 static int cbc_des3_decrypt(struct blkcipher_desc *desc, cbc_des3_decrypt() argument
348 return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk); cbc_des3_decrypt()
385 static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, ctr_desall_crypt() argument
389 int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); ctr_desall_crypt()
424 ret = blkcipher_walk_done(desc, walk, nbytes); ctr_desall_crypt()
446 ret = blkcipher_walk_done(desc, walk, 0); ctr_desall_crypt()
452 static int ctr_des_encrypt(struct blkcipher_desc *desc, ctr_des_encrypt() argument
456 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_des_encrypt()
460 return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk); ctr_des_encrypt()
463 static int ctr_des_decrypt(struct blkcipher_desc *desc, ctr_des_decrypt() argument
467 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_des_decrypt()
471 return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk); ctr_des_decrypt()
495 static int ctr_des3_encrypt(struct blkcipher_desc *desc, ctr_des3_encrypt() argument
499 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_des3_encrypt()
503 return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk); ctr_des3_encrypt()
506 static int ctr_des3_decrypt(struct blkcipher_desc *desc, ctr_des3_decrypt() argument
510 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_des3_decrypt()
514 return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk); ctr_des3_decrypt()
sha_common.c 21 int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) s390_sha_update() argument
23 struct s390_sha_ctx *ctx = shash_desc_ctx(desc); s390_sha_update()
24 unsigned int bsize = crypto_shash_blocksize(desc->tfm); s390_sha_update()
63 int s390_sha_final(struct shash_desc *desc, u8 *out) s390_sha_final() argument
65 struct s390_sha_ctx *ctx = shash_desc_ctx(desc); s390_sha_final()
66 unsigned int bsize = crypto_shash_blocksize(desc->tfm); s390_sha_final()
97 memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm)); s390_sha_final()
sha1_s390.c 34 static int sha1_init(struct shash_desc *desc) sha1_init() argument
36 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha1_init()
49 static int sha1_export(struct shash_desc *desc, void *out) sha1_export() argument
51 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha1_export()
60 static int sha1_import(struct shash_desc *desc, const void *in) sha1_import() argument
62 struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sha1_import()
/linux-4.1.27/include/linux/gpio/
consumer.h 25 struct gpio_desc *desc[]; member in struct:gpio_descs
70 void gpiod_put(struct gpio_desc *desc);
92 void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
95 int gpiod_get_direction(struct gpio_desc *desc);
96 int gpiod_direction_input(struct gpio_desc *desc);
97 int gpiod_direction_output(struct gpio_desc *desc, int value);
98 int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
101 int gpiod_get_value(const struct gpio_desc *desc);
102 void gpiod_set_value(struct gpio_desc *desc, int value);
105 int gpiod_get_raw_value(const struct gpio_desc *desc);
106 void gpiod_set_raw_value(struct gpio_desc *desc, int value);
111 int gpiod_get_value_cansleep(const struct gpio_desc *desc);
112 void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
116 int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
117 void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
122 int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
124 int gpiod_is_active_low(const struct gpio_desc *desc);
125 int gpiod_cansleep(const struct gpio_desc *desc);
127 int gpiod_to_irq(const struct gpio_desc *desc);
131 int desc_to_gpio(const struct gpio_desc *desc);
191 static inline void gpiod_put(struct gpio_desc *desc) gpiod_put() argument
252 static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) devm_gpiod_put() argument
270 static inline int gpiod_get_direction(const struct gpio_desc *desc) gpiod_get_direction() argument
276 static inline int gpiod_direction_input(struct gpio_desc *desc) gpiod_direction_input() argument
282 static inline int gpiod_direction_output(struct gpio_desc *desc, int value) gpiod_direction_output() argument
288 static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) gpiod_direction_output_raw() argument
296 static inline int gpiod_get_value(const struct gpio_desc *desc) gpiod_get_value() argument
302 static inline void gpiod_set_value(struct gpio_desc *desc, int value) gpiod_set_value() argument
314 static inline int gpiod_get_raw_value(const struct gpio_desc *desc) gpiod_get_raw_value() argument
320 static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) gpiod_set_raw_value() argument
333 static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) gpiod_get_value_cansleep() argument
339 static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) gpiod_set_value_cansleep() argument
351 static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) gpiod_get_raw_value_cansleep() argument
357 static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, gpiod_set_raw_value_cansleep() argument
371 static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) gpiod_set_debounce() argument
378 static inline int gpiod_is_active_low(const struct gpio_desc *desc) gpiod_is_active_low() argument
384 static inline int gpiod_cansleep(const struct gpio_desc *desc) gpiod_cansleep() argument
391 static inline int gpiod_to_irq(const struct gpio_desc *desc) gpiod_to_irq() argument
402 static inline int desc_to_gpio(const struct gpio_desc *desc) desc_to_gpio() argument
464 int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
466 struct gpio_desc *desc);
467 int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value);
468 void gpiod_unexport(struct gpio_desc *desc);
472 static inline int gpiod_export(struct gpio_desc *desc, gpiod_export() argument
479 struct gpio_desc *desc) gpiod_export_link()
484 static inline int gpiod_sysfs_set_active_low(struct gpio_desc *desc, int value) gpiod_sysfs_set_active_low() argument
489 static inline void gpiod_unexport(struct gpio_desc *desc) gpiod_unexport() argument
478 gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc) gpiod_export_link() argument
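The prototypes above form the descriptor-based GPIO consumer API: lookups return an opaque gpio_desc, and the gpiod_* accessors apply active-low polarity on the caller's behalf. A hedged probe-time sketch; the "reset" con_id and the pulse timing are invented for illustration:

    #include <linux/delay.h>
    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/gpio/consumer.h>

    static int demo_probe_gpio(struct device *dev)
    {
            struct gpio_desc *reset;

            /* managed lookup: released automatically on driver detach */
            reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);

            /* logical values: active-low polarity handled by gpiod_* */
            gpiod_set_value(reset, 1);
            usleep_range(1000, 2000);
            gpiod_set_value(reset, 0);

            return 0;
    }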
/linux-4.1.27/drivers/rapidio/devices/
tsi721_dma.c 43 static int tsi721_submit_sg(struct tsi721_tx_desc *desc);
126 "desc status FIFO @ %p (phys = %llx) size=0x%x\n", tsi721_bdma_ch_init()
336 tsi721_desc_fill_init(struct tsi721_tx_desc *desc, tsi721_desc_fill_init() argument
347 (desc->rtype << 19) | desc->destid); tsi721_desc_fill_init()
348 bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) | tsi721_desc_fill_init()
350 rio_addr = (desc->rio_addr >> 2) | tsi721_desc_fill_init()
351 ((u64)(desc->rio_addr_u & 0x3) << 62); tsi721_desc_fill_init()
378 struct tsi721_tx_desc *desc) tsi721_dma_tx_err()
380 struct dma_async_tx_descriptor *txd = &desc->txd; tsi721_dma_tx_err()
384 list_move(&desc->desc_node, &bdma_chan->free_list); tsi721_dma_tx_err()
414 static int tsi721_submit_sg(struct tsi721_tx_desc *desc) tsi721_submit_sg() argument
416 struct dma_chan *dchan = desc->txd.chan; tsi721_submit_sg()
439 rio_addr = desc->rio_addr; tsi721_submit_sg()
457 for_each_sg(desc->sg, sg, desc->sg_len, i) { tsi721_submit_sg()
460 i, desc->sg_len, tsi721_submit_sg()
483 "%s: prev desc final len: %d\n", tsi721_submit_sg()
487 desc->rio_addr = rio_addr; tsi721_submit_sg()
493 desc->sg = sg; tsi721_submit_sg()
494 desc->sg_len -= i; tsi721_submit_sg()
499 err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size); tsi721_submit_sg()
502 "Failed to build desc: err=%d\n", err); tsi721_submit_sg()
507 bd_ptr, desc->destid, desc->rio_addr); tsi721_submit_sg()
522 dev_dbg(dchan->device->dev, "%s: last desc final len: %d\n", tsi721_submit_sg()
524 desc->sg_len = 0; tsi721_submit_sg()
539 struct tsi721_tx_desc *desc; tsi721_advance_work() local
553 desc = tsi721_dma_first_active(bdma_chan); tsi721_advance_work()
554 err = tsi721_submit_sg(desc); tsi721_advance_work()
558 tsi721_dma_tx_err(bdma_chan, desc); tsi721_advance_work()
593 struct tsi721_tx_desc *desc; tsi721_dma_tasklet() local
597 desc = tsi721_dma_first_active(bdma_chan); tsi721_dma_tasklet()
599 if (desc->sg_len == 0) { tsi721_dma_tasklet()
603 desc->status = DMA_COMPLETE; tsi721_dma_tasklet()
604 dma_cookie_complete(&desc->txd); tsi721_dma_tasklet()
605 if (desc->txd.flags & DMA_PREP_INTERRUPT) { tsi721_dma_tasklet()
606 callback = desc->txd.callback; tsi721_dma_tasklet()
607 param = desc->txd.callback_param; tsi721_dma_tasklet()
609 list_move(&desc->desc_node, &bdma_chan->free_list); tsi721_dma_tasklet()
626 struct tsi721_tx_desc *desc = to_tsi721_desc(txd); tsi721_tx_submit() local
631 if (!list_empty(&desc->desc_node)) { tsi721_tx_submit()
645 desc->status = DMA_IN_PROGRESS; tsi721_tx_submit()
646 list_add_tail(&desc->desc_node, &bdma_chan->queue); tsi721_tx_submit()
655 struct tsi721_tx_desc *desc = NULL; tsi721_alloc_chan_resources() local
672 desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc), tsi721_alloc_chan_resources()
674 if (!desc) { tsi721_alloc_chan_resources()
681 bdma_chan->tx_desc = desc; tsi721_alloc_chan_resources()
684 dma_async_tx_descriptor_init(&desc[i].txd, dchan); tsi721_alloc_chan_resources()
685 desc[i].txd.tx_submit = tsi721_tx_submit; tsi721_alloc_chan_resources()
686 desc[i].txd.flags = DMA_CTRL_ACK; tsi721_alloc_chan_resources()
687 list_add(&desc[i].desc_node, &bdma_chan->free_list); tsi721_alloc_chan_resources()
762 struct tsi721_tx_desc *desc, *_d; tsi721_prep_rio_sg() local
798 list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) { tsi721_prep_rio_sg()
799 if (async_tx_test_ack(&desc->txd)) { tsi721_prep_rio_sg()
800 list_del_init(&desc->desc_node); tsi721_prep_rio_sg()
801 desc->destid = rext->destid; tsi721_prep_rio_sg()
802 desc->rio_addr = rext->rio_addr; tsi721_prep_rio_sg()
803 desc->rio_addr_u = 0; tsi721_prep_rio_sg()
804 desc->rtype = rtype; tsi721_prep_rio_sg()
805 desc->sg_len = sg_len; tsi721_prep_rio_sg()
806 desc->sg = sgl; tsi721_prep_rio_sg()
807 txd = &desc->txd; tsi721_prep_rio_sg()
821 struct tsi721_tx_desc *desc, *_d; tsi721_terminate_all() local
845 list_for_each_entry_safe(desc, _d, &list, desc_node) tsi721_terminate_all()
846 tsi721_dma_tx_err(bdma_chan, desc); tsi721_terminate_all()
377 tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan, struct tsi721_tx_desc *desc) tsi721_dma_tx_err() argument
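tsi721_tx_submit() above follows the dmaengine provider contract: under the channel lock, assign the descriptor a cookie and move it onto the pending queue; the tasklet later pairs it with dma_cookie_complete(). A condensed sketch using the provider-side helpers from drivers/dma/dmaengine.h; both structures here are hypothetical:

    #include <linux/dmaengine.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include "dmaengine.h"          /* provider-private cookie helpers */

    struct demo_desc {              /* hypothetical */
            struct dma_async_tx_descriptor txd;
            struct list_head node;
    };

    struct demo_chan {              /* hypothetical */
            struct dma_chan dchan;
            spinlock_t lock;
            struct list_head queue;
    };

    static dma_cookie_t demo_tx_submit(struct dma_async_tx_descriptor *txd)
    {
            struct demo_desc *d = container_of(txd, struct demo_desc, txd);
            struct demo_chan *c = container_of(txd->chan, struct demo_chan,
                                               dchan);
            dma_cookie_t cookie;
            unsigned long flags;

            spin_lock_irqsave(&c->lock, flags);
            cookie = dma_cookie_assign(txd);    /* publish to the client */
            list_add_tail(&d->node, &c->queue); /* picked up by hardware later */
            spin_unlock_irqrestore(&c->lock, flags);

            return cookie;
    }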
/linux-4.1.27/drivers/power/
charger-manager.c 95 switch (cm->desc->battery_present) { is_batt_present()
102 psy = power_supply_get_by_name(cm->desc->psy_fuel_gauge); is_batt_present()
113 for (i = 0; cm->desc->psy_charger_stat[i]; i++) { is_batt_present()
115 cm->desc->psy_charger_stat[i]); is_batt_present()
118 cm->desc->psy_charger_stat[i]); is_batt_present()
152 for (i = 0; cm->desc->psy_charger_stat[i]; i++) { is_ext_pwr_online()
153 psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]); is_ext_pwr_online()
156 cm->desc->psy_charger_stat[i]); is_ext_pwr_online()
186 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); get_batt_uV()
216 for (i = 0; cm->desc->psy_charger_stat[i]; i++) { is_charging()
223 psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]); is_charging()
226 cm->desc->psy_charger_stat[i]); is_charging()
235 cm->desc->psy_charger_stat[i]); is_charging()
253 cm->desc->psy_charger_stat[i]); is_charging()
275 struct charger_desc *desc = cm->desc; is_full_charged() local
286 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); is_full_charged()
290 if (desc->fullbatt_full_capacity > 0) { is_full_charged()
296 if (!ret && val.intval > desc->fullbatt_full_capacity) { is_full_charged()
303 if (desc->fullbatt_uV > 0) { is_full_charged()
305 if (!ret && uV >= desc->fullbatt_uV) { is_full_charged()
312 if (desc->fullbatt_soc > 0) { is_full_charged()
317 if (!ret && val.intval >= desc->fullbatt_soc) { is_full_charged()
334 switch (cm->desc->polling_mode) { is_polling_required()
345 cm->desc->polling_mode); is_polling_required()
364 struct charger_desc *desc = cm->desc; try_charger_enable() local
381 for (i = 0 ; i < desc->num_charger_regulators ; i++) { try_charger_enable()
382 if (desc->charger_regulators[i].externally_control) try_charger_enable()
385 err = regulator_enable(desc->charger_regulators[i].consumer); try_charger_enable()
388 desc->charger_regulators[i].regulator_name); try_charger_enable()
399 for (i = 0 ; i < desc->num_charger_regulators ; i++) { try_charger_enable()
400 if (desc->charger_regulators[i].externally_control) try_charger_enable()
403 err = regulator_disable(desc->charger_regulators[i].consumer); try_charger_enable()
406 desc->charger_regulators[i].regulator_name); try_charger_enable()
414 for (i = 0; i < desc->num_charger_regulators; i++) { try_charger_enable()
416 desc->charger_regulators[i].consumer)) { try_charger_enable()
418 desc->charger_regulators[i].consumer); try_charger_enable()
420 desc->charger_regulators[i].regulator_name); try_charger_enable()
520 struct charger_desc *desc = cm->desc; fullbatt_vchk() local
526 if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms) fullbatt_vchk()
535 diff = desc->fullbatt_uV - batt_uV; fullbatt_vchk()
541 if (diff > desc->fullbatt_vchkdrop_uV) { fullbatt_vchk()
559 struct charger_desc *desc = cm->desc; check_charging_duration() local
564 if (!desc->charging_max_duration_ms && check_charging_duration()
565 !desc->discharging_max_duration_ms) check_charging_duration()
571 if (duration > desc->charging_max_duration_ms) { check_charging_duration()
573 desc->charging_max_duration_ms); check_charging_duration()
581 if (duration > desc->charging_max_duration_ms && check_charging_duration()
584 desc->discharging_max_duration_ms); check_charging_duration()
600 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); cm_get_battery_temperature_by_psy()
617 if (!cm->desc->measure_battery_temp) cm_get_battery_temperature()
638 struct charger_desc *desc = cm->desc; cm_check_thermal_status() local
653 upper_limit = desc->temp_max; cm_check_thermal_status()
654 lower_limit = desc->temp_min; cm_check_thermal_status()
657 upper_limit -= desc->temp_diff; cm_check_thermal_status()
658 lower_limit += desc->temp_diff; cm_check_thermal_status()
771 if (is_polling_required(cm) && cm->desc->polling_interval_ms) { _setup_polling()
774 if (min > cm->desc->polling_interval_ms) _setup_polling()
775 min = cm->desc->polling_interval_ms; _setup_polling()
830 struct charger_desc *desc = cm->desc; fullbatt_handler() local
832 if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms) fullbatt_handler()
839 msecs_to_jiffies(desc->fullbatt_vchkdrop_ms)); fullbatt_handler()
841 desc->fullbatt_vchkdrop_ms); fullbatt_handler()
879 if (is_polling_required(cm) && cm->desc->polling_interval_ms) misc_event_handler()
889 struct charger_desc *desc = cm->desc; charger_get_property() local
921 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); charger_get_property()
939 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); charger_get_property()
963 * the battery voltage values and the thresholds given as desc charger_get_property()
972 if (desc->fullbatt_uV > 0 && uV >= desc->fullbatt_uV && charger_get_property()
995 cm->desc->psy_fuel_gauge); charger_get_property()
1093 if (cm->desc->polling_interval_ms == 0) cm_setup_timer()
1095 CM_MIN_VALID(wakeup_ms, cm->desc->polling_interval_ms); cm_setup_timer()
1233 struct charger_desc *desc = cm->desc; charger_manager_register_extcon() local
1239 for (i = 0; i < desc->num_charger_regulators; i++) { charger_manager_register_extcon()
1240 charger = &desc->charger_regulators[i]; charger_manager_register_extcon()
1309 struct charger_desc *desc = cm->desc; charger_externally_control_store() local
1326 for (i = 0; i < desc->num_charger_regulators; i++) { charger_externally_control_store()
1327 if (&desc->charger_regulators[i] != charger && charger_externally_control_store()
1328 !desc->charger_regulators[i].externally_control) { charger_externally_control_store()
1370 struct charger_desc *desc = cm->desc; charger_manager_register_sysfs() local
1379 for (i = 0; i < desc->num_charger_regulators; i++) { charger_manager_register_sysfs()
1380 charger = &desc->charger_regulators[i]; charger_manager_register_sysfs()
1417 if (!desc->charger_regulators[i].externally_control || charger_manager_register_sysfs()
1447 struct charger_desc *desc = cm->desc; cm_init_thermal_data() local
1459 cm->desc->measure_battery_temp = true; cm_init_thermal_data()
1462 if (ret && desc->thermal_zone) { cm_init_thermal_data()
1464 thermal_zone_get_zone_by_name(desc->thermal_zone); cm_init_thermal_data()
1472 cm->desc->measure_battery_temp = true; cm_init_thermal_data()
1476 if (cm->desc->measure_battery_temp) { cm_init_thermal_data()
1478 if (!desc->temp_max) cm_init_thermal_data()
1479 desc->temp_max = CM_DEFAULT_CHARGE_TEMP_MAX; cm_init_thermal_data()
1480 if (!desc->temp_diff) cm_init_thermal_data()
1481 desc->temp_diff = CM_DEFAULT_RECHARGE_TEMP_DIFF; cm_init_thermal_data()
1496 struct charger_desc *desc; of_cm_parse_desc() local
1502 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); of_cm_parse_desc()
1503 if (!desc) of_cm_parse_desc()
1506 of_property_read_string(np, "cm-name", &desc->psy_name); of_cm_parse_desc()
1509 desc->polling_mode = poll_mode; of_cm_parse_desc()
1512 &desc->polling_interval_ms); of_cm_parse_desc()
1515 &desc->fullbatt_vchkdrop_ms); of_cm_parse_desc()
1517 &desc->fullbatt_vchkdrop_uV); of_cm_parse_desc()
1518 of_property_read_u32(np, "cm-fullbatt-voltage", &desc->fullbatt_uV); of_cm_parse_desc()
1519 of_property_read_u32(np, "cm-fullbatt-soc", &desc->fullbatt_soc); of_cm_parse_desc()
1521 &desc->fullbatt_full_capacity); of_cm_parse_desc()
1524 desc->battery_present = battery_stat; of_cm_parse_desc()
1530 desc->psy_charger_stat = devm_kzalloc(dev, sizeof(char *) of_cm_parse_desc()
1532 if (desc->psy_charger_stat) { of_cm_parse_desc()
1536 i, &desc->psy_charger_stat[i]); of_cm_parse_desc()
1542 of_property_read_string(np, "cm-fuel-gauge", &desc->psy_fuel_gauge); of_cm_parse_desc()
1544 of_property_read_string(np, "cm-thermal-zone", &desc->thermal_zone); of_cm_parse_desc()
1546 of_property_read_u32(np, "cm-battery-cold", &desc->temp_min); of_cm_parse_desc()
1548 desc->temp_min *= -1; of_cm_parse_desc()
1549 of_property_read_u32(np, "cm-battery-hot", &desc->temp_max); of_cm_parse_desc()
1550 of_property_read_u32(np, "cm-battery-temp-diff", &desc->temp_diff); of_cm_parse_desc()
1553 &desc->charging_max_duration_ms); of_cm_parse_desc()
1555 &desc->discharging_max_duration_ms); of_cm_parse_desc()
1558 desc->num_charger_regulators = of_get_child_count(np); of_cm_parse_desc()
1559 if (desc->num_charger_regulators) { of_cm_parse_desc()
1564 * desc->num_charger_regulators, of_cm_parse_desc()
1569 desc->charger_regulators = chg_regs; of_cm_parse_desc()
1607 return desc;
1625 struct charger_desc *desc = cm_get_drv_data(pdev); charger_manager_probe() local
1633 if (IS_ERR(desc)) { charger_manager_probe()
1634 dev_err(&pdev->dev, "No platform data (desc) found\n"); charger_manager_probe()
1645 cm->desc = desc; charger_manager_probe()
1658 if (desc->fullbatt_uV == 0) { charger_manager_probe()
1661 if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) { charger_manager_probe()
1663 desc->fullbatt_vchkdrop_ms = 0; charger_manager_probe()
1664 desc->fullbatt_vchkdrop_uV = 0; charger_manager_probe()
1666 if (desc->fullbatt_soc == 0) { charger_manager_probe()
1669 if (desc->fullbatt_full_capacity == 0) { charger_manager_probe()
1673 if (!desc->charger_regulators || desc->num_charger_regulators < 1) { charger_manager_probe()
1678 if (!desc->psy_charger_stat || !desc->psy_charger_stat[0]) { charger_manager_probe()
1683 if (!desc->psy_fuel_gauge) { charger_manager_probe()
1689 while (desc->psy_charger_stat[i]) charger_manager_probe()
1693 for (i = 0; desc->psy_charger_stat[i]; i++) { charger_manager_probe()
1696 psy = power_supply_get_by_name(desc->psy_charger_stat[i]); charger_manager_probe()
1699 desc->psy_charger_stat[i]); charger_manager_probe()
1705 if (desc->polling_interval_ms == 0 || charger_manager_probe()
1706 msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) { charger_manager_probe()
1711 if (!desc->charging_max_duration_ms || charger_manager_probe()
1712 !desc->discharging_max_duration_ms) { charger_manager_probe()
1714 desc->charging_max_duration_ms = 0; charger_manager_probe()
1715 desc->discharging_max_duration_ms = 0; charger_manager_probe()
1722 if (!desc->psy_name) charger_manager_probe()
1725 strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX); charger_manager_probe()
1742 fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge); charger_manager_probe()
1745 desc->psy_fuel_gauge); charger_manager_probe()
1765 cm->desc->measure_battery_temp = false; charger_manager_probe()
1818 for (i = 0; i < desc->num_charger_regulators; i++) { charger_manager_probe()
1821 charger = &desc->charger_regulators[i]; charger_manager_probe()
1826 for (i = 0; i < desc->num_charger_regulators; i++) { charger_manager_probe()
1829 charger = &desc->charger_regulators[i]; charger_manager_probe()
1837 regulator_put(desc->charger_regulators[i].consumer); charger_manager_probe()
1848 struct charger_desc *desc = cm->desc; charger_manager_remove() local
1860 for (i = 0 ; i < desc->num_charger_regulators ; i++) { charger_manager_remove()
1862 = &desc->charger_regulators[i]; charger_manager_remove()
1869 for (i = 0 ; i < desc->num_charger_regulators ; i++) charger_manager_remove()
1870 regulator_put(desc->charger_regulators[i].consumer); charger_manager_remove()
2030 for (i = 0; cm->desc->psy_charger_stat[i]; i++) { find_power_supply()
2031 if (!strcmp(psy->desc->name, cm->desc->psy_charger_stat[i])) { find_power_supply()
power_supply_leds.c 28 if (psy->desc->get_property(psy, POWER_SUPPLY_PROP_STATUS, &status)) power_supply_update_bat_leds()
61 "%s-charging-or-full", psy->desc->name); power_supply_create_bat_triggers()
66 "%s-charging", psy->desc->name); power_supply_create_bat_triggers()
70 psy->full_trig_name = kasprintf(GFP_KERNEL, "%s-full", psy->desc->name); power_supply_create_bat_triggers()
75 "%s-charging-blink-full-solid", psy->desc->name); power_supply_create_bat_triggers()
118 if (psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &online)) power_supply_update_gen_leds()
132 psy->desc->name); power_supply_create_gen_triggers()
151 if (psy->desc->type == POWER_SUPPLY_TYPE_BATTERY) power_supply_update_leds()
159 if (psy->desc->type == POWER_SUPPLY_TYPE_BATTERY) power_supply_create_triggers()
166 if (psy->desc->type == POWER_SUPPLY_TYPE_BATTERY) power_supply_remove_triggers()
power_supply_core.c 45 if (!supplier->desc->name) __power_supply_is_supplied_by()
48 if (!strcmp(supplier->desc->name, supply->supplied_from[i])) __power_supply_is_supplied_by()
51 if (!supply->desc->name) __power_supply_is_supplied_by()
54 if (!strcmp(supplier->supplied_to[i], supply->desc->name)) __power_supply_is_supplied_by()
67 if (pst->desc->external_power_changed) __power_supply_changed_work()
68 pst->desc->external_power_changed(pst); __power_supply_changed_work()
168 psy->desc->name, epsy->desc->name); __power_supply_populate_supplied_from()
169 psy->supplied_from[i-1] = (char *)epsy->desc->name; __power_supply_populate_supplied_from()
290 if (!epsy->desc->get_property(epsy, POWER_SUPPLY_PROP_ONLINE, __power_supply_am_i_supplied()
317 if (psy->desc->type != POWER_SUPPLY_TYPE_BATTERY) __power_supply_is_system_supplied()
318 if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE, __power_supply_is_system_supplied()
347 psy->desc->type == POWER_SUPPLY_TYPE_BATTERY && power_supply_set_battery_charged()
348 psy->desc->set_charged) { power_supply_set_battery_charged()
349 psy->desc->set_charged(psy); power_supply_set_battery_charged()
362 return strcmp(psy->desc->name, name) == 0; power_supply_match_device_by_name()
458 return psy->desc->get_property(psy, psp, val); power_supply_get_property()
466 if (atomic_read(&psy->use_cnt) <= 0 || !psy->desc->set_property) power_supply_set_property()
469 return psy->desc->set_property(psy, psp, val); power_supply_set_property()
477 !psy->desc->property_is_writeable) power_supply_property_is_writeable()
480 return psy->desc->property_is_writeable(psy, psp); power_supply_property_is_writeable()
487 !psy->desc->external_power_changed) power_supply_external_power_changed()
490 psy->desc->external_power_changed(psy); power_supply_external_power_changed()
529 ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val); power_supply_read_temp()
546 if (psy->desc->no_thermal) psy_register_thermal()
550 for (i = 0; i < psy->desc->num_properties; i++) { psy_register_thermal()
551 if (psy->desc->properties[i] == POWER_SUPPLY_PROP_TEMP) { psy_register_thermal()
552 psy->tzd = thermal_zone_device_register(psy->desc->name, psy_register_thermal()
576 ret = psy->desc->get_property(psy, ps_get_max_charge_cntl_limit()
592 ret = psy->desc->get_property(psy, ps_get_cur_chrage_cntl_limit()
609 ret = psy->desc->set_property(psy, ps_set_cur_charge_cntl_limit()
626 for (i = 0; i < psy->desc->num_properties; i++) { psy_register_cooler()
627 if (psy->desc->properties[i] == psy_register_cooler()
630 (char *)psy->desc->name, psy_register_cooler()
666 const struct power_supply_desc *desc, __power_supply_register()
676 __func__, desc->name); __power_supply_register()
691 psy->desc = desc; __power_supply_register()
699 rc = dev_set_name(dev, "%s", desc->name); __power_supply_register()
769 * @desc: Description of power supply, must be valid through whole
780 const struct power_supply_desc *desc, power_supply_register()
783 return __power_supply_register(parent, desc, cfg, true); power_supply_register()
791 * @desc: Description of power supply, must be valid through whole
803 const struct power_supply_desc *desc, power_supply_register_no_ws()
806 return __power_supply_register(parent, desc, cfg, false); power_supply_register_no_ws()
821 * @desc: Description of power supply, must be valid through whole
833 const struct power_supply_desc *desc, devm_power_supply_register()
842 psy = __power_supply_register(parent, desc, cfg, true); devm_power_supply_register()
857 * @desc: Description of power supply, must be valid through whole
869 const struct power_supply_desc *desc, devm_power_supply_register_no_ws()
878 psy = __power_supply_register(parent, desc, cfg, false); devm_power_supply_register_no_ws()
665 __power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg, bool ws) __power_supply_register() argument
779 power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg) power_supply_register() argument
802 power_supply_register_no_ws(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg) power_supply_register_no_ws() argument
832 devm_power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg) devm_power_supply_register() argument
868 devm_power_supply_register_no_ws(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg) devm_power_supply_register_no_ws() argument
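Throughout this file the driver-supplied half of a power supply is the const power_supply_desc (name, type, property table, callbacks), while per-instance wiring travels separately in power_supply_config. A hedged registration sketch; the property set and names are illustrative:

    #include <linux/power_supply.h>

    static enum power_supply_property demo_props[] = {
            POWER_SUPPLY_PROP_ONLINE,
    };

    static int demo_get_property(struct power_supply *psy,
                                 enum power_supply_property psp,
                                 union power_supply_propval *val)
    {
            val->intval = 1;        /* placeholder reading */
            return 0;
    }

    static const struct power_supply_desc demo_desc = {
            .name           = "demo-ac",    /* illustrative */
            .type           = POWER_SUPPLY_TYPE_MAINS,
            .properties     = demo_props,
            .num_properties = ARRAY_SIZE(demo_props),
            .get_property   = demo_get_property,
    };

    static int demo_register(struct device *parent)
    {
            struct power_supply_config cfg = {};
            struct power_supply *psy;

            psy = power_supply_register(parent, &demo_desc, &cfg);
            return PTR_ERR_OR_ZERO(psy);
    }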
/linux-4.1.27/drivers/crypto/
padlock-sha.c 36 static int padlock_sha_init(struct shash_desc *desc) padlock_sha_init() argument
38 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha_init()
39 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); padlock_sha_init()
42 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; padlock_sha_init()
46 static int padlock_sha_update(struct shash_desc *desc, padlock_sha_update() argument
49 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha_update()
51 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; padlock_sha_update()
55 static int padlock_sha_export(struct shash_desc *desc, void *out) padlock_sha_export() argument
57 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha_export()
62 static int padlock_sha_import(struct shash_desc *desc, const void *in) padlock_sha_import() argument
64 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha_import()
65 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); padlock_sha_import()
68 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; padlock_sha_import()
79 static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in, padlock_sha1_finup() argument
88 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha1_finup()
95 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; padlock_sha1_finup()
138 static int padlock_sha1_final(struct shash_desc *desc, u8 *out) padlock_sha1_final() argument
142 return padlock_sha1_finup(desc, buf, 0, out); padlock_sha1_final()
145 static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, padlock_sha256_finup() argument
154 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha256_finup()
161 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; padlock_sha256_finup()
204 static int padlock_sha256_final(struct shash_desc *desc, u8 *out) padlock_sha256_final() argument
208 return padlock_sha256_finup(desc, buf, 0, out); padlock_sha256_final()
294 static int padlock_sha1_init_nano(struct shash_desc *desc) padlock_sha1_init_nano() argument
296 struct sha1_state *sctx = shash_desc_ctx(desc); padlock_sha1_init_nano()
305 static int padlock_sha1_update_nano(struct shash_desc *desc, padlock_sha1_update_nano() argument
308 struct sha1_state *sctx = shash_desc_ctx(desc); padlock_sha1_update_nano()
359 static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out) padlock_sha1_final_nano() argument
361 struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc); padlock_sha1_final_nano()
371 padlock_sha1_update_nano(desc, padding, padlen); padlock_sha1_final_nano()
374 padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits)); padlock_sha1_final_nano()
382 static int padlock_sha256_init_nano(struct shash_desc *desc) padlock_sha256_init_nano() argument
384 struct sha256_state *sctx = shash_desc_ctx(desc); padlock_sha256_init_nano()
394 static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data, padlock_sha256_update_nano() argument
397 struct sha256_state *sctx = shash_desc_ctx(desc); padlock_sha256_update_nano()
448 static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out) padlock_sha256_final_nano() argument
451 (struct sha256_state *)shash_desc_ctx(desc); padlock_sha256_final_nano()
461 padlock_sha256_update_nano(desc, padding, padlen); padlock_sha256_final_nano()
464 padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits)); padlock_sha256_final_nano()
472 static int padlock_sha_export_nano(struct shash_desc *desc, padlock_sha_export_nano() argument
475 int statesize = crypto_shash_statesize(desc->tfm); padlock_sha_export_nano()
476 void *sctx = shash_desc_ctx(desc); padlock_sha_export_nano()
482 static int padlock_sha_import_nano(struct shash_desc *desc, padlock_sha_import_nano() argument
485 int statesize = crypto_shash_statesize(desc->tfm); padlock_sha_import_nano()
486 void *sctx = shash_desc_ctx(desc); padlock_sha_import_nano()
geode-aes.c 182 static int fallback_blk_dec(struct blkcipher_desc *desc, fallback_blk_dec() argument
188 struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); fallback_blk_dec()
190 tfm = desc->tfm; fallback_blk_dec()
191 desc->tfm = op->fallback.blk; fallback_blk_dec()
193 ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); fallback_blk_dec()
195 desc->tfm = tfm; fallback_blk_dec()
198 static int fallback_blk_enc(struct blkcipher_desc *desc, fallback_blk_enc() argument
204 struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); fallback_blk_enc()
206 tfm = desc->tfm; fallback_blk_enc()
207 desc->tfm = op->fallback.blk; fallback_blk_enc()
209 ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); fallback_blk_enc()
211 desc->tfm = tfm; fallback_blk_enc()
304 geode_cbc_decrypt(struct blkcipher_desc *desc, geode_cbc_decrypt() argument
308 struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); geode_cbc_decrypt()
313 return fallback_blk_dec(desc, dst, src, nbytes); geode_cbc_decrypt()
316 err = blkcipher_walk_virt(desc, &walk); geode_cbc_decrypt()
329 err = blkcipher_walk_done(desc, &walk, nbytes); geode_cbc_decrypt()
336 geode_cbc_encrypt(struct blkcipher_desc *desc, geode_cbc_encrypt() argument
340 struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); geode_cbc_encrypt()
345 return fallback_blk_enc(desc, dst, src, nbytes); geode_cbc_encrypt()
348 err = blkcipher_walk_virt(desc, &walk); geode_cbc_encrypt()
360 err = blkcipher_walk_done(desc, &walk, nbytes); geode_cbc_encrypt()
417 geode_ecb_decrypt(struct blkcipher_desc *desc, geode_ecb_decrypt() argument
421 struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); geode_ecb_decrypt()
426 return fallback_blk_dec(desc, dst, src, nbytes); geode_ecb_decrypt()
429 err = blkcipher_walk_virt(desc, &walk); geode_ecb_decrypt()
440 err = blkcipher_walk_done(desc, &walk, nbytes); geode_ecb_decrypt()
447 geode_ecb_encrypt(struct blkcipher_desc *desc, geode_ecb_encrypt() argument
451 struct geode_aes_op *op = crypto_blkcipher_ctx(desc->tfm); geode_ecb_encrypt()
456 return fallback_blk_enc(desc, dst, src, nbytes); geode_ecb_encrypt()
459 err = blkcipher_walk_virt(desc, &walk); geode_ecb_encrypt()
470 ret = blkcipher_walk_done(desc, &walk, nbytes); geode_ecb_encrypt()
/linux-4.1.27/drivers/net/ethernet/amd/xgbe/
Makefile 4 xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
/linux-4.1.27/drivers/net/wireless/ti/wlcore/
rx.c 60 struct wl1271_rx_descriptor *desc, wl1271_rx_status()
66 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG) wl1271_rx_status()
71 status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band); wl1271_rx_status()
74 if (desc->rate <= wl->hw_min_ht_rate) wl1271_rx_status()
77 status->signal = desc->rssi; wl1271_rx_status()
84 wl->noise = desc->rssi - (desc->snr >> 1); wl1271_rx_status()
86 status->freq = ieee80211_channel_to_frequency(desc->channel, wl1271_rx_status()
89 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { wl1271_rx_status()
90 u8 desc_err_code = desc->status & WL1271_RX_DESC_STATUS_MASK; wl1271_rx_status()
103 wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel, wl1271_rx_status()
110 struct wl1271_rx_descriptor *desc; wl1271_rx_handle_data() local
140 desc = (struct wl1271_rx_descriptor *) data; wl1271_rx_handle_data()
142 if (desc->packet_class == WL12XX_RX_CLASS_LOGGER) { wl1271_rx_handle_data()
143 size_t len = length - sizeof(*desc); wl1271_rx_handle_data()
144 wl12xx_copy_fwlog(wl, data + sizeof(*desc), len); wl1271_rx_handle_data()
150 if (desc->status & WL1271_RX_DESC_DECRYPT_FAIL) { wl1271_rx_handle_data()
151 hdr = (void *)(data + sizeof(*desc) + offset_to_data); wl1271_rx_handle_data()
153 desc->status & WL1271_RX_DESC_STATUS_MASK, wl1271_rx_handle_data()
155 wl1271_dump((DEBUG_RX|DEBUG_CMD), "PKT: ", data + sizeof(*desc), wl1271_rx_handle_data()
179 memcpy(buf, data + sizeof(*desc), pkt_data_len); wl1271_rx_handle_data()
183 *hlid = desc->hlid; wl1271_rx_handle_data()
191 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); wl1271_rx_handle_data()
192 wlcore_hw_set_rx_csum(wl, desc, skb); wl1271_rx_handle_data()
196 skb->len - desc->pad_len, wl1271_rx_handle_data()
59 wl1271_rx_status(struct wl1271 *wl, struct wl1271_rx_descriptor *desc, struct ieee80211_rx_status *status, u8 beacon) wl1271_rx_status() argument
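wl1271_rx_status() above translates the firmware's rx descriptor into mac80211's per-frame ieee80211_rx_status. A reduced sketch of the field mapping, keeping only the fields visible in the hits and hard-coding the 2.4 GHz band for brevity:

    #include <net/mac80211.h>

    /* Sketch: map a few firmware rx-descriptor fields (as listed above)
     * onto mac80211's per-frame status. */
    static void demo_fill_rx_status(struct ieee80211_rx_status *status,
                                    int rssi, int channel)
    {
            status->band = IEEE80211_BAND_2GHZ;     /* simplified */
            status->signal = rssi;
            status->freq = ieee80211_channel_to_frequency(channel,
                                                          status->band);
    }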
/linux-4.1.27/drivers/clk/mvebu/
common.c 110 const struct coreclk_soc_desc *desc) mvebu_coreclk_setup()
123 clk_data.clk_num = 2 + desc->num_ratios; mvebu_coreclk_setup()
126 if (desc->get_refclk_freq) mvebu_coreclk_setup()
139 rate = desc->get_tclk_freq(base); mvebu_coreclk_setup()
147 rate = desc->get_cpu_freq(base); mvebu_coreclk_setup()
149 if (desc->is_sscg_enabled && desc->fix_sscg_deviation mvebu_coreclk_setup()
150 && desc->is_sscg_enabled(base)) mvebu_coreclk_setup()
151 rate = desc->fix_sscg_deviation(rate); mvebu_coreclk_setup()
158 for (n = 0; n < desc->num_ratios; n++) { mvebu_coreclk_setup()
159 const char *rclk_name = desc->ratios[n].name; mvebu_coreclk_setup()
164 desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div); mvebu_coreclk_setup()
171 if (desc->get_refclk_freq) { mvebu_coreclk_setup()
174 2 + desc->num_ratios, &name); mvebu_coreclk_setup()
175 rate = desc->get_refclk_freq(base); mvebu_coreclk_setup()
176 clk_data.clks[2 + desc->num_ratios] = mvebu_coreclk_setup()
179 WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios])); mvebu_coreclk_setup()
240 const struct clk_gating_soc_desc *desc) mvebu_clk_gating_setup()
272 for (n = 0; desc[n].name;) mvebu_clk_gating_setup()
283 (desc[n].parent) ? desc[n].parent : default_parent; mvebu_clk_gating_setup()
284 ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent, mvebu_clk_gating_setup()
285 desc[n].flags, base, desc[n].bit_idx, mvebu_clk_gating_setup()
109 mvebu_coreclk_setup(struct device_node *np, const struct coreclk_soc_desc *desc) mvebu_coreclk_setup() argument
239 mvebu_clk_gating_setup(struct device_node *np, const struct clk_gating_soc_desc *desc) mvebu_clk_gating_setup() argument
clk-corediv.c 58 const struct clk_corediv_desc *desc; member in struct:clk_corediv
80 const struct clk_corediv_desc *desc = corediv->desc; clk_corediv_is_enabled() local
81 u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset; clk_corediv_is_enabled()
90 const struct clk_corediv_desc *desc = corediv->desc; clk_corediv_enable() local
97 reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset); clk_corediv_enable()
109 const struct clk_corediv_desc *desc = corediv->desc; clk_corediv_disable() local
116 reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset); clk_corediv_disable()
127 const struct clk_corediv_desc *desc = corediv->desc; clk_corediv_recalc_rate() local
131 div = (reg >> desc->offset) & desc->mask; clk_corediv_recalc_rate()
155 const struct clk_corediv_desc *desc = corediv->desc; clk_corediv_set_rate() local
165 reg &= ~(desc->mask << desc->offset); clk_corediv_set_rate()
166 reg |= (div & desc->mask) << desc->offset; clk_corediv_set_rate()
170 reg = readl(corediv->reg) | BIT(desc->fieldbit); clk_corediv_set_rate()
278 corediv[i].desc = soc_desc->descs + i; mvebu_corediv_clk_init()
/linux-4.1.27/drivers/platform/olpc/
olpc-ec.c 69 struct ec_cmd_desc *desc = NULL; olpc_ec_worker() local
75 desc = list_first_entry(&ec->cmd_q, struct ec_cmd_desc, node); olpc_ec_worker()
76 list_del(&desc->node); olpc_ec_worker()
81 if (!desc) olpc_ec_worker()
86 desc->err = ec_driver->ec_cmd(desc->cmd, desc->inbuf, desc->inlen, olpc_ec_worker()
87 desc->outbuf, desc->outlen, ec_cb_arg); olpc_ec_worker()
91 complete(&desc->finished); olpc_ec_worker()
101 static void queue_ec_descriptor(struct ec_cmd_desc *desc, queue_ec_descriptor() argument
106 INIT_LIST_HEAD(&desc->node); queue_ec_descriptor()
109 list_add_tail(&desc->node, &ec->cmd_q); queue_ec_descriptor()
118 struct ec_cmd_desc desc; olpc_ec_cmd() local
133 desc.cmd = cmd; olpc_ec_cmd()
134 desc.inbuf = inbuf; olpc_ec_cmd()
135 desc.outbuf = outbuf; olpc_ec_cmd()
136 desc.inlen = inlen; olpc_ec_cmd()
137 desc.outlen = outlen; olpc_ec_cmd()
138 desc.err = 0; olpc_ec_cmd()
139 init_completion(&desc.finished); olpc_ec_cmd()
141 queue_ec_descriptor(&desc, ec); olpc_ec_cmd()
144 wait_for_completion(&desc.finished); olpc_ec_cmd()
147 return desc.err; olpc_ec_cmd()
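
The olpc-ec hits above trace a common kernel shape: olpc_ec_cmd() stack-allocates a command descriptor, queues it with queue_ec_descriptor(), and blocks on a completion while olpc_ec_worker() drains the queue and stores the result in desc->err. A minimal single-threaded userspace sketch of that descriptor-queue flow (struct names and the fake command are hypothetical; the done flag stands in for the kernel completion):

    #include <stdio.h>
    #include <stddef.h>

    struct ec_cmd {                          /* stands in for struct ec_cmd_desc */
        int cmd;
        int err;
        int done;                            /* stands in for struct completion */
        struct ec_cmd *next;
    };

    static struct ec_cmd *q_head, *q_tail;

    static void queue_cmd(struct ec_cmd *c)  /* queue_ec_descriptor() analogue */
    {
        c->next = NULL;
        if (q_tail)
            q_tail->next = c;
        else
            q_head = c;
        q_tail = c;
    }

    static void worker(void)                 /* olpc_ec_worker() analogue */
    {
        while (q_head) {
            struct ec_cmd *c = q_head;
            q_head = c->next;
            if (!q_head)
                q_tail = NULL;
            c->err = (c->cmd == 0x42) ? 0 : -1;  /* "run" the command */
            c->done = 1;                         /* complete(&desc->finished) */
        }
    }

    int main(void)
    {
        struct ec_cmd c = { .cmd = 0x42 };   /* stack descriptor, as in olpc_ec_cmd() */

        queue_cmd(&c);
        worker();                            /* the kernel runs this asynchronously */
        printf("err=%d done=%d\n", c.err, c.done);
        return 0;
    }
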
/linux-4.1.27/arch/sh/boards/mach-x3proto/
H A D setup.c
131 .desc = "key44",
136 .desc = "key43",
141 .desc = "key42",
145 .desc = "key41",
149 .desc = "key34",
153 .desc = "key33",
157 .desc = "key32",
161 .desc = "key31",
165 .desc = "key24",
169 .desc = "key23",
173 .desc = "key22",
177 .desc = "key21",
181 .desc = "key14",
185 .desc = "key13",
189 .desc = "key12",
193 .desc = "key11",
/linux-4.1.27/sound/usb/
H A D helper.h
21 #define get_iface_desc(iface) (&(iface)->desc)
22 #define get_endpoint(alt,ep) (&(alt)->endpoint[ep].desc)
23 #define get_ep_desc(ep) (&(ep)->desc)
24 #define get_cfg_desc(cfg) (&(cfg)->desc)
/linux-4.1.27/drivers/staging/lustre/lustre/libcfs/linux/
H A D linux-crypto-adler.c
67 static int adler32_init(struct shash_desc *desc) adler32_init() argument
69 u32 *mctx = crypto_shash_ctx(desc->tfm); adler32_init()
70 u32 *cksump = shash_desc_ctx(desc); adler32_init()
77 static int adler32_update(struct shash_desc *desc, const u8 *data, adler32_update() argument
80 u32 *cksump = shash_desc_ctx(desc); adler32_update()
92 static int adler32_finup(struct shash_desc *desc, const u8 *data, adler32_finup() argument
95 return __adler32_finup(shash_desc_ctx(desc), data, len, out); adler32_finup()
98 static int adler32_final(struct shash_desc *desc, u8 *out) adler32_final() argument
100 u32 *cksump = shash_desc_ctx(desc); adler32_final()
106 static int adler32_digest(struct shash_desc *desc, const u8 *data, adler32_digest() argument
109 return __adler32_finup(crypto_shash_ctx(desc->tfm), data, len, adler32_digest()
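
These shash callbacks follow the crypto API's init/update/final split, with all rolling state living in the per-request context returned by shash_desc_ctx(desc). Adler-32 itself is small enough to show whole; a self-contained userspace version with the same three-phase shape (the context struct here is illustrative, not the kernel's):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define ADLER_MOD 65521u

    struct adler_desc { uint32_t state; };   /* plays the role of shash_desc_ctx */

    static void adler_init(struct adler_desc *d)
    {
        d->state = 1;                        /* Adler-32 starts at a=1, b=0 */
    }

    static void adler_update(struct adler_desc *d, const uint8_t *data, size_t len)
    {
        uint32_t a = d->state & 0xffff, b = d->state >> 16;

        while (len--) {
            a = (a + *data++) % ADLER_MOD;
            b = (b + a) % ADLER_MOD;
        }
        d->state = (b << 16) | a;
    }

    static uint32_t adler_final(struct adler_desc *d)
    {
        return d->state;
    }

    int main(void)
    {
        struct adler_desc d;
        const char *msg = "Wikipedia";

        adler_init(&d);
        adler_update(&d, (const uint8_t *)msg, strlen(msg));
        printf("adler32 = 0x%08x\n", adler_final(&d));  /* expect 0x11e60398 */
        return 0;
    }
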
/linux-4.1.27/drivers/gpu/drm/atmel-hlcdc/
H A D atmel_hlcdc_layer.c
75 bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs); atmel_hlcdc_layer_update_reset()
77 sizeof(*slot->configs) * layer->desc->nconfigs); atmel_hlcdc_layer_update_reset()
88 const struct atmel_hlcdc_layer_desc *desc = layer->desc; atmel_hlcdc_layer_update_apply() local
103 for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) { atmel_hlcdc_layer_update_apply()
105 desc->regs_offset + atmel_hlcdc_layer_update_apply()
125 desc->regs_offset + atmel_hlcdc_layer_update_apply()
129 desc->regs_offset + atmel_hlcdc_layer_update_apply()
133 desc->regs_offset + atmel_hlcdc_layer_update_apply()
149 desc->regs_offset + atmel_hlcdc_layer_update_apply()
169 desc->regs_offset + ATMEL_HLCDC_LAYER_CHER, atmel_hlcdc_layer_update_apply()
180 const struct atmel_hlcdc_layer_desc *desc = layer->desc; atmel_hlcdc_layer_irq() local
191 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr); atmel_hlcdc_layer_irq()
192 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr); atmel_hlcdc_layer_irq()
277 desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, atmel_hlcdc_layer_irq()
306 const struct atmel_hlcdc_layer_desc *desc = layer->desc; atmel_hlcdc_layer_disable() local
313 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, atmel_hlcdc_layer_disable()
318 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr); atmel_hlcdc_layer_disable()
398 layer->desc->nconfigs * sizeof(u32)); atmel_hlcdc_layer_update_start()
401 DIV_ROUND_UP(layer->desc->nconfigs, atmel_hlcdc_layer_update_start()
414 layer->desc->regs_offset + atmel_hlcdc_layer_update_start()
417 layer->desc->nconfigs); atmel_hlcdc_layer_update_start()
486 if (cfg >= layer->desc->nconfigs) atmel_hlcdc_layer_update_cfg()
569 const struct atmel_hlcdc_layer_desc *desc) atmel_hlcdc_layer_update_init()
576 updated_size = DIV_ROUND_UP(desc->nconfigs, atmel_hlcdc_layer_update_init()
581 ((desc->nconfigs * sizeof(u32)) + atmel_hlcdc_layer_update_init()
591 buffer += desc->nconfigs * sizeof(u32); atmel_hlcdc_layer_update_init()
602 const struct atmel_hlcdc_layer_desc *desc) atmel_hlcdc_layer_init()
612 layer->desc = desc; atmel_hlcdc_layer_init()
614 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, atmel_hlcdc_layer_init()
616 for (i = 0; i < desc->formats->nformats; i++) { atmel_hlcdc_layer_init()
617 int nplanes = drm_format_num_planes(desc->formats->formats[i]); atmel_hlcdc_layer_init()
624 drm_flip_work_init(&layer->gc, desc->name, atmel_hlcdc_layer_init()
630 ret = atmel_hlcdc_layer_update_init(dev, layer, desc); atmel_hlcdc_layer_init()
635 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR, atmel_hlcdc_layer_init()
637 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, atmel_hlcdc_layer_init()
648 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp); atmel_hlcdc_layer_init()
656 const struct atmel_hlcdc_layer_desc *desc = layer->desc; atmel_hlcdc_layer_cleanup() local
659 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR, atmel_hlcdc_layer_cleanup()
661 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, atmel_hlcdc_layer_cleanup()
567 atmel_hlcdc_layer_update_init(struct drm_device *dev, struct atmel_hlcdc_layer *layer, const struct atmel_hlcdc_layer_desc *desc) atmel_hlcdc_layer_update_init() argument
600 atmel_hlcdc_layer_init(struct drm_device *dev, struct atmel_hlcdc_layer *layer, const struct atmel_hlcdc_layer_desc *desc) atmel_hlcdc_layer_init() argument
/linux-4.1.27/arch/sparc/crypto/
H A D crc32c_glue.c
43 static int crc32c_sparc64_init(struct shash_desc *desc) crc32c_sparc64_init() argument
45 u32 *mctx = crypto_shash_ctx(desc->tfm); crc32c_sparc64_init()
46 u32 *crcp = shash_desc_ctx(desc); crc32c_sparc64_init()
69 static int crc32c_sparc64_update(struct shash_desc *desc, const u8 *data, crc32c_sparc64_update() argument
72 u32 *crcp = shash_desc_ctx(desc); crc32c_sparc64_update()
90 static int crc32c_sparc64_finup(struct shash_desc *desc, const u8 *data, crc32c_sparc64_finup() argument
93 return __crc32c_sparc64_finup(shash_desc_ctx(desc), data, len, out); crc32c_sparc64_finup()
96 static int crc32c_sparc64_final(struct shash_desc *desc, u8 *out) crc32c_sparc64_final() argument
98 u32 *crcp = shash_desc_ctx(desc); crc32c_sparc64_final()
104 static int crc32c_sparc64_digest(struct shash_desc *desc, const u8 *data, crc32c_sparc64_digest() argument
107 return __crc32c_sparc64_finup(crypto_shash_ctx(desc->tfm), data, len, crc32c_sparc64_digest()
H A D sha256_glue.c
29 static int sha224_sparc64_init(struct shash_desc *desc) sha224_sparc64_init() argument
31 struct sha256_state *sctx = shash_desc_ctx(desc); sha224_sparc64_init()
45 static int sha256_sparc64_init(struct shash_desc *desc) sha256_sparc64_init() argument
47 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_init()
82 static int sha256_sparc64_update(struct shash_desc *desc, const u8 *data, sha256_sparc64_update() argument
85 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_update()
98 static int sha256_sparc64_final(struct shash_desc *desc, u8 *out) sha256_sparc64_final() argument
100 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_final()
131 static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash) sha224_sparc64_final() argument
135 sha256_sparc64_final(desc, D); sha224_sparc64_final()
143 static int sha256_sparc64_export(struct shash_desc *desc, void *out) sha256_sparc64_export() argument
145 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_export()
151 static int sha256_sparc64_import(struct shash_desc *desc, const void *in) sha256_sparc64_import() argument
153 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_import()
H A D des_glue.c
93 static int __ecb_crypt(struct blkcipher_desc *desc, __ecb_crypt() argument
97 struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ecb_crypt()
102 err = blkcipher_walk_virt(desc, &walk); __ecb_crypt()
103 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; __ecb_crypt()
118 err = blkcipher_walk_done(desc, &walk, nbytes); __ecb_crypt()
124 static int ecb_encrypt(struct blkcipher_desc *desc, ecb_encrypt() argument
128 return __ecb_crypt(desc, dst, src, nbytes, true); ecb_encrypt()
131 static int ecb_decrypt(struct blkcipher_desc *desc, ecb_decrypt() argument
135 return __ecb_crypt(desc, dst, src, nbytes, false); ecb_decrypt()
141 static int cbc_encrypt(struct blkcipher_desc *desc, cbc_encrypt() argument
145 struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_encrypt()
150 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
151 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_encrypt()
163 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_encrypt()
172 static int cbc_decrypt(struct blkcipher_desc *desc, cbc_decrypt() argument
176 struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_decrypt()
181 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
182 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_decrypt()
194 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_decrypt()
261 static int __ecb3_crypt(struct blkcipher_desc *desc, __ecb3_crypt() argument
265 struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ecb3_crypt()
271 err = blkcipher_walk_virt(desc, &walk); __ecb3_crypt()
272 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; __ecb3_crypt()
289 err = blkcipher_walk_done(desc, &walk, nbytes); __ecb3_crypt()
295 static int ecb3_encrypt(struct blkcipher_desc *desc, ecb3_encrypt() argument
299 return __ecb3_crypt(desc, dst, src, nbytes, true); ecb3_encrypt()
302 static int ecb3_decrypt(struct blkcipher_desc *desc, ecb3_decrypt() argument
306 return __ecb3_crypt(desc, dst, src, nbytes, false); ecb3_decrypt()
313 static int cbc3_encrypt(struct blkcipher_desc *desc, cbc3_encrypt() argument
317 struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc3_encrypt()
323 err = blkcipher_walk_virt(desc, &walk); cbc3_encrypt()
324 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc3_encrypt()
339 err = blkcipher_walk_done(desc, &walk, nbytes); cbc3_encrypt()
349 static int cbc3_decrypt(struct blkcipher_desc *desc, cbc3_decrypt() argument
353 struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc3_decrypt()
359 err = blkcipher_walk_virt(desc, &walk); cbc3_decrypt()
360 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc3_decrypt()
375 err = blkcipher_walk_done(desc, &walk, nbytes); cbc3_decrypt()
H A D camellia_glue.c
85 static int __ecb_crypt(struct blkcipher_desc *desc, __ecb_crypt() argument
89 struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ecb_crypt()
100 err = blkcipher_walk_virt(desc, &walk); __ecb_crypt()
101 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; __ecb_crypt()
120 err = blkcipher_walk_done(desc, &walk, nbytes); __ecb_crypt()
126 static int ecb_encrypt(struct blkcipher_desc *desc, ecb_encrypt() argument
130 return __ecb_crypt(desc, dst, src, nbytes, true); ecb_encrypt()
133 static int ecb_decrypt(struct blkcipher_desc *desc, ecb_decrypt() argument
137 return __ecb_crypt(desc, dst, src, nbytes, false); ecb_decrypt()
148 static int cbc_encrypt(struct blkcipher_desc *desc, cbc_encrypt() argument
152 struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_encrypt()
163 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
164 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_encrypt()
181 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_encrypt()
187 static int cbc_decrypt(struct blkcipher_desc *desc, cbc_decrypt() argument
191 struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_decrypt()
202 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
203 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_decrypt()
220 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_decrypt()
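
Every blkcipher routine in these two files shares one skeleton: start a walk, loop while walk.nbytes is nonzero, process as many whole blocks as the current chunk holds, and report the remainder through blkcipher_walk_done(). A loose userspace sketch of that chunked loop using a toy XOR "cipher" (the chunk size and cipher are invented; real walks follow scatterlist segments):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define BLOCK 8                          /* toy cipher block size */
    #define CHUNK 20                         /* bytes the "walk" hands out per step */

    static void toy_ecb(uint8_t *dst, const uint8_t *src, size_t nblocks, uint8_t key)
    {
        for (size_t i = 0; i < nblocks * BLOCK; i++)
            dst[i] = src[i] ^ key;           /* stand-in for the per-block cipher */
    }

    static void ecb_crypt(uint8_t *dst, const uint8_t *src, size_t nbytes, uint8_t key)
    {
        size_t off = 0;

        while (nbytes) {                     /* while ((nbytes = walk.nbytes)) */
            size_t avail = nbytes < CHUNK ? nbytes : CHUNK;
            size_t block_len = avail & ~(size_t)(BLOCK - 1);

            if (block_len == 0)
                break;                       /* trailing partial block: stop */
            toy_ecb(dst + off, src + off, block_len / BLOCK, key);
            off += block_len;
            nbytes -= block_len;             /* blkcipher_walk_done(..., leftover) */
        }
    }

    int main(void)
    {
        uint8_t buf[48], out[48];

        memset(buf, 0xAA, sizeof(buf));
        ecb_crypt(out, buf, sizeof(buf), 0x5C);
        printf("first byte: 0x%02x\n", out[0]);  /* 0xAA ^ 0x5C = 0xF6 */
        return 0;
    }
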
/linux-4.1.27/drivers/dma/dw/
H A D core.c
85 struct dw_desc *desc, *_desc; dwc_desc_get() local
91 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { dwc_desc_get()
93 if (async_tx_test_ack(&desc->txd)) { dwc_desc_get()
94 list_del(&desc->desc_node); dwc_desc_get()
95 ret = desc; dwc_desc_get()
98 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); dwc_desc_get()
109 * `desc' must not be on any lists.
111 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) dwc_desc_put() argument
115 if (desc) { dwc_desc_put()
119 list_for_each_entry(child, &desc->tx_list, desc_node) dwc_desc_put()
121 "moving child desc %p to freelist\n", dwc_desc_put()
123 list_splice_init(&desc->tx_list, &dwc->free_list); dwc_desc_put()
124 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); dwc_desc_put()
125 list_add(&desc->desc_node, &dwc->free_list); dwc_desc_put()
191 struct dw_desc *desc) dwc_do_single_block()
200 ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; dwc_do_single_block()
202 channel_writel(dwc, SAR, desc->lli.sar); dwc_do_single_block()
203 channel_writel(dwc, DAR, desc->lli.dar); dwc_do_single_block()
205 channel_writel(dwc, CTL_HI, desc->lli.ctlhi); dwc_do_single_block()
260 struct dw_desc *desc; dwc_dostart_first_queued() local
266 desc = dwc_first_active(dwc); dwc_dostart_first_queued()
267 dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie); dwc_dostart_first_queued()
268 dwc_dostart(dwc, desc); dwc_dostart_first_queued()
274 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, dwc_descriptor_complete() argument
279 struct dma_async_tx_descriptor *txd = &desc->txd; dwc_descriptor_complete()
293 list_for_each_entry(child, &desc->tx_list, desc_node) dwc_descriptor_complete()
295 async_tx_ack(&desc->txd); dwc_descriptor_complete()
297 list_splice_init(&desc->tx_list, &dwc->free_list); dwc_descriptor_complete()
298 list_move(&desc->desc_node, &dwc->free_list); dwc_descriptor_complete()
309 struct dw_desc *desc, *_desc; dwc_complete_all() local
331 list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_complete_all()
332 dwc_descriptor_complete(dwc, desc, true); dwc_complete_all()
347 struct dw_desc *desc, *_desc; dwc_scan_descriptors() local
367 desc = dwc_first_active(dwc); dwc_scan_descriptors()
369 head = &desc->tx_list; dwc_scan_descriptors()
371 /* Update desc to reflect last sent one */ dwc_scan_descriptors()
373 desc = to_dw_desc(active->prev); dwc_scan_descriptors()
375 dwc->residue -= desc->len; dwc_scan_descriptors()
412 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { dwc_scan_descriptors()
414 dwc->residue = desc->total_len; dwc_scan_descriptors()
417 if (desc->txd.phys == llp) { dwc_scan_descriptors()
423 if (desc->lli.llp == llp) { dwc_scan_descriptors()
430 dwc->residue -= desc->len; dwc_scan_descriptors()
431 list_for_each_entry(child, &desc->tx_list, desc_node) { dwc_scan_descriptors()
446 dwc_descriptor_complete(dwc, desc, true); dwc_scan_descriptors()
462 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", dwc_dump_lli()
574 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); dwc_handle_cyclic()
658 struct dw_desc *desc = txd_to_dw_desc(tx); dwc_tx_submit() local
672 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); dwc_tx_submit()
673 list_add_tail(&desc->desc_node, &dwc->queue); dwc_tx_submit()
686 struct dw_desc *desc; dwc_prep_dma_memcpy() local
725 desc = dwc_desc_get(dwc); dwc_prep_dma_memcpy()
726 if (!desc) dwc_prep_dma_memcpy()
729 desc->lli.sar = src + offset; dwc_prep_dma_memcpy()
730 desc->lli.dar = dest + offset; dwc_prep_dma_memcpy()
731 desc->lli.ctllo = ctllo; dwc_prep_dma_memcpy()
732 desc->lli.ctlhi = xfer_count; dwc_prep_dma_memcpy()
733 desc->len = xfer_count << src_width; dwc_prep_dma_memcpy()
736 first = desc; dwc_prep_dma_memcpy()
738 prev->lli.llp = desc->txd.phys; dwc_prep_dma_memcpy()
739 list_add_tail(&desc->desc_node, dwc_prep_dma_memcpy()
742 prev = desc; dwc_prep_dma_memcpy()
803 struct dw_desc *desc; for_each_sg() local
813 desc = dwc_desc_get(dwc); for_each_sg()
814 if (!desc) for_each_sg()
817 desc->lli.sar = mem; for_each_sg()
818 desc->lli.dar = reg; for_each_sg()
819 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); for_each_sg()
829 desc->lli.ctlhi = dlen >> mem_width; for_each_sg()
830 desc->len = dlen; for_each_sg()
833 first = desc; for_each_sg()
835 prev->lli.llp = desc->txd.phys; for_each_sg()
836 list_add_tail(&desc->desc_node, for_each_sg()
839 prev = desc; for_each_sg()
860 struct dw_desc *desc; for_each_sg() local
870 desc = dwc_desc_get(dwc); for_each_sg()
871 if (!desc) for_each_sg()
874 desc->lli.sar = reg; for_each_sg()
875 desc->lli.dar = mem; for_each_sg()
876 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); for_each_sg()
885 desc->lli.ctlhi = dlen >> reg_width; for_each_sg()
886 desc->len = dlen; for_each_sg()
889 first = desc; for_each_sg()
891 prev->lli.llp = desc->txd.phys; for_each_sg()
892 list_add_tail(&desc->desc_node, for_each_sg()
895 prev = desc; for_each_sg()
1026 struct dw_desc *desc, *_desc; dwc_terminate_all() local
1045 list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_terminate_all()
1046 dwc_descriptor_complete(dwc, desc, false); dwc_terminate_all()
1131 struct dw_desc *desc; dwc_alloc_chan_resources() local
1171 desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); dwc_alloc_chan_resources()
1172 if (!desc) dwc_alloc_chan_resources()
1175 memset(desc, 0, sizeof(struct dw_desc)); dwc_alloc_chan_resources()
1177 INIT_LIST_HEAD(&desc->tx_list); dwc_alloc_chan_resources()
1178 dma_async_tx_descriptor_init(&desc->txd, chan); dwc_alloc_chan_resources()
1179 desc->txd.tx_submit = dwc_tx_submit; dwc_alloc_chan_resources()
1180 desc->txd.flags = DMA_CTRL_ACK; dwc_alloc_chan_resources()
1181 desc->txd.phys = phys; dwc_alloc_chan_resources()
1183 dwc_desc_put(dwc, desc); dwc_alloc_chan_resources()
1205 struct dw_desc *desc, *_desc; dwc_free_chan_resources() local
1242 list_for_each_entry_safe(desc, _desc, &list, desc_node) { dwc_free_chan_resources()
1243 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); dwc_free_chan_resources() local
1244 dma_pool_free(dw->desc_pool, desc, desc->txd.phys); dwc_free_chan_resources()
1275 dwc_dostart(dwc, dwc->cdesc->desc[0]); dw_dma_cyclic_start()
1322 struct dw_desc *desc; dw_dma_cyclic_prep() local
1384 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); dw_dma_cyclic_prep()
1385 if (!cdesc->desc) dw_dma_cyclic_prep()
1389 desc = dwc_desc_get(dwc); dw_dma_cyclic_prep()
1390 if (!desc) dw_dma_cyclic_prep()
1395 desc->lli.dar = sconfig->dst_addr; dw_dma_cyclic_prep()
1396 desc->lli.sar = buf_addr + (period_len * i); dw_dma_cyclic_prep()
1397 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) dw_dma_cyclic_prep()
1404 desc->lli.ctllo |= sconfig->device_fc ? dw_dma_cyclic_prep()
1410 desc->lli.dar = buf_addr + (period_len * i); dw_dma_cyclic_prep()
1411 desc->lli.sar = sconfig->src_addr; dw_dma_cyclic_prep()
1412 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) dw_dma_cyclic_prep()
1419 desc->lli.ctllo |= sconfig->device_fc ? dw_dma_cyclic_prep()
1428 desc->lli.ctlhi = (period_len >> reg_width); dw_dma_cyclic_prep()
1429 cdesc->desc[i] = desc; dw_dma_cyclic_prep()
1432 last->lli.llp = desc->txd.phys; dw_dma_cyclic_prep()
1434 last = desc; dw_dma_cyclic_prep()
1438 last->lli.llp = cdesc->desc[0]->txd.phys; dw_dma_cyclic_prep()
1451 dwc_desc_put(dwc, cdesc->desc[i]); dw_dma_cyclic_prep()
1488 dwc_desc_put(dwc, cdesc->desc[i]); dw_dma_cyclic_free()
1490 kfree(cdesc->desc); dw_dma_cyclic_free()
190 dwc_do_single_block(struct dw_dma_chan *dwc, struct dw_desc *desc) dwc_do_single_block() argument
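
dwc_desc_get()/dwc_desc_put() above manage a per-channel free list: get pops the first descriptor whose previous submission has been ACKed, put splices a descriptor (and its tx_list children) back. A minimal userspace model of that get/put discipline (singly linked, no locking; the acked flag stands in for async_tx_test_ack()):

    #include <stdio.h>
    #include <stddef.h>

    struct dw_desc {
        int acked;                           /* async_tx_test_ack() stand-in */
        struct dw_desc *next;
    };

    static struct dw_desc *free_list;

    static struct dw_desc *desc_get(void)    /* dwc_desc_get() analogue */
    {
        struct dw_desc **pp = &free_list;

        for (; *pp; pp = &(*pp)->next) {
            if ((*pp)->acked) {
                struct dw_desc *d = *pp;
                *pp = d->next;               /* unlink from the free list */
                d->next = NULL;
                return d;
            }
        }
        return NULL;                         /* nothing ACKed / pool exhausted */
    }

    static void desc_put(struct dw_desc *d)  /* dwc_desc_put() analogue */
    {
        d->next = free_list;
        free_list = d;
    }

    int main(void)
    {
        struct dw_desc a = { .acked = 0 }, b = { .acked = 1 };

        desc_put(&a);
        desc_put(&b);                        /* free list is now: b -> a */
        printf("first get is b? %d\n", desc_get() == &b);
        printf("second get empty? %d\n", desc_get() == NULL);  /* a not ACKed yet */
        return 0;
    }
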
/linux-4.1.27/arch/sh/kernel/cpu/irq/
H A D ipr.c
55 void register_ipr_controller(struct ipr_desc *desc) register_ipr_controller() argument
59 desc->chip.irq_mask = disable_ipr_irq; register_ipr_controller()
60 desc->chip.irq_unmask = enable_ipr_irq; register_ipr_controller()
62 for (i = 0; i < desc->nr_irqs; i++) { register_ipr_controller()
63 struct ipr_data *p = desc->ipr_data + i; register_ipr_controller()
66 BUG_ON(p->ipr_idx >= desc->nr_offsets); register_ipr_controller()
67 BUG_ON(!desc->ipr_offsets[p->ipr_idx]); register_ipr_controller()
77 irq_set_chip_and_handler_name(p->irq, &desc->chip, register_ipr_controller()
/linux-4.1.27/include/asm-generic/
H A D msi.h
14 * @desc: Pointer to msi descriptor
22 struct msi_desc *desc; member in struct:msi_alloc_info
/linux-4.1.27/drivers/media/pci/solo6x10/
H A D solo6x10-p2m.c
65 struct solo_p2m_desc *desc, dma_addr_t desc_dma, solo_p2m_dma_desc()
90 /* For 6010 with more than one desc, we can do a one-shot */ solo_p2m_dma_desc()
99 /* For single descriptors and 6110, we need to run each desc */ solo_p2m_dma_desc()
102 p2m_dev->descs = desc; solo_p2m_dma_desc()
105 desc[1].dma_addr); solo_p2m_dma_desc()
107 desc[1].ext_addr); solo_p2m_dma_desc()
109 desc[1].cfg); solo_p2m_dma_desc()
111 desc[1].ctrl); solo_p2m_dma_desc()
136 void solo_p2m_fill_desc(struct solo_p2m_desc *desc, int wr, solo_p2m_fill_desc() argument
143 desc->cfg = SOLO_P2M_COPY_SIZE(size >> 2); solo_p2m_fill_desc()
144 desc->ctrl = SOLO_P2M_BURST_SIZE(SOLO_P2M_BURST_256) | solo_p2m_fill_desc()
148 desc->cfg |= SOLO_P2M_EXT_INC(ext_size >> 2); solo_p2m_fill_desc()
149 desc->ctrl |= SOLO_P2M_PCI_INC(size >> 2) | solo_p2m_fill_desc()
153 desc->dma_addr = dma_addr; solo_p2m_fill_desc()
154 desc->ext_addr = ext_addr; solo_p2m_fill_desc()
161 struct solo_p2m_desc desc[2]; solo_p2m_dma_t() local
163 solo_p2m_fill_desc(&desc[1], wr, dma_addr, ext_addr, size, repeat, solo_p2m_dma_t()
167 return solo_p2m_dma_desc(solo_dev, desc, 0, 1); solo_p2m_dma_t()
173 struct solo_p2m_desc *desc; solo_p2m_isr() local
182 desc = &p2m_dev->descs[p2m_dev->desc_idx]; solo_p2m_isr()
185 solo_reg_write(solo_dev, SOLO_P2M_TAR_ADR(id), desc->dma_addr); solo_p2m_isr()
186 solo_reg_write(solo_dev, SOLO_P2M_EXT_ADR(id), desc->ext_addr); solo_p2m_isr()
187 solo_reg_write(solo_dev, SOLO_P2M_EXT_CFG(id), desc->cfg); solo_p2m_isr()
188 solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id), desc->ctrl); solo_p2m_isr()
64 solo_p2m_dma_desc(struct solo_dev *solo_dev, struct solo_p2m_desc *desc, dma_addr_t desc_dma, int desc_cnt) solo_p2m_dma_desc() argument
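
solo_p2m_fill_desc() packs one DMA job into a small hardware descriptor whose cfg/ctrl words are built from shift-and-mask macros, with sizes counted in 32-bit words (size >> 2). A compilable sketch of that field-packing style (the field encodings here are invented for illustration, not the SOLO6x10's):

    #include <stdio.h>
    #include <stdint.h>

    /* invented field encodings, in the spirit of SOLO_P2M_COPY_SIZE() etc. */
    #define COPY_SIZE(words)   ((uint32_t)(words) & 0xfffff)
    #define BURST(n)           (((uint32_t)(n) & 0x3) << 20)
    #define DIR_WRITE          (1u << 22)

    struct p2m_desc {
        uint32_t cfg;
        uint32_t ctrl;
        uint32_t dma_addr;
        uint32_t ext_addr;
    };

    static void fill_desc(struct p2m_desc *d, int wr, uint32_t dma_addr,
                          uint32_t ext_addr, uint32_t size_bytes)
    {
        d->cfg  = COPY_SIZE(size_bytes >> 2);    /* hardware counts 32-bit words */
        d->ctrl = BURST(2) | (wr ? DIR_WRITE : 0);
        d->dma_addr = dma_addr;
        d->ext_addr = ext_addr;
    }

    int main(void)
    {
        struct p2m_desc d;

        fill_desc(&d, 1, 0x10000000, 0x200, 64);
        printf("cfg=0x%08x ctrl=0x%08x\n", d.cfg, d.ctrl);  /* cfg low bits = 16 words */
        return 0;
    }
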
/linux-4.1.27/sound/soc/intel/common/
H A D sst-acpi.c
60 struct sst_acpi_desc *desc; member in struct:sst_acpi_priv
70 struct sst_acpi_desc *desc = sst_acpi->desc; sst_acpi_fw_cb() local
81 platform_device_register_data(dev, desc->drv_name, -1, sst_acpi_fw_cb()
85 desc->drv_name, (int)PTR_ERR(sst_acpi->pdev_pcm)); sst_acpi_fw_cb()
120 struct sst_acpi_desc *desc; sst_acpi_probe() local
132 desc = (struct sst_acpi_desc *)id->driver_data; sst_acpi_probe()
133 mach = sst_acpi_find_machine(desc->machines); sst_acpi_probe()
140 sst_pdata->id = desc->sst_id; sst_acpi_probe()
142 sst_acpi->desc = desc; sst_acpi_probe()
145 sst_pdata->resindex_dma_base = desc->resindex_dma_base; sst_acpi_probe()
146 if (desc->resindex_dma_base >= 0) { sst_acpi_probe()
147 sst_pdata->dma_engine = desc->dma_engine; sst_acpi_probe()
148 sst_pdata->dma_base = desc->resindex_dma_base; sst_acpi_probe()
149 sst_pdata->dma_size = desc->dma_size; sst_acpi_probe()
152 if (desc->irqindex_host_ipc >= 0) sst_acpi_probe()
153 sst_pdata->irq = platform_get_irq(pdev, desc->irqindex_host_ipc); sst_acpi_probe()
155 if (desc->resindex_lpe_base >= 0) { sst_acpi_probe()
157 desc->resindex_lpe_base); sst_acpi_probe()
164 if (desc->resindex_pcicfg_base >= 0) { sst_acpi_probe()
166 desc->resindex_pcicfg_base); sst_acpi_probe()
173 if (desc->resindex_fw_base >= 0) { sst_acpi_probe()
175 desc->resindex_fw_base); sst_acpi_probe()
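
sst_acpi_probe() is driven entirely by the per-SoC descriptor stashed in the ACPI match table's driver_data: each resindex_* field either names a platform resource index or is negative, meaning the SoC lacks that block. A small sketch of that convention (structs and values illustrative):

    #include <stdio.h>

    struct soc_desc {                        /* per-SoC data, as in struct sst_acpi_desc */
        const char *drv_name;
        int resindex_dma;                    /* negative means: no such resource */
        int resindex_fw;
    };

    static void probe(const struct soc_desc *desc)
    {
        printf("probing %s\n", desc->drv_name);
        if (desc->resindex_dma >= 0)
            printf("  map DMA from resource %d\n", desc->resindex_dma);
        if (desc->resindex_fw >= 0)
            printf("  map firmware window from resource %d\n", desc->resindex_fw);
    }

    int main(void)
    {
        static const struct soc_desc with_dma = {
            .drv_name = "example-pcm-audio", .resindex_dma = 0, .resindex_fw = 2,
        };
        static const struct soc_desc no_dma = {
            .drv_name = "hypothetical-soc", .resindex_dma = -1, .resindex_fw = 0,
        };

        probe(&with_dma);
        probe(&no_dma);
        return 0;
    }
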
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/
H A D mxms.c
100 u8 *desc = mxms + mxms_headerlen(mxm); mxms_foreach() local
101 u8 *fini = desc + mxms_structlen(mxm) - 1; mxms_foreach()
102 while (desc < fini) { mxms_foreach()
103 u8 type = desc[0] & 0x0f; mxms_foreach()
123 entries = (ROM32(desc[0]) & 0x01f00000) >> 20; mxms_foreach()
132 entries = (desc[1] & 0xf0) >> 4; mxms_foreach()
140 entries = desc[1] & 0x07; mxms_foreach()
152 u8 *dump = desc; mxms_foreach()
170 if (!exec(mxm, desc, info)) mxms_foreach()
174 desc += headerlen + (entries * recordlen); mxms_foreach()
181 mxms_output_device(struct nvkm_mxm *mxm, u8 *pdata, struct mxms_odev *desc) mxms_output_device() argument
187 desc->outp_type = (data & 0x00000000000000f0ULL) >> 4; mxms_output_device()
188 desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8; mxms_output_device()
189 desc->conn_type = (data & 0x000000000001f000ULL) >> 12; mxms_output_device()
190 desc->dig_conn = (data & 0x0000000000780000ULL) >> 19; mxms_output_device()
H A D nv50.c
37 struct mxms_odev desc; member in struct:context
44 struct mxms_odev desc; mxm_match_tmds_partner() local
46 mxms_output_device(mxm, data, &desc); mxm_match_tmds_partner()
47 if (desc.outp_type == 2 && mxm_match_tmds_partner()
48 desc.dig_conn == ctx->desc.dig_conn) mxm_match_tmds_partner()
58 u64 desc = *(u64 *)data; mxm_match_dcb() local
60 mxms_output_device(mxm, data, &ctx->desc); mxm_match_dcb()
63 if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type) mxm_match_dcb()
70 if ((desc & 0x00000000000000f0) >= 0x20) { mxm_match_dcb()
72 u8 link = mxm_sor_map(bios, ctx->desc.dig_conn); mxm_match_dcb()
88 if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 && mxm_match_dcb()
120 i2cidx = mxm_ddc_map(bios, ctx.desc.ddc_port); mxm_dcb_sanitise_entry()
132 switch (ctx.desc.outp_type) { mxm_dcb_sanitise_entry()
137 link = mxm_sor_map(bios, ctx.desc.dig_conn) & 0x30; mxm_dcb_sanitise_entry()
153 switch (ctx.desc.conn_type) { mxm_dcb_sanitise_entry()
183 u64 desc = *(u64 *)data; mxm_show_unmatched() local
184 if ((desc & 0xf0) != 0xf0) mxm_show_unmatched()
185 nv_info(mxm, "unmatched output device 0x%016llx\n", desc); mxm_show_unmatched()
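
mxms_foreach() walks a packed table of variable-length records: the low nibble of byte 0 selects the type, each type implies a header length, entry count, and record length, and the cursor advances by headerlen + entries * recordlen. A userspace walk over an invented two-type byte stream (the layout is made up; only the traversal pattern matches):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    /* invented layout: type 0 = 2-byte header with count in byte 1 and
     * 4-byte records; type 1 = 1-byte terminator. */
    static void foreach_desc(const uint8_t *desc, const uint8_t *end)
    {
        while (desc < end) {
            uint8_t type = desc[0] & 0x0f;
            size_t headerlen, entries, recordlen;

            switch (type) {
            case 0:
                headerlen = 2; entries = desc[1]; recordlen = 4;
                break;
            case 1:
                printf("terminator\n");
                return;
            default:
                printf("unknown type %u, aborting walk\n", (unsigned)type);
                return;
            }
            printf("type %u: %zu record(s)\n", (unsigned)type, entries);
            desc += headerlen + entries * recordlen;
        }
    }

    int main(void)
    {
        const uint8_t table[] = {
            0x00, 2,  1, 2, 3, 4,  5, 6, 7, 8,   /* type 0, two 4-byte records */
            0x00, 0,                             /* type 0, empty */
            0x01,                                /* terminator */
        };

        foreach_desc(table, table + sizeof(table));
        return 0;
    }
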
/linux-4.1.27/drivers/net/wireless/p54/
H A D p54pci.c
150 struct p54p_desc *desc = &ring[i]; p54p_refill_rx_ring() local
152 if (!desc->host_addr) { p54p_refill_rx_ring()
171 desc->host_addr = cpu_to_le32(mapping); p54p_refill_rx_ring()
172 desc->device_addr = 0; // FIXME: necessary? p54p_refill_rx_ring()
173 desc->len = cpu_to_le16(priv->common.rx_mtu + 32); p54p_refill_rx_ring()
174 desc->flags = 0; p54p_refill_rx_ring()
193 struct p54p_desc *desc; p54p_check_rx_ring() local
203 desc = &ring[i]; p54p_check_rx_ring()
204 len = le16_to_cpu(desc->len); p54p_check_rx_ring()
220 dma_addr = le32_to_cpu(desc->host_addr); p54p_check_rx_ring()
229 desc->host_addr = cpu_to_le32(0); p54p_check_rx_ring()
234 desc->len = cpu_to_le16(priv->common.rx_mtu + 32); p54p_check_rx_ring()
250 struct p54p_desc *desc; p54p_check_tx_ring() local
259 desc = &ring[i]; p54p_check_tx_ring()
264 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), p54p_check_tx_ring()
265 le16_to_cpu(desc->len), PCI_DMA_TODEVICE); p54p_check_tx_ring()
267 desc->host_addr = 0; p54p_check_tx_ring()
268 desc->device_addr = 0; p54p_check_tx_ring()
269 desc->len = 0; p54p_check_tx_ring()
270 desc->flags = 0; p54p_check_tx_ring()
332 struct p54p_desc *desc; p54p_tx() local
350 desc = &ring_control->tx_data[i]; p54p_tx()
351 desc->host_addr = cpu_to_le32(mapping); p54p_tx()
352 desc->device_addr = ((struct p54_hdr *)skb->data)->req_id; p54p_tx()
353 desc->len = cpu_to_le16(skb->len); p54p_tx()
354 desc->flags = 0; p54p_tx()
369 struct p54p_desc *desc; p54p_stop() local
382 desc = &ring_control->rx_data[i]; p54p_stop()
383 if (desc->host_addr) p54p_stop()
385 le32_to_cpu(desc->host_addr), p54p_stop()
393 desc = &ring_control->rx_mgmt[i]; p54p_stop()
394 if (desc->host_addr) p54p_stop()
396 le32_to_cpu(desc->host_addr), p54p_stop()
404 desc = &ring_control->tx_data[i]; p54p_stop()
405 if (desc->host_addr) p54p_stop()
407 le32_to_cpu(desc->host_addr), p54p_stop()
408 le16_to_cpu(desc->len), p54p_stop()
416 desc = &ring_control->tx_mgmt[i]; p54p_stop()
417 if (desc->host_addr) p54p_stop()
419 le32_to_cpu(desc->host_addr), p54p_stop()
420 le16_to_cpu(desc->len), p54p_stop()
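
p54p_refill_rx_ring() uses desc->host_addr == 0 to mean "slot needs a buffer": the completion path zeroes the field after handing the buffer up, and the refill pass re-arms only those slots. A ring sketch of that ownership convention (DMA mapping replaced by plain malloc):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    #define RING 4

    struct rx_desc {
        void    *host_addr;                  /* NULL means: slot has no buffer armed */
        uint16_t len;
    };

    static struct rx_desc ring[RING];

    static void refill_rx_ring(void)
    {
        for (int i = 0; i < RING; i++) {
            if (ring[i].host_addr)
                continue;                    /* still armed, skip */
            ring[i].host_addr = malloc(2048);
            ring[i].len = 2048;
        }
    }

    static void complete_slot(int i)         /* rx completion hands the buffer up */
    {
        free(ring[i].host_addr);
        ring[i].host_addr = NULL;            /* mark slot empty for the refiller */
        ring[i].len = 0;
    }

    int main(void)
    {
        refill_rx_ring();
        complete_slot(2);
        refill_rx_ring();                    /* only slot 2 gets a fresh buffer */
        for (int i = 0; i < RING; i++)
            printf("slot %d armed=%d\n", i, ring[i].host_addr != NULL);
        for (int i = 0; i < RING; i++)
            free(ring[i].host_addr);
        return 0;
    }
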
/linux-4.1.27/drivers/pinctrl/sunxi/
H A D pinctrl-sunxi.c
78 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_desc_find_function_by_name()
79 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_desc_find_function_by_name()
103 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_desc_find_function_by_pin()
104 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_desc_find_function_by_pin()
291 unsigned pin = g->pin - pctl->desc->pin_base; sunxi_pconf_group_set()
388 pin -= pctl->desc->pin_base; sunxi_pmx_set()
404 struct sunxi_desc_function *desc = sunxi_pmx_set_mux() local
409 if (!desc) sunxi_pmx_set_mux()
412 sunxi_pmx_set(pctldev, g->pin, desc->muxval); sunxi_pmx_set_mux()
424 struct sunxi_desc_function *desc; sunxi_pmx_gpio_set_direction() local
432 desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, offset, func); sunxi_pmx_gpio_set_direction()
433 if (!desc) sunxi_pmx_gpio_set_direction()
436 sunxi_pmx_set(pctldev, offset, desc->muxval); sunxi_pmx_gpio_set_direction()
470 u32 set_mux = pctl->desc->irq_read_needs_mux && sunxi_pinctrl_gpio_get()
471 test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); sunxi_pinctrl_gpio_get()
536 struct sunxi_desc_function *desc; sunxi_pinctrl_gpio_to_irq() local
537 unsigned pinnum = pctl->desc->pin_base + offset; sunxi_pinctrl_gpio_to_irq()
543 desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, pinnum, "irq"); sunxi_pinctrl_gpio_to_irq()
544 if (!desc) sunxi_pinctrl_gpio_to_irq()
547 irqnum = desc->irqbank * IRQ_PER_BANK + desc->irqnum; sunxi_pinctrl_gpio_to_irq()
567 pctl->irq_array[d->hwirq] - pctl->desc->pin_base); sunxi_pinctrl_irq_request_resources()
585 pctl->irq_array[d->hwirq] - pctl->desc->pin_base); sunxi_pinctrl_irq_release_resources()
591 struct irq_desc *desc = container_of(d, struct irq_desc, irq_data); sunxi_pinctrl_irq_set_type() local
620 desc->handle_irq = handle_fasteoi_irq; sunxi_pinctrl_irq_set_type()
623 desc->handle_irq = handle_edge_irq; sunxi_pinctrl_irq_set_type()
712 static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) sunxi_pinctrl_irq_handler() argument
718 for (bank = 0; bank < pctl->desc->irq_banks; bank++) sunxi_pinctrl_irq_handler()
722 if (bank == pctl->desc->irq_banks) sunxi_pinctrl_irq_handler()
731 chained_irq_enter(chip, desc); sunxi_pinctrl_irq_handler()
737 chained_irq_exit(chip, desc); sunxi_pinctrl_irq_handler()
768 pctl->ngroups = pctl->desc->npins; sunxi_pinctrl_build_state()
777 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_build_state()
778 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_build_state()
790 pctl->desc->npins * sizeof(*pctl->functions), sunxi_pinctrl_build_state()
796 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_build_state()
797 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_build_state()
816 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_build_state()
817 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_build_state()
851 const struct sunxi_pinctrl_desc *desc) sunxi_pinctrl_init()
874 pctl->desc = desc; sunxi_pinctrl_init()
877 IRQ_PER_BANK * pctl->desc->irq_banks, sunxi_pinctrl_init()
890 pctl->desc->npins * sizeof(*pins), sunxi_pinctrl_init()
895 for (i = 0; i < pctl->desc->npins; i++) sunxi_pinctrl_init()
896 pins[i] = pctl->desc->pins[i].pin; sunxi_pinctrl_init()
907 pctrl_desc->npins = pctl->desc->npins; sunxi_pinctrl_init()
925 last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number; sunxi_pinctrl_init()
938 pctl->desc->pin_base; sunxi_pinctrl_init()
941 pctl->chip->base = pctl->desc->pin_base; sunxi_pinctrl_init()
947 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_init()
948 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_init()
951 pin->pin.number - pctl->desc->pin_base, sunxi_pinctrl_init()
968 pctl->desc->irq_banks, sunxi_pinctrl_init()
976 for (i = 0; i < pctl->desc->irq_banks; i++) { sunxi_pinctrl_init()
985 pctl->desc->irq_banks * IRQ_PER_BANK, sunxi_pinctrl_init()
994 for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) { sunxi_pinctrl_init()
1002 for (i = 0; i < pctl->desc->irq_banks; i++) { sunxi_pinctrl_init()
850 sunxi_pinctrl_init(struct platform_device *pdev, const struct sunxi_pinctrl_desc *desc) sunxi_pinctrl_init() argument
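
The two sunxi lookup helpers scan the SoC descriptor's pin array and, per pin, its function array, matching by name or by (pin, function-name) pair. The same two-level table search in miniature (pin data made up):

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    struct func_desc { const char *name; unsigned muxval; };
    struct pin_desc  { const char *name; const struct func_desc *funcs; size_t nfuncs; };

    static const struct func_desc pa0_funcs[] = {
        { "gpio_in", 0 }, { "uart0", 2 }, { "irq", 6 },
    };
    static const struct pin_desc pins[] = {
        { "PA0", pa0_funcs, 3 },
    };

    static const struct func_desc *
    find_function_by_pin(const char *pin, const char *func)
    {
        for (size_t i = 0; i < sizeof(pins) / sizeof(pins[0]); i++) {
            if (strcmp(pins[i].name, pin))
                continue;
            for (size_t j = 0; j < pins[i].nfuncs; j++)
                if (!strcmp(pins[i].funcs[j].name, func))
                    return &pins[i].funcs[j];
        }
        return NULL;                         /* pin or function not described */
    }

    int main(void)
    {
        const struct func_desc *f = find_function_by_pin("PA0", "irq");

        printf("%s\n", f ? "found irq mux" : "not found");
        return 0;
    }
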
/linux-4.1.27/drivers/idle/
H A D intel_idle.c
132 .desc = "MWAIT 0x00",
140 .desc = "MWAIT 0x01",
148 .desc = "MWAIT 0x10",
156 .desc = "MWAIT 0x20",
169 .desc = "MWAIT 0x00",
177 .desc = "MWAIT 0x01",
185 .desc = "MWAIT 0x10",
193 .desc = "MWAIT 0x20",
201 .desc = "MWAIT 0x30",
214 .desc = "MWAIT 0x00",
222 .desc = "MWAIT 0x58",
230 .desc = "MWAIT 0x52",
238 .desc = "MWAIT 0x60",
246 .desc = "MWAIT 0x64",
259 .desc = "MWAIT 0x00",
267 .desc = "MWAIT 0x58",
275 .desc = "MWAIT 0x52",
283 .desc = "MWAIT 0x60",
291 .desc = "MWAIT 0x64",
304 .desc = "MWAIT 0x00",
312 .desc = "MWAIT 0x01",
320 .desc = "MWAIT 0x10",
328 .desc = "MWAIT 0x20",
336 .desc = "MWAIT 0x30",
349 .desc = "MWAIT 0x00",
357 .desc = "MWAIT 0x01",
365 .desc = "MWAIT 0x10",
373 .desc = "MWAIT 0x20",
386 .desc = "MWAIT 0x00",
394 .desc = "MWAIT 0x01",
402 .desc = "MWAIT 0x10",
410 .desc = "MWAIT 0x20",
423 .desc = "MWAIT 0x00",
431 .desc = "MWAIT 0x01",
439 .desc = "MWAIT 0x10",
447 .desc = "MWAIT 0x20",
460 .desc = "MWAIT 0x00",
468 .desc = "MWAIT 0x01",
476 .desc = "MWAIT 0x10",
484 .desc = "MWAIT 0x20",
492 .desc = "MWAIT 0x32",
500 .desc = "MWAIT 0x40",
508 .desc = "MWAIT 0x50",
516 .desc = "MWAIT 0x60",
528 .desc = "MWAIT 0x00",
536 .desc = "MWAIT 0x01",
544 .desc = "MWAIT 0x10",
552 .desc = "MWAIT 0x20",
560 .desc = "MWAIT 0x32",
568 .desc = "MWAIT 0x40",
576 .desc = "MWAIT 0x50",
584 .desc = "MWAIT 0x60",
597 .desc = "MWAIT 0x00",
605 .desc = "MWAIT 0x10",
613 .desc = "MWAIT 0x30",
621 .desc = "MWAIT 0x52",
633 .desc = "MWAIT 0x00",
641 .desc = "MWAIT 0x51",
/linux-4.1.27/arch/arm/include/asm/hardware/
H A D iop3xx-adma.h
199 iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_pq() argument
206 iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr) iop_desc_set_pq_addr() argument
212 iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx, iop_desc_set_pq_src_addr() argument
226 iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_pq_zero_sum() argument
233 iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) iop_desc_set_pq_zero_sum_byte_count() argument
241 iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx, iop_desc_set_pq_zero_sum_addr() argument
302 static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc, iop_desc_is_aligned() argument
306 return (desc->idx & (num_slots - 1)) ? 0 : 1; iop_desc_is_aligned()
396 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, iop_desc_get_byte_count() argument
399 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_get_byte_count()
429 static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc, iop_desc_get_src_addr() argument
433 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_get_src_addr()
461 iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags) iop_desc_init_memcpy() argument
463 struct iop3xx_desc_dma *hw_desc = desc->hw_desc; iop_desc_init_memcpy()
479 iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags) iop_desc_init_memset() argument
481 struct iop3xx_desc_aau *hw_desc = desc->hw_desc; iop_desc_init_memset()
552 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */ iop3xx_desc_init_xor()
564 iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_xor() argument
567 iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags); iop_desc_init_xor()
572 iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_zero_sum() argument
575 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; iop_desc_init_zero_sum()
583 hw_desc = desc->hw_desc; iop_desc_init_zero_sum()
601 (u32) (desc->async_tx.phys + (i << 5)); iop_desc_init_zero_sum()
609 iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_null_xor() argument
612 struct iop3xx_desc_aau *hw_desc = desc->hw_desc; iop_desc_init_null_xor()
638 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */ iop_desc_init_null_xor()
646 static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc, iop_desc_set_byte_count() argument
650 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_set_byte_count()
666 iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, iop_desc_init_interrupt() argument
669 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_init_interrupt()
674 iop_desc_init_memcpy(desc, 1); iop_desc_init_interrupt()
680 iop_desc_init_null_xor(desc, 2, 1); iop_desc_init_interrupt()
692 iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) iop_desc_set_zero_sum_byte_count() argument
694 int slots_per_op = desc->slots_per_op; iop_desc_set_zero_sum_byte_count()
695 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; iop_desc_set_zero_sum_byte_count()
713 static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, iop_desc_set_dest_addr() argument
717 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_set_dest_addr()
732 static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc, iop_desc_set_memcpy_src_addr() argument
735 struct iop3xx_desc_dma *hw_desc = desc->hw_desc; iop_desc_set_memcpy_src_addr()
740 iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx, iop_desc_set_zero_sum_src_addr() argument
744 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; iop_desc_set_zero_sum_src_addr()
745 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; iop_desc_set_zero_sum_src_addr()
755 static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc, iop_desc_set_xor_src_addr() argument
759 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; iop_desc_set_xor_src_addr()
760 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; iop_desc_set_xor_src_addr()
770 static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, iop_desc_set_next_desc() argument
774 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_set_next_desc()
780 static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc) iop_desc_get_next_desc() argument
783 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_get_next_desc()
787 static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc) iop_desc_clear_next_desc() argument
790 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_clear_next_desc()
794 static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, iop_desc_set_block_fill_val() argument
797 struct iop3xx_desc_aau *hw_desc = desc->hw_desc; iop_desc_set_block_fill_val()
802 iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) iop_desc_get_zero_result() argument
804 struct iop3xx_desc_aau *hw_desc = desc->hw_desc; iop_desc_get_zero_result()
/linux-4.1.27/drivers/hwtracing/coresight/
H A D coresight-replicator.c
68 struct coresight_desc *desc; replicator_probe() local
85 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); replicator_probe()
86 if (!desc) replicator_probe()
89 desc->type = CORESIGHT_DEV_TYPE_LINK; replicator_probe()
90 desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT; replicator_probe()
91 desc->ops = &replicator_cs_ops; replicator_probe()
92 desc->pdata = pdev->dev.platform_data; replicator_probe()
93 desc->dev = &pdev->dev; replicator_probe()
94 drvdata->csdev = coresight_register(desc); replicator_probe()
/linux-4.1.27/drivers/clk/qcom/
H A D common.c
59 qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc) qcom_cc_map() argument
70 return devm_regmap_init_mmio(dev, base, desc->config); qcom_cc_map()
75 const struct qcom_cc_desc *desc, struct regmap *regmap) qcom_cc_really_probe()
84 size_t num_clks = desc->num_clks; qcom_cc_really_probe()
85 struct clk_regmap **rclks = desc->clks; qcom_cc_really_probe()
116 reset->rcdev.nr_resets = desc->num_resets; qcom_cc_really_probe()
118 reset->reset_map = desc->resets; qcom_cc_really_probe()
129 int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc) qcom_cc_probe() argument
133 regmap = qcom_cc_map(pdev, desc); qcom_cc_probe()
137 return qcom_cc_really_probe(pdev, desc, regmap); qcom_cc_probe()
74 qcom_cc_really_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc, struct regmap *regmap) qcom_cc_really_probe() argument
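
qcom_cc_really_probe() is another descriptor-driven probe: the desc carries clks/num_clks and resets/num_resets, and the function iterates and registers each entry, skipping NULL slots in the array. Sketch of that iterate-and-register loop (registration callback invented):

    #include <stdio.h>
    #include <stddef.h>

    struct clk_stub { const char *name; };

    struct cc_desc {                         /* like struct qcom_cc_desc */
        struct clk_stub **clks;
        size_t num_clks;
    };

    static int register_clk(struct clk_stub *c)
    {
        printf("registered %s\n", c->name);
        return 0;
    }

    static int cc_probe(const struct cc_desc *desc)
    {
        for (size_t i = 0; i < desc->num_clks; i++) {
            if (!desc->clks[i])
                continue;                    /* sparse table entry, as in rclks[] */
            int ret = register_clk(desc->clks[i]);
            if (ret)
                return ret;                  /* bail out on the first failure */
        }
        return 0;
    }

    int main(void)
    {
        static struct clk_stub gpll0 = { "gpll0" }, ahb = { "ahb_clk" };
        static struct clk_stub *clks[] = { &gpll0, NULL, &ahb };
        static const struct cc_desc desc = { clks, 3 };

        return cc_probe(&desc);
    }
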
/linux-4.1.27/drivers/net/ethernet/xscale/
H A D ixp4xx_eth.c
58 #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
177 struct desc *desc_tab; /* coherent */
198 struct desc { struct
236 (n) * sizeof(struct desc))
240 ((n) + RX_DESCS) * sizeof(struct desc))
612 static inline void debug_desc(u32 phys, struct desc *desc) debug_desc() argument
617 phys, desc->next, desc->buf_len, desc->pkt_len, debug_desc()
618 desc->data, desc->dest_id, desc->src_id, desc->flags, debug_desc()
619 desc->qos, desc->padlen, desc->vlan_tci, debug_desc()
620 desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2, debug_desc()
621 desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5, debug_desc()
622 desc->src_mac_0, desc->src_mac_1, desc->src_mac_2, debug_desc()
623 desc->src_mac_3, desc->src_mac_4, desc->src_mac_5); debug_desc()
631 struct desc *tab; queue_get_desc()
639 n_desc = (phys - tab_phys) / sizeof(struct desc); queue_get_desc()
647 struct desc *desc) queue_put_desc()
649 debug_desc(phys, desc); queue_put_desc()
657 static inline void dma_unmap_tx(struct port *port, struct desc *desc) dma_unmap_tx() argument
660 dma_unmap_single(&port->netdev->dev, desc->data, dma_unmap_tx()
661 desc->buf_len, DMA_TO_DEVICE); dma_unmap_tx()
663 dma_unmap_single(&port->netdev->dev, desc->data & ~3, dma_unmap_tx()
664 ALIGN((desc->data & 3) + desc->buf_len, 4), dma_unmap_tx()
695 struct desc *desc; eth_poll() local
726 desc = rx_desc_ptr(port, n); eth_poll()
739 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4)); eth_poll()
744 /* put the desc back on RX-ready queue */ eth_poll()
745 desc->buf_len = MAX_MRU; eth_poll()
746 desc->pkt_len = 0; eth_poll()
747 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); eth_poll()
755 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN, eth_poll()
758 dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN, eth_poll()
761 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4); eth_poll()
764 skb_put(skb, desc->pkt_len); eth_poll()
777 desc->data = phys + NET_IP_ALIGN; eth_poll()
779 desc->buf_len = MAX_MRU; eth_poll()
780 desc->pkt_len = 0; eth_poll()
781 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); eth_poll()
802 struct desc *desc; eth_txdone_irq() local
810 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc); eth_txdone_irq()
812 desc = tx_desc_ptr(port, n_desc); eth_txdone_irq()
813 debug_desc(phys, desc); eth_txdone_irq()
817 port->netdev->stats.tx_bytes += desc->pkt_len; eth_txdone_irq()
819 dma_unmap_tx(port, desc); eth_txdone_irq()
829 queue_put_desc(port->plat->txreadyq, phys, desc); eth_txdone_irq()
847 struct desc *desc; eth_xmit() local
889 desc = tx_desc_ptr(port, n); eth_xmit()
896 desc->data = phys + offset; eth_xmit()
897 desc->buf_len = desc->pkt_len = len; eth_xmit()
901 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); eth_xmit()
1142 struct desc *desc = rx_desc_ptr(port, i); init_queues() local
1154 desc->buf_len = MAX_MRU; init_queues()
1155 desc->data = dma_map_single(&port->netdev->dev, data, init_queues()
1157 if (dma_mapping_error(&port->netdev->dev, desc->data)) { init_queues()
1161 desc->data += NET_IP_ALIGN; init_queues()
1174 struct desc *desc = rx_desc_ptr(port, i); destroy_queues() local
1178 desc->data - NET_IP_ALIGN, destroy_queues()
1184 struct desc *desc = tx_desc_ptr(port, i); destroy_queues() local
1187 dma_unmap_tx(port, desc); destroy_queues()
1337 struct desc *desc; eth_close() local
1341 desc = tx_desc_ptr(port, n); eth_close()
1343 desc->buf_len = desc->pkt_len = 1; eth_close()
1345 queue_put_desc(TX_QUEUE(port->id), phys, desc); eth_close()
646 queue_put_desc(unsigned int queue, u32 phys, struct desc *desc) queue_put_desc() argument
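
queue_get_desc() above recovers a ring index from a hardware-visible physical address with plain arithmetic: n_desc = (phys - tab_phys) / sizeof(struct desc). The math in isolation:

    #include <stdio.h>
    #include <stdint.h>

    struct desc { uint32_t next, buf_len, pkt_len, data; };  /* 16 bytes */

    int main(void)
    {
        uint32_t tab_phys = 0x40000000;      /* base of the descriptor table */
        uint32_t phys = tab_phys + 5 * sizeof(struct desc);

        unsigned n = (phys - tab_phys) / sizeof(struct desc);
        printf("descriptor index = %u\n", n);              /* 5 */
        printf("back to phys = 0x%08x\n",
               tab_phys + n * (unsigned)sizeof(struct desc));
        return 0;
    }
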
/linux-4.1.27/drivers/scsi/
H A D ses.c
124 unsigned char *desc) ses_set_page2_descriptor()
141 memcpy(desc_ptr, desc, 4); ses_set_page2_descriptor()
184 unsigned char *desc; ses_get_fault() local
186 desc = ses_get_page2_descriptor(edev, ecomp); ses_get_fault()
187 if (desc) ses_get_fault()
188 ecomp->fault = (desc[3] & 0x60) >> 4; ses_get_fault()
195 unsigned char desc[4]; ses_set_fault() local
203 init_device_slot_control(desc, ecomp, desc_ptr); ses_set_fault()
207 desc[3] &= 0xdf; ses_set_fault()
210 desc[3] |= 0x20; ses_set_fault()
217 return ses_set_page2_descriptor(edev, ecomp, desc); ses_set_fault()
223 unsigned char *desc; ses_get_status() local
225 desc = ses_get_page2_descriptor(edev, ecomp); ses_get_status()
226 if (desc) ses_get_status()
227 ecomp->status = (desc[0] & 0x0f); ses_get_status()
233 unsigned char *desc; ses_get_locate() local
235 desc = ses_get_page2_descriptor(edev, ecomp); ses_get_locate()
236 if (desc) ses_get_locate()
237 ecomp->locate = (desc[2] & 0x02) ? 1 : 0; ses_get_locate()
244 unsigned char desc[4]; ses_set_locate() local
252 init_device_slot_control(desc, ecomp, desc_ptr); ses_set_locate()
256 desc[2] &= 0xfd; ses_set_locate()
259 desc[2] |= 0x02; ses_set_locate()
265 return ses_set_page2_descriptor(edev, ecomp, desc); ses_set_locate()
272 unsigned char desc[4]; ses_set_active() local
280 init_device_slot_control(desc, ecomp, desc_ptr); ses_set_active()
284 desc[2] &= 0x7f; ses_set_active()
288 desc[2] |= 0x80; ses_set_active()
295 return ses_set_page2_descriptor(edev, ecomp, desc); ses_set_active()
309 unsigned char *desc; ses_get_power_status() local
311 desc = ses_get_page2_descriptor(edev, ecomp); ses_get_power_status()
312 if (desc) ses_get_power_status()
313 ecomp->power_status = (desc[3] & 0x10) ? 0 : 1; ses_get_power_status()
320 unsigned char desc[4]; ses_set_power_status() local
328 init_device_slot_control(desc, ecomp, desc_ptr); ses_set_power_status()
333 desc[3] |= 0x10; ses_set_power_status()
336 desc[3] &= 0xef; ses_set_power_status()
342 return ses_set_page2_descriptor(edev, ecomp, desc); ses_set_power_status()
382 unsigned char *desc) ses_process_descriptor()
384 int eip = desc[0] & 0x10; ses_process_descriptor()
385 int invalid = desc[0] & 0x80; ses_process_descriptor()
386 enum scsi_protocol proto = desc[0] & 0x0f; ses_process_descriptor()
398 d = desc + 4; ses_process_descriptor()
404 d = desc + 4; ses_process_descriptor()
406 d = desc + 8; ses_process_descriptor()
408 d = desc + 4; ses_process_descriptor()
556 unsigned char *desc; ses_match_to_enclosure() local
566 desc = sdev->vpd_pg83 + 4; ses_match_to_enclosure()
567 while (desc < sdev->vpd_pg83 + sdev->vpd_pg83_len) { ses_match_to_enclosure()
568 enum scsi_protocol proto = desc[0] >> 4; ses_match_to_enclosure()
569 u8 code_set = desc[0] & 0x0f; ses_match_to_enclosure()
570 u8 piv = desc[1] & 0x80; ses_match_to_enclosure()
571 u8 assoc = (desc[1] & 0x30) >> 4; ses_match_to_enclosure()
572 u8 type = desc[1] & 0x0f; ses_match_to_enclosure()
573 u8 len = desc[3]; ses_match_to_enclosure()
577 efd.addr = get_unaligned_be64(&desc[4]); ses_match_to_enclosure()
579 desc += len + 4; ses_match_to_enclosure()
122 ses_set_page2_descriptor(struct enclosure_device *edev, struct enclosure_component *ecomp, unsigned char *desc) ses_set_page2_descriptor() argument
381 ses_process_descriptor(struct enclosure_component *ecomp, unsigned char *desc) ses_process_descriptor() argument
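
ses_match_to_enclosure() parses SCSI VPD page 0x83 designation descriptors directly from a byte buffer: masks pull proto/code_set/assoc/type from the first two bytes, byte 3 gives the designator length, and the cursor advances by len + 4. A standalone parser over one hand-built descriptor:

    #include <stdio.h>
    #include <stdint.h>

    static void parse_vpd83(const uint8_t *desc, const uint8_t *end)
    {
        while (desc + 4 <= end) {
            unsigned proto    = desc[0] >> 4;
            unsigned code_set = desc[0] & 0x0f;
            unsigned assoc    = (desc[1] & 0x30) >> 4;
            unsigned type     = desc[1] & 0x0f;
            unsigned len      = desc[3];

            printf("proto=%u code_set=%u assoc=%u type=%u len=%u\n",
                   proto, code_set, assoc, type, len);
            desc += len + 4;                 /* next designation descriptor */
        }
    }

    int main(void)
    {
        /* one NAA-style descriptor: code_set=1 (binary), type=3, 8-byte id */
        const uint8_t buf[] = {
            0x61, 0x03, 0x00, 0x08,
            0x50, 0x00, 0xc5, 0x00, 0x12, 0x34, 0x56, 0x78,
        };

        parse_vpd83(buf, buf + sizeof(buf));
        return 0;
    }
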
/linux-4.1.27/fs/ext2/
H A D ialloc.c
48 struct ext2_group_desc *desc; read_inode_bitmap() local
51 desc = ext2_get_group_desc(sb, block_group, NULL); read_inode_bitmap()
52 if (!desc) read_inode_bitmap()
55 bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap)); read_inode_bitmap()
60 block_group, le32_to_cpu(desc->bg_inode_bitmap)); read_inode_bitmap()
67 struct ext2_group_desc * desc; ext2_release_inode() local
70 desc = ext2_get_group_desc(sb, group, &bh); ext2_release_inode()
71 if (!desc) { ext2_release_inode()
78 le16_add_cpu(&desc->bg_free_inodes_count, 1); ext2_release_inode()
80 le16_add_cpu(&desc->bg_used_dirs_count, -1); ext2_release_inode()
208 struct ext2_group_desc *desc, *best_desc = NULL; find_group_dir() local
212 desc = ext2_get_group_desc (sb, group, NULL); find_group_dir()
213 if (!desc || !desc->bg_free_inodes_count) find_group_dir()
215 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) find_group_dir()
218 (le16_to_cpu(desc->bg_free_blocks_count) > find_group_dir()
221 best_desc = desc; find_group_dir()
273 struct ext2_group_desc *desc; find_group_orlov() local
291 desc = ext2_get_group_desc (sb, group, NULL); find_group_orlov()
292 if (!desc || !desc->bg_free_inodes_count) find_group_orlov()
294 if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir) find_group_orlov()
296 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) find_group_orlov()
298 if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb) find_group_orlov()
301 best_ndir = le16_to_cpu(desc->bg_used_dirs_count); find_group_orlov()
302 best_desc = desc; find_group_orlov()
305 desc = best_desc; find_group_orlov()
331 desc = ext2_get_group_desc (sb, group, NULL); find_group_orlov()
332 if (!desc || !desc->bg_free_inodes_count) find_group_orlov()
336 if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs) find_group_orlov()
338 if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes) find_group_orlov()
340 if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks) find_group_orlov()
348 desc = ext2_get_group_desc (sb, group, NULL); find_group_orlov()
349 if (!desc || !desc->bg_free_inodes_count) find_group_orlov()
351 if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei) find_group_orlov()
374 struct ext2_group_desc *desc; find_group_other() local
381 desc = ext2_get_group_desc (sb, group, NULL); find_group_other()
382 if (desc && le16_to_cpu(desc->bg_free_inodes_count) && find_group_other()
383 le16_to_cpu(desc->bg_free_blocks_count)) find_group_other()
405 desc = ext2_get_group_desc (sb, group, NULL); find_group_other()
406 if (desc && le16_to_cpu(desc->bg_free_inodes_count) && find_group_other()
407 le16_to_cpu(desc->bg_free_blocks_count)) find_group_other()
419 desc = ext2_get_group_desc (sb, group, NULL); find_group_other()
420 if (desc && le16_to_cpu(desc->bg_free_inodes_count)) find_group_other()
617 struct ext2_group_desc *desc; ext2_count_free_inodes() local
630 desc = ext2_get_group_desc (sb, i, NULL); ext2_count_free_inodes()
631 if (!desc) ext2_count_free_inodes()
633 desc_count += le16_to_cpu(desc->bg_free_inodes_count); ext2_count_free_inodes()
641 i, le16_to_cpu(desc->bg_free_inodes_count), x); ext2_count_free_inodes()
652 desc = ext2_get_group_desc (sb, i, NULL); ext2_count_free_inodes()
653 if (!desc) ext2_count_free_inodes()
655 desc_count += le16_to_cpu(desc->bg_free_inodes_count); ext2_count_free_inodes()
/linux-4.1.27/net/xfrm/
H A Dxfrm_algo.c40 .desc = {
58 .desc = {
76 .desc = {
94 .desc = {
112 .desc = {
130 .desc = {
148 .desc = {
170 .desc = {
190 .desc = {
210 .desc = {
230 .desc = {
249 .desc = {
268 .desc = {
288 .desc = {
307 .desc = {
343 .desc = {
363 .desc = {
383 .desc = {
403 .desc = {
423 .desc = {
443 .desc = {
463 .desc = {
483 .desc = {
503 .desc = {
522 .desc = {
540 .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
550 .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
560 .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
646 return entry->desc.sadb_alg_id == (unsigned long)data; xfrm_alg_id_match()
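
xfrm_alg_id_match() compares entry->desc.sadb_alg_id against a key smuggled in as an opaque pointer, so one generic find loop can serve every algorithm table. A minimal version of that table-plus-matcher scheme (the IDs shown are the PF_KEY HMAC-MD5/SHA1 values):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    struct algo_desc { unsigned sadb_alg_id; };
    struct algo { const char *name; struct algo_desc desc; };

    static const struct algo aalg_list[] = {
        { "hmac(md5)",  { .sadb_alg_id = 2 } },
        { "hmac(sha1)", { .sadb_alg_id = 3 } },
    };

    typedef int (*match_fn)(const struct algo *, const void *);

    static int alg_id_match(const struct algo *a, const void *data)
    {
        return a->desc.sadb_alg_id == (unsigned long)(uintptr_t)data;
    }

    static const struct algo *find(const struct algo *tab, size_t n,
                                   match_fn match, const void *data)
    {
        for (size_t i = 0; i < n; i++)
            if (match(&tab[i], data))
                return &tab[i];
        return NULL;
    }

    int main(void)
    {
        const struct algo *a = find(aalg_list, 2, alg_id_match,
                                    (const void *)(uintptr_t)3);
        printf("%s\n", a ? a->name : "none");
        return 0;
    }
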
/linux-4.1.27/drivers/devfreq/
H A D devfreq-event.c
44 if (!edev || !edev->desc) devfreq_event_enable_edev()
48 if (edev->desc->ops && edev->desc->ops->enable devfreq_event_enable_edev()
50 ret = edev->desc->ops->enable(edev); devfreq_event_enable_edev()
76 if (!edev || !edev->desc) devfreq_event_disable_edev()
86 if (edev->desc->ops && edev->desc->ops->disable devfreq_event_disable_edev()
88 ret = edev->desc->ops->disable(edev); devfreq_event_disable_edev()
113 if (!edev || !edev->desc) devfreq_event_is_enabled()
138 if (!edev || !edev->desc) devfreq_event_set_event()
141 if (!edev->desc->ops || !edev->desc->ops->set_event) devfreq_event_set_event()
148 ret = edev->desc->ops->set_event(edev); devfreq_event_set_event()
168 if (!edev || !edev->desc) devfreq_event_get_event()
171 if (!edev->desc->ops || !edev->desc->ops->get_event) devfreq_event_get_event()
180 ret = edev->desc->ops->get_event(edev, edata); devfreq_event_get_event()
200 if (!edev || !edev->desc) devfreq_event_reset_event()
207 if (edev->desc->ops && edev->desc->ops->reset) devfreq_event_reset_event()
208 ret = edev->desc->ops->reset(edev); devfreq_event_reset_event()
243 if (!strcmp(edev->desc->name, node->name)) devfreq_event_get_edev_by_phandle()
301 * @desc : the devfreq-event device's descriptor, which includes essential
308 struct devfreq_event_desc *desc) devfreq_event_add_edev()
314 if (!dev || !desc) devfreq_event_add_edev()
317 if (!desc->name || !desc->ops) devfreq_event_add_edev()
320 if (!desc->ops->set_event || !desc->ops->get_event) devfreq_event_add_edev()
328 edev->desc = desc; devfreq_event_add_edev()
393 * @desc : the devfreq-event device's descriptor, which includes essential
401 struct devfreq_event_desc *desc) devm_devfreq_event_add_edev()
409 edev = devfreq_event_add_edev(dev, desc); devm_devfreq_event_add_edev()
446 if (!edev || !edev->desc) name_show()
449 return sprintf(buf, "%s\n", edev->desc->name); name_show()
458 if (!edev || !edev->desc) enable_count_show()
307 devfreq_event_add_edev(struct device *dev, struct devfreq_event_desc *desc) devfreq_event_add_edev() argument
400 devm_devfreq_event_add_edev(struct device *dev, struct devfreq_event_desc *desc) devm_devfreq_event_add_edev() argument
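
Each devfreq_event entry point above validates the chain edev->desc->ops-><callback> before dispatching, which lets drivers leave optional callbacks NULL. The guarded-dispatch idiom, compilable:

    #include <stdio.h>
    #include <errno.h>
    #include <stddef.h>

    struct edev;                             /* forward declaration */

    struct event_ops {                       /* like struct devfreq_event_ops */
        int (*enable)(struct edev *);        /* optional */
        int (*set_event)(struct edev *);     /* mandatory for the framework */
    };

    struct event_desc { const char *name; const struct event_ops *ops; };
    struct edev { const struct event_desc *desc; };

    static int event_set_event(struct edev *e)
    {
        if (!e || !e->desc)
            return -EINVAL;
        if (!e->desc->ops || !e->desc->ops->set_event)
            return -EINVAL;                  /* driver omitted a mandatory callback */
        return e->desc->ops->set_event(e);
    }

    static int my_set_event(struct edev *e) { (void)e; return 0; }

    int main(void)
    {
        static const struct event_ops ops = { .set_event = my_set_event };
        static const struct event_desc desc = { "ppmu-event", &ops };
        struct edev e = { &desc };

        printf("set_event -> %d\n", event_set_event(&e));
        return 0;
    }
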
/linux-4.1.27/drivers/net/ethernet/ti/
H A D cpmac.c
239 static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) cpmac_dump_desc() argument
243 printk("%s: desc[%p]:", dev->name, desc); cpmac_dump_desc()
244 for (i = 0; i < sizeof(*desc) / 4; i++) cpmac_dump_desc()
245 printk(" %08x", ((u32 *)desc)[i]); cpmac_dump_desc()
367 struct cpmac_desc *desc) cpmac_rx_one()
372 cpmac_dump_desc(priv->dev, desc); cpmac_rx_one()
373 cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); cpmac_rx_one()
374 if (unlikely(!desc->datalen)) { cpmac_rx_one()
383 skb_put(desc->skb, desc->datalen); cpmac_rx_one()
384 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); cpmac_rx_one()
385 skb_checksum_none_assert(desc->skb); cpmac_rx_one()
387 priv->dev->stats.rx_bytes += desc->datalen; cpmac_rx_one()
388 result = desc->skb; cpmac_rx_one()
389 dma_unmap_single(&priv->dev->dev, desc->data_mapping, cpmac_rx_one()
391 desc->skb = skb; cpmac_rx_one()
392 desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, cpmac_rx_one()
395 desc->hw_data = (u32)desc->data_mapping; cpmac_rx_one()
408 desc->buflen = CPMAC_SKB_SIZE; cpmac_rx_one()
409 desc->dataflags = CPMAC_OWN; cpmac_rx_one()
417 struct cpmac_desc *desc, *restart; cpmac_poll() local
431 desc = priv->rx_head; cpmac_poll()
433 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { cpmac_poll()
436 if ((desc->dataflags & CPMAC_EOQ) != 0) { cpmac_poll()
446 restart, desc); cpmac_poll()
450 restart = desc->next; cpmac_poll()
453 skb = cpmac_rx_one(priv, desc); cpmac_poll()
458 desc = desc->next; cpmac_poll()
461 if (desc != priv->rx_head) { cpmac_poll()
465 desc->prev->hw_next = (u32)0; cpmac_poll()
505 priv->rx_head = desc; cpmac_poll()
551 struct cpmac_desc *desc; cpmac_start_xmit() local
564 desc = &priv->desc_ring[queue]; cpmac_start_xmit()
565 if (unlikely(desc->dataflags & CPMAC_OWN)) { cpmac_start_xmit()
574 desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; cpmac_start_xmit()
575 desc->skb = skb; cpmac_start_xmit()
576 desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, cpmac_start_xmit()
578 desc->hw_data = (u32)desc->data_mapping; cpmac_start_xmit()
579 desc->datalen = len; cpmac_start_xmit()
580 desc->buflen = len; cpmac_start_xmit()
584 cpmac_dump_desc(dev, desc); cpmac_start_xmit()
587 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); cpmac_start_xmit()
594 struct cpmac_desc *desc; cpmac_end_xmit() local
597 desc = &priv->desc_ring[queue]; cpmac_end_xmit()
598 cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); cpmac_end_xmit()
599 if (likely(desc->skb)) { cpmac_end_xmit()
602 dev->stats.tx_bytes += desc->skb->len; cpmac_end_xmit()
604 dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len, cpmac_end_xmit()
609 desc->skb, desc->skb->len); cpmac_end_xmit()
611 dev_kfree_skb_irq(desc->skb); cpmac_end_xmit()
612 desc->skb = NULL; cpmac_end_xmit()
690 struct cpmac_desc *desc; cpmac_clear_rx() local
695 desc = priv->rx_head; cpmac_clear_rx()
697 if ((desc->dataflags & CPMAC_OWN) == 0) { cpmac_clear_rx()
701 cpmac_dump_desc(dev, desc); cpmac_clear_rx()
702 desc->dataflags = CPMAC_OWN; cpmac_clear_rx()
705 desc->hw_next = desc->next->mapping; cpmac_clear_rx()
706 desc = desc->next; cpmac_clear_rx()
952 struct cpmac_desc *desc; cpmac_open() local
984 priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; cpmac_open()
987 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { cpmac_open()
993 desc->skb = skb; cpmac_open()
994 desc->data_mapping = dma_map_single(&dev->dev, skb->data, cpmac_open()
997 desc->hw_data = (u32)desc->data_mapping; cpmac_open()
998 desc->buflen = CPMAC_SKB_SIZE; cpmac_open()
999 desc->dataflags = CPMAC_OWN; cpmac_open()
1000 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; cpmac_open()
1001 desc->next->prev = desc; cpmac_open()
1002 desc->hw_next = (u32)desc->next->mapping; cpmac_open()
366 cpmac_rx_one(struct cpmac_priv *priv, struct cpmac_desc *desc) cpmac_rx_one() argument
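
The cpmac receive path above is a classic own-bit descriptor ring: the MAC clears CPMAC_OWN when it has filled a buffer, cpmac_poll() consumes completed slots starting at priv->rx_head, attaches a fresh buffer, and sets CPMAC_OWN to hand each slot back. A standalone sketch of that handshake, with illustrative names and a pretend "hardware" (no real DMA):

    /* Own-bit ring demo: slots marked OWN belong to "hardware"; the
     * driver consumes a completed slot, refills it, and returns it. */
    #include <stdio.h>
    #include <stdlib.h>

    #define OWN  0x1
    #define RING 4

    struct rx_desc {
        unsigned int flags;
        void *buf;
        struct rx_desc *next;
    };

    int main(void)
    {
        struct rx_desc ring[RING], *head;

        for (int i = 0; i < RING; i++) {
            ring[i].flags = OWN;                  /* all slots start hw-owned */
            ring[i].buf = malloc(64);
            ring[i].next = &ring[(i + 1) % RING]; /* circular chain */
        }

        head = &ring[1];                          /* like priv->rx_head */
        ring[1].flags &= ~OWN;                    /* pretend hw filled slot 1 */

        while (!(head->flags & OWN)) {            /* cpmac_poll-style loop */
            printf("consumed buffer %p\n", head->buf);
            free(head->buf);
            head->buf = malloc(64);               /* refill the slot... */
            head->flags = OWN;                    /* ...and hand it back */
            head = head->next;
        }
        return 0;
    }
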
/linux-4.1.27/sound/soc/blackfin/
H A Dbf5xx-sport.c127 static void setup_desc(struct dmasg *desc, void *buf, int fragcount, setup_desc() argument
135 desc[i].next_desc_addr = &(desc[i + 1]); setup_desc()
136 desc[i].start_addr = (unsigned long)buf + i*fragsize; setup_desc()
137 desc[i].cfg = cfg; setup_desc()
138 desc[i].x_count = x_count; setup_desc()
139 desc[i].x_modify = wdsize; setup_desc()
140 desc[i].y_count = ycount; setup_desc()
141 desc[i].y_modify = wdsize; setup_desc()
145 desc[fragcount-1].next_desc_addr = desc; setup_desc()
147 pr_debug("setup desc: desc0=%p, next0=%p, desc1=%p," setup_desc()
149 desc, desc[0].next_desc_addr, setup_desc()
150 desc+1, desc[1].next_desc_addr, setup_desc()
151 desc[0].x_count, desc[0].y_count, setup_desc()
152 desc[0].start_addr, desc[0].cfg); setup_desc()
179 struct dmasg *desc, temp_desc; sport_hook_rx_dummy() local
190 desc = get_dma_next_desc_ptr(sport->dma_rx_chan); sport_hook_rx_dummy()
192 temp_desc = *desc; sport_hook_rx_dummy()
193 desc->x_count = sport->dummy_count / 2; sport_hook_rx_dummy()
194 desc->y_count = 0; sport_hook_rx_dummy()
195 desc->next_desc_addr = sport->dummy_rx_desc; sport_hook_rx_dummy()
203 *desc = temp_desc; sport_hook_rx_dummy()
300 struct dmasg *desc, temp_desc; sport_hook_tx_dummy() local
311 desc = get_dma_next_desc_ptr(sport->dma_tx_chan); sport_hook_tx_dummy()
313 temp_desc = *desc; sport_hook_tx_dummy()
314 desc->x_count = sport->dummy_count / 2; sport_hook_tx_dummy()
315 desc->y_count = 0; sport_hook_tx_dummy()
316 desc->next_desc_addr = sport->dummy_tx_desc; sport_hook_tx_dummy()
324 *desc = temp_desc; sport_hook_tx_dummy()
437 pr_err("Failed to allocate memory for rx desc\n"); sport_config_rx_dma()
502 pr_err("Failed to allocate memory for tx desc\n"); sport_config_tx_dma()
526 struct dmasg *desc; sport_config_rx_dummy() local
531 desc = l1_data_sram_zalloc(2 * sizeof(*desc)); sport_config_rx_dummy()
534 desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0); sport_config_rx_dummy()
535 memset(desc, 0, 2 * sizeof(*desc)); sport_config_rx_dummy()
537 if (desc == NULL) { sport_config_rx_dummy()
538 pr_err("Failed to allocate memory for dummy rx desc\n"); sport_config_rx_dummy()
541 sport->dummy_rx_desc = desc; sport_config_rx_dummy()
542 desc->start_addr = (unsigned long)sport->dummy_buf; sport_config_rx_dummy()
545 desc->cfg = config; sport_config_rx_dummy()
546 desc->x_count = sport->dummy_count/sport->wdsize; sport_config_rx_dummy()
547 desc->x_modify = sport->wdsize; sport_config_rx_dummy()
548 desc->y_count = 0; sport_config_rx_dummy()
549 desc->y_modify = 0; sport_config_rx_dummy()
550 memcpy(desc+1, desc, sizeof(*desc)); sport_config_rx_dummy()
551 desc->next_desc_addr = desc + 1; sport_config_rx_dummy()
552 desc[1].next_desc_addr = desc; sport_config_rx_dummy()
558 struct dmasg *desc; sport_config_tx_dummy() local
564 desc = l1_data_sram_zalloc(2 * sizeof(*desc)); sport_config_tx_dummy()
567 desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0); sport_config_tx_dummy()
568 memset(desc, 0, 2 * sizeof(*desc)); sport_config_tx_dummy()
570 if (!desc) { sport_config_tx_dummy()
571 pr_err("Failed to allocate memory for dummy tx desc\n"); sport_config_tx_dummy()
574 sport->dummy_tx_desc = desc; sport_config_tx_dummy()
575 desc->start_addr = (unsigned long)sport->dummy_buf + \ sport_config_tx_dummy()
579 desc->cfg = config; sport_config_tx_dummy()
580 desc->x_count = sport->dummy_count/sport->wdsize; sport_config_tx_dummy()
581 desc->x_modify = sport->wdsize; sport_config_tx_dummy()
582 desc->y_count = 0; sport_config_tx_dummy()
583 desc->y_modify = 0; sport_config_tx_dummy()
584 memcpy(desc+1, desc, sizeof(*desc)); sport_config_tx_dummy()
585 desc->next_desc_addr = desc + 1; sport_config_tx_dummy()
586 desc[1].next_desc_addr = desc; sport_config_tx_dummy()
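
sport_hook_rx_dummy()/sport_hook_tx_dummy() above perform a live-chain hot-swap: copy the next in-flight descriptor aside (temp_desc = *desc), repoint it into a two-element dummy loop so the DMA engine idles harmlessly, and later write the saved copy back. A standalone sketch of the trick (types and names are illustrative, not the Blackfin DMA API):

    /* Live-chain hot-swap demo: save the next in-flight descriptor,
     * divert it into a self-looping dummy pair, restore it later. */
    #include <stdio.h>

    struct dmasg_like {
        struct dmasg_like *next;
        int payload;
    };

    int main(void)
    {
        struct dmasg_like ring[3], dummy[2], saved;

        for (int i = 0; i < 3; i++) {             /* circular working chain */
            ring[i].next = &ring[(i + 1) % 3];
            ring[i].payload = i;
        }
        dummy[0] = (struct dmasg_like){ &dummy[1], -1 };  /* dummy pair */
        dummy[1] = (struct dmasg_like){ &dummy[0], -1 };  /* loops on itself */

        struct dmasg_like *live = &ring[1];       /* next descriptor to run */
        saved = *live;                            /* temp_desc = *desc */
        live->next = &dummy[0];                   /* divert into dummy loop */

        struct dmasg_like *p = &ring[0];
        for (int i = 0; i < 5; i++, p = p->next)  /* engine idles in dummies */
            printf("step %d: payload %d\n", i, p->payload);

        *live = saved;                            /* *desc = temp_desc */
        return 0;
    }
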
/linux-4.1.27/net/netfilter/
H A Dnft_cmp.c75 struct nft_data_desc desc; nft_cmp_init() local
78 err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc, nft_cmp_init()
83 err = nft_validate_register_load(priv->sreg, desc.len); nft_cmp_init()
88 priv->len = desc.len; nft_cmp_init()
124 struct nft_data_desc desc; nft_cmp_fast_init() local
129 err = nft_data_init(NULL, &data, sizeof(data), &desc, nft_cmp_fast_init()
134 err = nft_validate_register_load(priv->sreg, desc.len); nft_cmp_fast_init()
138 desc.len *= BITS_PER_BYTE; nft_cmp_fast_init()
139 mask = nft_cmp_fast_mask(desc.len); nft_cmp_fast_init()
142 priv->len = desc.len; nft_cmp_fast_init()
177 struct nft_data_desc desc; nft_cmp_select_ops() local
200 err = nft_data_init(NULL, &data, sizeof(data), &desc, nft_cmp_select_ops()
205 if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ) nft_cmp_select_ops()
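
nft_cmp_fast_init() above converts desc.len from bytes to bits and derives a mask so that comparisons of up to four bytes collapse into a single masked u32 compare at runtime. A small demo of that technique; cmp_fast_mask() mirrors what nft_cmp_fast_mask() computes in this tree, and the prefix-byte masking shown assumes a little-endian host:

    /* Masked-u32 fast compare, as in nft_cmp_fast: for 1..4 data bytes,
     * (reg & mask) == (key & mask) replaces memcmp(). */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t cmp_fast_mask(unsigned int len_bits)
    {
        return ~(uint32_t)0 >> (sizeof(uint32_t) * 8 - len_bits);
    }

    int main(void)
    {
        uint8_t pkt[4] = { 0x11, 0x22, 0x33, 0x44 };
        uint8_t key[4] = { 0x11, 0x22, 0x99, 0xaa };
        unsigned int len = 2;                 /* compare first two bytes */
        uint32_t r, k, mask = cmp_fast_mask(len * 8);

        memcpy(&r, pkt, sizeof(r));
        memcpy(&k, key, sizeof(k));
        printf("match: %d\n", (r & mask) == (k & mask));   /* prints 1 */
        return 0;
    }
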
/linux-4.1.27/drivers/crypto/nx/
H A Dnx-aes-cbc.c65 static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, cbc_aes_nx_crypt() argument
71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_nx_crypt()
87 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process, cbc_aes_nx_crypt()
98 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); cbc_aes_nx_crypt()
102 memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); cbc_aes_nx_crypt()
114 static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc, cbc_aes_nx_encrypt() argument
119 return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1); cbc_aes_nx_encrypt()
122 static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc, cbc_aes_nx_decrypt() argument
127 return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0); cbc_aes_nx_decrypt()
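
cbc_aes_nx_crypt() above processes a request in hardware-sized chunks and copies the coprocessor's chaining value back into desc->info after each pass, so the next chunk picks up the CBC chain where the last one stopped. A toy demonstration of why that carry matters, with a 4-byte XOR "cipher" standing in for AES (nothing here is the NX engine's API): encrypting in two chunks matches one-shot encryption only if the chaining value is carried over.

    /* CBC chaining-value carry: the last cipher block of chunk 1 must
     * become the IV of chunk 2 -- the role of memcpy(desc->info, ...cv)
     * in the driver above. */
    #include <stdio.h>
    #include <string.h>

    #define BLK 4

    static void cbc_xor(unsigned char *dst, const unsigned char *src,
                        size_t len, unsigned char iv[BLK])
    {
        for (size_t off = 0; off < len; off += BLK) {
            for (int i = 0; i < BLK; i++)
                dst[off + i] = src[off + i] ^ iv[i];
            memcpy(iv, dst + off, BLK);       /* cv = last cipher block */
        }
    }

    int main(void)
    {
        unsigned char pt[8] = "ABCDEFG", one[8], two[8];
        unsigned char iv1[BLK] = { 1, 2, 3, 4 }, iv2[BLK] = { 1, 2, 3, 4 };

        cbc_xor(one, pt, 8, iv1);             /* one shot */
        cbc_xor(two, pt, 4, iv2);             /* chunk 1 */
        cbc_xor(two + 4, pt + 4, 4, iv2);     /* chunk 2: iv2 carries the cv */
        printf("chunked == one-shot: %d\n", !memcmp(one, two, 8)); /* 1 */
        return 0;
    }
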
/linux-4.1.27/include/uapi/linux/
H A Dmic_common.h98 * @desc: Array of MIC virtio device descriptors.
102 struct mic_device_desc desc[0]; member in struct:mic_device_page
109 * (avail and desc rings)
129 * Max vring entries (power of 2) to ensure desc and avail rings
135 * Max size of the desc block in bytes: includes:
174 static inline unsigned mic_desc_size(const struct mic_device_desc *desc) mic_desc_size() argument
176 return sizeof(*desc) + desc->num_vq * sizeof(struct mic_vqconfig) mic_desc_size()
177 + desc->feature_len * 2 + desc->config_len; mic_desc_size()
181 mic_vq_config(const struct mic_device_desc *desc) mic_vq_config() argument
183 return (struct mic_vqconfig *)(desc + 1); mic_vq_config()
186 static inline __u8 *mic_vq_features(const struct mic_device_desc *desc) mic_vq_features() argument
188 return (__u8 *)(mic_vq_config(desc) + desc->num_vq); mic_vq_features()
191 static inline __u8 *mic_vq_configspace(const struct mic_device_desc *desc) mic_vq_configspace() argument
193 return mic_vq_features(desc) + desc->feature_len * 2; mic_vq_configspace()
195 static inline unsigned mic_total_desc_size(struct mic_device_desc *desc) mic_total_desc_size() argument
197 return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); mic_total_desc_size()
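
The mic_* helpers above locate trailing arrays inside one variable-length allocation purely by pointer arithmetic: vq configs directly follow the fixed header, feature bytes (feature_len * 2, host and guest halves) follow the configs, and config space follows the features. A compile-able demo of the same layout arithmetic with illustrative struct names:

    /* One-allocation, variable-length descriptor layout, as computed by
     * mic_desc_size()/mic_vq_config() above. */
    #include <stdio.h>
    #include <stdlib.h>

    struct vqcfg { unsigned int ring_pages; };

    struct dev_desc {
        unsigned char num_vq, feature_len, config_len, pad;
        /* trailing: num_vq vqcfg's, feature_len*2 feature bytes,
         * config_len config bytes */
    };

    static struct vqcfg *vq_config(struct dev_desc *d)
    {
        return (struct vqcfg *)(d + 1);           /* first trailing array */
    }

    static unsigned char *vq_features(struct dev_desc *d)
    {
        return (unsigned char *)(vq_config(d) + d->num_vq);
    }

    static unsigned char *vq_configspace(struct dev_desc *d)
    {
        return vq_features(d) + d->feature_len * 2;  /* host+guest halves */
    }

    static size_t desc_size(const struct dev_desc *d)
    {
        return sizeof(*d) + d->num_vq * sizeof(struct vqcfg)
               + d->feature_len * 2 + d->config_len;
    }

    int main(void)
    {
        struct dev_desc hdr = { .num_vq = 2, .feature_len = 4, .config_len = 8 };
        struct dev_desc *d = calloc(1, desc_size(&hdr));

        if (!d)
            return 1;
        *d = hdr;
        printf("total %zu bytes, config space at offset %td\n",
               desc_size(d), vq_configspace(d) - (unsigned char *)d);
        free(d);
        return 0;
    }
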
/linux-4.1.27/drivers/mtd/ubi/
H A Dcdev.c51 * @desc: volume descriptor
57 static int get_exclusive(struct ubi_volume_desc *desc) get_exclusive() argument
60 struct ubi_volume *vol = desc->vol; get_exclusive()
71 err = desc->mode; get_exclusive()
72 desc->mode = UBI_EXCLUSIVE; get_exclusive()
81 * @desc: volume descriptor
84 static void revoke_exclusive(struct ubi_volume_desc *desc, int mode) revoke_exclusive() argument
86 struct ubi_volume *vol = desc->vol; revoke_exclusive()
90 ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE); revoke_exclusive()
102 desc->mode = mode; revoke_exclusive()
107 struct ubi_volume_desc *desc; vol_cdev_open() local
122 desc = ubi_open_volume(ubi_num, vol_id, mode); vol_cdev_open()
123 if (IS_ERR(desc)) vol_cdev_open()
124 return PTR_ERR(desc); vol_cdev_open()
126 file->private_data = desc; vol_cdev_open()
132 struct ubi_volume_desc *desc = file->private_data; vol_cdev_release() local
133 struct ubi_volume *vol = desc->vol; vol_cdev_release()
136 vol->ubi->ubi_num, vol->vol_id, desc->mode); vol_cdev_release()
152 ubi_close_volume(desc); vol_cdev_release()
158 struct ubi_volume_desc *desc = file->private_data; vol_cdev_llseek() local
159 struct ubi_volume *vol = desc->vol; vol_cdev_llseek()
173 struct ubi_volume_desc *desc = file->private_data; vol_cdev_fsync() local
174 struct ubi_device *ubi = desc->vol->ubi; vol_cdev_fsync()
187 struct ubi_volume_desc *desc = file->private_data; vol_cdev_read() local
188 struct ubi_volume *vol = desc->vol; vol_cdev_read()
264 struct ubi_volume_desc *desc = file->private_data; vol_cdev_direct_write() local
265 struct ubi_volume *vol = desc->vol; vol_cdev_direct_write()
340 struct ubi_volume_desc *desc = file->private_data; vol_cdev_write() local
341 struct ubi_volume *vol = desc->vol; vol_cdev_write()
366 revoke_exclusive(desc, UBI_READWRITE); vol_cdev_write()
381 revoke_exclusive(desc, UBI_READWRITE); vol_cdev_write()
391 struct ubi_volume_desc *desc = file->private_data; vol_cdev_ioctl() local
392 struct ubi_volume *vol = desc->vol; vol_cdev_ioctl()
413 if (desc->mode == UBI_READONLY) { vol_cdev_ioctl()
425 err = get_exclusive(desc); vol_cdev_ioctl()
432 revoke_exclusive(desc, UBI_READWRITE); vol_cdev_ioctl()
449 if (desc->mode == UBI_READONLY || vol_cdev_ioctl()
461 err = get_exclusive(desc); vol_cdev_ioctl()
467 revoke_exclusive(desc, UBI_READWRITE); vol_cdev_ioctl()
482 if (desc->mode == UBI_READONLY || vol_cdev_ioctl()
512 err = ubi_leb_map(desc, req.lnum); vol_cdev_ioctl()
526 err = ubi_leb_unmap(desc, lnum); vol_cdev_ioctl()
540 err = ubi_is_mapped(desc, lnum); vol_cdev_ioctl()
558 desc->vol->direct_writes = !!req.value; vol_cdev_ioctl()
573 ubi_get_volume_info(desc, &vi); vol_cdev_ioctl()
583 ubi_get_volume_info(desc, &vi); vol_cdev_ioctl()
738 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY); rename_volumes()
739 if (IS_ERR(re->desc)) { rename_volumes()
740 err = PTR_ERR(re->desc); rename_volumes()
748 if (re->desc->vol->name_len == name_len && rename_volumes()
749 !memcmp(re->desc->vol->name, name, name_len)) { rename_volumes()
750 ubi_close_volume(re->desc); rename_volumes()
759 vol_id, re->desc->vol->name, name); rename_volumes()
767 struct ubi_volume_desc *desc; rename_volumes() local
777 if (re->new_name_len == re1->desc->vol->name_len && rename_volumes()
778 !memcmp(re->new_name, re1->desc->vol->name, rename_volumes()
779 re1->desc->vol->name_len)) { rename_volumes()
792 desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, rename_volumes()
794 if (IS_ERR(desc)) { rename_volumes()
795 err = PTR_ERR(desc); rename_volumes()
809 ubi_close_volume(desc); rename_volumes()
814 re1->desc = desc; rename_volumes()
817 re1->desc->vol->vol_id, re1->desc->vol->name); rename_volumes()
826 ubi_close_volume(re->desc); rename_volumes()
838 struct ubi_volume_desc *desc; ubi_cdev_ioctl() local
890 desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); ubi_cdev_ioctl()
891 if (IS_ERR(desc)) { ubi_cdev_ioctl()
892 err = PTR_ERR(desc); ubi_cdev_ioctl()
897 err = ubi_remove_volume(desc, 0); ubi_cdev_ioctl()
905 ubi_close_volume(desc); ubi_cdev_ioctl()
926 desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE); ubi_cdev_ioctl()
927 if (IS_ERR(desc)) { ubi_cdev_ioctl()
928 err = PTR_ERR(desc); ubi_cdev_ioctl()
932 pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, ubi_cdev_ioctl()
933 desc->vol->usable_leb_size); ubi_cdev_ioctl()
936 err = ubi_resize_volume(desc, pebs); ubi_cdev_ioctl()
938 ubi_close_volume(desc); ubi_cdev_ioctl()
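
get_exclusive()/revoke_exclusive() above implement an upgrade-to-exclusive handshake: under the volume lock, the upgrade succeeds only when the caller is the sole opener, and the caller's previous mode is returned so it can be restored once the exclusive operation (volume update, resize) finishes. A minimal pthread sketch of that protocol; counters and names are illustrative, not UBI's:

    /* Upgrade-to-exclusive sketch: succeeds only when the caller is the
     * volume's sole opener; the old mode is returned for later restore. */
    #include <pthread.h>
    #include <errno.h>

    enum mode { RO, RW, EXCL };

    struct vol {
        pthread_mutex_t lock;
        int readers, writers, exclusive;
    };

    static int get_exclusive(struct vol *v, enum mode *mode)
    {
        int old;

        pthread_mutex_lock(&v->lock);
        if (v->exclusive || v->readers + v->writers > 1) {
            pthread_mutex_unlock(&v->lock);
            return -EBUSY;                  /* someone else holds it */
        }
        old = *mode;
        v->readers = v->writers = 0;
        v->exclusive = 1;
        *mode = EXCL;
        pthread_mutex_unlock(&v->lock);
        return old;                         /* pass back to revoke below */
    }

    static void revoke_exclusive(struct vol *v, enum mode *mode, enum mode old)
    {
        pthread_mutex_lock(&v->lock);
        v->exclusive = 0;
        if (old == RW)
            v->writers = 1;
        else
            v->readers = 1;
        *mode = old;
        pthread_mutex_unlock(&v->lock);
    }

    int main(void)
    {
        struct vol v = { PTHREAD_MUTEX_INITIALIZER, 0, 1, 0 }; /* we write */
        enum mode m = RW;
        int old = get_exclusive(&v, &m);    /* succeeds: sole opener */

        if (old >= 0)
            revoke_exclusive(&v, &m, old);  /* back to read-write */
        return 0;
    }
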
/linux-4.1.27/drivers/crypto/vmx/
H A Daes_cbc.c93 static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, p8_aes_cbc_encrypt() argument
100 crypto_blkcipher_tfm(desc->tfm)); p8_aes_cbc_encrypt()
103 .info = desc->info, p8_aes_cbc_encrypt()
104 .flags = desc->flags p8_aes_cbc_encrypt()
115 ret = blkcipher_walk_virt(desc, &walk); p8_aes_cbc_encrypt()
120 ret = blkcipher_walk_done(desc, &walk, nbytes); p8_aes_cbc_encrypt()
129 static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, p8_aes_cbc_decrypt() argument
136 crypto_blkcipher_tfm(desc->tfm)); p8_aes_cbc_decrypt()
139 .info = desc->info, p8_aes_cbc_decrypt()
140 .flags = desc->flags p8_aes_cbc_decrypt()
151 ret = blkcipher_walk_virt(desc, &walk); p8_aes_cbc_decrypt()
156 ret = blkcipher_walk_done(desc, &walk, nbytes); p8_aes_cbc_decrypt()
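
Both p8_aes_cbc_* routines above follow the standard 4.1-era blkcipher walk loop: blkcipher_walk_virt() maps the scatterlists piecewise, the cipher runs over walk.src/walk.dst in block-size multiples, and blkcipher_walk_done() is told how many bytes remain. A hedged skeleton of that loop (the cipher call is a placeholder comment, not the vmx driver's exact body):

    struct blkcipher_walk walk;
    unsigned int nbytes;
    int ret;

    blkcipher_walk_init(&walk, dst, src, total);
    ret = blkcipher_walk_virt(desc, &walk);

    while ((nbytes = walk.nbytes) != 0) {
        unsigned int done = nbytes & ~(AES_BLOCK_SIZE - 1);

        /* run the block cipher over this mapped span:
         * walk.src.virt.addr -> walk.dst.virt.addr, 'done' bytes,
         * chaining through walk.iv */

        ret = blkcipher_walk_done(desc, &walk, nbytes - done);
    }
    return ret;
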
/linux-4.1.27/drivers/tty/serial/8250/
H A D8250_dma.c75 struct dma_async_tx_descriptor *desc; serial8250_tx_dma() local
84 desc = dmaengine_prep_slave_single(dma->txchan, serial8250_tx_dma()
88 if (!desc) { serial8250_tx_dma()
94 desc->callback = __dma_tx_complete; serial8250_tx_dma()
95 desc->callback_param = p; serial8250_tx_dma()
97 dma->tx_cookie = dmaengine_submit(desc); serial8250_tx_dma()
119 struct dma_async_tx_descriptor *desc; serial8250_rx_dma() local
143 desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr, serial8250_rx_dma()
146 if (!desc) serial8250_rx_dma()
150 desc->callback = __dma_rx_complete; serial8250_rx_dma()
151 desc->callback_param = p; serial8250_rx_dma()
153 dma->rx_cookie = dmaengine_submit(desc); serial8250_rx_dma()
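
serial8250_tx_dma()/serial8250_rx_dma() above use the canonical dmaengine slave sequence: prep a descriptor for the mapped buffer, attach a completion callback, submit to obtain a cookie, then kick the channel. A hedged sketch of the full sequence (buffer and callback names are illustrative; the issue_pending step is implied by the surrounding driver code, not shown in the hits):

    struct dma_async_tx_descriptor *d;
    dma_cookie_t cookie;

    d = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_MEM_TO_DEV,
                                    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!d)
        return -EBUSY;              /* no descriptor: fall back to PIO */

    d->callback = tx_done;          /* illustrative completion handler */
    d->callback_param = port;

    cookie = dmaengine_submit(d);   /* queue the descriptor */
    dma_async_issue_pending(chan);  /* start the transfer */
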
/linux-4.1.27/drivers/input/misc/
H A Dgpio-beeper.c23 struct gpio_desc *desc; member in struct:gpio_beeper
29 gpiod_set_value_cansleep(beep->desc, on); gpio_beeper_toggle()
75 beep->desc = devm_gpiod_get(&pdev->dev, NULL); gpio_beeper_probe()
76 if (IS_ERR(beep->desc)) gpio_beeper_probe()
77 return PTR_ERR(beep->desc); gpio_beeper_probe()
95 err = gpiod_direction_output(beep->desc, 0); gpio_beeper_probe()
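
gpio-beeper above uses the descriptor-based GPIO consumer API: devm_gpiod_get() returns a struct gpio_desc, probe sets the line as output, and the handler toggles it with the _cansleep variant because it runs in process context (the line may sit behind an I2C expander). A short sketch using the later flags-taking form of the getter, which folds the direction setup into the lookup; the "beeper" con_id is illustrative:

    struct gpio_desc *gd;

    gd = devm_gpiod_get(&pdev->dev, "beeper", GPIOD_OUT_LOW);
    if (IS_ERR(gd))
        return PTR_ERR(gd);             /* missing or deferred */

    gpiod_set_value_cansleep(gd, 1);    /* on  (may sleep) */
    gpiod_set_value_cansleep(gd, 0);    /* off */
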
/linux-4.1.27/arch/arm/mach-s3c64xx/
H A Dmach-smartq7.c66 .desc = "Power",
74 .desc = "Function",
82 .desc = "Minus",
90 .desc = "Plus",
98 .desc = "Enter",
106 .desc = "Cancel",
/linux-4.1.27/drivers/hv/
H A Dchannel.c588 struct vmpacket_descriptor desc; vmbus_sendpacket_ctl() local
598 desc.type = type; /* VmbusPacketTypeDataInBand; */ vmbus_sendpacket_ctl()
599 desc.flags = flags; /* VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; */ vmbus_sendpacket_ctl()
601 desc.offset8 = sizeof(struct vmpacket_descriptor) >> 3; vmbus_sendpacket_ctl()
602 desc.len8 = (u16)(packetlen_aligned >> 3); vmbus_sendpacket_ctl()
603 desc.trans_id = requestid; vmbus_sendpacket_ctl()
605 bufferlist[0].iov_base = &desc; vmbus_sendpacket_ctl()
672 struct vmbus_channel_packet_page_buffer desc; vmbus_sendpacket_pagebuffer_ctl() local
695 desc.type = VM_PKT_DATA_USING_GPA_DIRECT; vmbus_sendpacket_pagebuffer_ctl()
696 desc.flags = flags; vmbus_sendpacket_pagebuffer_ctl()
697 desc.dataoffset8 = descsize >> 3; /* in 8-byte granularity */ vmbus_sendpacket_pagebuffer_ctl()
698 desc.length8 = (u16)(packetlen_aligned >> 3); vmbus_sendpacket_pagebuffer_ctl()
699 desc.transactionid = requestid; vmbus_sendpacket_pagebuffer_ctl()
700 desc.rangecount = pagecount; vmbus_sendpacket_pagebuffer_ctl()
703 desc.range[i].len = pagebuffers[i].len; vmbus_sendpacket_pagebuffer_ctl()
704 desc.range[i].offset = pagebuffers[i].offset; vmbus_sendpacket_pagebuffer_ctl()
705 desc.range[i].pfn = pagebuffers[i].pfn; vmbus_sendpacket_pagebuffer_ctl()
708 bufferlist[0].iov_base = &desc; vmbus_sendpacket_pagebuffer_ctl()
759 struct vmbus_packet_mpb_array *desc, vmbus_sendpacket_mpb_desc()
774 desc->type = VM_PKT_DATA_USING_GPA_DIRECT; vmbus_sendpacket_mpb_desc()
775 desc->flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; vmbus_sendpacket_mpb_desc()
776 desc->dataoffset8 = desc_size >> 3; /* in 8-bytes grandularity */ vmbus_sendpacket_mpb_desc()
777 desc->length8 = (u16)(packetlen_aligned >> 3); vmbus_sendpacket_mpb_desc()
778 desc->transactionid = requestid; vmbus_sendpacket_mpb_desc()
779 desc->rangecount = 1; vmbus_sendpacket_mpb_desc()
781 bufferlist[0].iov_base = desc; vmbus_sendpacket_mpb_desc()
806 struct vmbus_channel_packet_multipage_buffer desc; vmbus_sendpacket_multipagebuffer() local
831 desc.type = VM_PKT_DATA_USING_GPA_DIRECT; vmbus_sendpacket_multipagebuffer()
832 desc.flags = VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED; vmbus_sendpacket_multipagebuffer()
833 desc.dataoffset8 = descsize >> 3; /* in 8-byte granularity */ vmbus_sendpacket_multipagebuffer()
834 desc.length8 = (u16)(packetlen_aligned >> 3); vmbus_sendpacket_multipagebuffer()
835 desc.transactionid = requestid; vmbus_sendpacket_multipagebuffer()
836 desc.rangecount = 1; vmbus_sendpacket_multipagebuffer()
838 desc.range.len = multi_pagebuffer->len; vmbus_sendpacket_multipagebuffer()
839 desc.range.offset = multi_pagebuffer->offset; vmbus_sendpacket_multipagebuffer()
841 memcpy(desc.range.pfn_array, multi_pagebuffer->pfn_array, vmbus_sendpacket_multipagebuffer()
844 bufferlist[0].iov_base = &desc; vmbus_sendpacket_multipagebuffer()
876 struct vmpacket_descriptor desc; vmbus_recvpacket() local
886 ret = hv_ringbuffer_peek(&channel->inbound, &desc, vmbus_recvpacket()
891 packetlen = desc.len8 << 3; vmbus_recvpacket()
892 userlen = packetlen - (desc.offset8 << 3); vmbus_recvpacket()
903 *requestid = desc.trans_id; vmbus_recvpacket()
907 (desc.offset8 << 3), &signal); vmbus_recvpacket()
923 struct vmpacket_descriptor desc; vmbus_recvpacket_raw() local
932 ret = hv_ringbuffer_peek(&channel->inbound, &desc, vmbus_recvpacket_raw()
938 packetlen = desc.len8 << 3; vmbus_recvpacket_raw()
945 *requestid = desc.trans_id; vmbus_recvpacket_raw()
758 vmbus_sendpacket_mpb_desc(struct vmbus_channel *channel, struct vmbus_packet_mpb_array *desc, u32 desc_size, void *buffer, u32 bufferlen, u64 requestid) vmbus_sendpacket_mpb_desc() argument
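
Every vmbus sender above builds the same wire shape: a fixed descriptor whose offset8/len8 fields count 8-byte units, the payload, and zero padding up to the next 8-byte boundary, gathered as an iovec-style list for the ring buffer. A compile-able demo of just the length bookkeeping (the 16-byte header mirrors vmpacket_descriptor's size; field use here is illustrative):

    /* offset8/len8 bookkeeping: vmbus lengths travel in 8-byte units,
     * with the payload padded to an 8-byte boundary by a third gather
     * element. */
    #include <stdio.h>
    #include <stdint.h>
    #include <sys/uio.h>

    struct pkt_desc {
        uint16_t type, offset8, len8, flags;
        uint64_t trans_id;
    };                                       /* 16 bytes */

    int main(void)
    {
        static uint64_t pad;                 /* zeroed alignment filler */
        char payload[13] = "hello, vmbus";
        struct pkt_desc desc = { 0 };
        struct iovec vec[3];

        size_t packetlen = sizeof(desc) + sizeof(payload);   /* 29 */
        size_t aligned = (packetlen + 7) & ~(size_t)7;       /* 32 */

        desc.offset8 = sizeof(desc) >> 3;    /* payload starts at unit 2 */
        desc.len8 = (uint16_t)(aligned >> 3);

        vec[0].iov_base = &desc;    vec[0].iov_len = sizeof(desc);
        vec[1].iov_base = payload;  vec[1].iov_len = sizeof(payload);
        vec[2].iov_base = &pad;     vec[2].iov_len = aligned - packetlen;

        printf("offset8=%u len8=%u pad=%zu\n",
               desc.offset8, desc.len8, vec[2].iov_len);
        return 0;
    }
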
/linux-4.1.27/drivers/net/wan/
H A Dixp4xx_hss.c43 #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
235 /* HDLC packet status values - desc->status */
263 struct desc *desc_tab; /* coherent */
292 struct desc { struct
315 (n) * sizeof(struct desc))
319 ((n) + RX_DESCS) * sizeof(struct desc))
573 static inline void debug_desc(u32 phys, struct desc *desc) debug_desc() argument
577 phys, desc->next, desc->buf_len, desc->pkt_len, debug_desc()
578 desc->data, desc->status, desc->error_count); debug_desc()
586 struct desc *tab; queue_get_desc()
594 n_desc = (phys - tab_phys) / sizeof(struct desc); queue_get_desc()
602 struct desc *desc) queue_put_desc()
604 debug_desc(phys, desc); queue_put_desc()
612 static inline void dma_unmap_tx(struct port *port, struct desc *desc) dma_unmap_tx() argument
615 dma_unmap_single(&port->netdev->dev, desc->data, dma_unmap_tx()
616 desc->buf_len, DMA_TO_DEVICE); dma_unmap_tx()
618 dma_unmap_single(&port->netdev->dev, desc->data & ~3, dma_unmap_tx()
619 ALIGN((desc->data & 3) + desc->buf_len, 4), dma_unmap_tx()
668 struct desc *desc; hss_hdlc_poll() local
699 desc = rx_desc_ptr(port, n); hss_hdlc_poll()
701 if (desc->error_count) hss_hdlc_poll()
703 " errors %u\n", dev->name, desc->status, hss_hdlc_poll()
704 desc->error_count); hss_hdlc_poll()
707 switch (desc->status) { hss_hdlc_poll()
720 skb = netdev_alloc_skb(dev, desc->pkt_len); hss_hdlc_poll()
740 desc->status, desc->error_count); hss_hdlc_poll()
745 /* put the desc back on RX-ready queue */ hss_hdlc_poll()
746 desc->buf_len = RX_SIZE; hss_hdlc_poll()
747 desc->pkt_len = desc->status = 0; hss_hdlc_poll()
748 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); hss_hdlc_poll()
756 dma_unmap_single(&dev->dev, desc->data, hss_hdlc_poll()
759 dma_sync_single_for_cpu(&dev->dev, desc->data, hss_hdlc_poll()
762 ALIGN(desc->pkt_len, 4) / 4); hss_hdlc_poll()
764 skb_put(skb, desc->pkt_len); hss_hdlc_poll()
776 desc->data = phys; hss_hdlc_poll()
778 desc->buf_len = RX_SIZE; hss_hdlc_poll()
779 desc->pkt_len = 0; hss_hdlc_poll()
780 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); hss_hdlc_poll()
801 struct desc *desc; hss_hdlc_txdone_irq() local
804 desc = tx_desc_ptr(port, n_desc); hss_hdlc_txdone_irq()
807 dev->stats.tx_bytes += desc->pkt_len; hss_hdlc_txdone_irq()
809 dma_unmap_tx(port, desc); hss_hdlc_txdone_irq()
819 tx_desc_phys(port, n_desc), desc); hss_hdlc_txdone_irq()
837 struct desc *desc; hss_hdlc_xmit() local
881 desc = tx_desc_ptr(port, n); hss_hdlc_xmit()
888 desc->data = phys + offset; hss_hdlc_xmit()
889 desc->buf_len = desc->pkt_len = len; hss_hdlc_xmit()
892 queue_put_desc(queue_ids[port->id].tx, tx_desc_phys(port, n), desc); hss_hdlc_xmit()
988 struct desc *desc = rx_desc_ptr(port, i); init_hdlc_queues() local
1000 desc->buf_len = RX_SIZE; init_hdlc_queues()
1001 desc->data = dma_map_single(&port->netdev->dev, data, init_hdlc_queues()
1003 if (dma_mapping_error(&port->netdev->dev, desc->data)) { init_hdlc_queues()
1019 struct desc *desc = rx_desc_ptr(port, i); destroy_hdlc_queues() local
1023 desc->data, RX_SIZE, destroy_hdlc_queues()
1029 struct desc *desc = tx_desc_ptr(port, i); destroy_hdlc_queues() local
1032 dma_unmap_tx(port, desc); destroy_hdlc_queues()
601 queue_put_desc(unsigned int queue, u32 phys, struct desc *desc) queue_put_desc() argument
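
queue_get_desc() above recovers a descriptor index from a bus address by subtracting the coherent table's base and dividing by sizeof(struct desc); rx_desc_phys()/tx_desc_phys() are the inverse. A compile-able demo of that round trip (addresses are illustrative):

    /* phys <-> index round trip for a coherent descriptor table, as in
     * queue_get_desc()/rx_desc_phys(). */
    #include <stdio.h>
    #include <stdint.h>

    struct desc { uint32_t next, len, data, status; };    /* 16 bytes */

    int main(void)
    {
        uint32_t tab_phys = 0x40000000;                   /* DMA base */
        unsigned int n = 5;

        uint32_t phys = tab_phys + n * sizeof(struct desc);
        unsigned int back = (phys - tab_phys) / sizeof(struct desc);

        printf("desc %u at 0x%08x, recovered index %u\n",
               n, (unsigned int)phys, back);
        return back != n;                                 /* 0 on success */
    }
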
