Searched refs:desc (Results 1 - 200 of 2283) sorted by relevance


/linux-4.4.14/kernel/irq/

settings.h

37 irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
39         desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK);
40         desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
43 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
45         return desc->status_use_accessors & _IRQ_PER_CPU;
48 static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc)
50         return desc->status_use_accessors & _IRQ_PER_CPU_DEVID;
53 static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
55         desc->status_use_accessors |= _IRQ_PER_CPU;
58 static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
60         desc->status_use_accessors |= _IRQ_NO_BALANCING;
63 static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
65         return desc->status_use_accessors & _IRQ_NO_BALANCING;
68 static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc)
70         return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK;
74 irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask)
76         desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK;
77         desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK;
80 static inline bool irq_settings_is_level(struct irq_desc *desc)
82         return desc->status_use_accessors & _IRQ_LEVEL;
85 static inline void irq_settings_clr_level(struct irq_desc *desc)
87         desc->status_use_accessors &= ~_IRQ_LEVEL;
90 static inline void irq_settings_set_level(struct irq_desc *desc)
92         desc->status_use_accessors |= _IRQ_LEVEL;
95 static inline bool irq_settings_can_request(struct irq_desc *desc)
97         return !(desc->status_use_accessors & _IRQ_NOREQUEST);
100 static inline void irq_settings_clr_norequest(struct irq_desc *desc)
102         desc->status_use_accessors &= ~_IRQ_NOREQUEST;
105 static inline void irq_settings_set_norequest(struct irq_desc *desc)
107         desc->status_use_accessors |= _IRQ_NOREQUEST;
110 static inline bool irq_settings_can_thread(struct irq_desc *desc)
112         return !(desc->status_use_accessors & _IRQ_NOTHREAD);
115 static inline void irq_settings_clr_nothread(struct irq_desc *desc)
117         desc->status_use_accessors &= ~_IRQ_NOTHREAD;
120 static inline void irq_settings_set_nothread(struct irq_desc *desc)
122         desc->status_use_accessors |= _IRQ_NOTHREAD;
125 static inline bool irq_settings_can_probe(struct irq_desc *desc)
127         return !(desc->status_use_accessors & _IRQ_NOPROBE);
130 static inline void irq_settings_clr_noprobe(struct irq_desc *desc)
132         desc->status_use_accessors &= ~_IRQ_NOPROBE;
135 static inline void irq_settings_set_noprobe(struct irq_desc *desc)
137         desc->status_use_accessors |= _IRQ_NOPROBE;
140 static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc)
142         return desc->status_use_accessors & _IRQ_MOVE_PCNTXT;
145 static inline bool irq_settings_can_autoenable(struct irq_desc *desc)
147         return !(desc->status_use_accessors & _IRQ_NOAUTOEN);
150 static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
152         return desc->status_use_accessors & _IRQ_NESTED_THREAD;
155 static inline bool irq_settings_is_polled(struct irq_desc *desc)
157         return desc->status_use_accessors & _IRQ_IS_POLLED;
160 static inline bool irq_settings_disable_unlazy(struct irq_desc *desc)
162         return desc->status_use_accessors & _IRQ_DISABLE_UNLAZY;
165 static inline void irq_settings_clr_disable_unlazy(struct irq_desc *desc)
167         desc->status_use_accessors &= ~_IRQ_DISABLE_UNLAZY;
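
These accessors are core-internal; drivers reach them through irq_modify_status() (matched below in chip.c, lines 832-851) or the irq_set_status_flags()/irq_clear_status_flags() wrappers from <linux/irq.h>. A minimal sketch, not from the search results, with a hypothetical virq obtained elsewhere:

#include <linux/irq.h>

/* Sketch: mark a line unusable by request_irq() and autoprobing.
 * The wrappers funnel into irq_modify_status(), which applies
 * irq_settings_clr_and_set() under the descriptor lock. */
static void example_seal_irq(unsigned int virq)
{
        irq_set_status_flags(virq, IRQ_NOREQUEST | IRQ_NOPROBE);
}

/* Sketch: make the line requestable again later. */
static void example_unseal_irq(unsigned int virq)
{
        irq_clear_status_flags(virq, IRQ_NOREQUEST);
}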

debug.h

7 #define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
8 #define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
12 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
14         printk("irq %d, desc: %p, depth: %d, count: %d, unhandled: %d\n",
15                 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled);
16         printk("->handle_irq(): %p, ", desc->handle_irq);
17         print_symbol("%s\n", (unsigned long)desc->handle_irq);
18         printk("->irq_data.chip(): %p, ", desc->irq_data.chip);
19         print_symbol("%s\n", (unsigned long)desc->irq_data.chip);
20         printk("->action(): %p\n", desc->action);
21         if (desc->action) {
22                 printk("->action->handler(): %p, ", desc->action->handler);
23                 print_symbol("%s\n", (unsigned long)desc->action->handler);

pm.c

17 bool irq_pm_check_wakeup(struct irq_desc *desc)
19         if (irqd_is_wakeup_armed(&desc->irq_data)) {
20                 irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
21                 desc->istate |= IRQS_SUSPENDED | IRQS_PENDING;
22                 desc->depth++;
23                 irq_disable(desc);
24                 pm_system_irq_wakeup(irq_desc_get_irq(desc));
31  * Called from __setup_irq() with desc->lock held after @action has
34 void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action)
36         desc->nr_actions++;
39                 desc->force_resume_depth++;
41         WARN_ON_ONCE(desc->force_resume_depth &&
42                      desc->force_resume_depth != desc->nr_actions);
45                 desc->no_suspend_depth++;
47                 desc->cond_suspend_depth++;
49         WARN_ON_ONCE(desc->no_suspend_depth &&
50                      (desc->no_suspend_depth +
51                       desc->cond_suspend_depth) != desc->nr_actions);
55  * Called from __free_irq() with desc->lock held after @action has
58 void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action)
60         desc->nr_actions--;
63                 desc->force_resume_depth--;
66                 desc->no_suspend_depth--;
68                 desc->cond_suspend_depth--;
71 static bool suspend_device_irq(struct irq_desc *desc)
73         if (!desc->action || irq_desc_is_chained(desc) ||
74             desc->no_suspend_depth)
77         if (irqd_is_wakeup_set(&desc->irq_data)) {
78                 irqd_set(&desc->irq_data, IRQD_WAKEUP_ARMED);
88         desc->istate |= IRQS_SUSPENDED;
89         __disable_irq(desc);
97         if (irq_desc_get_chip(desc)->flags & IRQCHIP_MASK_ON_SUSPEND)
98                 mask_irq(desc);
120         struct irq_desc *desc;
123         for_each_irq_desc(irq, desc) {
127                 if (irq_settings_is_nested_thread(desc))
129                 raw_spin_lock_irqsave(&desc->lock, flags);
130                 sync = suspend_device_irq(desc);
131                 raw_spin_unlock_irqrestore(&desc->lock, flags);
139 static void resume_irq(struct irq_desc *desc)
141         irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED);
143         if (desc->istate & IRQS_SUSPENDED)
147         if (!desc->force_resume_depth)
151         desc->depth++;
153         desc->istate &= ~IRQS_SUSPENDED;
154         __enable_irq(desc);
159         struct irq_desc *desc;
162         for_each_irq_desc(irq, desc) {
164                 bool is_early = desc->action &&
165                         desc->action->flags & IRQF_EARLY_RESUME;
169                 if (irq_settings_is_nested_thread(desc))
172                 raw_spin_lock_irqsave(&desc->lock, flags);
173                 resume_irq(desc);
174                 raw_spin_unlock_irqrestore(&desc->lock, flags);
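
The wakeup arming checked by irq_pm_check_wakeup()/suspend_device_irq() is driven from the driver side by enable_irq_wake(); the other knob is IRQF_NO_SUSPEND at request time, which feeds no_suspend_depth in irq_pm_install_action(). A hedged sketch with illustrative names:

#include <linux/interrupt.h>

static irqreturn_t example_handler(int irq, void *dev_id)
{
        return IRQ_HANDLED;
}

/* Sketch: request a line and arm it as a system-wakeup source.
 * enable_irq_wake() sets the wakeup state that suspend_device_irq()
 * turns into IRQD_WAKEUP_ARMED; if the armed line fires during
 * suspend, irq_pm_check_wakeup() calls pm_system_irq_wakeup() and the
 * transition is aborted. (IRQF_NO_SUSPEND would instead bump
 * no_suspend_depth so the line is never suspended at all.) */
static int example_setup_wake_irq(unsigned int irq, void *dev)
{
        int ret = request_irq(irq, example_handler, 0, "example-wake", dev);

        if (ret)
                return ret;
        return enable_irq_wake(irq);
}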

chip.c

46         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
48         if (!desc)
54         desc->irq_data.chip = chip;
55         irq_put_desc_unlock(desc, flags);
73         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
76         if (!desc)
80         ret = __irq_set_trigger(desc, type);
81         irq_put_desc_busunlock(desc, flags);
96         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
98         if (!desc)
100         desc->irq_common_data.handler_data = data;
101         irq_put_desc_unlock(desc, flags);
118         struct irq_desc *desc = irq_get_desc_lock(irq_base + irq_offset, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
120         if (!desc)
122         desc->irq_common_data.msi_desc = entry;
125         irq_put_desc_unlock(desc, flags);
151         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
153         if (!desc)
155         desc->irq_data.chip_data = data;
156         irq_put_desc_unlock(desc, flags);
163         struct irq_desc *desc = irq_to_desc(irq);
165         return desc ? &desc->irq_data : NULL;
169 static void irq_state_clr_disabled(struct irq_desc *desc)
171         irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED);
174 static void irq_state_set_disabled(struct irq_desc *desc)
176         irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
179 static void irq_state_clr_masked(struct irq_desc *desc)
181         irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED);
184 static void irq_state_set_masked(struct irq_desc *desc)
186         irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
189 int irq_startup(struct irq_desc *desc, bool resend)
193         irq_state_clr_disabled(desc);
194         desc->depth = 0;
196         irq_domain_activate_irq(&desc->irq_data);
197         if (desc->irq_data.chip->irq_startup) {
198                 ret = desc->irq_data.chip->irq_startup(&desc->irq_data);
199                 irq_state_clr_masked(desc);
201                 irq_enable(desc);
204                 check_irq_resend(desc);
208 void irq_shutdown(struct irq_desc *desc)
210         irq_state_set_disabled(desc);
211         desc->depth = 1;
212         if (desc->irq_data.chip->irq_shutdown)
213                 desc->irq_data.chip->irq_shutdown(&desc->irq_data);
214         else if (desc->irq_data.chip->irq_disable)
215                 desc->irq_data.chip->irq_disable(&desc->irq_data);
217                 desc->irq_data.chip->irq_mask(&desc->irq_data);
218         irq_domain_deactivate_irq(&desc->irq_data);
219         irq_state_set_masked(desc);
222 void irq_enable(struct irq_desc *desc)
224         irq_state_clr_disabled(desc);
225         if (desc->irq_data.chip->irq_enable)
226                 desc->irq_data.chip->irq_enable(&desc->irq_data);
228                 desc->irq_data.chip->irq_unmask(&desc->irq_data);
229         irq_state_clr_masked(desc);
234  * @desc: irq descriptor which should be disabled
252 void irq_disable(struct irq_desc *desc)
254         irq_state_set_disabled(desc);
255         if (desc->irq_data.chip->irq_disable) {
256                 desc->irq_data.chip->irq_disable(&desc->irq_data);
257                 irq_state_set_masked(desc);
258         } else if (irq_settings_disable_unlazy(desc)) {
259                 mask_irq(desc);
263 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
265         if (desc->irq_data.chip->irq_enable)
266                 desc->irq_data.chip->irq_enable(&desc->irq_data);
268                 desc->irq_data.chip->irq_unmask(&desc->irq_data);
269         cpumask_set_cpu(cpu, desc->percpu_enabled);
272 void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu)
274         if (desc->irq_data.chip->irq_disable)
275                 desc->irq_data.chip->irq_disable(&desc->irq_data);
277                 desc->irq_data.chip->irq_mask(&desc->irq_data);
278         cpumask_clear_cpu(cpu, desc->percpu_enabled);
281 static inline void mask_ack_irq(struct irq_desc *desc)
283         if (desc->irq_data.chip->irq_mask_ack)
284                 desc->irq_data.chip->irq_mask_ack(&desc->irq_data);
286                 desc->irq_data.chip->irq_mask(&desc->irq_data);
287                 if (desc->irq_data.chip->irq_ack)
288                         desc->irq_data.chip->irq_ack(&desc->irq_data);
290         irq_state_set_masked(desc);
293 void mask_irq(struct irq_desc *desc)
295         if (desc->irq_data.chip->irq_mask) {
296                 desc->irq_data.chip->irq_mask(&desc->irq_data);
297                 irq_state_set_masked(desc);
301 void unmask_irq(struct irq_desc *desc)
303         if (desc->irq_data.chip->irq_unmask) {
304                 desc->irq_data.chip->irq_unmask(&desc->irq_data);
305                 irq_state_clr_masked(desc);
309 void unmask_threaded_irq(struct irq_desc *desc)
311         struct irq_chip *chip = desc->irq_data.chip;
314                 chip->irq_eoi(&desc->irq_data);
317                 chip->irq_unmask(&desc->irq_data);
318                 irq_state_clr_masked(desc);
332         struct irq_desc *desc = irq_to_desc(irq);
338         raw_spin_lock_irq(&desc->lock);
340         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
341         kstat_incr_irqs_this_cpu(desc);
343         action = desc->action;
344         if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
345                 desc->istate |= IRQS_PENDING;
349         irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
350         raw_spin_unlock_irq(&desc->lock);
354                 note_interrupt(desc, action_ret);
356         raw_spin_lock_irq(&desc->lock);
357         irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
360         raw_spin_unlock_irq(&desc->lock);
364 static bool irq_check_poll(struct irq_desc *desc)
366         if (!(desc->istate & IRQS_POLL_INPROGRESS))
368         return irq_wait_for_poll(desc);
371 static bool irq_may_run(struct irq_desc *desc)
379         if (!irqd_has_set(&desc->irq_data, mask))
387         if (irq_pm_check_wakeup(desc))
393         return irq_check_poll(desc);
398  * @desc: the interrupt description structure for this irq
407 void handle_simple_irq(struct irq_desc *desc)
409         raw_spin_lock(&desc->lock);
411         if (!irq_may_run(desc))
414         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
415         kstat_incr_irqs_this_cpu(desc);
417         if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
418                 desc->istate |= IRQS_PENDING;
422         handle_irq_event(desc);
425         raw_spin_unlock(&desc->lock);
433 static void cond_unmask_irq(struct irq_desc *desc)
442         if (!irqd_irq_disabled(&desc->irq_data) &&
443             irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot)
444                 unmask_irq(desc);
449  * @desc: the interrupt description structure for this irq
456 void handle_level_irq(struct irq_desc *desc)
458         raw_spin_lock(&desc->lock);
459         mask_ack_irq(desc);
461         if (!irq_may_run(desc))
464         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
465         kstat_incr_irqs_this_cpu(desc);
471         if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
472                 desc->istate |= IRQS_PENDING;
476         handle_irq_event(desc);
478         cond_unmask_irq(desc);
481         raw_spin_unlock(&desc->lock);
486 static inline void preflow_handler(struct irq_desc *desc)
488         if (desc->preflow_handler)
489                 desc->preflow_handler(&desc->irq_data);
492 static inline void preflow_handler(struct irq_desc *desc) { }
495 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
497         if (!(desc->istate & IRQS_ONESHOT)) {
498                 chip->irq_eoi(&desc->irq_data);
507         if (!irqd_irq_disabled(&desc->irq_data) &&
508             irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
509                 chip->irq_eoi(&desc->irq_data);
510                 unmask_irq(desc);
512                 chip->irq_eoi(&desc->irq_data);
518  * @desc: the interrupt description structure for this irq
525 void handle_fasteoi_irq(struct irq_desc *desc)
527         struct irq_chip *chip = desc->irq_data.chip;
529         raw_spin_lock(&desc->lock);
531         if (!irq_may_run(desc))
534         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
535         kstat_incr_irqs_this_cpu(desc);
541         if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
542                 desc->istate |= IRQS_PENDING;
543                 mask_irq(desc);
547         if (desc->istate & IRQS_ONESHOT)
548                 mask_irq(desc);
550         preflow_handler(desc);
551         handle_irq_event(desc);
553         cond_unmask_eoi_irq(desc, chip);
555         raw_spin_unlock(&desc->lock);
559         chip->irq_eoi(&desc->irq_data);
560         raw_spin_unlock(&desc->lock);
566  * @desc: the interrupt description structure for this irq
579 void handle_edge_irq(struct irq_desc *desc)
581         raw_spin_lock(&desc->lock);
583         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
585         if (!irq_may_run(desc)) {
586                 desc->istate |= IRQS_PENDING;
587                 mask_ack_irq(desc);
595         if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
596                 desc->istate |= IRQS_PENDING;
597                 mask_ack_irq(desc);
601         kstat_incr_irqs_this_cpu(desc);
604         desc->irq_data.chip->irq_ack(&desc->irq_data);
607                 if (unlikely(!desc->action)) {
608                         mask_irq(desc);
617                 if (unlikely(desc->istate & IRQS_PENDING)) {
618                         if (!irqd_irq_disabled(&desc->irq_data) &&
619                             irqd_irq_masked(&desc->irq_data))
620                                 unmask_irq(desc);
623                 handle_irq_event(desc);
625         } while ((desc->istate & IRQS_PENDING) &&
626                  !irqd_irq_disabled(&desc->irq_data));
629         raw_spin_unlock(&desc->lock);
636  * @desc: the interrupt description structure for this irq
641 void handle_edge_eoi_irq(struct irq_desc *desc)
643         struct irq_chip *chip = irq_desc_get_chip(desc);
645         raw_spin_lock(&desc->lock);
647         desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
649         if (!irq_may_run(desc)) {
650                 desc->istate |= IRQS_PENDING;
658         if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
659                 desc->istate |= IRQS_PENDING;
663         kstat_incr_irqs_this_cpu(desc);
666                 if (unlikely(!desc->action))
669                 handle_irq_event(desc);
671         } while ((desc->istate & IRQS_PENDING) &&
672                  !irqd_irq_disabled(&desc->irq_data));
675         chip->irq_eoi(&desc->irq_data);
676         raw_spin_unlock(&desc->lock);
682  * @desc: the interrupt description structure for this irq
686 void handle_percpu_irq(struct irq_desc *desc)
688         struct irq_chip *chip = irq_desc_get_chip(desc);
690         kstat_incr_irqs_this_cpu(desc);
693                 chip->irq_ack(&desc->irq_data);
695         handle_irq_event_percpu(desc);
698                 chip->irq_eoi(&desc->irq_data);
703  * @desc: the interrupt description structure for this irq
712 void handle_percpu_devid_irq(struct irq_desc *desc)
714         struct irq_chip *chip = irq_desc_get_chip(desc);
715         struct irqaction *action = desc->action;
717         unsigned int irq = irq_desc_get_irq(desc);
720         kstat_incr_irqs_this_cpu(desc);
723                 chip->irq_ack(&desc->irq_data);
730                 chip->irq_eoi(&desc->irq_data);
734 __irq_do_set_handler(struct irq_desc *desc, irq_flow_handler_t handle,
740                 struct irq_data *irq_data = &desc->irq_data;
769                 if (desc->irq_data.chip != &no_irq_chip)
770                         mask_ack_irq(desc);
771                 irq_state_set_disabled(desc);
773                         desc->action = NULL;
774                 desc->depth = 1;
776         desc->handle_irq = handle;
777         desc->name = name;
780                 irq_settings_set_noprobe(desc);
781                 irq_settings_set_norequest(desc);
782                 irq_settings_set_nothread(desc);
783                 desc->action = &chained_action;
784                 irq_startup(desc, true);
793         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
795         if (!desc)
798         __irq_do_set_handler(desc, handle, is_chained, name);
799         irq_put_desc_busunlock(desc, flags);
808         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);
810         if (!desc)
813         __irq_do_set_handler(desc, handle, 1, NULL);
814         desc->irq_common_data.handler_data = data;
816         irq_put_desc_busunlock(desc, flags);
832         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
834         if (!desc)
836         irq_settings_clr_and_set(desc, clr, set);
838         irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU |
840         if (irq_settings_has_no_balance_set(desc))
841                 irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
842         if (irq_settings_is_per_cpu(desc))
843                 irqd_set(&desc->irq_data, IRQD_PER_CPU);
844         if (irq_settings_can_move_pcntxt(desc))
845                 irqd_set(&desc->irq_data, IRQD_MOVE_PCNTXT);
846         if (irq_settings_is_level(desc))
847                 irqd_set(&desc->irq_data, IRQD_LEVEL);
849         irqd_set(&desc->irq_data, irq_settings_get_trigger_mask(desc));
851         irq_put_desc_unlock(desc, flags);
863         struct irq_desc *desc;
869                 desc = irq_to_desc(irq);
870                 if (!desc)
873                 raw_spin_lock_irqsave(&desc->lock, flags);
875                 chip = irq_data_get_irq_chip(&desc->irq_data);
878                     !irqd_irq_disabled(&desc->irq_data)))
879                         chip->irq_cpu_online(&desc->irq_data);
881                 raw_spin_unlock_irqrestore(&desc->lock, flags);
893         struct irq_desc *desc;
899                 desc = irq_to_desc(irq);
900                 if (!desc)
903                 raw_spin_lock_irqsave(&desc->lock, flags);
905                 chip = irq_data_get_irq_chip(&desc->irq_data);
908                     !irqd_irq_disabled(&desc->irq_data)))
909                         chip->irq_cpu_offline(&desc->irq_data);
911                 raw_spin_unlock_irqrestore(&desc->lock, flags);
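
The flow handlers above (handle_level_irq(), handle_edge_irq(), ...) are installed per line by an irqchip driver. A hedged sketch of a minimal chip wired to handle_level_irq(), with all example_* names illustrative and register accesses elided:

#include <linux/irq.h>

static void example_mask(struct irq_data *d)    { /* set hw mask bit */ }
static void example_unmask(struct irq_data *d)  { /* clear hw mask bit */ }
static void example_ack(struct irq_data *d)     { /* clear latched status */ }

static struct irq_chip example_chip = {
        .name           = "example",
        .irq_mask       = example_mask,
        .irq_unmask     = example_unmask,
        .irq_ack        = example_ack,
};

/* Sketch: bind chip, flow handler and per-line data for one virq.
 * The handler chosen here decides which of the functions above runs;
 * handle_level_irq() mask-acks on entry and conditionally unmasks on
 * exit, which suits level-triggered hardware. */
static void example_setup_line(unsigned int virq, void *chip_data)
{
        irq_set_chip_and_handler(virq, &example_chip, handle_level_irq);
        irq_set_chip_data(virq, chip_data);
}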

autoprobe.c

33         struct irq_desc *desc;
46         for_each_irq_desc_reverse(i, desc) {
47                 raw_spin_lock_irq(&desc->lock);
48                 if (!desc->action && irq_settings_can_probe(desc)) {
53                         if (desc->irq_data.chip->irq_set_type)
54                                 desc->irq_data.chip->irq_set_type(&desc->irq_data,
56                         irq_startup(desc, false);
58                 raw_spin_unlock_irq(&desc->lock);
69         for_each_irq_desc_reverse(i, desc) {
70                 raw_spin_lock_irq(&desc->lock);
71                 if (!desc->action && irq_settings_can_probe(desc)) {
72                         desc->istate |= IRQS_AUTODETECT | IRQS_WAITING;
73                         if (irq_startup(desc, false))
74                                 desc->istate |= IRQS_PENDING;
76                 raw_spin_unlock_irq(&desc->lock);
87         for_each_irq_desc(i, desc) {
88                 raw_spin_lock_irq(&desc->lock);
90                 if (desc->istate & IRQS_AUTODETECT) {
92                         if (!(desc->istate & IRQS_WAITING)) {
93                                 desc->istate &= ~IRQS_AUTODETECT;
94                                 irq_shutdown(desc);
99                 raw_spin_unlock_irq(&desc->lock);
121         struct irq_desc *desc;
124         for_each_irq_desc(i, desc) {
125                 raw_spin_lock_irq(&desc->lock);
126                 if (desc->istate & IRQS_AUTODETECT) {
127                         if (i < 16 && !(desc->istate & IRQS_WAITING))
130                         desc->istate &= ~IRQS_AUTODETECT;
131                         irq_shutdown(desc);
133                 raw_spin_unlock_irq(&desc->lock);
161         struct irq_desc *desc;
163         for_each_irq_desc(i, desc) {
164                 raw_spin_lock_irq(&desc->lock);
166                 if (desc->istate & IRQS_AUTODETECT) {
167                         if (!(desc->istate & IRQS_WAITING)) {
172                         desc->istate &= ~IRQS_AUTODETECT;
173                         irq_shutdown(desc);
175                 raw_spin_unlock_irq(&desc->lock);
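
A hedged sketch of the classic autoprobe sequence these functions implement; example_device_fire_once() is a stand-in for whatever register poke makes the hardware raise its line, and the delay is illustrative:

#include <linux/delay.h>
#include <linux/interrupt.h>

static void example_device_fire_once(void);

static int example_probe_irq(void)
{
        unsigned long mask = probe_irq_on();    /* arm probe-capable lines */

        example_device_fire_once();             /* provoke one interrupt */
        mdelay(10);                             /* let it arrive */

        /* 0: nothing fired, negative: several lines fired, >0: the irq */
        return probe_irq_off(mask);
}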

resend.c

33         struct irq_desc *desc;
39                 desc = irq_to_desc(irq);
41                 desc->handle_irq(desc);
54  * Is called with interrupts disabled and desc->lock held.
56 void check_irq_resend(struct irq_desc *desc)
64         if (irq_settings_is_level(desc)) {
65                 desc->istate &= ~IRQS_PENDING;
68         if (desc->istate & IRQS_REPLAY)
70         if (desc->istate & IRQS_PENDING) {
71                 desc->istate &= ~IRQS_PENDING;
72                 desc->istate |= IRQS_REPLAY;
74                 if (!desc->irq_data.chip->irq_retrigger ||
75                     !desc->irq_data.chip->irq_retrigger(&desc->irq_data)) {
77                         unsigned int irq = irq_desc_get_irq(desc);
85                         if (irq_settings_is_nested_thread(desc)) {
91                                 if (!desc->parent_irq)
93                                 irq = desc->parent_irq;
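
check_irq_resend() prefers a hardware replay through chip->irq_retrigger and only falls back to the software path (resend_irqs() above) when the chip has none or it fails. A hedged sketch of a chip supplying that callback; example_hw_retrigger() is illustrative:

/* Sketch: a nonzero return tells check_irq_resend() the hardware will
 * replay the pending edge itself, so no software resend is queued. */
static int example_hw_retrigger(unsigned long hwirq);

static int example_retrigger(struct irq_data *d)
{
        return example_hw_retrigger(d->hwirq);
}

/* wired up as  .irq_retrigger = example_retrigger  in the irq_chip */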

irqdesc.c

39 static int alloc_masks(struct irq_desc *desc, gfp_t gfp, int node)
41         if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
46         if (!zalloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
47                 free_cpumask_var(desc->irq_common_data.affinity);
54 static void desc_smp_init(struct irq_desc *desc, int node)
56         cpumask_copy(desc->irq_common_data.affinity, irq_default_affinity);
58         cpumask_clear(desc->pending_mask);
61         desc->irq_common_data.node = node;
67 alloc_masks(struct irq_desc *desc, gfp_t gfp, int node) { return 0; }
68 static inline void desc_smp_init(struct irq_desc *desc, int node) { }
71 static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
76         desc->irq_common_data.handler_data = NULL;
77         desc->irq_common_data.msi_desc = NULL;
79         desc->irq_data.common = &desc->irq_common_data;
80         desc->irq_data.irq = irq;
81         desc->irq_data.chip = &no_irq_chip;
82         desc->irq_data.chip_data = NULL;
83         irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
84         irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
85         desc->handle_irq = handle_bad_irq;
86         desc->depth = 1;
87         desc->irq_count = 0;
88         desc->irqs_unhandled = 0;
89         desc->name = NULL;
90         desc->owner = owner;
92                 *per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
93         desc_smp_init(desc, node);
106 static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
108         radix_tree_insert(&irq_desc_tree, irq, desc);
123 static void free_masks(struct irq_desc *desc)
126         free_cpumask_var(desc->pending_mask);
128         free_cpumask_var(desc->irq_common_data.affinity);
131 static inline void free_masks(struct irq_desc *desc) { }
146         struct irq_desc *desc;
149         desc = kzalloc_node(sizeof(*desc), gfp, node);
150         if (!desc)
153         desc->kstat_irqs = alloc_percpu(unsigned int);
154         if (!desc->kstat_irqs)
157         if (alloc_masks(desc, gfp, node))
160         raw_spin_lock_init(&desc->lock);
161         lockdep_set_class(&desc->lock, &irq_desc_lock_class);
163         desc_set_defaults(irq, desc, node, owner);
165         return desc;
168         free_percpu(desc->kstat_irqs);
170         kfree(desc);
176         struct irq_desc *desc = irq_to_desc(irq);
178         unregister_irq_proc(irq, desc);
190         free_masks(desc);
191         free_percpu(desc->kstat_irqs);
192         kfree(desc);
198         struct irq_desc *desc;
202                 desc = alloc_desc(start + i, node, owner);
203                 if (!desc)
206                 irq_insert_desc(start + i, desc);
232         struct irq_desc *desc;
250                 desc = alloc_desc(i, node, NULL);
252                 irq_insert_desc(i, desc);
270         struct irq_desc *desc;
276         desc = irq_desc;
280                 desc[i].kstat_irqs = alloc_percpu(unsigned int);
281                 alloc_masks(&desc[i], GFP_KERNEL, node);
282                 raw_spin_lock_init(&desc[i].lock);
283                 lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
284                 desc_set_defaults(i, &desc[i], node, NULL);
297         struct irq_desc *desc = irq_to_desc(irq);
300         raw_spin_lock_irqsave(&desc->lock, flags);
301         desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL);
302         raw_spin_unlock_irqrestore(&desc->lock, flags);
311                 struct irq_desc *desc = irq_to_desc(start + i);
313                 desc->owner = owner;
346         struct irq_desc *desc = irq_to_desc(irq);
348         if (!desc)
350         generic_handle_irq_desc(desc);
541         struct irq_desc *desc = irq_to_desc(irq);
543         if (desc) {
546                     !irq_settings_is_per_cpu_devid(desc))
550                     irq_settings_is_per_cpu_devid(desc))
555                 chip_bus_lock(desc);
556                 raw_spin_lock_irqsave(&desc->lock, *flags);
558         return desc;
561 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
563         raw_spin_unlock_irqrestore(&desc->lock, flags);
565                 chip_bus_sync_unlock(desc);
570         struct irq_desc *desc = irq_to_desc(irq);
572         if (!desc)
575         if (desc->percpu_enabled)
578         desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);
580         if (!desc->percpu_enabled)
603         struct irq_desc *desc = irq_to_desc(irq);
605         return desc && desc->kstat_irqs ?
606                 *per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
619         struct irq_desc *desc = irq_to_desc(irq);
623         if (!desc || !desc->kstat_irqs)
626                 sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
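
The usual consumer of generic_handle_irq() above is a chained demultiplexer that fans a parent interrupt out to the child descriptors looked up here. A hedged sketch; example_read_pending() and example_child_base are illustrative:

#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>

static unsigned long example_read_pending(void);
static unsigned int example_child_base;

/* Sketch: demux handler installed with irq_set_chained_handler_and_data()
 * (chip.c above); each set pending bit is routed through irq_to_desc()
 * and the child's own flow handler via generic_handle_irq(). */
static void example_demux(struct irq_desc *desc)
{
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long pending = example_read_pending();
        int bit;

        chained_irq_enter(chip, desc);
        for_each_set_bit(bit, &pending, 32)
                generic_handle_irq(example_child_base + bit);
        chained_irq_exit(chip, desc);
}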

handle.c

25  * @desc: description of the interrupt
29 void handle_bad_irq(struct irq_desc *desc)
31         unsigned int irq = irq_desc_get_irq(desc);
33         print_irq_desc(irq, desc);
34         kstat_incr_irqs_this_cpu(desc);
57 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action)
84          * in threads_oneshot are serialized via desc->lock against
90          * spin_lock(desc->lock);
91          * desc->state |= IRQS_INPROGRESS;
92          * spin_unlock(desc->lock);
94          * desc->threads_oneshot |= mask;
95          * spin_lock(desc->lock);
96          * desc->state &= ~IRQS_INPROGRESS;
97          * spin_unlock(desc->lock);
102          * spin_lock(desc->lock);
103          * if (desc->state & IRQS_INPROGRESS) {
104          * spin_unlock(desc->lock);
105          * while(desc->state & IRQS_INPROGRESS)
110          * desc->threads_oneshot &= ~mask;
111          * spin_unlock(desc->lock);
114          * or we are waiting in the flow handler for desc->lock to be
116          * IRQTF_RUNTHREAD under desc->lock. If set it leaves
119         desc->threads_oneshot |= action->thread_mask;
130         atomic_inc(&desc->threads_active);
135 irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
138         unsigned int flags = 0, irq = desc->irq_data.irq;
139         struct irqaction *action = desc->action;
164                         __irq_wake_thread(desc, action);
182                 note_interrupt(desc, retval);
186 irqreturn_t handle_irq_event(struct irq_desc *desc)
190         desc->istate &= ~IRQS_PENDING;
191         irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
192         raw_spin_unlock(&desc->lock);
194         ret = handle_irq_event_percpu(desc);
196         raw_spin_lock(&desc->lock);
197         irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
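
A hedged sketch of the request that creates the hard-irq/thread pair whose handshake __irq_wake_thread() above coordinates. Returning IRQ_WAKE_THREAD from the primary handler is what reaches __irq_wake_thread() via handle_irq_event_percpu(); names are illustrative:

#include <linux/interrupt.h>

static irqreturn_t example_quick_check(int irq, void *dev)
{
        /* hard-irq context: just decide whether the thread must run */
        return IRQ_WAKE_THREAD;
}

static irqreturn_t example_slow_work(int irq, void *dev)
{
        /* process context; with IRQF_ONESHOT the line stays masked
         * until this returns and threads_oneshot drops to zero */
        return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *dev)
{
        return request_threaded_irq(irq, example_quick_check,
                                    example_slow_work, IRQF_ONESHOT,
                                    "example", dev);
}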

internals.h

38  * Bit masks for desc->core_internal_state__do_not_mess_with_it
64 extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
65 extern void __disable_irq(struct irq_desc *desc);
66 extern void __enable_irq(struct irq_desc *desc);
68 extern int irq_startup(struct irq_desc *desc, bool resend);
69 extern void irq_shutdown(struct irq_desc *desc);
70 extern void irq_enable(struct irq_desc *desc);
71 extern void irq_disable(struct irq_desc *desc);
72 extern void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu);
73 extern void irq_percpu_disable(struct irq_desc *desc, unsigned int cpu);
74 extern void mask_irq(struct irq_desc *desc);
75 extern void unmask_irq(struct irq_desc *desc);
76 extern void unmask_threaded_irq(struct irq_desc *desc);
84 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
86 irqreturn_t handle_irq_event_percpu(struct irq_desc *desc);
87 irqreturn_t handle_irq_event(struct irq_desc *desc);
90 void check_irq_resend(struct irq_desc *desc);
91 bool irq_wait_for_poll(struct irq_desc *desc);
92 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action);
95 extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
96 extern void unregister_irq_proc(unsigned int irq, struct irq_desc *desc);
100 static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
101 static inline void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) { }
110 extern void irq_set_thread_affinity(struct irq_desc *desc);
116 static inline void chip_bus_lock(struct irq_desc *desc)
118         if (unlikely(desc->irq_data.chip->irq_bus_lock))
119                 desc->irq_data.chip->irq_bus_lock(&desc->irq_data);
122 static inline void chip_bus_sync_unlock(struct irq_desc *desc)
124         if (unlikely(desc->irq_data.chip->irq_bus_sync_unlock))
125                 desc->irq_data.chip->irq_bus_sync_unlock(&desc->irq_data);
137 void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus);
146 irq_put_desc_busunlock(struct irq_desc *desc, unsigned long flags)
148         __irq_put_desc_unlock(desc, flags, true);
158 irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
160         __irq_put_desc_unlock(desc, flags, false);
191 static inline void kstat_incr_irqs_this_cpu(struct irq_desc *desc)
193         __this_cpu_inc(*desc->kstat_irqs);
197 static inline int irq_desc_get_node(struct irq_desc *desc)
199         return irq_common_data_get_node(&desc->irq_common_data);
202 static inline int irq_desc_is_chained(struct irq_desc *desc)
204         return (desc->action && desc->action == &chained_action);
208 bool irq_pm_check_wakeup(struct irq_desc *desc);
209 void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action);
210 void irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action);
212 static inline bool irq_pm_check_wakeup(struct irq_desc *desc) { return false; }
214 irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
216 irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
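
A hedged sketch of the canonical lock/unlock pattern these helpers support (compare irq_set_irq_type() in chip.c): look up and lock the descriptor, mutate it under desc->lock, then release in reverse order. The buslock variant additionally takes the possibly-sleeping chip bus lock, which matters for chips behind I2C/SPI. Core-internal code only; example_poke_desc() is illustrative:

#include "internals.h"

static int example_poke_desc(unsigned int irq)
{
        unsigned long flags;
        struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, 0);

        if (!desc)
                return -EINVAL;

        /* ... modify the descriptor under desc->lock ... */

        irq_put_desc_busunlock(desc, flags);
        return 0;
}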

proc.c

42         struct irq_desc *desc = irq_to_desc((long)m->private);
43         const struct cpumask *mask = desc->irq_common_data.affinity;
46         if (irqd_is_setaffinity_pending(&desc->irq_data))
47                 mask = desc->pending_mask;
58         struct irq_desc *desc = irq_to_desc((long)m->private);
65         raw_spin_lock_irqsave(&desc->lock, flags);
66         if (desc->affinity_hint)
67                 cpumask_copy(mask, desc->affinity_hint);
68         raw_spin_unlock_irqrestore(&desc->lock, flags);
243         struct irq_desc *desc = irq_to_desc((long) m->private);
245         seq_printf(m, "%d\n", irq_desc_get_node(desc));
264         struct irq_desc *desc = irq_to_desc((long) m->private);
267                    desc->irq_count, desc->irqs_unhandled,
268                    jiffies_to_msecs(desc->last_unhandled));
288         struct irq_desc *desc = irq_to_desc(irq);
293         raw_spin_lock_irqsave(&desc->lock, flags);
294         for (action = desc->action ; action; action = action->next) {
301         raw_spin_unlock_irqrestore(&desc->lock, flags);
308         struct irq_desc *desc = irq_to_desc(irq);
310         if (!desc->dir || action->dir || !action->name ||
318         action->dir = proc_mkdir(name, desc->dir);
325 void register_irq_proc(unsigned int irq, struct irq_desc *desc)
330         if (!root_irq_dir || (desc->irq_data.chip == &no_irq_chip))
340         if (desc->dir)
347         desc->dir = proc_mkdir(name, root_irq_dir);
348         if (!desc->dir)
353         proc_create_data("smp_affinity", 0644, desc->dir,
357         proc_create_data("affinity_hint", 0444, desc->dir,
361         proc_create_data("smp_affinity_list", 0644, desc->dir,
364         proc_create_data("node", 0444, desc->dir,
368         proc_create_data("spurious", 0444, desc->dir,
375 void unregister_irq_proc(unsigned int irq, struct irq_desc *desc)
379         if (!root_irq_dir || !desc->dir)
382         remove_proc_entry("smp_affinity", desc->dir);
383         remove_proc_entry("affinity_hint", desc->dir);
384         remove_proc_entry("smp_affinity_list", desc->dir);
385         remove_proc_entry("node", desc->dir);
387         remove_proc_entry("spurious", desc->dir);
412         struct irq_desc *desc;
424         for_each_irq_desc(irq, desc) {
425                 if (!desc)
428                 register_irq_proc(irq, desc);
450         struct irq_desc *desc;
470         desc = irq_to_desc(i);
471         if (!desc)
474         raw_spin_lock_irqsave(&desc->lock, flags);
477         action = desc->action;
478         if ((!action || irq_desc_is_chained(desc)) && !any_count)
485         if (desc->irq_data.chip) {
486                 if (desc->irq_data.chip->irq_print_chip)
487                         desc->irq_data.chip->irq_print_chip(&desc->irq_data, p);
488                 else if (desc->irq_data.chip->name)
489                         seq_printf(p, " %8s", desc->irq_data.chip->name);
495         if (desc->irq_data.domain)
496                 seq_printf(p, " %*d", prec, (int) desc->irq_data.hwirq);
498         seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
500         if (desc->name)
501                 seq_printf(p, "-%-8s", desc->name);
511         raw_spin_unlock_irqrestore(&desc->lock, flags);
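
A hedged sketch of the driver-side source of the affinity_hint file registered above: irq_set_affinity_hint() (manage.c, matched below) stores the mask that irq_affinity_hint_proc_show() copies out to userspace tools such as irqbalance. Names are illustrative:

#include <linux/interrupt.h>

static void example_hint_irq_to_cpu(unsigned int irq, int cpu)
{
        /* exported via /proc/irq/<irq>/affinity_hint */
        irq_set_affinity_hint(irq, cpumask_of(cpu));
}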
H A Dmanage.c35 static void __synchronize_hardirq(struct irq_desc *desc) __synchronize_hardirq() argument
46 while (irqd_irq_inprogress(&desc->irq_data)) __synchronize_hardirq()
50 raw_spin_lock_irqsave(&desc->lock, flags); __synchronize_hardirq()
51 inprogress = irqd_irq_inprogress(&desc->irq_data); __synchronize_hardirq()
52 raw_spin_unlock_irqrestore(&desc->lock, flags); __synchronize_hardirq()
77 struct irq_desc *desc = irq_to_desc(irq); synchronize_hardirq() local
79 if (desc) { synchronize_hardirq()
80 __synchronize_hardirq(desc); synchronize_hardirq()
81 return !atomic_read(&desc->threads_active); synchronize_hardirq()
100 struct irq_desc *desc = irq_to_desc(irq); synchronize_irq() local
102 if (desc) { synchronize_irq()
103 __synchronize_hardirq(desc); synchronize_irq()
109 wait_event(desc->wait_for_threads, synchronize_irq()
110 !atomic_read(&desc->threads_active)); synchronize_irq()
118 static int __irq_can_set_affinity(struct irq_desc *desc) __irq_can_set_affinity() argument
120 if (!desc || !irqd_can_balance(&desc->irq_data) || __irq_can_set_affinity()
121 !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity) __irq_can_set_affinity()
138 * @desc: irq descriptor which has affitnity changed
142 * set_cpus_allowed_ptr() here as we hold desc->lock and this
145 void irq_set_thread_affinity(struct irq_desc *desc) irq_set_thread_affinity() argument
147 struct irqaction *action = desc->action; irq_set_thread_affinity()
166 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) irq_copy_pending() argument
168 cpumask_copy(desc->pending_mask, mask); irq_copy_pending()
171 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) irq_get_pending() argument
173 cpumask_copy(mask, desc->pending_mask); irq_get_pending()
179 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } irq_copy_pending() argument
181 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } irq_get_pending() argument
187 struct irq_desc *desc = irq_data_to_desc(data); irq_do_set_affinity() local
195 cpumask_copy(desc->irq_common_data.affinity, mask); irq_do_set_affinity()
197 irq_set_thread_affinity(desc); irq_do_set_affinity()
208 struct irq_desc *desc = irq_data_to_desc(data); irq_set_affinity_locked() local
218 irq_copy_pending(desc, mask); irq_set_affinity_locked()
221 if (desc->affinity_notify) { irq_set_affinity_locked()
222 kref_get(&desc->affinity_notify->kref); irq_set_affinity_locked()
223 schedule_work(&desc->affinity_notify->work); irq_set_affinity_locked()
232 struct irq_desc *desc = irq_to_desc(irq); __irq_set_affinity() local
236 if (!desc) __irq_set_affinity()
239 raw_spin_lock_irqsave(&desc->lock, flags); __irq_set_affinity()
240 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); __irq_set_affinity()
241 raw_spin_unlock_irqrestore(&desc->lock, flags); __irq_set_affinity()
248 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); irq_set_affinity_hint() local
250 if (!desc) irq_set_affinity_hint()
252 desc->affinity_hint = m; irq_set_affinity_hint()
253 irq_put_desc_unlock(desc, flags); irq_set_affinity_hint()
265 struct irq_desc *desc = irq_to_desc(notify->irq); irq_affinity_notify() local
269 if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL)) irq_affinity_notify()
272 raw_spin_lock_irqsave(&desc->lock, flags); irq_affinity_notify()
273 if (irq_move_pending(&desc->irq_data)) irq_affinity_notify()
274 irq_get_pending(cpumask, desc); irq_affinity_notify()
276 cpumask_copy(cpumask, desc->irq_common_data.affinity); irq_affinity_notify()
277 raw_spin_unlock_irqrestore(&desc->lock, flags); irq_affinity_notify()
300 struct irq_desc *desc = irq_to_desc(irq); irq_set_affinity_notifier() local
307 if (!desc) irq_set_affinity_notifier()
317 raw_spin_lock_irqsave(&desc->lock, flags); irq_set_affinity_notifier()
318 old_notify = desc->affinity_notify; irq_set_affinity_notifier()
319 desc->affinity_notify = notify; irq_set_affinity_notifier()
320 raw_spin_unlock_irqrestore(&desc->lock, flags); irq_set_affinity_notifier()
333 static int setup_affinity(struct irq_desc *desc, struct cpumask *mask) setup_affinity() argument
336 int node = irq_desc_get_node(desc); setup_affinity()
339 if (!__irq_can_set_affinity(desc)) setup_affinity()
346 if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) { setup_affinity()
347 if (cpumask_intersects(desc->irq_common_data.affinity, setup_affinity()
349 set = desc->irq_common_data.affinity; setup_affinity()
351 irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); setup_affinity()
362 irq_do_set_affinity(&desc->irq_data, mask, false); setup_affinity()
378 struct irq_desc *desc = irq_to_desc(irq); irq_select_affinity_usr() local
382 raw_spin_lock_irqsave(&desc->lock, flags); irq_select_affinity_usr()
383 ret = setup_affinity(desc, mask); irq_select_affinity_usr()
384 raw_spin_unlock_irqrestore(&desc->lock, flags); irq_select_affinity_usr()
390 setup_affinity(struct irq_desc *desc, struct cpumask *mask) setup_affinity() argument
409 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); irq_set_vcpu_affinity() local
414 if (!desc) irq_set_vcpu_affinity()
417 data = irq_desc_get_irq_data(desc); irq_set_vcpu_affinity()
421 irq_put_desc_unlock(desc, flags); irq_set_vcpu_affinity()
427 void __disable_irq(struct irq_desc *desc) __disable_irq() argument
429 if (!desc->depth++) __disable_irq()
430 irq_disable(desc); __disable_irq()
436 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); __disable_irq_nosync() local
438 if (!desc) __disable_irq_nosync()
440 __disable_irq(desc); __disable_irq_nosync()
441 irq_put_desc_busunlock(desc, flags); __disable_irq_nosync()
507 void __enable_irq(struct irq_desc *desc) __enable_irq() argument
509 switch (desc->depth) { __enable_irq()
513 irq_desc_get_irq(desc)); __enable_irq()
516 if (desc->istate & IRQS_SUSPENDED) __enable_irq()
519 irq_settings_set_noprobe(desc); __enable_irq()
520 irq_enable(desc); __enable_irq()
521 check_irq_resend(desc); __enable_irq()
525 desc->depth--; __enable_irq()
538 * desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
543 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); enable_irq() local
545 if (!desc) enable_irq()
547 if (WARN(!desc->irq_data.chip, enable_irq()
551 __enable_irq(desc); enable_irq()
553 irq_put_desc_busunlock(desc, flags); enable_irq()
559 struct irq_desc *desc = irq_to_desc(irq); set_irq_wake_real() local
562 if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE) set_irq_wake_real()
565 if (desc->irq_data.chip->irq_set_wake) set_irq_wake_real()
566 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on); set_irq_wake_real()
586 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); irq_set_irq_wake() local
589 if (!desc) irq_set_irq_wake()
596 if (desc->wake_depth++ == 0) { irq_set_irq_wake()
599 desc->wake_depth = 0; irq_set_irq_wake()
601 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE); irq_set_irq_wake()
604 if (desc->wake_depth == 0) { irq_set_irq_wake()
606 } else if (--desc->wake_depth == 0) { irq_set_irq_wake()
609 desc->wake_depth = 1; irq_set_irq_wake()
611 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE); irq_set_irq_wake()
614 irq_put_desc_busunlock(desc, flags); irq_set_irq_wake()
627 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); can_request_irq() local
630 if (!desc) can_request_irq()
633 if (irq_settings_can_request(desc)) { can_request_irq()
634 if (!desc->action || can_request_irq()
635 irqflags & desc->action->flags & IRQF_SHARED) can_request_irq()
638 irq_put_desc_unlock(desc, flags); can_request_irq()
642 int __irq_set_trigger(struct irq_desc *desc, unsigned long flags) __irq_set_trigger() argument
644 struct irq_chip *chip = desc->irq_data.chip; __irq_set_trigger()
653 irq_desc_get_irq(desc), __irq_set_trigger()
661 if (!irqd_irq_masked(&desc->irq_data)) __irq_set_trigger()
662 mask_irq(desc); __irq_set_trigger()
663 if (!irqd_irq_disabled(&desc->irq_data)) __irq_set_trigger()
668 ret = chip->irq_set_type(&desc->irq_data, flags); __irq_set_trigger()
673 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); __irq_set_trigger()
674 irqd_set(&desc->irq_data, flags); __irq_set_trigger()
677 flags = irqd_get_trigger_type(&desc->irq_data); __irq_set_trigger()
678 irq_settings_set_trigger_mask(desc, flags); __irq_set_trigger()
679 irqd_clear(&desc->irq_data, IRQD_LEVEL); __irq_set_trigger()
680 irq_settings_clr_level(desc); __irq_set_trigger()
682 irq_settings_set_level(desc); __irq_set_trigger()
683 irqd_set(&desc->irq_data, IRQD_LEVEL); __irq_set_trigger()
690 flags, irq_desc_get_irq(desc), chip->irq_set_type); __irq_set_trigger()
693 unmask_irq(desc); __irq_set_trigger()
701 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); irq_set_parent() local
703 if (!desc) irq_set_parent()
706 desc->parent_irq = parent_irq; irq_set_parent()
708 irq_put_desc_unlock(desc, flags); irq_set_parent()
762 static void irq_finalize_oneshot(struct irq_desc *desc, irq_finalize_oneshot() argument
765 if (!(desc->istate & IRQS_ONESHOT) || irq_finalize_oneshot()
769 chip_bus_lock(desc); irq_finalize_oneshot()
770 raw_spin_lock_irq(&desc->lock); irq_finalize_oneshot()
782 * versus "desc->threads_onehsot |= action->thread_mask;" in irq_finalize_oneshot()
786 if (unlikely(irqd_irq_inprogress(&desc->irq_data))) { irq_finalize_oneshot()
787 raw_spin_unlock_irq(&desc->lock); irq_finalize_oneshot()
788 chip_bus_sync_unlock(desc); irq_finalize_oneshot()
801 desc->threads_oneshot &= ~action->thread_mask; irq_finalize_oneshot()
803 if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) && irq_finalize_oneshot()
804 irqd_irq_masked(&desc->irq_data)) irq_finalize_oneshot()
805 unmask_threaded_irq(desc); irq_finalize_oneshot()
808 raw_spin_unlock_irq(&desc->lock); irq_finalize_oneshot()
809 chip_bus_sync_unlock(desc); irq_finalize_oneshot()
817 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) irq_thread_check_affinity() argument
834 raw_spin_lock_irq(&desc->lock); irq_thread_check_affinity()
839 if (desc->irq_common_data.affinity) irq_thread_check_affinity()
840 cpumask_copy(mask, desc->irq_common_data.affinity); irq_thread_check_affinity()
843 raw_spin_unlock_irq(&desc->lock); irq_thread_check_affinity()
851 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { } irq_thread_check_affinity() argument
861 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) irq_forced_thread_fn() argument
867 irq_finalize_oneshot(desc, action); irq_forced_thread_fn()
877 static irqreturn_t irq_thread_fn(struct irq_desc *desc, irq_thread_fn() argument
883 irq_finalize_oneshot(desc, action); irq_thread_fn()
887 static void wake_threads_waitq(struct irq_desc *desc) wake_threads_waitq() argument
889 if (atomic_dec_and_test(&desc->threads_active)) wake_threads_waitq()
890 wake_up(&desc->wait_for_threads); wake_threads_waitq()
896 struct irq_desc *desc; irq_thread_dtor() local
908 desc = irq_to_desc(action->irq); irq_thread_dtor()
911 * desc->threads_active and wake possible waiters. irq_thread_dtor()
914 wake_threads_waitq(desc); irq_thread_dtor()
916 /* Prevent a stale desc->threads_oneshot */ irq_thread_dtor()
917 irq_finalize_oneshot(desc, action); irq_thread_dtor()
920 static void irq_wake_secondary(struct irq_desc *desc, struct irqaction *action) irq_wake_secondary() argument
927 raw_spin_lock_irq(&desc->lock); irq_wake_secondary()
928 __irq_wake_thread(desc, secondary); irq_wake_secondary()
929 raw_spin_unlock_irq(&desc->lock); irq_wake_secondary()
939 struct irq_desc *desc = irq_to_desc(action->irq); irq_thread() local
940 irqreturn_t (*handler_fn)(struct irq_desc *desc, irq_thread()
952 irq_thread_check_affinity(desc, action); irq_thread()
957 irq_thread_check_affinity(desc, action); irq_thread()
959 action_ret = handler_fn(desc, action); irq_thread()
961 atomic_inc(&desc->threads_handled); irq_thread()
963 irq_wake_secondary(desc, action); irq_thread()
965 wake_threads_waitq(desc); irq_thread()
989 struct irq_desc *desc = irq_to_desc(irq); irq_wake_thread() local
993 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) irq_wake_thread()
996 raw_spin_lock_irqsave(&desc->lock, flags); irq_wake_thread()
997 for (action = desc->action; action; action = action->next) { irq_wake_thread()
1000 __irq_wake_thread(desc, action); irq_wake_thread()
1004 raw_spin_unlock_irqrestore(&desc->lock, flags); irq_wake_thread()
1040 static int irq_request_resources(struct irq_desc *desc) irq_request_resources() argument
1042 struct irq_data *d = &desc->irq_data; irq_request_resources()
1048 static void irq_release_resources(struct irq_desc *desc) irq_release_resources() argument
1050 struct irq_data *d = &desc->irq_data; irq_release_resources()
1104 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) __setup_irq() argument
1111 if (!desc) __setup_irq()
1114 if (desc->irq_data.chip == &no_irq_chip) __setup_irq()
1116 if (!try_module_get(desc->owner)) __setup_irq()
1125 nested = irq_settings_is_nested_thread(desc); __setup_irq()
1138 if (irq_settings_can_thread(desc)) { __setup_irq()
1175 if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE) __setup_irq()
1181 raw_spin_lock_irqsave(&desc->lock, flags); __setup_irq()
1182 old_ptr = &desc->action; __setup_irq()
1232 * desc->threads_active to indicate that the __setup_irq()
1236 * line have completed desc->threads_active becomes __setup_irq()
1241 * interrupt handlers, then desc->threads_active is __setup_irq()
1253 !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) { __setup_irq()
1276 ret = irq_request_resources(desc); __setup_irq()
1279 new->name, irq, desc->irq_data.chip->name); __setup_irq()
1283 init_waitqueue_head(&desc->wait_for_threads); __setup_irq()
1287 ret = __irq_set_trigger(desc, __setup_irq()
1294 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \ __setup_irq()
1296 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); __setup_irq()
1299 irqd_set(&desc->irq_data, IRQD_PER_CPU); __setup_irq()
1300 irq_settings_set_per_cpu(desc); __setup_irq()
1304 desc->istate |= IRQS_ONESHOT; __setup_irq()
1306 if (irq_settings_can_autoenable(desc)) __setup_irq()
1307 irq_startup(desc, true); __setup_irq()
1310 desc->depth = 1; __setup_irq()
1314 irq_settings_set_no_balancing(desc); __setup_irq()
1315 irqd_set(&desc->irq_data, IRQD_NO_BALANCING); __setup_irq()
1319 setup_affinity(desc, mask); __setup_irq()
1323 unsigned int omsk = irq_settings_get_trigger_mask(desc); __setup_irq()
1333 irq_pm_install_action(desc, new); __setup_irq()
1336 desc->irq_count = 0; __setup_irq()
1337 desc->irqs_unhandled = 0; __setup_irq()
1343 if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) { __setup_irq()
1344 desc->istate &= ~IRQS_SPURIOUS_DISABLED; __setup_irq()
1345 __enable_irq(desc); __setup_irq()
1348 raw_spin_unlock_irqrestore(&desc->lock, flags); __setup_irq()
1359 register_irq_proc(irq, desc); __setup_irq()
1377 raw_spin_unlock_irqrestore(&desc->lock, flags); __setup_irq()
1396 module_put(desc->owner); __setup_irq()
1410 struct irq_desc *desc = irq_to_desc(irq); setup_irq() local
1412 if (WARN_ON(irq_settings_is_per_cpu_devid(desc))) setup_irq()
1414 chip_bus_lock(desc); setup_irq()
1415 retval = __setup_irq(irq, desc, act); setup_irq()
1416 chip_bus_sync_unlock(desc); setup_irq()
1428 struct irq_desc *desc = irq_to_desc(irq); __free_irq() local
1434 if (!desc) __free_irq()
1437 chip_bus_lock(desc); __free_irq()
1438 raw_spin_lock_irqsave(&desc->lock, flags); __free_irq()
1444 action_ptr = &desc->action; __free_irq()
1450 raw_spin_unlock_irqrestore(&desc->lock, flags); __free_irq()
1451 chip_bus_sync_unlock(desc); __free_irq()
1463 irq_pm_remove_action(desc, action); __free_irq()
1466 if (!desc->action) { __free_irq()
1467 irq_settings_clr_disable_unlazy(desc); __free_irq()
1468 irq_shutdown(desc); __free_irq()
1469 irq_release_resources(desc); __free_irq()
1474 if (WARN_ON_ONCE(desc->affinity_hint)) __free_irq()
1475 desc->affinity_hint = NULL; __free_irq()
1478 raw_spin_unlock_irqrestore(&desc->lock, flags); __free_irq()
1479 chip_bus_sync_unlock(desc); __free_irq()
1511 module_put(desc->owner); __free_irq()
1525 struct irq_desc *desc = irq_to_desc(irq); remove_irq() local
1527 if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc))) remove_irq()
1548 struct irq_desc *desc = irq_to_desc(irq); free_irq() local
1550 if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc))) free_irq()
1554 if (WARN_ON(desc->affinity_notify)) free_irq()
1555 desc->affinity_notify = NULL; free_irq()
1609 struct irq_desc *desc; request_threaded_irq() local
1626 desc = irq_to_desc(irq); request_threaded_irq()
1627 if (!desc) request_threaded_irq()
1630 if (!irq_settings_can_request(desc) || request_threaded_irq()
1631 WARN_ON(irq_settings_is_per_cpu_devid(desc))) request_threaded_irq()
1650 chip_bus_lock(desc); request_threaded_irq()
1651 retval = __setup_irq(irq, desc, action); request_threaded_irq()
1652 chip_bus_sync_unlock(desc); request_threaded_irq()
1702 struct irq_desc *desc = irq_to_desc(irq); request_any_context_irq() local
1705 if (!desc) request_any_context_irq()
1708 if (irq_settings_is_nested_thread(desc)) { request_any_context_irq()
1723 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); enable_percpu_irq() local
1725 if (!desc) enable_percpu_irq()
1732 ret = __irq_set_trigger(desc, type); enable_percpu_irq()
1740 irq_percpu_enable(desc, cpu); enable_percpu_irq()
1742 irq_put_desc_unlock(desc, flags); enable_percpu_irq()
1750 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU); disable_percpu_irq() local
1752 if (!desc) disable_percpu_irq()
1755 irq_percpu_disable(desc, cpu); disable_percpu_irq()
1756 irq_put_desc_unlock(desc, flags); disable_percpu_irq()
1765 struct irq_desc *desc = irq_to_desc(irq); __free_percpu_irq() local
1771 if (!desc) __free_percpu_irq()
1774 raw_spin_lock_irqsave(&desc->lock, flags); __free_percpu_irq()
1776 action = desc->action; __free_percpu_irq()
1782 if (!cpumask_empty(desc->percpu_enabled)) { __free_percpu_irq()
1784 irq, cpumask_first(desc->percpu_enabled)); __free_percpu_irq()
1789 desc->action = NULL; __free_percpu_irq()
1791 raw_spin_unlock_irqrestore(&desc->lock, flags); __free_percpu_irq()
1795 module_put(desc->owner); __free_percpu_irq()
1799 raw_spin_unlock_irqrestore(&desc->lock, flags); __free_percpu_irq()
1812 struct irq_desc *desc = irq_to_desc(irq); remove_percpu_irq() local
1814 if (desc && irq_settings_is_per_cpu_devid(desc)) remove_percpu_irq()
1832 struct irq_desc *desc = irq_to_desc(irq); free_percpu_irq() local
1834 if (!desc || !irq_settings_is_per_cpu_devid(desc)) free_percpu_irq()
1837 chip_bus_lock(desc); free_percpu_irq()
1839 chip_bus_sync_unlock(desc); free_percpu_irq()
1852 struct irq_desc *desc = irq_to_desc(irq); setup_percpu_irq() local
1855 if (!desc || !irq_settings_is_per_cpu_devid(desc)) setup_percpu_irq()
1857 chip_bus_lock(desc); setup_percpu_irq()
1858 retval = __setup_irq(irq, desc, act); setup_percpu_irq()
1859 chip_bus_sync_unlock(desc); setup_percpu_irq()
1884 struct irq_desc *desc; request_percpu_irq() local
1890 desc = irq_to_desc(irq); request_percpu_irq()
1891 if (!desc || !irq_settings_can_request(desc) || request_percpu_irq()
1892 !irq_settings_is_per_cpu_devid(desc)) request_percpu_irq()
1904 chip_bus_lock(desc); request_percpu_irq()
1905 retval = __setup_irq(irq, desc, action); request_percpu_irq()
1906 chip_bus_sync_unlock(desc); request_percpu_irq()
1931 struct irq_desc *desc; irq_get_irqchip_state() local
1937 desc = irq_get_desc_buslock(irq, &flags, 0); irq_get_irqchip_state()
1938 if (!desc) irq_get_irqchip_state()
1941 data = irq_desc_get_irq_data(desc); irq_get_irqchip_state()
1957 irq_put_desc_busunlock(desc, flags); irq_get_irqchip_state()
1977 struct irq_desc *desc; irq_set_irqchip_state() local
1983 desc = irq_get_desc_buslock(irq, &flags, 0); irq_set_irqchip_state()
1984 if (!desc) irq_set_irqchip_state()
1987 data = irq_desc_get_irq_data(desc); irq_set_irqchip_state()
2003 irq_put_desc_busunlock(desc, flags); irq_set_irqchip_state()
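The manage.c hits above trace the whole request/free path: __setup_irq() threads the action, irq_thread() runs it, and irq_finalize_oneshot() unmasks the line once the last oneshot thread finishes. A minimal caller-side sketch, assuming hypothetical handler and device names (the API calls and return codes are the real ones):

	#include <linux/interrupt.h>

	/* hard-irq half: quiesce the device, defer the real work */
	static irqreturn_t demo_hardirq(int irq, void *dev_id)
	{
		return IRQ_WAKE_THREAD;		/* wakes irq_thread() above */
	}

	/* runs in the kthread that __setup_irq() created */
	static irqreturn_t demo_thread_fn(int irq, void *dev_id)
	{
		return IRQ_HANDLED;
	}

	static int demo_attach(unsigned int irq, void *demo_dev)
	{
		/* IRQF_ONESHOT keeps the line masked until the thread
		 * returns; that is the desc->threads_oneshot bookkeeping
		 * in irq_finalize_oneshot() above */
		return request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
					    IRQF_ONESHOT, "demo-dev", demo_dev);
	}

	static void demo_detach(unsigned int irq, void *demo_dev)
	{
		free_irq(irq, demo_dev);	/* lands in __free_irq() above */
	}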
H A Dmigration.c9 struct irq_desc *desc = irq_data_to_desc(idata); irq_move_masked_irq() local
10 struct irq_chip *chip = desc->irq_data.chip; irq_move_masked_irq()
12 if (likely(!irqd_is_setaffinity_pending(&desc->irq_data))) irq_move_masked_irq()
15 irqd_clr_move_pending(&desc->irq_data); irq_move_masked_irq()
20 if (irqd_is_per_cpu(&desc->irq_data)) { irq_move_masked_irq()
25 if (unlikely(cpumask_empty(desc->pending_mask))) irq_move_masked_irq()
31 assert_raw_spin_locked(&desc->lock); irq_move_masked_irq()
45 if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) irq_move_masked_irq()
46 irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false); irq_move_masked_irq()
48 cpumask_clear(desc->pending_mask); irq_move_masked_irq()
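irq_move_masked_irq() is the back half of a deferred affinity change: when the line could not be retargeted immediately, the requested mask was parked in desc->pending_mask and is replayed here while the line is masked. A caller-side sketch (the helper name is hypothetical; irq_set_affinity() is the real entry point, available on SMP builds):

	#include <linux/interrupt.h>
	#include <linux/cpumask.h>

	/* ask for "irq" to be serviced on CPU 2; if the move cannot
	 * happen right away, the mask is parked and
	 * irq_move_masked_irq() above completes it on a later interrupt */
	static int demo_pin_irq(unsigned int irq)
	{
		return irq_set_affinity(irq, cpumask_of(2));
	}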
H A Dspurious.c38 bool irq_wait_for_poll(struct irq_desc *desc) irq_wait_for_poll() argument
42 smp_processor_id(), desc->irq_data.irq)) irq_wait_for_poll()
47 raw_spin_unlock(&desc->lock); irq_wait_for_poll()
48 while (irqd_irq_inprogress(&desc->irq_data)) irq_wait_for_poll()
50 raw_spin_lock(&desc->lock); irq_wait_for_poll()
51 } while (irqd_irq_inprogress(&desc->irq_data)); irq_wait_for_poll()
53 return !irqd_irq_disabled(&desc->irq_data) && desc->action; irq_wait_for_poll()
63 static int try_one_irq(struct irq_desc *desc, bool force) try_one_irq() argument
68 raw_spin_lock(&desc->lock); try_one_irq()
74 if (irq_settings_is_per_cpu(desc) || try_one_irq()
75 irq_settings_is_nested_thread(desc) || try_one_irq()
76 irq_settings_is_polled(desc)) try_one_irq()
83 if (irqd_irq_disabled(&desc->irq_data) && !force) try_one_irq()
90 action = desc->action; try_one_irq()
96 if (irqd_irq_inprogress(&desc->irq_data)) { try_one_irq()
101 desc->istate |= IRQS_PENDING; try_one_irq()
106 desc->istate |= IRQS_POLL_INPROGRESS; try_one_irq()
108 if (handle_irq_event(desc) == IRQ_HANDLED) try_one_irq()
111 action = desc->action; try_one_irq()
112 } while ((desc->istate & IRQS_PENDING) && action); try_one_irq()
113 desc->istate &= ~IRQS_POLL_INPROGRESS; try_one_irq()
115 raw_spin_unlock(&desc->lock); try_one_irq()
121 struct irq_desc *desc; misrouted_irq() local
129 for_each_irq_desc(i, desc) { for_each_irq_desc()
136 if (try_one_irq(desc, false)) for_each_irq_desc()
147 struct irq_desc *desc; poll_spurious_irqs() local
154 for_each_irq_desc(i, desc) { for_each_irq_desc()
161 state = desc->istate; for_each_irq_desc()
167 try_one_irq(desc, true); for_each_irq_desc()
191 static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) __report_bad_irq() argument
193 unsigned int irq = irq_desc_get_irq(desc); __report_bad_irq()
208 * We need to take desc->lock here. note_interrupt() is called __report_bad_irq()
209 * w/o desc->lock held, but IRQD_IRQ_INPROGRESS set. We might race __report_bad_irq()
211 * desc->lock here. See synchronize_irq(). __report_bad_irq()
213 raw_spin_lock_irqsave(&desc->lock, flags); __report_bad_irq()
214 action = desc->action; __report_bad_irq()
223 raw_spin_unlock_irqrestore(&desc->lock, flags); __report_bad_irq()
226 static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret) report_bad_irq() argument
232 __report_bad_irq(desc, action_ret); report_bad_irq()
237 try_misrouted_irq(unsigned int irq, struct irq_desc *desc, try_misrouted_irq() argument
266 action = desc->action; try_misrouted_irq()
273 void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret) note_interrupt() argument
277 if (desc->istate & IRQS_POLL_INPROGRESS || note_interrupt()
278 irq_settings_is_polled(desc)) note_interrupt()
282 report_bad_irq(desc, action_ret); note_interrupt()
323 if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) { note_interrupt()
324 desc->threads_handled_last |= SPURIOUS_DEFERRED; note_interrupt()
339 handled = atomic_read(&desc->threads_handled); note_interrupt()
341 if (handled != desc->threads_handled_last) { note_interrupt()
351 desc->threads_handled_last = handled; note_interrupt()
382 desc->threads_handled_last &= ~SPURIOUS_DEFERRED; note_interrupt()
393 if (time_after(jiffies, desc->last_unhandled + HZ/10)) note_interrupt()
394 desc->irqs_unhandled = 1; note_interrupt()
396 desc->irqs_unhandled++; note_interrupt()
397 desc->last_unhandled = jiffies; note_interrupt()
400 irq = irq_desc_get_irq(desc); note_interrupt()
401 if (unlikely(try_misrouted_irq(irq, desc, action_ret))) { note_interrupt()
404 desc->irqs_unhandled -= ok; note_interrupt()
407 desc->irq_count++; note_interrupt()
408 if (likely(desc->irq_count < 100000)) note_interrupt()
411 desc->irq_count = 0; note_interrupt()
412 if (unlikely(desc->irqs_unhandled > 99900)) { note_interrupt()
416 __report_bad_irq(desc, action_ret); note_interrupt()
421 desc->istate |= IRQS_SPURIOUS_DISABLED; note_interrupt()
422 desc->depth++; note_interrupt()
423 irq_disable(desc); note_interrupt()
428 desc->irqs_unhandled = 0; note_interrupt()
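The scattered note_interrupt() lines above implement a windowed spurious-interrupt heuristic. Condensed into one sketch (not verbatim; __report_bad_irq() is file-local), the tail of the function reduces to:

	desc->irq_count++;
	if (desc->irq_count >= 100000) {
		/* one window of 100,000 interrupts has elapsed */
		desc->irq_count = 0;
		if (desc->irqs_unhandled > 99900) {
			/* >99.9% found no handler: report once, shut it off */
			__report_bad_irq(desc, action_ret);
			desc->istate |= IRQS_SPURIOUS_DISABLED;
			desc->depth++;
			irq_disable(desc);
		}
		desc->irqs_unhandled = 0;
	}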
H A Dcpuhotplug.c17 static bool migrate_one_irq(struct irq_desc *desc) migrate_one_irq() argument
19 struct irq_data *d = irq_desc_get_irq_data(desc); migrate_one_irq()
63 struct irq_desc *desc; irq_migrate_all_off_this_cpu() local
71 desc = irq_to_desc(irq); for_each_active_irq()
72 raw_spin_lock(&desc->lock); for_each_active_irq()
73 affinity_broken = migrate_one_irq(desc); for_each_active_irq()
74 raw_spin_unlock(&desc->lock); for_each_active_irq()
H A Dmsi.c23 struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); alloc_msi_entry() local
24 if (!desc) alloc_msi_entry()
27 INIT_LIST_HEAD(&desc->list); alloc_msi_entry()
28 desc->dev = dev; alloc_msi_entry()
30 return desc; alloc_msi_entry()
166 struct msi_desc *desc) msi_domain_ops_set_desc()
168 arg->desc = desc; msi_domain_ops_set_desc()
270 struct msi_desc *desc; msi_domain_alloc_irqs() local
279 for_each_msi_entry(desc, dev) { for_each_msi_entry()
280 ops->set_desc(&arg, desc); for_each_msi_entry()
286 virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used, for_each_msi_entry()
291 ret = ops->handle_error(domain, desc, ret); for_each_msi_entry()
297 for (i = 0; i < desc->nvec_used; i++) for_each_msi_entry()
298 irq_set_msi_desc_off(virq, i, desc); for_each_msi_entry()
304 for_each_msi_entry(desc, dev) { for_each_msi_entry()
305 if (desc->nvec_used == 1) for_each_msi_entry()
309 virq, virq + desc->nvec_used - 1); for_each_msi_entry()
323 struct msi_desc *desc; msi_domain_free_irqs() local
325 for_each_msi_entry(desc, dev) { for_each_msi_entry()
331 if (desc->irq) { for_each_msi_entry()
332 irq_domain_free_irqs(desc->irq, desc->nvec_used); for_each_msi_entry()
333 desc->irq = 0; for_each_msi_entry()
165 msi_domain_ops_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) msi_domain_ops_set_desc() argument
/linux-4.4.14/drivers/crypto/caam/
H A Ddesc_constr.h7 #include "desc.h"
16 #define PRINT_POS do { printk(KERN_DEBUG "%02d: %s\n", desc_len(desc),\
33 static inline int desc_len(u32 *desc) desc_len() argument
35 return *desc & HDR_DESCLEN_MASK; desc_len()
38 static inline int desc_bytes(void *desc) desc_bytes() argument
40 return desc_len(desc) * CAAM_CMD_SZ; desc_bytes()
43 static inline u32 *desc_end(u32 *desc) desc_end() argument
45 return desc + desc_len(desc); desc_end()
48 static inline void *sh_desc_pdb(u32 *desc) sh_desc_pdb() argument
50 return desc + 1; sh_desc_pdb()
53 static inline void init_desc(u32 *desc, u32 options) init_desc() argument
55 *desc = (options | HDR_ONE) + 1; init_desc()
58 static inline void init_sh_desc(u32 *desc, u32 options) init_sh_desc() argument
61 init_desc(desc, CMD_SHARED_DESC_HDR | options); init_sh_desc()
64 static inline void init_sh_desc_pdb(u32 *desc, u32 options, size_t pdb_bytes) init_sh_desc_pdb() argument
68 init_sh_desc(desc, (((pdb_len + 1) << HDR_START_IDX_SHIFT) + pdb_len) | init_sh_desc_pdb()
72 static inline void init_job_desc(u32 *desc, u32 options) init_job_desc() argument
74 init_desc(desc, CMD_DESC_HDR | options); init_job_desc()
77 static inline void append_ptr(u32 *desc, dma_addr_t ptr) append_ptr() argument
79 dma_addr_t *offset = (dma_addr_t *)desc_end(desc); append_ptr()
83 (*desc) += CAAM_PTR_SZ / CAAM_CMD_SZ; append_ptr()
86 static inline void init_job_desc_shared(u32 *desc, dma_addr_t ptr, int len, init_job_desc_shared() argument
90 init_job_desc(desc, HDR_SHARED | options | init_job_desc_shared()
92 append_ptr(desc, ptr); init_job_desc_shared()
95 static inline void append_data(u32 *desc, void *data, int len) append_data() argument
97 u32 *offset = desc_end(desc); append_data()
102 (*desc) += (len + CAAM_CMD_SZ - 1) / CAAM_CMD_SZ; append_data()
105 static inline void append_cmd(u32 *desc, u32 command) append_cmd() argument
107 u32 *cmd = desc_end(desc); append_cmd()
111 (*desc)++; append_cmd()
116 static inline void append_u64(u32 *desc, u64 data) append_u64() argument
118 u32 *offset = desc_end(desc); append_u64()
123 (*desc) += 2; append_u64()
127 static inline u32 *write_cmd(u32 *desc, u32 command) write_cmd() argument
129 *desc = command; write_cmd()
131 return desc + 1; write_cmd()
134 static inline void append_cmd_ptr(u32 *desc, dma_addr_t ptr, int len, append_cmd_ptr() argument
137 append_cmd(desc, command | len); append_cmd_ptr()
138 append_ptr(desc, ptr); append_cmd_ptr()
142 static inline void append_cmd_ptr_extlen(u32 *desc, dma_addr_t ptr, append_cmd_ptr_extlen() argument
145 append_cmd(desc, command); append_cmd_ptr_extlen()
147 append_ptr(desc, ptr); append_cmd_ptr_extlen()
148 append_cmd(desc, len); append_cmd_ptr_extlen()
151 static inline void append_cmd_data(u32 *desc, void *data, int len, append_cmd_data() argument
154 append_cmd(desc, command | IMMEDIATE | len); append_cmd_data()
155 append_data(desc, data, len); append_cmd_data()
159 static inline u32 *append_##cmd(u32 *desc, u32 options) \
161 u32 *cmd = desc_end(desc); \
163 append_cmd(desc, CMD_##op | options); \
169 static inline void set_jump_tgt_here(u32 *desc, u32 *jump_cmd) set_jump_tgt_here() argument
171 *jump_cmd = *jump_cmd | (desc_len(desc) - (jump_cmd - desc)); set_jump_tgt_here()
174 static inline void set_move_tgt_here(u32 *desc, u32 *move_cmd) set_move_tgt_here() argument
177 *move_cmd = *move_cmd | ((desc_len(desc) << (MOVE_OFFSET_SHIFT + 2)) & set_move_tgt_here()
182 static inline void append_##cmd(u32 *desc, u32 options) \
185 append_cmd(desc, CMD_##op | options); \
190 static inline void append_##cmd(u32 *desc, unsigned int len, u32 options) \
193 append_cmd(desc, CMD_##op | len | options); \
202 static inline void append_##cmd(u32 *desc, dma_addr_t ptr, unsigned int len, \
206 append_cmd_ptr(desc, ptr, len, CMD_##op | options); \
213 static inline void append_store(u32 *desc, dma_addr_t ptr, unsigned int len, append_store() argument
220 append_cmd(desc, CMD_STORE | options | len); append_store()
227 append_ptr(desc, ptr); append_store()
231 static inline void append_seq_##cmd##_ptr_intlen(u32 *desc, dma_addr_t ptr, \
237 append_cmd(desc, CMD_SEQ_##op##_PTR | len | options); \
239 append_cmd_ptr(desc, ptr, len, CMD_SEQ_##op##_PTR | options); \
245 static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
249 append_cmd_data(desc, data, len, CMD_##op | options); \
255 static inline void append_##cmd##_extlen(u32 *desc, dma_addr_t ptr, \
259 append_cmd_ptr_extlen(desc, ptr, len, CMD_##op | SQIN_EXT | options); \
269 static inline void append_##cmd(u32 *desc, dma_addr_t ptr, \
274 append_##cmd##_extlen(desc, ptr, len, options); \
276 append_##cmd##_intlen(desc, ptr, len, options); \
286 static inline void append_##cmd##_as_imm(u32 *desc, void *data, \
291 append_cmd(desc, CMD_##op | IMMEDIATE | len | options); \
292 append_data(desc, data, data_len); \
297 static inline void append_##cmd##_imm_##type(u32 *desc, type immediate, \
301 append_cmd(desc, CMD_##op | IMMEDIATE | options | sizeof(type)); \
302 append_cmd(desc, immediate); \
310 #define APPEND_MATH(op, desc, dest, src_0, src_1, len) \
311 append_cmd(desc, CMD_MATH | MATH_FUN_##op | MATH_DEST_##dest | \
314 #define append_math_add(desc, dest, src0, src1, len) \
315 APPEND_MATH(ADD, desc, dest, src0, src1, len)
316 #define append_math_sub(desc, dest, src0, src1, len) \
317 APPEND_MATH(SUB, desc, dest, src0, src1, len)
318 #define append_math_add_c(desc, dest, src0, src1, len) \
319 APPEND_MATH(ADDC, desc, dest, src0, src1, len)
320 #define append_math_sub_b(desc, dest, src0, src1, len) \
321 APPEND_MATH(SUBB, desc, dest, src0, src1, len)
322 #define append_math_and(desc, dest, src0, src1, len) \
323 APPEND_MATH(AND, desc, dest, src0, src1, len)
324 #define append_math_or(desc, dest, src0, src1, len) \
325 APPEND_MATH(OR, desc, dest, src0, src1, len)
326 #define append_math_xor(desc, dest, src0, src1, len) \
327 APPEND_MATH(XOR, desc, dest, src0, src1, len)
328 #define append_math_lshift(desc, dest, src0, src1, len) \
329 APPEND_MATH(LSHIFT, desc, dest, src0, src1, len)
330 #define append_math_rshift(desc, dest, src0, src1, len) \
331 APPEND_MATH(RSHIFT, desc, dest, src0, src1, len)
332 #define append_math_ldshift(desc, dest, src0, src1, len) \
333 APPEND_MATH(SHLD, desc, dest, src0, src1, len)
336 #define APPEND_MATH_IMM_u32(op, desc, dest, src_0, src_1, data) \
338 APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ); \
339 append_cmd(desc, data); \
342 #define append_math_add_imm_u32(desc, dest, src0, src1, data) \
343 APPEND_MATH_IMM_u32(ADD, desc, dest, src0, src1, data)
344 #define append_math_sub_imm_u32(desc, dest, src0, src1, data) \
345 APPEND_MATH_IMM_u32(SUB, desc, dest, src0, src1, data)
346 #define append_math_add_c_imm_u32(desc, dest, src0, src1, data) \
347 APPEND_MATH_IMM_u32(ADDC, desc, dest, src0, src1, data)
348 #define append_math_sub_b_imm_u32(desc, dest, src0, src1, data) \
349 APPEND_MATH_IMM_u32(SUBB, desc, dest, src0, src1, data)
350 #define append_math_and_imm_u32(desc, dest, src0, src1, data) \
351 APPEND_MATH_IMM_u32(AND, desc, dest, src0, src1, data)
352 #define append_math_or_imm_u32(desc, dest, src0, src1, data) \
353 APPEND_MATH_IMM_u32(OR, desc, dest, src0, src1, data)
354 #define append_math_xor_imm_u32(desc, dest, src0, src1, data) \
355 APPEND_MATH_IMM_u32(XOR, desc, dest, src0, src1, data)
356 #define append_math_lshift_imm_u32(desc, dest, src0, src1, data) \
357 APPEND_MATH_IMM_u32(LSHIFT, desc, dest, src0, src1, data)
358 #define append_math_rshift_imm_u32(desc, dest, src0, src1, data) \
359 APPEND_MATH_IMM_u32(RSHIFT, desc, dest, src0, src1, data)
362 #define APPEND_MATH_IMM_u64(op, desc, dest, src_0, src_1, data) \
365 APPEND_MATH(op, desc, dest, src_0, src_1, CAAM_CMD_SZ * 2 | \
368 append_u64(desc, data); \
370 append_u32(desc, lower_32_bits(data)); \
373 #define append_math_add_imm_u64(desc, dest, src0, src1, data) \
374 APPEND_MATH_IMM_u64(ADD, desc, dest, src0, src1, data)
375 #define append_math_sub_imm_u64(desc, dest, src0, src1, data) \
376 APPEND_MATH_IMM_u64(SUB, desc, dest, src0, src1, data)
377 #define append_math_add_c_imm_u64(desc, dest, src0, src1, data) \
378 APPEND_MATH_IMM_u64(ADDC, desc, dest, src0, src1, data)
379 #define append_math_sub_b_imm_u64(desc, dest, src0, src1, data) \
380 APPEND_MATH_IMM_u64(SUBB, desc, dest, src0, src1, data)
381 #define append_math_and_imm_u64(desc, dest, src0, src1, data) \
382 APPEND_MATH_IMM_u64(AND, desc, dest, src0, src1, data)
383 #define append_math_or_imm_u64(desc, dest, src0, src1, data) \
384 APPEND_MATH_IMM_u64(OR, desc, dest, src0, src1, data)
385 #define append_math_xor_imm_u64(desc, dest, src0, src1, data) \
386 APPEND_MATH_IMM_u64(XOR, desc, dest, src0, src1, data)
387 #define append_math_lshift_imm_u64(desc, dest, src0, src1, data) \
388 APPEND_MATH_IMM_u64(LSHIFT, desc, dest, src0, src1, data)
389 #define append_math_rshift_imm_u64(desc, dest, src0, src1, data) \
390 APPEND_MATH_IMM_u64(RSHIFT, desc, dest, src0, src1, data)
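All of these helpers share one convention: the descriptor's header word doubles as its running length, so each append_*() bumps it and desc_end() always points at the next free word. A minimal composition sketch, modeled on gen_split_key() below (the alg_op value and buffer sizing are illustrative assumptions):

	#include "desc_constr.h"

	static void demo_build_job_desc(u32 *desc, dma_addr_t key_dma,
					int keylen, u32 alg_op)
	{
		init_job_desc(desc, 0);			/* header, len = 1 */
		/* KEY command word plus pointer word(s) */
		append_key(desc, key_dma, keylen,
			   CLASS_2 | KEY_DEST_CLASS_REG);
		/* one OPERATION word */
		append_operation(desc, alg_op | OP_ALG_AS_INITFINAL);
		/* desc_len()/desc_bytes() now cover everything appended */
	}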
H A Dkey_gen.c13 void split_key_done(struct device *dev, u32 *desc, u32 err, split_key_done() argument
48 u32 *desc; gen_split_key() local
53 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); gen_split_key()
54 if (!desc) { gen_split_key()
73 init_job_desc(desc, 0); gen_split_key()
74 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); gen_split_key()
77 append_operation(desc, alg_op | OP_ALG_DECRYPT | OP_ALG_AS_INIT); gen_split_key()
83 append_fifo_load_as_imm(desc, NULL, 0, LDST_CLASS_2_CCB | gen_split_key()
90 append_fifo_store(desc, dma_addr_out, split_key_len, gen_split_key()
97 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); gen_split_key()
103 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); gen_split_key()
120 kfree(desc); gen_split_key()
H A Djr.h13 int caam_jr_enqueue(struct device *dev, u32 *desc,
14 void (*cbk)(struct device *dev, u32 *desc, u32 status,
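caam_jr_enqueue() is asynchronous: a zero return means the job was accepted and the callback now owns completion. A sketch of the synchronous wrapper pattern that gen_split_key() above uses (struct and function names here are hypothetical):

	#include <linux/completion.h>
	#include "jr.h"

	struct demo_result {
		int err;
		struct completion done;
	};

	/* matches the cbk signature declared above */
	static void demo_done(struct device *dev, u32 *desc, u32 status,
			      void *areq)
	{
		struct demo_result *res = areq;

		res->err = status;
		complete(&res->done);
	}

	static int demo_run_job(struct device *jrdev, u32 *desc)
	{
		struct demo_result res;
		int ret;

		init_completion(&res.done);
		ret = caam_jr_enqueue(jrdev, desc, demo_done, &res);
		if (!ret) {		/* queued: wait for demo_done() */
			wait_for_completion(&res.done);
			ret = res.err;
		}
		return ret;
	}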
H A Dcaamalg.c33 * So, a job desc looks like:
131 static inline void append_dec_op1(u32 *desc, u32 type) append_dec_op1() argument
137 append_operation(desc, type | OP_ALG_AS_INITFINAL | append_dec_op1()
142 jump_cmd = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD); append_dec_op1()
143 append_operation(desc, type | OP_ALG_AS_INITFINAL | append_dec_op1()
145 uncond_jump_cmd = append_jump(desc, JUMP_TEST_ALL); append_dec_op1()
146 set_jump_tgt_here(desc, jump_cmd); append_dec_op1()
147 append_operation(desc, type | OP_ALG_AS_INITFINAL | append_dec_op1()
149 set_jump_tgt_here(desc, uncond_jump_cmd); append_dec_op1()
156 static inline void aead_append_src_dst(u32 *desc, u32 msg_type) aead_append_src_dst() argument
158 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); aead_append_src_dst()
159 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | aead_append_src_dst()
167 static inline void ablkcipher_append_src_dst(u32 *desc) ablkcipher_append_src_dst() argument
169 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ablkcipher_append_src_dst()
170 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ablkcipher_append_src_dst()
171 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | ablkcipher_append_src_dst()
173 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | KEY_VLF); ablkcipher_append_src_dst()
198 static void append_key_aead(u32 *desc, struct caam_ctx *ctx, append_key_aead() argument
213 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, append_key_aead()
216 append_key_as_imm(desc, (void *)ctx->key + append_key_aead()
220 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | append_key_aead()
222 append_key(desc, ctx->key_dma + ctx->split_key_pad_len, append_key_aead()
230 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | append_key_aead()
232 append_move(desc, append_key_aead()
240 static void init_sh_desc_key_aead(u32 *desc, struct caam_ctx *ctx, init_sh_desc_key_aead() argument
246 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); init_sh_desc_key_aead()
249 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | init_sh_desc_key_aead()
252 append_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); init_sh_desc_key_aead()
254 set_jump_tgt_here(desc, key_jump_cmd); init_sh_desc_key_aead()
263 u32 *desc; aead_null_set_sh_desc() local
274 desc = ctx->sh_desc_enc; aead_null_set_sh_desc()
276 init_sh_desc(desc, HDR_SHARE_SERIAL); aead_null_set_sh_desc()
279 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | aead_null_set_sh_desc()
282 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, aead_null_set_sh_desc()
286 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | aead_null_set_sh_desc()
288 set_jump_tgt_here(desc, key_jump_cmd); aead_null_set_sh_desc()
291 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); aead_null_set_sh_desc()
294 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); aead_null_set_sh_desc()
295 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); aead_null_set_sh_desc()
302 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | aead_null_set_sh_desc()
305 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | aead_null_set_sh_desc()
311 append_operation(desc, ctx->class2_alg_type | aead_null_set_sh_desc()
315 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); aead_null_set_sh_desc()
317 set_move_tgt_here(desc, read_move_cmd); aead_null_set_sh_desc()
318 set_move_tgt_here(desc, write_move_cmd); aead_null_set_sh_desc()
319 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); aead_null_set_sh_desc()
320 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | aead_null_set_sh_desc()
324 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | aead_null_set_sh_desc()
327 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, aead_null_set_sh_desc()
328 desc_bytes(desc), aead_null_set_sh_desc()
337 DUMP_PREFIX_ADDRESS, 16, 4, desc, aead_null_set_sh_desc()
338 desc_bytes(desc), 1); aead_null_set_sh_desc()
350 desc = ctx->sh_desc_dec; aead_null_set_sh_desc()
353 init_sh_desc(desc, HDR_SHARE_SERIAL); aead_null_set_sh_desc()
356 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | aead_null_set_sh_desc()
359 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, aead_null_set_sh_desc()
363 append_key(desc, ctx->key_dma, ctx->split_key_len, CLASS_2 | aead_null_set_sh_desc()
365 set_jump_tgt_here(desc, key_jump_cmd); aead_null_set_sh_desc()
368 append_operation(desc, ctx->class2_alg_type | aead_null_set_sh_desc()
372 append_math_sub(desc, REG2, SEQOUTLEN, REG0, CAAM_CMD_SZ); aead_null_set_sh_desc()
375 append_math_add(desc, VARSEQINLEN, ZERO, REG2, CAAM_CMD_SZ); aead_null_set_sh_desc()
376 append_math_add(desc, VARSEQOUTLEN, ZERO, REG2, CAAM_CMD_SZ); aead_null_set_sh_desc()
383 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | aead_null_set_sh_desc()
386 write_move_cmd = append_move(desc, MOVE_SRC_MATH2 | aead_null_set_sh_desc()
392 aead_append_src_dst(desc, FIFOLD_TYPE_MSG | FIFOLD_TYPE_FLUSH1); aead_null_set_sh_desc()
398 jump_cmd = append_jump(desc, JUMP_TEST_ALL); aead_null_set_sh_desc()
399 set_jump_tgt_here(desc, jump_cmd); aead_null_set_sh_desc()
401 set_move_tgt_here(desc, read_move_cmd); aead_null_set_sh_desc()
402 set_move_tgt_here(desc, write_move_cmd); aead_null_set_sh_desc()
403 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); aead_null_set_sh_desc()
404 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO | aead_null_set_sh_desc()
406 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); aead_null_set_sh_desc()
409 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | aead_null_set_sh_desc()
412 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, aead_null_set_sh_desc()
413 desc_bytes(desc), aead_null_set_sh_desc()
422 DUMP_PREFIX_ADDRESS, 16, 4, desc, aead_null_set_sh_desc()
423 desc_bytes(desc), 1); aead_null_set_sh_desc()
439 u32 *desc; aead_set_sh_desc() local
478 desc = ctx->sh_desc_enc; aead_set_sh_desc()
481 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); aead_set_sh_desc()
484 append_operation(desc, ctx->class2_alg_type | aead_set_sh_desc()
488 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); aead_set_sh_desc()
489 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); aead_set_sh_desc()
492 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); aead_set_sh_desc()
495 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | aead_set_sh_desc()
500 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | aead_set_sh_desc()
507 append_operation(desc, ctx->class1_alg_type | aead_set_sh_desc()
511 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); aead_set_sh_desc()
512 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); aead_set_sh_desc()
513 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); aead_set_sh_desc()
516 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | aead_set_sh_desc()
519 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, aead_set_sh_desc()
520 desc_bytes(desc), aead_set_sh_desc()
528 DUMP_PREFIX_ADDRESS, 16, 4, desc, aead_set_sh_desc()
529 desc_bytes(desc), 1); aead_set_sh_desc()
545 desc = ctx->sh_desc_dec; aead_set_sh_desc()
548 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); aead_set_sh_desc()
551 append_operation(desc, ctx->class2_alg_type | aead_set_sh_desc()
555 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); aead_set_sh_desc()
556 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); aead_set_sh_desc()
559 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); aead_set_sh_desc()
562 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | aead_set_sh_desc()
567 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | aead_set_sh_desc()
575 append_operation(desc, ctx->class1_alg_type | aead_set_sh_desc()
578 append_dec_op1(desc, ctx->class1_alg_type); aead_set_sh_desc()
581 append_math_add(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); aead_set_sh_desc()
582 append_math_add(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); aead_set_sh_desc()
583 aead_append_src_dst(desc, FIFOLD_TYPE_MSG); aead_set_sh_desc()
586 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS2 | aead_set_sh_desc()
589 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, aead_set_sh_desc()
590 desc_bytes(desc), aead_set_sh_desc()
598 DUMP_PREFIX_ADDRESS, 16, 4, desc, aead_set_sh_desc()
599 desc_bytes(desc), 1); aead_set_sh_desc()
617 desc = ctx->sh_desc_givenc; aead_set_sh_desc()
620 init_sh_desc_key_aead(desc, ctx, keys_fit_inline, is_rfc3686); aead_set_sh_desc()
629 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | aead_set_sh_desc()
631 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); aead_set_sh_desc()
632 append_move(desc, MOVE_WAITCOMP | aead_set_sh_desc()
636 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); aead_set_sh_desc()
640 append_move(desc, MOVE_SRC_CLASS1CTX | MOVE_DEST_OUTFIFO | aead_set_sh_desc()
645 append_operation(desc, ctx->class2_alg_type | aead_set_sh_desc()
649 append_math_sub_imm_u32(desc, REG3, SEQOUTLEN, IMM, ctx->authsize); aead_set_sh_desc()
652 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); aead_set_sh_desc()
653 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); aead_set_sh_desc()
656 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); aead_set_sh_desc()
659 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_MSG | aead_set_sh_desc()
665 append_load_imm_u32(desc, moveiv, LDST_CLASS_IND_CCB | aead_set_sh_desc()
667 append_load_imm_u32(desc, ivsize, LDST_CLASS_2_CCB | aead_set_sh_desc()
672 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | aead_set_sh_desc()
679 append_operation(desc, ctx->class1_alg_type | aead_set_sh_desc()
683 append_math_add(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); aead_set_sh_desc()
686 append_seq_fifo_load(desc, ivsize, aead_set_sh_desc()
690 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); aead_set_sh_desc()
691 aead_append_src_dst(desc, FIFOLD_TYPE_MSG1OUT2); aead_set_sh_desc()
694 append_seq_store(desc, ctx->authsize, LDST_CLASS_2_CCB | aead_set_sh_desc()
697 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, aead_set_sh_desc()
698 desc_bytes(desc), aead_set_sh_desc()
706 DUMP_PREFIX_ADDRESS, 16, 4, desc, aead_set_sh_desc()
707 desc_bytes(desc), 1); aead_set_sh_desc()
732 u32 *desc; gcm_set_sh_desc() local
746 desc = ctx->sh_desc_enc; gcm_set_sh_desc()
748 init_sh_desc(desc, HDR_SHARE_SERIAL); gcm_set_sh_desc()
751 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | gcm_set_sh_desc()
754 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, gcm_set_sh_desc()
757 append_key(desc, ctx->key_dma, ctx->enckeylen, gcm_set_sh_desc()
759 set_jump_tgt_here(desc, key_jump_cmd); gcm_set_sh_desc()
762 append_operation(desc, ctx->class1_alg_type | gcm_set_sh_desc()
766 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); gcm_set_sh_desc()
767 zero_assoc_jump_cmd2 = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
771 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); gcm_set_sh_desc()
772 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
775 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); gcm_set_sh_desc()
778 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); gcm_set_sh_desc()
781 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG3, CAAM_CMD_SZ); gcm_set_sh_desc()
784 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
788 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
790 set_jump_tgt_here(desc, zero_assoc_jump_cmd1); gcm_set_sh_desc()
792 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); gcm_set_sh_desc()
795 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); gcm_set_sh_desc()
798 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
802 append_jump(desc, JUMP_TEST_ALL | 2); gcm_set_sh_desc()
805 set_jump_tgt_here(desc, zero_payload_jump_cmd); gcm_set_sh_desc()
808 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
812 set_jump_tgt_here(desc, zero_assoc_jump_cmd2); gcm_set_sh_desc()
815 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | gcm_set_sh_desc()
818 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, gcm_set_sh_desc()
819 desc_bytes(desc), gcm_set_sh_desc()
827 DUMP_PREFIX_ADDRESS, 16, 4, desc, gcm_set_sh_desc()
828 desc_bytes(desc), 1); gcm_set_sh_desc()
840 desc = ctx->sh_desc_dec; gcm_set_sh_desc()
842 init_sh_desc(desc, HDR_SHARE_SERIAL); gcm_set_sh_desc()
845 key_jump_cmd = append_jump(desc, JUMP_JSL | gcm_set_sh_desc()
849 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, gcm_set_sh_desc()
852 append_key(desc, ctx->key_dma, ctx->enckeylen, gcm_set_sh_desc()
854 set_jump_tgt_here(desc, key_jump_cmd); gcm_set_sh_desc()
857 append_operation(desc, ctx->class1_alg_type | gcm_set_sh_desc()
861 append_math_add(desc, VARSEQINLEN, ZERO, REG3, CAAM_CMD_SZ); gcm_set_sh_desc()
862 zero_assoc_jump_cmd1 = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
865 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); gcm_set_sh_desc()
868 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); gcm_set_sh_desc()
871 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
874 set_jump_tgt_here(desc, zero_assoc_jump_cmd1); gcm_set_sh_desc()
877 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); gcm_set_sh_desc()
880 zero_payload_jump_cmd = append_jump(desc, JUMP_TEST_ALL | gcm_set_sh_desc()
883 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); gcm_set_sh_desc()
886 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); gcm_set_sh_desc()
889 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | gcm_set_sh_desc()
893 set_jump_tgt_here(desc, zero_payload_jump_cmd); gcm_set_sh_desc()
896 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | gcm_set_sh_desc()
899 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, gcm_set_sh_desc()
900 desc_bytes(desc), gcm_set_sh_desc()
908 DUMP_PREFIX_ADDRESS, 16, 4, desc, gcm_set_sh_desc()
909 desc_bytes(desc), 1); gcm_set_sh_desc()
931 u32 *desc; rfc4106_set_sh_desc() local
945 desc = ctx->sh_desc_enc; rfc4106_set_sh_desc()
947 init_sh_desc(desc, HDR_SHARE_SERIAL); rfc4106_set_sh_desc()
950 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | rfc4106_set_sh_desc()
953 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, rfc4106_set_sh_desc()
956 append_key(desc, ctx->key_dma, ctx->enckeylen, rfc4106_set_sh_desc()
958 set_jump_tgt_here(desc, key_jump_cmd); rfc4106_set_sh_desc()
961 append_operation(desc, ctx->class1_alg_type | rfc4106_set_sh_desc()
964 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); rfc4106_set_sh_desc()
965 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); rfc4106_set_sh_desc()
968 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4106_set_sh_desc()
972 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); rfc4106_set_sh_desc()
975 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); rfc4106_set_sh_desc()
978 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); rfc4106_set_sh_desc()
981 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); rfc4106_set_sh_desc()
984 append_math_sub(desc, VARSEQOUTLEN, VARSEQINLEN, REG0, CAAM_CMD_SZ); rfc4106_set_sh_desc()
987 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); rfc4106_set_sh_desc()
990 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4106_set_sh_desc()
994 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | rfc4106_set_sh_desc()
997 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, rfc4106_set_sh_desc()
998 desc_bytes(desc), rfc4106_set_sh_desc()
1006 DUMP_PREFIX_ADDRESS, 16, 4, desc, rfc4106_set_sh_desc()
1007 desc_bytes(desc), 1); rfc4106_set_sh_desc()
1019 desc = ctx->sh_desc_dec; rfc4106_set_sh_desc()
1021 init_sh_desc(desc, HDR_SHARE_SERIAL); rfc4106_set_sh_desc()
1024 key_jump_cmd = append_jump(desc, JUMP_JSL | rfc4106_set_sh_desc()
1027 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, rfc4106_set_sh_desc()
1030 append_key(desc, ctx->key_dma, ctx->enckeylen, rfc4106_set_sh_desc()
1032 set_jump_tgt_here(desc, key_jump_cmd); rfc4106_set_sh_desc()
1035 append_operation(desc, ctx->class1_alg_type | rfc4106_set_sh_desc()
1038 append_math_sub_imm_u32(desc, VARSEQINLEN, REG3, IMM, 8); rfc4106_set_sh_desc()
1039 append_math_add(desc, VARSEQOUTLEN, ZERO, REG3, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1042 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4106_set_sh_desc()
1046 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); rfc4106_set_sh_desc()
1049 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG3, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1052 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_MSG); rfc4106_set_sh_desc()
1055 append_seq_fifo_store(desc, 0, FIFOST_TYPE_SKIP | FIFOLDST_VLF); rfc4106_set_sh_desc()
1058 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); rfc4106_set_sh_desc()
1061 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); rfc4106_set_sh_desc()
1064 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLDST_VLF | rfc4106_set_sh_desc()
1068 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | rfc4106_set_sh_desc()
1071 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, rfc4106_set_sh_desc()
1072 desc_bytes(desc), rfc4106_set_sh_desc()
1080 DUMP_PREFIX_ADDRESS, 16, 4, desc, rfc4106_set_sh_desc()
1081 desc_bytes(desc), 1); rfc4106_set_sh_desc()
1105 u32 *desc; rfc4543_set_sh_desc() local
1119 desc = ctx->sh_desc_enc; rfc4543_set_sh_desc()
1121 init_sh_desc(desc, HDR_SHARE_SERIAL); rfc4543_set_sh_desc()
1124 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | rfc4543_set_sh_desc()
1127 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, rfc4543_set_sh_desc()
1130 append_key(desc, ctx->key_dma, ctx->enckeylen, rfc4543_set_sh_desc()
1132 set_jump_tgt_here(desc, key_jump_cmd); rfc4543_set_sh_desc()
1135 append_operation(desc, ctx->class1_alg_type | rfc4543_set_sh_desc()
1139 append_math_sub(desc, REG3, SEQINLEN, REG0, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1146 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | rfc4543_set_sh_desc()
1148 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1152 append_math_sub(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1155 append_math_sub(desc, VARSEQOUTLEN, SEQINLEN, REG0, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1158 aead_append_src_dst(desc, FIFOLD_TYPE_AAD); rfc4543_set_sh_desc()
1160 set_move_tgt_here(desc, read_move_cmd); rfc4543_set_sh_desc()
1161 set_move_tgt_here(desc, write_move_cmd); rfc4543_set_sh_desc()
1162 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); rfc4543_set_sh_desc()
1164 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); rfc4543_set_sh_desc()
1167 append_seq_store(desc, ctx->authsize, LDST_CLASS_1_CCB | rfc4543_set_sh_desc()
1170 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, rfc4543_set_sh_desc()
1171 desc_bytes(desc), rfc4543_set_sh_desc()
1179 DUMP_PREFIX_ADDRESS, 16, 4, desc, rfc4543_set_sh_desc()
1180 desc_bytes(desc), 1); rfc4543_set_sh_desc()
1192 desc = ctx->sh_desc_dec; rfc4543_set_sh_desc()
1194 init_sh_desc(desc, HDR_SHARE_SERIAL); rfc4543_set_sh_desc()
1197 key_jump_cmd = append_jump(desc, JUMP_JSL | rfc4543_set_sh_desc()
1200 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, rfc4543_set_sh_desc()
1203 append_key(desc, ctx->key_dma, ctx->enckeylen, rfc4543_set_sh_desc()
1205 set_jump_tgt_here(desc, key_jump_cmd); rfc4543_set_sh_desc()
1208 append_operation(desc, ctx->class1_alg_type | rfc4543_set_sh_desc()
1212 append_math_sub(desc, REG3, SEQOUTLEN, REG0, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1219 read_move_cmd = append_move(desc, MOVE_SRC_DESCBUF | MOVE_DEST_MATH3 | rfc4543_set_sh_desc()
1221 write_move_cmd = append_move(desc, MOVE_SRC_MATH3 | MOVE_DEST_DESCBUF | rfc4543_set_sh_desc()
1225 append_math_sub(desc, VARSEQINLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1228 append_math_sub(desc, VARSEQOUTLEN, SEQOUTLEN, REG0, CAAM_CMD_SZ); rfc4543_set_sh_desc()
1231 append_seq_fifo_store(desc, 0, FIFOST_TYPE_MESSAGE_DATA | FIFOLDST_VLF); rfc4543_set_sh_desc()
1234 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_BOTH | FIFOLDST_VLF | rfc4543_set_sh_desc()
1237 set_move_tgt_here(desc, read_move_cmd); rfc4543_set_sh_desc()
1238 set_move_tgt_here(desc, write_move_cmd); rfc4543_set_sh_desc()
1239 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); rfc4543_set_sh_desc()
1241 append_move(desc, MOVE_SRC_INFIFO_CL | MOVE_DEST_OUTFIFO); rfc4543_set_sh_desc()
1242 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); rfc4543_set_sh_desc()
1245 append_seq_fifo_load(desc, ctx->authsize, FIFOLD_CLASS_CLASS1 | rfc4543_set_sh_desc()
1248 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, rfc4543_set_sh_desc()
1249 desc_bytes(desc), rfc4543_set_sh_desc()
1257 DUMP_PREFIX_ADDRESS, 16, 4, desc, rfc4543_set_sh_desc()
1258 desc_bytes(desc), 1); rfc4543_set_sh_desc()
1466 u32 *desc; ablkcipher_setkey() local
1507 desc = ctx->sh_desc_enc; ablkcipher_setkey()
1508 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ablkcipher_setkey()
1510 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ablkcipher_setkey()
1514 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, ablkcipher_setkey()
1521 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | ablkcipher_setkey()
1523 append_move(desc, MOVE_WAITCOMP | ablkcipher_setkey()
1530 set_jump_tgt_here(desc, key_jump_cmd); ablkcipher_setkey()
1533 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT | ablkcipher_setkey()
1538 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | ablkcipher_setkey()
1545 append_operation(desc, ctx->class1_alg_type | ablkcipher_setkey()
1549 ablkcipher_append_src_dst(desc); ablkcipher_setkey()
1551 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, ablkcipher_setkey()
1552 desc_bytes(desc), ablkcipher_setkey()
1561 DUMP_PREFIX_ADDRESS, 16, 4, desc, ablkcipher_setkey()
1562 desc_bytes(desc), 1); ablkcipher_setkey()
1565 desc = ctx->sh_desc_dec; ablkcipher_setkey()
1567 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ablkcipher_setkey()
1569 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ablkcipher_setkey()
1573 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, ablkcipher_setkey()
1580 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | ablkcipher_setkey()
1582 append_move(desc, MOVE_WAITCOMP | ablkcipher_setkey()
1589 set_jump_tgt_here(desc, key_jump_cmd); ablkcipher_setkey()
1592 append_seq_load(desc, crt->ivsize, LDST_SRCDST_BYTE_CONTEXT | ablkcipher_setkey()
1597 append_load_imm_u32(desc, be32_to_cpu(1), LDST_IMM | ablkcipher_setkey()
1605 append_operation(desc, ctx->class1_alg_type | ablkcipher_setkey()
1608 append_dec_op1(desc, ctx->class1_alg_type); ablkcipher_setkey()
1611 ablkcipher_append_src_dst(desc); ablkcipher_setkey()
1613 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, ablkcipher_setkey()
1614 desc_bytes(desc), ablkcipher_setkey()
1624 DUMP_PREFIX_ADDRESS, 16, 4, desc, ablkcipher_setkey()
1625 desc_bytes(desc), 1); ablkcipher_setkey()
1628 desc = ctx->sh_desc_givenc; ablkcipher_setkey()
1630 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); ablkcipher_setkey()
1632 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | ablkcipher_setkey()
1636 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, ablkcipher_setkey()
1643 append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB | ablkcipher_setkey()
1645 append_move(desc, MOVE_WAITCOMP | ablkcipher_setkey()
1651 set_jump_tgt_here(desc, key_jump_cmd); ablkcipher_setkey()
1657 append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB | ablkcipher_setkey()
1659 append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO); ablkcipher_setkey()
1660 append_move(desc, MOVE_WAITCOMP | ablkcipher_setkey()
1665 append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO); ablkcipher_setkey()
1668 append_seq_store(desc, crt->ivsize, ablkcipher_setkey()
1674 append_load_imm_u32(desc, (u32)1, LDST_IMM | ablkcipher_setkey()
1681 append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP | ablkcipher_setkey()
1685 append_operation(desc, ctx->class1_alg_type | ablkcipher_setkey()
1689 ablkcipher_append_src_dst(desc); ablkcipher_setkey()
1691 ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc, ablkcipher_setkey()
1692 desc_bytes(desc), ablkcipher_setkey()
1701 DUMP_PREFIX_ADDRESS, 16, 4, desc, ablkcipher_setkey()
1702 desc_bytes(desc), 1); ablkcipher_setkey()
1713 u32 *key_jump_cmd, *desc; xts_ablkcipher_setkey() local
1732 desc = ctx->sh_desc_enc; xts_ablkcipher_setkey()
1733 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); xts_ablkcipher_setkey()
1735 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | xts_ablkcipher_setkey()
1739 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, xts_ablkcipher_setkey()
1743 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT | xts_ablkcipher_setkey()
1745 append_data(desc, (void *)&sector_size, 8); xts_ablkcipher_setkey()
1747 set_jump_tgt_here(desc, key_jump_cmd); xts_ablkcipher_setkey()
1754 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | xts_ablkcipher_setkey()
1756 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); xts_ablkcipher_setkey()
1759 append_operation(desc, ctx->class1_alg_type | OP_ALG_AS_INITFINAL | xts_ablkcipher_setkey()
1763 ablkcipher_append_src_dst(desc); xts_ablkcipher_setkey()
1765 ctx->sh_desc_enc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), xts_ablkcipher_setkey()
1774 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); xts_ablkcipher_setkey()
1778 desc = ctx->sh_desc_dec; xts_ablkcipher_setkey()
1780 init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX); xts_ablkcipher_setkey()
1782 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | xts_ablkcipher_setkey()
1786 append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen, xts_ablkcipher_setkey()
1790 append_cmd(desc, CMD_LOAD | IMMEDIATE | LDST_SRCDST_BYTE_CONTEXT | xts_ablkcipher_setkey()
1792 append_data(desc, (void *)&sector_size, 8); xts_ablkcipher_setkey()
1794 set_jump_tgt_here(desc, key_jump_cmd); xts_ablkcipher_setkey()
1801 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | xts_ablkcipher_setkey()
1803 append_seq_fifo_load(desc, 8, FIFOLD_CLASS_SKIP); xts_ablkcipher_setkey()
1806 append_dec_op1(desc, ctx->class1_alg_type); xts_ablkcipher_setkey()
1809 ablkcipher_append_src_dst(desc); xts_ablkcipher_setkey()
1811 ctx->sh_desc_dec_dma = dma_map_single(jrdev, desc, desc_bytes(desc), xts_ablkcipher_setkey()
1822 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); xts_ablkcipher_setkey()
1834 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
1855 * @desc: h/w descriptor (variable length; must not exceed MAX_CAAM_DESCSIZE)
1912 static void aead_encrypt_done(struct device *jrdev, u32 *desc, u32 err, aead_encrypt_done() argument
1922 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); aead_encrypt_done()
1934 static void aead_decrypt_done(struct device *jrdev, u32 *desc, u32 err, aead_decrypt_done() argument
1944 edesc = container_of(desc, struct aead_edesc, hw_desc[0]); aead_decrypt_done()
1962 static void ablkcipher_encrypt_done(struct device *jrdev, u32 *desc, u32 err, ablkcipher_encrypt_done() argument
1974 edesc = (struct ablkcipher_edesc *)((char *)desc - ablkcipher_encrypt_done()
1995 static void ablkcipher_decrypt_done(struct device *jrdev, u32 *desc, u32 err, ablkcipher_decrypt_done() argument
2007 edesc = (struct ablkcipher_edesc *)((char *)desc - ablkcipher_decrypt_done()
2037 u32 *desc = edesc->hw_desc; init_aead_job() local
2048 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); init_aead_job()
2059 append_seq_in_ptr(desc, src_dma, req->assoclen + req->cryptlen, init_aead_job()
2077 append_seq_out_ptr(desc, dst_dma, init_aead_job()
2081 append_seq_out_ptr(desc, dst_dma, init_aead_job()
2086 append_math_add_imm_u32(desc, REG3, ZERO, IMM, req->assoclen); init_aead_job()
2096 u32 *desc = edesc->hw_desc; init_gcm_job() local
2108 append_cmd(desc, CMD_FIFO_LOAD | FIFOLD_CLASS_CLASS1 | IMMEDIATE | init_gcm_job()
2112 append_data(desc, ctx->key + ctx->enckeylen, 4); init_gcm_job()
2114 append_data(desc, req->iv, ivsize); init_gcm_job()
2130 u32 *desc = edesc->hw_desc; init_authenc_job() local
2151 append_load_as_imm(desc, req->iv, ivsize, init_authenc_job()
2167 u32 *desc = edesc->hw_desc; init_ablkcipher_job() local
2182 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); init_ablkcipher_job()
2192 append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options); init_ablkcipher_job()
2211 append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options); init_ablkcipher_job()
2224 u32 *desc = edesc->hw_desc; init_ablkcipher_giv_job() local
2239 init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE); init_ablkcipher_giv_job()
2249 append_seq_in_ptr(desc, src_dma, req->nbytes, in_options); init_ablkcipher_giv_job()
2259 append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options); init_ablkcipher_giv_job()
2303 /* allocate space for base edesc and hw desc commands, link tables */ aead_edesc_alloc()
2380 u32 *desc; gcm_encrypt() local
2396 desc = edesc->hw_desc; gcm_encrypt()
2397 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); gcm_encrypt()
2423 u32 *desc; aead_encrypt() local
2440 desc = edesc->hw_desc; aead_encrypt()
2441 ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req); aead_encrypt()
2459 u32 *desc; gcm_decrypt() local
2475 desc = edesc->hw_desc; gcm_decrypt()
2476 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); gcm_decrypt()
2502 u32 *desc; aead_decrypt() local
2525 desc = edesc->hw_desc; aead_decrypt()
2526 ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req); aead_decrypt()
2604 /* allocate space for base edesc and hw desc commands, link tables */ ablkcipher_edesc_alloc()
2657 u32 *desc; ablkcipher_encrypt() local
2674 desc = edesc->hw_desc; ablkcipher_encrypt()
2675 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); ablkcipher_encrypt()
2694 u32 *desc; ablkcipher_decrypt() local
2706 desc = edesc->hw_desc; ablkcipher_decrypt()
2713 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_decrypt_done, req); ablkcipher_decrypt()
2780 /* allocate space for base edesc and hw desc commands, link tables */ ablkcipher_giv_edesc_alloc()
2835 u32 *desc; ablkcipher_givencrypt() local
2853 desc = edesc->hw_desc; ablkcipher_givencrypt()
2854 ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req); ablkcipher_givencrypt()
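Every request path in caamalg.c has the same shape: setkey builds a shared descriptor once, and each request wraps it in a short job descriptor before enqueueing. Condensed from init_ablkcipher_job() and ablkcipher_encrypt() above (a sketch; the *_dma handles and option words stand in for mapping logic not shown here):

	u32 *desc = edesc->hw_desc;

	/* job header deferring to the shared descriptor from setkey */
	init_job_desc_shared(desc, ctx->sh_desc_enc_dma,
			     desc_len(ctx->sh_desc_enc),
			     HDR_SHARE_DEFER | HDR_REVERSE);

	/* SEQ IN/OUT pointers for this request's buffers */
	append_seq_in_ptr(desc, src_dma, req->nbytes + ivsize, in_options);
	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);

	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);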
H A Dcaamhash.c42 * So, a job desc looks like:
149 static inline int map_seq_out_ptr_ctx(u32 *desc, struct device *jrdev, map_seq_out_ptr_ctx() argument
160 append_seq_out_ptr(desc, state->ctx_dma, ctx_len, 0); map_seq_out_ptr_ctx()
166 static inline dma_addr_t map_seq_out_ptr_result(u32 *desc, struct device *jrdev, map_seq_out_ptr_result() argument
172 append_seq_out_ptr(desc, dst_dma, digestsize, 0); map_seq_out_ptr_result()
219 static inline int ctx_map_to_sec4_sg(u32 *desc, struct device *jrdev, ctx_map_to_sec4_sg() argument
235 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) append_key_ahash() argument
237 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, append_key_ahash()
243 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) init_sh_desc_key_ahash() argument
247 init_sh_desc(desc, HDR_SHARE_SERIAL); init_sh_desc_key_ahash()
251 key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | init_sh_desc_key_ahash()
254 append_key_ahash(desc, ctx); init_sh_desc_key_ahash()
256 set_jump_tgt_here(desc, key_jump_cmd); init_sh_desc_key_ahash()
260 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); init_sh_desc_key_ahash()
268 static inline void ahash_append_load_str(u32 *desc, int digestsize) ahash_append_load_str() argument
271 append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ); ahash_append_load_str()
274 append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS2 | FIFOLD_TYPE_LAST2 | ahash_append_load_str()
278 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | ahash_append_load_str()
285 static inline void ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, ahash_ctx_data_to_out() argument
289 init_sh_desc_key_ahash(desc, ctx); ahash_ctx_data_to_out()
292 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | ahash_ctx_data_to_out()
296 append_operation(desc, op | state | OP_ALG_ENCRYPT); ahash_ctx_data_to_out()
301 ahash_append_load_str(desc, digestsize); ahash_ctx_data_to_out()
305 static inline void ahash_data_to_out(u32 *desc, u32 op, u32 state, ahash_data_to_out() argument
308 init_sh_desc_key_ahash(desc, ctx); ahash_data_to_out()
311 append_operation(desc, op | state | OP_ALG_ENCRYPT); ahash_data_to_out()
316 ahash_append_load_str(desc, digestsize); ahash_data_to_out()
325 u32 *desc; ahash_set_sh_desc() local
331 desc = ctx->sh_desc_update; ahash_set_sh_desc()
333 init_sh_desc(desc, HDR_SHARE_SERIAL); ahash_set_sh_desc()
336 append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT | ahash_set_sh_desc()
340 append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | ahash_set_sh_desc()
344 ahash_append_load_str(desc, ctx->ctx_len); ahash_set_sh_desc()
346 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), ahash_set_sh_desc()
355 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_set_sh_desc()
359 desc = ctx->sh_desc_update_first; ahash_set_sh_desc()
361 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, ahash_set_sh_desc()
364 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, ahash_set_sh_desc()
365 desc_bytes(desc), ahash_set_sh_desc()
374 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_set_sh_desc()
378 desc = ctx->sh_desc_fin; ahash_set_sh_desc()
380 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, ahash_set_sh_desc()
383 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), ahash_set_sh_desc()
391 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_set_sh_desc()
392 desc_bytes(desc), 1); ahash_set_sh_desc()
396 desc = ctx->sh_desc_finup; ahash_set_sh_desc()
398 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, ahash_set_sh_desc()
401 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), ahash_set_sh_desc()
409 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_set_sh_desc()
410 desc_bytes(desc), 1); ahash_set_sh_desc()
414 desc = ctx->sh_desc_digest; ahash_set_sh_desc()
416 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, ahash_set_sh_desc()
419 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, ahash_set_sh_desc()
420 desc_bytes(desc), ahash_set_sh_desc()
429 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_set_sh_desc()
430 desc_bytes(desc), 1); ahash_set_sh_desc()
449 u32 *desc; hash_digest_key() local
454 desc = kmalloc(CAAM_CMD_SZ * 8 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); hash_digest_key()
455 if (!desc) { hash_digest_key()
460 init_job_desc(desc, 0); hash_digest_key()
466 kfree(desc); hash_digest_key()
474 kfree(desc); hash_digest_key()
479 append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | hash_digest_key()
481 append_seq_in_ptr(desc, src_dma, *keylen, 0); hash_digest_key()
482 append_seq_fifo_load(desc, *keylen, FIFOLD_CLASS_CLASS2 | hash_digest_key()
484 append_seq_out_ptr(desc, dst_dma, digestsize, 0); hash_digest_key()
485 append_seq_store(desc, digestsize, LDST_CLASS_2_CCB | hash_digest_key()
492 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); hash_digest_key()
498 ret = caam_jr_enqueue(jrdev, desc, split_key_done, &result); hash_digest_key()
515 kfree(desc); hash_digest_key()
637 static void ahash_done(struct device *jrdev, u32 *desc, u32 err, ahash_done() argument
651 edesc = (struct ahash_edesc *)((char *)desc - ahash_done()
672 static void ahash_done_bi(struct device *jrdev, u32 *desc, u32 err, ahash_done_bi() argument
686 edesc = (struct ahash_edesc *)((char *)desc - ahash_done_bi()
707 static void ahash_done_ctx_src(struct device *jrdev, u32 *desc, u32 err, ahash_done_ctx_src() argument
721 edesc = (struct ahash_edesc *)((char *)desc - ahash_done_ctx_src()
742 static void ahash_done_ctx_dst(struct device *jrdev, u32 *desc, u32 err, ahash_done_ctx_dst() argument
756 edesc = (struct ahash_edesc *)((char *)desc - ahash_done_ctx_dst()
792 u32 *sh_desc = ctx->sh_desc_update, *desc; ahash_update_ctx() local
811 * allocate space for base edesc and hw desc commands, ahash_update_ctx()
827 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ahash_update_ctx()
852 desc = edesc->hw_desc; ahash_update_ctx()
853 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | ahash_update_ctx()
864 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + ahash_update_ctx()
867 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); ahash_update_ctx()
871 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_update_ctx()
872 desc_bytes(desc), 1); ahash_update_ctx()
875 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); ahash_update_ctx()
912 u32 *sh_desc = ctx->sh_desc_fin, *desc; ahash_final_ctx() local
923 /* allocate space for base edesc and hw desc commands, link tables */ ahash_final_ctx()
932 desc = edesc->hw_desc; ahash_final_ctx()
933 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); ahash_final_ctx()
940 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ahash_final_ctx()
957 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, ahash_final_ctx()
960 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_final_ctx()
969 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_final_ctx()
972 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); ahash_final_ctx()
995 u32 *sh_desc = ctx->sh_desc_finup, *desc; ahash_finup_ctx() local
1009 /* allocate space for base edesc and hw desc commands, link tables */ ahash_finup_ctx()
1018 desc = edesc->hw_desc; ahash_finup_ctx()
1019 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); ahash_finup_ctx()
1026 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ahash_finup_ctx()
1045 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + ahash_finup_ctx()
1048 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_finup_ctx()
1057 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_finup_ctx()
1060 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); ahash_finup_ctx()
1078 u32 *sh_desc = ctx->sh_desc_digest, *desc; ahash_digest() local
1092 /* allocate space for base edesc and hw desc commands, link tables */ ahash_digest()
1105 desc = edesc->hw_desc; ahash_digest()
1106 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); ahash_digest()
1122 append_seq_in_ptr(desc, src_dma, req->nbytes, options); ahash_digest()
1124 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_digest()
1133 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_digest()
1136 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); ahash_digest()
1158 u32 *sh_desc = ctx->sh_desc_digest, *desc; ahash_final_no_ctx() local
1165 /* allocate space for base edesc and hw desc commands, link tables */ ahash_final_no_ctx()
1174 desc = edesc->hw_desc; ahash_final_no_ctx()
1175 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); ahash_final_no_ctx()
1183 append_seq_in_ptr(desc, state->buf_dma, buflen, 0); ahash_final_no_ctx()
1185 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_final_no_ctx()
1195 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_final_no_ctx()
1198 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); ahash_final_no_ctx()
1226 u32 *desc, *sh_desc = ctx->sh_desc_update_first; ahash_update_no_ctx() local
1241 * allocate space for base edesc and hw desc commands, ahash_update_no_ctx()
1271 desc = edesc->hw_desc; ahash_update_no_ctx()
1272 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | ahash_update_no_ctx()
1283 append_seq_in_ptr(desc, edesc->sec4_sg_dma, to_hash, LDST_SGF); ahash_update_no_ctx()
1285 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); ahash_update_no_ctx()
1291 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_update_no_ctx()
1292 desc_bytes(desc), 1); ahash_update_no_ctx()
1295 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); ahash_update_no_ctx()
1336 u32 *sh_desc = ctx->sh_desc_digest, *desc; ahash_finup_no_ctx() local
1349 /* allocate space for base edesc and hw desc commands, link tables */ ahash_finup_no_ctx()
1358 desc = edesc->hw_desc; ahash_finup_no_ctx()
1359 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | HDR_REVERSE); ahash_finup_no_ctx()
1379 append_seq_in_ptr(desc, edesc->sec4_sg_dma, buflen + ahash_finup_no_ctx()
1382 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_finup_no_ctx()
1391 DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc), 1); ahash_finup_no_ctx()
1394 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); ahash_finup_no_ctx()
1418 u32 *sh_desc = ctx->sh_desc_update_first, *desc; ahash_update_first() local
1437 * allocate space for base edesc and hw desc commands, ahash_update_first()
1477 desc = edesc->hw_desc; ahash_update_first()
1478 init_job_desc_shared(desc, ptr, sh_len, HDR_SHARE_DEFER | ahash_update_first()
1481 append_seq_in_ptr(desc, src_dma, to_hash, options); ahash_update_first()
1483 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); ahash_update_first()
1489 DUMP_PREFIX_ADDRESS, 16, 4, desc, ahash_update_first()
1490 desc_bytes(desc), 1); ahash_update_first()
1493 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, ahash_update_first()
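ahash_set_sh_desc() above shows how every CAAM shared descriptor is prepared: build the command sequence into a per-context buffer, then DMA-map it so later job descriptors can reference it by bus address. The update descriptor, condensed from the hits above (error path as in the driver):

    desc = ctx->sh_desc_update;
    init_sh_desc(desc, HDR_SHARE_SERIAL);

    /* import the running hash context, do one UPDATE pass, export it */
    append_cmd(desc, CMD_SEQ_LOAD | LDST_SRCDST_BYTE_CONTEXT |
               LDST_CLASS_2_CCB | ctx->ctx_len);
    append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE |
                     OP_ALG_ENCRYPT);
    ahash_append_load_str(desc, ctx->ctx_len);

    ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
                                             DMA_TO_DEVICE);
    if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma))
        return -ENOMEM;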
H A Dcaamrng.c20 * A job desc looks like this:
99 static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context) rng_done() argument
103 bd = (struct buf_data *)((char *)desc - rng_done()
125 u32 *desc = bd->hw_desc; submit_job() local
130 err = caam_jr_enqueue(jrdev, desc, rng_done, ctx); submit_job()
195 u32 *desc = ctx->sh_desc; rng_create_sh_desc() local
197 init_sh_desc(desc, HDR_SHARE_SERIAL); rng_create_sh_desc()
200 append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD); rng_create_sh_desc()
203 append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG); rng_create_sh_desc()
206 append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE); rng_create_sh_desc()
208 ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), rng_create_sh_desc()
216 desc, desc_bytes(desc), 1); rng_create_sh_desc()
225 u32 *desc = bd->hw_desc; rng_create_job_desc() local
228 init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER | rng_create_job_desc()
237 append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0); rng_create_job_desc()
239 print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4, rng_create_job_desc()
240 desc, desc_bytes(desc), 1); rng_create_job_desc()
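caamrng.c splits the work the same way: rng_create_sh_desc() programs the RNG once in a shared descriptor, and rng_create_job_desc() gives each output buffer a minimal job descriptor that points at it. A sketch of the per-buffer part, assuming the shared descriptor is already mapped at ctx->sh_desc_dma (desc_len() returning its length in command words):

    u32 *desc = bd->hw_desc;
    int sh_len = desc_len(ctx->sh_desc);

    /* defer to the shared descriptor for the actual RNG operation */
    init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len,
                         HDR_SHARE_DEFER | HDR_REVERSE);
    bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
    append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);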
/linux-4.4.14/include/linux/
H A Dirqnr.h11 # define for_each_irq_desc(irq, desc) \
12 for (irq = 0, desc = irq_to_desc(irq); irq < nr_irqs; \
13 irq++, desc = irq_to_desc(irq)) \
14 if (!desc) \
19 # define for_each_irq_desc_reverse(irq, desc) \
20 for (irq = nr_irqs - 1, desc = irq_to_desc(irq); irq >= 0; \
21 irq--, desc = irq_to_desc(irq)) \
22 if (!desc) \
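The trailing "if (!desc)" above continues with "; else" on the next (non-matching) source line, so holes in a sparse IRQ space simply skip an iteration and the loop body only ever sees a valid descriptor:

    unsigned int irq;
    struct irq_desc *desc;

    for_each_irq_desc(irq, desc) {
        /* desc is non-NULL here; NULL slots were filtered out */
        if (irq_desc_has_action(desc))
            pr_info("irq %u has a handler installed\n", irq);
    }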
H A Dirqdesc.h104 static inline unsigned int irq_desc_get_irq(struct irq_desc *desc) irq_desc_get_irq() argument
106 return desc->irq_data.irq; irq_desc_get_irq()
109 static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc) irq_desc_get_irq_data() argument
111 return &desc->irq_data; irq_desc_get_irq_data()
114 static inline struct irq_chip *irq_desc_get_chip(struct irq_desc *desc) irq_desc_get_chip() argument
116 return desc->irq_data.chip; irq_desc_get_chip()
119 static inline void *irq_desc_get_chip_data(struct irq_desc *desc) irq_desc_get_chip_data() argument
121 return desc->irq_data.chip_data; irq_desc_get_chip_data()
124 static inline void *irq_desc_get_handler_data(struct irq_desc *desc) irq_desc_get_handler_data() argument
126 return desc->irq_common_data.handler_data; irq_desc_get_handler_data()
129 static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc) irq_desc_get_msi_desc() argument
131 return desc->irq_common_data.msi_desc; irq_desc_get_msi_desc()
138 static inline void generic_handle_irq_desc(struct irq_desc *desc) generic_handle_irq_desc() argument
140 desc->handle_irq(desc); generic_handle_irq_desc()
163 static inline int irq_desc_has_action(struct irq_desc *desc) irq_desc_has_action() argument
165 return desc->action != NULL; irq_desc_has_action()
186 struct irq_desc *desc = irq_data_to_desc(data); irq_set_handler_locked() local
188 desc->handle_irq = handler; irq_set_handler_locked()
207 struct irq_desc *desc = irq_data_to_desc(data); irq_set_chip_handler_name_locked() local
209 desc->handle_irq = handler; irq_set_chip_handler_name_locked()
210 desc->name = name; irq_set_chip_handler_name_locked()
216 struct irq_desc *desc; irq_balancing_disabled() local
218 desc = irq_to_desc(irq); irq_balancing_disabled()
219 return desc->status_use_accessors & IRQ_NO_BALANCING_MASK; irq_balancing_disabled()
224 struct irq_desc *desc; irq_is_percpu() local
226 desc = irq_to_desc(irq); irq_is_percpu()
227 return desc->status_use_accessors & IRQ_PER_CPU; irq_is_percpu()
233 struct irq_desc *desc = irq_to_desc(irq); irq_set_lockdep_class() local
235 if (desc) irq_set_lockdep_class()
236 lockdep_set_class(&desc->lock, class); irq_set_lockdep_class()
243 struct irq_desc *desc; __irq_set_preflow_handler() local
245 desc = irq_to_desc(irq); __irq_set_preflow_handler()
246 desc->preflow_handler = handler; __irq_set_preflow_handler()
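These accessors exist so chained/flow handlers never poke at struct irq_desc internals directly. A hypothetical GPIO demux handler built from them (the my_gpio_bank layout is invented for illustration):

    /* signature matches irq_flow_handler_t from irqhandler.h below */
    static void my_gpio_demux_handler(struct irq_desc *desc)
    {
        struct my_gpio_bank *bank = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);

        chained_irq_enter(chip, desc);
        /* ... read bank's pending mask, generic_handle_irq() each bit ... */
        chained_irq_exit(chip, desc);
    }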
H A Delfnote.h9 * Each note has three parts: a name, a type and a desc. The name is
15 * "desc" field is the actual data. There are no constraints on the
16 * desc field's contents, though typically they're fairly small.
34 * desc data with appropriate padding. The 'desctype' argument is the
55 #define ELFNOTE(name, type, desc) \
57 desc ; \
64 * Elf{32,64}_Nhdr, but includes the name and desc data. The size and
65 * type of name and desc depend on the macro arguments. "name" must
66 * be a literal string, and "desc" must be passed by value. You may
71 #define _ELFNOTE(size, name, unique, type, desc) \
76 typeof(desc) _desc \
85 sizeof(desc), \
89 desc \
91 #define ELFNOTE(size, name, type, desc) \
92 _ELFNOTE(size, name, __LINE__, type, desc)
94 #define ELFNOTE32(name, type, desc) ELFNOTE(32, name, type, desc)
95 #define ELFNOTE64(name, type, desc) ELFNOTE(64, name, type, desc)
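By way of example, the C flavour can drop a note into its own .note.<name> section from file scope; the vendor name, type value, and payload here are made up for illustration:

    #include <linux/elfnote.h>

    /* emits a ".note.MyVendor" section: namesz/descsz/type header,
     * then "MyVendor", then the 32-bit payload, each word-aligned */
    ELFNOTE32("MyVendor", 0x100, 0x00010002);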
H A Dirqhandler.h11 typedef void (*irq_flow_handler_t)(struct irq_desc *desc);
/linux-4.4.14/drivers/usb/class/
H A Dcdc-wdm.c115 struct wdm_device *desc; wdm_find_device() local
118 list_for_each_entry(desc, &wdm_device_list, device_list) wdm_find_device()
119 if (desc->intf == intf) wdm_find_device()
121 desc = NULL; wdm_find_device()
125 return desc; wdm_find_device()
130 struct wdm_device *desc; wdm_find_device_by_minor() local
133 list_for_each_entry(desc, &wdm_device_list, device_list) wdm_find_device_by_minor()
134 if (desc->intf->minor == minor) wdm_find_device_by_minor()
136 desc = NULL; wdm_find_device_by_minor()
140 return desc; wdm_find_device_by_minor()
146 struct wdm_device *desc; wdm_out_callback() local
147 desc = urb->context; wdm_out_callback()
148 spin_lock(&desc->iuspin); wdm_out_callback()
149 desc->werr = urb->status; wdm_out_callback()
150 spin_unlock(&desc->iuspin); wdm_out_callback()
151 kfree(desc->outbuf); wdm_out_callback()
152 desc->outbuf = NULL; wdm_out_callback()
153 clear_bit(WDM_IN_USE, &desc->flags); wdm_out_callback()
154 wake_up(&desc->wait); wdm_out_callback()
159 struct wdm_device *desc = urb->context; wdm_in_callback() local
163 spin_lock(&desc->iuspin); wdm_in_callback()
164 clear_bit(WDM_RESPONDING, &desc->flags); wdm_in_callback()
169 dev_dbg(&desc->intf->dev, wdm_in_callback()
173 dev_dbg(&desc->intf->dev, wdm_in_callback()
177 dev_dbg(&desc->intf->dev, wdm_in_callback()
181 dev_err(&desc->intf->dev, wdm_in_callback()
185 dev_err(&desc->intf->dev, wdm_in_callback()
191 desc->rerr = status; wdm_in_callback()
192 if (length + desc->length > desc->wMaxCommand) { wdm_in_callback()
194 set_bit(WDM_OVERFLOW, &desc->flags); wdm_in_callback()
197 if (!test_bit(WDM_OVERFLOW, &desc->flags)) { wdm_in_callback()
198 memmove(desc->ubuf + desc->length, desc->inbuf, length); wdm_in_callback()
199 desc->length += length; wdm_in_callback()
200 desc->reslength = length; wdm_in_callback()
204 wake_up(&desc->wait); wdm_in_callback()
206 set_bit(WDM_READ, &desc->flags); wdm_in_callback()
207 spin_unlock(&desc->iuspin); wdm_in_callback()
215 struct wdm_device *desc; wdm_int_callback() local
218 desc = urb->context; wdm_int_callback()
219 dr = (struct usb_cdc_notification *)desc->sbuf; wdm_int_callback()
228 set_bit(WDM_INT_STALL, &desc->flags); wdm_int_callback()
229 dev_err(&desc->intf->dev, "Stall on int endpoint\n"); wdm_int_callback()
232 dev_err(&desc->intf->dev, wdm_int_callback()
239 dev_err(&desc->intf->dev, "wdm_int_callback - %d bytes\n", wdm_int_callback()
246 dev_dbg(&desc->intf->dev, wdm_int_callback()
253 dev_dbg(&desc->intf->dev, wdm_int_callback()
258 dev_dbg(&desc->intf->dev, "SPEED_CHANGE received (len %u)", wdm_int_callback()
262 clear_bit(WDM_POLL_RUNNING, &desc->flags); wdm_int_callback()
263 dev_err(&desc->intf->dev, wdm_int_callback()
271 spin_lock(&desc->iuspin); wdm_int_callback()
272 responding = test_and_set_bit(WDM_RESPONDING, &desc->flags); wdm_int_callback()
273 if (!desc->resp_count++ && !responding wdm_int_callback()
274 && !test_bit(WDM_DISCONNECTING, &desc->flags) wdm_int_callback()
275 && !test_bit(WDM_SUSPENDING, &desc->flags)) { wdm_int_callback()
276 rv = usb_submit_urb(desc->response, GFP_ATOMIC); wdm_int_callback()
277 dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d", wdm_int_callback()
280 spin_unlock(&desc->iuspin); wdm_int_callback()
282 clear_bit(WDM_RESPONDING, &desc->flags); wdm_int_callback()
287 rv = schedule_work(&desc->rxwork); wdm_int_callback()
289 dev_err(&desc->intf->dev, wdm_int_callback()
296 dev_err(&desc->intf->dev, wdm_int_callback()
302 static void kill_urbs(struct wdm_device *desc) kill_urbs() argument
305 usb_kill_urb(desc->command); kill_urbs()
306 usb_kill_urb(desc->validity); kill_urbs()
307 usb_kill_urb(desc->response); kill_urbs()
310 static void free_urbs(struct wdm_device *desc) free_urbs() argument
312 usb_free_urb(desc->validity); free_urbs()
313 usb_free_urb(desc->response); free_urbs()
314 usb_free_urb(desc->command); free_urbs()
317 static void cleanup(struct wdm_device *desc) cleanup() argument
319 kfree(desc->sbuf); cleanup()
320 kfree(desc->inbuf); cleanup()
321 kfree(desc->orq); cleanup()
322 kfree(desc->irq); cleanup()
323 kfree(desc->ubuf); cleanup()
324 free_urbs(desc); cleanup()
325 kfree(desc); cleanup()
333 struct wdm_device *desc = file->private_data; wdm_write() local
336 if (count > desc->wMaxCommand) wdm_write()
337 count = desc->wMaxCommand; wdm_write()
339 spin_lock_irq(&desc->iuspin); wdm_write()
340 we = desc->werr; wdm_write()
341 desc->werr = 0; wdm_write()
342 spin_unlock_irq(&desc->iuspin); wdm_write()
359 r = mutex_lock_interruptible(&desc->wlock); wdm_write()
364 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_write()
369 r = usb_autopm_get_interface(desc->intf); wdm_write()
376 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE, wdm_write()
377 &desc->flags)); wdm_write()
379 if (test_bit(WDM_IN_USE, &desc->flags)) wdm_write()
382 if (test_bit(WDM_RESETTING, &desc->flags)) wdm_write()
390 req = desc->orq; wdm_write()
392 desc->command, wdm_write()
393 interface_to_usbdev(desc->intf), wdm_write()
395 usb_sndctrlpipe(interface_to_usbdev(desc->intf), 0), wdm_write()
400 desc wdm_write()
407 req->wIndex = desc->inum; /* already converted */ wdm_write()
409 set_bit(WDM_IN_USE, &desc->flags); wdm_write()
410 desc->outbuf = buf; wdm_write()
412 rv = usb_submit_urb(desc->command, GFP_KERNEL); wdm_write()
414 desc->outbuf = NULL; wdm_write()
415 clear_bit(WDM_IN_USE, &desc->flags); wdm_write()
416 dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); wdm_write()
420 dev_dbg(&desc->intf->dev, "Tx URB has been submitted index=%d", wdm_write()
424 usb_autopm_put_interface(desc->intf); wdm_write()
425 mutex_unlock(&desc->wlock); wdm_write()
430 usb_autopm_put_interface(desc->intf); wdm_write()
432 mutex_unlock(&desc->wlock); wdm_write()
442 * Called with desc->iuspin locked
444 static int clear_wdm_read_flag(struct wdm_device *desc) clear_wdm_read_flag() argument
448 clear_bit(WDM_READ, &desc->flags); clear_wdm_read_flag()
451 if (!desc->resp_count || !--desc->resp_count) clear_wdm_read_flag()
454 set_bit(WDM_RESPONDING, &desc->flags); clear_wdm_read_flag()
455 spin_unlock_irq(&desc->iuspin); clear_wdm_read_flag()
456 rv = usb_submit_urb(desc->response, GFP_KERNEL); clear_wdm_read_flag()
457 spin_lock_irq(&desc->iuspin); clear_wdm_read_flag()
459 dev_err(&desc->intf->dev, clear_wdm_read_flag()
463 clear_bit(WDM_RESPONDING, &desc->flags); clear_wdm_read_flag()
464 desc->resp_count = 0; clear_wdm_read_flag()
475 struct wdm_device *desc = file->private_data; wdm_read() local
478 rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */ wdm_read()
482 cntr = ACCESS_ONCE(desc->length); wdm_read()
484 desc->read = 0; wdm_read()
486 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_read()
490 if (test_bit(WDM_OVERFLOW, &desc->flags)) { wdm_read()
491 clear_bit(WDM_OVERFLOW, &desc->flags); wdm_read()
497 if (!test_bit(WDM_READ, &desc->flags)) { wdm_read()
503 rv = wait_event_interruptible(desc->wait, wdm_read()
504 test_bit(WDM_READ, &desc->flags)); wdm_read()
508 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_read()
512 if (test_bit(WDM_RESETTING, &desc->flags)) { wdm_read()
516 usb_mark_last_busy(interface_to_usbdev(desc->intf)); wdm_read()
522 spin_lock_irq(&desc->iuspin); wdm_read()
524 if (desc->rerr) { /* read completed, error happened */ wdm_read()
525 rv = usb_translate_errors(desc->rerr); wdm_read()
526 desc->rerr = 0; wdm_read()
527 spin_unlock_irq(&desc->iuspin); wdm_read()
534 if (!test_bit(WDM_READ, &desc->flags)) { /* lost race */ wdm_read()
535 spin_unlock_irq(&desc->iuspin); wdm_read()
539 if (!desc->reslength) { /* zero length read */ wdm_read()
540 dev_dbg(&desc->intf->dev, "%s: zero length - clearing WDM_READ\n", __func__); wdm_read()
541 rv = clear_wdm_read_flag(desc); wdm_read()
542 spin_unlock_irq(&desc->iuspin); wdm_read()
547 cntr = desc->length; wdm_read()
548 spin_unlock_irq(&desc->iuspin); wdm_read()
553 rv = copy_to_user(buffer, desc->ubuf, cntr); wdm_read()
559 spin_lock_irq(&desc->iuspin); wdm_read()
561 for (i = 0; i < desc->length - cntr; i++) wdm_read()
562 desc->ubuf[i] = desc->ubuf[i + cntr]; wdm_read()
564 desc->length -= cntr; wdm_read()
566 if (!desc->length) wdm_read()
567 clear_wdm_read_flag(desc); wdm_read()
568 spin_unlock_irq(&desc->iuspin); wdm_read()
572 mutex_unlock(&desc->rlock); wdm_read()
578 struct wdm_device *desc = file->private_data; wdm_flush() local
580 wait_event(desc->wait, !test_bit(WDM_IN_USE, &desc->flags)); wdm_flush()
582 /* cannot dereference desc->intf if WDM_DISCONNECTING */ wdm_flush()
583 if (desc->werr < 0 && !test_bit(WDM_DISCONNECTING, &desc->flags)) wdm_flush()
584 dev_err(&desc->intf->dev, "Error in flush path: %d\n", wdm_flush()
585 desc->werr); wdm_flush()
587 return usb_translate_errors(desc->werr); wdm_flush()
592 struct wdm_device *desc = file->private_data; wdm_poll() local
596 spin_lock_irqsave(&desc->iuspin, flags); wdm_poll()
597 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_poll()
599 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_poll()
602 if (test_bit(WDM_READ, &desc->flags)) wdm_poll()
604 if (desc->rerr || desc->werr) wdm_poll()
606 if (!test_bit(WDM_IN_USE, &desc->flags)) wdm_poll()
608 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_poll()
610 poll_wait(file, &desc->wait, wait); wdm_poll()
621 struct wdm_device *desc; wdm_open() local
624 desc = wdm_find_device_by_minor(minor); wdm_open()
625 if (!desc) wdm_open()
628 intf = desc->intf; wdm_open()
629 if (test_bit(WDM_DISCONNECTING, &desc->flags)) wdm_open()
631 file->private_data = desc; wdm_open()
633 rv = usb_autopm_get_interface(desc->intf); wdm_open()
635 dev_err(&desc->intf->dev, "Error autopm - %d\n", rv); wdm_open()
639 /* using write lock to protect desc->count */ wdm_open()
640 mutex_lock(&desc->wlock); wdm_open()
641 if (!desc->count++) { wdm_open()
642 desc->werr = 0; wdm_open()
643 desc->rerr = 0; wdm_open()
644 rv = usb_submit_urb(desc->validity, GFP_KERNEL); wdm_open()
646 desc->count--; wdm_open()
647 dev_err(&desc->intf->dev, wdm_open()
654 mutex_unlock(&desc->wlock); wdm_open()
655 if (desc->count == 1) wdm_open()
656 desc->manage_power(intf, 1); wdm_open()
657 usb_autopm_put_interface(desc->intf); wdm_open()
665 struct wdm_device *desc = file->private_data; wdm_release() local
669 /* using write lock to protect desc->count */ wdm_release()
670 mutex_lock(&desc->wlock); wdm_release()
671 desc->count--; wdm_release()
672 mutex_unlock(&desc->wlock); wdm_release()
674 if (!desc->count) { wdm_release()
675 if (!test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_release()
676 dev_dbg(&desc->intf->dev, "wdm_release: cleanup"); wdm_release()
677 kill_urbs(desc); wdm_release()
678 spin_lock_irq(&desc->iuspin); wdm_release()
679 desc->resp_count = 0; wdm_release()
680 spin_unlock_irq(&desc->iuspin); wdm_release()
681 desc->manage_power(desc->intf, 0); wdm_release()
683 /* must avoid dev_printk here as desc->intf is invalid */ wdm_release()
685 cleanup(desc); wdm_release()
694 struct wdm_device *desc = file->private_data; wdm_ioctl() local
699 if (copy_to_user((void __user *)arg, &desc->wMaxCommand, sizeof(desc->wMaxCommand))) wdm_ioctl()
730 struct wdm_device *desc = container_of(work, struct wdm_device, rxwork); wdm_rxwork() local
735 spin_lock_irqsave(&desc->iuspin, flags); wdm_rxwork()
736 if (test_bit(WDM_DISCONNECTING, &desc->flags)) { wdm_rxwork()
737 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_rxwork()
739 responding = test_and_set_bit(WDM_RESPONDING, &desc->flags); wdm_rxwork()
740 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_rxwork()
742 rv = usb_submit_urb(desc->response, GFP_KERNEL); wdm_rxwork()
744 spin_lock_irqsave(&desc->iuspin, flags); wdm_rxwork()
745 clear_bit(WDM_RESPONDING, &desc->flags); wdm_rxwork()
746 if (!test_bit(WDM_DISCONNECTING, &desc->flags)) wdm_rxwork()
747 schedule_work(&desc->rxwork); wdm_rxwork()
748 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_rxwork()
759 struct wdm_device *desc; wdm_create() local
761 desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL); wdm_create()
762 if (!desc) wdm_create()
764 INIT_LIST_HEAD(&desc->device_list); wdm_create()
765 mutex_init(&desc->rlock); wdm_create()
766 mutex_init(&desc->wlock); wdm_create()
767 spin_lock_init(&desc->iuspin); wdm_create()
768 init_waitqueue_head(&desc->wait); wdm_create()
769 desc->wMaxCommand = bufsize; wdm_create()
771 desc->inum = cpu_to_le16((u16)intf->cur_altsetting->desc.bInterfaceNumber); wdm_create()
772 desc->intf = intf; wdm_create()
773 INIT_WORK(&desc->rxwork, wdm_rxwork); wdm_create()
779 desc->wMaxPacketSize = usb_endpoint_maxp(ep); wdm_create()
781 desc->orq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); wdm_create()
782 if (!desc->orq) wdm_create()
784 desc->irq = kmalloc(sizeof(struct usb_ctrlrequest), GFP_KERNEL); wdm_create()
785 if (!desc->irq) wdm_create()
788 desc->validity = usb_alloc_urb(0, GFP_KERNEL); wdm_create()
789 if (!desc->validity) wdm_create()
792 desc->response = usb_alloc_urb(0, GFP_KERNEL); wdm_create()
793 if (!desc->response) wdm_create()
796 desc->command = usb_alloc_urb(0, GFP_KERNEL); wdm_create()
797 if (!desc->command) wdm_create()
800 desc->ubuf = kmalloc(desc->wMaxCommand, GFP_KERNEL); wdm_create()
801 if (!desc->ubuf) wdm_create()
804 desc->sbuf = kmalloc(desc->wMaxPacketSize, GFP_KERNEL); wdm_create()
805 if (!desc->sbuf) wdm_create()
808 desc->inbuf = kmalloc(desc->wMaxCommand, GFP_KERNEL); wdm_create()
809 if (!desc->inbuf) wdm_create()
813 desc->validity, wdm_create()
816 desc->sbuf, wdm_create()
817 desc->wMaxPacketSize, wdm_create()
819 desc, wdm_create()
823 desc->irq->bRequestType = (USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE); wdm_create()
824 desc->irq->bRequest = USB_CDC_GET_ENCAPSULATED_RESPONSE; wdm_create()
825 desc->irq->wValue = 0; wdm_create()
826 desc->irq->wIndex = desc->inum; /* already converted */ wdm_create()
827 desc->irq->wLength = cpu_to_le16(desc->wMaxCommand); wdm_create()
830 desc->response, wdm_create()
833 usb_rcvctrlpipe(interface_to_usbdev(desc->intf), 0), wdm_create()
834 (unsigned char *)desc->irq, wdm_create()
835 desc->inbuf, wdm_create()
836 desc->wMaxCommand, wdm_create()
838 desc wdm_create()
841 desc->manage_power = manage_power; wdm_create()
844 list_add(&desc->device_list, &wdm_device_list); wdm_create()
856 list_del(&desc->device_list); wdm_create()
858 cleanup(desc); wdm_create()
912 if (iface->desc.bNumEndpoints != 1) wdm_probe()
914 ep = &iface->endpoint[0].desc; wdm_probe()
960 struct wdm_device *desc; wdm_disconnect() local
964 desc = wdm_find_device(intf); wdm_disconnect()
968 spin_lock_irqsave(&desc->iuspin, flags); wdm_disconnect()
969 set_bit(WDM_DISCONNECTING, &desc->flags); wdm_disconnect()
970 set_bit(WDM_READ, &desc->flags); wdm_disconnect()
972 clear_bit(WDM_IN_USE, &desc->flags); wdm_disconnect()
973 spin_unlock_irqrestore(&desc->iuspin, flags); wdm_disconnect()
974 wake_up_all(&desc->wait); wdm_disconnect()
975 mutex_lock(&desc->rlock); wdm_disconnect()
976 mutex_lock(&desc->wlock); wdm_disconnect()
977 kill_urbs(desc); wdm_disconnect()
978 cancel_work_sync(&desc->rxwork); wdm_disconnect()
979 mutex_unlock(&desc->wlock); wdm_disconnect()
980 mutex_unlock(&desc->rlock); wdm_disconnect()
982 /* the desc->intf pointer used as list key is now invalid */ wdm_disconnect()
984 list_del(&desc->device_list); wdm_disconnect()
987 if (!desc->count) wdm_disconnect()
988 cleanup(desc); wdm_disconnect()
990 dev_dbg(&intf->dev, "%s: %d open files - postponing cleanup\n", __func__, desc->count); wdm_disconnect()
997 struct wdm_device *desc = wdm_find_device(intf); wdm_suspend() local
1000 dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor); wdm_suspend()
1004 mutex_lock(&desc->rlock); wdm_suspend()
1005 mutex_lock(&desc->wlock); wdm_suspend()
1007 spin_lock_irq(&desc->iuspin); wdm_suspend()
1010 (test_bit(WDM_IN_USE, &desc->flags) wdm_suspend()
1011 || test_bit(WDM_RESPONDING, &desc->flags))) { wdm_suspend()
1012 spin_unlock_irq(&desc->iuspin); wdm_suspend()
1016 set_bit(WDM_SUSPENDING, &desc->flags); wdm_suspend()
1017 spin_unlock_irq(&desc->iuspin); wdm_suspend()
1019 kill_urbs(desc); wdm_suspend()
1020 cancel_work_sync(&desc->rxwork); wdm_suspend()
1023 mutex_unlock(&desc->wlock); wdm_suspend()
1024 mutex_unlock(&desc->rlock); wdm_suspend()
1031 static int recover_from_urb_loss(struct wdm_device *desc) recover_from_urb_loss() argument
1035 if (desc->count) { recover_from_urb_loss()
1036 rv = usb_submit_urb(desc->validity, GFP_NOIO); recover_from_urb_loss()
1038 dev_err(&desc->intf->dev, recover_from_urb_loss()
1047 struct wdm_device *desc = wdm_find_device(intf); wdm_resume() local
1050 dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor); wdm_resume()
1052 clear_bit(WDM_SUSPENDING, &desc->flags); wdm_resume()
1053 rv = recover_from_urb_loss(desc); wdm_resume()
1061 struct wdm_device *desc = wdm_find_device(intf); wdm_pre_reset() local
1069 spin_lock_irq(&desc->iuspin); wdm_pre_reset()
1070 set_bit(WDM_RESETTING, &desc->flags); /* inform read/write */ wdm_pre_reset()
1071 set_bit(WDM_READ, &desc->flags); /* unblock read */ wdm_pre_reset()
1072 clear_bit(WDM_IN_USE, &desc->flags); /* unblock write */ wdm_pre_reset()
1073 desc->rerr = -EINTR; wdm_pre_reset()
1074 spin_unlock_irq(&desc->iuspin); wdm_pre_reset()
1075 wake_up_all(&desc->wait); wdm_pre_reset()
1076 mutex_lock(&desc->rlock); wdm_pre_reset()
1077 mutex_lock(&desc->wlock); wdm_pre_reset()
1078 kill_urbs(desc); wdm_pre_reset()
1079 cancel_work_sync(&desc->rxwork); wdm_pre_reset()
1085 struct wdm_device *desc = wdm_find_device(intf); wdm_post_reset() local
1088 clear_bit(WDM_OVERFLOW, &desc->flags); wdm_post_reset()
1089 clear_bit(WDM_RESETTING, &desc->flags); wdm_post_reset()
1090 rv = recover_from_urb_loss(desc); wdm_post_reset()
1091 mutex_unlock(&desc->wlock); wdm_post_reset()
1092 mutex_unlock(&desc->rlock); wdm_post_reset()
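From user space each probed interface appears as /dev/cdc-wdmN: one write() submits one encapsulated command (wdm_write), a read() blocks until wdm_in_callback() has buffered the response, and the ioctl path in wdm_ioctl() above reports wMaxCommand. A minimal consumer, assuming IOCTL_WDM_MAX_COMMAND from linux/usb/cdc-wdm.h and a device-specific command payload (placeholder bytes here):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/types.h>
    #include <linux/usb/cdc-wdm.h>

    int main(void)
    {
        __u16 max = 0;
        char buf[512];
        ssize_t n;
        int fd = open("/dev/cdc-wdm0", O_RDWR);

        if (fd < 0)
            return 1;
        if (ioctl(fd, IOCTL_WDM_MAX_COMMAND, &max) == 0)
            printf("max command size: %u\n", max);

        write(fd, "\x00\x00\x00\x00", 4);  /* placeholder command */
        n = read(fd, buf, sizeof(buf));    /* blocks for the response */
        close(fd);
        return n < 0;
    }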
/linux-4.4.14/drivers/scsi/fnic/
H A Dfnic_res.h37 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); fnic_queue_wq_desc() local
39 wq_enet_desc_enc(desc, fnic_queue_wq_desc()
61 struct wq_enet_desc *desc = vnic_wq_next_desc(wq); fnic_queue_wq_eth_desc() local
63 wq_enet_desc_enc(desc, fnic_queue_wq_eth_desc()
91 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_icmnd_16() local
93 desc->hdr.type = FCPIO_ICMND_16; /* enum fcpio_type */ fnic_queue_wq_copy_desc_icmnd_16()
94 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_icmnd_16()
95 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_icmnd_16()
96 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_icmnd_16()
98 desc->u.icmnd_16.lunmap_id = lunmap_id; /* index into lunmap table */ fnic_queue_wq_copy_desc_icmnd_16()
99 desc->u.icmnd_16.special_req_flags = spl_flags; /* exch req flags */ fnic_queue_wq_copy_desc_icmnd_16()
100 desc->u.icmnd_16._resvd0[0] = 0; /* reserved */ fnic_queue_wq_copy_desc_icmnd_16()
101 desc->u.icmnd_16._resvd0[1] = 0; /* reserved */ fnic_queue_wq_copy_desc_icmnd_16()
102 desc->u.icmnd_16._resvd0[2] = 0; /* reserved */ fnic_queue_wq_copy_desc_icmnd_16()
103 desc->u.icmnd_16.sgl_cnt = sgl_cnt; /* scatter-gather list count */ fnic_queue_wq_copy_desc_icmnd_16()
104 desc->u.icmnd_16.sense_len = sense_len; /* sense buffer length */ fnic_queue_wq_copy_desc_icmnd_16()
105 desc->u.icmnd_16.sgl_addr = sgl_addr; /* scatter-gather list addr */ fnic_queue_wq_copy_desc_icmnd_16()
106 desc->u.icmnd_16.sense_addr = sns_addr; /* sense buffer address */ fnic_queue_wq_copy_desc_icmnd_16()
107 desc->u.icmnd_16.crn = crn; /* SCSI Command Reference No.*/ fnic_queue_wq_copy_desc_icmnd_16()
108 desc->u.icmnd_16.pri_ta = pri_ta; /* SCSI Pri & Task attribute */ fnic_queue_wq_copy_desc_icmnd_16()
109 desc->u.icmnd_16._resvd1 = 0; /* reserved: should be 0 */ fnic_queue_wq_copy_desc_icmnd_16()
110 desc->u.icmnd_16.flags = flags; /* command flags */ fnic_queue_wq_copy_desc_icmnd_16()
111 memset(desc->u.icmnd_16.scsi_cdb, 0, CDB_16); fnic_queue_wq_copy_desc_icmnd_16()
112 memcpy(desc->u.icmnd_16.scsi_cdb, scsi_cdb, cdb_len); /* SCSI CDB */ fnic_queue_wq_copy_desc_icmnd_16()
113 desc->u.icmnd_16.data_len = data_len; /* length of data expected */ fnic_queue_wq_copy_desc_icmnd_16()
114 memcpy(desc->u.icmnd_16.lun, lun, LUN_ADDRESS); /* LUN address */ fnic_queue_wq_copy_desc_icmnd_16()
115 desc->u.icmnd_16._resvd2 = 0; /* reserved */ fnic_queue_wq_copy_desc_icmnd_16()
116 hton24(desc->u.icmnd_16.d_id, d_id); /* FC vNIC only: Target D_ID */ fnic_queue_wq_copy_desc_icmnd_16()
117 desc->u.icmnd_16.mss = mss; /* FC vNIC only: max burst */ fnic_queue_wq_copy_desc_icmnd_16()
118 desc->u.icmnd_16.r_a_tov = ratov; /*FC vNIC only: Res. Alloc Timeout */ fnic_queue_wq_copy_desc_icmnd_16()
119 desc->u.icmnd_16.e_d_tov = edtov; /*FC vNIC only: Err Detect Timeout */ fnic_queue_wq_copy_desc_icmnd_16()
130 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_itmf() local
132 desc->hdr.type = FCPIO_ITMF; /* enum fcpio_type */ fnic_queue_wq_copy_desc_itmf()
133 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_itmf()
134 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_itmf()
135 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_itmf()
137 desc->u.itmf.lunmap_id = lunmap_id; /* index into lunmap table */ fnic_queue_wq_copy_desc_itmf()
138 desc->u.itmf.tm_req = tm_req; /* SCSI Task Management request */ fnic_queue_wq_copy_desc_itmf()
139 desc->u.itmf.t_tag = tm_id; /* tag of fcpio to be aborted */ fnic_queue_wq_copy_desc_itmf()
140 desc->u.itmf._resvd = 0; fnic_queue_wq_copy_desc_itmf()
141 memcpy(desc->u.itmf.lun, lun, LUN_ADDRESS); /* LUN address */ fnic_queue_wq_copy_desc_itmf()
142 desc->u.itmf._resvd1 = 0; fnic_queue_wq_copy_desc_itmf()
143 hton24(desc->u.itmf.d_id, d_id); /* FC vNIC only: Target D_ID */ fnic_queue_wq_copy_desc_itmf()
144 desc->u.itmf.r_a_tov = r_a_tov; /* FC vNIC only: R_A_TOV in msec */ fnic_queue_wq_copy_desc_itmf()
145 desc->u.itmf.e_d_tov = e_d_tov; /* FC vNIC only: E_D_TOV in msec */ fnic_queue_wq_copy_desc_itmf()
154 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_flogi_reg() local
156 desc->hdr.type = FCPIO_FLOGI_REG; /* enum fcpio_type */ fnic_queue_wq_copy_desc_flogi_reg()
157 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_flogi_reg()
158 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_flogi_reg()
159 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_flogi_reg()
161 desc->u.flogi_reg.format = format; fnic_queue_wq_copy_desc_flogi_reg()
162 desc->u.flogi_reg._resvd = 0; fnic_queue_wq_copy_desc_flogi_reg()
163 hton24(desc->u.flogi_reg.s_id, s_id); fnic_queue_wq_copy_desc_flogi_reg()
164 memcpy(desc->u.flogi_reg.gateway_mac, gw_mac, ETH_ALEN); fnic_queue_wq_copy_desc_flogi_reg()
174 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_fip_reg() local
176 desc->hdr.type = FCPIO_FLOGI_FIP_REG; /* enum fcpio_type */ fnic_queue_wq_copy_desc_fip_reg()
177 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_fip_reg()
178 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_fip_reg()
179 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_fip_reg()
181 desc->u.flogi_fip_reg._resvd0 = 0; fnic_queue_wq_copy_desc_fip_reg()
182 hton24(desc->u.flogi_fip_reg.s_id, s_id); fnic_queue_wq_copy_desc_fip_reg()
183 memcpy(desc->u.flogi_fip_reg.fcf_mac, fcf_mac, ETH_ALEN); fnic_queue_wq_copy_desc_fip_reg()
184 desc->u.flogi_fip_reg._resvd1 = 0; fnic_queue_wq_copy_desc_fip_reg()
185 desc->u.flogi_fip_reg.r_a_tov = r_a_tov; fnic_queue_wq_copy_desc_fip_reg()
186 desc->u.flogi_fip_reg.e_d_tov = e_d_tov; fnic_queue_wq_copy_desc_fip_reg()
187 memcpy(desc->u.flogi_fip_reg.ha_mac, ha_mac, ETH_ALEN); fnic_queue_wq_copy_desc_fip_reg()
188 desc->u.flogi_fip_reg._resvd2 = 0; fnic_queue_wq_copy_desc_fip_reg()
196 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_fw_reset() local
198 desc->hdr.type = FCPIO_RESET; /* enum fcpio_type */ fnic_queue_wq_copy_desc_fw_reset()
199 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_fw_reset()
200 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_fw_reset()
201 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_fw_reset()
210 struct fcpio_host_req *desc = vnic_wq_copy_next_desc(wq); fnic_queue_wq_copy_desc_lunmap() local
212 desc->hdr.type = FCPIO_LUNMAP_REQ; /* enum fcpio_type */ fnic_queue_wq_copy_desc_lunmap()
213 desc->hdr.status = 0; /* header status entry */ fnic_queue_wq_copy_desc_lunmap()
214 desc->hdr._resvd = 0; /* reserved */ fnic_queue_wq_copy_desc_lunmap()
215 desc->hdr.tag.u.req_id = req_id; /* id for this request */ fnic_queue_wq_copy_desc_lunmap()
217 desc->u.lunmap_req.addr = lunmap_addr; /* address of the buffer */ fnic_queue_wq_copy_desc_lunmap()
218 desc->u.lunmap_req.len = lunmap_len; /* len of the buffer */ fnic_queue_wq_copy_desc_lunmap()
227 struct rq_enet_desc *desc = vnic_rq_next_desc(rq); fnic_queue_rq_desc() local
229 rq_enet_desc_enc(desc, fnic_queue_rq_desc()
H A Dwq_enet_desc.h51 static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, wq_enet_desc_enc() argument
56 desc->address = cpu_to_le64(address); wq_enet_desc_enc()
57 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); wq_enet_desc_enc()
58 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << wq_enet_desc_enc()
60 desc->header_length_flags = cpu_to_le16( wq_enet_desc_enc()
67 desc->vlan_tag = cpu_to_le16(vlan_tag); wq_enet_desc_enc()
70 static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, wq_enet_desc_dec() argument
75 *address = le64_to_cpu(desc->address); wq_enet_desc_dec()
76 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; wq_enet_desc_dec()
77 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & wq_enet_desc_dec()
79 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> wq_enet_desc_dec()
81 *header_length = le16_to_cpu(desc->header_length_flags) & wq_enet_desc_dec()
83 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
85 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
87 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
89 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
91 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
93 *vlan_tag = le16_to_cpu(desc->vlan_tag); wq_enet_desc_dec()
H A Dcq_enet_desc.h31 static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, cq_enet_wq_desc_dec() argument
34 cq_desc_dec((struct cq_desc *)desc, type, cq_enet_wq_desc_dec()
93 static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, cq_enet_rq_desc_dec() argument
102 u16 completed_index_flags = le16_to_cpu(desc->completed_index_flags); cq_enet_rq_desc_dec()
104 le16_to_cpu(desc->q_number_rss_type_flags); cq_enet_rq_desc_dec()
105 u16 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); cq_enet_rq_desc_dec()
107 cq_desc_dec((struct cq_desc *)desc, type, cq_enet_rq_desc_dec()
124 *rss_hash = le32_to_cpu(desc->rss_hash); cq_enet_rq_desc_dec()
133 *vlan = le16_to_cpu(desc->vlan); cq_enet_rq_desc_dec()
136 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & cq_enet_rq_desc_dec()
138 *fcoe_fc_crc_ok = (desc->flags & cq_enet_rq_desc_dec()
140 *fcoe_enc_error = (desc->flags & cq_enet_rq_desc_dec()
142 *fcoe_eof = (u8)((desc->checksum_fcoe >> cq_enet_rq_desc_dec()
151 *checksum = le16_to_cpu(desc->checksum_fcoe); cq_enet_rq_desc_dec()
155 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; cq_enet_rq_desc_dec()
156 *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; cq_enet_rq_desc_dec()
157 *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; cq_enet_rq_desc_dec()
159 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; cq_enet_rq_desc_dec()
160 *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; cq_enet_rq_desc_dec()
161 *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; cq_enet_rq_desc_dec()
163 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; cq_enet_rq_desc_dec()
164 *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0; cq_enet_rq_desc_dec()
H A Drq_enet_desc.h41 static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, rq_enet_desc_enc() argument
44 desc->address = cpu_to_le64(address); rq_enet_desc_enc()
45 desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) | rq_enet_desc_enc()
49 static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, rq_enet_desc_dec() argument
52 *address = le64_to_cpu(desc->address); rq_enet_desc_dec()
53 *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK; rq_enet_desc_dec()
54 *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) & rq_enet_desc_dec()
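The enc/dec pairs in these headers are symmetric: enc packs host-order values into the little-endian layout the hardware reads, and dec reverses it. A round trip through the receive-queue variant (type constant as defined in rq_enet_desc.h; values arbitrary):

    struct rq_enet_desc desc;
    u64 addr;
    u16 len;
    u8 type;

    rq_enet_desc_enc(&desc, 0x10000ULL, RQ_ENET_TYPE_ONLY_SOP, 2048);
    rq_enet_desc_dec(&desc, &addr, &type, &len);
    /* addr == 0x10000, type == RQ_ENET_TYPE_ONLY_SOP, len == 2048 */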
H A Dvnic_cq_copy.h27 struct fcpio_fw_req *desc), vnic_cq_copy_service()
31 struct fcpio_fw_req *desc; vnic_cq_copy_service() local
35 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + vnic_cq_copy_service()
37 fcpio_color_dec(desc, &color); vnic_cq_copy_service()
41 if ((*q_service)(cq->vdev, cq->index, desc)) vnic_cq_copy_service()
50 desc = (struct fcpio_fw_req *)((u8 *)cq->ring.descs + vnic_cq_copy_service()
52 fcpio_color_dec(desc, &color); vnic_cq_copy_service()
23 vnic_cq_copy_service( struct vnic_cq *cq, int (*q_service)(struct vnic_dev *vdev, unsigned int index, struct fcpio_fw_req *desc), unsigned int work_to_do) vnic_cq_copy_service() argument
/linux-4.4.14/tools/perf/arch/x86/tests/
H A Darch-tests.c7 .desc = "x86 rdpmc test",
11 .desc = "Test converting perf time to TSC",
16 .desc = "Test dwarf unwind",
22 .desc = "Test x86 instruction decoder - new instructions",
27 .desc = "Test intel cqm nmi context read",
/linux-4.4.14/net/sunrpc/
H A Dsocklib.c22 * @desc: sk_buff copy helper
29 size_t xdr_skb_read_bits(struct xdr_skb_reader *desc, void *to, size_t len) xdr_skb_read_bits() argument
31 if (len > desc->count) xdr_skb_read_bits()
32 len = desc->count; xdr_skb_read_bits()
33 if (unlikely(skb_copy_bits(desc->skb, desc->offset, to, len))) xdr_skb_read_bits()
35 desc->count -= len; xdr_skb_read_bits()
36 desc->offset += len; xdr_skb_read_bits()
43 * @desc: sk_buff copy helper
49 static size_t xdr_skb_read_and_csum_bits(struct xdr_skb_reader *desc, void *to, size_t len) xdr_skb_read_and_csum_bits() argument
54 if (len > desc->count) xdr_skb_read_and_csum_bits()
55 len = desc->count; xdr_skb_read_and_csum_bits()
56 pos = desc->offset; xdr_skb_read_and_csum_bits()
57 csum2 = skb_copy_and_csum_bits(desc->skb, pos, to, len, 0); xdr_skb_read_and_csum_bits()
58 desc->csum = csum_block_add(desc->csum, csum2, pos); xdr_skb_read_and_csum_bits()
59 desc->count -= len; xdr_skb_read_and_csum_bits()
60 desc->offset += len; xdr_skb_read_and_csum_bits()
68 * @desc: sk_buff copy helper
72 ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct xdr_skb_reader *desc, xdr_skb_read_actor copy_actor) xdr_partial_copy_from_skb() argument
82 ret = copy_actor(desc, (char *)xdr->head[0].iov_base + base, len); xdr_partial_copy_from_skb()
84 if (ret != len || !desc->count) xdr_partial_copy_from_skb()
122 ret = copy_actor(desc, kaddr + base, len); xdr_partial_copy_from_skb()
127 ret = copy_actor(desc, kaddr, len); xdr_partial_copy_from_skb()
132 if (ret != len || !desc->count) xdr_partial_copy_from_skb()
139 copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); xdr_partial_copy_from_skb()
155 struct xdr_skb_reader desc; csum_partial_copy_to_xdr() local
157 desc.skb = skb; csum_partial_copy_to_xdr()
158 desc.offset = sizeof(struct udphdr); csum_partial_copy_to_xdr()
159 desc.count = skb->len - desc.offset; csum_partial_copy_to_xdr()
164 desc.csum = csum_partial(skb->data, desc.offset, skb->csum); csum_partial_copy_to_xdr()
165 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_and_csum_bits) < 0) csum_partial_copy_to_xdr()
167 if (desc.offset != skb->len) { csum_partial_copy_to_xdr()
169 csum2 = skb_checksum(skb, desc.offset, skb->len - desc.offset, 0); csum_partial_copy_to_xdr()
170 desc.csum = csum_block_add(desc.csum, csum2, desc.offset); csum_partial_copy_to_xdr()
172 if (desc.count) csum_partial_copy_to_xdr()
174 if (csum_fold(desc.csum)) csum_partial_copy_to_xdr()
181 if (xdr_partial_copy_from_skb(xdr, 0, &desc, xdr_skb_read_bits) < 0) csum_partial_copy_to_xdr()
183 if (desc.count) csum_partial_copy_to_xdr()
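xdr_partial_copy_from_skb() is deliberately parameterized by an actor, which is why csum_partial_copy_to_xdr() can switch between plain copying (xdr_skb_read_bits) and copy-plus-checksum (xdr_skb_read_and_csum_bits) over the same traversal. Any function with the xdr_skb_read_actor signature plugs in; a hypothetical actor that merely consumes bytes to measure a message would look like:

    /* hypothetical: advance the reader without copying anything */
    static size_t xdr_skb_count_bits(struct xdr_skb_reader *desc,
                                     void *to, size_t len)
    {
        if (len > desc->count)
            len = desc->count;
        desc->count -= len;
        desc->offset += len;
        return len;
    }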
/linux-4.4.14/drivers/infiniband/core/
H A Dpacker.c54 * @desc:Array of structure field descriptions
55 * @desc_len:Number of entries in @desc
60 * controlled by the array of fields in @desc.
62 void ib_pack(const struct ib_field *desc, ib_pack() argument
70 if (desc[i].size_bits <= 32) { ib_pack()
76 shift = 32 - desc[i].offset_bits - desc[i].size_bits; ib_pack()
77 if (desc[i].struct_size_bytes) ib_pack()
78 val = value_read(desc[i].struct_offset_bytes, ib_pack()
79 desc[i].struct_size_bytes, ib_pack()
84 mask = cpu_to_be32(((1ull << desc[i].size_bits) - 1) << shift); ib_pack()
85 addr = (__be32 *) buf + desc[i].offset_words; ib_pack()
87 } else if (desc[i].size_bits <= 64) { ib_pack()
93 shift = 64 - desc[i].offset_bits - desc[i].size_bits; ib_pack()
94 if (desc[i].struct_size_bytes) ib_pack()
95 val = value_read(desc[i].struct_offset_bytes, ib_pack()
96 desc[i].struct_size_bytes, ib_pack()
101 mask = cpu_to_be64((~0ull >> (64 - desc[i].size_bits)) << shift); ib_pack()
102 addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words); ib_pack()
105 if (desc[i].offset_bits % 8 || ib_pack()
106 desc[i].size_bits % 8) { ib_pack()
109 desc[i].field_name, desc[i].size_bits); ib_pack()
112 if (desc[i].struct_size_bytes) ib_pack()
113 memcpy(buf + desc[i].offset_words * 4 + ib_pack()
114 desc[i].offset_bits / 8, ib_pack()
115 structure + desc[i].struct_offset_bytes, ib_pack()
116 desc[i].size_bits / 8); ib_pack()
118 memset(buf + desc[i].offset_words * 4 + ib_pack()
119 desc[i].offset_bits / 8, ib_pack()
121 desc[i].size_bits / 8); ib_pack()
141 * @desc:Array of structure field descriptions
142 * @desc_len:Number of entries in @desc
147 * controlled by the array of fields in @desc.
149 void ib_unpack(const struct ib_field *desc, ib_unpack() argument
157 if (!desc[i].struct_size_bytes) ib_unpack()
160 if (desc[i].size_bits <= 32) { ib_unpack()
166 shift = 32 - desc[i].offset_bits - desc[i].size_bits; ib_unpack()
167 mask = ((1ull << desc[i].size_bits) - 1) << shift; ib_unpack()
168 addr = (__be32 *) buf + desc[i].offset_words; ib_unpack()
170 value_write(desc[i].struct_offset_bytes, ib_unpack()
171 desc[i].struct_size_bytes, ib_unpack()
174 } else if (desc[i].size_bits <= 64) { ib_unpack()
180 shift = 64 - desc[i].offset_bits - desc[i].size_bits; ib_unpack()
181 mask = (~0ull >> (64 - desc[i].size_bits)) << shift; ib_unpack()
182 addr = (__be64 *) buf + desc[i].offset_words; ib_unpack()
184 value_write(desc[i].struct_offset_bytes, ib_unpack()
185 desc[i].struct_size_bytes, ib_unpack()
189 if (desc[i].offset_bits % 8 || ib_unpack()
190 desc[i].size_bits % 8) { ib_unpack()
193 desc[i].field_name, desc[i].size_bits); ib_unpack()
196 memcpy(structure + desc[i].struct_offset_bytes, ib_unpack()
197 buf + desc[i].offset_words * 4 + ib_unpack()
198 desc[i].offset_bits / 8, ib_unpack()
199 desc[i].size_bits / 8); ib_unpack()
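In practice the caller supplies an array of struct ib_field entries, one per wire field, and ib_pack()/ib_unpack() walk it. A self-contained sketch for an invented two-field header (the multi-byte struct field stays big-endian, since the internal value_read()/value_write() helpers convert with be16_to_cpup() and friends):

    #include <rdma/ib_pack.h>

    struct my_hdr {
        u8     version;
        __be16 length;
    };

    static const struct ib_field my_hdr_table[] = {
        { .struct_offset_bytes = offsetof(struct my_hdr, version),
          .struct_size_bytes   = sizeof(u8),
          .offset_words        = 0,
          .offset_bits         = 0,
          .size_bits           = 8,
          .field_name          = "version" },
        { .struct_offset_bytes = offsetof(struct my_hdr, length),
          .struct_size_bytes   = sizeof(__be16),
          .offset_words        = 0,
          .offset_bits         = 8,
          .size_bits           = 16,
          .field_name          = "length" },
    };

    void my_hdr_pack(struct my_hdr *hdr, void *buf)
    {
        ib_pack(my_hdr_table, ARRAY_SIZE(my_hdr_table), hdr, buf);
    }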
/linux-4.4.14/arch/arm/mach-davinci/
H A Dmux.h18 #define MUX_CFG(soc, desc, muxreg, mode_offset, mode_mask, mux_mode, dbg)\
19 [soc##_##desc] = { \
20 .name = #desc, \
29 #define INT_CFG(soc, desc, mode_offset, mode_mask, mux_mode, dbg) \
30 [soc##_##desc] = { \
31 .name = #desc, \
40 #define EVT_CFG(soc, desc, mode_offset, mode_mask, mux_mode, dbg) \
41 [soc##_##desc] = { \
42 .name = #desc, \
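Each MUX_CFG() line expands to one designated initializer, indexed by the soc##_##desc enumerator, so per-SoC pin tables read one entry per line. A hypothetical fragment (register, offset, and mode values invented; the real tables live in the per-SoC mux.c files):

    static const struct mux_config dm644x_pins[] = {
    /*       soc      pin      reg  offset  mask  mode  dbg */
    MUX_CFG(DM644X,  HDIREN,   0,   16,     1,    1,    true)
    MUX_CFG(DM644X,  ATAEN,    0,   17,     1,    1,    true)
    };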
/linux-4.4.14/drivers/regulator/
H A Dhelpers.c36 ret = regmap_read(rdev->regmap, rdev->desc->enable_reg, &val); regulator_is_enabled_regmap()
40 val &= rdev->desc->enable_mask; regulator_is_enabled_regmap()
42 if (rdev->desc->enable_is_inverted) { regulator_is_enabled_regmap()
43 if (rdev->desc->enable_val) regulator_is_enabled_regmap()
44 return val != rdev->desc->enable_val; regulator_is_enabled_regmap()
47 if (rdev->desc->enable_val) regulator_is_enabled_regmap()
48 return val == rdev->desc->enable_val; regulator_is_enabled_regmap()
67 if (rdev->desc->enable_is_inverted) { regulator_enable_regmap()
68 val = rdev->desc->disable_val; regulator_enable_regmap()
70 val = rdev->desc->enable_val; regulator_enable_regmap()
72 val = rdev->desc->enable_mask; regulator_enable_regmap()
75 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, regulator_enable_regmap()
76 rdev->desc->enable_mask, val); regulator_enable_regmap()
93 if (rdev->desc->enable_is_inverted) { regulator_disable_regmap()
94 val = rdev->desc->enable_val; regulator_disable_regmap()
96 val = rdev->desc->enable_mask; regulator_disable_regmap()
98 val = rdev->desc->disable_val; regulator_disable_regmap()
101 return regmap_update_bits(rdev->regmap, rdev->desc->enable_reg, regulator_disable_regmap()
102 rdev->desc->enable_mask, val); regulator_disable_regmap()
120 ret = regmap_read(rdev->regmap, rdev->desc->vsel_reg, &val); regulator_get_voltage_sel_regmap()
124 val &= rdev->desc->vsel_mask; regulator_get_voltage_sel_regmap()
125 val >>= ffs(rdev->desc->vsel_mask) - 1; regulator_get_voltage_sel_regmap()
145 sel <<= ffs(rdev->desc->vsel_mask) - 1; regulator_set_voltage_sel_regmap()
147 ret = regmap_update_bits(rdev->regmap, rdev->desc->vsel_reg, regulator_set_voltage_sel_regmap()
148 rdev->desc->vsel_mask, sel); regulator_set_voltage_sel_regmap()
152 if (rdev->desc->apply_bit) regulator_set_voltage_sel_regmap()
153 ret = regmap_update_bits(rdev->regmap, rdev->desc->apply_reg, regulator_set_voltage_sel_regmap()
154 rdev->desc->apply_bit, regulator_set_voltage_sel_regmap()
155 rdev->desc->apply_bit); regulator_set_voltage_sel_regmap()
182 for (i = 0; i < rdev->desc->n_voltages; i++) { regulator_map_voltage_iterate()
183 ret = rdev->desc->ops->list_voltage(rdev, i); regulator_map_voltage_iterate()
215 for (i = 0; i < rdev->desc->n_voltages; i++) { regulator_map_voltage_ascend()
216 ret = rdev->desc->ops->list_voltage(rdev, i); regulator_map_voltage_ascend()
247 if (rdev->desc->n_voltages == 1 && rdev->desc->uV_step == 0) { regulator_map_voltage_linear()
248 if (min_uV <= rdev->desc->min_uV && rdev->desc->min_uV <= max_uV) regulator_map_voltage_linear()
254 if (!rdev->desc->uV_step) { regulator_map_voltage_linear()
255 BUG_ON(!rdev->desc->uV_step); regulator_map_voltage_linear()
259 if (min_uV < rdev->desc->min_uV) regulator_map_voltage_linear()
260 min_uV = rdev->desc->min_uV; regulator_map_voltage_linear()
262 ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); regulator_map_voltage_linear()
266 ret += rdev->desc->linear_min_sel; regulator_map_voltage_linear()
269 voltage = rdev->desc->ops->list_voltage(rdev, ret); regulator_map_voltage_linear()
294 if (!rdev->desc->n_linear_ranges) { regulator_map_voltage_linear_range()
295 BUG_ON(!rdev->desc->n_linear_ranges); regulator_map_voltage_linear_range()
299 for (i = 0; i < rdev->desc->n_linear_ranges; i++) { regulator_map_voltage_linear_range()
302 range = &rdev->desc->linear_ranges[i]; regulator_map_voltage_linear_range()
327 if (i == rdev->desc->n_linear_ranges) regulator_map_voltage_linear_range()
331 voltage = rdev->desc->ops->list_voltage(rdev, ret); regulator_map_voltage_linear_range()
352 if (selector >= rdev->desc->n_voltages) regulator_list_voltage_linear()
354 if (selector < rdev->desc->linear_min_sel) regulator_list_voltage_linear()
357 selector -= rdev->desc->linear_min_sel; regulator_list_voltage_linear()
359 return rdev->desc->min_uV + (rdev->desc->uV_step * selector); regulator_list_voltage_linear()
379 if (!rdev->desc->n_linear_ranges) { regulator_list_voltage_linear_range()
380 BUG_ON(!rdev->desc->n_linear_ranges); regulator_list_voltage_linear_range()
384 for (i = 0; i < rdev->desc->n_linear_ranges; i++) { regulator_list_voltage_linear_range()
385 range = &rdev->desc->linear_ranges[i]; regulator_list_voltage_linear_range()
413 if (!rdev->desc->volt_table) { regulator_list_voltage_table()
414 BUG_ON(!rdev->desc->volt_table); regulator_list_voltage_table()
418 if (selector >= rdev->desc->n_voltages) regulator_list_voltage_table()
421 return rdev->desc->volt_table[selector]; regulator_list_voltage_table()
436 val = rdev->desc->bypass_val_on; regulator_set_bypass_regmap()
438 val = rdev->desc->bypass_mask; regulator_set_bypass_regmap()
440 val = rdev->desc->bypass_val_off; regulator_set_bypass_regmap()
443 return regmap_update_bits(rdev->regmap, rdev->desc->bypass_reg, regulator_set_bypass_regmap()
444 rdev->desc->bypass_mask, val); regulator_set_bypass_regmap()
459 ret = regmap_read(rdev->regmap, rdev->desc->bypass_reg, &val); regulator_get_bypass_regmap()
463 *enable = val & rdev->desc->bypass_mask; regulator_get_bypass_regmap()
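The vsel get/set helpers above pack the selector into a register bitfield, deriving the shift from the mask with ffs(). A minimal userspace model of that packing; the 0x78 mask layout is an assumption for illustration, not any real chip's field:

#include <stdio.h>
#include <strings.h>	/* ffs() */

static unsigned int reg;			/* stands in for the regmap register */
static const unsigned int vsel_mask = 0x78;	/* bits 6:3, assumed layout */

static void set_voltage_sel(unsigned int sel)
{
	reg = (reg & ~vsel_mask) | ((sel << (ffs(vsel_mask) - 1)) & vsel_mask);
}

static unsigned int get_voltage_sel(void)
{
	return (reg & vsel_mask) >> (ffs(vsel_mask) - 1);
}

int main(void)
{
	set_voltage_sel(9);
	printf("reg=0x%02x sel=%u\n", reg, get_voltage_sel());	/* reg=0x48 sel=9 */
	return 0;
}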
H A Dda9062-regulator.c44 struct regulator_desc desc; member in struct:da9062_regulator_info
61 struct regulator_desc desc; member in struct:da9062_regulator
309 sel <<= ffs(rdev->desc->vsel_mask) - 1; da9062_set_suspend_voltage()
312 rdev->desc->vsel_mask, sel); da9062_set_suspend_voltage()
411 .desc.id = DA9062_ID_BUCK1,
412 .desc.name = "DA9062 BUCK1",
413 .desc.of_match = of_match_ptr("buck1"),
414 .desc.regulators_node = of_match_ptr("regulators"),
415 .desc.ops = &da9062_buck_ops,
416 .desc.min_uV = (300) * 1000,
417 .desc.uV_step = (10) * 1000,
418 .desc.n_voltages = ((1570) - (300))/(10) + 1,
421 .desc.enable_reg = DA9062AA_BUCK1_CONT,
422 .desc.enable_mask = DA9062AA_BUCK1_EN_MASK,
423 .desc.vsel_reg = DA9062AA_VBUCK1_A,
424 .desc.vsel_mask = DA9062AA_VBUCK1_A_MASK,
425 .desc.linear_min_sel = 0,
449 .desc.id = DA9062_ID_BUCK2,
450 .desc.name = "DA9062 BUCK2",
451 .desc.of_match = of_match_ptr("buck2"),
452 .desc.regulators_node = of_match_ptr("regulators"),
453 .desc.ops = &da9062_buck_ops,
454 .desc.min_uV = (300) * 1000,
455 .desc.uV_step = (10) * 1000,
456 .desc.n_voltages = ((1570) - (300))/(10) + 1,
459 .desc.enable_reg = DA9062AA_BUCK2_CONT,
460 .desc.enable_mask = DA9062AA_BUCK2_EN_MASK,
461 .desc.vsel_reg = DA9062AA_VBUCK2_A,
462 .desc.vsel_mask = DA9062AA_VBUCK2_A_MASK,
463 .desc.linear_min_sel = 0,
487 .desc.id = DA9062_ID_BUCK3,
488 .desc.name = "DA9062 BUCK3",
489 .desc.of_match = of_match_ptr("buck3"),
490 .desc.regulators_node = of_match_ptr("regulators"),
491 .desc.ops = &da9062_buck_ops,
492 .desc.min_uV = (800) * 1000,
493 .desc.uV_step = (20) * 1000,
494 .desc.n_voltages = ((3340) - (800))/(20) + 1,
497 .desc.enable_reg = DA9062AA_BUCK3_CONT,
498 .desc.enable_mask = DA9062AA_BUCK3_EN_MASK,
499 .desc.vsel_reg = DA9062AA_VBUCK3_A,
500 .desc.vsel_mask = DA9062AA_VBUCK3_A_MASK,
501 .desc.linear_min_sel = 0,
525 .desc.id = DA9062_ID_BUCK4,
526 .desc.name = "DA9062 BUCK4",
527 .desc.of_match = of_match_ptr("buck4"),
528 .desc.regulators_node = of_match_ptr("regulators"),
529 .desc.ops = &da9062_buck_ops,
530 .desc.min_uV = (530) * 1000,
531 .desc.uV_step = (10) * 1000,
532 .desc.n_voltages = ((1800) - (530))/(10) + 1,
535 .desc.enable_reg = DA9062AA_BUCK4_CONT,
536 .desc.enable_mask = DA9062AA_BUCK4_EN_MASK,
537 .desc.vsel_reg = DA9062AA_VBUCK4_A,
538 .desc.vsel_mask = DA9062AA_VBUCK4_A_MASK,
539 .desc.linear_min_sel = 0,
563 .desc.id = DA9062_ID_LDO1,
564 .desc.name = "DA9062 LDO1",
565 .desc.of_match = of_match_ptr("ldo1"),
566 .desc.regulators_node = of_match_ptr("regulators"),
567 .desc.ops = &da9062_ldo_ops,
568 .desc.min_uV = (900) * 1000,
569 .desc.uV_step = (50) * 1000,
570 .desc.n_voltages = ((3600) - (900))/(50) + 1,
571 .desc.enable_reg = DA9062AA_LDO1_CONT,
572 .desc.enable_mask = DA9062AA_LDO1_EN_MASK,
573 .desc.vsel_reg = DA9062AA_VLDO1_A,
574 .desc.vsel_mask = DA9062AA_VLDO1_A_MASK,
575 .desc.linear_min_sel = 0,
595 .desc.id = DA9062_ID_LDO2,
596 .desc.name = "DA9062 LDO2",
597 .desc.of_match = of_match_ptr("ldo2"),
598 .desc.regulators_node = of_match_ptr("regulators"),
599 .desc.ops = &da9062_ldo_ops,
600 .desc.min_uV = (900) * 1000,
601 .desc.uV_step = (50) * 1000,
602 .desc.n_voltages = ((3600) - (900))/(50) + 1, da9062_ldo2
603 .desc.enable_reg = DA9062AA_LDO2_CONT,
604 .desc.enable_mask = DA9062AA_LDO2_EN_MASK,
605 .desc.vsel_reg = DA9062AA_VLDO2_A,
606 .desc.vsel_mask = DA9062AA_VLDO2_A_MASK,
607 .desc.linear_min_sel = 0,
627 .desc.id = DA9062_ID_LDO3,
628 .desc.name = "DA9062 LDO3",
629 .desc.of_match = of_match_ptr("ldo3"),
630 .desc.regulators_node = of_match_ptr("regulators"),
631 .desc.ops = &da9062_ldo_ops,
632 .desc.min_uV = (900) * 1000,
633 .desc.uV_step = (50) * 1000,
634 .desc.n_voltages = ((3600) - (900))/(50) + 1,
635 .desc.enable_reg = DA9062AA_LDO3_CONT,
636 .desc.enable_mask = DA9062AA_LDO3_EN_MASK,
637 .desc.vsel_reg = DA9062AA_VLDO3_A,
638 .desc.vsel_mask = DA9062AA_VLDO3_A_MASK,
639 .desc.linear_min_sel = 0,
659 .desc.id = DA9062_ID_LDO4,
660 .desc.name = "DA9062 LDO4",
661 .desc.of_match = of_match_ptr("ldo4"),
662 .desc.regulators_node = of_match_ptr("regulators"),
663 .desc.ops = &da9062_ldo_ops,
664 .desc.min_uV = (900) * 1000,
665 .desc.uV_step = (50) * 1000,
666 .desc.n_voltages = ((3600) - (900))/(50) + 1,
667 .desc.enable_reg = DA9062AA_LDO4_CONT,
668 .desc.enable_mask = DA9062AA_LDO4_EN_MASK,
669 .desc.vsel_reg = DA9062AA_VLDO4_A,
670 .desc.vsel_mask = DA9062AA_VLDO4_A_MASK,
671 .desc.linear_min_sel = 0,
749 regl->desc = regl->info->desc; da9062_regulator_probe()
750 regl->desc.type = REGULATOR_VOLTAGE; da9062_regulator_probe()
751 regl->desc.owner = THIS_MODULE; da9062_regulator_probe()
785 regl->rdev = devm_regulator_register(&pdev->dev, &regl->desc, da9062_regulator_probe()
790 regl->desc.name); da9062_regulator_probe()
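Each table entry above derives n_voltages from (max_mV - min_mV)/step_mV + 1. A short sketch of the same linear selector math using BUCK1's 300-1570 mV / 10 mV numbers from the table; DIV_ROUND_UP is redefined locally to match the core.c mapping shown earlier in this listing:

#include <stdio.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	const int min_uV = (300) * 1000, uV_step = (10) * 1000;
	const int n_voltages = ((1570) - (300)) / (10) + 1;	/* 128 */
	int sel = DIV_ROUND_UP(1234567 - min_uV, uV_step);	/* round up to a step */
	int uV = min_uV + uV_step * sel;

	printf("n_voltages=%d sel=%d -> %d uV\n", n_voltages, sel, uV);
	return sel < n_voltages ? 0 : 1;
}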
H A Dpalmas-regulator.c470 pmic->desc[id].enable_val = pmic->current_reg_mode[id]; palmas_set_mode_smps()
869 struct regulator_desc *desc; palmas_ldo_registration() local
883 desc = &pmic->desc[id]; palmas_ldo_registration()
884 desc->name = rinfo->name; palmas_ldo_registration()
885 desc->id = id; palmas_ldo_registration()
886 desc->type = REGULATOR_VOLTAGE; palmas_ldo_registration()
887 desc->owner = THIS_MODULE; palmas_ldo_registration()
890 desc->n_voltages = PALMAS_LDO_NUM_VOLTAGES; palmas_ldo_registration()
892 desc->ops = &palmas_ops_ext_control_ldo; palmas_ldo_registration()
894 desc->ops = &palmas_ops_ldo; palmas_ldo_registration()
895 desc->min_uV = 900000; palmas_ldo_registration()
896 desc->uV_step = 50000; palmas_ldo_registration()
897 desc->linear_min_sel = 1; palmas_ldo_registration()
898 desc->enable_time = 500; palmas_ldo_registration()
899 desc->vsel_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, palmas_ldo_registration()
901 desc->vsel_mask = PALMAS_LDO1_VOLTAGE_VSEL_MASK; palmas_ldo_registration()
902 desc->enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, palmas_ldo_registration()
904 desc->enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE; palmas_ldo_registration()
910 desc->min_uV = 450000; palmas_ldo_registration()
911 desc->uV_step = 25000; palmas_ldo_registration()
917 desc->enable_time = 2000; palmas_ldo_registration()
922 desc->n_voltages = 1; palmas_ldo_registration()
924 desc->ops = &palmas_ops_ext_control_extreg; palmas_ldo_registration()
926 desc->ops = &palmas_ops_extreg; palmas_ldo_registration()
927 desc->enable_reg = palmas_ldo_registration()
930 desc->enable_mask = PALMAS_REGEN1_CTRL_MODE_ACTIVE; palmas_ldo_registration()
938 desc->supply_name = rinfo->sname; palmas_ldo_registration()
941 rdev = devm_regulator_register(pmic->dev, desc, &config); palmas_ldo_registration()
981 struct regulator_desc *desc; tps65917_ldo_registration() local
995 desc = &pmic->desc[id]; tps65917_ldo_registration()
996 desc->name = rinfo->name; tps65917_ldo_registration()
997 desc->id = id; tps65917_ldo_registration()
998 desc->type = REGULATOR_VOLTAGE; tps65917_ldo_registration()
999 desc->owner = THIS_MODULE; tps65917_ldo_registration()
1002 desc->n_voltages = PALMAS_LDO_NUM_VOLTAGES; tps65917_ldo_registration()
1004 desc->ops = &palmas_ops_ext_control_ldo; tps65917_ldo_registration()
1006 desc->ops = &tps65917_ops_ldo; tps65917_ldo_registration()
1007 desc->min_uV = 900000; tps65917_ldo_registration()
1008 desc->uV_step = 50000; tps65917_ldo_registration()
1009 desc->linear_min_sel = 1; tps65917_ldo_registration()
1010 desc->enable_time = 500; tps65917_ldo_registration()
1011 desc->vsel_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, tps65917_ldo_registration()
1013 desc->vsel_mask = PALMAS_LDO1_VOLTAGE_VSEL_MASK; tps65917_ldo_registration()
1014 desc->enable_reg = PALMAS_BASE_TO_REG(PALMAS_LDO_BASE, tps65917_ldo_registration()
1016 desc->enable_mask = PALMAS_LDO1_CTRL_MODE_ACTIVE; tps65917_ldo_registration()
1021 desc->ramp_delay = 2500; tps65917_ldo_registration()
1023 desc->n_voltages = 1; tps65917_ldo_registration()
1025 desc->ops = &palmas_ops_ext_control_extreg; tps65917_ldo_registration()
1027 desc->ops = &palmas_ops_extreg; tps65917_ldo_registration()
1028 desc->enable_reg = tps65917_ldo_registration()
1031 desc->enable_mask = PALMAS_REGEN1_CTRL_MODE_ACTIVE; tps65917_ldo_registration()
1039 desc->supply_name = rinfo->sname; tps65917_ldo_registration()
1042 rdev = devm_regulator_register(pmic->dev, desc, &config); tps65917_ldo_registration()
1083 struct regulator_desc *desc; palmas_smps_registration() local
1123 desc = &pmic->desc[id]; palmas_smps_registration()
1136 desc->ramp_delay = palmas_smps_ramp_delay[reg & 0x3]; palmas_smps_registration()
1137 pmic->ramp_delay[id] = desc->ramp_delay; palmas_smps_registration()
1151 desc->name = rinfo->name; palmas_smps_registration()
1152 desc->id = id; palmas_smps_registration()
1157 desc->n_voltages = PALMAS_SMPS10_NUM_VOLTAGES; palmas_smps_registration()
1158 desc->ops = &palmas_ops_smps10; palmas_smps_registration()
1159 desc->vsel_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, palmas_smps_registration()
1161 desc->vsel_mask = SMPS10_VSEL; palmas_smps_registration()
1162 desc->enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, palmas_smps_registration()
1165 desc->enable_mask = SMPS10_SWITCH_EN; palmas_smps_registration()
1167 desc->enable_mask = SMPS10_BOOST_EN; palmas_smps_registration()
1168 desc->bypass_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, palmas_smps_registration()
1170 desc->bypass_mask = SMPS10_BYPASS_EN; palmas_smps_registration()
1171 desc->min_uV = 3750000; palmas_smps_registration()
1172 desc->uV_step = 1250000; palmas_smps_registration()
1182 desc->n_linear_ranges = 3; palmas_smps_registration()
1190 desc->linear_ranges = smps_high_ranges; palmas_smps_registration()
1192 desc->linear_ranges = smps_low_ranges; palmas_smps_registration()
1195 desc->ops = &palmas_ops_ext_control_smps; palmas_smps_registration()
1197 desc->ops = &palmas_ops_smps; palmas_smps_registration()
1198 desc->n_voltages = PALMAS_SMPS_NUM_VOLTAGES; palmas_smps_registration()
1199 desc->vsel_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, palmas_smps_registration()
1201 desc->vsel_mask = PALMAS_SMPS12_VOLTAGE_VSEL_MASK; palmas_smps_registration()
1211 desc->enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, palmas_smps_registration()
1213 desc->enable_mask = PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK; palmas_smps_registration()
1215 desc->enable_val = SMPS_CTRL_MODE_ON; palmas_smps_registration()
1218 desc->type = REGULATOR_VOLTAGE; palmas_smps_registration()
1219 desc->owner = THIS_MODULE; palmas_smps_registration()
1226 desc->supply_name = rinfo->sname; palmas_smps_registration()
1229 rdev = devm_regulator_register(pmic->dev, desc, &config); palmas_smps_registration()
1255 struct regulator_desc *desc; tps65917_smps_registration() local
1262 desc = &pmic->desc[id]; tps65917_smps_registration()
1263 desc->n_linear_ranges = 3; tps65917_smps_registration()
1279 desc->name = rinfo->name; tps65917_smps_registration()
1280 desc->id = id; tps65917_smps_registration()
1297 desc->linear_ranges = smps_high_ranges; tps65917_smps_registration()
1299 desc->linear_ranges = smps_low_ranges; tps65917_smps_registration()
1302 desc->ops = &tps65917_ops_ext_control_smps; tps65917_smps_registration()
1304 desc->ops = &tps65917_ops_smps; tps65917_smps_registration()
1305 desc->n_voltages = PALMAS_SMPS_NUM_VOLTAGES; tps65917_smps_registration()
1306 desc->vsel_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, tps65917_smps_registration()
1308 desc->vsel_mask = PALMAS_SMPS12_VOLTAGE_VSEL_MASK; tps65917_smps_registration()
1309 desc->ramp_delay = 2500; tps65917_smps_registration()
1318 desc->enable_reg = PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE, tps65917_smps_registration()
1320 desc->enable_mask = PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK; tps65917_smps_registration()
1322 desc->enable_val = SMPS_CTRL_MODE_ON; tps65917_smps_registration()
1324 desc->type = REGULATOR_VOLTAGE; tps65917_smps_registration()
1325 desc->owner = THIS_MODULE; tps65917_smps_registration()
1332 desc->supply_name = rinfo->sname; tps65917_smps_registration()
1335 rdev = devm_regulator_register(pmic->dev, desc, &config); tps65917_smps_registration()
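The Palmas LDO descriptors above set linear_min_sel = 1, reserving selector 0. A userspace sketch of how the core's regulator_list_voltage_linear() (hit near the top of this listing) handles that offset; n_voltages here is an assumed count, not PALMAS_LDO_NUM_VOLTAGES:

#include <stdio.h>

static int list_voltage_linear(unsigned int sel)
{
	const unsigned int n_voltages = 50, linear_min_sel = 1;	/* assumed */
	const int min_uV = 900000, uV_step = 50000;	/* from the code above */

	if (sel >= n_voltages)
		return -22;		/* -EINVAL */
	if (sel < linear_min_sel)
		return 0;		/* reserved selector, no voltage */
	return min_uV + uV_step * (sel - linear_min_sel);
}

int main(void)
{
	for (unsigned int sel = 0; sel < 4; sel++)
		printf("sel %u -> %d uV\n", sel, list_voltage_linear(sel));
	return 0;
}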
H A Dwm831x-ldo.c40 struct regulator_desc desc; member in struct:wm831x_ldo
252 ldo->desc.name = ldo->name; wm831x_gp_ldo_probe()
256 ldo->desc.supply_name = ldo->supply_name; wm831x_gp_ldo_probe()
258 ldo->desc.id = id; wm831x_gp_ldo_probe()
259 ldo->desc.type = REGULATOR_VOLTAGE; wm831x_gp_ldo_probe()
260 ldo->desc.n_voltages = 32; wm831x_gp_ldo_probe()
261 ldo->desc.ops = &wm831x_gp_ldo_ops; wm831x_gp_ldo_probe()
262 ldo->desc.owner = THIS_MODULE; wm831x_gp_ldo_probe()
263 ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL; wm831x_gp_ldo_probe()
264 ldo->desc.vsel_mask = WM831X_LDO1_ON_VSEL_MASK; wm831x_gp_ldo_probe()
265 ldo->desc.enable_reg = WM831X_LDO_ENABLE; wm831x_gp_ldo_probe()
266 ldo->desc.enable_mask = 1 << id; wm831x_gp_ldo_probe()
267 ldo->desc.bypass_reg = ldo->base; wm831x_gp_ldo_probe()
268 ldo->desc.bypass_mask = WM831X_LDO1_SWI; wm831x_gp_ldo_probe()
269 ldo->desc.linear_ranges = wm831x_gp_ldo_ranges; wm831x_gp_ldo_probe()
270 ldo->desc.n_linear_ranges = ARRAY_SIZE(wm831x_gp_ldo_ranges); wm831x_gp_ldo_probe()
278 ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc, wm831x_gp_ldo_probe()
462 ldo->desc.name = ldo->name; wm831x_aldo_probe()
466 ldo->desc.supply_name = ldo->supply_name; wm831x_aldo_probe()
468 ldo->desc.id = id; wm831x_aldo_probe()
469 ldo->desc.type = REGULATOR_VOLTAGE; wm831x_aldo_probe()
470 ldo->desc.n_voltages = 32; wm831x_aldo_probe()
471 ldo->desc.linear_ranges = wm831x_aldo_ranges; wm831x_aldo_probe()
472 ldo->desc.n_linear_ranges = ARRAY_SIZE(wm831x_aldo_ranges); wm831x_aldo_probe()
473 ldo->desc.ops = &wm831x_aldo_ops; wm831x_aldo_probe()
474 ldo->desc.owner = THIS_MODULE; wm831x_aldo_probe()
475 ldo->desc.vsel_reg = ldo->base + WM831X_LDO_ON_CONTROL; wm831x_aldo_probe()
476 ldo->desc.vsel_mask = WM831X_LDO7_ON_VSEL_MASK; wm831x_aldo_probe()
477 ldo->desc.enable_reg = WM831X_LDO_ENABLE; wm831x_aldo_probe()
478 ldo->desc.enable_mask = 1 << id; wm831x_aldo_probe()
479 ldo->desc.bypass_reg = ldo->base; wm831x_aldo_probe()
480 ldo->desc.bypass_mask = WM831X_LDO7_SWI; wm831x_aldo_probe()
488 ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc, wm831x_aldo_probe()
607 ldo->desc.name = ldo->name; wm831x_alive_ldo_probe()
611 ldo->desc.supply_name = ldo->supply_name; wm831x_alive_ldo_probe()
613 ldo->desc.id = id; wm831x_alive_ldo_probe()
614 ldo->desc.type = REGULATOR_VOLTAGE; wm831x_alive_ldo_probe()
615 ldo->desc.n_voltages = WM831X_ALIVE_LDO_MAX_SELECTOR + 1; wm831x_alive_ldo_probe()
616 ldo->desc.ops = &wm831x_alive_ldo_ops; wm831x_alive_ldo_probe()
617 ldo->desc.owner = THIS_MODULE; wm831x_alive_ldo_probe()
618 ldo->desc.vsel_reg = ldo->base + WM831X_ALIVE_LDO_ON_CONTROL; wm831x_alive_ldo_probe()
619 ldo->desc.vsel_mask = WM831X_LDO11_ON_VSEL_MASK; wm831x_alive_ldo_probe()
620 ldo->desc.enable_reg = WM831X_LDO_ENABLE; wm831x_alive_ldo_probe()
621 ldo->desc.enable_mask = 1 << id; wm831x_alive_ldo_probe()
622 ldo->desc.min_uV = 800000; wm831x_alive_ldo_probe()
623 ldo->desc.uV_step = 50000; wm831x_alive_ldo_probe()
624 ldo->desc.enable_time = 1000; wm831x_alive_ldo_probe()
632 ldo->regulator = devm_regulator_register(&pdev->dev, &ldo->desc, wm831x_alive_ldo_probe()
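All three wm831x probe paths above point enable_reg at the shared WM831X_LDO_ENABLE register and pick the per-regulator bit with enable_mask = 1 << id. A trivial model of that shared-register enable scheme, with the register simulated as a plain variable:

#include <stdio.h>

static unsigned int ldo_enable_reg;	/* simulated WM831X_LDO_ENABLE */

static void ldo_set_enabled(int id, int on)
{
	unsigned int mask = 1u << id;

	ldo_enable_reg = on ? (ldo_enable_reg | mask) : (ldo_enable_reg & ~mask);
}

int main(void)
{
	ldo_set_enabled(3, 1);
	ldo_set_enabled(7, 1);
	ldo_set_enabled(3, 0);
	printf("enable reg = 0x%04x\n", ldo_enable_reg);	/* 0x0080 */
	return 0;
}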
H A Dqcom_rpm-regulator.c60 struct regulator_desc desc; member in struct:qcom_rpm_reg
454 .desc.linear_ranges = pldo_ranges,
455 .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges),
456 .desc.n_voltages = 161,
457 .desc.ops = &mV_ops,
464 .desc.linear_ranges = nldo_ranges,
465 .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges),
466 .desc.n_voltages = 64,
467 .desc.ops = &mV_ops,
474 .desc.linear_ranges = smps_ranges,
475 .desc.n_linear_ranges = ARRAY_SIZE(smps_ranges),
476 .desc.n_voltages = 154,
477 .desc.ops = &mV_ops,
484 .desc.linear_ranges = ncp_ranges,
485 .desc.n_linear_ranges = ARRAY_SIZE(ncp_ranges),
486 .desc.n_voltages = 32,
487 .desc.ops = &mV_ops,
492 .desc.ops = &switch_ops,
500 .desc.linear_ranges = pldo_ranges,
501 .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges),
502 .desc.n_voltages = 161,
503 .desc.ops = &mV_ops,
510 .desc.linear_ranges = nldo_ranges,
511 .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges),
512 .desc.n_voltages = 64,
513 .desc.ops = &mV_ops,
520 .desc.linear_ranges = ftsmps_ranges,
521 .desc.n_linear_ranges = ARRAY_SIZE(ftsmps_ranges),
522 .desc.n_voltages = 101,
523 .desc.ops = &mV_ops,
530 .desc.ops = &switch_ops,
538 .desc.linear_ranges = pldo_ranges,
539 .desc.n_linear_ranges = ARRAY_SIZE(pldo_ranges),
540 .desc.n_voltages = 161,
541 .desc.ops = &uV_ops,
548 .desc.linear_ranges = nldo_ranges,
549 .desc.n_linear_ranges = ARRAY_SIZE(nldo_ranges),
550 .desc.n_voltages = 64,
551 .desc.ops = &uV_ops,
558 .desc.linear_ranges = nldo1200_ranges,
559 .desc.n_linear_ranges = ARRAY_SIZE(nldo1200_ranges),
560 .desc.n_voltages = 124,
561 .desc.ops = &uV_ops,
568 .desc.linear_ranges = smps_ranges,
569 .desc.n_linear_ranges = ARRAY_SIZE(smps_ranges),
570 .desc.n_voltages = 154,
571 .desc.ops = &uV_ops,
578 .desc.linear_ranges = ftsmps_ranges,
579 .desc.n_linear_ranges = ARRAY_SIZE(ftsmps_ranges),
580 .desc.n_voltages = 101,
581 .desc.ops = &uV_ops,
588 .desc.linear_ranges = ncp_ranges,
589 .desc.n_linear_ranges = ARRAY_SIZE(ncp_ranges),
590 .desc.n_voltages = 32,
591 .desc.ops = &uV_ops,
596 .desc.ops = &switch_ops,
601 .desc.linear_ranges = smb208_ranges,
602 .desc.n_linear_ranges = ARRAY_SIZE(smb208_ranges),
603 .desc.n_voltages = 235,
604 .desc.ops = &uV_ops,
657 const struct regulator_desc *desc, rpm_reg_of_parse()
908 vreg->desc.id = -1; rpm_reg_probe()
909 vreg->desc.owner = THIS_MODULE; rpm_reg_probe()
910 vreg->desc.type = REGULATOR_VOLTAGE; rpm_reg_probe()
911 vreg->desc.name = reg->name; rpm_reg_probe()
912 vreg->desc.supply_name = reg->supply; rpm_reg_probe()
913 vreg->desc.of_match = reg->name; rpm_reg_probe()
914 vreg->desc.of_parse_cb = rpm_reg_of_parse; rpm_reg_probe()
918 rdev = devm_regulator_register(&pdev->dev, &vreg->desc, &config); rpm_reg_probe()
656 rpm_reg_of_parse(struct device_node *node, const struct regulator_desc *desc, struct regulator_config *config) rpm_reg_of_parse() argument
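The qcom tables above describe each regulator as linear_ranges plus an n_voltages total. A sketch of selector lookup across multiple ranges, mirroring the regulator_map_voltage_linear_range() hits earlier in this listing; the two ranges are illustrative, not the real pldo/nldo data:

#include <stdio.h>

struct linear_range { int min_uV; unsigned int min_sel, max_sel; int uV_step; };

static const struct linear_range ranges[] = {
	{  750000,  0, 29, 12500 },	/* made-up low range */
	{ 1500000, 30, 57, 25000 },	/* made-up high range */
};

static int map_voltage(int min_uV)
{
	for (unsigned int i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++) {
		const struct linear_range *r = &ranges[i];
		int max_uV = r->min_uV + (r->max_sel - r->min_sel) * r->uV_step;

		if (min_uV > max_uV)
			continue;
		if (min_uV < r->min_uV)
			min_uV = r->min_uV;
		return r->min_sel +
		       (min_uV - r->min_uV + r->uV_step - 1) / r->uV_step;
	}
	return -34;	/* -ERANGE: no range can reach min_uV */
}

int main(void)
{
	printf("sel for 1.3 V = %d\n", map_voltage(1300000));	/* 30 (1.5 V) */
	return 0;
}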
H A Ddb8500-prcmu.c33 info->desc.name); db8500_regulator_enable()
53 info->desc.name); db8500_regulator_disable()
72 " %i\n", info->desc.name, info->is_enabled); db8500_regulator_is_enabled()
150 info->desc.name); db8500_regulator_switch_enable()
156 info->desc.name); db8500_regulator_switch_enable()
174 info->desc.name); db8500_regulator_switch_disable()
180 info->desc.name); db8500_regulator_switch_disable()
198 info->desc.name, info->is_enabled); db8500_regulator_switch_is_enabled()
215 .desc = {
224 .desc = {
233 .desc = {
242 .desc = {
251 .desc = {
260 .desc = {
272 .desc = {
281 .desc = {
290 .desc = {
300 .desc = {
311 .desc = {
321 .desc = {
331 .desc = {
342 .desc = {
352 .desc = {
362 .desc = {
372 .desc = {
383 .desc = {
394 .desc = {
405 .desc = {
436 info->rdev = devm_regulator_register(&pdev->dev, &info->desc, &config); db8500_regulator_register()
440 info->desc.name, err); db8500_regulator_register()
445 "regulator-%s-probed\n", info->desc.name); db8500_regulator_register()
H A Dof_regulator.c29 const struct regulator_desc *desc) of_get_regulation_constraints()
98 if (desc && desc->of_map_mode) { of_get_regulation_constraints()
99 ret = desc->of_map_mode(pval); of_get_regulation_constraints()
137 if (desc && desc->of_map_mode) { of_get_regulation_constraints()
138 ret = desc->of_map_mode(pval); of_get_regulation_constraints()
171 * @desc: regulator description
179 const struct regulator_desc *desc) of_get_regulator_init_data()
190 of_get_regulation_constraints(node, &init_data, desc); of_get_regulator_init_data()
272 match->desc); for_each_child_of_node()
290 const struct regulator_desc *desc, regulator_of_get_init_data()
298 if (!dev->of_node || !desc->of_match) regulator_of_get_init_data()
301 if (desc->regulators_node) regulator_of_get_init_data()
303 desc->regulators_node); regulator_of_get_init_data()
309 desc->regulators_node); regulator_of_get_init_data()
318 if (strcmp(desc->of_match, name)) for_each_available_child_of_node()
321 init_data = of_get_regulator_init_data(dev, child, desc); for_each_available_child_of_node()
329 if (desc->of_parse_cb) { for_each_available_child_of_node()
330 if (desc->of_parse_cb(child, desc, config)) { for_each_available_child_of_node()
27 of_get_regulation_constraints(struct device_node *np, struct regulator_init_data **init_data, const struct regulator_desc *desc) of_get_regulation_constraints() argument
177 of_get_regulator_init_data(struct device *dev, struct device_node *node, const struct regulator_desc *desc) of_get_regulator_init_data() argument
289 regulator_of_get_init_data(struct device *dev, const struct regulator_desc *desc, struct regulator_config *config, struct device_node **node) regulator_of_get_init_data() argument
H A Dltc3589.c87 struct regulator_desc desc; member in struct:ltc3589_regulator
119 shift = ffs(rdev->desc->apply_bit) - 1; ltc3589_set_ramp_delay()
142 return regmap_update_bits(ltc3589->regmap, rdev->desc->vsel_reg + 1, ltc3589_set_suspend_voltage()
143 rdev->desc->vsel_mask, sel); ltc3589_set_suspend_voltage()
153 mask = rdev->desc->apply_bit << 1; ltc3589_set_suspend_mode()
158 mask |= rdev->desc->apply_bit; ltc3589_set_suspend_mode()
159 bit |= rdev->desc->apply_bit; ltc3589_set_suspend_mode()
201 .desc = { \
279 struct ltc3589_regulator *desc = &ltc3589->regulator_descs[i]; ltc3589_parse_regulators_dt() local
291 desc->r1 = vdiv[0]; ltc3589_parse_regulators_dt()
292 desc->r2 = vdiv[1]; ltc3589_parse_regulators_dt()
450 struct regulator_desc *desc = &rdesc->desc; ltc3589_apply_fb_voltage_divider() local
455 desc->min_uV = ltc3589_scale(desc->min_uV, rdesc->r1, rdesc->r2); ltc3589_apply_fb_voltage_divider()
456 desc->uV_step = ltc3589_scale(desc->uV_step, rdesc->r1, rdesc->r2); ltc3589_apply_fb_voltage_divider()
457 desc->fixed_uV = ltc3589_scale(desc->fixed_uV, rdesc->r1, rdesc->r2); ltc3589_apply_fb_voltage_divider()
479 descs[LTC3589_LDO3].desc.fixed_uV = 1800000; ltc3589_probe()
480 descs[LTC3589_LDO4].desc.volt_table = ltc3589_ldo4; ltc3589_probe()
482 descs[LTC3589_LDO3].desc.fixed_uV = 2800000; ltc3589_probe()
483 descs[LTC3589_LDO4].desc.volt_table = ltc3589_12_ldo4; ltc3589_probe()
499 struct regulator_desc *desc = &rdesc->desc; ltc3589_probe() local
513 ltc3589->regulators[i] = devm_regulator_register(dev, desc, ltc3589_probe()
518 desc->name, ret); ltc3589_probe()
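ltc3589_apply_fb_voltage_divider() above rescales min_uV, uV_step and fixed_uV by an external feedback divider. A model of that scaling; the helper is reimplemented here by assumption as uV * (r1 + r2) / r2, and the native range and resistor values are made up:

#include <stdio.h>

static int ltc3589_scale(int uV, int r1, int r2)
{
	if (!uV)
		return 0;
	return (int)((long long)uV * (r1 + r2) / r2);
}

int main(void)
{
	int min_uV = 362500, uV_step = 12500;	/* assumed native range */
	int r1 = 100000, r2 = 100000;		/* 100k/100k divider from DT */

	printf("scaled min=%d step=%d\n",
	       ltc3589_scale(min_uV, r1, r2),
	       ltc3589_scale(uV_step, r1, r2));	/* 725000, 25000 */
	return 0;
}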
H A Dvexpress.c27 struct regulator_desc desc; member in struct:vexpress_regulator
72 reg->desc.name = dev_name(&pdev->dev); vexpress_regulator_probe()
73 reg->desc.type = REGULATOR_VOLTAGE; vexpress_regulator_probe()
74 reg->desc.owner = THIS_MODULE; vexpress_regulator_probe()
75 reg->desc.continuous_voltage_range = true; vexpress_regulator_probe()
78 &reg->desc); vexpress_regulator_probe()
84 reg->desc.ops = &vexpress_regulator_ops; vexpress_regulator_probe()
86 reg->desc.ops = &vexpress_regulator_ops_ro; vexpress_regulator_probe()
93 reg->regdev = devm_regulator_register(&pdev->dev, &reg->desc, &config); vexpress_regulator_probe()
H A Dtps51632-regulator.c88 struct regulator_desc desc; member in struct:tps51632_chip
225 const struct regulator_desc *desc) of_get_tps51632_platform_data()
235 desc); of_get_tps51632_platform_data()
254 const struct regulator_desc *desc) of_get_tps51632_platform_data()
284 tps->desc.name = client->name; tps51632_probe()
285 tps->desc.id = 0; tps51632_probe()
286 tps->desc.ramp_delay = TPS51632_DEFAULT_RAMP_DELAY; tps51632_probe()
287 tps->desc.min_uV = TPS51632_MIN_VOLTAGE; tps51632_probe()
288 tps->desc.uV_step = TPS51632_VOLTAGE_STEP_10mV; tps51632_probe()
289 tps->desc.linear_min_sel = TPS51632_MIN_VSEL; tps51632_probe()
290 tps->desc.n_voltages = TPS51632_MAX_VSEL + 1; tps51632_probe()
291 tps->desc.ops = &tps51632_dcdc_ops; tps51632_probe()
292 tps->desc.type = REGULATOR_VOLTAGE; tps51632_probe()
293 tps->desc.owner = THIS_MODULE; tps51632_probe()
297 pdata = of_get_tps51632_platform_data(&client->dev, &tps->desc); tps51632_probe()
319 tps->desc.vsel_reg = TPS51632_VOLTAGE_BASE_REG; tps51632_probe()
321 tps->desc.vsel_reg = TPS51632_VOLTAGE_SELECT_REG; tps51632_probe()
322 tps->desc.vsel_mask = TPS51632_VOUT_MASK; tps51632_probe()
345 rdev = devm_regulator_register(&client->dev, &tps->desc, &config); tps51632_probe()
224 of_get_tps51632_platform_data(struct device *dev, const struct regulator_desc *desc) of_get_tps51632_platform_data() argument
253 of_get_tps51632_platform_data(struct device *dev, const struct regulator_desc *desc) of_get_tps51632_platform_data() argument
/linux-4.4.14/tools/testing/selftests/x86/
H A Dldt_gdt.c116 static bool install_valid_mode(const struct user_desc *desc, uint32_t ar, install_valid_mode() argument
120 desc, sizeof(*desc)); install_valid_mode()
124 uint32_t limit = desc->limit; install_valid_mode()
125 if (desc->limit_in_pages) install_valid_mode()
127 check_valid_segment(desc->entry_number, 1, ar, limit, true); install_valid_mode()
133 if (desc->seg_32bit) { install_valid_mode()
145 static bool install_valid(const struct user_desc *desc, uint32_t ar) install_valid() argument
147 return install_valid_mode(desc, ar, false); install_valid()
150 static void install_invalid(const struct user_desc *desc, bool oldmode) install_invalid() argument
153 desc, sizeof(*desc)); install_invalid()
157 check_invalid_segment(desc->entry_number, 1); install_invalid()
161 if (desc->seg_32bit) { install_invalid()
180 static void fail_install(struct user_desc *desc) fail_install() argument
182 if (safe_modify_ldt(0x11, desc, sizeof(*desc)) == 0) { fail_install()
194 struct user_desc desc = { do_simple_tests() local
205 install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB); do_simple_tests()
207 desc.limit_in_pages = 1; do_simple_tests()
208 install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | do_simple_tests()
213 desc.entry_number = 2; do_simple_tests()
214 install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | do_simple_tests()
219 desc.base_addr = 0xf0000000; do_simple_tests()
220 install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | do_simple_tests()
223 desc.useable = 1; do_simple_tests()
224 install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | do_simple_tests()
227 desc.seg_not_present = 1; do_simple_tests()
228 install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | do_simple_tests()
231 desc.seg_32bit = 0; do_simple_tests()
232 install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | do_simple_tests()
235 desc.seg_32bit = 1; do_simple_tests()
236 desc.contents = 0; do_simple_tests()
237 install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | do_simple_tests()
240 desc.read_exec_only = 1; do_simple_tests()
241 install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | do_simple_tests()
244 desc.contents = 1; do_simple_tests()
245 install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA_EXPDOWN | do_simple_tests()
248 desc.read_exec_only = 0; do_simple_tests()
249 desc.limit_in_pages = 0; do_simple_tests()
250 install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA_EXPDOWN | do_simple_tests()
253 desc.contents = 3; do_simple_tests()
254 install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE_CONF | do_simple_tests()
257 desc.read_exec_only = 1; do_simple_tests()
258 install_valid(&desc, AR_DPL3 | AR_TYPE_XOCODE_CONF | do_simple_tests()
261 desc.read_exec_only = 0; do_simple_tests()
262 desc.contents = 2; do_simple_tests()
263 install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | do_simple_tests()
266 desc.read_exec_only = 1; do_simple_tests()
269 desc.lm = 1; do_simple_tests()
270 install_valid(&desc, AR_DPL3 | AR_TYPE_XOCODE | do_simple_tests()
272 desc.lm = 0; do_simple_tests()
275 bool entry1_okay = install_valid(&desc, AR_DPL3 | AR_TYPE_XOCODE | do_simple_tests()
283 check_valid_segment(desc.entry_number, 1, do_simple_tests()
285 AR_S | AR_DB | AR_AVL, desc.limit, do_simple_tests()
306 desc.entry_number = i; do_simple_tests()
307 desc.limit = i; do_simple_tests()
308 if (safe_modify_ldt(0x11, &desc, sizeof(desc)) != 0) { do_simple_tests()
324 desc.entry_number = 8192; do_simple_tests()
325 fail_install(&desc); do_simple_tests()
328 memset(&desc, 0, sizeof(desc)); do_simple_tests()
329 install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P); do_simple_tests()
331 desc.seg_not_present = 1; do_simple_tests()
332 install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S); do_simple_tests()
334 desc.seg_not_present = 0; do_simple_tests()
335 desc.read_exec_only = 1; do_simple_tests()
336 install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S | AR_P); do_simple_tests()
338 desc.read_exec_only = 0; do_simple_tests()
339 desc.seg_not_present = 1; do_simple_tests()
340 install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S); do_simple_tests()
342 desc.read_exec_only = 1; do_simple_tests()
343 desc.limit = 1; do_simple_tests()
344 install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S); do_simple_tests()
346 desc.limit = 0; do_simple_tests()
347 desc.base_addr = 1; do_simple_tests()
348 install_valid(&desc, AR_DPL3 | AR_TYPE_RODATA | AR_S); do_simple_tests()
350 desc.base_addr = 0; do_simple_tests()
351 install_invalid(&desc, false); do_simple_tests()
353 desc.seg_not_present = 0; do_simple_tests()
354 desc.read_exec_only = 0; do_simple_tests()
355 desc.seg_32bit = 1; do_simple_tests()
356 install_valid(&desc, AR_DPL3 | AR_TYPE_RWDATA | AR_S | AR_P | AR_DB); do_simple_tests()
357 install_invalid(&desc, true); do_simple_tests()
384 const struct user_desc desc = {}; threadproc() local
385 if (syscall(SYS_modify_ldt, 1, &desc, sizeof(desc)) != 0) threadproc()
459 struct user_desc desc = { do_multicpu_tests() local
471 if (safe_modify_ldt(0x11, &desc, sizeof(desc)) != 0) { do_multicpu_tests()
531 struct user_desc desc = { do_exec_test() local
542 install_valid(&desc, AR_DPL3 | AR_TYPE_XRCODE | AR_S | AR_P | AR_DB); do_exec_test()
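The selftest above exercises modify_ldt() with many user_desc permutations. A minimal standalone install of one read/write data entry via the same 0x11 (write, new mode) command; x86 Linux only, and a bare sketch rather than a test:

#include <asm/ldt.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = 0;
	desc.base_addr = 0;
	desc.limit = 10;
	desc.seg_32bit = 1;
	desc.contents = 0;		/* read/write data, cf. AR_TYPE_RWDATA */

	if (syscall(SYS_modify_ldt, 0x11, &desc, sizeof(desc)) != 0) {
		perror("modify_ldt");
		return 1;
	}
	printf("installed LDT entry %u\n", desc.entry_number);
	return 0;
}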
/linux-4.4.14/arch/arm/crypto/
H A Dsha1.h7 extern int sha1_update_arm(struct shash_desc *desc, const u8 *data,
10 extern int sha1_finup_arm(struct shash_desc *desc, const u8 *data,
H A Dsha256_glue.h8 int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data,
11 int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data,
H A Dsha1-ce-glue.c30 static int sha1_ce_update(struct shash_desc *desc, const u8 *data, sha1_ce_update() argument
33 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_ce_update()
37 return sha1_update_arm(desc, data, len); sha1_ce_update()
40 sha1_base_do_update(desc, data, len, sha1_ce_transform); sha1_ce_update()
46 static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, sha1_ce_finup() argument
50 return sha1_finup_arm(desc, data, len, out); sha1_ce_finup()
54 sha1_base_do_update(desc, data, len, sha1_ce_transform); sha1_ce_finup()
55 sha1_base_do_finalize(desc, sha1_ce_transform); sha1_ce_finup()
58 return sha1_base_finish(desc, out); sha1_ce_finup()
61 static int sha1_ce_final(struct shash_desc *desc, u8 *out) sha1_ce_final() argument
63 return sha1_ce_finup(desc, NULL, 0, out); sha1_ce_final()
H A Dsha512-neon-glue.c28 static int sha512_neon_update(struct shash_desc *desc, const u8 *data, sha512_neon_update() argument
31 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_neon_update()
35 return sha512_arm_update(desc, data, len); sha512_neon_update()
38 sha512_base_do_update(desc, data, len, sha512_neon_update()
45 static int sha512_neon_finup(struct shash_desc *desc, const u8 *data, sha512_neon_finup() argument
49 return sha512_arm_finup(desc, data, len, out); sha512_neon_finup()
53 sha512_base_do_update(desc, data, len, sha512_neon_finup()
55 sha512_base_do_finalize(desc, sha512_neon_finup()
59 return sha512_base_finish(desc, out); sha512_neon_finup()
62 static int sha512_neon_final(struct shash_desc *desc, u8 *out) sha512_neon_final() argument
64 return sha512_neon_finup(desc, NULL, 0, out); sha512_neon_final()
H A Dsha1_neon_glue.c37 static int sha1_neon_update(struct shash_desc *desc, const u8 *data, sha1_neon_update() argument
40 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_neon_update()
44 return sha1_update_arm(desc, data, len); sha1_neon_update()
47 sha1_base_do_update(desc, data, len, sha1_neon_update()
54 static int sha1_neon_finup(struct shash_desc *desc, const u8 *data, sha1_neon_finup() argument
58 return sha1_finup_arm(desc, data, len, out); sha1_neon_finup()
62 sha1_base_do_update(desc, data, len, sha1_neon_finup()
64 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_transform_neon); sha1_neon_finup()
67 return sha1_base_finish(desc, out); sha1_neon_finup()
70 static int sha1_neon_final(struct shash_desc *desc, u8 *out) sha1_neon_final() argument
72 return sha1_neon_finup(desc, NULL, 0, out); sha1_neon_final()
H A Dsha2-ce-glue.c31 static int sha2_ce_update(struct shash_desc *desc, const u8 *data, sha2_ce_update() argument
34 struct sha256_state *sctx = shash_desc_ctx(desc); sha2_ce_update()
38 return crypto_sha256_arm_update(desc, data, len); sha2_ce_update()
41 sha256_base_do_update(desc, data, len, sha2_ce_update()
48 static int sha2_ce_finup(struct shash_desc *desc, const u8 *data, sha2_ce_finup() argument
52 return crypto_sha256_arm_finup(desc, data, len, out); sha2_ce_finup()
56 sha256_base_do_update(desc, data, len, sha2_ce_finup()
58 sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); sha2_ce_finup()
61 return sha256_base_finish(desc, out); sha2_ce_finup()
64 static int sha2_ce_final(struct shash_desc *desc, u8 *out) sha2_ce_final() argument
66 return sha2_ce_finup(desc, NULL, 0, out); sha2_ce_final()
H A Dsha256_neon_glue.c32 static int sha256_update(struct shash_desc *desc, const u8 *data, sha256_update() argument
35 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_update()
39 return crypto_sha256_arm_update(desc, data, len); sha256_update()
42 sha256_base_do_update(desc, data, len, sha256_update()
49 static int sha256_finup(struct shash_desc *desc, const u8 *data, sha256_finup() argument
53 return crypto_sha256_arm_finup(desc, data, len, out); sha256_finup()
57 sha256_base_do_update(desc, data, len, sha256_finup()
59 sha256_base_do_finalize(desc, sha256_finup()
63 return sha256_base_finish(desc, out); sha256_finup()
66 static int sha256_final(struct shash_desc *desc, u8 *out) sha256_final() argument
68 return sha256_finup(desc, NULL, 0, out); sha256_final()
H A Dsha1_glue.c33 int sha1_update_arm(struct shash_desc *desc, const u8 *data, sha1_update_arm() argument
39 return sha1_base_do_update(desc, data, len, sha1_update_arm()
44 static int sha1_final(struct shash_desc *desc, u8 *out) sha1_final() argument
46 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_block_data_order); sha1_final()
47 return sha1_base_finish(desc, out); sha1_final()
50 int sha1_finup_arm(struct shash_desc *desc, const u8 *data, sha1_finup_arm() argument
53 sha1_base_do_update(desc, data, len, sha1_finup_arm()
55 return sha1_final(desc, out); sha1_finup_arm()
H A Dsha256_glue.c36 int crypto_sha256_arm_update(struct shash_desc *desc, const u8 *data, crypto_sha256_arm_update() argument
42 return sha256_base_do_update(desc, data, len, crypto_sha256_arm_update()
47 static int sha256_final(struct shash_desc *desc, u8 *out) sha256_final() argument
49 sha256_base_do_finalize(desc, sha256_final()
51 return sha256_base_finish(desc, out); sha256_final()
54 int crypto_sha256_arm_finup(struct shash_desc *desc, const u8 *data, crypto_sha256_arm_finup() argument
57 sha256_base_do_update(desc, data, len, crypto_sha256_arm_finup()
59 return sha256_final(desc, out); crypto_sha256_arm_finup()
H A Dsha512-glue.c33 int sha512_arm_update(struct shash_desc *desc, const u8 *data, sha512_arm_update() argument
36 return sha512_base_do_update(desc, data, len, sha512_arm_update()
40 int sha512_arm_final(struct shash_desc *desc, u8 *out) sha512_arm_final() argument
42 sha512_base_do_finalize(desc, sha512_arm_final()
44 return sha512_base_finish(desc, out); sha512_arm_final()
47 int sha512_arm_finup(struct shash_desc *desc, const u8 *data, sha512_arm_finup() argument
50 sha512_base_do_update(desc, data, len, sha512_arm_finup()
52 return sha512_arm_final(desc, out); sha512_arm_finup()
H A Daes-ce-glue.c166 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
169 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_encrypt()
174 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ecb_encrypt()
176 err = blkcipher_walk_virt(desc, &walk); ecb_encrypt()
182 err = blkcipher_walk_done(desc, &walk, ecb_encrypt()
189 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
192 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_decrypt()
197 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ecb_decrypt()
199 err = blkcipher_walk_virt(desc, &walk); ecb_decrypt()
205 err = blkcipher_walk_done(desc, &walk, ecb_decrypt()
212 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
215 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_encrypt()
220 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_encrypt()
222 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
229 err = blkcipher_walk_done(desc, &walk, cbc_encrypt()
236 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
239 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_decrypt()
244 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_decrypt()
246 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
253 err = blkcipher_walk_done(desc, &walk, cbc_decrypt()
260 static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_encrypt() argument
263 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_encrypt()
267 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ctr_encrypt()
269 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); ctr_encrypt()
279 err = blkcipher_walk_done(desc, &walk, ctr_encrypt()
296 err = blkcipher_walk_done(desc, &walk, 0); ctr_encrypt()
303 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_encrypt() argument
306 struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt()
311 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; xts_encrypt()
313 err = blkcipher_walk_virt(desc, &walk); xts_encrypt()
320 err = blkcipher_walk_done(desc, &walk, xts_encrypt()
328 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_decrypt() argument
331 struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt()
336 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; xts_decrypt()
338 err = blkcipher_walk_virt(desc, &walk); xts_decrypt()
345 err = blkcipher_walk_done(desc, &walk, xts_decrypt()
H A Dghash-ce-glue.c46 static int ghash_init(struct shash_desc *desc) ghash_init() argument
48 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_init()
54 static int ghash_update(struct shash_desc *desc, const u8 *src, ghash_update() argument
57 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_update()
63 struct ghash_key *key = crypto_shash_ctx(desc->tfm); ghash_update()
89 static int ghash_final(struct shash_desc *desc, u8 *dst) ghash_final() argument
91 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_final()
95 struct ghash_key *key = crypto_shash_ctx(desc->tfm); ghash_final()
163 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_init() local
166 desc->tfm = child; ghash_async_init()
167 desc->flags = req->base.flags; ghash_async_init()
168 return crypto_shash_init(desc); ghash_async_init()
185 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_update() local
186 return shash_ahash_update(req, desc); ghash_async_update()
203 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_final() local
204 return crypto_shash_final(desc, req->result); ghash_async_final()
220 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_digest() local
223 desc->tfm = child; ghash_async_digest()
224 desc->flags = req->base.flags; ghash_async_digest()
225 return shash_ahash_digest(req, desc); ghash_async_digest()
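Every glue update above delegates to sha*_base_do_update(), which buffers a partial block and only hands whole blocks to the block function. A userspace model of that buffering discipline, with the compression step stubbed out to a block counter:

#include <stdio.h>
#include <string.h>

#define BLOCK	64

struct state {
	unsigned char buf[BLOCK];
	unsigned int count;	/* bytes currently buffered, < BLOCK */
	unsigned long blocks;	/* whole blocks handed to block_fn() */
};

static void block_fn(struct state *s, const unsigned char *p, int nblocks)
{
	s->blocks += nblocks;	/* a real hash would compress here */
}

static void do_update(struct state *s, const unsigned char *data, size_t len)
{
	unsigned int partial = s->count;

	if (partial && partial + len >= BLOCK) {
		unsigned int n = BLOCK - partial;

		memcpy(s->buf + partial, data, n);
		block_fn(s, s->buf, 1);
		data += n;
		len -= n;
		partial = 0;
	}
	if (!partial && len >= BLOCK) {
		block_fn(s, data, len / BLOCK);
		data += (len / BLOCK) * BLOCK;
		len %= BLOCK;
	}
	memcpy(s->buf + partial, data, len);
	s->count = partial + len;
}

int main(void)
{
	struct state s = { .count = 0, .blocks = 0 };
	unsigned char msg[200] = { 0 };

	do_update(&s, msg, 10);
	do_update(&s, msg, 190);
	printf("blocks=%lu leftover=%u\n", s.blocks, s.count);	/* 3, 8 */
	return 0;
}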
/linux-4.4.14/drivers/staging/skein/
H A Dskein_generic.c23 static int skein256_init(struct shash_desc *desc) skein256_init() argument
25 return skein_256_init((struct skein_256_ctx *)shash_desc_ctx(desc), skein256_init()
29 static int skein256_update(struct shash_desc *desc, const u8 *data, skein256_update() argument
32 return skein_256_update((struct skein_256_ctx *)shash_desc_ctx(desc), skein256_update()
36 static int skein256_final(struct shash_desc *desc, u8 *out) skein256_final() argument
38 return skein_256_final((struct skein_256_ctx *)shash_desc_ctx(desc), skein256_final()
42 static int skein256_export(struct shash_desc *desc, void *out) skein256_export() argument
44 struct skein_256_ctx *sctx = shash_desc_ctx(desc); skein256_export()
50 static int skein256_import(struct shash_desc *desc, const void *in) skein256_import() argument
52 struct skein_256_ctx *sctx = shash_desc_ctx(desc); skein256_import()
58 static int skein512_init(struct shash_desc *desc) skein512_init() argument
60 return skein_512_init((struct skein_512_ctx *)shash_desc_ctx(desc), skein512_init()
64 static int skein512_update(struct shash_desc *desc, const u8 *data, skein512_update() argument
67 return skein_512_update((struct skein_512_ctx *)shash_desc_ctx(desc), skein512_update()
71 static int skein512_final(struct shash_desc *desc, u8 *out) skein512_final() argument
73 return skein_512_final((struct skein_512_ctx *)shash_desc_ctx(desc), skein512_final()
77 static int skein512_export(struct shash_desc *desc, void *out) skein512_export() argument
79 struct skein_512_ctx *sctx = shash_desc_ctx(desc); skein512_export()
85 static int skein512_import(struct shash_desc *desc, const void *in) skein512_import() argument
87 struct skein_512_ctx *sctx = shash_desc_ctx(desc); skein512_import()
93 static int skein1024_init(struct shash_desc *desc) skein1024_init() argument
95 return skein_1024_init((struct skein_1024_ctx *)shash_desc_ctx(desc), skein1024_init()
99 static int skein1024_update(struct shash_desc *desc, const u8 *data, skein1024_update() argument
102 return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc), skein1024_update()
106 static int skein1024_final(struct shash_desc *desc, u8 *out) skein1024_final() argument
108 return skein_1024_final((struct skein_1024_ctx *)shash_desc_ctx(desc), skein1024_final()
112 static int skein1024_export(struct shash_desc *desc, void *out) skein1024_export() argument
114 struct skein_1024_ctx *sctx = shash_desc_ctx(desc); skein1024_export()
120 static int skein1024_import(struct shash_desc *desc, const void *in) skein1024_import() argument
122 struct skein_1024_ctx *sctx = shash_desc_ctx(desc); skein1024_import()
/linux-4.4.14/drivers/dma/hsu/
H A Dhsu.c60 struct hsu_dma_desc *desc = hsuc->desc; hsu_dma_chan_start() local
80 count = (desc->nents - desc->active) % HSU_DMA_CHAN_NR_DESC; hsu_dma_chan_start()
82 hsu_chan_writel(hsuc, HSU_CH_DxSAR(i), desc->sg[i].addr); hsu_dma_chan_start()
83 hsu_chan_writel(hsuc, HSU_CH_DxTSR(i), desc->sg[i].len); hsu_dma_chan_start()
89 desc->active++; hsu_dma_chan_start()
118 hsuc->desc = NULL; hsu_dma_start_transfer()
123 hsuc->desc = to_hsu_dma_desc(vdesc); hsu_dma_start_transfer()
144 struct hsu_dma_desc *desc; hsu_dma_irq() local
171 desc = hsuc->desc; hsu_dma_irq()
172 if (desc) { hsu_dma_irq()
174 desc->status = DMA_ERROR; hsu_dma_irq()
175 } else if (desc->active < desc->nents) { hsu_dma_irq()
178 vchan_cookie_complete(&desc->vdesc); hsu_dma_irq()
179 desc->status = DMA_COMPLETE; hsu_dma_irq()
191 struct hsu_dma_desc *desc; hsu_dma_alloc_desc() local
193 desc = kzalloc(sizeof(*desc), GFP_NOWAIT); hsu_dma_alloc_desc()
194 if (!desc) hsu_dma_alloc_desc()
197 desc->sg = kcalloc(nents, sizeof(*desc->sg), GFP_NOWAIT); hsu_dma_alloc_desc()
198 if (!desc->sg) { hsu_dma_alloc_desc()
199 kfree(desc); hsu_dma_alloc_desc()
203 return desc; hsu_dma_alloc_desc()
208 struct hsu_dma_desc *desc = to_hsu_dma_desc(vdesc); hsu_dma_desc_free() local
210 kfree(desc->sg); hsu_dma_desc_free()
211 kfree(desc); hsu_dma_desc_free()
220 struct hsu_dma_desc *desc; hsu_dma_prep_slave_sg() local
224 desc = hsu_dma_alloc_desc(sg_len); hsu_dma_prep_slave_sg()
225 if (!desc) hsu_dma_prep_slave_sg()
229 desc->sg[i].addr = sg_dma_address(sg); for_each_sg()
230 desc->sg[i].len = sg_dma_len(sg); for_each_sg()
233 desc->nents = sg_len;
234 desc->direction = direction;
235 /* desc->active = 0 by kzalloc */
236 desc->status = DMA_IN_PROGRESS;
238 return vchan_tx_prep(&hsuc->vchan, &desc->vdesc, flags);
247 if (vchan_issue_pending(&hsuc->vchan) && !hsuc->desc) hsu_dma_issue_pending()
252 static size_t hsu_dma_desc_size(struct hsu_dma_desc *desc) hsu_dma_desc_size() argument
257 for (i = desc->active; i < desc->nents; i++) hsu_dma_desc_size()
258 bytes += desc->sg[i].len; hsu_dma_desc_size()
265 struct hsu_dma_desc *desc = hsuc->desc; hsu_dma_active_desc_size() local
266 size_t bytes = hsu_dma_desc_size(desc); hsu_dma_active_desc_size()
269 i = desc->active % HSU_DMA_CHAN_NR_DESC; hsu_dma_active_desc_size()
292 if (hsuc->desc && cookie == hsuc->desc->vdesc.tx.cookie) { hsu_dma_tx_status()
295 status = hsuc->desc->status; hsu_dma_tx_status()
325 if (hsuc->desc && hsuc->desc->status == DMA_IN_PROGRESS) { hsu_dma_pause()
327 hsuc->desc->status = DMA_PAUSED; hsu_dma_pause()
340 if (hsuc->desc && hsuc->desc->status == DMA_PAUSED) { hsu_dma_resume()
341 hsuc->desc->status = DMA_IN_PROGRESS; hsu_dma_resume()
358 if (hsuc->desc) { hsu_dma_terminate_all()
359 hsu_dma_desc_free(&hsuc->desc->vdesc); hsu_dma_terminate_all()
360 hsuc->desc = NULL; hsu_dma_terminate_all()
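hsu_dma_desc_size() above computes the residue as the summed lengths of the not-yet-completed scatter entries, starting at desc->active. The same bookkeeping in a standalone sketch with simplified types:

#include <stdio.h>

struct sg_entry { unsigned int addr, len; };

struct dma_desc {
	struct sg_entry sg[4];
	unsigned int nents, active;	/* active = entries already programmed */
};

static unsigned int desc_size(const struct dma_desc *d)
{
	unsigned int bytes = 0;

	for (unsigned int i = d->active; i < d->nents; i++)
		bytes += d->sg[i].len;
	return bytes;
}

int main(void)
{
	struct dma_desc d = {
		.sg = { { 0, 4096 }, { 0, 4096 }, { 0, 2048 } },
		.nents = 3, .active = 1,
	};

	printf("residue = %u bytes\n", desc_size(&d));	/* 6144 */
	return 0;
}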
/linux-4.4.14/crypto/
H A Dshash.c74 static int shash_update_unaligned(struct shash_desc *desc, const u8 *data, shash_update_unaligned() argument
77 struct crypto_shash *tfm = desc->tfm; shash_update_unaligned()
91 err = shash->update(desc, buf, unaligned_len); shash_update_unaligned()
95 shash->update(desc, data + unaligned_len, len - unaligned_len); shash_update_unaligned()
98 int crypto_shash_update(struct shash_desc *desc, const u8 *data, crypto_shash_update() argument
101 struct crypto_shash *tfm = desc->tfm; crypto_shash_update()
106 return shash_update_unaligned(desc, data, len); crypto_shash_update()
108 return shash->update(desc, data, len); crypto_shash_update()
112 static int shash_final_unaligned(struct shash_desc *desc, u8 *out) shash_final_unaligned() argument
114 struct crypto_shash *tfm = desc->tfm; shash_final_unaligned()
123 err = shash->final(desc, buf); shash_final_unaligned()
134 int crypto_shash_final(struct shash_desc *desc, u8 *out) crypto_shash_final() argument
136 struct crypto_shash *tfm = desc->tfm; crypto_shash_final()
141 return shash_final_unaligned(desc, out); crypto_shash_final()
143 return shash->final(desc, out); crypto_shash_final()
147 static int shash_finup_unaligned(struct shash_desc *desc, const u8 *data, shash_finup_unaligned() argument
150 return crypto_shash_update(desc, data, len) ?: shash_finup_unaligned()
151 crypto_shash_final(desc, out); shash_finup_unaligned()
154 int crypto_shash_finup(struct shash_desc *desc, const u8 *data, crypto_shash_finup() argument
157 struct crypto_shash *tfm = desc->tfm; crypto_shash_finup()
162 return shash_finup_unaligned(desc, data, len, out); crypto_shash_finup()
164 return shash->finup(desc, data, len, out); crypto_shash_finup()
168 static int shash_digest_unaligned(struct shash_desc *desc, const u8 *data, shash_digest_unaligned() argument
171 return crypto_shash_init(desc) ?: shash_digest_unaligned()
172 crypto_shash_finup(desc, data, len, out); shash_digest_unaligned()
175 int crypto_shash_digest(struct shash_desc *desc, const u8 *data, crypto_shash_digest() argument
178 struct crypto_shash *tfm = desc->tfm; crypto_shash_digest()
183 return shash_digest_unaligned(desc, data, len, out); crypto_shash_digest()
185 return shash->digest(desc, data, len, out); crypto_shash_digest()
189 static int shash_default_export(struct shash_desc *desc, void *out) shash_default_export() argument
191 memcpy(out, shash_desc_ctx(desc), crypto_shash_descsize(desc->tfm)); shash_default_export()
195 static int shash_default_import(struct shash_desc *desc, const void *in) shash_default_import() argument
197 memcpy(shash_desc_ctx(desc), in, crypto_shash_descsize(desc->tfm)); shash_default_import()
212 struct shash_desc *desc = ahash_request_ctx(req); shash_async_init() local
214 desc->tfm = *ctx; shash_async_init()
215 desc->flags = req->base.flags; shash_async_init()
217 return crypto_shash_init(desc); shash_async_init()
220 int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc) shash_ahash_update() argument
227 nbytes = crypto_shash_update(desc, walk.data, nbytes); shash_ahash_update()
243 int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc) shash_ahash_finup() argument
250 return crypto_shash_final(desc, req->result); shash_ahash_finup()
254 crypto_shash_finup(desc, walk.data, nbytes, shash_ahash_finup()
256 crypto_shash_update(desc, walk.data, nbytes); shash_ahash_finup()
267 struct shash_desc *desc = ahash_request_ctx(req); shash_async_finup() local
269 desc->tfm = *ctx; shash_async_finup()
270 desc->flags = req->base.flags; shash_async_finup()
272 return shash_ahash_finup(req, desc); shash_async_finup()
275 int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc) shash_ahash_digest() argument
286 err = crypto_shash_digest(desc, data + offset, nbytes, shash_ahash_digest()
289 crypto_yield(desc->flags); shash_ahash_digest()
291 err = crypto_shash_init(desc) ?: shash_ahash_digest()
292 shash_ahash_finup(req, desc); shash_ahash_digest()
301 struct shash_desc *desc = ahash_request_ctx(req); shash_async_digest() local
303 desc->tfm = *ctx; shash_async_digest()
304 desc->flags = req->base.flags; shash_async_digest()
306 return shash_ahash_digest(req, desc); shash_async_digest()
317 struct shash_desc *desc = ahash_request_ctx(req); shash_async_import() local
319 desc->tfm = *ctx; shash_async_import()
320 desc->flags = req->base.flags; shash_async_import()
322 return crypto_shash_import(desc, in); shash_async_import()
375 struct shash_desc *desc = *descp; shash_compat_setkey() local
377 return crypto_shash_setkey(desc->tfm, key, keylen); shash_compat_setkey()
383 struct shash_desc *desc = *descp; shash_compat_init() local
385 desc->flags = hdesc->flags; shash_compat_init()
387 return crypto_shash_init(desc); shash_compat_init()
394 struct shash_desc *desc = *descp; shash_compat_update() local
400 nbytes = crypto_shash_update(desc, walk.data, nbytes); shash_compat_update()
420 struct shash_desc *desc = *descp; shash_compat_digest() local
423 desc->flags = hdesc->flags; shash_compat_digest()
426 err = crypto_shash_digest(desc, data + offset, nbytes, out); shash_compat_digest()
428 crypto_yield(desc->flags); shash_compat_digest()
449 struct shash_desc *desc = *descp; crypto_exit_shash_ops_compat() local
451 crypto_free_shash(desc->tfm); crypto_exit_shash_ops_compat()
452 kzfree(desc); crypto_exit_shash_ops_compat()
462 struct shash_desc *desc; crypto_init_shash_ops_compat() local
473 desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(shash), crypto_init_shash_ops_compat()
475 if (!desc) { crypto_init_shash_ops_compat()
480 *descp = desc; crypto_init_shash_ops_compat()
481 desc->tfm = shash; crypto_init_shash_ops_compat()
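shash_update_unaligned() above realigns only the head of the input: it bounces alignmask + 1 - (data & alignmask) bytes through an aligned buffer, after which the remainder is naturally aligned. A userspace model of that head fixup; the alignmask of 7 is an assumed value:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ALIGNMASK	7u	/* assume 8-byte alignment is required */

static void update_aligned(const unsigned char *p, size_t len)
{
	printf("processing %zu bytes at %p\n", len, (const void *)p);
}

static void update(const unsigned char *data, size_t len)
{
	if ((uintptr_t)data & ALIGNMASK) {
		unsigned char buf[8 + ALIGNMASK];
		unsigned char *abuf = (unsigned char *)
			(((uintptr_t)buf + ALIGNMASK) & ~(uintptr_t)ALIGNMASK);
		size_t n = ALIGNMASK + 1 - ((uintptr_t)data & ALIGNMASK);

		if (n > len)
			n = len;
		memcpy(abuf, data, n);		/* realign the head... */
		update_aligned(abuf, n);
		data += n;
		len -= n;			/* ...the rest is aligned */
	}
	if (len)
		update_aligned(data, len);
}

int main(void)
{
	unsigned char msg[32] = { 0 };

	update(msg + 3, 20);	/* (very likely) misaligned start */
	return 0;
}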
H A Dsha1_generic.c41 int crypto_sha1_update(struct shash_desc *desc, const u8 *data, crypto_sha1_update() argument
44 return sha1_base_do_update(desc, data, len, sha1_generic_block_fn); crypto_sha1_update()
48 static int sha1_final(struct shash_desc *desc, u8 *out) sha1_final() argument
50 sha1_base_do_finalize(desc, sha1_generic_block_fn); sha1_final()
51 return sha1_base_finish(desc, out); sha1_final()
54 int crypto_sha1_finup(struct shash_desc *desc, const u8 *data, crypto_sha1_finup() argument
57 sha1_base_do_update(desc, data, len, sha1_generic_block_fn); crypto_sha1_finup()
58 return sha1_final(desc, out); crypto_sha1_finup()
H A Dablk_helper.c58 struct blkcipher_desc desc; __ablk_encrypt() local
60 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); __ablk_encrypt()
61 desc.info = req->info; __ablk_encrypt()
62 desc.flags = 0; __ablk_encrypt()
64 return crypto_blkcipher_crt(desc.tfm)->encrypt( __ablk_encrypt()
65 &desc, req->dst, req->src, req->nbytes); __ablk_encrypt()
102 struct blkcipher_desc desc; ablk_decrypt() local
104 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); ablk_decrypt()
105 desc.info = req->info; ablk_decrypt()
106 desc.flags = 0; ablk_decrypt()
108 return crypto_blkcipher_crt(desc.tfm)->decrypt( ablk_decrypt()
109 &desc, req->dst, req->src, req->nbytes); ablk_decrypt()
H A Dcrc32.c76 static int crc32_init(struct shash_desc *desc) crc32_init() argument
78 u32 *mctx = crypto_shash_ctx(desc->tfm); crc32_init()
79 u32 *crcp = shash_desc_ctx(desc); crc32_init()
86 static int crc32_update(struct shash_desc *desc, const u8 *data, crc32_update() argument
89 u32 *crcp = shash_desc_ctx(desc); crc32_update()
103 static int crc32_finup(struct shash_desc *desc, const u8 *data, crc32_finup() argument
106 return __crc32_finup(shash_desc_ctx(desc), data, len, out); crc32_finup()
109 static int crc32_final(struct shash_desc *desc, u8 *out) crc32_final() argument
111 u32 *crcp = shash_desc_ctx(desc); crc32_final()
117 static int crc32_digest(struct shash_desc *desc, const u8 *data, crc32_digest() argument
120 return __crc32_finup(crypto_shash_ctx(desc->tfm), data, len, crc32_digest()
H A Dcrct10dif_generic.c42 static int chksum_init(struct shash_desc *desc) chksum_init() argument
44 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_init()
51 static int chksum_update(struct shash_desc *desc, const u8 *data, chksum_update() argument
54 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_update()
60 static int chksum_final(struct shash_desc *desc, u8 *out) chksum_final() argument
62 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_final()
75 static int chksum_finup(struct shash_desc *desc, const u8 *data, chksum_finup() argument
78 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_finup()
83 static int chksum_digest(struct shash_desc *desc, const u8 *data, chksum_digest() argument
86 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_digest()
H A Dmd5.c50 static int md5_init(struct shash_desc *desc) md5_init() argument
52 struct md5_state *mctx = shash_desc_ctx(desc); md5_init()
63 static int md5_update(struct shash_desc *desc, const u8 *data, unsigned int len) md5_update() argument
65 struct md5_state *mctx = shash_desc_ctx(desc); md5_update()
95 static int md5_final(struct shash_desc *desc, u8 *out) md5_final() argument
97 struct md5_state *mctx = shash_desc_ctx(desc); md5_final()
123 static int md5_export(struct shash_desc *desc, void *out) md5_export() argument
125 struct md5_state *ctx = shash_desc_ctx(desc); md5_export()
131 static int md5_import(struct shash_desc *desc, const void *in) md5_import() argument
133 struct md5_state *ctx = shash_desc_ctx(desc); md5_import()
H A Dhmac.c91 struct shash_desc *desc = shash_desc_ctx(pdesc); hmac_export() local
93 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; hmac_export()
95 return crypto_shash_export(desc, out); hmac_export()
100 struct shash_desc *desc = shash_desc_ctx(pdesc); hmac_import() local
103 desc->tfm = ctx->hash; hmac_import()
104 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; hmac_import()
106 return crypto_shash_import(desc, in); hmac_import()
117 struct shash_desc *desc = shash_desc_ctx(pdesc); hmac_update() local
119 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; hmac_update()
121 return crypto_shash_update(desc, data, nbytes); hmac_update()
130 struct shash_desc *desc = shash_desc_ctx(pdesc); hmac_final() local
132 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; hmac_final()
134 return crypto_shash_final(desc, out) ?: hmac_final()
135 crypto_shash_import(desc, opad) ?: hmac_final()
136 crypto_shash_finup(desc, out, ds, out); hmac_final()
147 struct shash_desc *desc = shash_desc_ctx(pdesc); hmac_finup() local
149 desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; hmac_finup()
151 return crypto_shash_finup(desc, data, nbytes, out) ?: hmac_finup()
152 crypto_shash_import(desc, opad) ?: hmac_finup()
153 crypto_shash_finup(desc, out, ds, out); hmac_finup()
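hmac_final()/hmac_finup() above compute out = H(opad || H(ipad || msg)), restoring the precomputed pad state with crypto_shash_import(). A toy model of that two-pass structure, with a trivial additive checksum standing in for the real shash; the block size and key padding are simplified assumptions:

#include <stdio.h>
#include <string.h>

#define BS	8	/* toy block size */

static unsigned char toy_hash(const unsigned char *p, size_t len)
{
	unsigned char h = 0;

	while (len--)
		h += *p++;
	return h;
}

int main(void)
{
	const unsigned char key[BS] = "k";	/* zero-padded to BS */
	const char *msg = "message";
	size_t mlen = strlen(msg);
	unsigned char ipad[BS], opad[BS], buf[64], inner;

	for (int i = 0; i < BS; i++) {
		ipad[i] = key[i] ^ 0x36;	/* the standard HMAC pads */
		opad[i] = key[i] ^ 0x5c;
	}

	memcpy(buf, ipad, BS);			/* inner = H(ipad || msg) */
	memcpy(buf + BS, msg, mlen);
	inner = toy_hash(buf, BS + mlen);

	memcpy(buf, opad, BS);			/* out = H(opad || inner) */
	buf[BS] = inner;
	printf("hmac = %02x\n", toy_hash(buf, BS + 1));
	return 0;
}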
/linux-4.4.14/drivers/staging/comedi/drivers/
H A Dcomedi_isadma.c28 * @desc: the ISA DMA cookie to program and enable
30 void comedi_isadma_program(struct comedi_isadma_desc *desc) comedi_isadma_program() argument
35 clear_dma_ff(desc->chan); comedi_isadma_program()
36 set_dma_mode(desc->chan, desc->mode); comedi_isadma_program()
37 set_dma_addr(desc->chan, desc->hw_addr); comedi_isadma_program()
38 set_dma_count(desc->chan, desc->size); comedi_isadma_program()
39 enable_dma(desc->chan); comedi_isadma_program()
111 struct comedi_isadma_desc *desc = &dma->desc[dma->cur_dma]; comedi_isadma_poll() local
117 clear_dma_ff(desc->chan); comedi_isadma_poll()
119 disable_dma(desc->chan); comedi_isadma_poll()
120 result = get_dma_residue(desc->chan); comedi_isadma_poll()
126 result1 = get_dma_residue(desc->chan); comedi_isadma_poll()
128 enable_dma(desc->chan); comedi_isadma_poll()
133 if (result >= desc->size || result == 0) comedi_isadma_poll()
136 return desc->size - result; comedi_isadma_poll()
142 * @desc: the ISA DMA cookie to set
145 void comedi_isadma_set_mode(struct comedi_isadma_desc *desc, char dma_dir) comedi_isadma_set_mode() argument
147 desc->mode = (dma_dir == COMEDI_ISADMA_READ) ? DMA_MODE_READ comedi_isadma_set_mode()
169 struct comedi_isadma_desc *desc; comedi_isadma_alloc() local
180 desc = kcalloc(n_desc, sizeof(*desc), GFP_KERNEL); comedi_isadma_alloc()
181 if (!desc) comedi_isadma_alloc()
183 dma->desc = desc; comedi_isadma_alloc()
202 desc = &dma->desc[i]; comedi_isadma_alloc()
203 desc->chan = dma_chans[i]; comedi_isadma_alloc()
204 desc->maxsize = maxsize; comedi_isadma_alloc()
205 desc->virt_addr = dma_alloc_coherent(NULL, desc->maxsize, comedi_isadma_alloc()
206 &desc->hw_addr, comedi_isadma_alloc()
208 if (!desc->virt_addr) comedi_isadma_alloc()
210 comedi_isadma_set_mode(desc, dma_dir); comedi_isadma_alloc()
227 struct comedi_isadma_desc *desc; comedi_isadma_free() local
233 if (dma->desc) { comedi_isadma_free()
235 desc = &dma->desc[i]; comedi_isadma_free()
236 if (desc->virt_addr) comedi_isadma_free()
237 dma_free_coherent(NULL, desc->maxsize, comedi_isadma_free()
238 desc->virt_addr, comedi_isadma_free()
239 desc->hw_addr); comedi_isadma_free()
241 kfree(dma->desc); comedi_isadma_free()
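
Only the desc-referencing lines of comedi_isadma_program() match the search; in the full function the register writes presumably sit inside the claim_dma_lock()/release_dma_lock() bracket that 8237-style programming requires. A sketch of the sequence, assuming the legacy helpers from <asm/dma.h>:

    #include <asm/dma.h>                    /* legacy ISA DMA channel helpers */

    static void isadma_program(unsigned int chan, char mode,
                               dma_addr_t hw_addr, unsigned int size)
    {
            unsigned long flags;

            flags = claim_dma_lock();       /* 8237 registers are not reentrant */
            clear_dma_ff(chan);             /* reset the address/count flip-flop */
            set_dma_mode(chan, mode);       /* e.g. DMA_MODE_READ / DMA_MODE_WRITE */
            set_dma_addr(chan, hw_addr);    /* bus address of the coherent buffer */
            set_dma_count(chan, size);      /* transfer length in bytes */
            enable_dma(chan);
            release_dma_lock(flags);
    }
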
H A Dni_labpc_isadma.c64 struct comedi_isadma_desc *desc = &devpriv->dma->desc[0]; labpc_setup_dma() local
69 desc->size = labpc_suggest_transfer_size(dev, s, desc->maxsize); labpc_setup_dma()
71 devpriv->count * sample_size < desc->size) labpc_setup_dma()
72 desc->size = devpriv->count * sample_size; labpc_setup_dma()
74 comedi_isadma_program(desc); labpc_setup_dma()
84 struct comedi_isadma_desc *desc = &devpriv->dma->desc[0]; labpc_drain_dma() local
88 unsigned int max_samples = comedi_bytes_to_samples(s, desc->size); labpc_drain_dma()
98 residue = comedi_isadma_disable(desc->chan); labpc_drain_dma()
118 desc->size = comedi_samples_to_bytes(s, leftover); labpc_drain_dma()
120 comedi_buf_write_samples(s, desc->virt_addr, nsamples); labpc_drain_dma()
127 struct comedi_isadma_desc *desc = &devpriv->dma->desc[0]; handle_isa_dma() local
131 if (desc->size) handle_isa_dma()
132 comedi_isadma_program(desc); handle_isa_dma()
/linux-4.4.14/drivers/misc/mic/card/
H A Dmic_virtio.h43 static inline unsigned mic_desc_size(struct mic_device_desc __iomem *desc) mic_desc_size() argument
45 return sizeof(*desc) mic_desc_size()
46 + ioread8(&desc->num_vq) * sizeof(struct mic_vqconfig) mic_desc_size()
47 + ioread8(&desc->feature_len) * 2 mic_desc_size()
48 + ioread8(&desc->config_len); mic_desc_size()
52 mic_vq_config(struct mic_device_desc __iomem *desc) mic_vq_config() argument
54 return (struct mic_vqconfig __iomem *)(desc + 1); mic_vq_config()
58 mic_vq_features(struct mic_device_desc __iomem *desc) mic_vq_features() argument
60 return (__u8 __iomem *)(mic_vq_config(desc) + ioread8(&desc->num_vq)); mic_vq_features()
64 mic_vq_configspace(struct mic_device_desc __iomem *desc) mic_vq_configspace() argument
66 return mic_vq_features(desc) + ioread8(&desc->feature_len) * 2; mic_vq_configspace()
68 static inline unsigned mic_total_desc_size(struct mic_device_desc __iomem *desc) mic_total_desc_size() argument
70 return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); mic_total_desc_size()
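
mic_desc_size() and friends compute offsets inside a variable-length device descriptor that lives in the host's aperture, so every field read goes through ioread*(). A minimal sketch of walking the virtqueue configs that follow the descriptor (the printout is illustrative):

    #include <linux/io.h>
    #include <linux/kernel.h>

    static void dump_vqconfigs(struct mic_device_desc __iomem *desc)
    {
            struct mic_vqconfig __iomem *vq = mic_vq_config(desc);
            int i, num_vq = ioread8(&desc->num_vq);

            /* ioread16() performs the little-endian conversion for the
             * __le16 ring-size field
             */
            for (i = 0; i < num_vq; i++, vq++)
                    pr_info("vq%d: %u entries\n", i, ioread16(&vq->num));
    }
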
/linux-4.4.14/arch/powerpc/include/asm/
H A Dqe_ic.h62 void (*low_handler)(struct irq_desc *desc),
63 void (*high_handler)(struct irq_desc *desc));
68 void (*low_handler)(struct irq_desc *desc), qe_ic_init()
69 void (*high_handler)(struct irq_desc *desc)) qe_ic_init()
81 static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc) qe_ic_cascade_low_ipic() argument
83 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); qe_ic_cascade_low_ipic()
90 static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc) qe_ic_cascade_high_ipic() argument
92 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); qe_ic_cascade_high_ipic()
99 static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc) qe_ic_cascade_low_mpic() argument
101 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); qe_ic_cascade_low_mpic()
103 struct irq_chip *chip = irq_desc_get_chip(desc); qe_ic_cascade_low_mpic()
108 chip->irq_eoi(&desc->irq_data); qe_ic_cascade_low_mpic()
111 static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc) qe_ic_cascade_high_mpic() argument
113 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); qe_ic_cascade_high_mpic()
115 struct irq_chip *chip = irq_desc_get_chip(desc); qe_ic_cascade_high_mpic()
120 chip->irq_eoi(&desc->irq_data); qe_ic_cascade_high_mpic()
123 static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc) qe_ic_cascade_muxed_mpic() argument
125 struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); qe_ic_cascade_muxed_mpic()
127 struct irq_chip *chip = irq_desc_get_chip(desc); qe_ic_cascade_muxed_mpic()
136 chip->irq_eoi(&desc->irq_data); qe_ic_cascade_muxed_mpic()
67 qe_ic_init(struct device_node *node, unsigned int flags, void (*low_handler)(struct irq_desc *desc), void (*high_handler)(struct irq_desc *desc)) qe_ic_init() argument
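
Each qe_ic_cascade_* helper is a chained-IRQ flow handler: look up the child interrupt pending in the QE controller, forward it through generic_handle_irq(), and (on MPIC parents) EOI the parent. A sketch of that shape using qe_ic_get_low_irq(), the accessor this header declares — the middle lines are reconstructed from the pattern, not quoted from the elided source:

    static void qe_ic_cascade_sketch(struct irq_desc *desc)
    {
            struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
            struct irq_chip *chip = irq_desc_get_chip(desc);
            unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);

            if (cascade_irq != NO_IRQ)
                    generic_handle_irq(cascade_irq);  /* run the child's handler */

            chip->irq_eoi(&desc->irq_data);           /* ack the parent controller */
    }
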
/linux-4.4.14/net/sunrpc/auth_gss/
H A Dgss_krb5_crypto.c63 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; krb5_encrypt() local
80 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length); krb5_encrypt()
97 struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv }; krb5_decrypt() local
113 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length); krb5_decrypt()
122 struct hash_desc *desc = data; checksummer() local
124 return crypto_hash_update(desc, sg, sg->length); checksummer()
155 struct hash_desc desc; make_checksum_hmac_md5() local
188 desc.tfm = md5; make_checksum_hmac_md5()
189 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; make_checksum_hmac_md5()
191 err = crypto_hash_init(&desc); make_checksum_hmac_md5()
195 err = crypto_hash_update(&desc, sg, 4); make_checksum_hmac_md5()
200 err = crypto_hash_update(&desc, sg, hdrlen); make_checksum_hmac_md5()
204 checksummer, &desc); make_checksum_hmac_md5()
207 err = crypto_hash_final(&desc, checksumdata); make_checksum_hmac_md5()
211 desc.tfm = hmac_md5; make_checksum_hmac_md5()
212 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; make_checksum_hmac_md5()
214 err = crypto_hash_init(&desc); make_checksum_hmac_md5()
222 err = crypto_hash_digest(&desc, sg, crypto_hash_digestsize(md5), make_checksum_hmac_md5()
245 struct hash_desc desc; make_checksum() local
262 desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); make_checksum()
263 if (IS_ERR(desc.tfm)) make_checksum()
265 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; make_checksum()
267 checksumlen = crypto_hash_digestsize(desc.tfm); make_checksum()
270 err = crypto_hash_setkey(desc.tfm, cksumkey, make_checksum()
276 err = crypto_hash_init(&desc); make_checksum()
280 err = crypto_hash_update(&desc, sg, hdrlen); make_checksum()
284 checksummer, &desc); make_checksum()
287 err = crypto_hash_final(&desc, checksumdata); make_checksum()
310 crypto_free_hash(desc.tfm); make_checksum()
326 struct hash_desc desc; make_checksum_v2() local
343 desc.tfm = crypto_alloc_hash(kctx->gk5e->cksum_name, 0, make_checksum_v2()
345 if (IS_ERR(desc.tfm)) make_checksum_v2()
347 checksumlen = crypto_hash_digestsize(desc.tfm); make_checksum_v2()
348 desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP; make_checksum_v2()
350 err = crypto_hash_setkey(desc.tfm, cksumkey, kctx->gk5e->keylength); make_checksum_v2()
354 err = crypto_hash_init(&desc); make_checksum_v2()
358 checksummer, &desc); make_checksum_v2()
363 err = crypto_hash_update(&desc, sg, hdrlen); make_checksum_v2()
367 err = crypto_hash_final(&desc, checksumdata); make_checksum_v2()
384 crypto_free_hash(desc.tfm); make_checksum_v2()
390 struct blkcipher_desc desc; member in struct:encryptor_desc
403 struct encryptor_desc *desc = data; encryptor() local
404 struct xdr_buf *outbuf = desc->outbuf; encryptor()
406 int thislen = desc->fraglen + sg->length; encryptor()
412 BUG_ON(desc->fragno > 3); encryptor()
414 page_pos = desc->pos - outbuf->head[0].iov_len; encryptor()
418 in_page = desc->pages[i]; encryptor()
422 sg_set_page(&desc->infrags[desc->fragno], in_page, sg->length, encryptor()
424 sg_set_page(&desc->outfrags[desc->fragno], sg_page(sg), sg->length, encryptor()
426 desc->fragno++; encryptor()
427 desc->fraglen += sg->length; encryptor()
428 desc->pos += sg->length; encryptor()
430 fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); encryptor()
436 sg_mark_end(&desc->infrags[desc->fragno - 1]); encryptor()
437 sg_mark_end(&desc->outfrags[desc->fragno - 1]); encryptor()
439 ret = crypto_blkcipher_encrypt_iv(&desc->desc, desc->outfrags, encryptor()
440 desc->infrags, thislen); encryptor()
444 sg_init_table(desc->infrags, 4); encryptor()
445 sg_init_table(desc->outfrags, 4); encryptor()
448 sg_set_page(&desc->outfrags[0], sg_page(sg), fraglen, encryptor()
450 desc->infrags[0] = desc->outfrags[0]; encryptor()
451 sg_assign_page(&desc->infrags[0], in_page); encryptor()
452 desc->fragno = 1; encryptor()
453 desc->fraglen = fraglen; encryptor()
455 desc->fragno = 0; encryptor()
456 desc->fraglen = 0; encryptor()
466 struct encryptor_desc desc; gss_encrypt_xdr_buf() local
470 memset(desc.iv, 0, sizeof(desc.iv)); gss_encrypt_xdr_buf()
471 desc.desc.tfm = tfm; gss_encrypt_xdr_buf()
472 desc.desc.info = desc.iv; gss_encrypt_xdr_buf()
473 desc.desc.flags = 0; gss_encrypt_xdr_buf()
474 desc.pos = offset; gss_encrypt_xdr_buf()
475 desc.outbuf = buf; gss_encrypt_xdr_buf()
476 desc.pages = pages; gss_encrypt_xdr_buf()
477 desc.fragno = 0; gss_encrypt_xdr_buf()
478 desc.fraglen = 0; gss_encrypt_xdr_buf()
480 sg_init_table(desc.infrags, 4); gss_encrypt_xdr_buf()
481 sg_init_table(desc.outfrags, 4); gss_encrypt_xdr_buf()
483 ret = xdr_process_buf(buf, offset, buf->len - offset, encryptor, &desc); gss_encrypt_xdr_buf()
489 struct blkcipher_desc desc; member in struct:decryptor_desc
498 struct decryptor_desc *desc = data; decryptor() local
499 int thislen = desc->fraglen + sg->length; decryptor()
504 BUG_ON(desc->fragno > 3); decryptor()
505 sg_set_page(&desc->frags[desc->fragno], sg_page(sg), sg->length, decryptor()
507 desc->fragno++; decryptor()
508 desc->fraglen += sg->length; decryptor()
510 fraglen = thislen & (crypto_blkcipher_blocksize(desc->desc.tfm) - 1); decryptor()
516 sg_mark_end(&desc->frags[desc->fragno - 1]); decryptor()
518 ret = crypto_blkcipher_decrypt_iv(&desc->desc, desc->frags, decryptor()
519 desc->frags, thislen); decryptor()
523 sg_init_table(desc->frags, 4); decryptor()
526 sg_set_page(&desc->frags[0], sg_page(sg), fraglen, decryptor()
528 desc->fragno = 1; decryptor()
529 desc->fraglen = fraglen; decryptor()
531 desc->fragno = 0; decryptor()
532 desc->fraglen = 0; decryptor()
541 struct decryptor_desc desc; gss_decrypt_xdr_buf() local
546 memset(desc.iv, 0, sizeof(desc.iv)); gss_decrypt_xdr_buf()
547 desc.desc.tfm = tfm; gss_decrypt_xdr_buf()
548 desc.desc.info = desc.iv; gss_decrypt_xdr_buf()
549 desc.desc.flags = 0; gss_decrypt_xdr_buf()
550 desc.fragno = 0; gss_decrypt_xdr_buf()
551 desc.fraglen = 0; gss_decrypt_xdr_buf()
553 sg_init_table(desc.frags, 4); gss_decrypt_xdr_buf()
555 return xdr_process_buf(buf, offset, buf->len - offset, decryptor, &desc); gss_decrypt_xdr_buf()
602 struct blkcipher_desc desc = { .tfm = cipher, .info = iv }; gss_krb5_cts_crypt() local
629 ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, len); gss_krb5_cts_crypt()
631 ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, len); gss_krb5_cts_crypt()
654 struct encryptor_desc desc; gss_krb5_aes_encrypt() local
719 memset(desc.iv, 0, sizeof(desc.iv)); gss_krb5_aes_encrypt()
722 desc.pos = offset + GSS_KRB5_TOK_HDR_LEN; gss_krb5_aes_encrypt()
723 desc.fragno = 0; gss_krb5_aes_encrypt()
724 desc.fraglen = 0; gss_krb5_aes_encrypt()
725 desc.pages = pages; gss_krb5_aes_encrypt()
726 desc.outbuf = buf; gss_krb5_aes_encrypt()
727 desc.desc.info = desc.iv; gss_krb5_aes_encrypt()
728 desc.desc.flags = 0; gss_krb5_aes_encrypt()
729 desc.desc.tfm = aux_cipher; gss_krb5_aes_encrypt()
731 sg_init_table(desc.infrags, 4); gss_krb5_aes_encrypt()
732 sg_init_table(desc.outfrags, 4); gss_krb5_aes_encrypt()
735 cbcbytes, encryptor, &desc); gss_krb5_aes_encrypt()
743 desc.iv, pages, 1); gss_krb5_aes_encrypt()
771 struct decryptor_desc desc; gss_krb5_aes_decrypt() local
799 memset(desc.iv, 0, sizeof(desc.iv)); gss_krb5_aes_decrypt()
802 desc.fragno = 0; gss_krb5_aes_decrypt()
803 desc.fraglen = 0; gss_krb5_aes_decrypt()
804 desc.desc.info = desc.iv; gss_krb5_aes_decrypt()
805 desc.desc.flags = 0; gss_krb5_aes_decrypt()
806 desc.desc.tfm = aux_cipher; gss_krb5_aes_decrypt()
808 sg_init_table(desc.frags, 4); gss_krb5_aes_decrypt()
810 ret = xdr_process_buf(&subbuf, 0, cbcbytes, decryptor, &desc); gss_krb5_aes_decrypt()
816 ret = gss_krb5_cts_crypt(cipher, &subbuf, cbcbytes, desc.iv, NULL, 0); gss_krb5_aes_decrypt()
857 struct hash_desc desc; krb5_rc4_setup_seq_key() local
872 desc.tfm = hmac; krb5_rc4_setup_seq_key()
873 desc.flags = 0; krb5_rc4_setup_seq_key()
875 err = crypto_hash_init(&desc); krb5_rc4_setup_seq_key()
885 err = crypto_hash_digest(&desc, sg, 4, Kseq); krb5_rc4_setup_seq_key()
896 err = crypto_hash_digest(&desc, sg, 8, Kseq); krb5_rc4_setup_seq_key()
921 struct hash_desc desc; krb5_rc4_setup_enc_key() local
937 desc.tfm = hmac; krb5_rc4_setup_enc_key()
938 desc.flags = 0; krb5_rc4_setup_enc_key()
940 err = crypto_hash_init(&desc); krb5_rc4_setup_enc_key()
953 err = crypto_hash_digest(&desc, sg, 4, Kcrypt); krb5_rc4_setup_enc_key()
969 err = crypto_hash_digest(&desc, sg, 4, Kcrypt); krb5_rc4_setup_enc_key()
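
Everything in gss_krb5_crypto.c is written against the legacy synchronous hash interface (struct hash_desc, crypto_hash_*), which later kernels replaced with ahash/shash; data always arrives as scatterlists, and xdr_process_buf() feeds the checksummer() callback one scatterlist at a time. A condensed sketch of the init/update/final sequence used above — note the buffer must be page-backed (not on the stack) for sg_init_one():

    #include <linux/crypto.h>
    #include <linux/err.h>
    #include <linux/scatterlist.h>

    static int legacy_hash_buffer(const char *alg, const u8 *buf,
                                  unsigned int len, u8 *out)
    {
            struct hash_desc desc;
            struct scatterlist sg[1];
            int err;

            desc.tfm = crypto_alloc_hash(alg, 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(desc.tfm))
                    return PTR_ERR(desc.tfm);
            desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

            sg_init_one(sg, buf, len);

            err = crypto_hash_init(&desc);
            if (!err)
                    err = crypto_hash_update(&desc, sg, len);
            if (!err)
                    err = crypto_hash_final(&desc, out);

            crypto_free_hash(desc.tfm);
            return err;
    }
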
/linux-4.4.14/drivers/net/ethernet/stmicro/stmmac/
H A Dring_mode.c36 struct dma_desc *desc; stmmac_jumbo_frm() local
41 desc = (struct dma_desc *)(priv->dma_etx + entry); stmmac_jumbo_frm()
43 desc = priv->dma_tx + entry; stmmac_jumbo_frm()
54 desc->des2 = dma_map_single(priv->device, skb->data, stmmac_jumbo_frm()
56 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
59 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
60 desc->des3 = desc->des2 + BUF_SIZE_4KiB; stmmac_jumbo_frm()
61 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, stmmac_jumbo_frm()
68 desc = (struct dma_desc *)(priv->dma_etx + entry); stmmac_jumbo_frm()
70 desc = priv->dma_tx + entry; stmmac_jumbo_frm()
72 desc->des2 = dma_map_single(priv->device, skb->data + bmax, stmmac_jumbo_frm()
74 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
76 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
77 desc->des3 = desc->des2 + BUF_SIZE_4KiB; stmmac_jumbo_frm()
78 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, stmmac_jumbo_frm()
81 priv->hw->desc->set_tx_owner(desc); stmmac_jumbo_frm()
83 desc->des2 = dma_map_single(priv->device, skb->data, stmmac_jumbo_frm()
85 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
87 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
88 desc->des3 = desc->des2 + BUF_SIZE_4KiB; stmmac_jumbo_frm()
89 priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum, stmmac_jumbo_frm()
H A Dchain_mode.c36 struct dma_desc *desc = priv->dma_tx + entry; stmmac_jumbo_frm() local
48 desc->des2 = dma_map_single(priv->device, skb->data, stmmac_jumbo_frm()
50 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
52 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
53 priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); stmmac_jumbo_frm()
58 desc = priv->dma_tx + entry; stmmac_jumbo_frm()
61 desc->des2 = dma_map_single(priv->device, stmmac_jumbo_frm()
64 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
66 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
67 priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, stmmac_jumbo_frm()
69 priv->hw->desc->set_tx_owner(desc); stmmac_jumbo_frm()
73 desc->des2 = dma_map_single(priv->device, stmmac_jumbo_frm()
76 if (dma_mapping_error(priv->device, desc->des2)) stmmac_jumbo_frm()
78 priv->tx_skbuff_dma[entry].buf = desc->des2; stmmac_jumbo_frm()
79 priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, stmmac_jumbo_frm()
81 priv->hw->desc->set_tx_owner(desc); stmmac_jumbo_frm()
149 if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc) stmmac_clean_desc3()
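
Both jumbo-frame paths repeat one idiom per fragment: map, check the mapping, record the bus address for later unmap, fill the descriptor, then pass ownership to the DMA engine. A distilled sketch of that idiom using the driver fields shown above (not a drop-in stmmac function):

    static int fill_tx_frag(struct stmmac_priv *priv, struct dma_desc *desc,
                            void *data, unsigned int len, int entry, int csum)
    {
            desc->des2 = dma_map_single(priv->device, data, len, DMA_TO_DEVICE);
            if (dma_mapping_error(priv->device, desc->des2))
                    return -ENOMEM;            /* caller unwinds earlier frags */

            priv->tx_skbuff_dma[entry].buf = desc->des2;  /* kept for unmap */
            priv->hw->desc->prepare_tx_desc(desc, 0, len, csum,
                                            STMMAC_CHAIN_MODE);
            priv->hw->desc->set_tx_owner(desc);           /* hardware owns it now */
            return 0;
    }
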
/linux-4.4.14/drivers/staging/lustre/lustre/lov/
H A Dlproc_lov.c47 struct lov_desc *desc; lov_stripesize_seq_show() local
50 desc = &dev->u.lov.desc; lov_stripesize_seq_show()
51 seq_printf(m, "%llu\n", desc->ld_default_stripe_size); lov_stripesize_seq_show()
60 struct lov_desc *desc; lov_stripesize_seq_write() local
65 desc = &dev->u.lov.desc; lov_stripesize_seq_write()
71 desc->ld_default_stripe_size = val; lov_stripesize_seq_write()
80 struct lov_desc *desc; lov_stripeoffset_seq_show() local
83 desc = &dev->u.lov.desc; lov_stripeoffset_seq_show()
84 seq_printf(m, "%llu\n", desc->ld_default_stripe_offset); lov_stripeoffset_seq_show()
93 struct lov_desc *desc; lov_stripeoffset_seq_write() local
98 desc = &dev->u.lov.desc; lov_stripeoffset_seq_write()
103 desc->ld_default_stripe_offset = val; lov_stripeoffset_seq_write()
112 struct lov_desc *desc; lov_stripetype_seq_show() local
115 desc = &dev->u.lov.desc; lov_stripetype_seq_show()
116 seq_printf(m, "%u\n", desc->ld_pattern); lov_stripetype_seq_show()
125 struct lov_desc *desc; lov_stripetype_seq_write() local
129 desc = &dev->u.lov.desc; lov_stripetype_seq_write()
135 desc->ld_pattern = val; lov_stripetype_seq_write()
144 struct lov_desc *desc; lov_stripecount_seq_show() local
147 desc = &dev->u.lov.desc; lov_stripecount_seq_show()
148 seq_printf(m, "%d\n", (__s16)(desc->ld_default_stripe_count + 1) - 1); lov_stripecount_seq_show()
157 struct lov_desc *desc; lov_stripecount_seq_write() local
161 desc = &dev->u.lov.desc; lov_stripecount_seq_write()
167 desc->ld_default_stripe_count = val; lov_stripecount_seq_write()
178 struct lov_desc *desc; numobd_show() local
180 desc = &dev->u.lov.desc; numobd_show()
181 return sprintf(buf, "%u\n", desc->ld_tgt_count); numobd_show()
190 struct lov_desc *desc; activeobd_show() local
192 desc = &dev->u.lov.desc; activeobd_show()
193 return sprintf(buf, "%u\n", desc->ld_active_tgt_count); activeobd_show()
204 seq_printf(m, "%s\n", lov->desc.ld_uuid.uuid); lov_desc_uuid_seq_show()
215 while (*pos < lov->desc.ld_tgt_count) { lov_tgt_seq_start()
232 while (++*pos < lov->desc.ld_tgt_count) { lov_tgt_seq_next()
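
The lov_*_seq_show/seq_write pairs all have one shape: fetch the lov_desc hanging off the obd_device and print or update a single field. A minimal sketch, assuming the obd_device arrives through m->private as in these callbacks (the function name is illustrative):

    #include <linux/seq_file.h>

    static int lov_tgtcount_seq_show(struct seq_file *m, void *v)
    {
            struct obd_device *dev = m->private;
            struct lov_desc *desc = &dev->u.lov.desc;

            seq_printf(m, "%u\n", desc->ld_tgt_count);
            return 0;
    }
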
/linux-4.4.14/tools/perf/tests/
H A Dbuiltin-test.c25 .desc = "vmlinux symtab matches kallsyms",
29 .desc = "detect openat syscall event",
33 .desc = "detect openat syscall event on all cpus",
37 .desc = "read samples using the mmap interface",
41 .desc = "parse events tests",
45 .desc = "Validate PERF_RECORD_* events & perf_sample fields",
49 .desc = "Test perf pmu format parsing",
53 .desc = "Test dso data read",
57 .desc = "Test dso data cache",
61 .desc = "Test dso data reopen",
65 .desc = "roundtrip evsel->name check",
69 .desc = "Check parsing of sched tracepoints fields",
73 .desc = "Generate and check syscalls:sys_enter_openat event fields",
77 .desc = "struct perf_event_attr setup",
81 .desc = "Test matching and linking multiple hists",
85 .desc = "Try 'import perf' in python, checking link problems",
89 .desc = "Test breakpoint overflow signal handler",
93 .desc = "Test breakpoint overflow sampling",
97 .desc = "Test number of exit event of a simple workload",
101 .desc = "Test software clock events have valid period values",
105 .desc = "Test object code reading",
109 .desc = "Test sample parsing",
113 .desc = "Test using a dummy software event to keep tracking",
117 .desc = "Test parsing with no sample_id_all bit set",
121 .desc = "Test filtering hist entries",
125 .desc = "Test mmap thread lookup",
129 .desc = "Test thread mg sharing",
133 .desc = "Test output sorting of hist entries",
137 .desc = "Test cumulation of child hist entries",
141 .desc = "Test tracking with sched_switch",
145 .desc = "Filter fds with revents mask in a fdarray",
149 .desc = "Add fd to a fdarray, making it autogrow",
153 .desc = "Test kmod_path__parse function",
157 .desc = "Test thread map",
161 .desc = "Test LLVM searching and compiling",
165 .desc = "Test topology in session",
169 .desc = "Test BPF filter",
199 if (strcasestr(test->desc, argv[i])) perf_test__matches()
248 int len = strlen(t->desc); for_each_test()
260 pr_info("%2d: %-*s:", i, width, t->desc); for_each_test()
269 pr_debug("---- end ----\n%s:", t->desc); for_each_test()
295 if (argc > 1 && !strstr(t->desc, argv[1])) for_each_test()
298 pr_info("%2d: %s\n", ++i, t->desc); for_each_test()
/linux-4.4.14/drivers/gpio/
H A Dgpiolib-legacy.c22 struct gpio_desc *desc; gpio_request_one() local
25 desc = gpio_to_desc(gpio); gpio_request_one()
28 if (!desc && gpio_is_valid(gpio)) gpio_request_one()
32 set_bit(FLAG_OPEN_DRAIN, &desc->flags); gpio_request_one()
35 set_bit(FLAG_OPEN_SOURCE, &desc->flags); gpio_request_one()
38 set_bit(FLAG_ACTIVE_LOW, &desc->flags); gpio_request_one()
40 err = gpiod_request(desc, label); gpio_request_one()
45 err = gpiod_direction_input(desc); gpio_request_one()
47 err = gpiod_direction_output_raw(desc, gpio_request_one()
54 err = gpiod_export(desc, flags & GPIOF_EXPORT_CHANGEABLE); gpio_request_one()
62 gpiod_free(desc); gpio_request_one()
69 struct gpio_desc *desc = gpio_to_desc(gpio); gpio_request() local
72 if (!desc && gpio_is_valid(gpio)) gpio_request()
75 return gpiod_request(desc, label); gpio_request()
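
gpio_request_one() folds request, direction, initial level and optional sysfs export into a single call, which is why its error path above must gpiod_free() after any late failure. A caller-side sketch (the GPIO number and label are arbitrary):

    #include <linux/gpio.h>

    static int claim_status_led(void)
    {
            /* request GPIO 42, drive it low initially, label it for debug */
            return gpio_request_one(42, GPIOF_OUT_INIT_LOW, "status-led");
    }

The matching release is gpio_free(42).
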
H A Dgpiolib.h106 int gpiod_request(struct gpio_desc *desc, const char *label);
107 void gpiod_free(struct gpio_desc *desc);
108 int gpiod_hog(struct gpio_desc *desc, const char *name,
114 static int __maybe_unused gpio_chip_hwgpio(const struct gpio_desc *desc) gpio_chip_hwgpio() argument
116 return desc - &desc->chip->desc[0]; gpio_chip_hwgpio()
121 #define gpiod_emerg(desc, fmt, ...) \
122 pr_emerg("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
124 #define gpiod_crit(desc, fmt, ...) \
125 pr_crit("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
127 #define gpiod_err(desc, fmt, ...) \
128 pr_err("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
130 #define gpiod_warn(desc, fmt, ...) \
131 pr_warn("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
133 #define gpiod_info(desc, fmt, ...) \
134 pr_info("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?", \
136 #define gpiod_dbg(desc, fmt, ...) \
137 pr_debug("gpio-%d (%s): " fmt, desc_to_gpio(desc), desc->label ? : "?",\
H A Dgpiolib-sysfs.c19 struct gpio_desc *desc; member in struct:gpiod_data
60 struct gpio_desc *desc = data->desc; direction_show() local
65 gpiod_get_direction(desc); direction_show()
67 test_bit(FLAG_IS_OUT, &desc->flags) direction_show()
79 struct gpio_desc *desc = data->desc; direction_store() local
85 status = gpiod_direction_output_raw(desc, 1); direction_store()
87 status = gpiod_direction_output_raw(desc, 0); direction_store()
89 status = gpiod_direction_input(desc); direction_store()
103 struct gpio_desc *desc = data->desc; value_show() local
108 status = sprintf(buf, "%d\n", gpiod_get_value_cansleep(desc)); value_show()
119 struct gpio_desc *desc = data->desc; value_store() local
124 if (!test_bit(FLAG_IS_OUT, &desc->flags)) { value_store()
131 gpiod_set_value_cansleep(desc, value); value_store()
155 struct gpio_desc *desc = data->desc; gpio_sysfs_request_irq() local
159 data->irq = gpiod_to_irq(desc); gpio_sysfs_request_irq()
169 irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? gpio_sysfs_request_irq()
172 irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ? gpio_sysfs_request_irq()
183 ret = gpiochip_lock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); gpio_sysfs_request_irq()
197 gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); gpio_sysfs_request_irq()
211 struct gpio_desc *desc = data->desc; gpio_sysfs_free_irq() local
215 gpiochip_unlock_as_irq(desc->chip, gpio_chip_hwgpio(desc)); gpio_sysfs_free_irq()
295 struct gpio_desc *desc = data->desc; gpio_sysfs_set_active_low() local
299 if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value) gpio_sysfs_set_active_low()
303 set_bit(FLAG_ACTIVE_LOW, &desc->flags); gpio_sysfs_set_active_low()
305 clear_bit(FLAG_ACTIVE_LOW, &desc->flags); gpio_sysfs_set_active_low()
321 struct gpio_desc *desc = data->desc; active_low_show() local
327 !!test_bit(FLAG_ACTIVE_LOW, &desc->flags)); active_low_show()
358 struct gpio_desc *desc = data->desc; gpio_is_visible() local
366 if (gpiod_to_irq(desc) < 0) gpio_is_visible()
368 if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags)) gpio_is_visible()
446 struct gpio_desc *desc; export_store() local
453 desc = gpio_to_desc(gpio); export_store()
455 if (!desc) { export_store()
465 status = gpiod_request(desc, "sysfs"); export_store()
471 status = gpiod_export(desc, true); export_store()
473 gpiod_free(desc); export_store()
475 set_bit(FLAG_SYSFS, &desc->flags); export_store()
488 struct gpio_desc *desc; unexport_store() local
495 desc = gpio_to_desc(gpio); unexport_store()
497 if (!desc) { unexport_store()
508 if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) { unexport_store()
510 gpiod_free(desc); unexport_store()
547 int gpiod_export(struct gpio_desc *desc, bool direction_may_change) gpiod_export() argument
563 if (!desc) { gpiod_export()
568 chip = desc->chip; gpiod_export()
579 if (!test_bit(FLAG_REQUESTED, &desc->flags) || gpiod_export()
580 test_bit(FLAG_EXPORT, &desc->flags)) { gpiod_export()
582 gpiod_dbg(desc, "%s: unavailable (requested=%d, exported=%d)\n", gpiod_export()
584 test_bit(FLAG_REQUESTED, &desc->flags), gpiod_export()
585 test_bit(FLAG_EXPORT, &desc->flags)); gpiod_export()
597 data->desc = desc; gpiod_export()
604 offset = gpio_chip_hwgpio(desc); gpiod_export()
611 desc_to_gpio(desc)); gpiod_export()
617 set_bit(FLAG_EXPORT, &desc->flags); gpiod_export()
625 gpiod_dbg(desc, "%s: status %d\n", __func__, status); gpiod_export()
630 static int match_export(struct device *dev, const void *desc) match_export() argument
634 return data->desc == desc; match_export()
649 struct gpio_desc *desc) gpiod_export_link()
654 if (!desc) { gpiod_export_link()
659 cdev = class_find_device(&gpio_class, NULL, desc, match_export); gpiod_export_link()
676 void gpiod_unexport(struct gpio_desc *desc) gpiod_unexport() argument
681 if (!desc) { gpiod_unexport()
688 if (!test_bit(FLAG_EXPORT, &desc->flags)) gpiod_unexport()
691 dev = class_find_device(&gpio_class, NULL, desc, match_export); gpiod_unexport()
697 clear_bit(FLAG_EXPORT, &desc->flags); gpiod_unexport()
748 struct gpio_desc *desc; gpiochip_sysfs_unregister() local
763 desc = &chip->desc[i]; gpiochip_sysfs_unregister()
764 if (test_and_clear_bit(FLAG_SYSFS, &desc->flags)) gpiochip_sysfs_unregister()
765 gpiod_free(desc); gpiochip_sysfs_unregister()
648 gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc) gpiod_export_link() argument
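
The sysfs attributes above all dereference data->desc, so a descriptor becomes visible to userspace only once a driver exports it. A kernel-side sketch of the export pair shown in this file (the "status" link name is illustrative):

    /* make an owned GPIO visible under /sys/class/gpio and link it
     * from the owning device's sysfs directory
     */
    static int expose_status_gpio(struct device *dev, struct gpio_desc *desc)
    {
            int err;

            err = gpiod_export(desc, false);  /* false: direction not changeable */
            if (err)
                    return err;
            return gpiod_export_link(dev, "status", desc);
    }
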
H A Dgpiolib.c78 return &chip->desc[gpio - chip->base]; gpio_to_desc()
100 return &chip->desc[hwnum]; gpiochip_get_desc()
108 int desc_to_gpio(const struct gpio_desc *desc) desc_to_gpio() argument
110 return desc->chip->base + (desc - &desc->chip->desc[0]); desc_to_gpio()
117 * @desc: descriptor to return the chip of
119 struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc) gpiod_to_chip() argument
121 return desc ? desc->chip : NULL; gpiod_to_chip()
151 * @desc: GPIO to get the direction of
157 int gpiod_get_direction(struct gpio_desc *desc) gpiod_get_direction() argument
163 chip = gpiod_to_chip(desc); gpiod_get_direction()
164 offset = gpio_chip_hwgpio(desc); gpiod_get_direction()
173 clear_bit(FLAG_IS_OUT, &desc->flags); gpiod_get_direction()
177 set_bit(FLAG_IS_OUT, &desc->flags); gpiod_get_direction()
234 struct gpio_desc *gpio = &chip->desc[i]; gpio_name_to_desc()
277 gc->desc[i].name = gc->names[i]; gpiochip_set_desc_names()
330 struct gpio_desc *desc = &descs[id]; gpiochip_add() local
332 desc->chip = chip; gpiochip_add()
340 desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0; gpiochip_add()
343 chip->desc = descs; gpiochip_add()
382 chip->desc = NULL; gpiochip_add()
402 struct gpio_desc *desc; gpiochip_remove() local
418 desc = &chip->desc[id]; gpiochip_remove()
419 desc->chip = NULL; gpiochip_remove()
420 if (test_bit(FLAG_REQUESTED, &desc->flags)) gpiochip_remove()
429 kfree(chip->desc); gpiochip_remove()
430 chip->desc = NULL; gpiochip_remove()
891 static int __gpiod_request(struct gpio_desc *desc, const char *label) __gpiod_request() argument
893 struct gpio_chip *chip = desc->chip; __gpiod_request()
903 if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) { __gpiod_request()
904 desc_set_label(desc, label ? : "?"); __gpiod_request()
914 status = chip->request(chip, gpio_chip_hwgpio(desc)); __gpiod_request()
918 desc_set_label(desc, NULL); __gpiod_request()
919 clear_bit(FLAG_REQUESTED, &desc->flags); __gpiod_request()
926 gpiod_get_direction(desc); __gpiod_request()
934 clear_bit(FLAG_ACTIVE_LOW, &desc->flags); __gpiod_request()
935 clear_bit(FLAG_OPEN_DRAIN, &desc->flags); __gpiod_request()
936 clear_bit(FLAG_OPEN_SOURCE, &desc->flags); __gpiod_request()
942 int gpiod_request(struct gpio_desc *desc, const char *label) gpiod_request() argument
947 if (!desc) { gpiod_request()
952 chip = desc->chip; gpiod_request()
957 status = __gpiod_request(desc, label); gpiod_request()
964 gpiod_dbg(desc, "%s: status %d\n", __func__, status); gpiod_request()
969 static bool __gpiod_free(struct gpio_desc *desc) __gpiod_free() argument
977 gpiod_unexport(desc); __gpiod_free()
981 chip = desc->chip; __gpiod_free()
982 if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) { __gpiod_free()
986 chip->free(chip, gpio_chip_hwgpio(desc)); __gpiod_free()
989 desc_set_label(desc, NULL); __gpiod_free()
990 clear_bit(FLAG_ACTIVE_LOW, &desc->flags); __gpiod_free()
991 clear_bit(FLAG_REQUESTED, &desc->flags); __gpiod_free()
992 clear_bit(FLAG_OPEN_DRAIN, &desc->flags); __gpiod_free()
993 clear_bit(FLAG_OPEN_SOURCE, &desc->flags); __gpiod_free()
994 clear_bit(FLAG_IS_HOGGED, &desc->flags); __gpiod_free()
1002 void gpiod_free(struct gpio_desc *desc) gpiod_free() argument
1004 if (desc && __gpiod_free(desc)) gpiod_free()
1005 module_put(desc->chip->owner); gpiod_free()
1025 struct gpio_desc *desc; gpiochip_is_requested() local
1030 desc = &chip->desc[offset]; gpiochip_is_requested()
1032 if (test_bit(FLAG_REQUESTED, &desc->flags) == 0) gpiochip_is_requested()
1034 return desc->label; gpiochip_is_requested()
1040 * @desc: GPIO descriptor to request
1052 struct gpio_desc *desc = gpiochip_get_desc(chip, hwnum); gpiochip_request_own_desc() local
1055 if (IS_ERR(desc)) { gpiochip_request_own_desc()
1057 return desc; gpiochip_request_own_desc()
1060 err = __gpiod_request(desc, label); gpiochip_request_own_desc()
1064 return desc; gpiochip_request_own_desc()
1070 * @desc: GPIO descriptor to free
1075 void gpiochip_free_own_desc(struct gpio_desc *desc) gpiochip_free_own_desc() argument
1077 if (desc) gpiochip_free_own_desc()
1078 __gpiod_free(desc); gpiochip_free_own_desc()
1093 * @desc: GPIO to set to input
1100 int gpiod_direction_input(struct gpio_desc *desc) gpiod_direction_input() argument
1105 if (!desc || !desc->chip) { gpiod_direction_input()
1110 chip = desc->chip; gpiod_direction_input()
1112 gpiod_warn(desc, gpiod_direction_input()
1118 status = chip->direction_input(chip, gpio_chip_hwgpio(desc)); gpiod_direction_input()
1120 clear_bit(FLAG_IS_OUT, &desc->flags); gpiod_direction_input()
1122 trace_gpio_direction(desc_to_gpio(desc), 1, status); gpiod_direction_input()
1128 static int _gpiod_direction_output_raw(struct gpio_desc *desc, int value) _gpiod_direction_output_raw() argument
1134 if (test_bit(FLAG_USED_AS_IRQ, &desc->flags)) { _gpiod_direction_output_raw()
1135 gpiod_err(desc, _gpiod_direction_output_raw()
1142 if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags)) _gpiod_direction_output_raw()
1143 return gpiod_direction_input(desc); _gpiod_direction_output_raw()
1146 if (!value && test_bit(FLAG_OPEN_SOURCE, &desc->flags)) _gpiod_direction_output_raw()
1147 return gpiod_direction_input(desc); _gpiod_direction_output_raw()
1149 chip = desc->chip; _gpiod_direction_output_raw()
1151 gpiod_warn(desc, _gpiod_direction_output_raw()
1157 status = chip->direction_output(chip, gpio_chip_hwgpio(desc), value); _gpiod_direction_output_raw()
1159 set_bit(FLAG_IS_OUT, &desc->flags); _gpiod_direction_output_raw()
1160 trace_gpio_value(desc_to_gpio(desc), 0, value); _gpiod_direction_output_raw()
1161 trace_gpio_direction(desc_to_gpio(desc), 0, status); _gpiod_direction_output_raw()
1167 * @desc: GPIO to set to output
1176 int gpiod_direction_output_raw(struct gpio_desc *desc, int value) gpiod_direction_output_raw() argument
1178 if (!desc || !desc->chip) { gpiod_direction_output_raw()
1182 return _gpiod_direction_output_raw(desc, value); gpiod_direction_output_raw()
1188 * @desc: GPIO to set to output
1198 int gpiod_direction_output(struct gpio_desc *desc, int value) gpiod_direction_output() argument
1200 if (!desc || !desc->chip) { gpiod_direction_output()
1204 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_direction_output()
1206 return _gpiod_direction_output_raw(desc, value); gpiod_direction_output()
1218 int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) gpiod_set_debounce() argument
1222 if (!desc || !desc->chip) { gpiod_set_debounce()
1227 chip = desc->chip; gpiod_set_debounce()
1229 gpiod_dbg(desc, gpiod_set_debounce()
1235 return chip->set_debounce(chip, gpio_chip_hwgpio(desc), debounce); gpiod_set_debounce()
1241 * @desc: the gpio descriptor to test
1245 int gpiod_is_active_low(const struct gpio_desc *desc) gpiod_is_active_low() argument
1247 return test_bit(FLAG_ACTIVE_LOW, &desc->flags); gpiod_is_active_low()
1273 static int _gpiod_get_raw_value(const struct gpio_desc *desc) _gpiod_get_raw_value() argument
1279 chip = desc->chip; _gpiod_get_raw_value()
1280 offset = gpio_chip_hwgpio(desc); _gpiod_get_raw_value()
1289 trace_gpio_value(desc_to_gpio(desc), 1, value); _gpiod_get_raw_value()
1295 * @desc: gpio whose value will be returned
1303 int gpiod_get_raw_value(const struct gpio_desc *desc) gpiod_get_raw_value() argument
1305 if (!desc) gpiod_get_raw_value()
1308 WARN_ON(desc->chip->can_sleep); gpiod_get_raw_value()
1309 return _gpiod_get_raw_value(desc); gpiod_get_raw_value()
1315 * @desc: gpio whose value will be returned
1323 int gpiod_get_value(const struct gpio_desc *desc) gpiod_get_value() argument
1326 if (!desc) gpiod_get_value()
1329 WARN_ON(desc->chip->can_sleep); gpiod_get_value()
1331 value = _gpiod_get_raw_value(desc); gpiod_get_value()
1335 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_get_value()
1344 * @desc: gpio descriptor whose state need to be set.
1347 static void _gpio_set_open_drain_value(struct gpio_desc *desc, bool value) _gpio_set_open_drain_value() argument
1350 struct gpio_chip *chip = desc->chip; _gpio_set_open_drain_value()
1351 int offset = gpio_chip_hwgpio(desc); _gpio_set_open_drain_value()
1356 clear_bit(FLAG_IS_OUT, &desc->flags); _gpio_set_open_drain_value()
1360 set_bit(FLAG_IS_OUT, &desc->flags); _gpio_set_open_drain_value()
1362 trace_gpio_direction(desc_to_gpio(desc), value, err); _gpio_set_open_drain_value()
1364 gpiod_err(desc, _gpio_set_open_drain_value()
1371 * @desc: gpio descriptor whose state need to be set.
1374 static void _gpio_set_open_source_value(struct gpio_desc *desc, bool value) _gpio_set_open_source_value() argument
1377 struct gpio_chip *chip = desc->chip; _gpio_set_open_source_value()
1378 int offset = gpio_chip_hwgpio(desc); _gpio_set_open_source_value()
1383 set_bit(FLAG_IS_OUT, &desc->flags); _gpio_set_open_source_value()
1387 clear_bit(FLAG_IS_OUT, &desc->flags); _gpio_set_open_source_value()
1389 trace_gpio_direction(desc_to_gpio(desc), !value, err); _gpio_set_open_source_value()
1391 gpiod_err(desc, _gpio_set_open_source_value()
1396 static void _gpiod_set_raw_value(struct gpio_desc *desc, bool value) _gpiod_set_raw_value() argument
1400 chip = desc->chip; _gpiod_set_raw_value()
1401 trace_gpio_value(desc_to_gpio(desc), 0, value); _gpiod_set_raw_value()
1402 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) _gpiod_set_raw_value()
1403 _gpio_set_open_drain_value(desc, value); _gpiod_set_raw_value()
1404 else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) _gpiod_set_raw_value()
1405 _gpio_set_open_source_value(desc, value); _gpiod_set_raw_value()
1407 chip->set(chip, gpio_chip_hwgpio(desc), value); _gpiod_set_raw_value()
1458 struct gpio_desc *desc = desc_array[i]; gpiod_set_array_value_priv() local
1459 int hwgpio = gpio_chip_hwgpio(desc); gpiod_set_array_value_priv()
1462 if (!raw && test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_set_array_value_priv()
1464 trace_gpio_value(desc_to_gpio(desc), 0, value); gpiod_set_array_value_priv()
1469 if (test_bit(FLAG_OPEN_DRAIN, &desc->flags)) { gpiod_set_array_value_priv()
1470 _gpio_set_open_drain_value(desc, value); gpiod_set_array_value_priv()
1471 } else if (test_bit(FLAG_OPEN_SOURCE, &desc->flags)) { gpiod_set_array_value_priv()
1472 _gpio_set_open_source_value(desc, value); gpiod_set_array_value_priv()
1491 * @desc: gpio whose value will be assigned
1500 void gpiod_set_raw_value(struct gpio_desc *desc, int value) gpiod_set_raw_value() argument
1502 if (!desc) gpiod_set_raw_value()
1505 WARN_ON(desc->chip->can_sleep); gpiod_set_raw_value()
1506 _gpiod_set_raw_value(desc, value); gpiod_set_raw_value()
1512 * @desc: gpio whose value will be assigned
1521 void gpiod_set_value(struct gpio_desc *desc, int value) gpiod_set_value() argument
1523 if (!desc) gpiod_set_value()
1526 WARN_ON(desc->chip->can_sleep); gpiod_set_value()
1527 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_set_value()
1529 _gpiod_set_raw_value(desc, value); gpiod_set_value()
1579 * @desc: gpio to check
1582 int gpiod_cansleep(const struct gpio_desc *desc) gpiod_cansleep() argument
1584 if (!desc) gpiod_cansleep()
1586 return desc->chip->can_sleep; gpiod_cansleep()
1592 * @desc: gpio whose IRQ will be returned (already requested)
1597 int gpiod_to_irq(const struct gpio_desc *desc) gpiod_to_irq() argument
1602 if (!desc) gpiod_to_irq()
1604 chip = desc->chip; gpiod_to_irq()
1605 offset = gpio_chip_hwgpio(desc); gpiod_to_irq()
1623 if (test_bit(FLAG_IS_OUT, &chip->desc[offset].flags)) { gpiochip_lock_as_irq()
1630 set_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); gpiochip_lock_as_irq()
1648 clear_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); gpiochip_unlock_as_irq()
1654 * @desc: gpio whose value will be returned
1661 int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) gpiod_get_raw_value_cansleep() argument
1664 if (!desc) gpiod_get_raw_value_cansleep()
1666 return _gpiod_get_raw_value(desc); gpiod_get_raw_value_cansleep()
1672 * @desc: gpio whose value will be returned
1679 int gpiod_get_value_cansleep(const struct gpio_desc *desc) gpiod_get_value_cansleep() argument
1684 if (!desc) gpiod_get_value_cansleep()
1687 value = _gpiod_get_raw_value(desc); gpiod_get_value_cansleep()
1691 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_get_value_cansleep()
1700 * @desc: gpio whose value will be assigned
1708 void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value) gpiod_set_raw_value_cansleep() argument
1711 if (!desc) gpiod_set_raw_value_cansleep()
1713 _gpiod_set_raw_value(desc, value); gpiod_set_raw_value_cansleep()
1719 * @desc: gpio whose value will be assigned
1727 void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) gpiod_set_value_cansleep() argument
1730 if (!desc) gpiod_set_value_cansleep()
1733 if (test_bit(FLAG_ACTIVE_LOW, &desc->flags)) gpiod_set_value_cansleep()
1735 _gpiod_set_raw_value(desc, value); gpiod_set_value_cansleep()
1817 struct gpio_desc *desc; of_find_gpio() local
1828 desc = of_get_named_gpiod_flags(dev->of_node, prop_name, idx, of_find_gpio()
1830 if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER)) of_find_gpio()
1834 if (IS_ERR(desc)) of_find_gpio()
1835 return desc; of_find_gpio()
1847 return desc; of_find_gpio()
1856 struct gpio_desc *desc; acpi_find_gpio() local
1870 desc = acpi_get_gpiod_by_index(adev, propname, idx, &info); acpi_find_gpio()
1871 if (!IS_ERR(desc) || (PTR_ERR(desc) == -EPROBE_DEFER)) acpi_find_gpio()
1876 if (IS_ERR(desc)) { acpi_find_gpio()
1877 desc = acpi_get_gpiod_by_index(adev, NULL, idx, &info); acpi_find_gpio()
1878 if (IS_ERR(desc)) acpi_find_gpio()
1879 return desc; acpi_find_gpio()
1885 return desc; acpi_find_gpio()
1923 struct gpio_desc *desc = ERR_PTR(-ENOENT); gpiod_find() local
1929 return desc; gpiod_find()
1957 desc = gpiochip_get_desc(chip, p->chip_hwnum); gpiod_find()
1960 return desc; gpiod_find()
1963 return desc; gpiod_find()
2067 * @desc: gpio to be setup
2073 static void gpiod_parse_flags(struct gpio_desc *desc, unsigned long lflags) gpiod_parse_flags() argument
2076 set_bit(FLAG_ACTIVE_LOW, &desc->flags); gpiod_parse_flags()
2078 set_bit(FLAG_OPEN_DRAIN, &desc->flags); gpiod_parse_flags()
2080 set_bit(FLAG_OPEN_SOURCE, &desc->flags); gpiod_parse_flags()
2085 * @desc: gpio whose value will be assigned
2093 static int gpiod_configure_flags(struct gpio_desc *desc, const char *con_id, gpiod_configure_flags() argument
2106 status = gpiod_direction_output(desc, gpiod_configure_flags()
2109 status = gpiod_direction_input(desc); gpiod_configure_flags()
2133 struct gpio_desc *desc = NULL; gpiod_get_index() local
2143 desc = of_find_gpio(dev, con_id, idx, &lookupflags); gpiod_get_index()
2146 desc = acpi_find_gpio(dev, con_id, idx, &lookupflags); gpiod_get_index()
2154 if (!desc || desc == ERR_PTR(-ENOENT)) { gpiod_get_index()
2156 desc = gpiod_find(dev, con_id, idx, &lookupflags); gpiod_get_index()
2159 if (IS_ERR(desc)) { gpiod_get_index()
2161 return desc; gpiod_get_index()
2164 gpiod_parse_flags(desc, lookupflags); gpiod_get_index()
2166 status = gpiod_request(desc, con_id); gpiod_get_index()
2170 status = gpiod_configure_flags(desc, con_id, flags); gpiod_get_index()
2173 gpiod_put(desc); gpiod_get_index()
2177 return desc; gpiod_get_index()
2198 struct gpio_desc *desc = ERR_PTR(-ENODEV); fwnode_get_named_gpiod() local
2209 desc = of_get_named_gpiod_flags(to_of_node(fwnode), propname, 0, fwnode_get_named_gpiod()
2211 if (!IS_ERR(desc)) { fwnode_get_named_gpiod()
2218 desc = acpi_node_get_gpiod(fwnode, propname, 0, &info); fwnode_get_named_gpiod()
2219 if (!IS_ERR(desc)) fwnode_get_named_gpiod()
2223 if (IS_ERR(desc)) fwnode_get_named_gpiod()
2224 return desc; fwnode_get_named_gpiod()
2227 set_bit(FLAG_ACTIVE_LOW, &desc->flags); fwnode_get_named_gpiod()
2231 set_bit(FLAG_OPEN_DRAIN, &desc->flags); fwnode_get_named_gpiod()
2233 set_bit(FLAG_OPEN_SOURCE, &desc->flags); fwnode_get_named_gpiod()
2236 ret = gpiod_request(desc, NULL); fwnode_get_named_gpiod()
2240 return desc; fwnode_get_named_gpiod()
2261 struct gpio_desc *desc; gpiod_get_index_optional() local
2263 desc = gpiod_get_index(dev, con_id, index, flags); gpiod_get_index_optional()
2264 if (IS_ERR(desc)) { gpiod_get_index_optional()
2265 if (PTR_ERR(desc) == -ENOENT) gpiod_get_index_optional()
2269 return desc; gpiod_get_index_optional()
2274 * gpiod_hog - Hog the specified GPIO desc given the provided flags
2275 * @desc: gpio whose value will be assigned
2281 int gpiod_hog(struct gpio_desc *desc, const char *name, gpiod_hog() argument
2289 chip = gpiod_to_chip(desc); gpiod_hog()
2290 hwnum = gpio_chip_hwgpio(desc); gpiod_hog()
2292 gpiod_parse_flags(desc, lflags); gpiod_hog()
2301 status = gpiod_configure_flags(desc, name, dflags); gpiod_hog()
2305 gpiochip_free_own_desc(desc); gpiod_hog()
2310 set_bit(FLAG_IS_HOGGED, &desc->flags); gpiod_hog()
2313 desc_to_gpio(desc), name, gpiod_hog()
2332 if (test_bit(FLAG_IS_HOGGED, &chip->desc[id].flags)) gpiochip_free_hogs()
2333 gpiochip_free_own_desc(&chip->desc[id]); gpiochip_free_hogs()
2353 struct gpio_desc *desc; gpiod_get_array() local
2361 descs = kzalloc(sizeof(*descs) + sizeof(descs->desc[0]) * count, gpiod_get_array()
2367 desc = gpiod_get_index(dev, con_id, descs->ndescs, flags); gpiod_get_array()
2368 if (IS_ERR(desc)) { gpiod_get_array()
2370 return ERR_CAST(desc); gpiod_get_array()
2372 descs->desc[descs->ndescs] = desc; gpiod_get_array()
2405 * @desc: GPIO descriptor to dispose of
2409 void gpiod_put(struct gpio_desc *desc) gpiod_put() argument
2411 gpiod_free(desc); gpiod_put()
2424 gpiod_put(descs->desc[i]); gpiod_put_array()
2436 struct gpio_desc *gdesc = &chip->desc[0]; gpiolib_dbg_show()
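
Consumer-side use of the descriptor API implemented above is compact: gpiod_get() resolves the con_id through the DT/ACPI/board lookup chain seen in of_find_gpio()/acpi_find_gpio()/gpiod_find(), and the _cansleep accessors honour the ACTIVE_LOW flag parsed during lookup. A minimal sketch (the con_id "reset" and the timing are assumptions):

    #include <linux/err.h>
    #include <linux/delay.h>
    #include <linux/gpio/consumer.h>

    static int pulse_reset(struct device *dev)
    {
            struct gpio_desc *reset;

            /* resolves e.g. "reset-gpios" in DT, requested as output-low */
            reset = gpiod_get(dev, "reset", GPIOD_OUT_LOW);
            if (IS_ERR(reset))
                    return PTR_ERR(reset);

            gpiod_set_value_cansleep(reset, 1);  /* logical 1: polarity handled */
            msleep(10);
            gpiod_set_value_cansleep(reset, 0);

            gpiod_put(reset);
            return 0;
    }
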
/linux-4.4.14/include/uapi/linux/usb/
H A Daudio.h159 __le16 wTotalLength; /* includes Unit and Terminal desc. */
257 static inline __u8 uac_mixer_unit_bNrChannels(struct uac_mixer_unit_descriptor *desc) uac_mixer_unit_bNrChannels() argument
259 return desc->baSourceID[desc->bNrInPins]; uac_mixer_unit_bNrChannels()
262 static inline __u32 uac_mixer_unit_wChannelConfig(struct uac_mixer_unit_descriptor *desc, uac_mixer_unit_wChannelConfig() argument
266 return (desc->baSourceID[desc->bNrInPins + 2] << 8) | uac_mixer_unit_wChannelConfig()
267 desc->baSourceID[desc->bNrInPins + 1]; uac_mixer_unit_wChannelConfig()
269 return (desc->baSourceID[desc->bNrInPins + 4] << 24) | uac_mixer_unit_wChannelConfig()
270 (desc->baSourceID[desc->bNrInPins + 3] << 16) | uac_mixer_unit_wChannelConfig()
271 (desc->baSourceID[desc->bNrInPins + 2] << 8) | uac_mixer_unit_wChannelConfig()
272 (desc->baSourceID[desc->bNrInPins + 1]); uac_mixer_unit_wChannelConfig()
275 static inline __u8 uac_mixer_unit_iChannelNames(struct uac_mixer_unit_descriptor *desc, uac_mixer_unit_iChannelNames() argument
279 desc->baSourceID[desc->bNrInPins + 3] : uac_mixer_unit_iChannelNames()
280 desc->baSourceID[desc->bNrInPins + 5]; uac_mixer_unit_iChannelNames()
283 static inline __u8 *uac_mixer_unit_bmControls(struct uac_mixer_unit_descriptor *desc, uac_mixer_unit_bmControls() argument
287 &desc->baSourceID[desc->bNrInPins + 4] : uac_mixer_unit_bmControls()
288 &desc->baSourceID[desc->bNrInPins + 6]; uac_mixer_unit_bmControls()
291 static inline __u8 uac_mixer_unit_iMixer(struct uac_mixer_unit_descriptor *desc) uac_mixer_unit_iMixer() argument
293 __u8 *raw = (__u8 *) desc; uac_mixer_unit_iMixer()
294 return raw[desc->bLength - 1]; uac_mixer_unit_iMixer()
307 static inline __u8 uac_selector_unit_iSelector(struct uac_selector_unit_descriptor *desc) uac_selector_unit_iSelector() argument
309 __u8 *raw = (__u8 *) desc; uac_selector_unit_iSelector()
310 return raw[desc->bLength - 1]; uac_selector_unit_iSelector()
324 static inline __u8 uac_feature_unit_iFeature(struct uac_feature_unit_descriptor *desc) uac_feature_unit_iFeature() argument
326 __u8 *raw = (__u8 *) desc; uac_feature_unit_iFeature()
327 return raw[desc->bLength - 1]; uac_feature_unit_iFeature()
341 static inline __u8 uac_processing_unit_bNrChannels(struct uac_processing_unit_descriptor *desc) uac_processing_unit_bNrChannels() argument
343 return desc->baSourceID[desc->bNrInPins]; uac_processing_unit_bNrChannels()
346 static inline __u32 uac_processing_unit_wChannelConfig(struct uac_processing_unit_descriptor *desc, uac_processing_unit_wChannelConfig() argument
350 return (desc->baSourceID[desc->bNrInPins + 2] << 8) | uac_processing_unit_wChannelConfig()
351 desc->baSourceID[desc->bNrInPins + 1]; uac_processing_unit_wChannelConfig()
353 return (desc->baSourceID[desc->bNrInPins + 4] << 24) | uac_processing_unit_wChannelConfig()
354 (desc->baSourceID[desc->bNrInPins + 3] << 16) | uac_processing_unit_wChannelConfig()
355 (desc->baSourceID[desc->bNrInPins + 2] << 8) | uac_processing_unit_wChannelConfig()
356 (desc->baSourceID[desc->bNrInPins + 1]); uac_processing_unit_wChannelConfig()
359 static inline __u8 uac_processing_unit_iChannelNames(struct uac_processing_unit_descriptor *desc, uac_processing_unit_iChannelNames() argument
363 desc->baSourceID[desc->bNrInPins + 3] : uac_processing_unit_iChannelNames()
364 desc->baSourceID[desc->bNrInPins + 5]; uac_processing_unit_iChannelNames()
367 static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc, uac_processing_unit_bControlSize() argument
371 desc->baSourceID[desc->bNrInPins + 4] : uac_processing_unit_bControlSize()
372 desc->baSourceID[desc->bNrInPins + 6]; uac_processing_unit_bControlSize()
375 static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc, uac_processing_unit_bmControls() argument
379 &desc->baSourceID[desc->bNrInPins + 5] : uac_processing_unit_bmControls()
380 &desc->baSourceID[desc->bNrInPins + 7]; uac_processing_unit_bmControls()
383 static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc, uac_processing_unit_iProcessing() argument
386 __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); uac_processing_unit_iProcessing()
387 return *(uac_processing_unit_bmControls(desc, protocol) uac_processing_unit_iProcessing()
391 static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc, uac_processing_unit_specific() argument
394 __u8 control_size = uac_processing_unit_bControlSize(desc, protocol); uac_processing_unit_specific()
395 return uac_processing_unit_bmControls(desc, protocol) uac_processing_unit_specific()
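
These accessors exist because a mixer/processing unit descriptor is variable-length: everything after baSourceID[] floats with bNrInPins, and UAC2 widens the channel-config word, hence the protocol argument. A sketch of reading one mixer unit (UAC_VERSION_1 assumed; the printout is illustrative):

    #include <linux/kernel.h>
    #include <linux/usb/audio.h>

    static void dump_mixer_unit(struct uac_mixer_unit_descriptor *desc)
    {
            /* every accessor indexes relative to baSourceID[bNrInPins] */
            pr_info("mixer unit %u: %u inputs, %u out channels, config %#x\n",
                    desc->bUnitID, desc->bNrInPins,
                    uac_mixer_unit_bNrChannels(desc),
                    uac_mixer_unit_wChannelConfig(desc, UAC_VERSION_1));
    }
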
/linux-4.4.14/drivers/scsi/snic/
H A Dwq_enet_desc.h51 static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, wq_enet_desc_enc() argument
56 desc->address = cpu_to_le64(address); wq_enet_desc_enc()
57 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); wq_enet_desc_enc()
58 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << wq_enet_desc_enc()
60 desc->header_length_flags = cpu_to_le16( wq_enet_desc_enc()
67 desc->vlan_tag = cpu_to_le16(vlan_tag); wq_enet_desc_enc()
70 static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, wq_enet_desc_dec() argument
75 *address = le64_to_cpu(desc->address); wq_enet_desc_dec()
76 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; wq_enet_desc_dec()
77 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & wq_enet_desc_dec()
79 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> wq_enet_desc_dec()
81 *header_length = le16_to_cpu(desc->header_length_flags) & wq_enet_desc_dec()
83 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
85 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
87 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
89 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
91 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
93 *vlan_tag = le16_to_cpu(desc->vlan_tag); wq_enet_desc_dec()
H A Dvnic_cq_fw.h27 struct snic_fw_req *desc), vnic_cq_fw_service()
31 struct snic_fw_req *desc; vnic_cq_fw_service() local
35 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + vnic_cq_fw_service()
37 snic_color_dec(desc, &color); vnic_cq_fw_service()
41 if ((*q_service)(cq->vdev, cq->index, desc)) vnic_cq_fw_service()
50 desc = (struct snic_fw_req *)((u8 *)cq->ring.descs + vnic_cq_fw_service()
52 snic_color_dec(desc, &color); vnic_cq_fw_service()
24 vnic_cq_fw_service(struct vnic_cq *cq, int (*q_service)(struct vnic_dev *vdev, unsigned int index, struct snic_fw_req *desc), unsigned int work_to_do) vnic_cq_fw_service() argument
/linux-4.4.14/drivers/net/ethernet/cisco/enic/
H A Dwq_enet_desc.h53 static inline void wq_enet_desc_enc(struct wq_enet_desc *desc, wq_enet_desc_enc() argument
58 desc->address = cpu_to_le64(address); wq_enet_desc_enc()
59 desc->length = cpu_to_le16(length & WQ_ENET_LEN_MASK); wq_enet_desc_enc()
60 desc->mss_loopback = cpu_to_le16((mss & WQ_ENET_MSS_MASK) << wq_enet_desc_enc()
62 desc->header_length_flags = cpu_to_le16( wq_enet_desc_enc()
69 desc->vlan_tag = cpu_to_le16(vlan_tag); wq_enet_desc_enc()
72 static inline void wq_enet_desc_dec(struct wq_enet_desc *desc, wq_enet_desc_dec() argument
77 *address = le64_to_cpu(desc->address); wq_enet_desc_dec()
78 *length = le16_to_cpu(desc->length) & WQ_ENET_LEN_MASK; wq_enet_desc_dec()
79 *mss = (le16_to_cpu(desc->mss_loopback) >> WQ_ENET_MSS_SHIFT) & wq_enet_desc_dec()
81 *loopback = (u8)((le16_to_cpu(desc->mss_loopback) >> wq_enet_desc_dec()
83 *header_length = le16_to_cpu(desc->header_length_flags) & wq_enet_desc_dec()
85 *offload_mode = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
87 *eop = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
89 *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
91 *fcoe_encap = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
93 *vlan_tag_insert = (u8)((le16_to_cpu(desc->header_length_flags) >> wq_enet_desc_dec()
95 *vlan_tag = le16_to_cpu(desc->vlan_tag); wq_enet_desc_dec()
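
The snic and enic copies of wq_enet_desc_enc()/dec() are identical apart from file boilerplate: both pack the work-queue descriptor into its little-endian wire layout. A sketch of encoding a single-fragment send with a completion requested (all values illustrative):

    static void post_simple_send(struct wq_enet_desc *desc,
                                 dma_addr_t buf, unsigned int len)
    {
            wq_enet_desc_enc(desc,
                             (u64)buf,                    /* address */
                             (u16)len,                    /* length */
                             0,                           /* mss: no TSO */
                             0,                           /* header_length */
                             WQ_ENET_OFFLOAD_MODE_CSUM,   /* offload_mode */
                             1,                           /* eop: last fragment */
                             1,                           /* cq_entry wanted */
                             0,                           /* fcoe_encap */
                             0,                           /* vlan_tag_insert */
                             0,                           /* vlan_tag */
                             0);                          /* loopback */
    }
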
H A Dcq_enet_desc.h33 static inline void cq_enet_wq_desc_dec(struct cq_enet_wq_desc *desc, cq_enet_wq_desc_dec() argument
36 cq_desc_dec((struct cq_desc *)desc, type, cq_enet_wq_desc_dec()
104 static inline void cq_enet_rq_desc_dec(struct cq_enet_rq_desc *desc, cq_enet_rq_desc_dec() argument
117 cq_desc_dec((struct cq_desc *)desc, type, cq_enet_rq_desc_dec()
120 completed_index_flags = le16_to_cpu(desc->completed_index_flags); cq_enet_rq_desc_dec()
122 le16_to_cpu(desc->q_number_rss_type_flags); cq_enet_rq_desc_dec()
123 bytes_written_flags = le16_to_cpu(desc->bytes_written_flags); cq_enet_rq_desc_dec()
139 *rss_hash = le32_to_cpu(desc->rss_hash); cq_enet_rq_desc_dec()
151 *vlan_tci = le16_to_cpu(desc->vlan); cq_enet_rq_desc_dec()
154 *fcoe_sof = (u8)(le16_to_cpu(desc->checksum_fcoe) & cq_enet_rq_desc_dec()
156 *fcoe_fc_crc_ok = (desc->flags & cq_enet_rq_desc_dec()
158 *fcoe_enc_error = (desc->flags & cq_enet_rq_desc_dec()
160 *fcoe_eof = (u8)((le16_to_cpu(desc->checksum_fcoe) >> cq_enet_rq_desc_dec()
169 *checksum = le16_to_cpu(desc->checksum_fcoe); cq_enet_rq_desc_dec()
173 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) ? 1 : 0; cq_enet_rq_desc_dec()
174 *udp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_UDP) ? 1 : 0; cq_enet_rq_desc_dec()
175 *tcp = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_TCP) ? 1 : 0; cq_enet_rq_desc_dec()
177 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) ? 1 : 0; cq_enet_rq_desc_dec()
178 *ipv6 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV6) ? 1 : 0; cq_enet_rq_desc_dec()
179 *ipv4 = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4) ? 1 : 0; cq_enet_rq_desc_dec()
181 (desc->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT) ? 1 : 0; cq_enet_rq_desc_dec()
182 *fcs_ok = (desc->flags & CQ_ENET_RQ_DESC_FLAGS_FCS_OK) ? 1 : 0; cq_enet_rq_desc_dec()
H A Drq_enet_desc.h43 static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, rq_enet_desc_enc() argument
46 desc->address = cpu_to_le64(address); rq_enet_desc_enc()
47 desc->length_type = cpu_to_le16((length & RQ_ENET_LEN_MASK) | rq_enet_desc_enc()
51 static inline void rq_enet_desc_dec(struct rq_enet_desc *desc, rq_enet_desc_dec() argument
54 *address = le64_to_cpu(desc->address); rq_enet_desc_dec()
55 *length = le16_to_cpu(desc->length_type) & RQ_ENET_LEN_MASK; rq_enet_desc_dec()
56 *type = (u8)((le16_to_cpu(desc->length_type) >> RQ_ENET_LEN_BITS) & rq_enet_desc_dec()
/linux-4.4.14/drivers/dma/
H A Ddma-jz4780.c118 struct jz4780_dma_hwdesc *desc; member in struct:jz4780_dma_desc
134 struct jz4780_dma_desc *desc; member in struct:jz4780_dma_chan
188 struct jz4780_dma_desc *desc; jz4780_dma_desc_alloc() local
193 desc = kzalloc(sizeof(*desc), GFP_NOWAIT); jz4780_dma_desc_alloc()
194 if (!desc) jz4780_dma_desc_alloc()
197 desc->desc = dma_pool_alloc(jzchan->desc_pool, GFP_NOWAIT, jz4780_dma_desc_alloc()
198 &desc->desc_phys); jz4780_dma_desc_alloc()
199 if (!desc->desc) { jz4780_dma_desc_alloc()
200 kfree(desc); jz4780_dma_desc_alloc()
204 desc->count = count; jz4780_dma_desc_alloc()
205 desc->type = type; jz4780_dma_desc_alloc()
206 return desc; jz4780_dma_desc_alloc()
211 struct jz4780_dma_desc *desc = to_jz4780_dma_desc(vdesc); jz4780_dma_desc_free() local
214 dma_pool_free(jzchan->desc_pool, desc->desc, desc->desc_phys); jz4780_dma_desc_free()
215 kfree(desc); jz4780_dma_desc_free()
255 struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len, jz4780_dma_setup_hwdesc()
262 desc->dcm = JZ_DMA_DCM_SAI; jz4780_dma_setup_hwdesc()
263 desc->dsa = addr; jz4780_dma_setup_hwdesc()
264 desc->dta = config->dst_addr; jz4780_dma_setup_hwdesc()
265 desc->drt = jzchan->transfer_type; jz4780_dma_setup_hwdesc()
270 desc->dcm = JZ_DMA_DCM_DAI; jz4780_dma_setup_hwdesc()
271 desc->dsa = config->src_addr; jz4780_dma_setup_hwdesc()
272 desc->dta = addr; jz4780_dma_setup_hwdesc()
273 desc->drt = jzchan->transfer_type; jz4780_dma_setup_hwdesc()
300 desc->dcm |= tsz << JZ_DMA_DCM_TSZ_SHIFT; jz4780_dma_setup_hwdesc()
301 desc->dcm |= width << JZ_DMA_DCM_SP_SHIFT; jz4780_dma_setup_hwdesc()
302 desc->dcm |= width << JZ_DMA_DCM_DP_SHIFT; jz4780_dma_setup_hwdesc()
304 desc->dtc = len >> jzchan->transfer_shift; jz4780_dma_setup_hwdesc()
314 struct jz4780_dma_desc *desc; jz4780_dma_prep_slave_sg() local
318 desc = jz4780_dma_desc_alloc(jzchan, sg_len, DMA_SLAVE); jz4780_dma_prep_slave_sg()
319 if (!desc) jz4780_dma_prep_slave_sg()
323 err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], jz4780_dma_prep_slave_sg()
330 desc->desc[i].dcm |= JZ_DMA_DCM_TIE; jz4780_dma_prep_slave_sg()
334 desc->desc[i].dcm |= JZ_DMA_DCM_LINK; jz4780_dma_prep_slave_sg()
341 desc->desc[i].dtc |= jz4780_dma_prep_slave_sg()
342 (((i + 1) * sizeof(*desc->desc)) >> 4) << 24; jz4780_dma_prep_slave_sg()
346 return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); jz4780_dma_prep_slave_sg()
355 struct jz4780_dma_desc *desc; jz4780_dma_prep_dma_cyclic() local
364 desc = jz4780_dma_desc_alloc(jzchan, periods, DMA_CYCLIC); jz4780_dma_prep_dma_cyclic()
365 if (!desc) jz4780_dma_prep_dma_cyclic()
369 err = jz4780_dma_setup_hwdesc(jzchan, &desc->desc[i], buf_addr, jz4780_dma_prep_dma_cyclic()
382 desc->desc[i].dcm |= JZ_DMA_DCM_TIE | JZ_DMA_DCM_LINK; jz4780_dma_prep_dma_cyclic()
391 desc->desc[i].dtc |= jz4780_dma_prep_dma_cyclic()
392 (((i + 1) * sizeof(*desc->desc)) >> 4) << 24; jz4780_dma_prep_dma_cyclic()
396 return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); jz4780_dma_prep_dma_cyclic()
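[Editor's note] The dtc link encoding visible in both prep paths above is worth unpacking: the high byte of dtc (bits 31:24) carries the offset of the next hardware descriptor from the descriptor-pool base in 16-byte units (hence the >> 4), and the fetch only happens when JZ_DMA_DCM_LINK is set in dcm. A minimal standalone reading of that arithmetic; the struct layout here is purely illustrative (the real jz4780 hwdesc is larger):

        #include <stdint.h>
        #include <stddef.h>

        struct hwdesc_sketch {                  /* illustrative layout only */
                uint32_t dcm, dsa, dta, dtc;
        };

        static uint32_t link_to_next(uint32_t dtc, unsigned int next_index)
        {
                size_t off = next_index * sizeof(struct hwdesc_sketch);
                return dtc | ((uint32_t)(off >> 4) << 24);  /* 16-byte units */
        }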
404 struct jz4780_dma_desc *desc; jz4780_dma_prep_dma_memcpy() local
407 desc = jz4780_dma_desc_alloc(jzchan, 1, DMA_MEMCPY); jz4780_dma_prep_dma_memcpy()
408 if (!desc) jz4780_dma_prep_dma_memcpy()
414 desc->desc[0].dsa = src; jz4780_dma_prep_dma_memcpy()
415 desc->desc[0].dta = dest; jz4780_dma_prep_dma_memcpy()
416 desc->desc[0].drt = JZ_DMA_DRT_AUTO; jz4780_dma_prep_dma_memcpy()
417 desc->desc[0].dcm = JZ_DMA_DCM_TIE | JZ_DMA_DCM_SAI | JZ_DMA_DCM_DAI | jz4780_dma_prep_dma_memcpy()
421 desc->desc[0].dtc = len >> jzchan->transfer_shift; jz4780_dma_prep_dma_memcpy()
423 return vchan_tx_prep(&jzchan->vchan, &desc->vdesc, flags); jz4780_dma_prep_dma_memcpy()
433 if (!jzchan->desc) { jz4780_dma_begin()
440 jzchan->desc = to_jz4780_dma_desc(vdesc); jz4780_dma_begin()
443 if (jzchan->desc->type == DMA_CYCLIC && vdesc->tx.callback) { jz4780_dma_begin()
458 for (i = 0; i < jzchan->desc->count; i++) jz4780_dma_begin()
459 jzchan->desc->desc[i].dcm &= ~JZ_DMA_DCM_LINK; jz4780_dma_begin()
468 (jzchan->curr_hwdesc + 1) % jzchan->desc->count; jz4780_dma_begin()
475 desc_phys = jzchan->desc->desc_phys + jz4780_dma_begin()
476 (jzchan->curr_hwdesc * sizeof(*jzchan->desc->desc)); jz4780_dma_begin()
492 if (vchan_issue_pending(&jzchan->vchan) && !jzchan->desc) jz4780_dma_issue_pending()
509 if (jzchan->desc) { jz4780_dma_terminate_all()
510 jz4780_dma_desc_free(&jzchan->desc->vdesc); jz4780_dma_terminate_all()
511 jzchan->desc = NULL; jz4780_dma_terminate_all()
538 struct jz4780_dma_desc *desc, unsigned int next_sg) jz4780_dma_desc_residue()
546 for (i = next_sg; i < desc->count; i++) jz4780_dma_desc_residue()
547 residue += desc->desc[i].dtc << jzchan->transfer_shift; jz4780_dma_desc_residue()
577 } else if (cookie == jzchan->desc->vdesc.tx.cookie) { jz4780_dma_tx_status()
578 txstate->residue = jz4780_dma_desc_residue(jzchan, jzchan->desc, jz4780_dma_tx_status()
579 (jzchan->curr_hwdesc + 1) % jzchan->desc->count); jz4780_dma_tx_status()
583 if (vdesc && jzchan->desc && vdesc == &jzchan->desc->vdesc jz4780_dma_tx_status()
584 && jzchan->desc->status & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) jz4780_dma_tx_status()
611 if (jzchan->desc) { jz4780_dma_chan_irq()
612 jzchan->desc->status = dcs; jz4780_dma_chan_irq()
615 if (jzchan->desc->type == DMA_CYCLIC) { jz4780_dma_chan_irq()
616 vchan_cyclic_callback(&jzchan->desc->vdesc); jz4780_dma_chan_irq()
618 vchan_cookie_complete(&jzchan->desc->vdesc); jz4780_dma_chan_irq()
619 jzchan->desc = NULL; jz4780_dma_chan_irq()
254 jz4780_dma_setup_hwdesc(struct jz4780_dma_chan *jzchan, struct jz4780_dma_hwdesc *desc, dma_addr_t addr, size_t len, enum dma_transfer_direction direction) jz4780_dma_setup_hwdesc() argument
537 jz4780_dma_desc_residue(struct jz4780_dma_chan *jzchan, struct jz4780_dma_desc *desc, unsigned int next_sg) jz4780_dma_desc_residue() argument
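[Editor's note] The chan_irq hits above show the usual virt-dma completion split: a cyclic transfer stays installed and only fires vchan_cyclic_callback() per period, while a one-shot transfer is retired with vchan_cookie_complete() and the channel's current-descriptor pointer is cleared so the next issue_pending can start a fresh one. A hedged consolidation of that branch, using the helpers exactly as they appear in the hits:

        jzchan->desc->status = dcs;             /* record HW status word */

        if (jzchan->desc->type == DMA_CYCLIC) {
                vchan_cyclic_callback(&jzchan->desc->vdesc);  /* per-period */
        } else {
                vchan_cookie_complete(&jzchan->desc->vdesc);  /* retire */
                jzchan->desc = NULL;            /* channel is idle now */
        }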
H A Didma64.c95 struct idma64_desc *desc = idma64c->desc; idma64_chan_start() local
96 struct idma64_hw_desc *hw = &desc->hw[0]; idma64_chan_start()
124 idma64c->desc = NULL; idma64_start_transfer()
129 idma64c->desc = to_idma64_desc(vdesc); idma64_start_transfer()
144 struct idma64_desc *desc; idma64_chan_irq() local
148 desc = idma64c->desc; idma64_chan_irq()
149 if (desc) { idma64_chan_irq()
152 desc->status = DMA_ERROR; idma64_chan_irq()
155 desc->status = DMA_COMPLETE; idma64_chan_irq()
156 vchan_cookie_complete(&desc->vdesc); idma64_chan_irq()
160 /* idma64_start_transfer() updates idma64c->desc */ idma64_chan_irq()
161 if (idma64c->desc == NULL || desc->status == DMA_ERROR) idma64_chan_irq()
202 struct idma64_desc *desc; idma64_alloc_desc() local
204 desc = kzalloc(sizeof(*desc), GFP_NOWAIT); idma64_alloc_desc()
205 if (!desc) idma64_alloc_desc()
208 desc->hw = kcalloc(ndesc, sizeof(*desc->hw), GFP_NOWAIT); idma64_alloc_desc()
209 if (!desc->hw) { idma64_alloc_desc()
210 kfree(desc); idma64_alloc_desc()
214 return desc; idma64_alloc_desc()
218 struct idma64_desc *desc) idma64_desc_free()
222 if (desc->ndesc) { idma64_desc_free()
223 unsigned int i = desc->ndesc; idma64_desc_free()
226 hw = &desc->hw[--i]; idma64_desc_free()
231 kfree(desc->hw); idma64_desc_free()
232 kfree(desc); idma64_desc_free()
283 struct idma64_desc *desc) idma64_desc_fill()
286 struct idma64_hw_desc *hw = &desc->hw[desc->ndesc - 1]; idma64_desc_fill()
289 unsigned int i = desc->ndesc; idma64_desc_fill()
293 hw = &desc->hw[--i]; idma64_desc_fill()
294 llp = idma64_hw_desc_fill(hw, config, desc->direction, llp); idma64_desc_fill()
295 desc->length += hw->len; idma64_desc_fill()
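[Editor's note] idma64_desc_fill() above builds the hardware linked list tail-first: walking from the last hw descriptor toward the first lets every element be written with the link (llp) of the element after it, the tail getting a terminating zero. A standalone sketch under assumed minimal types; a contiguous array is assumed here for the address arithmetic, whereas the driver tracks per-element bus addresses, and the real idma64_hw_desc_fill() also programs ctl/len fields:

        #include <stdint.h>

        struct hw_sketch {
                uint64_t llp;           /* bus address of next element, 0 = end */
                /* ctl/len fields elided */
        };

        static void fill_chain(struct hw_sketch *hw, unsigned int ndesc,
                               uint64_t pool_base)
        {
                uint64_t llp = 0;                       /* tail terminates */
                unsigned int i = ndesc;

                while (i) {
                        hw[--i].llp = llp;              /* link to successor */
                        llp = pool_base + i * sizeof(*hw); /* for predecessor */
                }
        }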
308 struct idma64_desc *desc; idma64_prep_slave_sg() local
312 desc = idma64_alloc_desc(sg_len); idma64_prep_slave_sg()
313 if (!desc) idma64_prep_slave_sg()
317 struct idma64_hw_desc *hw = &desc->hw[i]; for_each_sg()
322 desc->ndesc = i; for_each_sg()
323 idma64_desc_free(idma64c, desc); for_each_sg()
331 desc->ndesc = sg_len;
332 desc->direction = direction;
333 desc->status = DMA_IN_PROGRESS;
335 idma64_desc_fill(idma64c, desc);
336 return vchan_tx_prep(&idma64c->vchan, &desc->vdesc, flags);
345 if (vchan_issue_pending(&idma64c->vchan) && !idma64c->desc) idma64_issue_pending()
352 struct idma64_desc *desc = idma64c->desc; idma64_active_desc_size() local
354 size_t bytes = desc->length; idma64_active_desc_size()
360 hw = &desc->hw[i]; idma64_active_desc_size()
364 } while (++i < desc->ndesc); idma64_active_desc_size()
370 bytes += desc->hw[--i].len; idma64_active_desc_size()
390 if (idma64c->desc && cookie == idma64c->desc->vdesc.tx.cookie) { idma64_tx_status()
393 status = idma64c->desc->status; idma64_tx_status()
460 if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) { idma64_pause()
462 idma64c->desc->status = DMA_PAUSED; idma64_pause()
475 if (idma64c->desc && idma64c->desc->status == DMA_PAUSED) { idma64_resume()
476 idma64c->desc->status = DMA_IN_PROGRESS; idma64_resume()
493 if (idma64c->desc) { idma64_terminate_all()
494 idma64_vdesc_free(&idma64c->desc->vdesc); idma64_terminate_all()
495 idma64c->desc = NULL; idma64_terminate_all()
217 idma64_desc_free(struct idma64_chan *idma64c, struct idma64_desc *desc) idma64_desc_free() argument
282 idma64_desc_fill(struct idma64_chan *idma64c, struct idma64_desc *desc) idma64_desc_fill() argument
H A Dtxx9dmac.c148 const struct txx9dmac_desc *desc) desc_read_CHAR()
150 return is_dmac64(dc) ? desc->hwdesc.CHAR : desc->hwdesc32.CHAR; desc_read_CHAR()
154 struct txx9dmac_desc *desc, dma_addr_t val) desc_write_CHAR()
157 desc->hwdesc.CHAR = val; desc_write_CHAR()
159 desc->hwdesc32.CHAR = val; desc_write_CHAR()
183 static struct txx9dmac_desc *txx9dmac_last_child(struct txx9dmac_desc *desc) txx9dmac_last_child() argument
185 if (!list_empty(&desc->tx_list)) txx9dmac_last_child()
186 desc = list_entry(desc->tx_list.prev, typeof(*desc), desc_node); txx9dmac_last_child()
187 return desc; txx9dmac_last_child()
196 struct txx9dmac_desc *desc; txx9dmac_desc_alloc() local
198 desc = kzalloc(sizeof(*desc), flags); txx9dmac_desc_alloc()
199 if (!desc) txx9dmac_desc_alloc()
201 INIT_LIST_HEAD(&desc->tx_list); txx9dmac_desc_alloc()
202 dma_async_tx_descriptor_init(&desc->txd, &dc->chan); txx9dmac_desc_alloc()
203 desc->txd.tx_submit = txx9dmac_tx_submit; txx9dmac_desc_alloc()
205 desc->txd.flags = DMA_CTRL_ACK; txx9dmac_desc_alloc()
206 desc->txd.phys = dma_map_single(chan2parent(&dc->chan), &desc->hwdesc, txx9dmac_desc_alloc()
208 return desc; txx9dmac_desc_alloc()
213 struct txx9dmac_desc *desc, *_desc; txx9dmac_desc_get() local
218 list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) { txx9dmac_desc_get()
219 if (async_tx_test_ack(&desc->txd)) { txx9dmac_desc_get()
220 list_del(&desc->desc_node); txx9dmac_desc_get()
221 ret = desc; txx9dmac_desc_get()
224 dev_dbg(chan2dev(&dc->chan), "desc %p not ACKed\n", desc); txx9dmac_desc_get()
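[Editor's note] The desc_get scan above is the classic async_tx recycling gate, and the same shape recurs in pch_dma, ep93xx_dma and at_hdmac further down: a descriptor parked on free_list may still be inspected by the submitting client until it sets DMA_CTRL_ACK, so only entries for which async_tx_test_ack() is true may be unlinked and reused. Consolidated with comments (a sketch, not the full function, which also falls back to allocating a fresh descriptor):

        struct txx9dmac_desc *desc, *_desc, *ret = NULL;

        list_for_each_entry_safe(desc, _desc, &dc->free_list, desc_node) {
                if (async_tx_test_ack(&desc->txd)) {    /* client released it */
                        list_del(&desc->desc_node);     /* safe to recycle */
                        ret = desc;
                        break;
                }
                /* still owned by the client: leave it parked */
        }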
245 struct txx9dmac_desc *desc) txx9dmac_sync_desc_for_cpu()
250 list_for_each_entry(child, &desc->tx_list, desc_node) txx9dmac_sync_desc_for_cpu()
255 desc->txd.phys, ddev->descsize, txx9dmac_sync_desc_for_cpu()
261 * `desc' must not be on any lists.
264 struct txx9dmac_desc *desc) txx9dmac_desc_put()
266 if (desc) { txx9dmac_desc_put()
269 txx9dmac_sync_desc_for_cpu(dc, desc); txx9dmac_desc_put()
272 list_for_each_entry(child, &desc->tx_list, desc_node) txx9dmac_desc_put()
274 "moving child desc %p to freelist\n", txx9dmac_desc_put()
276 list_splice_init(&desc->tx_list, &dc->free_list); txx9dmac_desc_put()
277 dev_vdbg(chan2dev(&dc->chan), "moving desc %p to freelist\n", txx9dmac_desc_put()
278 desc); txx9dmac_desc_put()
279 list_add(&desc->desc_node, &dc->free_list); txx9dmac_desc_put()
404 struct txx9dmac_desc *desc) txx9dmac_descriptor_complete()
408 struct dma_async_tx_descriptor *txd = &desc->txd; txx9dmac_descriptor_complete()
411 txd->cookie, desc); txx9dmac_descriptor_complete() local
417 txx9dmac_sync_desc_for_cpu(dc, desc); txx9dmac_descriptor_complete()
418 list_splice_init(&desc->tx_list, &dc->free_list); txx9dmac_descriptor_complete()
419 list_move(&desc->desc_node, &dc->free_list); txx9dmac_descriptor_complete()
434 struct txx9dmac_desc *desc; txx9dmac_dequeue() local
439 desc = txx9dmac_first_queued(dc); txx9dmac_dequeue()
441 desc_write_CHAR(dc, prev, desc->txd.phys); txx9dmac_dequeue()
446 prev = txx9dmac_last_child(desc); txx9dmac_dequeue()
447 list_move_tail(&desc->desc_node, list); txx9dmac_dequeue()
449 if ((desc->txd.flags & DMA_PREP_INTERRUPT) && txx9dmac_dequeue()
457 struct txx9dmac_desc *desc, *_desc; txx9dmac_complete_all() local
470 list_for_each_entry_safe(desc, _desc, &list, desc_node) txx9dmac_complete_all()
471 txx9dmac_descriptor_complete(dc, desc); txx9dmac_complete_all()
475 struct txx9dmac_hwdesc *desc) txx9dmac_dump_desc()
480 " desc: ch%#llx s%#llx d%#llx c%#x\n", txx9dmac_dump_desc()
481 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR); txx9dmac_dump_desc()
484 " desc: ch%#llx s%#llx d%#llx c%#x" txx9dmac_dump_desc()
486 (u64)desc->CHAR, desc->SAR, desc->DAR, desc->CNTR, txx9dmac_dump_desc()
487 desc->SAIR, desc->DAIR, desc->CCR, desc->CSR); txx9dmac_dump_desc()
490 struct txx9dmac_hwdesc32 *d = (struct txx9dmac_hwdesc32 *)desc; txx9dmac_dump_desc()
493 " desc: ch%#x s%#x d%#x c%#x\n", txx9dmac_dump_desc()
497 " desc: ch%#x s%#x d%#x c%#x" txx9dmac_dump_desc()
546 struct txx9dmac_desc *desc, *_desc; txx9dmac_scan_descriptors() local
571 list_for_each_entry_safe(desc, _desc, &dc->active_list, desc_node) { txx9dmac_scan_descriptors()
572 if (desc_read_CHAR(dc, desc) == chain) { txx9dmac_scan_descriptors()
579 list_for_each_entry(child, &desc->tx_list, desc_node) txx9dmac_scan_descriptors()
591 txx9dmac_descriptor_complete(dc, desc); txx9dmac_scan_descriptors()
699 struct txx9dmac_desc *desc = txd_to_txx9dmac_desc(tx); txx9dmac_tx_submit() local
707 desc->txd.cookie, desc); txx9dmac_tx_submit() local
709 list_add_tail(&desc->desc_node, &dc->queue); txx9dmac_tx_submit()
721 struct txx9dmac_desc *desc; txx9dmac_prep_dma_memcpy() local
755 desc = txx9dmac_desc_get(dc); txx9dmac_prep_dma_memcpy()
756 if (!desc) { txx9dmac_prep_dma_memcpy()
762 desc->hwdesc.SAR = src + offset; txx9dmac_prep_dma_memcpy()
763 desc->hwdesc.DAR = dest + offset; txx9dmac_prep_dma_memcpy()
764 desc->hwdesc.CNTR = xfer_count; txx9dmac_prep_dma_memcpy()
765 txx9dmac_desc_set_nosimple(ddev, desc, 8, 8, txx9dmac_prep_dma_memcpy()
768 desc->hwdesc32.SAR = src + offset; txx9dmac_prep_dma_memcpy()
769 desc->hwdesc32.DAR = dest + offset; txx9dmac_prep_dma_memcpy()
770 desc->hwdesc32.CNTR = xfer_count; txx9dmac_prep_dma_memcpy()
771 txx9dmac_desc_set_nosimple(ddev, desc, 4, 4, txx9dmac_prep_dma_memcpy()
783 first = desc; txx9dmac_prep_dma_memcpy()
785 desc_write_CHAR(dc, prev, desc->txd.phys); txx9dmac_prep_dma_memcpy()
789 list_add_tail(&desc->desc_node, &first->tx_list); txx9dmac_prep_dma_memcpy()
791 prev = desc; txx9dmac_prep_dma_memcpy()
835 struct txx9dmac_desc *desc; for_each_sg() local
839 desc = txx9dmac_desc_get(dc); for_each_sg()
840 if (!desc) { for_each_sg()
849 desc->hwdesc.SAR = mem; for_each_sg()
850 desc->hwdesc.DAR = ds->tx_reg; for_each_sg()
852 desc->hwdesc.SAR = ds->rx_reg; for_each_sg()
853 desc->hwdesc.DAR = mem; for_each_sg()
855 desc->hwdesc.CNTR = sg_dma_len(sg); for_each_sg()
858 desc->hwdesc32.SAR = mem; for_each_sg()
859 desc->hwdesc32.DAR = ds->tx_reg; for_each_sg()
861 desc->hwdesc32.SAR = ds->rx_reg; for_each_sg()
862 desc->hwdesc32.DAR = mem; for_each_sg()
864 desc->hwdesc32.CNTR = sg_dma_len(sg); for_each_sg()
873 txx9dmac_desc_set_nosimple(ddev, desc, sai, dai, for_each_sg()
877 first = desc; for_each_sg()
879 desc_write_CHAR(dc, prev, desc->txd.phys); for_each_sg()
884 list_add_tail(&desc->desc_node, &first->tx_list); for_each_sg()
886 prev = desc; for_each_sg()
907 struct txx9dmac_desc *desc, *_desc; txx9dmac_terminate_all() local
922 list_for_each_entry_safe(desc, _desc, &list, desc_node) txx9dmac_terminate_all()
923 txx9dmac_descriptor_complete(dc, desc); txx9dmac_terminate_all()
950 struct txx9dmac_desc *desc; txx9dmac_chain_dynamic() local
955 desc = list_entry(list.next, struct txx9dmac_desc, desc_node); txx9dmac_chain_dynamic()
956 desc_write_CHAR(dc, prev, desc->txd.phys); txx9dmac_chain_dynamic()
964 channel_write_CHAR(dc, desc->txd.phys); txx9dmac_chain_dynamic()
996 struct txx9dmac_desc *desc; txx9dmac_alloc_chan_resources() local
1031 desc = txx9dmac_desc_alloc(dc, GFP_KERNEL); txx9dmac_alloc_chan_resources()
1032 if (!desc) { txx9dmac_alloc_chan_resources()
1038 txx9dmac_desc_put(dc, desc); txx9dmac_alloc_chan_resources()
1055 struct txx9dmac_desc *desc, *_desc; txx9dmac_free_chan_resources() local
1071 list_for_each_entry_safe(desc, _desc, &list, desc_node) { txx9dmac_free_chan_resources()
1072 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); txx9dmac_free_chan_resources() local
1073 dma_unmap_single(chan2parent(chan), desc->txd.phys, txx9dmac_free_chan_resources()
1075 kfree(desc); txx9dmac_free_chan_resources()
147 desc_read_CHAR(const struct txx9dmac_chan *dc, const struct txx9dmac_desc *desc) desc_read_CHAR() argument
153 desc_write_CHAR(const struct txx9dmac_chan *dc, struct txx9dmac_desc *desc, dma_addr_t val) desc_write_CHAR() argument
244 txx9dmac_sync_desc_for_cpu(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) txx9dmac_sync_desc_for_cpu() argument
263 txx9dmac_desc_put(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) txx9dmac_desc_put() argument
403 txx9dmac_descriptor_complete(struct txx9dmac_chan *dc, struct txx9dmac_desc *desc) txx9dmac_descriptor_complete() argument
474 txx9dmac_dump_desc(struct txx9dmac_chan *dc, struct txx9dmac_hwdesc *desc) txx9dmac_dump_desc() argument
H A Dpch_dma.c88 struct pch_dma_desc_regs desc[MAX_CHAN_NR]; member in struct:pch_dma_regs
327 static void pdc_dostart(struct pch_dma_chan *pd_chan, struct pch_dma_desc* desc) pdc_dostart() argument
336 pd_chan->chan.chan_id, desc->regs.dev_addr); pdc_dostart()
338 pd_chan->chan.chan_id, desc->regs.mem_addr); pdc_dostart()
340 pd_chan->chan.chan_id, desc->regs.size); pdc_dostart()
342 pd_chan->chan.chan_id, desc->regs.next); pdc_dostart()
344 if (list_empty(&desc->tx_list)) { pdc_dostart()
345 channel_writel(pd_chan, DEV_ADDR, desc->regs.dev_addr); pdc_dostart()
346 channel_writel(pd_chan, MEM_ADDR, desc->regs.mem_addr); pdc_dostart()
347 channel_writel(pd_chan, SIZE, desc->regs.size); pdc_dostart()
348 channel_writel(pd_chan, NEXT, desc->regs.next); pdc_dostart()
351 channel_writel(pd_chan, NEXT, desc->txd.phys); pdc_dostart()
357 struct pch_dma_desc *desc) pdc_chain_complete()
359 struct dma_async_tx_descriptor *txd = &desc->txd; pdc_chain_complete()
363 list_splice_init(&desc->tx_list, &pd_chan->free_list); pdc_chain_complete()
364 list_move(&desc->desc_node, &pd_chan->free_list); pdc_chain_complete()
372 struct pch_dma_desc *desc, *_d; pdc_complete_all() local
383 list_for_each_entry_safe(desc, _d, &list, desc_node) pdc_complete_all()
384 pdc_chain_complete(pd_chan, desc); pdc_complete_all()
419 struct pch_dma_desc *desc = to_pd_desc(txd); pd_tx_submit() local
427 list_add_tail(&desc->desc_node, &pd_chan->active_list); pd_tx_submit()
428 pdc_dostart(pd_chan, desc); pd_tx_submit()
430 list_add_tail(&desc->desc_node, &pd_chan->queue); pd_tx_submit()
439 struct pch_dma_desc *desc = NULL; pdc_alloc_desc() local
443 desc = pci_pool_alloc(pd->pool, flags, &addr); pdc_alloc_desc()
444 if (desc) { pdc_alloc_desc()
445 memset(desc, 0, sizeof(struct pch_dma_desc)); pdc_alloc_desc()
446 INIT_LIST_HEAD(&desc->tx_list); pdc_alloc_desc()
447 dma_async_tx_descriptor_init(&desc->txd, chan); pdc_alloc_desc()
448 desc->txd.tx_submit = pd_tx_submit; pdc_alloc_desc()
449 desc->txd.flags = DMA_CTRL_ACK; pdc_alloc_desc()
450 desc->txd.phys = addr; pdc_alloc_desc()
453 return desc; pdc_alloc_desc()
458 struct pch_dma_desc *desc, *_d; pdc_desc_get() local
463 list_for_each_entry_safe(desc, _d, &pd_chan->free_list, desc_node) { pdc_desc_get()
465 if (async_tx_test_ack(&desc->txd)) { pdc_desc_get()
466 list_del(&desc->desc_node); pdc_desc_get()
467 ret = desc; pdc_desc_get()
470 dev_dbg(chan2dev(&pd_chan->chan), "desc %p not ACKed\n", desc); pdc_desc_get()
483 "failed to alloc desc\n"); pdc_desc_get()
491 struct pch_dma_desc *desc) pdc_desc_put()
493 if (desc) { pdc_desc_put()
495 list_splice_init(&desc->tx_list, &pd_chan->free_list); pdc_desc_put()
496 list_add(&desc->desc_node, &pd_chan->free_list); pdc_desc_put()
504 struct pch_dma_desc *desc; pd_alloc_chan_resources() local
517 desc = pdc_alloc_desc(chan, GFP_KERNEL); pd_alloc_chan_resources()
519 if (!desc) { pd_alloc_chan_resources()
525 list_add_tail(&desc->desc_node, &tmp_list); pd_alloc_chan_resources()
543 struct pch_dma_desc *desc, *_d; pd_free_chan_resources() local
555 list_for_each_entry_safe(desc, _d, &tmp_list, desc_node) pd_free_chan_resources()
556 pci_pool_free(pd->pool, desc, desc->txd.phys); pd_free_chan_resources()
587 struct pch_dma_desc *desc = NULL; pd_prep_slave_sg() local
608 desc = pdc_desc_get(pd_chan); for_each_sg()
610 if (!desc) for_each_sg()
613 desc->regs.dev_addr = reg; for_each_sg()
614 desc->regs.mem_addr = sg_dma_address(sg); for_each_sg()
615 desc->regs.size = sg_dma_len(sg); for_each_sg()
616 desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ; for_each_sg()
620 if (desc->regs.size > DMA_DESC_MAX_COUNT_1_BYTE) for_each_sg()
622 desc->regs.size |= DMA_DESC_WIDTH_1_BYTE; for_each_sg()
625 if (desc->regs.size > DMA_DESC_MAX_COUNT_2_BYTES) for_each_sg()
627 desc->regs.size |= DMA_DESC_WIDTH_2_BYTES; for_each_sg()
630 if (desc->regs.size > DMA_DESC_MAX_COUNT_4_BYTES) for_each_sg()
632 desc->regs.size |= DMA_DESC_WIDTH_4_BYTES; for_each_sg()
639 first = desc; for_each_sg()
641 prev->regs.next |= desc->txd.phys; for_each_sg()
642 list_add_tail(&desc->desc_node, &first->tx_list); for_each_sg()
645 prev = desc; for_each_sg()
649 desc->regs.next = DMA_DESC_END_WITH_IRQ;
651 desc->regs.next = DMA_DESC_END_WITHOUT_IRQ;
654 desc->txd.flags = flags;
659 dev_err(chan2dev(chan), "failed to get desc or wrong parameters\n");
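[Editor's note] The width selection above shows that this controller's SIZE register is a packed field: the transfer count lives in the low bits and a width selector in the high bits, so the driver must range-check the count against the per-width maximum before OR-ing the width flag in. A hedged illustration; the limit and flag values are parameters here because the driver's DMA_DESC_* constants are the authoritative ones:

        #include <stdint.h>

        static int pack_size_sketch(uint32_t count, uint32_t max_for_width,
                                    uint32_t width_flag, uint32_t *out)
        {
                if (count > max_for_width)
                        return -1;              /* pick a wider element instead */
                *out = count | width_flag;      /* count low, selector high */
                return 0;
        }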
667 struct pch_dma_desc *desc, *_d; pd_device_terminate_all() local
677 list_for_each_entry_safe(desc, _d, &list, desc_node) pd_device_terminate_all()
678 pdc_chain_complete(pd_chan, desc); pd_device_terminate_all()
905 pd_chan->membase = &regs->desc[i]; pch_dma_probe()
356 pdc_chain_complete(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc) pdc_chain_complete() argument
490 pdc_desc_put(struct pch_dma_chan *pd_chan, struct pch_dma_desc *desc) pdc_desc_put() argument
H A Dimx-dma.c128 struct dma_async_tx_descriptor desc; member in struct:imxdma_desc
162 struct dma_async_tx_descriptor desc; member in struct:imxdma_channel
245 struct imxdma_desc *desc; imxdma_chan_is_doing_cyclic() local
248 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, imxdma_chan_is_doing_cyclic()
250 if (desc->type == IMXDMA_DESC_CYCLIC) imxdma_chan_is_doing_cyclic()
284 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); imxdma_sg_next()
313 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); imxdma_enable_hw()
434 struct imxdma_desc *desc; dma_irq_handle_channel() local
443 desc = list_first_entry(&imxdmac->ld_active, dma_irq_handle_channel()
448 if (desc->sg) { dma_irq_handle_channel()
450 desc->sg = sg_next(desc->sg); dma_irq_handle_channel()
452 if (desc->sg) { dma_irq_handle_channel()
453 imxdma_sg_next(desc); dma_irq_handle_channel()
516 struct imxdma_channel *imxdmac = to_imxdma_chan(d->desc.chan); imxdma_xfer_desc()
622 struct imxdma_desc *desc; imxdma_tasklet() local
632 desc = list_first_entry(&imxdmac->ld_active, struct imxdma_desc, node); imxdma_tasklet()
641 dma_cookie_complete(&desc->desc); imxdma_tasklet()
652 desc = list_first_entry(&imxdmac->ld_queue, struct imxdma_desc, imxdma_tasklet()
655 if (imxdma_xfer_desc(desc) < 0) imxdma_tasklet()
656 dev_warn(imxdma->dev, "%s: channel: %d couldn't xfer desc\n", imxdma_tasklet()
662 if (desc->desc.callback) imxdma_tasklet()
663 desc->desc.callback(desc->desc.callback_param); imxdma_tasklet()
761 struct imxdma_desc *desc; imxdma_alloc_chan_resources() local
763 desc = kzalloc(sizeof(*desc), GFP_KERNEL); imxdma_alloc_chan_resources()
764 if (!desc) imxdma_alloc_chan_resources()
766 __memzero(&desc->desc, sizeof(struct dma_async_tx_descriptor)); imxdma_alloc_chan_resources()
767 dma_async_tx_descriptor_init(&desc->desc, chan); imxdma_alloc_chan_resources()
768 desc->desc.tx_submit = imxdma_tx_submit; imxdma_alloc_chan_resources()
770 desc->desc.flags = DMA_CTRL_ACK; imxdma_alloc_chan_resources()
771 desc->status = DMA_COMPLETE; imxdma_alloc_chan_resources()
773 list_add_tail(&desc->node, &imxdmac->ld_free); imxdma_alloc_chan_resources()
787 struct imxdma_desc *desc, *_desc; imxdma_free_chan_resources() local
798 list_for_each_entry_safe(desc, _desc, &imxdmac->ld_free, node) { imxdma_free_chan_resources()
799 kfree(desc); imxdma_free_chan_resources()
816 struct imxdma_desc *desc; imxdma_prep_slave_sg() local
822 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); imxdma_prep_slave_sg()
843 desc->type = IMXDMA_DESC_SLAVE_SG;
844 desc->sg = sgl;
845 desc->sgcount = sg_len;
846 desc->len = dma_length;
847 desc->direction = direction;
849 desc->src = imxdmac->per_address;
851 desc->dest = imxdmac->per_address;
853 desc->desc.callback = NULL;
854 desc->desc.callback_param = NULL;
856 return &desc->desc;
866 struct imxdma_desc *desc; imxdma_prep_dma_cyclic() local
877 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); imxdma_prep_dma_cyclic()
902 desc->type = IMXDMA_DESC_CYCLIC; imxdma_prep_dma_cyclic()
903 desc->sg = imxdmac->sg_list; imxdma_prep_dma_cyclic()
904 desc->sgcount = periods; imxdma_prep_dma_cyclic()
905 desc->len = IMX_DMA_LENGTH_LOOP; imxdma_prep_dma_cyclic()
906 desc->direction = direction; imxdma_prep_dma_cyclic()
908 desc->src = imxdmac->per_address; imxdma_prep_dma_cyclic()
910 desc->dest = imxdmac->per_address; imxdma_prep_dma_cyclic()
912 desc->desc.callback = NULL; imxdma_prep_dma_cyclic()
913 desc->desc.callback_param = NULL; imxdma_prep_dma_cyclic()
915 return &desc->desc; imxdma_prep_dma_cyclic()
924 struct imxdma_desc *desc; imxdma_prep_dma_memcpy() local
934 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); imxdma_prep_dma_memcpy()
936 desc->type = IMXDMA_DESC_MEMCPY; imxdma_prep_dma_memcpy()
937 desc->src = src; imxdma_prep_dma_memcpy()
938 desc->dest = dest; imxdma_prep_dma_memcpy()
939 desc->len = len; imxdma_prep_dma_memcpy()
940 desc->direction = DMA_MEM_TO_MEM; imxdma_prep_dma_memcpy()
941 desc->config_port = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; imxdma_prep_dma_memcpy()
942 desc->config_mem = IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR; imxdma_prep_dma_memcpy()
943 desc->desc.callback = NULL; imxdma_prep_dma_memcpy()
944 desc->desc.callback_param = NULL; imxdma_prep_dma_memcpy()
946 return &desc->desc; imxdma_prep_dma_memcpy()
955 struct imxdma_desc *desc; imxdma_prep_dma_interleaved() local
971 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); imxdma_prep_dma_interleaved()
973 desc->type = IMXDMA_DESC_INTERLEAVED; imxdma_prep_dma_interleaved()
974 desc->src = xt->src_start; imxdma_prep_dma_interleaved()
975 desc->dest = xt->dst_start; imxdma_prep_dma_interleaved()
976 desc->x = xt->sgl[0].size; imxdma_prep_dma_interleaved()
977 desc->y = xt->numf; imxdma_prep_dma_interleaved()
978 desc->w = xt->sgl[0].icg + desc->x; imxdma_prep_dma_interleaved()
979 desc->len = desc->x * desc->y; imxdma_prep_dma_interleaved()
980 desc->direction = DMA_MEM_TO_MEM; imxdma_prep_dma_interleaved()
981 desc->config_port = IMX_DMA_MEMSIZE_32; imxdma_prep_dma_interleaved()
982 desc->config_mem = IMX_DMA_MEMSIZE_32; imxdma_prep_dma_interleaved()
984 desc->config_mem |= IMX_DMA_TYPE_2D; imxdma_prep_dma_interleaved()
986 desc->config_port |= IMX_DMA_TYPE_2D; imxdma_prep_dma_interleaved()
987 desc->desc.callback = NULL; imxdma_prep_dma_interleaved()
988 desc->desc.callback_param = NULL; imxdma_prep_dma_interleaved()
990 return &desc->desc; imxdma_prep_dma_interleaved()
997 struct imxdma_desc *desc; imxdma_issue_pending() local
1003 desc = list_first_entry(&imxdmac->ld_queue, imxdma_issue_pending()
1006 if (imxdma_xfer_desc(desc) < 0) { imxdma_issue_pending()
H A Dep93xx_dma.c228 * @desc: head of the new active descriptor chain
230 * Sets @desc to be the head of the new active descriptor chain. This is the
237 struct ep93xx_dma_desc *desc) ep93xx_dma_set_active()
241 list_add_tail(&desc->node, &edmac->active); ep93xx_dma_set_active()
243 /* Flatten the @desc->tx_list chain into @edmac->active list */ ep93xx_dma_set_active()
244 while (!list_empty(&desc->tx_list)) { ep93xx_dma_set_active()
245 struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list, ep93xx_dma_set_active()
254 d->txd.callback = desc->txd.callback; ep93xx_dma_set_active()
255 d->txd.callback_param = desc->txd.callback_param; ep93xx_dma_set_active()
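[Editor's note] The set_active hits above implement the flattening that the kernel-doc describes: the chain hanging off desc->tx_list is spliced element-by-element onto the channel's active list, and each chained part inherits the head's callback so the client is notified once, when the whole chain completes. Consolidated from the hits (the list_move_tail step closing each loop iteration is implied by the surrounding source):

        list_add_tail(&desc->node, &edmac->active);

        while (!list_empty(&desc->tx_list)) {
                struct ep93xx_dma_desc *d =
                        list_first_entry(&desc->tx_list,
                                         struct ep93xx_dma_desc, node);

                d->txd.callback = desc->txd.callback;   /* head's callback */
                d->txd.callback_param = desc->txd.callback_param;
                list_move_tail(&d->node, &edmac->active);
        }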
285 struct ep93xx_dma_desc *desc; ep93xx_dma_advance_active() local
292 desc = ep93xx_dma_get_active(edmac); ep93xx_dma_advance_active()
293 if (!desc) ep93xx_dma_advance_active()
300 return !desc->txd.cookie; ep93xx_dma_advance_active()
355 struct ep93xx_dma_desc *desc; m2p_fill_desc() local
358 desc = ep93xx_dma_get_active(edmac); m2p_fill_desc()
359 if (!desc) { m2p_fill_desc()
365 bus_addr = desc->src_addr; m2p_fill_desc()
367 bus_addr = desc->dst_addr; m2p_fill_desc()
370 writel(desc->size, edmac->regs + M2P_MAXCNT0); m2p_fill_desc()
373 writel(desc->size, edmac->regs + M2P_MAXCNT1); m2p_fill_desc()
401 struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac); m2p_hw_interrupt() local
420 desc->txd.cookie, desc->src_addr, desc->dst_addr, m2p_hw_interrupt()
421 desc->size); m2p_hw_interrupt()
516 struct ep93xx_dma_desc *desc; m2m_fill_desc() local
518 desc = ep93xx_dma_get_active(edmac); m2m_fill_desc()
519 if (!desc) { m2m_fill_desc()
525 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0); m2m_fill_desc()
526 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0); m2m_fill_desc()
527 writel(desc->size, edmac->regs + M2M_BCR0); m2m_fill_desc()
529 writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1); m2m_fill_desc()
530 writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1); m2m_fill_desc()
531 writel(desc->size, edmac->regs + M2M_BCR1); m2m_fill_desc()
593 struct ep93xx_dma_desc *desc; m2m_hw_interrupt() local
608 desc = ep93xx_dma_get_active(edmac); m2m_hw_interrupt()
609 last_done = !desc || desc->txd.cookie; m2m_hw_interrupt()
667 struct ep93xx_dma_desc *desc, *_desc; ep93xx_dma_desc_get() local
672 list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) { ep93xx_dma_desc_get()
673 if (async_tx_test_ack(&desc->txd)) { ep93xx_dma_desc_get()
674 list_del_init(&desc->node); ep93xx_dma_desc_get()
677 desc->src_addr = 0; ep93xx_dma_desc_get()
678 desc->dst_addr = 0; ep93xx_dma_desc_get()
679 desc->size = 0; ep93xx_dma_desc_get()
680 desc->complete = false; ep93xx_dma_desc_get()
681 desc->txd.cookie = 0; ep93xx_dma_desc_get()
682 desc->txd.callback = NULL; ep93xx_dma_desc_get()
683 desc->txd.callback_param = NULL; ep93xx_dma_desc_get()
685 ret = desc; ep93xx_dma_desc_get()
694 struct ep93xx_dma_desc *desc) ep93xx_dma_desc_put()
696 if (desc) { ep93xx_dma_desc_put()
700 list_splice_init(&desc->tx_list, &edmac->free_list); ep93xx_dma_desc_put()
701 list_add(&desc->node, &edmac->free_list); ep93xx_dma_desc_put()
739 struct ep93xx_dma_desc *desc, *d; ep93xx_dma_tasklet() local
750 desc = ep93xx_dma_get_active(edmac); ep93xx_dma_tasklet()
751 if (desc) { ep93xx_dma_tasklet()
752 if (desc->complete) { ep93xx_dma_tasklet()
755 dma_cookie_complete(&desc->txd); ep93xx_dma_tasklet()
758 callback = desc->txd.callback; ep93xx_dma_tasklet()
759 callback_param = desc->txd.callback_param; ep93xx_dma_tasklet()
767 list_for_each_entry_safe(desc, d, &list, node) { ep93xx_dma_tasklet()
768 dma_descriptor_unmap(&desc->txd); ep93xx_dma_tasklet()
769 ep93xx_dma_desc_put(edmac, desc); ep93xx_dma_tasklet()
779 struct ep93xx_dma_desc *desc; ep93xx_dma_interrupt() local
784 desc = ep93xx_dma_get_active(edmac); ep93xx_dma_interrupt()
785 if (!desc) { ep93xx_dma_interrupt()
794 desc->complete = true; ep93xx_dma_interrupt()
824 struct ep93xx_dma_desc *desc; ep93xx_dma_tx_submit() local
831 desc = container_of(tx, struct ep93xx_dma_desc, txd); ep93xx_dma_tx_submit()
839 ep93xx_dma_set_active(edmac, desc); ep93xx_dma_tx_submit()
842 list_add_tail(&desc->node, &edmac->queue); ep93xx_dma_tx_submit()
907 struct ep93xx_dma_desc *desc; ep93xx_dma_alloc_chan_resources() local
909 desc = kzalloc(sizeof(*desc), GFP_KERNEL); ep93xx_dma_alloc_chan_resources()
910 if (!desc) { ep93xx_dma_alloc_chan_resources()
915 INIT_LIST_HEAD(&desc->tx_list); ep93xx_dma_alloc_chan_resources()
917 dma_async_tx_descriptor_init(&desc->txd, chan); ep93xx_dma_alloc_chan_resources()
918 desc->txd.flags = DMA_CTRL_ACK; ep93xx_dma_alloc_chan_resources()
919 desc->txd.tx_submit = ep93xx_dma_tx_submit; ep93xx_dma_alloc_chan_resources()
921 ep93xx_dma_desc_put(edmac, desc); ep93xx_dma_alloc_chan_resources()
944 struct ep93xx_dma_desc *desc, *d; ep93xx_dma_free_chan_resources() local
959 list_for_each_entry_safe(desc, d, &list, node) ep93xx_dma_free_chan_resources()
960 kfree(desc); ep93xx_dma_free_chan_resources()
981 struct ep93xx_dma_desc *desc, *first; ep93xx_dma_prep_dma_memcpy() local
986 desc = ep93xx_dma_desc_get(edmac); ep93xx_dma_prep_dma_memcpy()
987 if (!desc) { ep93xx_dma_prep_dma_memcpy()
994 desc->src_addr = src + offset; ep93xx_dma_prep_dma_memcpy()
995 desc->dst_addr = dest + offset; ep93xx_dma_prep_dma_memcpy()
996 desc->size = bytes; ep93xx_dma_prep_dma_memcpy()
999 first = desc; ep93xx_dma_prep_dma_memcpy()
1001 list_add_tail(&desc->node, &first->tx_list); ep93xx_dma_prep_dma_memcpy()
1030 struct ep93xx_dma_desc *desc, *first; ep93xx_dma_prep_slave_sg() local
1056 desc = ep93xx_dma_desc_get(edmac); for_each_sg()
1057 if (!desc) { for_each_sg()
1063 desc->src_addr = sg_dma_address(sg); for_each_sg()
1064 desc->dst_addr = edmac->runtime_addr; for_each_sg()
1066 desc->src_addr = edmac->runtime_addr; for_each_sg()
1067 desc->dst_addr = sg_dma_address(sg); for_each_sg()
1069 desc->size = sg_len; for_each_sg()
1072 first = desc; for_each_sg()
1074 list_add_tail(&desc->node, &first->tx_list); for_each_sg()
1110 struct ep93xx_dma_desc *desc, *first; ep93xx_dma_prep_dma_cyclic() local
1134 desc = ep93xx_dma_desc_get(edmac); ep93xx_dma_prep_dma_cyclic()
1135 if (!desc) { ep93xx_dma_prep_dma_cyclic()
1141 desc->src_addr = dma_addr + offset; ep93xx_dma_prep_dma_cyclic()
1142 desc->dst_addr = edmac->runtime_addr; ep93xx_dma_prep_dma_cyclic()
1144 desc->src_addr = edmac->runtime_addr; ep93xx_dma_prep_dma_cyclic()
1145 desc->dst_addr = dma_addr + offset; ep93xx_dma_prep_dma_cyclic()
1148 desc->size = period_len; ep93xx_dma_prep_dma_cyclic()
1151 first = desc; ep93xx_dma_prep_dma_cyclic()
1153 list_add_tail(&desc->node, &first->tx_list); ep93xx_dma_prep_dma_cyclic()
1175 struct ep93xx_dma_desc *desc, *_d; ep93xx_dma_terminate_all() local
1192 list_for_each_entry_safe(desc, _d, &list, node) ep93xx_dma_terminate_all()
1193 ep93xx_dma_desc_put(edmac, desc); ep93xx_dma_terminate_all()
236 ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac, struct ep93xx_dma_desc *desc) ep93xx_dma_set_active() argument
693 ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac, struct ep93xx_dma_desc *desc) ep93xx_dma_desc_put() argument
H A Dat_xdmac.c345 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first); at_xdmac_start_xfer()
422 struct at_xdmac_desc *desc = txd_to_at_desc(tx); at_xdmac_tx_submit() local
430 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n", at_xdmac_tx_submit()
431 __func__, atchan, desc); at_xdmac_tx_submit()
432 list_add_tail(&desc->xfer_node, &atchan->xfers_list); at_xdmac_tx_submit()
434 at_xdmac_start_xfer(atchan, desc); at_xdmac_tx_submit()
443 struct at_xdmac_desc *desc; at_xdmac_alloc_desc() local
447 desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys); at_xdmac_alloc_desc()
448 if (desc) { at_xdmac_alloc_desc()
449 memset(desc, 0, sizeof(*desc)); at_xdmac_alloc_desc()
450 INIT_LIST_HEAD(&desc->descs_list); at_xdmac_alloc_desc()
451 dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan); at_xdmac_alloc_desc()
452 desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit; at_xdmac_alloc_desc()
453 desc->tx_dma_desc.phys = phys; at_xdmac_alloc_desc()
456 return desc; at_xdmac_alloc_desc()
459 void at_xdmac_init_used_desc(struct at_xdmac_desc *desc) at_xdmac_init_used_desc() argument
461 memset(&desc->lld, 0, sizeof(desc->lld)); at_xdmac_init_used_desc()
462 INIT_LIST_HEAD(&desc->descs_list); at_xdmac_init_used_desc()
463 desc->direction = DMA_TRANS_NONE; at_xdmac_init_used_desc()
464 desc->xfer_size = 0; at_xdmac_init_used_desc()
465 desc->active_xfer = false; at_xdmac_init_used_desc()
471 struct at_xdmac_desc *desc; at_xdmac_get_desc() local
474 desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT); at_xdmac_get_desc()
476 desc = list_first_entry(&atchan->free_descs_list, at_xdmac_get_desc()
478 list_del(&desc->desc_node); at_xdmac_get_desc()
479 at_xdmac_init_used_desc(desc); at_xdmac_get_desc()
482 return desc; at_xdmac_get_desc()
487 struct at_xdmac_desc *desc) at_xdmac_queue_desc()
489 if (!prev || !desc) at_xdmac_queue_desc()
492 prev->lld.mbr_nda = desc->tx_dma_desc.phys; at_xdmac_queue_desc()
500 struct at_xdmac_desc *desc) at_xdmac_increment_block_count()
502 if (!desc) at_xdmac_increment_block_count()
505 desc->lld.mbr_bc++; at_xdmac_increment_block_count()
508 "%s: incrementing the block count of the desc 0x%p\n", at_xdmac_increment_block_count()
509 __func__, desc); at_xdmac_increment_block_count() local
667 struct at_xdmac_desc *desc = NULL; for_each_sg() local
679 desc = at_xdmac_get_desc(atchan); for_each_sg()
680 if (!desc) { for_each_sg()
689 desc->lld.mbr_sa = atchan->sconfig.src_addr; for_each_sg()
690 desc->lld.mbr_da = mem; for_each_sg()
692 desc->lld.mbr_sa = mem; for_each_sg()
693 desc->lld.mbr_da = atchan->sconfig.dst_addr; for_each_sg()
699 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ for_each_sg()
703 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) | for_each_sg()
707 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); for_each_sg()
711 at_xdmac_queue_desc(chan, prev, desc); for_each_sg()
713 prev = desc; for_each_sg()
715 first = desc; for_each_sg()
717 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", for_each_sg()
718 __func__, desc, first); for_each_sg() local
719 list_add_tail(&desc->desc_node, &first->descs_list); for_each_sg()
764 struct at_xdmac_desc *desc = NULL; at_xdmac_prep_dma_cyclic() local
767 desc = at_xdmac_get_desc(atchan); at_xdmac_prep_dma_cyclic()
768 if (!desc) { at_xdmac_prep_dma_cyclic()
777 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n", at_xdmac_prep_dma_cyclic()
778 __func__, desc, &desc->tx_dma_desc.phys); at_xdmac_prep_dma_cyclic() local
781 desc->lld.mbr_sa = atchan->sconfig.src_addr; at_xdmac_prep_dma_cyclic()
782 desc->lld.mbr_da = buf_addr + i * period_len; at_xdmac_prep_dma_cyclic()
784 desc->lld.mbr_sa = buf_addr + i * period_len; at_xdmac_prep_dma_cyclic()
785 desc->lld.mbr_da = atchan->sconfig.dst_addr; at_xdmac_prep_dma_cyclic()
787 desc->lld.mbr_cfg = atchan->cfg; at_xdmac_prep_dma_cyclic()
788 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 at_xdmac_prep_dma_cyclic()
791 | period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg); at_xdmac_prep_dma_cyclic()
795 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); at_xdmac_prep_dma_cyclic()
799 at_xdmac_queue_desc(chan, prev, desc); at_xdmac_prep_dma_cyclic()
801 prev = desc; at_xdmac_prep_dma_cyclic()
803 first = desc; at_xdmac_prep_dma_cyclic()
805 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", at_xdmac_prep_dma_cyclic()
806 __func__, desc, first); at_xdmac_prep_dma_cyclic() local
807 list_add_tail(&desc->desc_node, &first->descs_list); at_xdmac_prep_dma_cyclic()
855 struct at_xdmac_desc *desc; at_xdmac_interleaved_queue_desc() local
884 "Adding items at the end of desc 0x%p\n", prev); at_xdmac_interleaved_queue_desc()
901 desc = at_xdmac_get_desc(atchan); at_xdmac_interleaved_queue_desc()
903 if (!desc) { at_xdmac_interleaved_queue_desc()
912 desc->lld.mbr_sa = src; at_xdmac_interleaved_queue_desc()
913 desc->lld.mbr_da = dst; at_xdmac_interleaved_queue_desc()
914 desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk); at_xdmac_interleaved_queue_desc()
915 desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk); at_xdmac_interleaved_queue_desc()
917 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 at_xdmac_interleaved_queue_desc()
921 desc->lld.mbr_cfg = chan_cc; at_xdmac_interleaved_queue_desc()
925 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, at_xdmac_interleaved_queue_desc()
926 desc->lld.mbr_ubc, desc->lld.mbr_cfg); at_xdmac_interleaved_queue_desc()
930 at_xdmac_queue_desc(chan, prev, desc); at_xdmac_interleaved_queue_desc()
932 return desc; at_xdmac_interleaved_queue_desc()
974 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", at_xdmac_prep_interleaved()
980 struct at_xdmac_desc *desc; at_xdmac_prep_interleaved() local
994 desc = at_xdmac_interleaved_queue_desc(chan, atchan, at_xdmac_prep_interleaved()
998 if (!desc) { at_xdmac_prep_interleaved()
1005 first = desc; at_xdmac_prep_interleaved()
1007 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", at_xdmac_prep_interleaved()
1008 __func__, desc, first); at_xdmac_prep_interleaved() local
1009 list_add_tail(&desc->desc_node, &first->descs_list); at_xdmac_prep_interleaved()
1018 prev = desc; at_xdmac_prep_interleaved()
1064 struct at_xdmac_desc *desc = NULL; at_xdmac_prep_dma_memcpy() local
1069 desc = at_xdmac_get_desc(atchan); at_xdmac_prep_dma_memcpy()
1071 if (!desc) { at_xdmac_prep_dma_memcpy()
1098 desc->lld.mbr_sa = src_addr; at_xdmac_prep_dma_memcpy()
1099 desc->lld.mbr_da = dst_addr; at_xdmac_prep_dma_memcpy()
1100 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 at_xdmac_prep_dma_memcpy()
1104 desc->lld.mbr_cfg = chan_cc; at_xdmac_prep_dma_memcpy()
1108 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); at_xdmac_prep_dma_memcpy()
1112 at_xdmac_queue_desc(chan, prev, desc); at_xdmac_prep_dma_memcpy()
1114 prev = desc; at_xdmac_prep_dma_memcpy()
1116 first = desc; at_xdmac_prep_dma_memcpy()
1118 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", at_xdmac_prep_dma_memcpy()
1119 __func__, desc, first); at_xdmac_prep_dma_memcpy() local
1120 list_add_tail(&desc->desc_node, &first->descs_list); at_xdmac_prep_dma_memcpy()
1135 struct at_xdmac_desc *desc; at_xdmac_memset_create_desc() local
1166 desc = at_xdmac_get_desc(atchan); at_xdmac_memset_create_desc()
1168 if (!desc) { at_xdmac_memset_create_desc()
1177 desc->lld.mbr_da = dst_addr; at_xdmac_memset_create_desc()
1178 desc->lld.mbr_ds = value; at_xdmac_memset_create_desc()
1179 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3 at_xdmac_memset_create_desc()
1183 desc->lld.mbr_cfg = chan_cc; at_xdmac_memset_create_desc()
1187 __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc, at_xdmac_memset_create_desc()
1188 desc->lld.mbr_cfg); at_xdmac_memset_create_desc()
1190 return desc; at_xdmac_memset_create_desc()
1198 struct at_xdmac_desc *desc; at_xdmac_prep_dma_memset() local
1206 desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value); at_xdmac_prep_dma_memset()
1207 list_add_tail(&desc->desc_node, &desc->descs_list); at_xdmac_prep_dma_memset()
1209 desc->tx_dma_desc.cookie = -EBUSY; at_xdmac_prep_dma_memset()
1210 desc->tx_dma_desc.flags = flags; at_xdmac_prep_dma_memset()
1211 desc->xfer_size = len; at_xdmac_prep_dma_memset()
1213 return &desc->tx_dma_desc; at_xdmac_prep_dma_memset()
1222 struct at_xdmac_desc *desc, *pdesc = NULL, at_xdmac_prep_dma_memset_sg() local
1239 desc = at_xdmac_memset_create_desc(chan, atchan, for_each_sg()
1243 if (!desc && first) for_each_sg()
1248 first = desc; for_each_sg()
1279 "%s: desc 0x%p can be merged with desc 0x%p\n", for_each_sg()
1324 list_add_tail(&desc->desc_node, for_each_sg()
1327 "%s: add desc 0x%p to descs_list 0x%p\n", for_each_sg()
1328 __func__, desc, first); for_each_sg() local
1342 "%s: desc 0x%p can be merged with desc 0x%p\n", for_each_sg()
1343 __func__, desc, pdesc); for_each_sg() local
1356 list_add_tail(&desc->desc_node, for_each_sg()
1362 pdesc = desc; for_each_sg()
1384 struct at_xdmac_desc *desc, *_desc; at_xdmac_tx_status() local
1401 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); at_xdmac_tx_status()
1407 if (!desc->active_xfer) { at_xdmac_tx_status()
1408 dma_set_residue(txstate, desc->xfer_size); at_xdmac_tx_status()
1412 residue = desc->xfer_size; at_xdmac_tx_status()
1419 if ((desc->lld.mbr_cfg & mask) == value) { at_xdmac_tx_status()
1466 descs_list = &desc->descs_list; list_for_each_entry_safe()
1467 list_for_each_entry_safe(desc, _desc, descs_list, desc_node) { list_for_each_entry_safe()
1468 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg); list_for_each_entry_safe()
1469 residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth; list_for_each_entry_safe()
1470 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda) list_for_each_entry_safe()
1478 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
1479 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue); local
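[Editor's note] The tx_status hits above amount to a linked-list residue walk, which reads more clearly consolidated: start from the transfer's total size, then subtract each list element's microblock byte count (the low 24 bits of mbr_ubc, scaled by the configured data width) until reaching the element whose next-descriptor address matches the controller's current NDA, i.e. the point the hardware has advanced to. A sketch over the fields shown:

        struct list_head *descs_list = &desc->descs_list;
        size_t residue = desc->xfer_size;

        list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
                unsigned int dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);

                residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
                if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
                        break;          /* hardware already past this one */
        }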
1488 struct at_xdmac_desc *desc) at_xdmac_remove_xfer()
1490 dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); at_xdmac_remove_xfer()
1496 list_del(&desc->xfer_node); at_xdmac_remove_xfer()
1497 list_splice_init(&desc->descs_list, &atchan->free_descs_list); at_xdmac_remove_xfer()
1502 struct at_xdmac_desc *desc; at_xdmac_advance_work() local
1512 desc = list_first_entry(&atchan->xfers_list, at_xdmac_advance_work()
1515 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); at_xdmac_advance_work()
1516 if (!desc->active_xfer) at_xdmac_advance_work()
1517 at_xdmac_start_xfer(atchan, desc); at_xdmac_advance_work()
1525 struct at_xdmac_desc *desc; at_xdmac_handle_cyclic() local
1528 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node); at_xdmac_handle_cyclic()
1529 txd = &desc->tx_dma_desc; at_xdmac_handle_cyclic()
1538 struct at_xdmac_desc *desc; at_xdmac_tasklet() local
1562 desc = list_first_entry(&atchan->xfers_list, at_xdmac_tasklet()
1565 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc); at_xdmac_tasklet()
1566 BUG_ON(!desc->active_xfer); at_xdmac_tasklet()
1568 txd = &desc->tx_dma_desc; at_xdmac_tasklet()
1570 at_xdmac_remove_xfer(atchan, desc); at_xdmac_tasklet()
1711 struct at_xdmac_desc *desc, *_desc; at_xdmac_device_terminate_all() local
1724 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node) at_xdmac_device_terminate_all()
1725 at_xdmac_remove_xfer(atchan, desc); at_xdmac_device_terminate_all()
1737 struct at_xdmac_desc *desc; at_xdmac_alloc_chan_resources() local
1758 desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC); at_xdmac_alloc_chan_resources()
1759 if (!desc) { at_xdmac_alloc_chan_resources()
1764 list_add_tail(&desc->desc_node, &atchan->free_descs_list); at_xdmac_alloc_chan_resources()
1780 struct at_xdmac_desc *desc, *_desc; at_xdmac_free_chan_resources() local
1782 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) { at_xdmac_free_chan_resources()
1783 dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc); at_xdmac_free_chan_resources() local
1784 list_del(&desc->desc_node); at_xdmac_free_chan_resources()
1785 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys); at_xdmac_free_chan_resources()
485 at_xdmac_queue_desc(struct dma_chan *chan, struct at_xdmac_desc *prev, struct at_xdmac_desc *desc) at_xdmac_queue_desc() argument
499 at_xdmac_increment_block_count(struct dma_chan *chan, struct at_xdmac_desc *desc) at_xdmac_increment_block_count() argument
1487 at_xdmac_remove_xfer(struct at_xdmac_chan *atchan, struct at_xdmac_desc *desc) at_xdmac_remove_xfer() argument
H A Dat_hdmac.c110 struct at_desc *desc = NULL; atc_alloc_descriptor() local
114 desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys); atc_alloc_descriptor()
115 if (desc) { atc_alloc_descriptor()
116 memset(desc, 0, sizeof(struct at_desc)); atc_alloc_descriptor()
117 INIT_LIST_HEAD(&desc->tx_list); atc_alloc_descriptor()
118 dma_async_tx_descriptor_init(&desc->txd, chan); atc_alloc_descriptor()
120 desc->txd.flags = DMA_CTRL_ACK; atc_alloc_descriptor()
121 desc->txd.tx_submit = atc_tx_submit; atc_alloc_descriptor()
122 desc->txd.phys = phys; atc_alloc_descriptor()
125 return desc; atc_alloc_descriptor()
134 struct at_desc *desc, *_desc; atc_desc_get() local
141 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { atc_desc_get()
143 if (async_tx_test_ack(&desc->txd)) { atc_desc_get()
144 list_del(&desc->desc_node); atc_desc_get()
145 ret = desc; atc_desc_get()
149 "desc %p not ACKed\n", desc); atc_desc_get()
174 * @desc: descriptor, at the head of a chain, to move to free list
176 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc) atc_desc_put() argument
178 if (desc) { atc_desc_put()
183 list_for_each_entry(child, &desc->tx_list, desc_node) atc_desc_put()
185 "moving child desc %p to freelist\n", atc_desc_put()
187 list_splice_init(&desc->tx_list, &atchan->free_list); atc_desc_put()
189 "moving desc %p to freelist\n", desc); atc_desc_put()
190 list_add(&desc->desc_node, &atchan->free_list); atc_desc_put()
199 * @desc: descriptor to queue
204 struct at_desc *desc) atc_desc_chain()
207 *first = desc; atc_desc_chain()
210 (*prev)->lli.dscr = desc->txd.phys; atc_desc_chain()
212 list_add_tail(&desc->desc_node, atc_desc_chain()
215 *prev = desc; atc_desc_chain()
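[Editor's note] atc_desc_chain() above is the chain builder shared by all the at_hdmac prep paths: "first" is the head eventually handed to the client, "prev" the current tail whose hardware link word (lli.dscr) must be pointed at each newcomer before it becomes the tail itself. Consolidated from the hits:

        static void desc_chain_sketch(struct at_desc **first,
                                      struct at_desc **prev,
                                      struct at_desc *desc)
        {
                if (!*first) {
                        *first = desc;                  /* chain head */
                } else {
                        (*prev)->lli.dscr = desc->txd.phys; /* HW link */
                        list_add_tail(&desc->desc_node,     /* SW bookkeeping */
                                      &(*first)->tx_list);
                }
                *prev = desc;                           /* new tail */
        }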
269 struct at_desc *desc, *_desc; atc_get_desc_by_cookie() local
271 list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) { atc_get_desc_by_cookie()
272 if (desc->txd.cookie == cookie) atc_get_desc_by_cookie()
273 return desc; atc_get_desc_by_cookie()
276 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) { atc_get_desc_by_cookie()
277 if (desc->txd.cookie == cookie) atc_get_desc_by_cookie()
278 return desc; atc_get_desc_by_cookie()
314 struct at_desc *desc; atc_get_bytes_left() local
323 desc = atc_get_desc_by_cookie(atchan, cookie); atc_get_bytes_left()
324 if (desc == NULL) atc_get_bytes_left()
326 else if (desc != desc_first) atc_get_bytes_left()
327 return desc->total_len; atc_get_bytes_left()
420 list_for_each_entry(desc, &desc_first->tx_list, desc_node) { atc_get_bytes_left()
421 if (desc->lli.dscr == dscr) atc_get_bytes_left()
424 ret -= desc->len; atc_get_bytes_left()
444 * @desc: descriptor at the head of the chain we want do complete
448 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) atc_chain_complete() argument
450 struct dma_async_tx_descriptor *txd = &desc->txd; atc_chain_complete()
461 if (desc->memset_buffer) { atc_chain_complete()
462 dma_pool_free(atdma->memset_pool, desc->memset_vaddr, atc_chain_complete()
463 desc->memset_paddr); atc_chain_complete()
464 desc->memset_buffer = false; atc_chain_complete()
468 list_splice_init(&desc->tx_list, &atchan->free_list); atc_chain_complete()
470 list_move(&desc->desc_node, &atchan->free_list); atc_chain_complete()
501 struct at_desc *desc, *_desc; atc_complete_all() local
517 list_for_each_entry_safe(desc, _desc, &list, desc_node) atc_complete_all()
518 atc_chain_complete(atchan, desc); atc_complete_all()
675 * @desc: descriptor at the head of the transaction chain
683 struct at_desc *desc = txd_to_at_desc(tx); atc_tx_submit() local
693 desc->txd.cookie); atc_tx_submit()
694 atc_dostart(atchan, desc); atc_tx_submit()
695 list_add_tail(&desc->desc_node, &atchan->active_list); atc_tx_submit()
698 desc->txd.cookie); atc_tx_submit()
699 list_add_tail(&desc->desc_node, &atchan->queue); atc_tx_submit()
720 struct at_desc *desc = NULL; atc_prep_dma_interleaved() local
777 desc = atc_desc_get(atchan); atc_prep_dma_interleaved()
778 if (!desc) { atc_prep_dma_interleaved()
784 desc->lli.saddr = xt->src_start; atc_prep_dma_interleaved()
785 desc->lli.daddr = xt->dst_start; atc_prep_dma_interleaved()
786 desc->lli.ctrla = ctrla | xfer_count; atc_prep_dma_interleaved()
787 desc->lli.ctrlb = ctrlb; atc_prep_dma_interleaved()
789 desc->boundary = first->size >> dwidth; atc_prep_dma_interleaved()
790 desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1; atc_prep_dma_interleaved()
791 desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1; atc_prep_dma_interleaved()
793 desc->txd.cookie = -EBUSY; atc_prep_dma_interleaved()
794 desc->total_len = desc->len = len; atc_prep_dma_interleaved()
797 set_desc_eol(desc); atc_prep_dma_interleaved()
799 desc->txd.flags = flags; /* client is in control of this ack */ atc_prep_dma_interleaved()
801 return &desc->txd; atc_prep_dma_interleaved()
817 struct at_desc *desc = NULL; atc_prep_dma_memcpy() local
853 desc = atc_desc_get(atchan); atc_prep_dma_memcpy()
854 if (!desc) atc_prep_dma_memcpy()
857 desc->lli.saddr = src + offset; atc_prep_dma_memcpy()
858 desc->lli.daddr = dest + offset; atc_prep_dma_memcpy()
859 desc->lli.ctrla = ctrla | xfer_count; atc_prep_dma_memcpy()
860 desc->lli.ctrlb = ctrlb; atc_prep_dma_memcpy()
862 desc->txd.cookie = 0; atc_prep_dma_memcpy()
863 desc->len = xfer_count << src_width; atc_prep_dma_memcpy()
865 atc_desc_chain(&first, &prev, desc); atc_prep_dma_memcpy()
873 set_desc_eol(desc); atc_prep_dma_memcpy()
890 struct at_desc *desc; atc_create_memset_desc() local
906 desc = atc_desc_get(atchan); atc_create_memset_desc()
907 if (!desc) { atc_create_memset_desc()
913 desc->lli.saddr = psrc; atc_create_memset_desc()
914 desc->lli.daddr = pdst; atc_create_memset_desc()
915 desc->lli.ctrla = ctrla | xfer_count; atc_create_memset_desc()
916 desc->lli.ctrlb = ctrlb; atc_create_memset_desc()
918 desc->txd.cookie = 0; atc_create_memset_desc()
919 desc->len = len; atc_create_memset_desc()
921 return desc; atc_create_memset_desc()
937 struct at_desc *desc; atc_prep_dma_memset() local
963 desc = atc_create_memset_desc(chan, paddr, dest, len); atc_prep_dma_memset()
964 if (!desc) { atc_prep_dma_memset()
970 desc->memset_paddr = paddr; atc_prep_dma_memset()
971 desc->memset_vaddr = vaddr; atc_prep_dma_memset()
972 desc->memset_buffer = true; atc_prep_dma_memset()
974 desc->txd.cookie = -EBUSY; atc_prep_dma_memset()
975 desc->total_len = len; atc_prep_dma_memset()
978 set_desc_eol(desc); atc_prep_dma_memset()
980 desc->txd.flags = flags; atc_prep_dma_memset()
982 return &desc->txd; atc_prep_dma_memset()
997 struct at_desc *desc = NULL, *first = NULL, *prev = NULL; atc_prep_dma_memset_sg() local
1034 desc = atc_create_memset_desc(chan, paddr, dest, len); for_each_sg()
1035 if (!desc) for_each_sg()
1038 atc_desc_chain(&first, &prev, desc); for_each_sg()
1047 desc->memset_paddr = paddr;
1048 desc->memset_vaddr = vaddr;
1049 desc->memset_buffer = true;
1055 set_desc_eol(desc);
1118 struct at_desc *desc; for_each_sg() local
1122 desc = atc_desc_get(atchan); for_each_sg()
1123 if (!desc) for_each_sg()
1137 desc->lli.saddr = mem; for_each_sg()
1138 desc->lli.daddr = reg; for_each_sg()
1139 desc->lli.ctrla = ctrla for_each_sg()
1142 desc->lli.ctrlb = ctrlb; for_each_sg()
1143 desc->len = len; for_each_sg()
1145 atc_desc_chain(&first, &prev, desc); for_each_sg()
1159 struct at_desc *desc; for_each_sg() local
1163 desc = atc_desc_get(atchan); for_each_sg()
1164 if (!desc) for_each_sg()
1178 desc->lli.saddr = reg; for_each_sg()
1179 desc->lli.daddr = mem; for_each_sg()
1180 desc->lli.ctrla = ctrla for_each_sg()
1183 desc->lli.ctrlb = ctrlb; for_each_sg()
1184 desc->len = len; for_each_sg()
1186 atc_desc_chain(&first, &prev, desc); for_each_sg()
1229 struct at_desc *desc = NULL; atc_prep_dma_sg() local
1306 desc = atc_desc_get(atchan); atc_prep_dma_sg()
1307 if (!desc) atc_prep_dma_sg()
1310 desc->lli.saddr = src; atc_prep_dma_sg()
1311 desc->lli.daddr = dst; atc_prep_dma_sg()
1312 desc->lli.ctrla = ctrla | xfer_count; atc_prep_dma_sg()
1313 desc->lli.ctrlb = ctrlb; atc_prep_dma_sg()
1315 desc->txd.cookie = 0; atc_prep_dma_sg()
1316 desc->len = len; atc_prep_dma_sg()
1318 atc_desc_chain(&first, &prev, desc); atc_prep_dma_sg()
1334 set_desc_eol(desc); atc_prep_dma_sg()
1370 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc, atc_dma_cyclic_fill_desc() argument
1388 desc->lli.saddr = buf_addr + (period_len * period_index); atc_dma_cyclic_fill_desc()
1389 desc->lli.daddr = sconfig->dst_addr; atc_dma_cyclic_fill_desc()
1390 desc->lli.ctrla = ctrla; atc_dma_cyclic_fill_desc()
1391 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED atc_dma_cyclic_fill_desc()
1396 desc->len = period_len; atc_dma_cyclic_fill_desc()
1400 desc->lli.saddr = sconfig->src_addr; atc_dma_cyclic_fill_desc()
1401 desc->lli.daddr = buf_addr + (period_len * period_index); atc_dma_cyclic_fill_desc()
1402 desc->lli.ctrla = ctrla; atc_dma_cyclic_fill_desc()
1403 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR atc_dma_cyclic_fill_desc()
1408 desc->len = period_len; atc_dma_cyclic_fill_desc()
1472 struct at_desc *desc; atc_prep_dma_cyclic() local
1474 desc = atc_desc_get(atchan); atc_prep_dma_cyclic()
1475 if (!desc) atc_prep_dma_cyclic()
1478 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr, atc_prep_dma_cyclic()
1482 atc_desc_chain(&first, &prev, desc); atc_prep_dma_cyclic()
1571 struct at_desc *desc, *_desc; atc_terminate_all() local
1598 list_for_each_entry_safe(desc, _desc, &list, desc_node) atc_terminate_all()
1599 atc_chain_complete(atchan, desc); atc_terminate_all()
1691 struct at_desc *desc; atc_alloc_chan_resources() local
1728 desc = atc_alloc_descriptor(chan, GFP_KERNEL); atc_alloc_chan_resources()
1729 if (!desc) { atc_alloc_chan_resources()
1734 list_add_tail(&desc->desc_node, &tmp_list); atc_alloc_chan_resources()
1761 struct at_desc *desc, *_desc; atc_free_chan_resources() local
1772 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) { atc_free_chan_resources()
1773 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); atc_free_chan_resources() local
1774 list_del(&desc->desc_node); atc_free_chan_resources()
1776 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys); atc_free_chan_resources()
203 atc_desc_chain(struct at_desc **first, struct at_desc **prev, struct at_desc *desc) atc_desc_chain() argument
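The at_hdmac excerpts above all follow one pattern: fetch a descriptor, fill its lli hardware fields, then link it with atc_desc_chain(&first, &prev, desc). The sketch below shows the intent of that chaining helper as read from the call sites; struct at_desc is reduced to the fields the excerpts use, and txd_phys is an assumed stand-in for the descriptor's txd.phys DMA address.

	#include <linux/list.h>
	#include <linux/types.h>

	struct at_lli { u32 saddr, daddr, ctrla, ctrlb, dscr; };

	struct at_desc {
		struct at_lli	 lli;		/* hardware linked-list item */
		struct list_head desc_node;
		struct list_head tx_list;	/* children of the first desc */
		dma_addr_t	 txd_phys;	/* stand-in for txd.phys */
	};

	/* Chain 'desc' after '*prev'; the first descriptor anchors the list. */
	static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
				   struct at_desc *desc)
	{
		if (!(*first)) {
			*first = desc;
		} else {
			/* Tell the previous hardware LLI where the next item lives. */
			(*prev)->lli.dscr = desc->txd_phys;
			list_add_tail(&desc->desc_node, &(*first)->tx_list);
		}
		*prev = desc;
	}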
H A Dfsl_raid.c88 struct fsl_re_desc *desc; fsl_re_tx_submit() local
93 desc = to_fsl_re_dma_desc(tx); fsl_re_tx_submit()
98 list_add_tail(&desc->node, &re_chan->submit_q); fsl_re_tx_submit()
109 struct fsl_re_desc *desc, *_desc; fsl_re_issue_pending() local
118 list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) { fsl_re_issue_pending()
122 list_move_tail(&desc->node, &re_chan->active_q); fsl_re_issue_pending()
125 &desc->hwdesc, sizeof(struct fsl_re_hw_desc)); fsl_re_issue_pending()
135 static void fsl_re_desc_done(struct fsl_re_desc *desc) fsl_re_desc_done() argument
140 dma_cookie_complete(&desc->async_tx); fsl_re_desc_done()
142 callback = desc->async_tx.callback; fsl_re_desc_done()
143 callback_param = desc->async_tx.callback_param; fsl_re_desc_done()
147 dma_descriptor_unmap(&desc->async_tx); fsl_re_desc_done()
152 struct fsl_re_desc *desc, *_desc; fsl_re_cleanup_descs() local
156 list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) { fsl_re_cleanup_descs()
157 if (async_tx_test_ack(&desc->async_tx)) fsl_re_cleanup_descs()
158 list_move_tail(&desc->node, &re_chan->free_q); fsl_re_cleanup_descs()
168 struct fsl_re_desc *desc, *_desc; fsl_re_dequeue() local
183 list_for_each_entry_safe(desc, _desc, &re_chan->active_q, fsl_re_dequeue()
186 if (desc->hwdesc.lbea32 == hwdesc->lbea32 && fsl_re_dequeue()
187 desc->hwdesc.addr_low == hwdesc->addr_low) { fsl_re_dequeue()
194 fsl_re_desc_done(desc); fsl_re_dequeue()
195 list_move_tail(&desc->node, &re_chan->ack_q); fsl_re_dequeue()
260 struct fsl_re_desc *desc, fsl_re_init_desc()
263 desc->re_chan = re_chan; fsl_re_init_desc()
264 desc->async_tx.tx_submit = fsl_re_tx_submit; fsl_re_init_desc()
265 dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan); fsl_re_init_desc()
266 INIT_LIST_HEAD(&desc->node); fsl_re_init_desc()
268 desc->hwdesc.fmt32 = FSL_RE_FRAME_FORMAT << FSL_RE_HWDESC_FMT_SHIFT; fsl_re_init_desc()
269 desc->hwdesc.lbea32 = upper_32_bits(paddr); fsl_re_init_desc()
270 desc->hwdesc.addr_low = lower_32_bits(paddr); fsl_re_init_desc()
271 desc->cf_addr = cf; fsl_re_init_desc()
272 desc->cf_paddr = paddr; fsl_re_init_desc()
274 desc->cdb_addr = (void *)(cf + FSL_RE_CF_DESC_SIZE); fsl_re_init_desc()
275 desc->cdb_paddr = paddr + FSL_RE_CF_DESC_SIZE; fsl_re_init_desc()
277 return desc; fsl_re_init_desc()
283 struct fsl_re_desc *desc = NULL; fsl_re_chan_alloc_desc() local
292 /* take one desc from free_q */ fsl_re_chan_alloc_desc()
293 desc = list_first_entry(&re_chan->free_q, fsl_re_chan_alloc_desc()
295 list_del(&desc->node); fsl_re_chan_alloc_desc()
297 desc->async_tx.flags = flags; fsl_re_chan_alloc_desc()
301 if (!desc) { fsl_re_chan_alloc_desc()
302 desc = kzalloc(sizeof(*desc), GFP_NOWAIT); fsl_re_chan_alloc_desc()
303 if (!desc) fsl_re_chan_alloc_desc()
309 kfree(desc); fsl_re_chan_alloc_desc()
313 desc = fsl_re_init_desc(re_chan, desc, cf, paddr); fsl_re_chan_alloc_desc()
314 desc->async_tx.flags = flags; fsl_re_chan_alloc_desc()
321 return desc; fsl_re_chan_alloc_desc()
330 struct fsl_re_desc *desc; fsl_re_prep_dma_genq() local
345 desc = fsl_re_chan_alloc_desc(re_chan, flags); fsl_re_prep_dma_genq()
346  		if (!desc) fsl_re_prep_dma_genq()
360 xor = desc->cdb_addr; fsl_re_prep_dma_genq()
376 cf = desc->cf_addr; fsl_re_prep_dma_genq()
377 fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0); fsl_re_prep_dma_genq()
392 return &desc->async_tx; fsl_re_prep_dma_genq()
417 struct fsl_re_desc *desc; fsl_re_prep_dma_pq() local
449 desc = to_fsl_re_dma_desc(tx); fsl_re_prep_dma_pq()
469 desc = fsl_re_chan_alloc_desc(re_chan, flags); fsl_re_prep_dma_pq()
470  		if (!desc) fsl_re_prep_dma_pq()
480 pq = desc->cdb_addr; fsl_re_prep_dma_pq()
497 cf = desc->cf_addr; fsl_re_prep_dma_pq()
498 fill_cfd_frame(cf, 0, sizeof(struct fsl_re_pq_cdb), desc->cdb_paddr, 0); fsl_re_prep_dma_pq()
526 return &desc->async_tx; fsl_re_prep_dma_pq()
539 struct fsl_re_desc *desc; fsl_re_prep_dma_memcpy() local
553 desc = fsl_re_chan_alloc_desc(re_chan, flags); fsl_re_prep_dma_memcpy()
554  		if (!desc) fsl_re_prep_dma_memcpy()
563 move = desc->cdb_addr; fsl_re_prep_dma_memcpy()
567 cf = desc->cf_addr; fsl_re_prep_dma_memcpy()
568 fill_cfd_frame(cf, 0, sizeof(*move), desc->cdb_paddr, 0); fsl_re_prep_dma_memcpy()
578 return &desc->async_tx; fsl_re_prep_dma_memcpy()
584 struct fsl_re_desc *desc; fsl_re_alloc_chan_resources() local
591 desc = kzalloc(sizeof(*desc), GFP_KERNEL); fsl_re_alloc_chan_resources()
592 if (!desc) fsl_re_alloc_chan_resources()
598 kfree(desc); fsl_re_alloc_chan_resources()
602 INIT_LIST_HEAD(&desc->node); fsl_re_alloc_chan_resources()
603 fsl_re_init_desc(re_chan, desc, cf, paddr); fsl_re_alloc_chan_resources()
605 list_add_tail(&desc->node, &re_chan->free_q); fsl_re_alloc_chan_resources()
614 struct fsl_re_desc *desc; fsl_re_free_chan_resources() local
618 desc = list_first_entry(&re_chan->free_q, fsl_re_free_chan_resources()
622 list_del(&desc->node); fsl_re_free_chan_resources()
623 dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr, fsl_re_free_chan_resources()
624 desc->cf_paddr); fsl_re_free_chan_resources()
625 kfree(desc); fsl_re_free_chan_resources()
820 dev_err(dev, "No memory for fsl re_cf desc pool\n"); fsl_re_probe()
828 dev_err(dev, "No memory for fsl re_hw desc pool\n"); fsl_re_probe()
259 fsl_re_init_desc(struct fsl_re_chan *re_chan, struct fsl_re_desc *desc, void *cf, dma_addr_t paddr) fsl_re_init_desc() argument
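fsl_re_chan_alloc_desc() above recycles from free_q first and only allocates on a miss. The sketch below condenses that flow; desc_lock is an assumed name for the channel lock, and error handling is reduced to the two failure paths visible in the excerpts.

	/* Recycle a descriptor from free_q, else allocate desc + command
	 * frame; returns NULL on allocation failure. Simplified sketch. */
	static struct fsl_re_desc *chan_alloc_desc(struct fsl_re_chan *re_chan,
						   unsigned long flags)
	{
		struct fsl_re_desc *desc = NULL;
		unsigned long irqflags;
		dma_addr_t paddr;
		void *cf;

		spin_lock_irqsave(&re_chan->desc_lock, irqflags);	/* assumed lock name */
		if (!list_empty(&re_chan->free_q)) {
			/* Fast path: take one desc from free_q. */
			desc = list_first_entry(&re_chan->free_q,
						struct fsl_re_desc, node);
			list_del(&desc->node);
			desc->async_tx.flags = flags;
		}
		spin_unlock_irqrestore(&re_chan->desc_lock, irqflags);

		if (!desc) {
			/* Slow path: new descriptor plus coherent command frame. */
			desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
			if (!desc)
				return NULL;
			cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool,
					    GFP_NOWAIT, &paddr);
			if (!cf) {
				kfree(desc);
				return NULL;
			}
			desc = fsl_re_init_desc(re_chan, desc, cf, paddr);
			desc->async_tx.flags = flags;
		}
		return desc;
	}

Because failure is reported as NULL, the prep-routine call sites read naturally as a plain null check on the returned pointer.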
H A Ddma-jz4740.c122 struct jz4740_dma_desc *desc; member in struct:jz4740_dmaengine_chan
280 chan->desc = NULL; jz4740_dma_terminate_all()
299 if (!chan->desc) { jz4740_dma_start_transfer()
303 chan->desc = to_jz4740_dma_desc(vdesc); jz4740_dma_start_transfer()
307 if (chan->next_sg == chan->desc->num_sgs) jz4740_dma_start_transfer()
310 sg = &chan->desc->sg[chan->next_sg]; jz4740_dma_start_transfer()
312 if (chan->desc->direction == DMA_MEM_TO_DEV) { jz4740_dma_start_transfer()
341 if (chan->desc) { jz4740_dma_chan_irq()
342 if (chan->desc->cyclic) { jz4740_dma_chan_irq()
343 vchan_cyclic_callback(&chan->desc->vdesc); jz4740_dma_chan_irq()
345 if (chan->next_sg == chan->desc->num_sgs) { jz4740_dma_chan_irq()
346 list_del(&chan->desc->vdesc.node); jz4740_dma_chan_irq()
347 vchan_cookie_complete(&chan->desc->vdesc); jz4740_dma_chan_irq()
348 chan->desc = NULL; jz4740_dma_chan_irq()
384 if (vchan_issue_pending(&chan->vchan) && !chan->desc) jz4740_dma_issue_pending()
395 struct jz4740_dma_desc *desc; jz4740_dma_prep_slave_sg() local
399 desc = jz4740_dma_alloc_desc(sg_len); jz4740_dma_prep_slave_sg()
400 if (!desc) jz4740_dma_prep_slave_sg()
404 desc->sg[i].addr = sg_dma_address(sg); for_each_sg()
405 desc->sg[i].len = sg_dma_len(sg); for_each_sg()
408 desc->num_sgs = sg_len;
409 desc->direction = direction;
410 desc->cyclic = false;
412 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
421 struct jz4740_dma_desc *desc; jz4740_dma_prep_dma_cyclic() local
429 desc = jz4740_dma_alloc_desc(num_periods); jz4740_dma_prep_dma_cyclic()
430 if (!desc) jz4740_dma_prep_dma_cyclic()
434 desc->sg[i].addr = buf_addr; jz4740_dma_prep_dma_cyclic()
435 desc->sg[i].len = period_len; jz4740_dma_prep_dma_cyclic()
439 desc->num_sgs = num_periods; jz4740_dma_prep_dma_cyclic()
440 desc->direction = direction; jz4740_dma_prep_dma_cyclic()
441 desc->cyclic = true; jz4740_dma_prep_dma_cyclic()
443 return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags); jz4740_dma_prep_dma_cyclic()
447 struct jz4740_dma_desc *desc, unsigned int next_sg) jz4740_dma_desc_residue()
455 for (i = next_sg; i < desc->num_sgs; i++) jz4740_dma_desc_residue()
456 residue += desc->sg[i].len; jz4740_dma_desc_residue()
481 if (cookie == chan->desc->vdesc.tx.cookie) { jz4740_dma_tx_status()
482 state->residue = jz4740_dma_desc_residue(chan, chan->desc, jz4740_dma_tx_status()
446 jz4740_dma_desc_residue(struct jz4740_dmaengine_chan *chan, struct jz4740_dma_desc *desc, unsigned int next_sg) jz4740_dma_desc_residue() argument
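jz4740_dma_desc_residue() above reports how much of a descriptor is still outstanding by summing the lengths of the scatterlist entries that have not been started yet; in the driver this sum is then topped up with the in-flight segment's count read from a channel register, which the sketch below omits.

	/* Sum the lengths of the sg entries from 'next_sg' onward; the
	 * hardware count of the segment in flight is left out here. */
	static size_t jz4740_residue_sw(const struct jz4740_dma_desc *desc,
					unsigned int next_sg)
	{
		size_t residue = 0;
		unsigned int i;

		for (i = next_sg; i < desc->num_sgs; i++)
			residue += desc->sg[i].len;

		return residue;
	}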
H A Dpl330.c358 struct dma_pl330_desc *desc; member in struct:_pl330_req
412 /* Schedule desc completion */
510 /* The channel which currently holds this desc */
522 struct dma_pl330_desc *desc; member in struct:_xfer_spec
527 return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL; _queue_empty()
532 return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; _queue_full()
1029 struct dma_pl330_desc *desc; _trigger() local
1040 if (thrd->req[idx].desc != NULL) { _trigger()
1044 if (thrd->req[idx].desc != NULL) _trigger()
1058 desc = req->desc; _trigger()
1060 ns = desc->rqcfg.nonsecure ? 1 : 0; _trigger()
1120 struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg; _ldst_memtomem()
1146 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); _ldst_devtomem()
1147 off += _emit_LDP(dry_run, &buf[off], SINGLE, pxs->desc->peri); _ldst_devtomem()
1149 off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); _ldst_devtomem()
1161 off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); _ldst_memtodev()
1163 off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri); _ldst_memtodev()
1164 off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); _ldst_memtodev()
1175 switch (pxs->desc->rqtype) { _bursts()
1278 struct pl330_xfer *x = &pxs->desc->px; _setup_loops()
1295 struct pl330_xfer *x = &pxs->desc->px; _setup_xfer()
1326 x = &pxs->desc->px; _setup_req()
1379 struct dma_pl330_desc *desc) pl330_submit_req()
1396 if (desc->rqtype != DMA_MEM_TO_MEM && pl330_submit_req()
1397 desc->peri >= pl330->pcfg.num_peri) { pl330_submit_req()
1400 __func__, __LINE__, desc->peri); pl330_submit_req()
1413 desc->rqcfg.nonsecure = 0; pl330_submit_req()
1415 desc->rqcfg.nonsecure = 1; pl330_submit_req()
1417 ccr = _prepare_ccr(&desc->rqcfg); pl330_submit_req()
1419 idx = thrd->req[0].desc == NULL ? 0 : 1; pl330_submit_req()
1422 xs.desc = desc; pl330_submit_req()
1438 thrd->req[idx].desc = desc; pl330_submit_req()
1449 static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err) dma_pl330_rqcb() argument
1454 if (!desc) dma_pl330_rqcb()
1457 pch = desc->pchan; dma_pl330_rqcb()
1459 /* If desc aborted */ dma_pl330_rqcb()
1465 desc->status = DONE; dma_pl330_rqcb()
1512 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err); pl330_dotask()
1513 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err); pl330_dotask()
1516 thrd->req[0].desc = NULL; pl330_dotask()
1517 thrd->req[1].desc = NULL; pl330_dotask()
1597 descdone = thrd->req[active].desc; pl330_update()
1598 thrd->req[active].desc = NULL; pl330_update()
1675 thrd->req[0].desc = NULL; pl330_request_channel()
1676 thrd->req[1].desc = NULL; pl330_request_channel()
1710 dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT); pl330_release_channel()
1711 dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT); pl330_release_channel()
1774 thrd->req[0].desc = NULL; _reset_thread()
1780 thrd->req[1].desc = NULL; _reset_thread()
1947 struct dma_pl330_desc *desc; fill_queue() local
1950 list_for_each_entry(desc, &pch->work_list, node) { fill_queue()
1953 if (desc->status == BUSY) fill_queue()
1956 ret = pl330_submit_req(pch->thread, desc); fill_queue()
1958 desc->status = BUSY; fill_queue()
1964 desc->status = DONE; fill_queue()
1966 __func__, __LINE__, desc->txd.cookie); fill_queue()
1975 struct dma_pl330_desc *desc, *_dt; pl330_tasklet() local
1982 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) pl330_tasklet()
1983 if (desc->status == DONE) { pl330_tasklet()
1985 dma_cookie_complete(&desc->txd); pl330_tasklet()
1986 list_move_tail(&desc->node, &pch->completed_list); pl330_tasklet()
2008 desc = list_first_entry(&pch->completed_list, pl330_tasklet()
2011 callback = desc->txd.callback; pl330_tasklet()
2012 callback_param = desc->txd.callback_param; pl330_tasklet()
2015 desc->status = PREP; pl330_tasklet()
2016 list_move_tail(&desc->node, &pch->work_list); pl330_tasklet()
2024 desc->status = FREE; pl330_tasklet()
2025 list_move_tail(&desc->node, &pch->dmac->desc_pool); pl330_tasklet()
2028 dma_descriptor_unmap(&desc->txd); pl330_tasklet()
2128 struct dma_pl330_desc *desc; pl330_terminate_all() local
2139 pch->thread->req[0].desc = NULL; pl330_terminate_all()
2140 pch->thread->req[1].desc = NULL; pl330_terminate_all()
2143 /* Mark all desc done */ pl330_terminate_all()
2144 list_for_each_entry(desc, &pch->submitted_list, node) { pl330_terminate_all()
2145 desc->status = FREE; pl330_terminate_all()
2146 dma_cookie_complete(&desc->txd); pl330_terminate_all()
2149 list_for_each_entry(desc, &pch->work_list , node) { pl330_terminate_all()
2150 desc->status = FREE; pl330_terminate_all()
2151 dma_cookie_complete(&desc->txd); pl330_terminate_all()
2213 struct dma_pl330_desc *desc) pl330_get_current_xferred_count()
2222 if (desc->rqcfg.src_inc) { pl330_get_current_xferred_count()
2224 addr = desc->px.src_addr; pl330_get_current_xferred_count()
2227 addr = desc->px.dst_addr; pl330_get_current_xferred_count()
2240 struct dma_pl330_desc *desc, *running = NULL; pl330_tx_status() local
2255 running = pch->thread->req[pch->thread->req_running].desc; pl330_tx_status()
2258 list_for_each_entry(desc, &pch->work_list, node) { pl330_tx_status()
2259 if (desc->status == DONE) pl330_tx_status()
2260 transferred = desc->bytes_requested; pl330_tx_status()
2261 else if (running && desc == running) pl330_tx_status()
2263 pl330_get_current_xferred_count(pch, desc); pl330_tx_status()
2266 residual += desc->bytes_requested - transferred; pl330_tx_status()
2267 if (desc->txd.cookie == cookie) { pl330_tx_status()
2268 switch (desc->status) { pl330_tx_status()
2281 if (desc->last) pl330_tx_status()
2320 struct dma_pl330_desc *desc, *last = to_desc(tx); pl330_tx_submit() local
2329 desc = list_entry(last->node.next, struct dma_pl330_desc, node); pl330_tx_submit()
2331 desc->txd.callback = last->txd.callback; pl330_tx_submit()
2332 desc->txd.callback_param = last->txd.callback_param; pl330_tx_submit()
2334 desc->last = false; pl330_tx_submit()
2336 dma_cookie_assign(&desc->txd); pl330_tx_submit()
2338 list_move_tail(&desc->node, &pch->submitted_list); pl330_tx_submit()
2349 static inline void _init_desc(struct dma_pl330_desc *desc) _init_desc() argument
2351 desc->rqcfg.swap = SWAP_NO; _init_desc()
2352 desc->rqcfg.scctl = CCTRL0; _init_desc()
2353 desc->rqcfg.dcctl = CCTRL0; _init_desc()
2354 desc->txd.tx_submit = pl330_tx_submit; _init_desc()
2356 INIT_LIST_HEAD(&desc->node); _init_desc()
2362 struct dma_pl330_desc *desc; add_desc() local
2366 desc = kcalloc(count, sizeof(*desc), flg); add_desc()
2367 if (!desc) add_desc()
2373 _init_desc(&desc[i]); add_desc()
2374 list_add_tail(&desc[i].node, &pl330->desc_pool); add_desc()
2384 struct dma_pl330_desc *desc = NULL; pluck_desc() local
2390 desc = list_entry(pl330->desc_pool.next, pluck_desc()
2393 list_del_init(&desc->node); pluck_desc()
2395 desc->status = PREP; pluck_desc()
2396 desc->txd.callback = NULL; pluck_desc()
2401 return desc; pluck_desc()
2408 struct dma_pl330_desc *desc; pl330_get_desc() local
2410  	/* Pluck one desc from the DMAC's pool */ pl330_get_desc()
2411 desc = pluck_desc(pl330); pl330_get_desc()
2414 if (!desc) { pl330_get_desc()
2419 desc = pluck_desc(pl330); pl330_get_desc()
2420 if (!desc) { pl330_get_desc()
2428 desc->pchan = pch; pl330_get_desc()
2429 desc->txd.cookie = 0; pl330_get_desc()
2430 async_tx_ack(&desc->txd); pl330_get_desc()
2432 desc->peri = peri_id ? pch->chan.chan_id : 0; pl330_get_desc()
2433 desc->rqcfg.pcfg = &pch->dmac->pcfg; pl330_get_desc()
2435 dma_async_tx_descriptor_init(&desc->txd, &pch->chan); pl330_get_desc()
2437 return desc; pl330_get_desc()
2452 struct dma_pl330_desc *desc = pl330_get_desc(pch); __pl330_prep_dma_memcpy() local
2454 if (!desc) { __pl330_prep_dma_memcpy()
2455 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", __pl330_prep_dma_memcpy()
2470 fill_px(&desc->px, dst, src, len); __pl330_prep_dma_memcpy()
2472 return desc; __pl330_prep_dma_memcpy()
2476 static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len) get_burst_len() argument
2478 struct dma_pl330_chan *pch = desc->pchan; get_burst_len()
2484 burst_len >>= desc->rqcfg.brst_size; get_burst_len()
2491 if (!(len % (burst_len << desc->rqcfg.brst_size))) get_burst_len()
2504 struct dma_pl330_desc *desc = NULL, *first = NULL; pl330_prep_dma_cyclic() local
2521 desc = pl330_get_desc(pch); pl330_prep_dma_cyclic()
2522 if (!desc) { pl330_prep_dma_cyclic()
2523 dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", pl330_prep_dma_cyclic()
2532 desc = list_entry(first->node.next, pl330_prep_dma_cyclic()
2534 list_move_tail(&desc->node, &pl330->desc_pool); pl330_prep_dma_cyclic()
2546 desc->rqcfg.src_inc = 1; pl330_prep_dma_cyclic()
2547 desc->rqcfg.dst_inc = 0; pl330_prep_dma_cyclic()
2552 desc->rqcfg.src_inc = 0; pl330_prep_dma_cyclic()
2553 desc->rqcfg.dst_inc = 1; pl330_prep_dma_cyclic()
2561 desc->rqtype = direction; pl330_prep_dma_cyclic()
2562 desc->rqcfg.brst_size = pch->burst_sz; pl330_prep_dma_cyclic()
2563 desc->rqcfg.brst_len = 1; pl330_prep_dma_cyclic()
2564 desc->bytes_requested = period_len; pl330_prep_dma_cyclic()
2565 fill_px(&desc->px, dst, src, period_len); pl330_prep_dma_cyclic()
2568 first = desc; pl330_prep_dma_cyclic()
2570 list_add_tail(&desc->node, &first->node); pl330_prep_dma_cyclic()
2575 if (!desc) pl330_prep_dma_cyclic()
2579 desc->txd.flags = flags; pl330_prep_dma_cyclic()
2581 return &desc->txd; pl330_prep_dma_cyclic()
2588 struct dma_pl330_desc *desc; pl330_prep_dma_memcpy() local
2598 desc = __pl330_prep_dma_memcpy(pch, dst, src, len); pl330_prep_dma_memcpy()
2599 if (!desc) pl330_prep_dma_memcpy()
2602 desc->rqcfg.src_inc = 1; pl330_prep_dma_memcpy()
2603 desc->rqcfg.dst_inc = 1; pl330_prep_dma_memcpy()
2604 desc->rqtype = DMA_MEM_TO_MEM; pl330_prep_dma_memcpy()
2617 desc->rqcfg.brst_size = 0; pl330_prep_dma_memcpy()
2618 while (burst != (1 << desc->rqcfg.brst_size)) pl330_prep_dma_memcpy()
2619 desc->rqcfg.brst_size++; pl330_prep_dma_memcpy()
2625 if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width) pl330_prep_dma_memcpy()
2626 desc->rqcfg.brst_len = 1; pl330_prep_dma_memcpy()
2628 desc->rqcfg.brst_len = get_burst_len(desc, len); pl330_prep_dma_memcpy()
2629 desc->bytes_requested = len; pl330_prep_dma_memcpy()
2631 desc->txd.flags = flags; pl330_prep_dma_memcpy()
2633 return &desc->txd; pl330_prep_dma_memcpy()
2640 struct dma_pl330_desc *desc; __pl330_giveback_desc() local
2648 desc = list_entry(first->node.next, __pl330_giveback_desc()
2650 list_move_tail(&desc->node, &pl330->desc_pool); __pl330_giveback_desc()
2663 struct dma_pl330_desc *first, *desc = NULL; pl330_prep_slave_sg() local
2678 desc = pl330_get_desc(pch); for_each_sg()
2679 if (!desc) { for_each_sg()
2683 "%s:%d Unable to fetch desc\n", for_each_sg()
2691 first = desc; for_each_sg()
2693 list_add_tail(&desc->node, &first->node); for_each_sg()
2696 desc->rqcfg.src_inc = 1; for_each_sg()
2697 desc->rqcfg.dst_inc = 0; for_each_sg()
2698 fill_px(&desc->px, for_each_sg()
2701 desc->rqcfg.src_inc = 0; for_each_sg()
2702 desc->rqcfg.dst_inc = 1; for_each_sg()
2703 fill_px(&desc->px, for_each_sg()
2707 desc->rqcfg.brst_size = pch->burst_sz; for_each_sg()
2708 desc->rqcfg.brst_len = 1; for_each_sg()
2709 desc->rqtype = direction; for_each_sg()
2710 desc->bytes_requested = sg_dma_len(sg); for_each_sg()
2713 /* Return the last desc in the chain */
2714 desc->txd.flags = flg;
2715 return &desc->txd;
2835 dev_warn(&adev->dev, "unable to allocate desc\n"); pl330_probe()
1378 pl330_submit_req(struct pl330_thread *thrd, struct dma_pl330_desc *desc) pl330_submit_req() argument
2212 pl330_get_current_xferred_count(struct dma_pl330_chan *pch, struct dma_pl330_desc *desc) pl330_get_current_xferred_count() argument
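pl330 keeps a DMAC-wide descriptor pool that pluck_desc() draws from and add_desc() refills in batches. The sketch below mirrors add_desc() (lines 2362-2374): allocate an array, initialize each element, and push all of them onto desc_pool under the pool lock (pool_lock is the assumed lock name).

	/* Bulk-refill the shared descriptor pool; returns the number of
	 * descriptors added (0 on allocation failure). */
	static int add_desc(struct pl330_dmac *pl330, gfp_t flg, int count)
	{
		struct dma_pl330_desc *desc;
		unsigned long flags;
		int i;

		desc = kcalloc(count, sizeof(*desc), flg);
		if (!desc)
			return 0;

		spin_lock_irqsave(&pl330->pool_lock, flags);	/* assumed lock name */
		for (i = 0; i < count; i++) {
			_init_desc(&desc[i]);
			list_add_tail(&desc[i].node, &pl330->desc_pool);
		}
		spin_unlock_irqrestore(&pl330->pool_lock, flags);

		return count;
	}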
/linux-4.4.14/arch/mips/vr41xx/common/
H A Dicu.c157 struct irq_desc *desc = irq_to_desc(PIU_IRQ); vr41xx_enable_piuint() local
162 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_piuint()
164 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_piuint()
172 struct irq_desc *desc = irq_to_desc(PIU_IRQ); vr41xx_disable_piuint() local
177 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_piuint()
179 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_piuint()
187 struct irq_desc *desc = irq_to_desc(AIU_IRQ); vr41xx_enable_aiuint() local
192 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_aiuint()
194 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_aiuint()
202 struct irq_desc *desc = irq_to_desc(AIU_IRQ); vr41xx_disable_aiuint() local
207 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_aiuint()
209 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_aiuint()
217 struct irq_desc *desc = irq_to_desc(KIU_IRQ); vr41xx_enable_kiuint() local
222 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_kiuint()
224 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_kiuint()
232 struct irq_desc *desc = irq_to_desc(KIU_IRQ); vr41xx_disable_kiuint() local
237 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_kiuint()
239 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_kiuint()
247 struct irq_desc *desc = irq_to_desc(ETHERNET_IRQ); vr41xx_enable_macint() local
250 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_macint()
252 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_macint()
259 struct irq_desc *desc = irq_to_desc(ETHERNET_IRQ); vr41xx_disable_macint() local
262 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_macint()
264 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_macint()
271 struct irq_desc *desc = irq_to_desc(DSIU_IRQ); vr41xx_enable_dsiuint() local
274 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_dsiuint()
276 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_dsiuint()
283 struct irq_desc *desc = irq_to_desc(DSIU_IRQ); vr41xx_disable_dsiuint() local
286 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_dsiuint()
288 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_dsiuint()
295 struct irq_desc *desc = irq_to_desc(FIR_IRQ); vr41xx_enable_firint() local
298 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_firint()
300 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_firint()
307 struct irq_desc *desc = irq_to_desc(FIR_IRQ); vr41xx_disable_firint() local
310 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_firint()
312 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_firint()
319 struct irq_desc *desc = irq_to_desc(PCI_IRQ); vr41xx_enable_pciint() local
325 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_pciint()
327 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_pciint()
335 struct irq_desc *desc = irq_to_desc(PCI_IRQ); vr41xx_disable_pciint() local
341 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_pciint()
343 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_pciint()
351 struct irq_desc *desc = irq_to_desc(SCU_IRQ); vr41xx_enable_scuint() local
357 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_scuint()
359 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_scuint()
367 struct irq_desc *desc = irq_to_desc(SCU_IRQ); vr41xx_disable_scuint() local
373 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_scuint()
375 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_scuint()
383 struct irq_desc *desc = irq_to_desc(CSI_IRQ); vr41xx_enable_csiint() local
389 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_csiint()
391 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_csiint()
399 struct irq_desc *desc = irq_to_desc(CSI_IRQ); vr41xx_disable_csiint() local
405 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_csiint()
407 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_csiint()
415 struct irq_desc *desc = irq_to_desc(BCU_IRQ); vr41xx_enable_bcuint() local
421 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_enable_bcuint()
423 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_enable_bcuint()
431 struct irq_desc *desc = irq_to_desc(BCU_IRQ); vr41xx_disable_bcuint() local
437 raw_spin_lock_irqsave(&desc->lock, flags); vr41xx_disable_bcuint()
439 raw_spin_unlock_irqrestore(&desc->lock, flags); vr41xx_disable_bcuint()
479 struct irq_desc *desc = irq_to_desc(irq); set_sysint1_assign() local
485 raw_spin_lock_irq(&desc->lock); set_sysint1_assign()
524 raw_spin_unlock_irq(&desc->lock); set_sysint1_assign()
532 raw_spin_unlock_irq(&desc->lock); set_sysint1_assign()
539 struct irq_desc *desc = irq_to_desc(irq); set_sysint2_assign() local
545 raw_spin_lock_irq(&desc->lock); set_sysint2_assign()
592 raw_spin_unlock_irq(&desc->lock); set_sysint2_assign()
600 raw_spin_unlock_irq(&desc->lock); set_sysint2_assign()
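Every vr41xx_{enable,disable}_*int() pair above is the same template: resolve the fixed IRQ to its irq_desc, take desc->lock IRQ-safely, flip one unit-specific enable bit in the ICU, unlock. A condensed sketch; vr41xx_icu_set_bit() is a hypothetical stand-in for the register read-modify-write the driver performs inside the lock.

	/* One enable/disable template for a unit interrupt line. */
	static void vr41xx_set_unit_int(unsigned int irq, u16 mask, bool enable)
	{
		struct irq_desc *desc = irq_to_desc(irq);
		unsigned long flags;

		raw_spin_lock_irqsave(&desc->lock, flags);
		vr41xx_icu_set_bit(mask, enable);	/* hypothetical helper */
		raw_spin_unlock_irqrestore(&desc->lock, flags);
	}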
/linux-4.4.14/drivers/pinctrl/
H A Dpinmux.c35 const struct pinmux_ops *ops = pctldev->desc->pmxops; pinmux_check_ops()
87 struct pin_desc *desc; pin_request() local
88 const struct pinmux_ops *ops = pctldev->desc->pmxops; pin_request()
91 desc = pin_desc_get(pctldev, pin); pin_request()
92 if (desc == NULL) { pin_request()
100 pin, desc->name, owner); pin_request()
104 if (desc->gpio_owner) { pin_request()
107 desc->name, desc->gpio_owner, owner); pin_request()
110 if (ops->strict && desc->mux_usecount && pin_request()
111 strcmp(desc->mux_owner, owner)) { pin_request()
114 desc->name, desc->mux_owner, owner); pin_request()
118 desc->gpio_owner = owner; pin_request()
120 if (desc->mux_usecount && strcmp(desc->mux_owner, owner)) { pin_request()
123 desc->name, desc->mux_owner, owner); pin_request()
126 if (ops->strict && desc->gpio_owner) { pin_request()
129 desc->name, desc->gpio_owner, owner); pin_request()
133 desc->mux_usecount++; pin_request()
134 if (desc->mux_usecount > 1) pin_request()
137 desc->mux_owner = owner; pin_request()
169 desc->gpio_owner = NULL; pin_request()
171 desc->mux_usecount--; pin_request()
172 if (!desc->mux_usecount) pin_request()
173 desc->mux_owner = NULL; pin_request()
198 const struct pinmux_ops *ops = pctldev->desc->pmxops; pin_free()
199 struct pin_desc *desc; pin_free() local
202 desc = pin_desc_get(pctldev, pin); pin_free()
203 if (desc == NULL) { pin_free()
213 if (WARN_ON(!desc->mux_usecount)) pin_free()
215 desc->mux_usecount--; pin_free()
216 if (desc->mux_usecount) pin_free()
230 owner = desc->gpio_owner; pin_free()
231 desc->gpio_owner = NULL; pin_free()
233 owner = desc->mux_owner; pin_free()
234 desc->mux_owner = NULL; pin_free()
235 desc->mux_setting = NULL; pin_free()
297 ops = pctldev->desc->pmxops; pinmux_gpio_direction()
310 const struct pinmux_ops *ops = pctldev->desc->pmxops; pinmux_func_name_to_selector()
332 const struct pinmux_ops *pmxops = pctldev->desc->pmxops; pinmux_map_to_setting()
403 const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; pinmux_enable_setting()
404 const struct pinmux_ops *ops = pctldev->desc->pmxops; pinmux_enable_setting()
409 struct pin_desc *desc; pinmux_enable_setting() local
434 desc = pin_desc_get(pctldev, pins[i]); pinmux_enable_setting()
435 pname = desc ? desc->name : "non-existing"; pinmux_enable_setting()
449 desc = pin_desc_get(pctldev, pins[i]); pinmux_enable_setting()
450 if (desc == NULL) { pinmux_enable_setting()
452 "could not get pin desc for pin %d\n", pinmux_enable_setting()
456 desc->mux_setting = &(setting->data.mux); pinmux_enable_setting()
469 desc = pin_desc_get(pctldev, pins[i]); pinmux_enable_setting()
470 if (desc) pinmux_enable_setting()
471 desc->mux_setting = NULL; pinmux_enable_setting()
484 const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; pinmux_disable_setting()
489 struct pin_desc *desc; pinmux_disable_setting() local
508 desc = pin_desc_get(pctldev, pins[i]); pinmux_disable_setting()
509 if (desc == NULL) { pinmux_disable_setting()
511 "could not get pin desc for pin %d\n", pinmux_disable_setting()
515 if (desc->mux_setting == &(setting->data.mux)) { pinmux_disable_setting()
516 desc->mux_setting = NULL; pinmux_disable_setting()
528 pins[i], desc->name, gname); pinmux_disable_setting()
539 const struct pinmux_ops *pmxops = pctldev->desc->pmxops; pinmux_functions_show()
581 const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; pinmux_pins_show()
582 const struct pinmux_ops *pmxops = pctldev->desc->pmxops; pinmux_pins_show()
599 for (i = 0; i < pctldev->desc->npins; i++) { pinmux_pins_show()
600 struct pin_desc *desc; pinmux_pins_show() local
603 pin = pctldev->desc->pins[i].number; pinmux_pins_show()
604 desc = pin_desc_get(pctldev, pin); pinmux_pins_show()
606 if (desc == NULL) pinmux_pins_show()
609 if (desc->mux_owner && pinmux_pins_show()
610 !strcmp(desc->mux_owner, pinctrl_dev_get_name(pctldev))) pinmux_pins_show()
614 if (desc->mux_owner) pinmux_pins_show()
617 desc->name ? desc->name : "unnamed", pinmux_pins_show()
618 desc->mux_owner, pinmux_pins_show()
620 else if (desc->gpio_owner) pinmux_pins_show()
623 desc->name ? desc->name : "unnamed", pinmux_pins_show()
624 desc->gpio_owner); pinmux_pins_show()
628 desc->name ? desc->name : "unnamed"); pinmux_pins_show()
632 desc->name ? desc->name : "unnamed", pinmux_pins_show()
633 desc->mux_owner ? desc->mux_owner pinmux_pins_show()
635 desc->gpio_owner ? desc->gpio_owner pinmux_pins_show()
641 if (desc->mux_setting) pinmux_pins_show()
644 desc->mux_setting->func), pinmux_pins_show()
646 desc->mux_setting->group)); pinmux_pins_show()
667 const struct pinmux_ops *pmxops = pctldev->desc->pmxops; pinmux_show_setting()
668 const struct pinctrl_ops *pctlops = pctldev->desc->pctlops; pinmux_show_setting()
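pin_request() above enforces per-pin ownership on struct pin_desc: GPIO use is tracked in gpio_owner, mux use in mux_owner/mux_usecount, and ops->strict forbids mixing the two. The sketch below isolates the mux-path checks; it skips the GPIO path, the ops->request callback, and the unwinding the real function does.

	/* Mux-path ownership check, condensed from pin_request(). */
	static int pin_mux_claim(struct pin_desc *desc, const char *owner,
				 bool strict)
	{
		if (strict && desc->gpio_owner)
			return -EBUSY;		/* pin is held as a GPIO */

		if (desc->mux_usecount && strcmp(desc->mux_owner, owner))
			return -EBUSY;		/* muxed by another owner */

		desc->mux_usecount++;
		if (desc->mux_usecount == 1)
			desc->mux_owner = owner;

		return 0;
	}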
/linux-4.4.14/drivers/dma/sh/
H A Drcar-dmac.c130 * @lock: protects the channel CHCR register and the desc members
131 * @desc.free: list of free descriptors
132 * @desc.pending: list of pending descriptors (submitted with tx_submit)
133 * @desc.active: list of active descriptors (activated with issue_pending)
134 * @desc.done: list of completed descriptors
135 * @desc.wait: list of descriptors waiting for an ack
136 * @desc.running: the descriptor being processed (a member of the active list)
137 * @desc.chunks_free: list of free transfer chunk descriptors
138 * @desc.pages: list of pages used by allocated descriptors
164 } desc; member in struct:rcar_dmac_chan
319 struct rcar_dmac_desc *desc = chan->desc.running; rcar_dmac_chan_start_xfer() local
320 u32 chcr = desc->chcr; rcar_dmac_chan_start_xfer()
327 if (desc->hwdescs.use) { rcar_dmac_chan_start_xfer()
331 "chan%u: queue desc %p: %u@%pad\n", rcar_dmac_chan_start_xfer()
332 chan->index, desc, desc->nchunks, &desc->hwdescs.dma); rcar_dmac_chan_start_xfer()
336 desc->hwdescs.dma >> 32); rcar_dmac_chan_start_xfer()
339 (desc->hwdescs.dma & 0xfffffff0) | rcar_dmac_chan_start_xfer()
342 RCAR_DMACHCRB_DCNT(desc->nchunks - 1) | rcar_dmac_chan_start_xfer()
352 chunk = list_first_entry(&desc->chunks, rcar_dmac_chan_start_xfer()
370 if (!desc->cyclic) rcar_dmac_chan_start_xfer()
376 else if (desc->async_tx.callback) rcar_dmac_chan_start_xfer()
385 struct rcar_dmac_xfer_chunk *chunk = desc->running; rcar_dmac_chan_start_xfer()
403 chunk->size >> desc->xfer_shift); rcar_dmac_chan_start_xfer()
436 struct rcar_dmac_desc *desc = to_rcar_dmac_desc(tx); rcar_dmac_tx_submit() local
445 chan->index, tx->cookie, desc); rcar_dmac_tx_submit()
447 list_add_tail(&desc->node, &chan->desc.pending); rcar_dmac_tx_submit()
448 desc->running = list_first_entry(&desc->chunks, rcar_dmac_tx_submit()
477 struct rcar_dmac_desc *desc = &page->descs[i]; rcar_dmac_desc_alloc() local
479 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan); rcar_dmac_desc_alloc()
480 desc->async_tx.tx_submit = rcar_dmac_tx_submit; rcar_dmac_desc_alloc()
481 INIT_LIST_HEAD(&desc->chunks); rcar_dmac_desc_alloc()
483 list_add_tail(&desc->node, &list); rcar_dmac_desc_alloc()
487 list_splice_tail(&list, &chan->desc.free); rcar_dmac_desc_alloc()
488 list_add_tail(&page->node, &chan->desc.pages); rcar_dmac_desc_alloc()
497 * @desc: the descriptor
507 struct rcar_dmac_desc *desc) rcar_dmac_desc_put()
512 list_splice_tail_init(&desc->chunks, &chan->desc.chunks_free); rcar_dmac_desc_put()
513 list_add_tail(&desc->node, &chan->desc.free); rcar_dmac_desc_put()
519 struct rcar_dmac_desc *desc, *_desc; rcar_dmac_desc_recycle_acked() local
530 list_splice_init(&chan->desc.wait, &list); rcar_dmac_desc_recycle_acked()
533 list_for_each_entry_safe(desc, _desc, &list, node) { rcar_dmac_desc_recycle_acked()
534 if (async_tx_test_ack(&desc->async_tx)) { rcar_dmac_desc_recycle_acked()
535 list_del(&desc->node); rcar_dmac_desc_recycle_acked()
536 rcar_dmac_desc_put(chan, desc); rcar_dmac_desc_recycle_acked()
545 list_splice(&list, &chan->desc.wait); rcar_dmac_desc_recycle_acked()
560 struct rcar_dmac_desc *desc; rcar_dmac_desc_get() local
569 while (list_empty(&chan->desc.free)) { rcar_dmac_desc_get()
583 desc = list_first_entry(&chan->desc.free, struct rcar_dmac_desc, node); rcar_dmac_desc_get()
584 list_del(&desc->node); rcar_dmac_desc_get()
588 return desc; rcar_dmac_desc_get()
614 list_splice_tail(&list, &chan->desc.chunks_free); rcar_dmac_xfer_chunk_alloc()
615 list_add_tail(&page->node, &chan->desc.pages); rcar_dmac_xfer_chunk_alloc()
639 while (list_empty(&chan->desc.chunks_free)) { rcar_dmac_xfer_chunk_get()
653 chunk = list_first_entry(&chan->desc.chunks_free, rcar_dmac_xfer_chunk_get()
663 struct rcar_dmac_desc *desc, size_t size) rcar_dmac_realloc_hwdesc()
673 if (desc->hwdescs.size == size) rcar_dmac_realloc_hwdesc()
676 if (desc->hwdescs.mem) { rcar_dmac_realloc_hwdesc()
677 dma_free_coherent(chan->chan.device->dev, desc->hwdescs.size, rcar_dmac_realloc_hwdesc()
678 desc->hwdescs.mem, desc->hwdescs.dma); rcar_dmac_realloc_hwdesc()
679 desc->hwdescs.mem = NULL; rcar_dmac_realloc_hwdesc()
680 desc->hwdescs.size = 0; rcar_dmac_realloc_hwdesc()
686 desc->hwdescs.mem = dma_alloc_coherent(chan->chan.device->dev, size, rcar_dmac_realloc_hwdesc()
687 &desc->hwdescs.dma, GFP_NOWAIT); rcar_dmac_realloc_hwdesc()
688 if (!desc->hwdescs.mem) rcar_dmac_realloc_hwdesc()
691 desc->hwdescs.size = size; rcar_dmac_realloc_hwdesc()
695 struct rcar_dmac_desc *desc) rcar_dmac_fill_hwdesc()
700 rcar_dmac_realloc_hwdesc(chan, desc, desc->nchunks * sizeof(*hwdesc)); rcar_dmac_fill_hwdesc()
702 hwdesc = desc->hwdescs.mem; rcar_dmac_fill_hwdesc()
706 list_for_each_entry(chunk, &desc->chunks, node) { rcar_dmac_fill_hwdesc()
709 hwdesc->tcr = chunk->size >> desc->xfer_shift; rcar_dmac_fill_hwdesc()
731 struct rcar_dmac_desc *desc, *_desc; rcar_dmac_chan_reinit() local
738 list_splice_init(&chan->desc.pending, &descs); rcar_dmac_chan_reinit()
739 list_splice_init(&chan->desc.active, &descs); rcar_dmac_chan_reinit()
740 list_splice_init(&chan->desc.done, &descs); rcar_dmac_chan_reinit()
741 list_splice_init(&chan->desc.wait, &descs); rcar_dmac_chan_reinit()
743 chan->desc.running = NULL; rcar_dmac_chan_reinit()
747 list_for_each_entry_safe(desc, _desc, &descs, node) { rcar_dmac_chan_reinit()
748 list_del(&desc->node); rcar_dmac_chan_reinit()
749 rcar_dmac_desc_put(chan, desc); rcar_dmac_chan_reinit()
780 struct rcar_dmac_desc *desc) rcar_dmac_chan_configure_desc()
792 switch (desc->direction) { rcar_dmac_chan_configure_desc()
813 desc->xfer_shift = ilog2(xfer_size); rcar_dmac_chan_configure_desc()
814 desc->chcr = chcr | chcr_ts[desc->xfer_shift]; rcar_dmac_chan_configure_desc()
834 struct rcar_dmac_desc *desc; rcar_dmac_chan_prep_sg() local
842 desc = rcar_dmac_desc_get(chan); rcar_dmac_chan_prep_sg()
843 if (!desc) rcar_dmac_chan_prep_sg()
846 desc->async_tx.flags = dma_flags; rcar_dmac_chan_prep_sg()
847 desc->async_tx.cookie = -EBUSY; rcar_dmac_chan_prep_sg()
849 desc->cyclic = cyclic; rcar_dmac_chan_prep_sg()
850 desc->direction = dir; rcar_dmac_chan_prep_sg()
852 rcar_dmac_chan_configure_desc(chan, desc); rcar_dmac_chan_prep_sg()
854 max_chunk_size = (RCAR_DMATCR_MASK + 1) << desc->xfer_shift; rcar_dmac_chan_prep_sg()
890 rcar_dmac_desc_put(chan, desc); for_each_sg()
906 chan->index, chunk, desc, i, sg, size, len, for_each_sg()
915 list_add_tail(&chunk->node, &desc->chunks); for_each_sg()
920 desc->nchunks = nchunks;
921 desc->size = full_size;
933 desc->hwdescs.use = !highmem && nchunks > 1;
934 if (desc->hwdescs.use) {
935 if (rcar_dmac_fill_hwdesc(chan, desc) < 0)
936 desc->hwdescs.use = false;
939 return &desc->async_tx;
951 INIT_LIST_HEAD(&rchan->desc.chunks_free); rcar_dmac_alloc_chan_resources()
952 INIT_LIST_HEAD(&rchan->desc.pages); rcar_dmac_alloc_chan_resources()
971 struct rcar_dmac_desc *desc; rcar_dmac_free_chan_resources() local
987 list_splice_init(&rchan->desc.free, &list); rcar_dmac_free_chan_resources()
988 list_splice_init(&rchan->desc.pending, &list); rcar_dmac_free_chan_resources()
989 list_splice_init(&rchan->desc.active, &list); rcar_dmac_free_chan_resources()
990 list_splice_init(&rchan->desc.done, &list); rcar_dmac_free_chan_resources()
991 list_splice_init(&rchan->desc.wait, &list); rcar_dmac_free_chan_resources()
993 list_for_each_entry(desc, &list, node) rcar_dmac_free_chan_resources()
994 rcar_dmac_realloc_hwdesc(rchan, desc, 0); rcar_dmac_free_chan_resources()
996 list_for_each_entry_safe(page, _page, &rchan->desc.pages, node) { rcar_dmac_free_chan_resources()
1054 struct dma_async_tx_descriptor *desc; rcar_dmac_prep_dma_cyclic() local
1097 desc = rcar_dmac_chan_prep_sg(rchan, sgl, sg_len, dev_addr, rcar_dmac_prep_dma_cyclic()
1101 return desc; rcar_dmac_prep_dma_cyclic()
1143 struct rcar_dmac_desc *desc = chan->desc.running; rcar_dmac_chan_get_residue() local
1149 if (!desc) rcar_dmac_chan_get_residue()
1157 if (cookie != desc->async_tx.cookie) rcar_dmac_chan_get_residue()
1158 return desc->size; rcar_dmac_chan_get_residue()
1166 if (desc->hwdescs.use) { rcar_dmac_chan_get_residue()
1169 WARN_ON(dptr >= desc->nchunks); rcar_dmac_chan_get_residue()
1171 running = desc->running; rcar_dmac_chan_get_residue()
1175 list_for_each_entry_reverse(chunk, &desc->chunks, node) { rcar_dmac_chan_get_residue()
1176 if (chunk == running || ++dptr == desc->nchunks) rcar_dmac_chan_get_residue()
1183 residue += rcar_dmac_chan_read(chan, RCAR_DMATCR) << desc->xfer_shift; rcar_dmac_chan_get_residue()
1217 if (list_empty(&rchan->desc.pending)) rcar_dmac_issue_pending()
1221 list_splice_tail_init(&rchan->desc.pending, &rchan->desc.active); rcar_dmac_issue_pending()
1227 if (!rchan->desc.running) { rcar_dmac_issue_pending()
1228 struct rcar_dmac_desc *desc; rcar_dmac_issue_pending() local
1230 desc = list_first_entry(&rchan->desc.active, rcar_dmac_issue_pending()
1232 rchan->desc.running = desc; rcar_dmac_issue_pending()
1247 struct rcar_dmac_desc *desc = chan->desc.running; rcar_dmac_isr_desc_stage_end() local
1250 if (WARN_ON(!desc || !desc->cyclic)) { rcar_dmac_isr_desc_stage_end()
1269 struct rcar_dmac_desc *desc = chan->desc.running; rcar_dmac_isr_transfer_end() local
1272 if (WARN_ON_ONCE(!desc)) { rcar_dmac_isr_transfer_end()
1286 if (!desc->hwdescs.use) { rcar_dmac_isr_transfer_end()
1292 if (!list_is_last(&desc->running->node, &desc->chunks)) { rcar_dmac_isr_transfer_end()
1293 desc->running = list_next_entry(desc->running, node); rcar_dmac_isr_transfer_end()
1294 if (!desc->cyclic) rcar_dmac_isr_transfer_end()
1303 if (desc->cyclic) { rcar_dmac_isr_transfer_end()
1304 desc->running = rcar_dmac_isr_transfer_end()
1305 list_first_entry(&desc->chunks, rcar_dmac_isr_transfer_end()
1313 list_move_tail(&desc->node, &chan->desc.done); rcar_dmac_isr_transfer_end()
1316 if (!list_empty(&chan->desc.active)) rcar_dmac_isr_transfer_end()
1317 chan->desc.running = list_first_entry(&chan->desc.active, rcar_dmac_isr_transfer_end()
1321 chan->desc.running = NULL; rcar_dmac_isr_transfer_end()
1324 if (chan->desc.running) rcar_dmac_isr_transfer_end()
1358 struct rcar_dmac_desc *desc; rcar_dmac_isr_channel_thread() local
1363 if (chan->desc.running && chan->desc.running->cyclic) { rcar_dmac_isr_channel_thread()
1367 desc = chan->desc.running; rcar_dmac_isr_channel_thread()
1368 callback = desc->async_tx.callback; rcar_dmac_isr_channel_thread()
1369 callback_param = desc->async_tx.callback_param; rcar_dmac_isr_channel_thread()
1382 while (!list_empty(&chan->desc.done)) { rcar_dmac_isr_channel_thread()
1383 desc = list_first_entry(&chan->desc.done, struct rcar_dmac_desc, rcar_dmac_isr_channel_thread()
1385 dma_cookie_complete(&desc->async_tx); rcar_dmac_isr_channel_thread()
1386 list_del(&desc->node); rcar_dmac_isr_channel_thread()
1388 if (desc->async_tx.callback) { rcar_dmac_isr_channel_thread()
1395 desc->async_tx.callback(desc->async_tx.callback_param); rcar_dmac_isr_channel_thread()
1399 list_add_tail(&desc->node, &chan->desc.wait); rcar_dmac_isr_channel_thread()
1536 INIT_LIST_HEAD(&rchan->desc.free); rcar_dmac_chan_probe()
1537 INIT_LIST_HEAD(&rchan->desc.pending); rcar_dmac_chan_probe()
1538 INIT_LIST_HEAD(&rchan->desc.active); rcar_dmac_chan_probe()
1539 INIT_LIST_HEAD(&rchan->desc.done); rcar_dmac_chan_probe()
1540 INIT_LIST_HEAD(&rchan->desc.wait); rcar_dmac_chan_probe()
506 rcar_dmac_desc_put(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) rcar_dmac_desc_put() argument
662 rcar_dmac_realloc_hwdesc(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc, size_t size) rcar_dmac_realloc_hwdesc() argument
694 rcar_dmac_fill_hwdesc(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) rcar_dmac_fill_hwdesc() argument
779 rcar_dmac_chan_configure_desc(struct rcar_dmac_chan *chan, struct rcar_dmac_desc *desc) rcar_dmac_chan_configure_desc() argument
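rcar-dmac moves each descriptor through per-channel lists (free, pending, active, done, wait, and back to free). The sketch below mirrors rcar_dmac_desc_recycle_acked() (lines 519-545): wait-listed descriptors return to the free list once the client has acked their async_tx; locking is simplified to spin_lock_irq.

	/* Move acked descriptors from desc.wait back to desc.free. */
	static void desc_recycle_acked(struct rcar_dmac_chan *chan)
	{
		struct rcar_dmac_desc *desc, *_desc;
		LIST_HEAD(list);

		spin_lock_irq(&chan->lock);
		list_splice_init(&chan->desc.wait, &list);
		spin_unlock_irq(&chan->lock);

		list_for_each_entry_safe(desc, _desc, &list, node) {
			if (async_tx_test_ack(&desc->async_tx)) {
				list_del(&desc->node);
				rcar_dmac_desc_put(chan, desc);
			}
		}

		if (list_empty(&list))
			return;

		/* Re-queue whatever is still unacked. */
		spin_lock_irq(&chan->lock);
		list_splice(&list, &chan->desc.wait);
		spin_unlock_irq(&chan->lock);
	}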
H A Dusb-dmac.c74 * @desc: the current descriptor
84 struct usb_dmac_desc *desc; member in struct:usb_dmac_chan
199 struct usb_dmac_desc *desc = chan->desc; usb_dmac_chan_start_sg() local
200 struct usb_dmac_sg *sg = desc->sg + index; usb_dmac_chan_start_sg()
205 if (desc->direction == DMA_DEV_TO_MEM) usb_dmac_chan_start_sg()
231 chan->desc = NULL; usb_dmac_chan_start_desc()
242 chan->desc = to_usb_dmac_desc(vd); usb_dmac_chan_start_desc()
243 chan->desc->sg_index = 0; usb_dmac_chan_start_desc()
269 struct usb_dmac_desc *desc; usb_dmac_desc_alloc() local
272 desc = kzalloc(sizeof(*desc) + sg_len * sizeof(desc->sg[0]), gfp); usb_dmac_desc_alloc()
273 if (!desc) usb_dmac_desc_alloc()
276 desc->sg_allocated_len = sg_len; usb_dmac_desc_alloc()
277 INIT_LIST_HEAD(&desc->node); usb_dmac_desc_alloc()
280 list_add_tail(&desc->node, &chan->desc_freed); usb_dmac_desc_alloc()
288 struct usb_dmac_desc *desc, *_desc; usb_dmac_desc_free() local
294 list_for_each_entry_safe(desc, _desc, &list, node) { usb_dmac_desc_free()
295 list_del(&desc->node); usb_dmac_desc_free()
296 kfree(desc); usb_dmac_desc_free()
304 struct usb_dmac_desc *desc = NULL; usb_dmac_desc_get() local
309 list_for_each_entry(desc, &chan->desc_freed, node) { usb_dmac_desc_get()
310 if (sg_len <= desc->sg_allocated_len) { usb_dmac_desc_get()
311 list_move_tail(&desc->node, &chan->desc_got); usb_dmac_desc_get()
313 return desc; usb_dmac_desc_get()
320  	/* If the desc was just allocated, it was added to the tail of the list */ usb_dmac_desc_get()
322 desc = list_last_entry(&chan->desc_freed, struct usb_dmac_desc, usb_dmac_desc_get()
324 list_move_tail(&desc->node, &chan->desc_got); usb_dmac_desc_get()
326 return desc; usb_dmac_desc_get()
333 struct usb_dmac_desc *desc) usb_dmac_desc_put()
338 list_move_tail(&desc->node, &chan->desc_freed); usb_dmac_desc_put()
424 struct usb_dmac_desc *desc; usb_dmac_prep_slave_sg() local
434 desc = usb_dmac_desc_get(uchan, sg_len, GFP_NOWAIT); usb_dmac_prep_slave_sg()
435 if (!desc) usb_dmac_prep_slave_sg()
438 desc->direction = dir; usb_dmac_prep_slave_sg()
439 desc->sg_len = sg_len; for_each_sg()
441 desc->sg[i].mem_addr = sg_dma_address(sg); for_each_sg()
442 desc->sg[i].size = sg_dma_len(sg); for_each_sg()
445 return vchan_tx_prep(&uchan->vc, &desc->vd, dma_flags);
451 struct usb_dmac_desc *desc; usb_dmac_chan_terminate_all() local
459 if (uchan->desc) usb_dmac_chan_terminate_all()
460 uchan->desc = NULL; usb_dmac_chan_terminate_all()
462 list_for_each_entry(desc, &list, node) usb_dmac_chan_terminate_all()
463 list_move_tail(&desc->node, &uchan->desc_freed); usb_dmac_chan_terminate_all()
471 struct usb_dmac_desc *desc, usb_dmac_get_current_residue()
474 struct usb_dmac_sg *sg = desc->sg + sg_index; usb_dmac_get_current_residue()
482 if (desc->direction == DMA_DEV_TO_MEM) usb_dmac_get_current_residue()
493 struct usb_dmac_desc *desc; usb_dmac_chan_get_residue_if_complete() local
496 list_for_each_entry_reverse(desc, &chan->desc_freed, node) { usb_dmac_chan_get_residue_if_complete()
497 if (desc->done_cookie == cookie) { usb_dmac_chan_get_residue_if_complete()
498 residue = desc->residue; usb_dmac_chan_get_residue_if_complete()
511 struct usb_dmac_desc *desc = chan->desc; usb_dmac_chan_get_residue() local
514 if (!desc) { usb_dmac_chan_get_residue()
518 desc = to_usb_dmac_desc(vd); usb_dmac_chan_get_residue()
522 for (i = desc->sg_index + 1; i < desc->sg_len; i++) usb_dmac_chan_get_residue()
523 residue += desc->sg[i].size; usb_dmac_chan_get_residue()
526 residue += usb_dmac_get_current_residue(chan, desc, desc->sg_index); usb_dmac_chan_get_residue()
563 if (vchan_issue_pending(&uchan->vc) && !uchan->desc) usb_dmac_issue_pending()
570 struct usb_dmac_desc *desc = to_usb_dmac_desc(vd); usb_dmac_virt_desc_free() local
573 usb_dmac_desc_put(chan, desc); usb_dmac_virt_desc_free()
582 struct usb_dmac_desc *desc = chan->desc; usb_dmac_isr_transfer_end() local
584 BUG_ON(!desc); usb_dmac_isr_transfer_end()
586 if (++desc->sg_index < desc->sg_len) { usb_dmac_isr_transfer_end()
587 usb_dmac_chan_start_sg(chan, desc->sg_index); usb_dmac_isr_transfer_end()
589 desc->residue = usb_dmac_get_current_residue(chan, desc, usb_dmac_isr_transfer_end()
590 desc->sg_index - 1); usb_dmac_isr_transfer_end()
591 desc->done_cookie = desc->vd.tx.cookie; usb_dmac_isr_transfer_end()
592 vchan_cookie_complete(&desc->vd); usb_dmac_isr_transfer_end()
594  		/* Start the next transfer if this channel has another desc queued */ usb_dmac_isr_transfer_end()
332 usb_dmac_desc_put(struct usb_dmac_chan *chan, struct usb_dmac_desc *desc) usb_dmac_desc_put() argument
470 usb_dmac_get_current_residue(struct usb_dmac_chan *chan, struct usb_dmac_desc *desc, int sg_index) usb_dmac_get_current_residue() argument
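usb_dmac_desc_get() above is a first-fit lookup: reuse any freed descriptor whose preallocated sg array has at least sg_len slots; on a miss, allocate one of exactly sg_len slots (which usb_dmac_desc_alloc() appends to desc_freed, returning 0 on success) and claim it from the tail. A condensed sketch with the same locking:

	static struct usb_dmac_desc *desc_get(struct usb_dmac_chan *chan,
					      unsigned int sg_len, gfp_t gfp)
	{
		struct usb_dmac_desc *desc;
		unsigned long flags;

		/* First fit from the freed list. */
		spin_lock_irqsave(&chan->vc.lock, flags);
		list_for_each_entry(desc, &chan->desc_freed, node) {
			if (sg_len <= desc->sg_allocated_len) {
				list_move_tail(&desc->node, &chan->desc_got);
				spin_unlock_irqrestore(&chan->vc.lock, flags);
				return desc;
			}
		}
		spin_unlock_irqrestore(&chan->vc.lock, flags);

		/* Miss: allocate, then take the freshly appended tail entry. */
		if (!usb_dmac_desc_alloc(chan, sg_len, gfp)) {
			spin_lock_irqsave(&chan->vc.lock, flags);
			desc = list_last_entry(&chan->desc_freed,
					       struct usb_dmac_desc, node);
			list_move_tail(&desc->node, &chan->desc_got);
			spin_unlock_irqrestore(&chan->vc.lock, flags);
			return desc;
		}

		return NULL;
	}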
/linux-4.4.14/arch/arm64/crypto/
H A Dsha1-ce-glue.c35 static int sha1_ce_update(struct shash_desc *desc, const u8 *data, sha1_ce_update() argument
38 struct sha1_ce_state *sctx = shash_desc_ctx(desc); sha1_ce_update()
42 sha1_base_do_update(desc, data, len, sha1_ce_update()
49 static int sha1_ce_finup(struct shash_desc *desc, const u8 *data, sha1_ce_finup() argument
52 struct sha1_ce_state *sctx = shash_desc_ctx(desc); sha1_ce_finup()
67 sha1_base_do_update(desc, data, len, sha1_ce_finup()
70 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); sha1_ce_finup()
72 return sha1_base_finish(desc, out); sha1_ce_finup()
75 static int sha1_ce_final(struct shash_desc *desc, u8 *out) sha1_ce_final() argument
77 struct sha1_ce_state *sctx = shash_desc_ctx(desc); sha1_ce_final()
81 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform); sha1_ce_final()
83 return sha1_base_finish(desc, out); sha1_ce_final()
H A Dsha2-ce-glue.c35 static int sha256_ce_update(struct shash_desc *desc, const u8 *data, sha256_ce_update() argument
38 struct sha256_ce_state *sctx = shash_desc_ctx(desc); sha256_ce_update()
42 sha256_base_do_update(desc, data, len, sha256_ce_update()
49 static int sha256_ce_finup(struct shash_desc *desc, const u8 *data, sha256_ce_finup() argument
52 struct sha256_ce_state *sctx = shash_desc_ctx(desc); sha256_ce_finup()
67 sha256_base_do_update(desc, data, len, sha256_ce_finup()
70 sha256_base_do_finalize(desc, sha256_ce_finup()
73 return sha256_base_finish(desc, out); sha256_ce_finup()
76 static int sha256_ce_final(struct shash_desc *desc, u8 *out) sha256_ce_final() argument
78 struct sha256_ce_state *sctx = shash_desc_ctx(desc); sha256_ce_final()
82 sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform); sha256_ce_final()
84 return sha256_base_finish(desc, out); sha256_ce_final()
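Both SHA glue files above share one shape: keep per-request state in shash_desc_ctx(desc), wrap the NEON block function in kernel_neon_begin_partial()/kernel_neon_end(), and let the generic sha*_base_do_update/do_finalize helpers drive it. A schematic of the update path using the sha256 names from the excerpt; the finalize flag is the extra state these drivers carry, and sha2_ce_transform is the assembly block function the real driver provides.

	/* Schematic of sha256_ce_update() above. */
	static int sha256_ce_update_sketch(struct shash_desc *desc,
					   const u8 *data, unsigned int len)
	{
		struct sha256_ce_state *sctx = shash_desc_ctx(desc);

		sctx->finalize = 0;		/* plain update, no finup fusion */

		kernel_neon_begin_partial(28);	/* claim 28 NEON registers */
		sha256_base_do_update(desc, data, len,
				      (sha256_block_fn *)sha2_ce_transform);
		kernel_neon_end();

		return 0;
	}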
H A Daes-glue.c99 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
102 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_encrypt()
107 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ecb_encrypt()
109 err = blkcipher_walk_virt(desc, &walk); ecb_encrypt()
115 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); ecb_encrypt()
121 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
124 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_decrypt()
129 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ecb_decrypt()
131 err = blkcipher_walk_virt(desc, &walk); ecb_decrypt()
137 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); ecb_decrypt()
143 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
146 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_encrypt()
151 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_encrypt()
153 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
160 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); cbc_encrypt()
166 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
169 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_decrypt()
174 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_decrypt()
176 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
183 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); cbc_decrypt()
189 static int ctr_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_encrypt() argument
192 struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_encrypt()
197 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ctr_encrypt()
199 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); ctr_encrypt()
211 err = blkcipher_walk_done(desc, &walk, ctr_encrypt()
228 err = blkcipher_walk_done(desc, &walk, 0); ctr_encrypt()
235 static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_encrypt() argument
238 struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt()
243 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; xts_encrypt()
245 err = blkcipher_walk_virt(desc, &walk); xts_encrypt()
252 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); xts_encrypt()
259 static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, xts_decrypt() argument
262 struct crypto_aes_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt()
267 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; xts_decrypt()
269 err = blkcipher_walk_virt(desc, &walk); xts_decrypt()
276 err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE); xts_decrypt()
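The ecb/cbc/ctr/xts routines above share one skeleton: clear CRYPTO_TFM_REQ_MAY_SLEEP (the NEON region must not sleep), start a blkcipher walk, and process whole AES blocks per step, handing the sub-block remainder back to blkcipher_walk_done(). A schematic of that loop; aes_ecb_encrypt stands in for the assembly core from the accompanying .S file, and the trailing first-call argument is assumed fixed at 1 here.

	/* Skeleton shared by the mode handlers above (ecb shown). */
	static int ecb_encrypt_sketch(struct blkcipher_desc *desc,
				      struct scatterlist *dst,
				      struct scatterlist *src,
				      unsigned int nbytes)
	{
		struct crypto_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
		int err, rounds = 6 + ctx->key_length / 4;
		struct blkcipher_walk walk;
		unsigned int blocks;

		desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
		blkcipher_walk_init(&walk, dst, src, nbytes);
		err = blkcipher_walk_virt(desc, &walk);

		kernel_neon_begin();
		while ((blocks = (walk.nbytes / AES_BLOCK_SIZE))) {
			aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
					(u8 *)ctx->key_enc, rounds, blocks, 1);
			err = blkcipher_walk_done(desc, &walk,
						  walk.nbytes % AES_BLOCK_SIZE);
		}
		kernel_neon_end();

		return err;
	}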
H A Dcrc32-arm64.c100 static int chksum_init(struct shash_desc *desc) chksum_init() argument
102 struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); chksum_init()
103 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_init()
128 static int chksum_update(struct shash_desc *desc, const u8 *data, chksum_update() argument
131 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_update()
137 static int chksumc_update(struct shash_desc *desc, const u8 *data, chksumc_update() argument
140 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksumc_update()
146 static int chksum_final(struct shash_desc *desc, u8 *out) chksum_final() argument
148 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_final()
154 static int chksumc_final(struct shash_desc *desc, u8 *out) chksumc_final() argument
156 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksumc_final()
174 static int chksum_finup(struct shash_desc *desc, const u8 *data, chksum_finup() argument
177 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_finup()
182 static int chksumc_finup(struct shash_desc *desc, const u8 *data, chksumc_finup() argument
185 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksumc_finup()
190 static int chksum_digest(struct shash_desc *desc, const u8 *data, chksum_digest() argument
193 struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); chksum_digest()
198 static int chksumc_digest(struct shash_desc *desc, const u8 *data, chksumc_digest() argument
201 struct chksum_ctx *mctx = crypto_shash_ctx(desc->tfm); chksumc_digest()
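The crc32/crc32c pairs above differ only in which hardware instruction the helper dispatches to; both keep a running u32 in shash_desc_ctx(desc), seeded at init from the key held in crypto_shash_ctx(desc->tfm). For orientation, a hedged sketch of how a caller drives such a shash; "crc32" is the algorithm name assumed registered by this glue, and crypto_shash_digest() fuses init+update+final in one call.

	#include <crypto/hash.h>

	/* Compute a CRC over 'buf' via the crypto API. */
	static int crc32_of(const u8 *buf, unsigned int len, u32 *out)
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("crc32", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		{
			SHASH_DESC_ON_STACK(desc, tfm);	/* stack-allocated shash_desc */

			desc->tfm = tfm;
			desc->flags = 0;
			err = crypto_shash_digest(desc, buf, len, (u8 *)out);
		}

		crypto_free_shash(tfm);
		return err;
	}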
/linux-4.4.14/include/net/
H A Dpsnap.h5 register_snap_client(const unsigned char *desc,
/linux-4.4.14/arch/arm/include/asm/mach/
H A Dirq.h26 #define do_bad_IRQ(desc) \
28 raw_spin_lock(&desc->lock); \
29 handle_bad_irq(desc); \
30 raw_spin_unlock(&desc->lock); \
/linux-4.4.14/tools/perf/bench/
H A Dmem-memcpy-arch.h4 #define MEMCPY_FN(fn, name, desc) \
H A Dmem-memset-arch.h4 #define MEMSET_FN(fn, name, desc) \
/linux-4.4.14/include/linux/gpio/
H A Dconsumer.h25 struct gpio_desc *desc[]; member in struct:gpio_descs
70 void gpiod_put(struct gpio_desc *desc);
92 void devm_gpiod_put(struct device *dev, struct gpio_desc *desc);
95 int gpiod_get_direction(struct gpio_desc *desc);
96 int gpiod_direction_input(struct gpio_desc *desc);
97 int gpiod_direction_output(struct gpio_desc *desc, int value);
98 int gpiod_direction_output_raw(struct gpio_desc *desc, int value);
101 int gpiod_get_value(const struct gpio_desc *desc);
102 void gpiod_set_value(struct gpio_desc *desc, int value);
105 int gpiod_get_raw_value(const struct gpio_desc *desc);
106 void gpiod_set_raw_value(struct gpio_desc *desc, int value);
112 int gpiod_get_value_cansleep(const struct gpio_desc *desc);
113 void gpiod_set_value_cansleep(struct gpio_desc *desc, int value);
117 int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc);
118 void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, int value);
123 int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
125 int gpiod_is_active_low(const struct gpio_desc *desc);
126 int gpiod_cansleep(const struct gpio_desc *desc);
128 int gpiod_to_irq(const struct gpio_desc *desc);
132 int desc_to_gpio(const struct gpio_desc *desc);
192 static inline void gpiod_put(struct gpio_desc *desc) gpiod_put() argument
253 static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc) devm_gpiod_put() argument
271 static inline int gpiod_get_direction(const struct gpio_desc *desc) gpiod_get_direction() argument
277 static inline int gpiod_direction_input(struct gpio_desc *desc) gpiod_direction_input() argument
283 static inline int gpiod_direction_output(struct gpio_desc *desc, int value) gpiod_direction_output() argument
289 static inline int gpiod_direction_output_raw(struct gpio_desc *desc, int value) gpiod_direction_output_raw() argument
297 static inline int gpiod_get_value(const struct gpio_desc *desc) gpiod_get_value() argument
303 static inline void gpiod_set_value(struct gpio_desc *desc, int value) gpiod_set_value() argument
315 static inline int gpiod_get_raw_value(const struct gpio_desc *desc) gpiod_get_raw_value() argument
321 static inline void gpiod_set_raw_value(struct gpio_desc *desc, int value) gpiod_set_raw_value() argument
334 static inline int gpiod_get_value_cansleep(const struct gpio_desc *desc) gpiod_get_value_cansleep() argument
340 static inline void gpiod_set_value_cansleep(struct gpio_desc *desc, int value) gpiod_set_value_cansleep() argument
352 static inline int gpiod_get_raw_value_cansleep(const struct gpio_desc *desc) gpiod_get_raw_value_cansleep() argument
358 static inline void gpiod_set_raw_value_cansleep(struct gpio_desc *desc, gpiod_set_raw_value_cansleep() argument
372 static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce) gpiod_set_debounce() argument
379 static inline int gpiod_is_active_low(const struct gpio_desc *desc) gpiod_is_active_low() argument
385 static inline int gpiod_cansleep(const struct gpio_desc *desc) gpiod_cansleep() argument
392 static inline int gpiod_to_irq(const struct gpio_desc *desc) gpiod_to_irq() argument
404 static inline int desc_to_gpio(const struct gpio_desc *desc) desc_to_gpio() argument
430 int gpiod_export(struct gpio_desc *desc, bool direction_may_change);
432 struct gpio_desc *desc);
433 void gpiod_unexport(struct gpio_desc *desc);
437 static inline int gpiod_export(struct gpio_desc *desc, gpiod_export() argument
444 struct gpio_desc *desc) gpiod_export_link()
449 static inline void gpiod_unexport(struct gpio_desc *desc) gpiod_unexport() argument
443 gpiod_export_link(struct device *dev, const char *name, struct gpio_desc *desc) gpiod_export_link() argument
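The consumer.h excerpts above split the gpiod API into value accessors (raw vs. logical, atomic vs. cansleep) with stub fallbacks when GPIOLIB is off. A minimal consumer sketch; the "reset" con_id is hypothetical and dev is the consumer's device.

	#include <linux/gpio/consumer.h>

	/* Typical consumer flow: request, drive, convert to IRQ. */
	static int demo_gpio_consumer(struct device *dev)
	{
		struct gpio_desc *desc;
		int irq;

		desc = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
		if (IS_ERR(desc))
			return PTR_ERR(desc);

		gpiod_set_value(desc, 1);	/* logical value: active */
		if (gpiod_cansleep(desc))
			gpiod_set_value_cansleep(desc, 0);
		else
			gpiod_set_value(desc, 0);

		irq = gpiod_to_irq(desc);	/* < 0 if no IRQ mapping */
		return irq < 0 ? 0 : irq;
	}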
/linux-4.4.14/drivers/net/wireless/ti/wl1251/
H A Drx.c35 struct wl1251_rx_descriptor *desc) wl1251_rx_header()
43 wl1251_mem_read(wl, rx_packet_ring_addr, desc, sizeof(*desc)); wl1251_rx_header()
47 struct wl1251_rx_descriptor *desc, wl1251_rx_status()
57 status->mactime = desc->timestamp; wl1251_rx_status()
73 status->signal = desc->rssi; wl1251_rx_status()
79 wl->noise = desc->rssi - desc->snr / 2; wl1251_rx_status()
81 status->freq = ieee80211_channel_to_frequency(desc->channel, wl1251_rx_status()
86 if (!wl->monitor_present && (desc->flags & RX_DESC_ENCRYPTION_MASK)) { wl1251_rx_status()
89 if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL))) wl1251_rx_status()
92 if (unlikely(desc->flags & RX_DESC_MIC_FAIL)) wl1251_rx_status()
96 if (unlikely(!(desc->flags & RX_DESC_VALID_FCS))) wl1251_rx_status()
99 switch (desc->rate) { wl1251_rx_status()
134 if (desc->rate == RATE_1MBPS) { wl1251_rx_status()
135 if (!(desc->mod_pre & OFDM_RATE_BIT)) wl1251_rx_status()
143 if (desc->mod_pre & SHORT_PREAMBLE_BIT) wl1251_rx_status()
148 struct wl1251_rx_descriptor *desc) wl1251_rx_body()
156 length = WL1251_RX_ALIGN(desc->length - PLCP_HEADER_LENGTH); wl1251_rx_body()
157 curr_id = (desc->flags & RX_DESC_SEQNUM_MASK) >> RX_DESC_PACKETID_SHIFT; wl1251_rx_body()
183 skb_trim(skb, desc->length - PLCP_HEADER_LENGTH); wl1251_rx_body()
190 wl1251_rx_status(wl, desc, &status, beacon); wl1251_rx_body()
34 wl1251_rx_header(struct wl1251 *wl, struct wl1251_rx_descriptor *desc) wl1251_rx_header() argument
46 wl1251_rx_status(struct wl1251 *wl, struct wl1251_rx_descriptor *desc, struct ieee80211_rx_status *status, u8 beacon) wl1251_rx_status() argument
147 wl1251_rx_body(struct wl1251 *wl, struct wl1251_rx_descriptor *desc) wl1251_rx_body() argument
/linux-4.4.14/drivers/sh/intc/
H A Dhandle.c18 static intc_enum __init intc_grp_id(struct intc_desc *desc, intc_grp_id() argument
21 struct intc_group *g = desc->hw.groups; intc_grp_id()
24 for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) { intc_grp_id()
25 g = desc->hw.groups + i; intc_grp_id()
38 static unsigned int __init _intc_mask_data(struct intc_desc *desc, _intc_mask_data() argument
44 struct intc_mask_reg *mr = desc->hw.mask_regs; _intc_mask_data()
48 while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) { _intc_mask_data()
49 mr = desc->hw.mask_regs + *reg_idx; _intc_mask_data()
89 intc_get_mask_handle(struct intc_desc *desc, struct intc_desc_int *d, intc_get_mask_handle() argument
96 ret = _intc_mask_data(desc, d, enum_id, &i, &j); intc_get_mask_handle()
101 return intc_get_mask_handle(desc, d, intc_grp_id(desc, enum_id), 0); intc_get_mask_handle()
106 static unsigned int __init _intc_prio_data(struct intc_desc *desc, _intc_prio_data() argument
112 struct intc_prio_reg *pr = desc->hw.prio_regs; _intc_prio_data()
116 while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) { _intc_prio_data()
117 pr = desc->hw.prio_regs + *reg_idx; _intc_prio_data()
158 intc_get_prio_handle(struct intc_desc *desc, struct intc_desc_int *d, intc_get_prio_handle() argument
165 ret = _intc_prio_data(desc, d, enum_id, &i, &j); intc_get_prio_handle()
170 return intc_get_prio_handle(desc, d, intc_grp_id(desc, enum_id), 0); intc_get_prio_handle()
175 static unsigned int intc_ack_data(struct intc_desc *desc, intc_ack_data() argument
178 struct intc_mask_reg *mr = desc->hw.ack_regs; intc_ack_data()
182 for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) { intc_ack_data()
183 mr = desc->hw.ack_regs + i; intc_ack_data()
231 void __init intc_enable_disable_enum(struct intc_desc *desc, intc_enable_disable_enum() argument
240 data = _intc_mask_data(desc, d, enum_id, &i, &j); intc_enable_disable_enum()
249 data = _intc_prio_data(desc, d, enum_id, &i, &j); intc_enable_disable_enum()
258 intc_get_sense_handle(struct intc_desc *desc, struct intc_desc_int *d, intc_get_sense_handle() argument
261 struct intc_sense_reg *sr = desc->hw.sense_regs; intc_get_sense_handle()
264 for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) { intc_get_sense_handle()
265 sr = desc->hw.sense_regs + i; intc_get_sense_handle()
287 void intc_set_ack_handle(unsigned int irq, struct intc_desc *desc, intc_set_ack_handle() argument
295 if (!desc->hw.ack_regs) intc_set_ack_handle()
299 ack_handle[irq] = intc_ack_data(desc, d, id); intc_set_ack_handle()
H A Dbalancing.c40 static unsigned int intc_dist_data(struct intc_desc *desc, intc_dist_data() argument
44 struct intc_mask_reg *mr = desc->hw.mask_regs; intc_dist_data()
48 for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) { intc_dist_data()
49 mr = desc->hw.mask_regs + i; intc_dist_data()
83 void intc_set_dist_handle(unsigned int irq, struct intc_desc *desc, intc_set_dist_handle() argument
91 if (!desc->hw.mask_regs) intc_set_dist_handle()
95 dist_handle[irq] = intc_dist_data(desc, d, id); intc_set_dist_handle()
H A Dvirq-debugfs.c27 struct intc_desc_int *desc = entry->desc; intc_irq_xlate_debug() local
29 if (!desc) intc_irq_xlate_debug()
34 seq_printf(m, "%-15s\n", desc->chip.name); intc_irq_xlate_debug()
H A Dcore.c68 static void intc_redirect_irq(struct irq_desc *desc) intc_redirect_irq() argument
70 generic_handle_irq((unsigned int)irq_desc_get_handler_data(desc)); intc_redirect_irq()
73 static void __init intc_register_irq(struct intc_desc *desc, intc_register_irq() argument
95 data[0] = intc_get_mask_handle(desc, d, enum_id, 0); intc_register_irq()
96 data[1] = intc_get_prio_handle(desc, d, enum_id, 0); intc_register_irq()
106 data[0] = data[0] ? data[0] : intc_get_mask_handle(desc, d, enum_id, 1); intc_register_irq()
107 data[1] = data[1] ? data[1] : intc_get_prio_handle(desc, d, enum_id, 1); intc_register_irq()
148 data[0] = intc_get_sense_handle(desc, d, enum_id); intc_register_irq()
158 intc_set_ack_handle(irq, desc, d, enum_id); intc_register_irq()
159 intc_set_dist_handle(irq, desc, d, enum_id); intc_register_irq()
182 int __init register_intc_controller(struct intc_desc *desc) register_intc_controller() argument
185 struct intc_hw_desc *hw = &desc->hw; register_intc_controller()
190 desc->name, hw->nr_vectors); register_intc_controller()
204 if (desc->num_resources) { register_intc_controller()
205 d->nr_windows = desc->num_resources; register_intc_controller()
212 res = desc->resource + k; register_intc_controller()
290 d->chip.name = desc->name; register_intc_controller()
299 if (desc->force_disable) register_intc_controller()
300 intc_enable_disable_enum(desc, d, desc->force_disable, 0); register_intc_controller()
303 if (desc->force_enable) register_intc_controller()
304 intc_enable_disable_enum(desc, d, desc->force_enable, 0); register_intc_controller()
334 intc_register_irq(desc, d, vect->enum_id, irq); register_intc_controller()
375 intc_subgroup_init(desc, d); register_intc_controller()
378 if (desc->force_enable) register_intc_controller()
379 intc_enable_disable_enum(desc, d, desc->force_enable, 1); register_intc_controller()
381 d->skip_suspend = desc->skip_syscore_suspend; register_intc_controller()
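
	Note: a board file feeds register_intc_controller() a static
	intc_desc built with the DECLARE_INTC_DESC() helper. A hedged
	sketch follows; the vector numbers and register addresses are
	hypothetical, only the table shapes come from <linux/sh_intc.h>.

	#include <linux/sh_intc.h>

	enum { UNUSED = 0, IRQ0, IRQ1 };

	static struct intc_vect demo_vectors[] __initdata = {
		INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
	};

	static struct intc_prio_reg demo_prio_registers[] __initdata = {
		/* set_reg, clr_reg, reg_width, field_width, enum_ids */
		{ 0xa4140010, 0, 16, 4, { IRQ0, IRQ1 } },
	};

	static DECLARE_INTC_DESC(demo_intc_desc, "demo-board", demo_vectors,
				 NULL, NULL, demo_prio_registers, NULL);

	void __init plat_irq_setup(void)
	{
		register_intc_controller(&demo_intc_desc);
	}
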
/linux-4.4.14/arch/s390/kernel/
H A Dirq.c35 char *desc; member in struct:irq_class
60 {.irq = IRQEXT_CLK, .name = "CLK", .desc = "[EXT] Clock Comparator"},
61 {.irq = IRQEXT_EXC, .name = "EXC", .desc = "[EXT] External Call"},
62 {.irq = IRQEXT_EMS, .name = "EMS", .desc = "[EXT] Emergency Signal"},
63 {.irq = IRQEXT_TMR, .name = "TMR", .desc = "[EXT] CPU Timer"},
64 {.irq = IRQEXT_TLA, .name = "TAL", .desc = "[EXT] Timing Alert"},
65 {.irq = IRQEXT_PFL, .name = "PFL", .desc = "[EXT] Pseudo Page Fault"},
66 {.irq = IRQEXT_DSD, .name = "DSD", .desc = "[EXT] DASD Diag"},
67 {.irq = IRQEXT_VRT, .name = "VRT", .desc = "[EXT] Virtio"},
68 {.irq = IRQEXT_SCP, .name = "SCP", .desc = "[EXT] Service Call"},
69 {.irq = IRQEXT_IUC, .name = "IUC", .desc = "[EXT] IUCV"},
70 {.irq = IRQEXT_CMS, .name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling"},
71 {.irq = IRQEXT_CMC, .name = "CMC", .desc = "[EXT] CPU-Measurement: Counter"},
72 {.irq = IRQEXT_FTP, .name = "FTP", .desc = "[EXT] HMC FTP Service"},
73 {.irq = IRQIO_CIO, .name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt"},
74 {.irq = IRQIO_QAI, .name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt"},
75 {.irq = IRQIO_DAS, .name = "DAS", .desc = "[I/O] DASD"},
76 {.irq = IRQIO_C15, .name = "C15", .desc = "[I/O] 3215"},
77 {.irq = IRQIO_C70, .name = "C70", .desc = "[I/O] 3270"},
78 {.irq = IRQIO_TAP, .name = "TAP", .desc = "[I/O] Tape"},
79 {.irq = IRQIO_VMR, .name = "VMR", .desc = "[I/O] Unit Record Devices"},
80 {.irq = IRQIO_LCS, .name = "LCS", .desc = "[I/O] LCS"},
81 {.irq = IRQIO_CTC, .name = "CTC", .desc = "[I/O] CTC"},
82 {.irq = IRQIO_APB, .name = "APB", .desc = "[I/O] AP Bus"},
83 {.irq = IRQIO_ADM, .name = "ADM", .desc = "[I/O] EADM Subchannel"},
84 {.irq = IRQIO_CSC, .name = "CSC", .desc = "[I/O] CHSC Subchannel"},
85 {.irq = IRQIO_PCI, .name = "PCI", .desc = "[I/O] PCI Interrupt" },
86 {.irq = IRQIO_MSI, .name = "MSI", .desc = "[I/O] MSI Interrupt" },
87 {.irq = IRQIO_VIR, .name = "VIR", .desc = "[I/O] Virtual I/O Devices"},
88 {.irq = IRQIO_VAI, .name = "VAI", .desc = "[I/O] Virtual I/O Devices AI"},
89 {.irq = NMI_NMI, .name = "NMI", .desc = "[NMI] Machine Check"},
90 {.irq = CPU_RST, .name = "RST", .desc = "[CPU] CPU Restart"},
146 if (irqclass_sub_desc[index].desc) show_interrupts()
147 seq_printf(p, " %s", irqclass_sub_desc[index].desc); show_interrupts()
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
H A Dpers.c45 void ptlrpc_fill_bulk_md(lnet_md_t *md, struct ptlrpc_bulk_desc *desc, ptlrpc_fill_bulk_md() argument
50 LASSERT(mdidx < desc->bd_md_max_brw); ptlrpc_fill_bulk_md()
51 LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); ptlrpc_fill_bulk_md()
56 md->length = max(0, desc->bd_iov_count - mdidx * LNET_MAX_IOV); ptlrpc_fill_bulk_md()
58 if (desc->bd_enc_iov) ptlrpc_fill_bulk_md()
59 md->start = &desc->bd_enc_iov[mdidx * LNET_MAX_IOV]; ptlrpc_fill_bulk_md()
61 md->start = &desc->bd_iov[mdidx * LNET_MAX_IOV]; ptlrpc_fill_bulk_md()
64 void ptlrpc_add_bulk_page(struct ptlrpc_bulk_desc *desc, struct page *page, ptlrpc_add_bulk_page() argument
67 lnet_kiov_t *kiov = &desc->bd_iov[desc->bd_iov_count]; ptlrpc_add_bulk_page()
73 desc->bd_iov_count++; ptlrpc_add_bulk_page()
H A Dniobuf.c115 struct ptlrpc_bulk_desc *desc = req->rq_bulk; ptlrpc_register_bulk() local
128 /* NB no locking required until desc is on the network */ ptlrpc_register_bulk()
129 LASSERT(desc->bd_nob > 0); ptlrpc_register_bulk()
130 LASSERT(desc->bd_md_count == 0); ptlrpc_register_bulk()
131 LASSERT(desc->bd_md_max_brw <= PTLRPC_BULK_OPS_COUNT); ptlrpc_register_bulk()
132 LASSERT(desc->bd_iov_count <= PTLRPC_MAX_BRW_PAGES); ptlrpc_register_bulk()
133 LASSERT(desc->bd_req != NULL); ptlrpc_register_bulk()
134 LASSERT(desc->bd_type == BULK_PUT_SINK || ptlrpc_register_bulk()
135 desc->bd_type == BULK_GET_SOURCE); ptlrpc_register_bulk()
139 desc->bd_nob_transferred = 0; ptlrpc_register_bulk()
141 LASSERT(desc->bd_nob_transferred == 0); ptlrpc_register_bulk()
143 desc->bd_failure = 0; ptlrpc_register_bulk()
145 peer = desc->bd_import->imp_connection->c_peer; ptlrpc_register_bulk()
147 LASSERT(desc->bd_cbid.cbid_fn == client_bulk_callback); ptlrpc_register_bulk()
148 LASSERT(desc->bd_cbid.cbid_arg == desc); ptlrpc_register_bulk()
157 xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1); ptlrpc_register_bulk()
158 LASSERTF(!(desc->bd_registered && ptlrpc_register_bulk()
160 xid != desc->bd_last_xid, ptlrpc_register_bulk()
162 desc->bd_registered, xid, desc->bd_last_xid); ptlrpc_register_bulk()
164 total_md = (desc->bd_iov_count + LNET_MAX_IOV - 1) / LNET_MAX_IOV; ptlrpc_register_bulk()
165 desc->bd_registered = 1; ptlrpc_register_bulk()
166 desc->bd_last_xid = xid; ptlrpc_register_bulk()
167 desc->bd_md_count = total_md; ptlrpc_register_bulk()
168 md.user_ptr = &desc->bd_cbid; ptlrpc_register_bulk()
174 ((desc->bd_type == BULK_GET_SOURCE) ? ptlrpc_register_bulk()
176 ptlrpc_fill_bulk_md(&md, desc, posted_md); ptlrpc_register_bulk()
178 rc = LNetMEAttach(desc->bd_portal, peer, xid, 0, ptlrpc_register_bulk()
182 desc->bd_import->imp_obd->obd_name, xid, ptlrpc_register_bulk()
189 &desc->bd_mds[posted_md]); ptlrpc_register_bulk()
192 desc->bd_import->imp_obd->obd_name, xid, ptlrpc_register_bulk()
202 spin_lock(&desc->bd_lock); ptlrpc_register_bulk()
203 desc->bd_md_count -= total_md - posted_md; ptlrpc_register_bulk()
204 spin_unlock(&desc->bd_lock); ptlrpc_register_bulk()
205 LASSERT(desc->bd_md_count >= 0); ptlrpc_register_bulk()
206 mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw); ptlrpc_register_bulk()
214 LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK), ptlrpc_register_bulk()
216 desc->bd_last_xid, req->rq_xid); ptlrpc_register_bulk()
218 spin_lock(&desc->bd_lock); ptlrpc_register_bulk()
220 if (desc->bd_md_count != total_md) ptlrpc_register_bulk()
222 desc->bd_import->imp_obd->obd_name, libcfs_id2str(peer), ptlrpc_register_bulk()
223 total_md - desc->bd_md_count); ptlrpc_register_bulk()
224 spin_unlock(&desc->bd_lock); ptlrpc_register_bulk()
227 desc->bd_md_count, ptlrpc_register_bulk()
228 desc->bd_type == BULK_GET_SOURCE ? "get-source" : "put-sink", ptlrpc_register_bulk()
229 desc->bd_iov_count, desc->bd_nob, ptlrpc_register_bulk()
230 desc->bd_last_xid, req->rq_xid, desc->bd_portal); ptlrpc_register_bulk()
236 * Disconnect a bulk desc from the network. Idempotent. Not
243 struct ptlrpc_bulk_desc *desc = req->rq_bulk; ptlrpc_unregister_bulk() local
258 LASSERT(desc->bd_req == req); /* bd_req NULL until registered */ ptlrpc_unregister_bulk()
264 mdunlink_iterate_helper(desc->bd_mds, desc->bd_md_max_brw); ptlrpc_unregister_bulk()
293 DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p", ptlrpc_unregister_bulk()
294 desc); ptlrpc_unregister_bulk()
/linux-4.4.14/include/crypto/
H A Dsha256_base.h21 static inline int sha224_base_init(struct shash_desc *desc) sha224_base_init() argument
23 struct sha256_state *sctx = shash_desc_ctx(desc); sha224_base_init()
38 static inline int sha256_base_init(struct shash_desc *desc) sha256_base_init() argument
40 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_init()
55 static inline int sha256_base_do_update(struct shash_desc *desc, sha256_base_do_update() argument
60 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_do_update()
93 static inline int sha256_base_do_finalize(struct shash_desc *desc, sha256_base_do_finalize() argument
97 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_do_finalize()
116 static inline int sha256_base_finish(struct shash_desc *desc, u8 *out) sha256_base_finish() argument
118 unsigned int digest_size = crypto_shash_digestsize(desc->tfm); sha256_base_finish()
119 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_base_finish()
H A Dpoly1305.h32 int crypto_poly1305_init(struct shash_desc *desc);
37 int crypto_poly1305_update(struct shash_desc *desc,
39 int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
H A Dsha512_base.h21 static inline int sha384_base_init(struct shash_desc *desc) sha384_base_init() argument
23 struct sha512_state *sctx = shash_desc_ctx(desc); sha384_base_init()
38 static inline int sha512_base_init(struct shash_desc *desc) sha512_base_init() argument
40 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_init()
55 static inline int sha512_base_do_update(struct shash_desc *desc, sha512_base_do_update() argument
60 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_do_update()
95 static inline int sha512_base_do_finalize(struct shash_desc *desc, sha512_base_do_finalize() argument
99 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_do_finalize()
119 static inline int sha512_base_finish(struct shash_desc *desc, u8 *out) sha512_base_finish() argument
121 unsigned int digest_size = crypto_shash_digestsize(desc->tfm); sha512_base_finish()
122 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_base_finish()
H A Dsha1_base.h20 static inline int sha1_base_init(struct shash_desc *desc) sha1_base_init() argument
22 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_init()
34 static inline int sha1_base_do_update(struct shash_desc *desc, sha1_base_do_update() argument
39 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_do_update()
72 static inline int sha1_base_do_finalize(struct shash_desc *desc, sha1_base_do_finalize() argument
76 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_do_finalize()
95 static inline int sha1_base_finish(struct shash_desc *desc, u8 *out) sha1_base_finish() argument
97 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_base_finish()
H A Dsha.h87 extern int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
90 extern int crypto_sha1_finup(struct shash_desc *desc, const u8 *data,
93 extern int crypto_sha256_update(struct shash_desc *desc, const u8 *data,
96 extern int crypto_sha256_finup(struct shash_desc *desc, const u8 *data,
99 extern int crypto_sha512_update(struct shash_desc *desc, const u8 *data,
102 extern int crypto_sha512_finup(struct shash_desc *desc, const u8 *data,
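
	Note: all of these helpers operate on a caller-provided shash_desc.
	A one-shot digest through the same API looks roughly like the
	sketch below; demo_sha256() is hypothetical, and the caller is
	assumed to have obtained tfm via crypto_alloc_shash("sha256", 0, 0).

	#include <crypto/hash.h>
	#include <crypto/sha.h>

	static int demo_sha256(struct crypto_shash *tfm, const u8 *data,
			       unsigned int len, u8 out[SHA256_DIGEST_SIZE])
	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;	/* no CRYPTO_TFM_REQ_MAY_SLEEP */
		return crypto_shash_digest(desc, data, len, out);
	}
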
/linux-4.4.14/arch/x86/crypto/
H A Dsha1_ssse3_glue.c37 static int sha1_update(struct shash_desc *desc, const u8 *data, sha1_update() argument
40 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_update()
44 return crypto_sha1_update(desc, data, len); sha1_update()
50 sha1_base_do_update(desc, data, len, sha1_update()
57 static int sha1_finup(struct shash_desc *desc, const u8 *data, sha1_finup() argument
61 return crypto_sha1_finup(desc, data, len, out); sha1_finup()
65 sha1_base_do_update(desc, data, len, sha1_finup()
67 sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_xform); sha1_finup()
70 return sha1_base_finish(desc, out); sha1_finup()
76 static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data, sha1_ssse3_update() argument
79 return sha1_update(desc, data, len, sha1_ssse3_update()
83 static int sha1_ssse3_finup(struct shash_desc *desc, const u8 *data, sha1_ssse3_finup() argument
86 return sha1_finup(desc, data, len, out, sha1_ssse3_finup()
91 static int sha1_ssse3_final(struct shash_desc *desc, u8 *out) sha1_ssse3_final() argument
93 return sha1_ssse3_finup(desc, NULL, 0, out); sha1_ssse3_final()
130 static int sha1_avx_update(struct shash_desc *desc, const u8 *data, sha1_avx_update() argument
133 return sha1_update(desc, data, len, sha1_avx_update()
137 static int sha1_avx_finup(struct shash_desc *desc, const u8 *data, sha1_avx_finup() argument
140 return sha1_finup(desc, data, len, out, sha1_avx_finup()
144 static int sha1_avx_final(struct shash_desc *desc, u8 *out) sha1_avx_final() argument
146 return sha1_avx_finup(desc, NULL, 0, out); sha1_avx_final()
222 static int sha1_avx2_update(struct shash_desc *desc, const u8 *data, sha1_avx2_update() argument
225 return sha1_update(desc, data, len, sha1_avx2_update()
229 static int sha1_avx2_finup(struct shash_desc *desc, const u8 *data, sha1_avx2_finup() argument
232 return sha1_finup(desc, data, len, out, sha1_avx2_finup()
236 static int sha1_avx2_final(struct shash_desc *desc, u8 *out) sha1_avx2_final() argument
238 return sha1_avx2_finup(desc, NULL, 0, out); sha1_avx2_final()
280 static int sha1_ni_update(struct shash_desc *desc, const u8 *data, sha1_ni_update() argument
283 return sha1_update(desc, data, len, sha1_ni_update()
287 static int sha1_ni_finup(struct shash_desc *desc, const u8 *data, sha1_ni_finup() argument
290 return sha1_finup(desc, data, len, out, sha1_ni_finup()
294 static int sha1_ni_final(struct shash_desc *desc, u8 *out) sha1_ni_final() argument
296 return sha1_ni_finup(desc, NULL, 0, out); sha1_ni_final()
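
	Note: every *_update() above funnels into the same guard that the
	excerpt elides: fall back to the generic C path when the FPU is
	unavailable or the data fits in the partial block, otherwise run
	the SIMD block function inside kernel_fpu_begin()/end(). A
	condensed sketch of that shared shape (demo_sha1_update() is
	hypothetical):

	#include <crypto/sha.h>
	#include <crypto/sha1_base.h>
	#include <asm/fpu/api.h>

	static int demo_sha1_update(struct shash_desc *desc, const u8 *data,
				    unsigned int len, sha1_block_fn *xform)
	{
		struct sha1_state *sctx = shash_desc_ctx(desc);

		if (!irq_fpu_usable() ||
		    (sctx->count % SHA1_BLOCK_SIZE) + len < SHA1_BLOCK_SIZE)
			return crypto_sha1_update(desc, data, len);

		kernel_fpu_begin();
		sha1_base_do_update(desc, data, len, xform);
		kernel_fpu_end();

		return 0;
	}
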
H A Dsha256_ssse3_glue.c47 static int sha256_update(struct shash_desc *desc, const u8 *data, sha256_update() argument
50 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_update()
54 return crypto_sha256_update(desc, data, len); sha256_update()
60 sha256_base_do_update(desc, data, len, sha256_update()
67 static int sha256_finup(struct shash_desc *desc, const u8 *data, sha256_finup() argument
71 return crypto_sha256_finup(desc, data, len, out); sha256_finup()
75 sha256_base_do_update(desc, data, len, sha256_finup()
77 sha256_base_do_finalize(desc, (sha256_block_fn *)sha256_xform); sha256_finup()
80 return sha256_base_finish(desc, out); sha256_finup()
83 static int sha256_ssse3_update(struct shash_desc *desc, const u8 *data, sha256_ssse3_update() argument
86 return sha256_update(desc, data, len, sha256_transform_ssse3); sha256_ssse3_update()
89 static int sha256_ssse3_finup(struct shash_desc *desc, const u8 *data, sha256_ssse3_finup() argument
92 return sha256_finup(desc, data, len, out, sha256_transform_ssse3); sha256_ssse3_finup()
96 static int sha256_ssse3_final(struct shash_desc *desc, u8 *out) sha256_ssse3_final() argument
98 return sha256_ssse3_finup(desc, NULL, 0, out); sha256_ssse3_final()
152 static int sha256_avx_update(struct shash_desc *desc, const u8 *data, sha256_avx_update() argument
155 return sha256_update(desc, data, len, sha256_transform_avx); sha256_avx_update()
158 static int sha256_avx_finup(struct shash_desc *desc, const u8 *data, sha256_avx_finup() argument
161 return sha256_finup(desc, data, len, out, sha256_transform_avx); sha256_avx_finup()
164 static int sha256_avx_final(struct shash_desc *desc, u8 *out) sha256_avx_final() argument
166 return sha256_avx_finup(desc, NULL, 0, out); sha256_avx_final()
236 static int sha256_avx2_update(struct shash_desc *desc, const u8 *data, sha256_avx2_update() argument
239 return sha256_update(desc, data, len, sha256_transform_rorx); sha256_avx2_update()
242 static int sha256_avx2_finup(struct shash_desc *desc, const u8 *data, sha256_avx2_finup() argument
245 return sha256_finup(desc, data, len, out, sha256_transform_rorx); sha256_avx2_finup()
248 static int sha256_avx2_final(struct shash_desc *desc, u8 *out) sha256_avx2_final() argument
250 return sha256_avx2_finup(desc, NULL, 0, out); sha256_avx2_final()
318 static int sha256_ni_update(struct shash_desc *desc, const u8 *data, sha256_ni_update() argument
321 return sha256_update(desc, data, len, sha256_ni_transform); sha256_ni_update()
324 static int sha256_ni_finup(struct shash_desc *desc, const u8 *data, sha256_ni_finup() argument
327 return sha256_finup(desc, data, len, out, sha256_ni_transform); sha256_ni_finup()
330 static int sha256_ni_final(struct shash_desc *desc, u8 *out) sha256_ni_final() argument
332 return sha256_ni_finup(desc, NULL, 0, out); sha256_ni_final()
H A Dglue_helper.c36 struct blkcipher_desc *desc, __glue_ecb_crypt_128bit()
39 void *ctx = crypto_blkcipher_ctx(desc->tfm); __glue_ecb_crypt_128bit()
45 err = blkcipher_walk_virt(desc, walk); __glue_ecb_crypt_128bit()
52 desc, fpu_enabled, nbytes); __glue_ecb_crypt_128bit()
74 err = blkcipher_walk_done(desc, walk, nbytes); __glue_ecb_crypt_128bit()
82 struct blkcipher_desc *desc, struct scatterlist *dst, glue_ecb_crypt_128bit()
88 return __glue_ecb_crypt_128bit(gctx, desc, &walk); glue_ecb_crypt_128bit()
93 struct blkcipher_desc *desc, __glue_cbc_encrypt_128bit()
96 void *ctx = crypto_blkcipher_ctx(desc->tfm); __glue_cbc_encrypt_128bit()
118 struct blkcipher_desc *desc, glue_cbc_encrypt_128bit()
126 err = blkcipher_walk_virt(desc, &walk); glue_cbc_encrypt_128bit()
129 nbytes = __glue_cbc_encrypt_128bit(fn, desc, &walk); glue_cbc_encrypt_128bit()
130 err = blkcipher_walk_done(desc, &walk, nbytes); glue_cbc_encrypt_128bit()
139 struct blkcipher_desc *desc, __glue_cbc_decrypt_128bit()
142 void *ctx = crypto_blkcipher_ctx(desc->tfm); __glue_cbc_decrypt_128bit()
192 struct blkcipher_desc *desc, glue_cbc_decrypt_128bit()
202 err = blkcipher_walk_virt(desc, &walk); glue_cbc_decrypt_128bit()
206 desc, fpu_enabled, nbytes); glue_cbc_decrypt_128bit()
207 nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); glue_cbc_decrypt_128bit()
208 err = blkcipher_walk_done(desc, &walk, nbytes); glue_cbc_decrypt_128bit()
217 struct blkcipher_desc *desc, glue_ctr_crypt_final_128bit()
220 void *ctx = crypto_blkcipher_ctx(desc->tfm); glue_ctr_crypt_final_128bit()
237 struct blkcipher_desc *desc, __glue_ctr_crypt_128bit()
241 void *ctx = crypto_blkcipher_ctx(desc->tfm); __glue_ctr_crypt_128bit()
276 struct blkcipher_desc *desc, struct scatterlist *dst, glue_ctr_crypt_128bit()
285 err = blkcipher_walk_virt_block(desc, &walk, bsize); glue_ctr_crypt_128bit()
289 desc, fpu_enabled, nbytes); glue_ctr_crypt_128bit()
290 nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); glue_ctr_crypt_128bit()
291 err = blkcipher_walk_done(desc, &walk, nbytes); glue_ctr_crypt_128bit()
298 gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); glue_ctr_crypt_128bit()
299 err = blkcipher_walk_done(desc, &walk, 0); glue_ctr_crypt_128bit()
308 struct blkcipher_desc *desc, __glue_xts_crypt_128bit()
344 struct blkcipher_desc *desc, struct scatterlist *dst, glue_xts_crypt_128bit()
356 err = blkcipher_walk_virt(desc, &walk); glue_xts_crypt_128bit()
363 desc, fpu_enabled, glue_xts_crypt_128bit()
370 nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk); glue_xts_crypt_128bit()
372 err = blkcipher_walk_done(desc, &walk, nbytes); glue_xts_crypt_128bit()
35 __glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct blkcipher_walk *walk) __glue_ecb_crypt_128bit() argument
81 glue_ecb_crypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) glue_ecb_crypt_128bit() argument
92 __glue_cbc_encrypt_128bit(const common_glue_func_t fn, struct blkcipher_desc *desc, struct blkcipher_walk *walk) __glue_cbc_encrypt_128bit() argument
117 glue_cbc_encrypt_128bit(const common_glue_func_t fn, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) glue_cbc_encrypt_128bit() argument
138 __glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct blkcipher_walk *walk) __glue_cbc_decrypt_128bit() argument
191 glue_cbc_decrypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) glue_cbc_decrypt_128bit() argument
216 glue_ctr_crypt_final_128bit(const common_glue_ctr_func_t fn_ctr, struct blkcipher_desc *desc, struct blkcipher_walk *walk) glue_ctr_crypt_final_128bit() argument
236 __glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct blkcipher_walk *walk) __glue_ctr_crypt_128bit() argument
275 glue_ctr_crypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) glue_ctr_crypt_128bit() argument
306 __glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, void *ctx, struct blkcipher_desc *desc, struct blkcipher_walk *walk) __glue_xts_crypt_128bit() argument
343 glue_xts_crypt_128bit(const struct common_glue_ctx *gctx, struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes, void (*tweak_fn)(void *ctx, u8 *dst, const u8 *src), void *tweak_ctx, void *crypt_ctx) glue_xts_crypt_128bit() argument
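
	Note: these helpers all wrap the legacy blkcipher walk protocol:
	map a contiguous span, process whole blocks, and hand the
	remainder back to blkcipher_walk_done(). A stripped-down ECB loop
	as a sketch; BSIZE and demo_ecb() are hypothetical and the
	per-block cipher call is stubbed out.

	#include <linux/crypto.h>
	#include <crypto/algapi.h>

	#define BSIZE	16	/* hypothetical 128-bit block size */

	static int demo_ecb(struct blkcipher_desc *desc, struct scatterlist *dst,
			    struct scatterlist *src, unsigned int nbytes)
	{
		struct blkcipher_walk walk;
		int err;

		blkcipher_walk_init(&walk, dst, src, nbytes);
		err = blkcipher_walk_virt(desc, &walk);

		while ((nbytes = walk.nbytes)) {
			u8 *wsrc = walk.src.virt.addr;
			u8 *wdst = walk.dst.virt.addr;

			while (nbytes >= BSIZE) {
				/* fn(ctx, wdst, wsrc);  one block here */
				wsrc += BSIZE;
				wdst += BSIZE;
				nbytes -= BSIZE;
			}
			err = blkcipher_walk_done(desc, &walk, nbytes);
		}
		return err;
	}
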
H A Dsha512_ssse3_glue.c47 static int sha512_update(struct shash_desc *desc, const u8 *data, sha512_update() argument
50 struct sha512_state *sctx = shash_desc_ctx(desc); sha512_update()
54 return crypto_sha512_update(desc, data, len); sha512_update()
60 sha512_base_do_update(desc, data, len, sha512_update()
67 static int sha512_finup(struct shash_desc *desc, const u8 *data, sha512_finup() argument
71 return crypto_sha512_finup(desc, data, len, out); sha512_finup()
75 sha512_base_do_update(desc, data, len, sha512_finup()
77 sha512_base_do_finalize(desc, (sha512_block_fn *)sha512_xform); sha512_finup()
80 return sha512_base_finish(desc, out); sha512_finup()
83 static int sha512_ssse3_update(struct shash_desc *desc, const u8 *data, sha512_ssse3_update() argument
86 return sha512_update(desc, data, len, sha512_transform_ssse3); sha512_ssse3_update()
89 static int sha512_ssse3_finup(struct shash_desc *desc, const u8 *data, sha512_ssse3_finup() argument
92 return sha512_finup(desc, data, len, out, sha512_transform_ssse3); sha512_ssse3_finup()
96 static int sha512_ssse3_final(struct shash_desc *desc, u8 *out) sha512_ssse3_final() argument
98 return sha512_ssse3_finup(desc, NULL, 0, out); sha512_ssse3_final()
162 static int sha512_avx_update(struct shash_desc *desc, const u8 *data, sha512_avx_update() argument
165 return sha512_update(desc, data, len, sha512_transform_avx); sha512_avx_update()
168 static int sha512_avx_finup(struct shash_desc *desc, const u8 *data, sha512_avx_finup() argument
171 return sha512_finup(desc, data, len, out, sha512_transform_avx); sha512_avx_finup()
175 static int sha512_avx_final(struct shash_desc *desc, u8 *out) sha512_avx_final() argument
177 return sha512_avx_finup(desc, NULL, 0, out); sha512_avx_final()
235 static int sha512_avx2_update(struct shash_desc *desc, const u8 *data, sha512_avx2_update() argument
238 return sha512_update(desc, data, len, sha512_transform_rorx); sha512_avx2_update()
241 static int sha512_avx2_finup(struct shash_desc *desc, const u8 *data, sha512_avx2_finup() argument
244 return sha512_finup(desc, data, len, out, sha512_transform_rorx); sha512_avx2_finup()
248 static int sha512_avx2_final(struct shash_desc *desc, u8 *out) sha512_avx2_final() argument
250 return sha512_avx2_finup(desc, NULL, 0, out); sha512_avx2_final()
H A Dcast5_avx_glue.c59 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, ecb_crypt() argument
63 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_crypt()
71 err = blkcipher_walk_virt(desc, walk); ecb_crypt()
72 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ecb_crypt()
106 err = blkcipher_walk_done(desc, walk, nbytes); ecb_crypt()
113 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
119 return ecb_crypt(desc, &walk, true); ecb_encrypt()
122 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
128 return ecb_crypt(desc, &walk, false); ecb_decrypt()
131 static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, __cbc_encrypt() argument
134 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_encrypt()
155 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
162 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
165 nbytes = __cbc_encrypt(desc, &walk); cbc_encrypt()
166 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_encrypt()
172 static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, __cbc_decrypt() argument
175 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_decrypt()
227 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
235 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
236 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_decrypt()
240 nbytes = __cbc_decrypt(desc, &walk); cbc_decrypt()
241 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_decrypt()
248 static void ctr_crypt_final(struct blkcipher_desc *desc, ctr_crypt_final() argument
251 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_crypt_final()
265 static unsigned int __ctr_crypt(struct blkcipher_desc *desc, __ctr_crypt() argument
268 struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ctr_crypt()
311 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_crypt() argument
319 err = blkcipher_walk_virt_block(desc, &walk, CAST5_BLOCK_SIZE); ctr_crypt()
320 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ctr_crypt()
324 nbytes = __ctr_crypt(desc, &walk); ctr_crypt()
325 err = blkcipher_walk_done(desc, &walk, nbytes); ctr_crypt()
331 ctr_crypt_final(desc, &walk); ctr_crypt()
332 err = blkcipher_walk_done(desc, &walk, 0); ctr_crypt()
H A Dcrct10dif-pclmul_glue.c48 static int chksum_init(struct shash_desc *desc) chksum_init() argument
50 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_init()
57 static int chksum_update(struct shash_desc *desc, const u8 *data, chksum_update() argument
60 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_update()
71 static int chksum_final(struct shash_desc *desc, u8 *out) chksum_final() argument
73 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_final()
91 static int chksum_finup(struct shash_desc *desc, const u8 *data, chksum_finup() argument
94 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_finup()
99 static int chksum_digest(struct shash_desc *desc, const u8 *data, chksum_digest() argument
102 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_digest()
H A Ddes3_ede_glue.c86 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, ecb_crypt() argument
93 err = blkcipher_walk_virt(desc, walk); ecb_crypt()
124 err = blkcipher_walk_done(desc, walk, nbytes); ecb_crypt()
130 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
133 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_encrypt()
137 return ecb_crypt(desc, &walk, ctx->enc_expkey); ecb_encrypt()
140 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
143 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_decrypt()
147 return ecb_crypt(desc, &walk, ctx->dec_expkey); ecb_decrypt()
150 static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, __cbc_encrypt() argument
153 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_encrypt()
174 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
181 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
184 nbytes = __cbc_encrypt(desc, &walk); cbc_encrypt()
185 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_encrypt()
191 static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, __cbc_decrypt() argument
194 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_decrypt()
253 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
260 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
263 nbytes = __cbc_decrypt(desc, &walk); cbc_decrypt()
264 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_decrypt()
286 static unsigned int __ctr_crypt(struct blkcipher_desc *desc, __ctr_crypt() argument
289 struct des3_ede_x86_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ctr_crypt()
337 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_crypt() argument
344 err = blkcipher_walk_virt_block(desc, &walk, DES3_EDE_BLOCK_SIZE); ctr_crypt()
347 nbytes = __ctr_crypt(desc, &walk); ctr_crypt()
348 err = blkcipher_walk_done(desc, &walk, nbytes); ctr_crypt()
352 ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); ctr_crypt()
353 err = blkcipher_walk_done(desc, &walk, 0); ctr_crypt()
H A Dblowfish_glue.c80 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, ecb_crypt() argument
84 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_crypt()
89 err = blkcipher_walk_virt(desc, walk); ecb_crypt()
119 err = blkcipher_walk_done(desc, walk, nbytes); ecb_crypt()
125 static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_encrypt() argument
131 return ecb_crypt(desc, &walk, blowfish_enc_blk, blowfish_enc_blk_4way); ecb_encrypt()
134 static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ecb_decrypt() argument
140 return ecb_crypt(desc, &walk, blowfish_dec_blk, blowfish_dec_blk_4way); ecb_decrypt()
143 static unsigned int __cbc_encrypt(struct blkcipher_desc *desc, __cbc_encrypt() argument
146 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_encrypt()
167 static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_encrypt() argument
174 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
177 nbytes = __cbc_encrypt(desc, &walk); cbc_encrypt()
178 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_encrypt()
184 static unsigned int __cbc_decrypt(struct blkcipher_desc *desc, __cbc_decrypt() argument
187 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __cbc_decrypt()
248 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, cbc_decrypt() argument
255 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
258 nbytes = __cbc_decrypt(desc, &walk); cbc_decrypt()
259 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_decrypt()
280 static unsigned int __ctr_crypt(struct blkcipher_desc *desc, __ctr_crypt() argument
283 struct bf_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ctr_crypt()
336 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, ctr_crypt() argument
343 err = blkcipher_walk_virt_block(desc, &walk, BF_BLOCK_SIZE); ctr_crypt()
346 nbytes = __ctr_crypt(desc, &walk); ctr_crypt()
347 err = blkcipher_walk_done(desc, &walk, nbytes); ctr_crypt()
351 ctr_crypt_final(crypto_blkcipher_ctx(desc->tfm), &walk); ctr_crypt()
352 err = blkcipher_walk_done(desc, &walk, 0); ctr_crypt()
H A Dghash-clmulni-intel_glue.c46 static int ghash_init(struct shash_desc *desc) ghash_init() argument
48 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ghash_init()
80 static int ghash_update(struct shash_desc *desc, ghash_update() argument
83 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ghash_update()
84 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); ghash_update()
134 static int ghash_final(struct shash_desc *desc, u8 *dst) ghash_final() argument
136 struct ghash_desc_ctx *dctx = shash_desc_ctx(desc); ghash_final()
137 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); ghash_final()
177 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_init() local
180 desc->tfm = child; ghash_async_init()
181 desc->flags = req->base.flags; ghash_async_init()
182 return crypto_shash_init(desc); ghash_async_init()
199 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_update() local
200 return shash_ahash_update(req, desc); ghash_async_update()
217 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_final() local
218 return crypto_shash_final(desc, req->result); ghash_async_final()
234 struct shash_desc *desc = cryptd_shash_desc(cryptd_req); ghash_async_digest() local
237 desc->tfm = child; ghash_async_digest()
238 desc->flags = req->base.flags; ghash_async_digest()
239 return shash_ahash_digest(req, desc); ghash_async_digest()
/linux-4.4.14/drivers/s390/virtio/
H A Dkvm_virtio.c42 struct kvm_device_desc *desc; member in struct:kvm_device
56 static struct kvm_vqconfig *kvm_vq_config(const struct kvm_device_desc *desc) kvm_vq_config() argument
58 return (struct kvm_vqconfig *)(desc + 1); kvm_vq_config()
61 static u8 *kvm_vq_features(const struct kvm_device_desc *desc) kvm_vq_features() argument
63 return (u8 *)(kvm_vq_config(desc) + desc->num_vq); kvm_vq_features()
66 static u8 *kvm_vq_configspace(const struct kvm_device_desc *desc) kvm_vq_configspace() argument
68 return kvm_vq_features(desc) + desc->feature_len * 2; kvm_vq_configspace()
72 * The total size of the config page used by this device (incl. desc)
74 static unsigned desc_size(const struct kvm_device_desc *desc) desc_size() argument
76 return sizeof(*desc) desc_size()
77 + desc->num_vq * sizeof(struct kvm_vqconfig) desc_size()
78 + desc->feature_len * 2 desc_size()
79 + desc->config_len; desc_size()
87 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; kvm_get_features() local
88 u8 *in_features = kvm_vq_features(desc); kvm_get_features()
90 for (i = 0; i < min(desc->feature_len * 8, 32); i++) kvm_get_features()
99 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; kvm_finalize_features() local
101 u8 *out_features = kvm_vq_features(desc) + desc->feature_len; kvm_finalize_features()
109 memset(out_features, 0, desc->feature_len); kvm_finalize_features()
110 bits = min_t(unsigned, desc->feature_len, sizeof(vdev->features)) * 8; kvm_finalize_features()
125 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; kvm_get() local
127 BUG_ON(offset + len > desc->config_len); kvm_get()
128 memcpy(buf, kvm_vq_configspace(desc) + offset, len); kvm_get()
134 struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; kvm_set() local
136 BUG_ON(offset + len > desc->config_len); kvm_set()
137 memcpy(kvm_vq_configspace(desc) + offset, buf, len); kvm_set()
147 return to_kvmdev(vdev)->desc->status; kvm_get_status()
153 to_kvmdev(vdev)->desc->status = status; kvm_set_status()
155 (unsigned long) to_kvmdev(vdev)->desc); kvm_set_status()
166 (unsigned long) to_kvmdev(vdev)->desc); kvm_reset()
199 if (index >= kdev->desc->num_vq) kvm_find_vq()
205 config = kvm_vq_config(kdev->desc)+index; kvm_find_vq()
264 if (nvqs > kdev->desc->num_vq) kvm_find_vqs()
324 kdev->desc = d; add_kvm_device()
353 * match for a kvm device with a specific desc pointer
360 return kdev->desc == data; match_desc()
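
	Note: the config page parsed above is one packed sequence, which is
	why the accessors are plain pointer arithmetic. A worked layout,
	with hypothetical sizes:

	/* For num_vq = 2, feature_len = 4, config_len = 8, desc_size() is:
	 *
	 *   sizeof(struct kvm_device_desc)    header (the desc itself)
	 * + 2 * sizeof(struct kvm_vqconfig)   kvm_vq_config(desc)
	 * + 4 * 2                             kvm_vq_features(desc): host
	 *                                     bits, then guest ack bits
	 * + 8                                 kvm_vq_configspace(desc)
	 *
	 * matching the offsets the three accessors step through.
	 */
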
/linux-4.4.14/drivers/media/pci/cobalt/
H A Dcobalt-omnitek.c108 void omni_sg_dma_start(struct cobalt_stream *s, struct sg_dma_desc_info *desc) omni_sg_dma_start() argument
112 iowrite32((u32)((u64)desc->bus >> 32), DESCRIPTOR(s->dma_channel) + 4); omni_sg_dma_start()
113 iowrite32((u32)desc->bus & NEXT_ADRS_MSK, DESCRIPTOR(s->dma_channel)); omni_sg_dma_start()
164 struct sg_dma_desc_info *desc) descriptor_list_create()
166 struct sg_dma_descriptor *d = (struct sg_dma_descriptor *)desc->virt; descriptor_list_create()
167 dma_addr_t next = desc->bus; descriptor_list_create()
268 d->next_h = (u32)((u64)desc->bus >> 32); descriptor_list_create()
269 d->next_l = (u32)desc->bus | descriptor_list_create()
273 desc->last_desc_virt = d; descriptor_list_create()
298 void *descriptor_list_allocate(struct sg_dma_desc_info *desc, size_t bytes) descriptor_list_allocate() argument
300 desc->size = bytes; descriptor_list_allocate()
301 desc->virt = dma_alloc_coherent(desc->dev, bytes, descriptor_list_allocate()
302 &desc->bus, GFP_KERNEL); descriptor_list_allocate()
303 return desc->virt; descriptor_list_allocate()
306 void descriptor_list_free(struct sg_dma_desc_info *desc) descriptor_list_free() argument
308 if (desc->virt) descriptor_list_free()
309 dma_free_coherent(desc->dev, desc->size, descriptor_list_free()
310 desc->virt, desc->bus); descriptor_list_free()
311 desc->virt = NULL; descriptor_list_free()
314 void descriptor_list_interrupt_enable(struct sg_dma_desc_info *desc) descriptor_list_interrupt_enable() argument
316 struct sg_dma_descriptor *d = desc->last_desc_virt; descriptor_list_interrupt_enable()
321 void descriptor_list_interrupt_disable(struct sg_dma_desc_info *desc) descriptor_list_interrupt_disable() argument
323 struct sg_dma_descriptor *d = desc->last_desc_virt; descriptor_list_interrupt_disable()
328 void descriptor_list_loopback(struct sg_dma_desc_info *desc) descriptor_list_loopback() argument
330 struct sg_dma_descriptor *d = desc->last_desc_virt; descriptor_list_loopback()
332 d->next_h = (u32)((u64)desc->bus >> 32); descriptor_list_loopback()
333 d->next_l = (u32)desc->bus | (d->next_l & DESCRIPTOR_FLAG_MSK); descriptor_list_loopback()
336 void descriptor_list_end_of_chain(struct sg_dma_desc_info *desc) descriptor_list_end_of_chain() argument
338 struct sg_dma_descriptor *d = desc->last_desc_virt; descriptor_list_end_of_chain()
161 descriptor_list_create(struct cobalt *cobalt, struct scatterlist *scatter_list, bool to_pci, unsigned sglen, unsigned size, unsigned width, unsigned stride, struct sg_dma_desc_info *desc) descriptor_list_create() argument
H A Dcobalt-omnitek.h43 void omni_sg_dma_start(struct cobalt_stream *s, struct sg_dma_desc_info *desc);
49 struct sg_dma_desc_info *desc);
53 void descriptor_list_loopback(struct sg_dma_desc_info *desc);
54 void descriptor_list_end_of_chain(struct sg_dma_desc_info *desc);
56 void *descriptor_list_allocate(struct sg_dma_desc_info *desc, size_t bytes);
57 void descriptor_list_free(struct sg_dma_desc_info *desc);
59 void descriptor_list_interrupt_enable(struct sg_dma_desc_info *desc);
60 void descriptor_list_interrupt_disable(struct sg_dma_desc_info *desc);
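
	Note: taken together these prototypes imply a setup order. A hedged
	sketch of building a looping DMA ring; demo_build_ring() and the
	frame geometry are hypothetical, error paths are elided, and
	desc->dev is assumed to have been set by the caller before the
	coherent allocation.

	static int demo_build_ring(struct cobalt *cobalt, struct cobalt_stream *s,
				   struct scatterlist *sgl, unsigned sglen,
				   struct sg_dma_desc_info *desc, size_t bytes)
	{
		if (!descriptor_list_allocate(desc, bytes))
			return -ENOMEM;
		descriptor_list_create(cobalt, sgl, false /* direction flag */,
				       sglen, 720 * 576 * 2, 720 * 2, 720 * 2,
				       desc);
		descriptor_list_loopback(desc);		/* tail points back at head */
		descriptor_list_interrupt_enable(desc);
		omni_sg_dma_start(s, desc);
		return 0;
	}
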
/linux-4.4.14/fs/nfs/
H A Dpagelist.c46 nfs_pgio_current_mirror(struct nfs_pageio_descriptor *desc) nfs_pgio_current_mirror() argument
48 return nfs_pgio_has_mirroring(desc) ? nfs_pgio_current_mirror()
49 &desc->pg_mirrors[desc->pg_mirror_idx] : nfs_pgio_current_mirror()
50 &desc->pg_mirrors[0]; nfs_pgio_current_mirror()
54 void nfs_pgheader_init(struct nfs_pageio_descriptor *desc, nfs_pgheader_init() argument
58 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_pgheader_init()
62 hdr->inode = desc->pg_inode; nfs_pgheader_init()
66 hdr->dreq = desc->pg_dreq; nfs_pgheader_init()
67 hdr->layout_private = desc->pg_layout_private; nfs_pgheader_init()
69 hdr->completion_ops = desc->pg_completion_ops; nfs_pgheader_init()
73 hdr->pgio_mirror_idx = desc->pg_mirror_idx; nfs_pgheader_init()
487 * @desc: pointer to descriptor
488 * @prev: previous request in desc, or NULL
491 * Returns zero if @req can be coalesced into @desc, otherwise it returns
494 size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, nfs_generic_pg_test() argument
497 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_generic_pg_test()
664 * @desc: IO descriptor
667 static int nfs_pgio_error(struct nfs_pageio_descriptor *desc, nfs_pgio_error() argument
678 for (midx = 0; midx < desc->pg_mirror_count; midx++) { nfs_pgio_error()
679 mirror = &desc->pg_mirrors[midx]; nfs_pgio_error()
680 desc->pg_completion_ops->error_cleanup(&mirror->pg_list); nfs_pgio_error()
709 * @desc: pointer to descriptor
717 void nfs_pageio_init(struct nfs_pageio_descriptor *desc, nfs_pageio_init() argument
728 desc->pg_moreio = 0; nfs_pageio_init()
729 desc->pg_inode = inode; nfs_pageio_init()
730 desc->pg_ops = pg_ops; nfs_pageio_init()
731 desc->pg_completion_ops = compl_ops; nfs_pageio_init()
732 desc->pg_rw_ops = rw_ops; nfs_pageio_init()
733 desc->pg_ioflags = io_flags; nfs_pageio_init()
734 desc->pg_error = 0; nfs_pageio_init()
735 desc->pg_lseg = NULL; nfs_pageio_init()
736 desc->pg_dreq = NULL; nfs_pageio_init()
737 desc->pg_layout_private = NULL; nfs_pageio_init()
738 desc->pg_bsize = bsize; nfs_pageio_init()
740 desc->pg_mirror_count = 1; nfs_pageio_init()
741 desc->pg_mirror_idx = 0; nfs_pageio_init()
748 desc->pg_mirrors_dynamic = new; nfs_pageio_init()
749 desc->pg_mirrors = new; nfs_pageio_init()
752 nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize); nfs_pageio_init()
754 desc->pg_mirrors_dynamic = NULL; nfs_pageio_init()
755 desc->pg_mirrors = desc->pg_mirrors_static; nfs_pageio_init()
756 nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize); nfs_pageio_init()
790 int nfs_generic_pgio(struct nfs_pageio_descriptor *desc, nfs_generic_pgio() argument
793 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_generic_pgio()
804 return nfs_pgio_error(desc, hdr); nfs_generic_pgio()
806 nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq); nfs_generic_pgio()
823 return nfs_pgio_error(desc, hdr); nfs_generic_pgio()
825 if ((desc->pg_ioflags & FLUSH_COND_STABLE) && nfs_generic_pgio()
826 (desc->pg_moreio || nfs_reqs_to_commit(&cinfo))) nfs_generic_pgio()
827 desc->pg_ioflags &= ~FLUSH_COND_STABLE; nfs_generic_pgio()
830 nfs_pgio_rpcsetup(hdr, mirror->pg_count, 0, desc->pg_ioflags, &cinfo); nfs_generic_pgio()
831 desc->pg_rpc_callops = &nfs_pgio_common_ops; nfs_generic_pgio()
836 static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc) nfs_generic_pg_pgios() argument
842 mirror = nfs_pgio_current_mirror(desc); nfs_generic_pg_pgios()
844 hdr = nfs_pgio_header_alloc(desc->pg_rw_ops); nfs_generic_pg_pgios()
848 desc->pg_completion_ops->error_cleanup(&mirror->pg_list); nfs_generic_pg_pgios()
851 nfs_pgheader_init(desc, hdr, nfs_pgio_header_free); nfs_generic_pg_pgios()
852 ret = nfs_generic_pgio(desc, hdr); nfs_generic_pg_pgios()
858 desc->pg_rpc_callops, nfs_generic_pg_pgios()
859 desc->pg_ioflags, 0); nfs_generic_pg_pgios()
967 * @desc: destination io descriptor
971 * existing list of pages 'desc'.
973 static int nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, nfs_pageio_do_add_request() argument
976 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_pageio_do_add_request()
983 if (desc->pg_ops->pg_init) nfs_pageio_do_add_request()
984 desc->pg_ops->pg_init(desc, req); nfs_pageio_do_add_request()
987 if (!nfs_can_coalesce_requests(prev, req, desc)) nfs_pageio_do_add_request()
998 static void nfs_pageio_doio(struct nfs_pageio_descriptor *desc) nfs_pageio_doio() argument
1000 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_pageio_doio()
1004 int error = desc->pg_ops->pg_doio(desc); nfs_pageio_doio()
1006 desc->pg_error = error; nfs_pageio_doio()
1018 * @desc: destination io descriptor
1025 * existing list of pages 'desc'.
1027 static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, __nfs_pageio_add_request() argument
1030 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); __nfs_pageio_add_request()
1044 if (!nfs_pageio_do_add_request(desc, subreq)) { __nfs_pageio_add_request()
1051 desc->pg_moreio = 1; __nfs_pageio_add_request()
1052 nfs_pageio_doio(desc); __nfs_pageio_add_request()
1053 if (desc->pg_error < 0) __nfs_pageio_add_request()
1086 desc->pg_error = PTR_ERR(subreq); __nfs_pageio_add_request()
1091 static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc) nfs_do_recoalesce() argument
1093 struct nfs_pgio_mirror *mirror = nfs_pgio_current_mirror(desc); nfs_do_recoalesce()
1108 if (__nfs_pageio_add_request(desc, req)) nfs_do_recoalesce()
1110 if (desc->pg_error < 0) { nfs_do_recoalesce()
1121 static int nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc, nfs_pageio_add_request_mirror() argument
1127 ret = __nfs_pageio_add_request(desc, req); nfs_pageio_add_request_mirror()
1130 if (desc->pg_error < 0) nfs_pageio_add_request_mirror()
1132 ret = nfs_do_recoalesce(desc); nfs_pageio_add_request_mirror()
1138 int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, nfs_pageio_add_request() argument
1149 nfs_pageio_setup_mirroring(desc, req); nfs_pageio_add_request()
1151 for (midx = 0; midx < desc->pg_mirror_count; midx++) { nfs_pageio_add_request()
1176 if (nfs_pgio_has_mirroring(desc)) nfs_pageio_add_request()
1177 desc->pg_mirror_idx = midx; nfs_pageio_add_request()
1178 if (!nfs_pageio_add_request_mirror(desc, dupreq)) nfs_pageio_add_request()
1188 * @desc: pointer to io descriptor
1191 static void nfs_pageio_complete_mirror(struct nfs_pageio_descriptor *desc, nfs_pageio_complete_mirror() argument
1194 struct nfs_pgio_mirror *mirror = &desc->pg_mirrors[mirror_idx]; nfs_pageio_complete_mirror()
1195 u32 restore_idx = desc->pg_mirror_idx; nfs_pageio_complete_mirror()
1197 if (nfs_pgio_has_mirroring(desc)) nfs_pageio_complete_mirror()
1198 desc->pg_mirror_idx = mirror_idx; nfs_pageio_complete_mirror()
1200 nfs_pageio_doio(desc); nfs_pageio_complete_mirror()
1203 if (!nfs_do_recoalesce(desc)) nfs_pageio_complete_mirror()
1206 desc->pg_mirror_idx = restore_idx; nfs_pageio_complete_mirror()
1212 * @desc - the pageio descriptor to add requests to
1214 * Try to move each request (nfs_page) from @hdr to @desc then attempt
1219 int nfs_pageio_resend(struct nfs_pageio_descriptor *desc, nfs_pageio_resend() argument
1224 desc->pg_dreq = hdr->dreq; nfs_pageio_resend()
1229 if (!nfs_pageio_add_request(desc, req)) nfs_pageio_resend()
1232 nfs_pageio_complete(desc); nfs_pageio_resend()
1243 * @desc: pointer to io descriptor
1245 void nfs_pageio_complete(struct nfs_pageio_descriptor *desc) nfs_pageio_complete() argument
1249 for (midx = 0; midx < desc->pg_mirror_count; midx++) nfs_pageio_complete()
1250 nfs_pageio_complete_mirror(desc, midx); nfs_pageio_complete()
1252 if (desc->pg_ops->pg_cleanup) nfs_pageio_complete()
1253 desc->pg_ops->pg_cleanup(desc); nfs_pageio_complete()
1254 nfs_pageio_cleanup_mirroring(desc); nfs_pageio_complete()
1259 * @desc: pointer to io descriptor
1266 * is not contiguous with the existing list of pages in 'desc'.
1268 void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *desc, pgoff_t index) nfs_pageio_cond_complete() argument
1274 for (midx = 0; midx < desc->pg_mirror_count; midx++) { nfs_pageio_cond_complete()
1275 mirror = &desc->pg_mirrors[midx]; nfs_pageio_cond_complete()
1279 nfs_pageio_complete_mirror(desc, midx); nfs_pageio_cond_complete()
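
	Note: the add/complete lifecycle above reduces to a small skeleton:
	requests are fed one at a time (each may coalesce or trigger a
	flush), then the descriptor is completed across all mirrors. A
	sketch modeled on the nfs_pageio_resend() loop; demo_flush() is
	hypothetical.

	#include <linux/nfs_page.h>

	static void demo_flush(struct nfs_pageio_descriptor *pgio,
			       struct list_head *reqs)
	{
		struct nfs_page *req;

		while (!list_empty(reqs)) {
			req = list_first_entry(reqs, struct nfs_page, wb_list);
			nfs_list_remove_request(req);
			if (!nfs_pageio_add_request(pgio, req))
				break;		/* pgio->pg_error holds the cause */
		}
		nfs_pageio_complete(pgio);	/* flush every mirror */
	}
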
/linux-4.4.14/drivers/clk/qcom/
H A Dcommon.c61 qcom_cc_map(struct platform_device *pdev, const struct qcom_cc_desc *desc) qcom_cc_map() argument
72 return devm_regmap_init_mmio(dev, base, desc->config); qcom_cc_map()
92 const struct qcom_cc_desc *desc, struct regmap *regmap) qcom_cc_really_probe()
101 size_t num_clks = desc->num_clks; qcom_cc_really_probe()
102 struct clk_regmap **rclks = desc->clks; qcom_cc_really_probe()
135 reset->rcdev.nr_resets = desc->num_resets; qcom_cc_really_probe()
137 reset->reset_map = desc->resets; qcom_cc_really_probe()
145 if (desc->gdscs && desc->num_gdscs) { qcom_cc_really_probe()
146 ret = gdsc_register(dev, desc->gdscs, desc->num_gdscs, qcom_cc_really_probe()
159 int qcom_cc_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc) qcom_cc_probe() argument
163 regmap = qcom_cc_map(pdev, desc); qcom_cc_probe()
167 return qcom_cc_really_probe(pdev, desc, regmap); qcom_cc_probe()
91 qcom_cc_really_probe(struct platform_device *pdev, const struct qcom_cc_desc *desc, struct regmap *regmap) qcom_cc_really_probe() argument
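
	Note: a typical clock-controller driver just wraps qcom_cc_probe()
	around a static descriptor. A sketch with hypothetical table names;
	the regmap config, clk_regmap and reset tables are assumed to be
	defined elsewhere in the driver.

	static const struct qcom_cc_desc demo_cc_desc = {
		.config		= &demo_regmap_config,
		.clks		= demo_cc_clocks,
		.num_clks	= ARRAY_SIZE(demo_cc_clocks),
		.resets		= demo_cc_resets,
		.num_resets	= ARRAY_SIZE(demo_cc_resets),
	};

	static int demo_cc_probe(struct platform_device *pdev)
	{
		return qcom_cc_probe(pdev, &demo_cc_desc);
	}
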
/linux-4.4.14/arch/arm/mach-imx/
H A Dmach-pcm037_eet.c72 .desc = "Wheel Manual",
78 .desc = "Wheel AF",
84 .desc = "Wheel View",
90 .desc = "Wheel Menu",
96 .desc = "Nav Pad Up",
102 .desc = "Nav Pad Right",
108 .desc = "Nav Pad Down",
114 .desc = "Nav Pad Left",
120 .desc = "Nav Pad Ok",
126 .desc = "Wheel Off",
132 .desc = "Focus Forward",
138 .desc = "Focus Backward",
144 .desc = "Release Half",
150 .desc = "Release Full",
/linux-4.4.14/arch/x86/include/asm/
H A Ddesc.h11 static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *info) fill_ldt() argument
13 desc->limit0 = info->limit & 0x0ffff; fill_ldt()
15 desc->base0 = (info->base_addr & 0x0000ffff); fill_ldt()
16 desc->base1 = (info->base_addr & 0x00ff0000) >> 16; fill_ldt()
18 desc->type = (info->read_exec_only ^ 1) << 1; fill_ldt()
19 desc->type |= info->contents << 2; fill_ldt()
21 desc->s = 1; fill_ldt()
22 desc->dpl = 0x3; fill_ldt()
23 desc->p = info->seg_not_present ^ 1; fill_ldt()
24 desc->limit = (info->limit & 0xf0000) >> 16; fill_ldt()
25 desc->avl = info->useable; fill_ldt()
26 desc->d = info->seg_32bit; fill_ldt()
27 desc->g = info->limit_in_pages; fill_ldt()
29 desc->base2 = (info->base_addr & 0xff000000) >> 24; fill_ldt()
34 desc->l = 0; fill_ldt()
83 const u32 *desc = ptr; desc_empty() local
85 return !(desc[0] | desc[1]); desc_empty()
104 #define write_ldt_entry(dt, entry, desc) native_write_ldt_entry(dt, entry, desc)
105 #define write_gdt_entry(dt, entry, desc, type) native_write_gdt_entry(dt, entry, desc, type)
124 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc) native_write_ldt_entry() argument
126 memcpy(&ldt[entry], desc, 8); native_write_ldt_entry()
130 native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int type) native_write_gdt_entry() argument
140 memcpy(&gdt[entry], desc, size); native_write_gdt_entry()
143 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, pack_descriptor() argument
147 desc->a = ((base & 0xffff) << 16) | (limit & 0xffff); pack_descriptor()
148 desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) | pack_descriptor()
151 desc->p = 1; pack_descriptor()
158 struct ldttss_desc64 *desc = d; set_tssldt_descriptor() local
160 memset(desc, 0, sizeof(*desc)); set_tssldt_descriptor()
162 desc->limit0 = size & 0xFFFF; set_tssldt_descriptor()
163 desc->base0 = PTR_LOW(addr); set_tssldt_descriptor()
164 desc->base1 = PTR_MIDDLE(addr) & 0xFF; set_tssldt_descriptor()
165 desc->type = type; set_tssldt_descriptor()
166 desc->p = 1; set_tssldt_descriptor()
167 desc->limit1 = (size >> 16) & 0xF; set_tssldt_descriptor()
168 desc->base2 = (PTR_MIDDLE(addr) >> 8) & 0xFF; set_tssldt_descriptor()
169 desc->base3 = PTR_HIGH(addr); set_tssldt_descriptor()
283 static inline unsigned long get_desc_base(const struct desc_struct *desc) get_desc_base() argument
285 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); get_desc_base()
288 static inline void set_desc_base(struct desc_struct *desc, unsigned long base) set_desc_base() argument
290 desc->base0 = base & 0xffff; set_desc_base()
291 desc->base1 = (base >> 16) & 0xff; set_desc_base()
292 desc->base2 = (base >> 24) & 0xff; set_desc_base()
295 static inline unsigned long get_desc_limit(const struct desc_struct *desc) get_desc_limit() argument
297 return desc->limit0 | (desc->limit << 16); get_desc_limit()
300 static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit) set_desc_limit() argument
302 desc->limit0 = limit & 0xffff; set_desc_limit()
303 desc->limit = (limit >> 16) & 0xf; set_desc_limit()
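
The set_desc_base()/get_desc_base() pair above scatters a 32-bit base across the base0/base1/base2 fields of the 8-byte segment descriptor. A standalone round-trip sketch of the same split, using plain C types rather than the kernel's desc_struct:

#include <stdint.h>
#include <assert.h>

struct seg_base { uint16_t base0; uint8_t base1, base2; };

static void split_base(struct seg_base *d, uint32_t base)
{
	d->base0 = base & 0xffff;		/* bits 0-15  */
	d->base1 = (base >> 16) & 0xff;		/* bits 16-23 */
	d->base2 = (base >> 24) & 0xff;		/* bits 24-31 */
}

static uint32_t join_base(const struct seg_base *d)
{
	return d->base0 | ((uint32_t)d->base1 << 16) |
	       ((uint32_t)d->base2 << 24);
}

int main(void)
{
	struct seg_base d;

	split_base(&d, 0xdeadbeefu);
	assert(join_base(&d) == 0xdeadbeefu);	/* lossless round trip */
	return 0;
}
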
/linux-4.4.14/arch/arm/mach-iop13xx/include/mach/
H A Dadma.h221 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, iop_desc_get_byte_count() argument
224 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_get_byte_count()
228 static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc, iop_desc_get_src_addr() argument
232 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_get_src_addr()
236 static inline u32 iop_desc_get_src_count(struct iop_adma_desc_slot *desc, iop_desc_get_src_count() argument
239 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_get_src_count()
244 iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags) iop_desc_init_memcpy() argument
246 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_memcpy()
260 iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags) iop_desc_init_memset() argument
262 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_memset()
278 iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_xor() argument
281 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_xor()
299 iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_zero_sum() argument
302 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_zero_sum()
321 iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_pq() argument
324 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_pq()
340 iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_pq_zero_sum() argument
343 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_init_pq_zero_sum()
360 static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc, iop_desc_set_byte_count() argument
364 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_byte_count()
369 iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) iop_desc_set_zero_sum_byte_count() argument
371 int slots_per_op = desc->slots_per_op; iop_desc_set_zero_sum_byte_count()
372 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; iop_desc_set_zero_sum_byte_count()
394 static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, iop_desc_set_dest_addr() argument
398 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_dest_addr()
404 iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr) iop_desc_set_pq_addr() argument
406 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_pq_addr()
413 static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc, iop_desc_set_memcpy_src_addr() argument
416 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_memcpy_src_addr()
421 static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc, iop_desc_set_xor_src_addr() argument
424 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; iop_desc_set_xor_src_addr()
425 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; iop_desc_set_xor_src_addr()
441 iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx, iop_desc_set_pq_src_addr() argument
444 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; iop_desc_set_pq_src_addr()
445 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc, *iter; iop_desc_set_pq_src_addr()
464 iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, iop_desc_init_interrupt() argument
467 iop_desc_init_memcpy(desc, 1); iop_desc_init_interrupt()
468 iop_desc_set_byte_count(desc, chan, 0); iop_desc_init_interrupt()
469 iop_desc_set_dest_addr(desc, chan, 0); iop_desc_init_interrupt()
470 iop_desc_set_memcpy_src_addr(desc, 0); iop_desc_init_interrupt()
477 iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx, iop_desc_set_pq_zero_sum_addr() argument
480 iop_desc_set_xor_src_addr(desc, pq_idx, src[pq_idx]); iop_desc_set_pq_zero_sum_addr()
481 iop_desc_set_xor_src_addr(desc, pq_idx+1, src[pq_idx+1]); iop_desc_set_pq_zero_sum_addr()
484 static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, iop_desc_set_next_desc() argument
487 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_next_desc()
493 static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc) iop_desc_get_next_desc() argument
495 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_get_next_desc()
499 static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc) iop_desc_clear_next_desc() argument
501 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_clear_next_desc()
505 static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, iop_desc_set_block_fill_val() argument
508 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_set_block_fill_val()
513 iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) iop_desc_get_zero_result() argument
515 struct iop13xx_adma_desc_hw *hw_desc = desc->hw_desc; iop_desc_get_zero_result()
/linux-4.4.14/drivers/usb/core/
H A Dconfig.c50 struct usb_ss_ep_comp_descriptor *desc; usb_parse_ss_endpoint_companion() local
56 desc = (struct usb_ss_ep_comp_descriptor *) buffer; usb_parse_ss_endpoint_companion()
57 if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP || usb_parse_ss_endpoint_companion()
62 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
73 if (usb_endpoint_xfer_isoc(&ep->desc) || usb_parse_ss_endpoint_companion()
74 usb_endpoint_xfer_int(&ep->desc)) usb_parse_ss_endpoint_companion()
76 ep->desc.wMaxPacketSize; usb_parse_ss_endpoint_companion()
80 memcpy(&ep->ss_ep_comp, desc, USB_DT_SS_EP_COMP_SIZE); usb_parse_ss_endpoint_companion()
83 if (usb_endpoint_xfer_control(&ep->desc) && desc->bMaxBurst != 0) { usb_parse_ss_endpoint_companion()
86 "setting to zero\n", desc->bMaxBurst, usb_parse_ss_endpoint_companion()
87 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
89 } else if (desc->bMaxBurst > 15) { usb_parse_ss_endpoint_companion()
92 "setting to 15\n", desc->bMaxBurst, usb_parse_ss_endpoint_companion()
93 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
97 if ((usb_endpoint_xfer_control(&ep->desc) || usb_parse_ss_endpoint_companion()
98 usb_endpoint_xfer_int(&ep->desc)) && usb_parse_ss_endpoint_companion()
99 desc->bmAttributes != 0) { usb_parse_ss_endpoint_companion()
103 usb_endpoint_xfer_control(&ep->desc) ? "Control" : "Bulk", usb_parse_ss_endpoint_companion()
104 desc->bmAttributes, usb_parse_ss_endpoint_companion()
105 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
107 } else if (usb_endpoint_xfer_bulk(&ep->desc) && usb_parse_ss_endpoint_companion()
108 desc->bmAttributes > 16) { usb_parse_ss_endpoint_companion()
112 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
114 } else if (usb_endpoint_xfer_isoc(&ep->desc) && usb_parse_ss_endpoint_companion()
115 USB_SS_MULT(desc->bmAttributes) > 3) { usb_parse_ss_endpoint_companion()
119 USB_SS_MULT(desc->bmAttributes), usb_parse_ss_endpoint_companion()
120 cfgno, inum, asnum, ep->desc.bEndpointAddress); usb_parse_ss_endpoint_companion()
124 if (usb_endpoint_xfer_isoc(&ep->desc)) usb_parse_ss_endpoint_companion()
125 max_tx = (desc->bMaxBurst + 1) * usb_parse_ss_endpoint_companion()
126 (USB_SS_MULT(desc->bmAttributes)) * usb_parse_ss_endpoint_companion()
127 usb_endpoint_maxp(&ep->desc); usb_parse_ss_endpoint_companion()
128 else if (usb_endpoint_xfer_int(&ep->desc)) usb_parse_ss_endpoint_companion()
129 max_tx = usb_endpoint_maxp(&ep->desc) * usb_parse_ss_endpoint_companion()
130 (desc->bMaxBurst + 1); usb_parse_ss_endpoint_companion()
133 if (le16_to_cpu(desc->wBytesPerInterval) > max_tx) { usb_parse_ss_endpoint_companion()
137 usb_endpoint_xfer_isoc(&ep->desc) ? "Isoc" : "Int", usb_parse_ss_endpoint_companion()
138 le16_to_cpu(desc->wBytesPerInterval), usb_parse_ss_endpoint_companion()
139 cfgno, inum, asnum, ep->desc.bEndpointAddress, usb_parse_ss_endpoint_companion()
178 if (ifp->desc.bNumEndpoints >= num_ep) usb_parse_endpoint()
181 endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints]; usb_parse_endpoint()
182 ++ifp->desc.bNumEndpoints; usb_parse_endpoint()
184 memcpy(&endpoint->desc, d, n); usb_parse_endpoint()
241 endpoint->desc.bInterval = n; usb_parse_endpoint()
253 endpoint->desc.bmAttributes = USB_ENDPOINT_XFER_INT; usb_parse_endpoint()
254 endpoint->desc.bInterval = 1; usb_parse_endpoint()
255 if (usb_endpoint_maxp(&endpoint->desc) > 8) usb_parse_endpoint()
256 endpoint->desc.wMaxPacketSize = cpu_to_le16(8); usb_parse_endpoint()
268 maxp = usb_endpoint_maxp(&endpoint->desc) & 0x07ff; usb_parse_endpoint()
336 for (i = 0; i < config->desc.bNumInterfaces; ++i) { usb_parse_interface()
350 if (alt->desc.bAlternateSetting == asnum) { usb_parse_interface()
359 memcpy(&alt->desc, d, USB_DT_INTERFACE_SIZE); usb_parse_interface()
374 num_ep = num_ep_orig = alt->desc.bNumEndpoints; usb_parse_interface()
375 alt->desc.bNumEndpoints = 0; /* Use as a counter */ usb_parse_interface()
436 memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); usb_parse_configuration()
437 if (config->desc.bDescriptorType != USB_DT_CONFIG || usb_parse_configuration()
438 config->desc.bLength < USB_DT_CONFIG_SIZE || usb_parse_configuration()
439 config->desc.bLength > size) { usb_parse_configuration()
442 config->desc.bDescriptorType, config->desc.bLength); usb_parse_configuration()
445 cfgno = config->desc.bConfigurationValue; usb_parse_configuration()
447 buffer += config->desc.bLength; usb_parse_configuration()
448 size -= config->desc.bLength; usb_parse_configuration()
450 nintf = nintf_orig = config->desc.bNumInterfaces; usb_parse_configuration()
545 config->desc.wTotalLength = cpu_to_le16(buffer2 - buffer0); usb_parse_configuration()
553 config->desc.bNumInterfaces = nintf = n; usb_parse_configuration()
614 if (intfc->altsetting[n].desc. usb_parse_configuration()
649 for (i = 0; i < cf->desc.bNumInterfaces; i++) { usb_destroy_configuration()
673 struct usb_config_descriptor *desc; usb_get_configuration() local
698 desc = kmalloc(USB_DT_CONFIG_SIZE, GFP_KERNEL); usb_get_configuration()
699 if (!desc) usb_get_configuration()
707 desc, USB_DT_CONFIG_SIZE); usb_get_configuration()
723 length = max((int) le16_to_cpu(desc->wTotalLength), usb_get_configuration()
762 kfree(desc); usb_get_configuration()
773 kfree(dev->bos->desc); usb_release_bos_descriptor()
820 dev->bos->desc = (struct usb_bos_descriptor *)buffer; usb_get_bos_descriptor()
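
The wBytesPerInterval check above bounds a periodic SuperSpeed endpoint by the most data it can move per service interval. A standalone sketch of the same arithmetic for an isochronous endpoint; the descriptor values are hypothetical:

#include <stdio.h>

int main(void)
{
	unsigned int maxp = 1024;   /* wMaxPacketSize */
	unsigned int burst = 15;    /* bMaxBurst, i.e. 16 packets per burst */
	unsigned int mult = 3;      /* USB_SS_MULT(bmAttributes), at most 3 */
	unsigned int max_tx = (burst + 1) * mult * maxp;

	printf("max wBytesPerInterval = %u\n", max_tx);	/* 49152 */
	return 0;
}
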
H A Dendpoint.c19 struct usb_endpoint_descriptor *desc; member in struct:ep_device
40 return sprintf(buf, format_string, ep->desc->field); \
54 usb_endpoint_maxp(ep->desc) & 0x07ff); wMaxPacketSize_show()
64 switch (usb_endpoint_type(ep->desc)) { type_show()
90 in = (ep->desc->bEndpointAddress & USB_DIR_IN); interval_show()
92 switch (usb_endpoint_type(ep->desc)) { interval_show()
96 interval = ep->desc->bInterval; interval_show()
100 interval = 1 << (ep->desc->bInterval - 1); interval_show()
106 interval = ep->desc->bInterval; interval_show()
111 interval = 1 << (ep->desc->bInterval - 1); interval_show()
113 interval = ep->desc->bInterval; interval_show()
134 if (usb_endpoint_xfer_control(ep->desc)) direction_show()
136 else if (usb_endpoint_dir_in(ep->desc)) direction_show()
188 ep_dev->desc = &endpoint->desc; usb_create_ep_devs()
193 dev_set_name(&ep_dev->dev, "ep_%02x", endpoint->desc.bEndpointAddress); usb_create_ep_devs()
H A Ddevices.c182 const struct usb_endpoint_descriptor *desc) usb_dump_endpoint_descriptor()
190 dir = usb_endpoint_dir_in(desc) ? 'I' : 'O'; usb_dump_endpoint_descriptor()
193 switch (usb_endpoint_maxp(desc) & (0x03 << 11)) { usb_dump_endpoint_descriptor()
202 switch (usb_endpoint_type(desc)) { usb_dump_endpoint_descriptor()
206 interval = desc->bInterval; usb_dump_endpoint_descriptor()
213 interval = 1 << (desc->bInterval - 1); usb_dump_endpoint_descriptor()
218 interval = desc->bInterval; usb_dump_endpoint_descriptor()
225 interval = 1 << (desc->bInterval - 1); usb_dump_endpoint_descriptor()
227 interval = desc->bInterval; usb_dump_endpoint_descriptor()
241 start += sprintf(start, format_endpt, desc->bEndpointAddress, dir, usb_dump_endpoint_descriptor()
242 desc->bmAttributes, type, usb_dump_endpoint_descriptor()
243 (usb_endpoint_maxp(desc) & 0x07ff) * usb_dump_endpoint_descriptor()
254 const struct usb_interface_descriptor *desc; usb_dump_interface_descriptor() local
260 desc = &intfc->altsetting[setno].desc; usb_dump_interface_descriptor()
265 active = (desc == &iface->cur_altsetting->desc); usb_dump_interface_descriptor()
269 desc->bInterfaceNumber, usb_dump_interface_descriptor()
270 desc->bAlternateSetting, usb_dump_interface_descriptor()
271 desc->bNumEndpoints, usb_dump_interface_descriptor()
272 desc->bInterfaceClass, usb_dump_interface_descriptor()
273 class_decode(desc->bInterfaceClass), usb_dump_interface_descriptor()
274 desc->bInterfaceSubClass, usb_dump_interface_descriptor()
275 desc->bInterfaceProtocol, usb_dump_interface_descriptor()
284 const struct usb_host_interface *desc = &intfc->altsetting[setno]; usb_dump_interface() local
288 for (i = 0; i < desc->desc.bNumEndpoints; i++) { usb_dump_interface()
292 start, end, &desc->endpoint[i].desc); usb_dump_interface()
318 const struct usb_config_descriptor *desc, usb_dump_config_descriptor()
332 desc->bNumInterfaces, usb_dump_config_descriptor()
333 desc->bConfigurationValue, usb_dump_config_descriptor()
334 desc->bmAttributes, usb_dump_config_descriptor()
335 desc->bMaxPower * mul); usb_dump_config_descriptor()
350 return start + sprintf(start, "(null Cfg. desc.)\n"); usb_dump_config()
351 start = usb_dump_config_descriptor(start, end, &config->desc, active, usb_dump_config()
359 for (i = 0; i < config->desc.bNumInterfaces; i++) { usb_dump_config()
376 const struct usb_device_descriptor *desc) usb_dump_device_descriptor()
378 u16 bcdUSB = le16_to_cpu(desc->bcdUSB); usb_dump_device_descriptor()
379 u16 bcdDevice = le16_to_cpu(desc->bcdDevice); usb_dump_device_descriptor()
385 desc->bDeviceClass, usb_dump_device_descriptor()
386 class_decode(desc->bDeviceClass), usb_dump_device_descriptor()
387 desc->bDeviceSubClass, usb_dump_device_descriptor()
388 desc->bDeviceProtocol, usb_dump_device_descriptor()
389 desc->bMaxPacketSize0, usb_dump_device_descriptor()
390 desc->bNumConfigurations); usb_dump_device_descriptor()
394 le16_to_cpu(desc->idVendor), usb_dump_device_descriptor()
395 le16_to_cpu(desc->idProduct), usb_dump_device_descriptor()
455 const struct usb_hub_descriptor *desc) usb_dump_hub_descriptor()
458 unsigned char *ptr = (unsigned char *)desc; usb_dump_hub_descriptor()
181 usb_dump_endpoint_descriptor(int speed, char *start, char *end, const struct usb_endpoint_descriptor *desc) usb_dump_endpoint_descriptor() argument
317 usb_dump_config_descriptor(char *start, char *end, const struct usb_config_descriptor *desc, int active, int speed) usb_dump_config_descriptor() argument
375 usb_dump_device_descriptor(char *start, char *end, const struct usb_device_descriptor *desc) usb_dump_device_descriptor() argument
454 usb_dump_hub_descriptor(char *start, char *end, const struct usb_hub_descriptor *desc) usb_dump_hub_descriptor() argument
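
The switch statements above decode bInterval differently per speed and transfer type. A hedged sketch of the two common cases: a high-speed periodic endpoint encodes a power-of-two count of 125 us microframes, while a full/low-speed interrupt endpoint uses the value directly in 1 ms frames:

static unsigned int ep_interval_us(int highspeed, unsigned int bInterval)
{
	if (highspeed)
		return (1u << (bInterval - 1)) * 125;	/* microframes */
	return bInterval * 1000;			/* frames */
}
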
H A Dgeneric.c29 static int is_rndis(struct usb_interface_descriptor *desc) is_rndis() argument
31 return desc->bInterfaceClass == USB_CLASS_COMM is_rndis()
32 && desc->bInterfaceSubClass == 2 is_rndis()
33 && desc->bInterfaceProtocol == 0xff; is_rndis()
36 static int is_activesync(struct usb_interface_descriptor *desc) is_activesync() argument
38 return desc->bInterfaceClass == USB_CLASS_MISC is_activesync()
39 && desc->bInterfaceSubClass == 1 is_activesync()
40 && desc->bInterfaceProtocol == 1; is_activesync()
57 struct usb_interface_descriptor *desc = NULL; usb_choose_configuration() local
60 if (c->desc.bNumInterfaces > 0) usb_choose_configuration()
61 desc = &c->intf_cache[0]->altsetting->desc; usb_choose_configuration()
85 if (bus_powered && (c->desc.bmAttributes & usb_choose_configuration()
113 if (i == 0 && num_configs > 1 && desc && usb_choose_configuration()
114 (is_rndis(desc) || is_activesync(desc))) { usb_choose_configuration()
128 (desc && desc->bInterfaceClass != usb_choose_configuration()
146 i = best->desc.bConfigurationValue; usb_choose_configuration()
/linux-4.4.14/drivers/pinctrl/sunxi/
H A Dpinctrl-sunxi.c78 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_desc_find_function_by_name()
79 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_desc_find_function_by_name()
103 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_desc_find_function_by_pin()
104 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_desc_find_function_by_pin()
291 unsigned pin = g->pin - pctl->desc->pin_base; sunxi_pconf_group_set()
388 pin -= pctl->desc->pin_base; sunxi_pmx_set()
404 struct sunxi_desc_function *desc = sunxi_pmx_set_mux() local
409 if (!desc) sunxi_pmx_set_mux()
412 sunxi_pmx_set(pctldev, g->pin, desc->muxval); sunxi_pmx_set_mux()
424 struct sunxi_desc_function *desc; sunxi_pmx_gpio_set_direction() local
432 desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, offset, func); sunxi_pmx_gpio_set_direction()
433 if (!desc) sunxi_pmx_gpio_set_direction()
436 sunxi_pmx_set(pctldev, offset, desc->muxval); sunxi_pmx_gpio_set_direction()
460 u32 set_mux = pctl->desc->irq_read_needs_mux && sunxi_pinctrl_gpio_get()
461 test_bit(FLAG_USED_AS_IRQ, &chip->desc[offset].flags); sunxi_pinctrl_gpio_get()
526 struct sunxi_desc_function *desc; sunxi_pinctrl_gpio_to_irq() local
527 unsigned pinnum = pctl->desc->pin_base + offset; sunxi_pinctrl_gpio_to_irq()
533 desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, pinnum, "irq"); sunxi_pinctrl_gpio_to_irq()
534 if (!desc) sunxi_pinctrl_gpio_to_irq()
537 irqnum = desc->irqbank * IRQ_PER_BANK + desc->irqnum; sunxi_pinctrl_gpio_to_irq()
557 pctl->irq_array[d->hwirq] - pctl->desc->pin_base); sunxi_pinctrl_irq_request_resources()
575 pctl->irq_array[d->hwirq] - pctl->desc->pin_base); sunxi_pinctrl_irq_release_resources()
581 u32 reg = sunxi_irq_cfg_reg(d->hwirq, pctl->desc->irq_bank_base); sunxi_pinctrl_irq_set_type()
629 pctl->desc->irq_bank_base); sunxi_pinctrl_irq_ack()
639 u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base); sunxi_pinctrl_irq_mask()
656 u32 reg = sunxi_irq_ctrl_reg(d->hwirq, pctl->desc->irq_bank_base); sunxi_pinctrl_irq_unmask()
711 struct sunxi_desc_function *desc; sunxi_pinctrl_irq_of_xlate() local
718 pin = pctl->desc->pin_base + base + intspec[1]; sunxi_pinctrl_irq_of_xlate()
720 desc = sunxi_pinctrl_desc_find_function_by_pin(pctl, pin, "irq"); sunxi_pinctrl_irq_of_xlate()
721 if (!desc) sunxi_pinctrl_irq_of_xlate()
724 *out_hwirq = desc->irqbank * PINS_PER_BANK + desc->irqnum; sunxi_pinctrl_irq_of_xlate()
734 static void sunxi_pinctrl_irq_handler(struct irq_desc *desc) sunxi_pinctrl_irq_handler() argument
736 unsigned int irq = irq_desc_get_irq(desc); sunxi_pinctrl_irq_handler()
737 struct irq_chip *chip = irq_desc_get_chip(desc); sunxi_pinctrl_irq_handler()
738 struct sunxi_pinctrl *pctl = irq_desc_get_handler_data(desc); sunxi_pinctrl_irq_handler()
741 for (bank = 0; bank < pctl->desc->irq_banks; bank++) sunxi_pinctrl_irq_handler()
745 if (bank == pctl->desc->irq_banks) sunxi_pinctrl_irq_handler()
748 reg = sunxi_irq_status_reg_from_bank(bank, pctl->desc->irq_bank_base); sunxi_pinctrl_irq_handler()
754 chained_irq_enter(chip, desc); sunxi_pinctrl_irq_handler()
760 chained_irq_exit(chip, desc); sunxi_pinctrl_irq_handler()
791 pctl->ngroups = pctl->desc->npins; sunxi_pinctrl_build_state()
800 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_build_state()
801 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_build_state()
813 pctl->desc->npins * sizeof(*pctl->functions), sunxi_pinctrl_build_state()
819 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_build_state()
820 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_build_state()
839 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_build_state()
840 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_build_state()
874 const struct sunxi_pinctrl_desc *desc) sunxi_pinctrl_init()
897 pctl->desc = desc; sunxi_pinctrl_init()
900 IRQ_PER_BANK * pctl->desc->irq_banks, sunxi_pinctrl_init()
913 pctl->desc->npins * sizeof(*pins), sunxi_pinctrl_init()
918 for (i = 0; i < pctl->desc->npins; i++) sunxi_pinctrl_init()
919 pins[i] = pctl->desc->pins[i].pin; sunxi_pinctrl_init()
930 pctrl_desc->npins = pctl->desc->npins; sunxi_pinctrl_init()
948 last_pin = pctl->desc->pins[pctl->desc->npins - 1].pin.number; sunxi_pinctrl_init()
961 pctl->desc->pin_base; sunxi_pinctrl_init()
964 pctl->chip->base = pctl->desc->pin_base; sunxi_pinctrl_init()
970 for (i = 0; i < pctl->desc->npins; i++) { sunxi_pinctrl_init()
971 const struct sunxi_desc_pin *pin = pctl->desc->pins + i; sunxi_pinctrl_init()
974 pin->pin.number - pctl->desc->pin_base, sunxi_pinctrl_init()
991 pctl->desc->irq_banks, sunxi_pinctrl_init()
999 for (i = 0; i < pctl->desc->irq_banks; i++) { sunxi_pinctrl_init()
1008 pctl->desc->irq_banks * IRQ_PER_BANK, sunxi_pinctrl_init()
1017 for (i = 0; i < (pctl->desc->irq_banks * IRQ_PER_BANK); i++) { sunxi_pinctrl_init()
1025 for (i = 0; i < pctl->desc->irq_banks; i++) { sunxi_pinctrl_init()
1028 pctl->desc->irq_bank_base)); sunxi_pinctrl_init()
1031 pctl->desc->irq_bank_base)); sunxi_pinctrl_init()
873 sunxi_pinctrl_init(struct platform_device *pdev, const struct sunxi_pinctrl_desc *desc) sunxi_pinctrl_init() argument
/linux-4.4.14/security/apparmor/
H A Dcrypto.c38 } desc; aa_calc_profile_hash() local
49 desc.shash.tfm = apparmor_tfm; aa_calc_profile_hash()
50 desc.shash.flags = 0; aa_calc_profile_hash()
52 error = crypto_shash_init(&desc.shash); aa_calc_profile_hash()
55 error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4); aa_calc_profile_hash()
58 error = crypto_shash_update(&desc.shash, (u8 *) start, len); aa_calc_profile_hash()
61 error = crypto_shash_final(&desc.shash, profile->hash); aa_calc_profile_hash()
/linux-4.4.14/security/integrity/evm/
H A Devm_crypto.c40 struct shash_desc *desc; init_desc() local
75 desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm), init_desc()
77 if (!desc) init_desc()
80 desc->tfm = *tfm; init_desc()
81 desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; init_desc()
83 rc = crypto_shash_init(desc); init_desc()
85 kfree(desc); init_desc()
88 return desc; init_desc()
97 static void hmac_add_misc(struct shash_desc *desc, struct inode *inode, hmac_add_misc() argument
114 crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc)); hmac_add_misc()
116 crypto_shash_update(desc, inode->i_sb->s_uuid, hmac_add_misc()
118 crypto_shash_final(desc, digest); hmac_add_misc()
135 struct shash_desc *desc; evm_calc_hmac_or_hash() local
144 desc = init_desc(type); evm_calc_hmac_or_hash()
145 if (IS_ERR(desc)) evm_calc_hmac_or_hash()
146 return PTR_ERR(desc); evm_calc_hmac_or_hash()
153 crypto_shash_update(desc, (const u8 *)req_xattr_value, evm_calc_hmac_or_hash()
168 crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size); evm_calc_hmac_or_hash()
170 hmac_add_misc(desc, inode, digest); evm_calc_hmac_or_hash()
174 kfree(desc); evm_calc_hmac_or_hash()
222 struct shash_desc *desc; evm_init_hmac() local
224 desc = init_desc(EVM_XATTR_HMAC); evm_init_hmac()
225 if (IS_ERR(desc)) { evm_init_hmac()
227 return PTR_ERR(desc); evm_init_hmac()
230 crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len); evm_init_hmac()
231 hmac_add_misc(desc, inode, hmac_val); evm_init_hmac()
232 kfree(desc); evm_init_hmac()
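
init_desc() above shows the kernel shash pattern: the operation state lives in a shash_desc sized for the chosen transform. A minimal sketch of a one-shot digest built the same way, assuming a "sha1" transform is available; error paths are kept short:

#include <crypto/hash.h>
#include <linux/err.h>
#include <linux/slab.h>

static int sketch_digest(const u8 *buf, unsigned int len, u8 *out)
{
	struct crypto_shash *tfm = crypto_alloc_shash("sha1", 0, 0);
	struct shash_desc *desc;
	int rc;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);
	/* state area follows the descriptor, as in init_desc() above */
	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		crypto_free_shash(tfm);
		return -ENOMEM;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	rc = crypto_shash_init(desc);
	if (!rc)
		rc = crypto_shash_update(desc, buf, len);
	if (!rc)
		rc = crypto_shash_final(desc, out);
	kfree(desc);
	crypto_free_shash(tfm);
	return rc;
}
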
/linux-4.4.14/include/linux/irqchip/
H A Dchained_irq.h28 struct irq_desc *desc) chained_irq_enter()
35 chip->irq_mask_ack(&desc->irq_data); chained_irq_enter()
37 chip->irq_mask(&desc->irq_data); chained_irq_enter()
39 chip->irq_ack(&desc->irq_data); chained_irq_enter()
44 struct irq_desc *desc) chained_irq_exit()
47 chip->irq_eoi(&desc->irq_data); chained_irq_exit()
49 chip->irq_unmask(&desc->irq_data); chained_irq_exit()
27 chained_irq_enter(struct irq_chip *chip, struct irq_desc *desc) chained_irq_enter() argument
43 chained_irq_exit(struct irq_chip *chip, struct irq_desc *desc) chained_irq_exit() argument
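
chained_irq_enter()/chained_irq_exit() above bracket a demultiplexing flow handler such as sunxi_pinctrl_irq_handler() earlier: mask/ack the parent on entry, dispatch the children, then eoi/unmask on exit. A sketch of that shape; read_pending_mask() and child_irq() are hypothetical helpers standing in for the hardware-specific parts:

static void demux_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long pending;
	unsigned int bit;

	chained_irq_enter(chip, desc);
	pending = read_pending_mask();			/* hypothetical */
	for_each_set_bit(bit, &pending, 32)
		generic_handle_irq(child_irq(bit));	/* hypothetical */
	chained_irq_exit(chip, desc);
}
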
/linux-4.4.14/tools/perf/util/intel-pt-decoder/
H A Dintel-pt-log.c104 char desc[INTEL_PT_PKT_DESC_MAX]; __intel_pt_log_packet() local
110 intel_pt_pkt_desc(packet, desc, INTEL_PT_PKT_DESC_MAX); __intel_pt_log_packet()
111 fprintf(f, "%s\n", desc); __intel_pt_log_packet()
116 char desc[INTEL_PT_INSN_DESC_MAX]; __intel_pt_log_insn() local
125 if (intel_pt_insn_desc(intel_pt_insn, desc, INTEL_PT_INSN_DESC_MAX) > 0) __intel_pt_log_insn()
126 fprintf(f, "%s\n", desc); __intel_pt_log_insn()
134 char desc[INTEL_PT_INSN_DESC_MAX]; __intel_pt_log_insn_no_data() local
140 if (intel_pt_insn_desc(intel_pt_insn, desc, INTEL_PT_INSN_DESC_MAX) > 0) __intel_pt_log_insn_no_data()
141 fprintf(f, "%s\n", desc); __intel_pt_log_insn_no_data()
/linux-4.4.14/include/linux/regulator/
H A Dof_regulator.h16 const struct regulator_desc *desc; member in struct:of_regulator_match
23 const struct regulator_desc *desc);
31 const struct regulator_desc *desc) of_get_regulator_init_data()
29 of_get_regulator_init_data(struct device *dev, struct device_node *node, const struct regulator_desc *desc) of_get_regulator_init_data() argument
/linux-4.4.14/arch/powerpc/crypto/
H A Dsha1.c31 static int sha1_init(struct shash_desc *desc) sha1_init() argument
33 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_init()
42 static int sha1_update(struct shash_desc *desc, const u8 *data, sha1_update() argument
45 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_update()
79 static int sha1_final(struct shash_desc *desc, u8 *out) sha1_final() argument
81 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_final()
92 sha1_update(desc, padding, padlen); sha1_final()
95 sha1_update(desc, (const u8 *)&bits, sizeof(bits)); sha1_final()
107 static int sha1_export(struct shash_desc *desc, void *out) sha1_export() argument
109 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_export()
115 static int sha1_import(struct shash_desc *desc, const void *in) sha1_import() argument
117 struct sha1_state *sctx = shash_desc_ctx(desc); sha1_import()
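
sha1_final() above pads the buffered message before appending the 64-bit bit count. A standalone sketch of the padding-length rule it relies on: pad to 56 bytes mod 64, wrapping past the block boundary when fewer than eight bytes remain:

static unsigned int sha1_padlen(unsigned long long byte_count)
{
	unsigned int index = byte_count & 0x3f;

	return (index < 56) ? (56 - index) : (120 - index);
}
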
H A Dmd5-glue.c36 static int ppc_md5_init(struct shash_desc *desc) ppc_md5_init() argument
38 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_init()
49 static int ppc_md5_update(struct shash_desc *desc, const u8 *data, ppc_md5_update() argument
52 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_update()
81 static int ppc_md5_final(struct shash_desc *desc, u8 *out) ppc_md5_final() argument
83 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_final()
113 static int ppc_md5_export(struct shash_desc *desc, void *out) ppc_md5_export() argument
115 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_export()
121 static int ppc_md5_import(struct shash_desc *desc, const void *in) ppc_md5_import() argument
123 struct md5_state *sctx = shash_desc_ctx(desc); ppc_md5_import()
H A Daes-spe-glue.c179 static int ppc_ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_ecb_encrypt() argument
182 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_ecb_encrypt()
187 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_ecb_encrypt()
189 err = blkcipher_walk_virt(desc, &walk); ppc_ecb_encrypt()
201 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_ecb_encrypt()
207 static int ppc_ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_ecb_decrypt() argument
210 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_ecb_decrypt()
215 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_ecb_decrypt()
217 err = blkcipher_walk_virt(desc, &walk); ppc_ecb_decrypt()
229 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_ecb_decrypt()
235 static int ppc_cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_cbc_encrypt() argument
238 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_cbc_encrypt()
243 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_cbc_encrypt()
245 err = blkcipher_walk_virt(desc, &walk); ppc_cbc_encrypt()
257 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_cbc_encrypt()
263 static int ppc_cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_cbc_decrypt() argument
266 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_cbc_decrypt()
271 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_cbc_decrypt()
273 err = blkcipher_walk_virt(desc, &walk); ppc_cbc_decrypt()
285 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_cbc_decrypt()
291 static int ppc_ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_ctr_crypt() argument
294 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_ctr_crypt()
299 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_ctr_crypt()
301 err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); ppc_ctr_crypt()
315 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_ctr_crypt()
321 static int ppc_xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_xts_encrypt() argument
324 struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_xts_encrypt()
330 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_xts_encrypt()
332 err = blkcipher_walk_virt(desc, &walk); ppc_xts_encrypt()
346 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_xts_encrypt()
352 static int ppc_xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, ppc_xts_decrypt() argument
355 struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_xts_decrypt()
361 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ppc_xts_decrypt()
363 err = blkcipher_walk_virt(desc, &walk); ppc_xts_decrypt()
377 err = blkcipher_walk_done(desc, &walk, ubytes); ppc_xts_decrypt()
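
Every mode helper above repeats one walk loop: map the scatterlists, process whole blocks, hand the remainder back. A sketch of that skeleton, with do_blocks() as a hypothetical stand-in for the SPE cipher core:

#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <linux/string.h>

/* stand-in for the real block cipher; hypothetical */
static void do_blocks(u8 *dst, const u8 *src, unsigned int nbytes)
{
	memcpy(dst, src, nbytes);
}

static int sketch_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
			struct scatterlist *src, unsigned int nbytes)
{
	struct blkcipher_walk walk;
	int err;

	blkcipher_walk_init(&walk, dst, src, nbytes);
	err = blkcipher_walk_virt(desc, &walk);

	while ((nbytes = walk.nbytes)) {
		/* keep only whole blocks; the tail goes back to the walk */
		unsigned int ubytes = nbytes & (AES_BLOCK_SIZE - 1);

		do_blocks(walk.dst.virt.addr, walk.src.virt.addr,
			  nbytes - ubytes);
		err = blkcipher_walk_done(desc, &walk, ubytes);
	}
	return err;
}
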
/linux-4.4.14/drivers/usb/image/
H A Dmicrotek.c212 static inline void mts_debug_dump(struct mts_desc* desc) { mts_debug_dump() argument
213 MTS_DEBUG("desc at 0x%x: toggle = %02x%02x\n", mts_debug_dump()
214 (int)desc, mts_debug_dump()
215 (int)desc->usb_dev->toggle[1],(int)desc->usb_dev->toggle[0] mts_debug_dump()
218 usb_sndbulkpipe(desc->usb_dev,desc->ep_out), mts_debug_dump()
219 usb_rcvbulkpipe(desc->usb_dev,desc->ep_response), mts_debug_dump()
220 usb_rcvbulkpipe(desc->usb_dev,desc->ep_image) mts_debug_dump()
317 static inline void mts_urb_abort(struct mts_desc* desc) { mts_urb_abort() argument
319 mts_debug_dump(desc); mts_urb_abort()
321 usb_kill_urb( desc->urb ); mts_urb_abort()
338 struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); mts_scsi_abort() local
342 mts_urb_abort(desc); mts_scsi_abort()
349 struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); mts_scsi_host_reset() local
353 mts_debug_dump(desc); mts_scsi_host_reset()
355 result = usb_lock_device_for_reset(desc->usb_dev, desc->usb_intf); mts_scsi_host_reset()
357 result = usb_reset_device(desc->usb_dev); mts_scsi_host_reset()
358 usb_unlock_device(desc->usb_dev); mts_scsi_host_reset()
529 mts_build_transfer_context(struct scsi_cmnd *srb, struct mts_desc* desc) mts_build_transfer_context() argument
536 desc->context.instance = desc; mts_build_transfer_context()
537 desc->context.srb = srb; mts_build_transfer_context()
538 desc->context.fragment = 0; mts_build_transfer_context()
541 desc->context.data = NULL; mts_build_transfer_context()
542 desc->context.data_length = 0; mts_build_transfer_context()
546 desc->context.data = sg_virt(&sg[0]); mts_build_transfer_context()
547 desc->context.data_length = sg[0].length; mts_build_transfer_context()
556 ) { pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_image); mts_build_transfer_context()
557 MTS_DEBUG( "transferring from desc->ep_image == %d\n", mts_build_transfer_context()
558 (int)desc->ep_image ); mts_build_transfer_context()
560 pipe = usb_rcvbulkpipe(desc->usb_dev,desc->ep_response); mts_build_transfer_context()
561 MTS_DEBUG( "transferring from desc->ep_response == %d\n", mts_build_transfer_context()
562 (int)desc->ep_response); mts_build_transfer_context()
564 MTS_DEBUG("transferring to desc->ep_out == %d\n", mts_build_transfer_context()
565 (int)desc->ep_out); mts_build_transfer_context()
566 pipe = usb_sndbulkpipe(desc->usb_dev,desc->ep_out); mts_build_transfer_context()
568 desc->context.data_pipe = pipe; mts_build_transfer_context()
575 struct mts_desc* desc = (struct mts_desc*)(srb->device->host->hostdata[0]); mts_scsi_queuecommand_lck() local
581 mts_debug_dump(desc); mts_scsi_queuecommand_lck()
598 usb_fill_bulk_urb(desc->urb, mts_scsi_queuecommand_lck()
599 desc->usb_dev, mts_scsi_queuecommand_lck()
600 usb_sndbulkpipe(desc->usb_dev,desc->ep_out), mts_scsi_queuecommand_lck()
604 &desc->context mts_scsi_queuecommand_lck()
608 mts_build_transfer_context( srb, desc ); mts_scsi_queuecommand_lck()
609 desc->context.final_callback = callback; mts_scsi_queuecommand_lck()
612 res=usb_submit_urb(desc->urb, GFP_ATOMIC); mts_scsi_queuecommand_lck()
696 if ( altsetting->desc.bNumEndpoints != MTS_EP_TOTAL ) { mts_usb_probe()
698 (int)MTS_EP_TOTAL, (int)altsetting->desc.bNumEndpoints ); mts_usb_probe()
702 for( i = 0; i < altsetting->desc.bNumEndpoints; i++ ) { mts_usb_probe()
703 if ((altsetting->endpoint[i].desc.bmAttributes & mts_usb_probe()
707 (int)altsetting->endpoint[i].desc.bEndpointAddress ); mts_usb_probe()
709 if (altsetting->endpoint[i].desc.bEndpointAddress & mts_usb_probe()
712 = altsetting->endpoint[i].desc.bEndpointAddress & mts_usb_probe()
720 ep_out = altsetting->endpoint[i].desc.bEndpointAddress & mts_usb_probe()
795 struct mts_desc *desc = usb_get_intfdata(intf); mts_usb_disconnect() local
799 usb_kill_urb(desc->urb); mts_usb_disconnect()
800 scsi_remove_host(desc->host); mts_usb_disconnect()
802 scsi_host_put(desc->host); mts_usb_disconnect()
803 usb_free_urb(desc->urb); mts_usb_disconnect()
804 kfree(desc->context.scsi_status); mts_usb_disconnect()
805 kfree(desc); mts_usb_disconnect()
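
mts_usb_probe() above open-codes its endpoint scan with bmAttributes/bEndpointAddress masks. The same classification written with the standard usb_endpoint_* accessors, as a sketch:

#include <linux/usb.h>

static void find_bulk_endpoints(struct usb_host_interface *alt,
				u8 *ep_in, u8 *ep_out)
{
	int i;

	for (i = 0; i < alt->desc.bNumEndpoints; i++) {
		const struct usb_endpoint_descriptor *e =
			&alt->endpoint[i].desc;

		if (!usb_endpoint_xfer_bulk(e))
			continue;
		if (usb_endpoint_dir_in(e))
			*ep_in = usb_endpoint_num(e);
		else
			*ep_out = usb_endpoint_num(e);
	}
}
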
/linux-4.4.14/arch/s390/crypto/
H A Daes_s390.c252 static int fallback_blk_dec(struct blkcipher_desc *desc, fallback_blk_dec() argument
258 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); fallback_blk_dec()
260 tfm = desc->tfm; fallback_blk_dec()
261 desc->tfm = sctx->fallback.blk; fallback_blk_dec()
263 ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); fallback_blk_dec()
265 desc->tfm = tfm; fallback_blk_dec()
269 static int fallback_blk_enc(struct blkcipher_desc *desc, fallback_blk_enc() argument
275 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); fallback_blk_enc()
277 tfm = desc->tfm; fallback_blk_enc()
278 desc->tfm = sctx->fallback.blk; fallback_blk_enc()
280 ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); fallback_blk_enc()
282 desc->tfm = tfm; fallback_blk_enc()
316 static int ecb_aes_crypt(struct blkcipher_desc *desc, long func, void *param, ecb_aes_crypt() argument
319 int ret = blkcipher_walk_virt(desc, walk); ecb_aes_crypt()
333 ret = blkcipher_walk_done(desc, walk, nbytes); ecb_aes_crypt()
339 static int ecb_aes_encrypt(struct blkcipher_desc *desc, ecb_aes_encrypt() argument
343 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ecb_aes_encrypt()
347 return fallback_blk_enc(desc, dst, src, nbytes); ecb_aes_encrypt()
350 return ecb_aes_crypt(desc, sctx->enc, sctx->key, &walk); ecb_aes_encrypt()
353 static int ecb_aes_decrypt(struct blkcipher_desc *desc, ecb_aes_decrypt() argument
357 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ecb_aes_decrypt()
361 return fallback_blk_dec(desc, dst, src, nbytes); ecb_aes_decrypt()
364 return ecb_aes_crypt(desc, sctx->dec, sctx->key, &walk); ecb_aes_decrypt()
445 static int cbc_aes_crypt(struct blkcipher_desc *desc, long func, cbc_aes_crypt() argument
448 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_crypt()
449 int ret = blkcipher_walk_virt(desc, walk); cbc_aes_crypt()
472 ret = blkcipher_walk_done(desc, walk, nbytes); cbc_aes_crypt()
480 static int cbc_aes_encrypt(struct blkcipher_desc *desc, cbc_aes_encrypt() argument
484 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_encrypt()
488 return fallback_blk_enc(desc, dst, src, nbytes); cbc_aes_encrypt()
491 return cbc_aes_crypt(desc, sctx->enc, &walk); cbc_aes_encrypt()
494 static int cbc_aes_decrypt(struct blkcipher_desc *desc, cbc_aes_decrypt() argument
498 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_decrypt()
502 return fallback_blk_dec(desc, dst, src, nbytes); cbc_aes_decrypt()
505 return cbc_aes_crypt(desc, sctx->dec, &walk); cbc_aes_decrypt()
551 static int xts_fallback_decrypt(struct blkcipher_desc *desc, xts_fallback_decrypt() argument
555 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); xts_fallback_decrypt()
559 tfm = desc->tfm; xts_fallback_decrypt()
560 desc->tfm = xts_ctx->fallback; xts_fallback_decrypt()
562 ret = crypto_blkcipher_decrypt_iv(desc, dst, src, nbytes); xts_fallback_decrypt()
564 desc->tfm = tfm; xts_fallback_decrypt()
568 static int xts_fallback_encrypt(struct blkcipher_desc *desc, xts_fallback_encrypt() argument
572 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); xts_fallback_encrypt()
576 tfm = desc->tfm; xts_fallback_encrypt()
577 desc->tfm = xts_ctx->fallback; xts_fallback_encrypt()
579 ret = crypto_blkcipher_encrypt_iv(desc, dst, src, nbytes); xts_fallback_encrypt()
581 desc->tfm = tfm; xts_fallback_encrypt()
617 static int xts_aes_crypt(struct blkcipher_desc *desc, long func, xts_aes_crypt() argument
622 int ret = blkcipher_walk_virt(desc, walk); xts_aes_crypt()
657 ret = blkcipher_walk_done(desc, walk, nbytes); xts_aes_crypt()
663 static int xts_aes_encrypt(struct blkcipher_desc *desc, xts_aes_encrypt() argument
667 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); xts_aes_encrypt()
671 return xts_fallback_encrypt(desc, dst, src, nbytes); xts_aes_encrypt()
674 return xts_aes_crypt(desc, xts_ctx->enc, xts_ctx, &walk); xts_aes_encrypt()
677 static int xts_aes_decrypt(struct blkcipher_desc *desc, xts_aes_decrypt() argument
681 struct s390_xts_ctx *xts_ctx = crypto_blkcipher_ctx(desc->tfm); xts_aes_decrypt()
685 return xts_fallback_decrypt(desc, dst, src, nbytes); xts_aes_decrypt()
688 return xts_aes_crypt(desc, xts_ctx->dec, xts_ctx, &walk); xts_aes_decrypt()
778 static int ctr_aes_crypt(struct blkcipher_desc *desc, long func, ctr_aes_crypt() argument
781 int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE); ctr_aes_crypt()
816 ret = blkcipher_walk_done(desc, walk, nbytes); ctr_aes_crypt()
840 ret = blkcipher_walk_done(desc, walk, 0); ctr_aes_crypt()
847 static int ctr_aes_encrypt(struct blkcipher_desc *desc, ctr_aes_encrypt() argument
851 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ctr_aes_encrypt()
855 return ctr_aes_crypt(desc, sctx->enc, sctx, &walk); ctr_aes_encrypt()
858 static int ctr_aes_decrypt(struct blkcipher_desc *desc, ctr_aes_decrypt() argument
862 struct s390_aes_ctx *sctx = crypto_blkcipher_ctx(desc->tfm); ctr_aes_decrypt()
866 return ctr_aes_crypt(desc, sctx->dec, sctx, &walk); ctr_aes_decrypt()
H A Ddes_s390.c86 static int ecb_desall_crypt(struct blkcipher_desc *desc, long func, ecb_desall_crypt() argument
89 int ret = blkcipher_walk_virt(desc, walk); ecb_desall_crypt()
103 ret = blkcipher_walk_done(desc, walk, nbytes); ecb_desall_crypt()
109 static int cbc_desall_crypt(struct blkcipher_desc *desc, long func, cbc_desall_crypt() argument
112 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_desall_crypt()
113 int ret = blkcipher_walk_virt(desc, walk); cbc_desall_crypt()
136 ret = blkcipher_walk_done(desc, walk, nbytes); cbc_desall_crypt()
144 static int ecb_des_encrypt(struct blkcipher_desc *desc, ecb_des_encrypt() argument
148 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_des_encrypt()
152 return ecb_desall_crypt(desc, KM_DEA_ENCRYPT, ctx->key, &walk); ecb_des_encrypt()
155 static int ecb_des_decrypt(struct blkcipher_desc *desc, ecb_des_decrypt() argument
159 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_des_decrypt()
163 return ecb_desall_crypt(desc, KM_DEA_DECRYPT, ctx->key, &walk); ecb_des_decrypt()
186 static int cbc_des_encrypt(struct blkcipher_desc *desc, cbc_des_encrypt() argument
193 return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk); cbc_des_encrypt()
196 static int cbc_des_decrypt(struct blkcipher_desc *desc, cbc_des_decrypt() argument
203 return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk); cbc_des_decrypt()
290 static int ecb_des3_encrypt(struct blkcipher_desc *desc, ecb_des3_encrypt() argument
294 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_des3_encrypt()
298 return ecb_desall_crypt(desc, KM_TDEA_192_ENCRYPT, ctx->key, &walk); ecb_des3_encrypt()
301 static int ecb_des3_decrypt(struct blkcipher_desc *desc, ecb_des3_decrypt() argument
305 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_des3_decrypt()
309 return ecb_desall_crypt(desc, KM_TDEA_192_DECRYPT, ctx->key, &walk); ecb_des3_decrypt()
332 static int cbc_des3_encrypt(struct blkcipher_desc *desc, cbc_des3_encrypt() argument
339 return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk); cbc_des3_encrypt()
342 static int cbc_des3_decrypt(struct blkcipher_desc *desc, cbc_des3_decrypt() argument
349 return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk); cbc_des3_decrypt()
386 static int ctr_desall_crypt(struct blkcipher_desc *desc, long func, ctr_desall_crypt() argument
390 int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE); ctr_desall_crypt()
425 ret = blkcipher_walk_done(desc, walk, nbytes); ctr_desall_crypt()
447 ret = blkcipher_walk_done(desc, walk, 0); ctr_desall_crypt()
453 static int ctr_des_encrypt(struct blkcipher_desc *desc, ctr_des_encrypt() argument
457 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_des_encrypt()
461 return ctr_desall_crypt(desc, KMCTR_DEA_ENCRYPT, ctx, &walk); ctr_des_encrypt()
464 static int ctr_des_decrypt(struct blkcipher_desc *desc, ctr_des_decrypt() argument
468 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_des_decrypt()
472 return ctr_desall_crypt(desc, KMCTR_DEA_DECRYPT, ctx, &walk); ctr_des_decrypt()
496 static int ctr_des3_encrypt(struct blkcipher_desc *desc, ctr_des3_encrypt() argument
500 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_des3_encrypt()
504 return ctr_desall_crypt(desc, KMCTR_TDEA_192_ENCRYPT, ctx, &walk); ctr_des3_encrypt()
507 static int ctr_des3_decrypt(struct blkcipher_desc *desc, ctr_des3_decrypt() argument
511 struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_des3_decrypt()
515 return ctr_desall_crypt(desc, KMCTR_TDEA_192_DECRYPT, ctx, &walk); ctr_des3_decrypt()
H A Dsha_common.c21 int s390_sha_update(struct shash_desc *desc, const u8 *data, unsigned int len) s390_sha_update() argument
23 struct s390_sha_ctx *ctx = shash_desc_ctx(desc); s390_sha_update()
24 unsigned int bsize = crypto_shash_blocksize(desc->tfm); s390_sha_update()
63 int s390_sha_final(struct shash_desc *desc, u8 *out) s390_sha_final() argument
65 struct s390_sha_ctx *ctx = shash_desc_ctx(desc); s390_sha_final()
66 unsigned int bsize = crypto_shash_blocksize(desc->tfm); s390_sha_final()
97 memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm)); s390_sha_final()
/linux-4.4.14/drivers/net/wireless/ti/wlcore/
H A Drx.c60 struct wl1271_rx_descriptor *desc, wl1271_rx_status()
66 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG) wl1271_rx_status()
71 status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band); wl1271_rx_status()
74 if (desc->rate <= wl->hw_min_ht_rate) wl1271_rx_status()
83 status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7)); wl1271_rx_status()
84 status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7); wl1271_rx_status()
91 wl->noise = desc->rssi - (desc->snr >> 1); wl1271_rx_status()
93 status->freq = ieee80211_channel_to_frequency(desc->channel, wl1271_rx_status()
96 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { wl1271_rx_status()
97 u8 desc_err_code = desc->status & WL1271_RX_DESC_STATUS_MASK; wl1271_rx_status()
110 wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel, wl1271_rx_status()
117 struct wl1271_rx_descriptor *desc; wl1271_rx_handle_data() local
147 desc = (struct wl1271_rx_descriptor *) data; wl1271_rx_handle_data()
149 if (desc->packet_class == WL12XX_RX_CLASS_LOGGER) { wl1271_rx_handle_data()
150 size_t len = length - sizeof(*desc); wl1271_rx_handle_data()
151 wl12xx_copy_fwlog(wl, data + sizeof(*desc), len); wl1271_rx_handle_data()
157 if (desc->status & WL1271_RX_DESC_DECRYPT_FAIL) { wl1271_rx_handle_data()
158 hdr = (void *)(data + sizeof(*desc) + offset_to_data); wl1271_rx_handle_data()
160 desc->status & WL1271_RX_DESC_STATUS_MASK, wl1271_rx_handle_data()
162 wl1271_dump((DEBUG_RX|DEBUG_CMD), "PKT: ", data + sizeof(*desc), wl1271_rx_handle_data()
186 memcpy(buf, data + sizeof(*desc), pkt_data_len); wl1271_rx_handle_data()
190 *hlid = desc->hlid; wl1271_rx_handle_data()
198 wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon); wl1271_rx_handle_data()
199 wlcore_hw_set_rx_csum(wl, desc, skb); wl1271_rx_handle_data()
203 skb->len - desc->pad_len, wl1271_rx_handle_data()
59 wl1271_rx_status(struct wl1271 *wl, struct wl1271_rx_descriptor *desc, struct ieee80211_rx_status *status, u8 beacon) wl1271_rx_status() argument
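
wl1271_rx_status() above splits desc->rssi into a 7-bit level and an antenna-diversity flag, then forces the sign bit so the level reads as negative dBm. A standalone sketch of that bit juggling, assuming RSSI_LEVEL_BITMASK is the low seven bits and ANT_DIVERSITY_BITMASK is bit 7:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t raw = 0xa5;			/* hypothetical rssi byte */
	int8_t signal = (raw & 0x7f) | 0x80;	/* level with sign forced on */
	int antenna = (raw & 0x80) >> 7;	/* diversity flag */

	printf("signal=%d dBm, antenna=%d\n", signal, antenna);
	return 0;
}
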
/linux-4.4.14/drivers/rapidio/devices/
H A Dtsi721_dma.c43 static int tsi721_submit_sg(struct tsi721_tx_desc *desc);
126 "desc status FIFO @ %p (phys = %llx) size=0x%x\n", tsi721_bdma_ch_init()
336 tsi721_desc_fill_init(struct tsi721_tx_desc *desc, tsi721_desc_fill_init() argument
347 (desc->rtype << 19) | desc->destid); tsi721_desc_fill_init()
348 bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) | tsi721_desc_fill_init()
350 rio_addr = (desc->rio_addr >> 2) | tsi721_desc_fill_init()
351 ((u64)(desc->rio_addr_u & 0x3) << 62); tsi721_desc_fill_init()
378 struct tsi721_tx_desc *desc) tsi721_dma_tx_err()
380 struct dma_async_tx_descriptor *txd = &desc->txd; tsi721_dma_tx_err()
384 list_move(&desc->desc_node, &bdma_chan->free_list); tsi721_dma_tx_err()
414 static int tsi721_submit_sg(struct tsi721_tx_desc *desc) tsi721_submit_sg() argument
416 struct dma_chan *dchan = desc->txd.chan; tsi721_submit_sg()
439 rio_addr = desc->rio_addr; tsi721_submit_sg()
457 for_each_sg(desc->sg, sg, desc->sg_len, i) { tsi721_submit_sg()
460 i, desc->sg_len, tsi721_submit_sg()
483 "%s: prev desc final len: %d\n", tsi721_submit_sg()
487 desc->rio_addr = rio_addr; tsi721_submit_sg()
493 desc->sg = sg; tsi721_submit_sg()
494 desc->sg_len -= i; tsi721_submit_sg()
499 err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size); tsi721_submit_sg()
502 "Failed to build desc: err=%d\n", err); tsi721_submit_sg()
507 bd_ptr, desc->destid, desc->rio_addr); tsi721_submit_sg()
522 dev_dbg(dchan->device->dev, "%s: last desc final len: %d\n", tsi721_submit_sg()
524 desc->sg_len = 0; tsi721_submit_sg()
539 struct tsi721_tx_desc *desc; tsi721_advance_work() local
553 desc = tsi721_dma_first_active(bdma_chan); tsi721_advance_work()
554 err = tsi721_submit_sg(desc); tsi721_advance_work()
558 tsi721_dma_tx_err(bdma_chan, desc); tsi721_advance_work()
593 struct tsi721_tx_desc *desc; tsi721_dma_tasklet() local
597 desc = tsi721_dma_first_active(bdma_chan); tsi721_dma_tasklet()
599 if (desc->sg_len == 0) { tsi721_dma_tasklet()
603 desc->status = DMA_COMPLETE; tsi721_dma_tasklet()
604 dma_cookie_complete(&desc->txd); tsi721_dma_tasklet()
605 if (desc->txd.flags & DMA_PREP_INTERRUPT) { tsi721_dma_tasklet()
606 callback = desc->txd.callback; tsi721_dma_tasklet()
607 param = desc->txd.callback_param; tsi721_dma_tasklet()
609 list_move(&desc->desc_node, &bdma_chan->free_list); tsi721_dma_tasklet()
626 struct tsi721_tx_desc *desc = to_tsi721_desc(txd); tsi721_tx_submit() local
631 if (!list_empty(&desc->desc_node)) { tsi721_tx_submit()
645 desc->status = DMA_IN_PROGRESS; tsi721_tx_submit()
646 list_add_tail(&desc->desc_node, &bdma_chan->queue); tsi721_tx_submit()
655 struct tsi721_tx_desc *desc = NULL; tsi721_alloc_chan_resources() local
672 desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc), tsi721_alloc_chan_resources()
674 if (!desc) { tsi721_alloc_chan_resources()
681 bdma_chan->tx_desc = desc; tsi721_alloc_chan_resources()
684 dma_async_tx_descriptor_init(&desc[i].txd, dchan); tsi721_alloc_chan_resources()
685 desc[i].txd.tx_submit = tsi721_tx_submit; tsi721_alloc_chan_resources()
686 desc[i].txd.flags = DMA_CTRL_ACK; tsi721_alloc_chan_resources()
687 list_add(&desc[i].desc_node, &bdma_chan->free_list); tsi721_alloc_chan_resources()
762 struct tsi721_tx_desc *desc, *_d; tsi721_prep_rio_sg() local
798 list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) { tsi721_prep_rio_sg()
799 if (async_tx_test_ack(&desc->txd)) { tsi721_prep_rio_sg()
800 list_del_init(&desc->desc_node); tsi721_prep_rio_sg()
801 desc->destid = rext->destid; tsi721_prep_rio_sg()
802 desc->rio_addr = rext->rio_addr; tsi721_prep_rio_sg()
803 desc->rio_addr_u = 0; tsi721_prep_rio_sg()
804 desc->rtype = rtype; tsi721_prep_rio_sg()
805 desc->sg_len = sg_len; tsi721_prep_rio_sg()
806 desc->sg = sgl; tsi721_prep_rio_sg()
807 txd = &desc->txd; tsi721_prep_rio_sg()
821 struct tsi721_tx_desc *desc, *_d; tsi721_terminate_all() local
845 list_for_each_entry_safe(desc, _d, &list, desc_node) tsi721_terminate_all()
846 tsi721_dma_tx_err(bdma_chan, desc); tsi721_terminate_all()
377 tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan, struct tsi721_tx_desc *desc) tsi721_dma_tx_err() argument
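
tsi721_desc_fill_init() above packs a 66-bit RapidIO address: the two lowest bits ride in the top of the bcount word, the rest in the raddr words. A standalone sketch of that split:

#include <stdint.h>

struct rio_addr_split {
	uint32_t bcount_hi;	/* goes into bits 31:30 of bcount */
	uint64_t raddr;		/* goes into the raddr lsb/msb words */
};

static struct rio_addr_split split_rio_addr(uint64_t rio_addr,
					    uint32_t rio_addr_u)
{
	struct rio_addr_split s;

	s.bcount_hi = (uint32_t)(rio_addr & 0x3) << 30;
	s.raddr = (rio_addr >> 2) | ((uint64_t)(rio_addr_u & 0x3) << 62);
	return s;
}
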
/linux-4.4.14/drivers/dma/ioat/
H A Dprep.c58 static dma_addr_t pq16_get_src(struct ioat_raw_descriptor *desc[3], int idx) pq16_get_src() argument
60 struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; pq16_get_src()
75 static void pq16_set_src(struct ioat_raw_descriptor *desc[3], pq16_set_src() argument
78 struct ioat_pq_descriptor *pq = (struct ioat_pq_descriptor *)desc[0]; pq16_set_src()
80 (struct ioat_pq16a_descriptor *)desc[1]; pq16_set_src()
81 struct ioat_raw_descriptor *raw = desc[pq16_idx_to_desc[idx]]; pq16_set_src()
118 struct ioat_ring_ent *desc; ioat_dma_prep_memcpy_lock() local
137 desc = ioat_get_ring_ent(ioat_chan, idx + i); ioat_dma_prep_memcpy_lock()
138 hw = desc->hw; ioat_dma_prep_memcpy_lock()
148 dump_desc_dbg(ioat_chan, desc); ioat_dma_prep_memcpy_lock()
151 desc->txd.flags = flags; ioat_dma_prep_memcpy_lock()
152 desc->len = total_len; ioat_dma_prep_memcpy_lock()
156 dump_desc_dbg(ioat_chan, desc); ioat_dma_prep_memcpy_lock()
159 return &desc->txd; ioat_dma_prep_memcpy_lock()
170 struct ioat_ring_ent *desc; __ioat_prep_xor_lock() local
209 desc = ioat_get_ring_ent(ioat_chan, idx + i); __ioat_prep_xor_lock()
210 xor = desc->xor; __ioat_prep_xor_lock()
231 dump_desc_dbg(ioat_chan, desc); __ioat_prep_xor_lock()
235 desc->txd.flags = flags; __ioat_prep_xor_lock()
236 desc->len = total_len; __ioat_prep_xor_lock()
238 desc->result = result; __ioat_prep_xor_lock()
288 dump_pq_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc, dump_pq_desc_dbg() argument
292 struct ioat_pq_descriptor *pq = desc->pq; dump_pq_desc_dbg()
298 dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" dump_pq_desc_dbg()
301 desc_id(desc), (unsigned long long) desc->txd.phys, dump_pq_desc_dbg()
303 desc->txd.flags, pq->size, pq->ctl, pq->ctl_f.op, dump_pq_desc_dbg()
316 struct ioat_ring_ent *desc) dump_pq16_desc_dbg()
319 struct ioat_pq_descriptor *pq = desc->pq; dump_pq16_desc_dbg()
326 if (desc->sed) { dump_pq16_desc_dbg()
327 descs[1] = (void *)desc->sed->hw; dump_pq16_desc_dbg()
328 descs[2] = (void *)desc->sed->hw + 64; dump_pq16_desc_dbg()
331 dev_dbg(dev, "desc[%d]: (%#llx->%#llx) flags: %#x" dump_pq16_desc_dbg()
334 desc_id(desc), (unsigned long long) desc->txd.phys, dump_pq16_desc_dbg()
336 desc->txd.flags, pq->size, pq->ctl, dump_pq16_desc_dbg()
359 struct ioat_ring_ent *desc; __ioat_prep_pq_lock() local
404 desc = ioat_get_ring_ent(ioat_chan, idx + i); __ioat_prep_pq_lock()
405 pq = desc->pq; __ioat_prep_pq_lock()
445 desc->txd.flags = flags; __ioat_prep_pq_lock()
446 desc->len = total_len; __ioat_prep_pq_lock()
448 desc->result = result; __ioat_prep_pq_lock()
450 dump_pq_desc_dbg(ioat_chan, desc, ext); __ioat_prep_pq_lock()
455 compl_desc = desc; __ioat_prep_pq_lock()
482 struct ioat_ring_ent *desc; __ioat_prep_pq16_lock() local
512 desc = ioat_get_ring_ent(ioat_chan, idx + i); __ioat_prep_pq16_lock()
513 pq = desc->pq; __ioat_prep_pq16_lock()
517 desc->sed = ioat3_alloc_sed(ioat_dma, (src_cnt-2) >> 3); __ioat_prep_pq16_lock()
518 if (!desc->sed) { __ioat_prep_pq16_lock()
524 pq->sed_addr = desc->sed->dma; __ioat_prep_pq16_lock()
525 desc->sed->parent = desc; __ioat_prep_pq16_lock()
527 descs[1] = (struct ioat_raw_descriptor *)desc->sed->hw; __ioat_prep_pq16_lock()
559 desc->txd.flags = flags; __ioat_prep_pq16_lock()
560 desc->len = total_len; __ioat_prep_pq16_lock()
562 desc->result = result; __ioat_prep_pq16_lock()
565 /* with cb3.3 we should be able to do completion w/o a null desc */ __ioat_prep_pq16_lock()
569 dump_pq16_desc_dbg(ioat_chan, desc); __ioat_prep_pq16_lock()
572 return &desc->txd; __ioat_prep_pq16_lock()
720 struct ioat_ring_ent *desc; ioat_prep_interrupt_lock() local
727 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); ioat_prep_interrupt_lock()
731 hw = desc->hw; ioat_prep_interrupt_lock()
741 desc->txd.flags = flags; ioat_prep_interrupt_lock()
742 desc->len = 1; ioat_prep_interrupt_lock()
744 dump_desc_dbg(ioat_chan, desc); ioat_prep_interrupt_lock()
747 return &desc->txd; ioat_prep_interrupt_lock()
315 dump_pq16_desc_dbg(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc) dump_pq16_desc_dbg() argument
H A Ddma.c165 struct ioat_ring_ent *desc; __ioat_start_null_desc() local
170 "Unable to start null desc - ring full\n"); __ioat_start_null_desc()
177 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); __ioat_start_null_desc()
179 hw = desc->hw; __ioat_start_null_desc()
188 async_tx_ack(&desc->txd); __ioat_start_null_desc()
189 ioat_set_chainaddr(ioat_chan, desc->txd.phys); __ioat_start_null_desc()
190 dump_desc_dbg(ioat_chan, desc); __ioat_start_null_desc()
218 struct ioat_ring_ent *desc; __ioat_restart_chan() local
220 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail); __ioat_restart_chan()
221 ioat_set_chainaddr(ioat_chan, desc->txd.phys); __ioat_restart_chan()
296 struct ioat_ring_ent *desc; ioat_alloc_ring_ent() local
306 desc = kmem_cache_zalloc(ioat_cache, flags); ioat_alloc_ring_ent()
307 if (!desc) { ioat_alloc_ring_ent()
312 dma_async_tx_descriptor_init(&desc->txd, chan); ioat_alloc_ring_ent()
313 desc->txd.tx_submit = ioat_tx_submit_unlock; ioat_alloc_ring_ent()
314 desc->hw = hw; ioat_alloc_ring_ent()
315 desc->txd.phys = phys; ioat_alloc_ring_ent()
316 return desc; ioat_alloc_ring_ent()
319 void ioat_free_ring_ent(struct ioat_ring_ent *desc, struct dma_chan *chan) ioat_free_ring_ent() argument
324 pci_pool_free(ioat_dma->dma_pool, desc->hw, desc->txd.phys); ioat_free_ring_ent()
325 kmem_cache_free(ioat_cache, desc); ioat_free_ring_ent()
532 static bool desc_has_ext(struct ioat_ring_ent *desc) desc_has_ext() argument
534 struct ioat_dma_descriptor *hw = desc->hw; desc_has_ext()
538 struct ioat_xor_descriptor *xor = desc->xor; desc_has_ext()
544 struct ioat_pq_descriptor *pq = desc->pq; desc_has_ext()
591 desc_get_errstat(struct ioatdma_chan *ioat_chan, struct ioat_ring_ent *desc) desc_get_errstat() argument
593 struct ioat_dma_descriptor *hw = desc->hw; desc_get_errstat()
599 struct ioat_pq_descriptor *pq = desc->pq; desc_get_errstat()
608 *desc->result |= SUM_CHECK_P_RESULT; desc_get_errstat()
611 *desc->result |= SUM_CHECK_Q_RESULT; desc_get_errstat()
627 struct ioat_ring_ent *desc; __cleanup() local
651 desc = ioat_get_ring_ent(ioat_chan, idx + i); __cleanup()
652 dump_desc_dbg(ioat_chan, desc); __cleanup()
656 desc_get_errstat(ioat_chan, desc); __cleanup()
658 tx = &desc->txd; __cleanup()
672 if (desc_has_ext(desc)) { __cleanup()
678 if (desc->sed) { __cleanup()
679 ioat_free_sed(ioat_dma, desc->sed); __cleanup()
680 desc->sed = NULL; __cleanup()
750 struct ioat_ring_ent *desc; ioat_eh() local
765 desc = ioat_get_ring_ent(ioat_chan, ioat_chan->tail); ioat_eh()
766 hw = desc->hw; ioat_eh()
767 dump_desc_dbg(ioat_chan, desc); ioat_eh()
772 *desc->result |= SUM_CHECK_P_RESULT; ioat_eh()
779 *desc->result |= SUM_CHECK_P_RESULT; ioat_eh()
783 *desc->result |= SUM_CHECK_Q_RESULT; ioat_eh()
795 tx = &desc->txd; ioat_eh()
810 *ioat_chan->completion = desc->txd.phys; ioat_eh()
/linux-4.4.14/drivers/power/
H A Dcharger-manager.c95 switch (cm->desc->battery_present) { is_batt_present()
102 psy = power_supply_get_by_name(cm->desc->psy_fuel_gauge); is_batt_present()
113 for (i = 0; cm->desc->psy_charger_stat[i]; i++) { is_batt_present()
115 cm->desc->psy_charger_stat[i]); is_batt_present()
118 cm->desc->psy_charger_stat[i]); is_batt_present()
152 for (i = 0; cm->desc->psy_charger_stat[i]; i++) { is_ext_pwr_online()
153 psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]); is_ext_pwr_online()
156 cm->desc->psy_charger_stat[i]); is_ext_pwr_online()
186 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); get_batt_uV()
216 for (i = 0; cm->desc->psy_charger_stat[i]; i++) { is_charging()
223 psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]); is_charging()
226 cm->desc->psy_charger_stat[i]); is_charging()
235 cm->desc->psy_charger_stat[i]); is_charging()
253 cm->desc->psy_charger_stat[i]); is_charging()
275 struct charger_desc *desc = cm->desc; is_full_charged() local
286 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); is_full_charged()
290 if (desc->fullbatt_full_capacity > 0) { is_full_charged()
296 if (!ret && val.intval > desc->fullbatt_full_capacity) { is_full_charged()
303 if (desc->fullbatt_uV > 0) { is_full_charged()
305 if (!ret && uV >= desc->fullbatt_uV) { is_full_charged()
312 if (desc->fullbatt_soc > 0) { is_full_charged()
317 if (!ret && val.intval >= desc->fullbatt_soc) { is_full_charged()
334 switch (cm->desc->polling_mode) { is_polling_required()
345 cm->desc->polling_mode); is_polling_required()
364 struct charger_desc *desc = cm->desc; try_charger_enable() local
381 for (i = 0 ; i < desc->num_charger_regulators ; i++) { try_charger_enable()
382 if (desc->charger_regulators[i].externally_control) try_charger_enable()
385 err = regulator_enable(desc->charger_regulators[i].consumer); try_charger_enable()
388 desc->charger_regulators[i].regulator_name); try_charger_enable()
399 for (i = 0 ; i < desc->num_charger_regulators ; i++) { try_charger_enable()
400 if (desc->charger_regulators[i].externally_control) try_charger_enable()
403 err = regulator_disable(desc->charger_regulators[i].consumer); try_charger_enable()
406 desc->charger_regulators[i].regulator_name); try_charger_enable()
414 for (i = 0; i < desc->num_charger_regulators; i++) { try_charger_enable()
416 desc->charger_regulators[i].consumer)) { try_charger_enable()
418 desc->charger_regulators[i].consumer); try_charger_enable()
420 desc->charger_regulators[i].regulator_name); try_charger_enable()
520 struct charger_desc *desc = cm->desc; fullbatt_vchk() local
526 if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms) fullbatt_vchk()
535 diff = desc->fullbatt_uV - batt_uV; fullbatt_vchk()
541 if (diff > desc->fullbatt_vchkdrop_uV) { fullbatt_vchk()
559 struct charger_desc *desc = cm->desc; check_charging_duration() local
564 if (!desc->charging_max_duration_ms && check_charging_duration()
565 !desc->discharging_max_duration_ms) check_charging_duration()
571 if (duration > desc->charging_max_duration_ms) { check_charging_duration()
573 desc->charging_max_duration_ms); check_charging_duration()
581 if (duration > desc->charging_max_duration_ms && check_charging_duration()
584 desc->discharging_max_duration_ms); check_charging_duration()
600 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); cm_get_battery_temperature_by_psy()
617 if (!cm->desc->measure_battery_temp) cm_get_battery_temperature()
638 struct charger_desc *desc = cm->desc; cm_check_thermal_status() local
653 upper_limit = desc->temp_max; cm_check_thermal_status()
654 lower_limit = desc->temp_min; cm_check_thermal_status()
657 upper_limit -= desc->temp_diff; cm_check_thermal_status()
658 lower_limit += desc->temp_diff; cm_check_thermal_status()
771 if (is_polling_required(cm) && cm->desc->polling_interval_ms) { _setup_polling()
774 if (min > cm->desc->polling_interval_ms) _setup_polling()
775 min = cm->desc->polling_interval_ms; _setup_polling()
830 struct charger_desc *desc = cm->desc; fullbatt_handler() local
832 if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms) fullbatt_handler()
839 msecs_to_jiffies(desc->fullbatt_vchkdrop_ms)); fullbatt_handler()
841 desc->fullbatt_vchkdrop_ms); fullbatt_handler()
879 if (is_polling_required(cm) && cm->desc->polling_interval_ms) misc_event_handler()
889 struct charger_desc *desc = cm->desc; charger_get_property() local
921 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); charger_get_property()
939 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge); charger_get_property()
963 * the battery voltage values and the thresholds given as desc charger_get_property()
972 if (desc->fullbatt_uV > 0 && uV >= desc->fullbatt_uV && charger_get_property()
995 cm->desc->psy_fuel_gauge); charger_get_property()
1093 if (cm->desc->polling_interval_ms == 0) cm_setup_timer()
1095 CM_MIN_VALID(wakeup_ms, cm->desc->polling_interval_ms); cm_setup_timer()
1233 struct charger_desc *desc = cm->desc; charger_manager_register_extcon() local
1239 for (i = 0; i < desc->num_charger_regulators; i++) { charger_manager_register_extcon()
1240 charger = &desc->charger_regulators[i]; charger_manager_register_extcon()
1309 struct charger_desc *desc = cm->desc; charger_externally_control_store() local
1326 for (i = 0; i < desc->num_charger_regulators; i++) { charger_externally_control_store()
1327 if (&desc->charger_regulators[i] != charger && charger_externally_control_store()
1328 !desc->charger_regulators[i].externally_control) { charger_externally_control_store()
1370 struct charger_desc *desc = cm->desc; charger_manager_register_sysfs() local
1379 for (i = 0; i < desc->num_charger_regulators; i++) { charger_manager_register_sysfs()
1380 charger = &desc->charger_regulators[i]; charger_manager_register_sysfs()
1417 if (!desc->charger_regulators[i].externally_control || charger_manager_register_sysfs()
1447 struct charger_desc *desc = cm->desc; cm_init_thermal_data() local
1459 cm->desc->measure_battery_temp = true; cm_init_thermal_data()
1462 if (ret && desc->thermal_zone) { cm_init_thermal_data()
1464 thermal_zone_get_zone_by_name(desc->thermal_zone); cm_init_thermal_data()
1472 cm->desc->measure_battery_temp = true; cm_init_thermal_data()
1476 if (cm->desc->measure_battery_temp) { cm_init_thermal_data()
1478 if (!desc->temp_max) cm_init_thermal_data()
1479 desc->temp_max = CM_DEFAULT_CHARGE_TEMP_MAX; cm_init_thermal_data()
1480 if (!desc->temp_diff) cm_init_thermal_data()
1481 desc->temp_diff = CM_DEFAULT_RECHARGE_TEMP_DIFF; cm_init_thermal_data()
1496 struct charger_desc *desc; of_cm_parse_desc() local
1502 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); of_cm_parse_desc()
1503 if (!desc) of_cm_parse_desc()
1506 of_property_read_string(np, "cm-name", &desc->psy_name); of_cm_parse_desc()
1509 desc->polling_mode = poll_mode; of_cm_parse_desc()
1512 &desc->polling_interval_ms); of_cm_parse_desc()
1515 &desc->fullbatt_vchkdrop_ms); of_cm_parse_desc()
1517 &desc->fullbatt_vchkdrop_uV); of_cm_parse_desc()
1518 of_property_read_u32(np, "cm-fullbatt-voltage", &desc->fullbatt_uV); of_cm_parse_desc()
1519 of_property_read_u32(np, "cm-fullbatt-soc", &desc->fullbatt_soc); of_cm_parse_desc()
1521 &desc->fullbatt_full_capacity); of_cm_parse_desc()
1524 desc->battery_present = battery_stat; of_cm_parse_desc()
1530 desc->psy_charger_stat = devm_kzalloc(dev, sizeof(char *) of_cm_parse_desc()
1532 if (desc->psy_charger_stat) { of_cm_parse_desc()
1536 i, &desc->psy_charger_stat[i]); of_cm_parse_desc()
1542 of_property_read_string(np, "cm-fuel-gauge", &desc->psy_fuel_gauge); of_cm_parse_desc()
1544 of_property_read_string(np, "cm-thermal-zone", &desc->thermal_zone); of_cm_parse_desc()
1546 of_property_read_u32(np, "cm-battery-cold", &desc->temp_min); of_cm_parse_desc()
1548 desc->temp_min *= -1; of_cm_parse_desc()
1549 of_property_read_u32(np, "cm-battery-hot", &desc->temp_max); of_cm_parse_desc()
1550 of_property_read_u32(np, "cm-battery-temp-diff", &desc->temp_diff); of_cm_parse_desc()
1553 &desc->charging_max_duration_ms); of_cm_parse_desc()
1555 &desc->discharging_max_duration_ms); of_cm_parse_desc()
1558 desc->num_charger_regulators = of_get_child_count(np); of_cm_parse_desc()
1559 if (desc->num_charger_regulators) { of_cm_parse_desc()
1564 * desc->num_charger_regulators, of_cm_parse_desc()
1569 desc->charger_regulators = chg_regs; of_cm_parse_desc()
1609 return desc;
1627 struct charger_desc *desc = cm_get_drv_data(pdev); charger_manager_probe() local
1635 if (IS_ERR(desc)) { charger_manager_probe()
1636 dev_err(&pdev->dev, "No platform data (desc) found\n"); charger_manager_probe()
1647 cm->desc = desc; charger_manager_probe()
1660 if (desc->fullbatt_uV == 0) { charger_manager_probe()
1663 if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) { charger_manager_probe()
1665 desc->fullbatt_vchkdrop_ms = 0; charger_manager_probe()
1666 desc->fullbatt_vchkdrop_uV = 0; charger_manager_probe()
1668 if (desc->fullbatt_soc == 0) { charger_manager_probe()
1671 if (desc->fullbatt_full_capacity == 0) { charger_manager_probe()
1675 if (!desc->charger_regulators || desc->num_charger_regulators < 1) { charger_manager_probe()
1680 if (!desc->psy_charger_stat || !desc->psy_charger_stat[0]) { charger_manager_probe()
1685 if (!desc->psy_fuel_gauge) { charger_manager_probe()
1691 while (desc->psy_charger_stat[i]) charger_manager_probe()
1695 for (i = 0; desc->psy_charger_stat[i]; i++) { charger_manager_probe()
1698 psy = power_supply_get_by_name(desc->psy_charger_stat[i]); charger_manager_probe()
1701 desc->psy_charger_stat[i]); charger_manager_probe()
1707 if (desc->polling_interval_ms == 0 || charger_manager_probe()
1708 msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) { charger_manager_probe()
1713 if (!desc->charging_max_duration_ms || charger_manager_probe()
1714 !desc->discharging_max_duration_ms) { charger_manager_probe()
1716 desc->charging_max_duration_ms = 0; charger_manager_probe()
1717 desc->discharging_max_duration_ms = 0; charger_manager_probe()
1724 if (!desc->psy_name) charger_manager_probe()
1727 strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX); charger_manager_probe()
1744 fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge); charger_manager_probe()
1747 desc->psy_fuel_gauge); charger_manager_probe()
1767 cm->desc->measure_battery_temp = false; charger_manager_probe()
1821 for (i = 0; i < desc->num_charger_regulators; i++) { charger_manager_probe()
1824 charger = &desc->charger_regulators[i]; charger_manager_probe()
1829 for (i = 0; i < desc->num_charger_regulators; i++) { charger_manager_probe()
1832 charger = &desc->charger_regulators[i]; charger_manager_probe()
1840 regulator_put(desc->charger_regulators[i].consumer); charger_manager_probe()
1851 struct charger_desc *desc = cm->desc; charger_manager_remove() local
1863 for (i = 0 ; i < desc->num_charger_regulators ; i++) { charger_manager_remove()
1865 = &desc->charger_regulators[i]; charger_manager_remove()
1872 for (i = 0 ; i < desc->num_charger_regulators ; i++) charger_manager_remove()
1873 regulator_put(desc->charger_regulators[i].consumer); charger_manager_remove()
2033 for (i = 0; cm->desc->psy_charger_stat[i]; i++) { find_power_supply()
2034 if (!strcmp(psy->desc->name, cm->desc->psy_charger_stat[i])) { find_power_supply()
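Most of the charger-manager hits are of_cm_parse_desc() filling struct charger_desc from optional DT properties; of_property_read_u32()/of_property_read_string() leave the output untouched when a key is absent, and the probe path later substitutes defaults for still-zero fields. A minimal sketch of that convention, with a hypothetical two-field desc and property names:

    #include <linux/of.h>
    #include <linux/types.h>

    struct my_desc {
            u32 polling_interval_ms;
            u32 temp_diff;
    };

    static void my_parse(struct device_node *np, struct my_desc *desc)
    {
            /* Optional keys: on failure the zeroed field is left alone... */
            of_property_read_u32(np, "my-poll-interval",
                                 &desc->polling_interval_ms);
            of_property_read_u32(np, "my-temp-diff", &desc->temp_diff);

            /* ...and zero later means "apply the driver default". */
            if (!desc->temp_diff)
                    desc->temp_diff = 100; /* hypothetical default */
    }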
H A Dpower_supply_core.c45 if (!supplier->desc->name) __power_supply_is_supplied_by()
48 if (!strcmp(supplier->desc->name, supply->supplied_from[i])) __power_supply_is_supplied_by()
51 if (!supply->desc->name) __power_supply_is_supplied_by()
54 if (!strcmp(supplier->supplied_to[i], supply->desc->name)) __power_supply_is_supplied_by()
67 if (pst->desc->external_power_changed) __power_supply_changed_work()
68 pst->desc->external_power_changed(pst); __power_supply_changed_work()
168 psy->desc->name, epsy->desc->name); __power_supply_populate_supplied_from()
169 psy->supplied_from[i-1] = (char *)epsy->desc->name; __power_supply_populate_supplied_from()
290 if (!epsy->desc->get_property(epsy, POWER_SUPPLY_PROP_ONLINE, __power_supply_am_i_supplied()
317 if (psy->desc->type != POWER_SUPPLY_TYPE_BATTERY) __power_supply_is_system_supplied()
318 if (!psy->desc->get_property(psy, POWER_SUPPLY_PROP_ONLINE, __power_supply_is_system_supplied()
347 psy->desc->type == POWER_SUPPLY_TYPE_BATTERY && power_supply_set_battery_charged()
348 psy->desc->set_charged) { power_supply_set_battery_charged()
349 psy->desc->set_charged(psy); power_supply_set_battery_charged()
362 return strcmp(psy->desc->name, name) == 0; power_supply_match_device_by_name()
497 return psy->desc->get_property(psy, psp, val); power_supply_get_property()
505 if (atomic_read(&psy->use_cnt) <= 0 || !psy->desc->set_property) power_supply_set_property()
508 return psy->desc->set_property(psy, psp, val); power_supply_set_property()
516 !psy->desc->property_is_writeable) power_supply_property_is_writeable()
519 return psy->desc->property_is_writeable(psy, psp); power_supply_property_is_writeable()
526 !psy->desc->external_power_changed) power_supply_external_power_changed()
529 psy->desc->external_power_changed(psy); power_supply_external_power_changed()
568 ret = psy->desc->get_property(psy, POWER_SUPPLY_PROP_TEMP, &val); power_supply_read_temp()
585 if (psy->desc->no_thermal) psy_register_thermal()
589 for (i = 0; i < psy->desc->num_properties; i++) { psy_register_thermal()
590 if (psy->desc->properties[i] == POWER_SUPPLY_PROP_TEMP) { psy_register_thermal()
591 psy->tzd = thermal_zone_device_register(psy->desc->name, psy_register_thermal()
615 ret = psy->desc->get_property(psy, ps_get_max_charge_cntl_limit()
631 ret = psy->desc->get_property(psy, ps_get_cur_chrage_cntl_limit()
648 ret = psy->desc->set_property(psy, ps_set_cur_charge_cntl_limit()
665 for (i = 0; i < psy->desc->num_properties; i++) { psy_register_cooler()
666 if (psy->desc->properties[i] == psy_register_cooler()
669 (char *)psy->desc->name, psy_register_cooler()
705 const struct power_supply_desc *desc, __power_supply_register()
715 __func__, desc->name); __power_supply_register()
730 psy->desc = desc; __power_supply_register()
738 rc = dev_set_name(dev, "%s", desc->name); __power_supply_register()
808 * @desc: Description of power supply, must be valid through whole
819 const struct power_supply_desc *desc, power_supply_register()
822 return __power_supply_register(parent, desc, cfg, true); power_supply_register()
830 * @desc: Description of power supply, must be valid through whole
842 const struct power_supply_desc *desc, power_supply_register_no_ws()
845 return __power_supply_register(parent, desc, cfg, false); power_supply_register_no_ws()
860 * @desc: Description of power supply, must be valid through whole
872 const struct power_supply_desc *desc, devm_power_supply_register()
881 psy = __power_supply_register(parent, desc, cfg, true); devm_power_supply_register()
896 * @desc: Description of power supply, must be valid through whole
908 const struct power_supply_desc *desc, devm_power_supply_register_no_ws()
917 psy = __power_supply_register(parent, desc, cfg, false); devm_power_supply_register_no_ws()
704 __power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg, bool ws) __power_supply_register() argument
818 power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg) power_supply_register() argument
841 power_supply_register_no_ws(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg) power_supply_register_no_ws() argument
871 devm_power_supply_register(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg) devm_power_supply_register() argument
907 devm_power_supply_register_no_ws(struct device *parent, const struct power_supply_desc *desc, const struct power_supply_config *cfg) devm_power_supply_register_no_ws() argument
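power_supply_core routes every operation through callbacks on the registrant's power_supply_desc, which is why nearly every hit above is a psy->desc-> dereference. For reference, a minimal provider against this API (names and values are invented; note the kerneldoc above insists the desc stay valid for the supply's whole lifetime):

    #include <linux/errno.h>
    #include <linux/kernel.h>
    #include <linux/power_supply.h>

    static enum power_supply_property my_props[] = {
            POWER_SUPPLY_PROP_ONLINE,
    };

    static int my_get_property(struct power_supply *psy,
                               enum power_supply_property psp,
                               union power_supply_propval *val)
    {
            switch (psp) {
            case POWER_SUPPLY_PROP_ONLINE:
                    val->intval = 1; /* always online in this sketch */
                    return 0;
            default:
                    return -EINVAL;
            }
    }

    static const struct power_supply_desc my_desc = {
            .name           = "my-charger",
            .type           = POWER_SUPPLY_TYPE_MAINS,
            .properties     = my_props,
            .num_properties = ARRAY_SIZE(my_props),
            .get_property   = my_get_property,
    };

    /* in probe: psy = power_supply_register(dev, &my_desc, NULL); */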
/linux-4.4.14/drivers/crypto/
H A Dpadlock-sha.c36 static int padlock_sha_init(struct shash_desc *desc) padlock_sha_init() argument
38 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha_init()
39 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); padlock_sha_init()
42 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; padlock_sha_init()
46 static int padlock_sha_update(struct shash_desc *desc, padlock_sha_update() argument
49 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha_update()
51 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; padlock_sha_update()
55 static int padlock_sha_export(struct shash_desc *desc, void *out) padlock_sha_export() argument
57 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha_export()
62 static int padlock_sha_import(struct shash_desc *desc, const void *in) padlock_sha_import() argument
64 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha_import()
65 struct padlock_sha_ctx *ctx = crypto_shash_ctx(desc->tfm); padlock_sha_import()
68 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; padlock_sha_import()
79 static int padlock_sha1_finup(struct shash_desc *desc, const u8 *in, padlock_sha1_finup() argument
88 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha1_finup()
95 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; padlock_sha1_finup()
138 static int padlock_sha1_final(struct shash_desc *desc, u8 *out) padlock_sha1_final() argument
142 return padlock_sha1_finup(desc, buf, 0, out); padlock_sha1_final()
145 static int padlock_sha256_finup(struct shash_desc *desc, const u8 *in, padlock_sha256_finup() argument
154 struct padlock_sha_desc *dctx = shash_desc_ctx(desc); padlock_sha256_finup()
161 dctx->fallback.flags = desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; padlock_sha256_finup()
204 static int padlock_sha256_final(struct shash_desc *desc, u8 *out) padlock_sha256_final() argument
208 return padlock_sha256_finup(desc, buf, 0, out); padlock_sha256_final()
294 static int padlock_sha1_init_nano(struct shash_desc *desc) padlock_sha1_init_nano() argument
296 struct sha1_state *sctx = shash_desc_ctx(desc); padlock_sha1_init_nano()
305 static int padlock_sha1_update_nano(struct shash_desc *desc, padlock_sha1_update_nano() argument
308 struct sha1_state *sctx = shash_desc_ctx(desc); padlock_sha1_update_nano()
359 static int padlock_sha1_final_nano(struct shash_desc *desc, u8 *out) padlock_sha1_final_nano() argument
361 struct sha1_state *state = (struct sha1_state *)shash_desc_ctx(desc); padlock_sha1_final_nano()
371 padlock_sha1_update_nano(desc, padding, padlen); padlock_sha1_final_nano()
374 padlock_sha1_update_nano(desc, (const u8 *)&bits, sizeof(bits)); padlock_sha1_final_nano()
382 static int padlock_sha256_init_nano(struct shash_desc *desc) padlock_sha256_init_nano() argument
384 struct sha256_state *sctx = shash_desc_ctx(desc); padlock_sha256_init_nano()
394 static int padlock_sha256_update_nano(struct shash_desc *desc, const u8 *data, padlock_sha256_update_nano() argument
397 struct sha256_state *sctx = shash_desc_ctx(desc); padlock_sha256_update_nano()
448 static int padlock_sha256_final_nano(struct shash_desc *desc, u8 *out) padlock_sha256_final_nano() argument
451 (struct sha256_state *)shash_desc_ctx(desc); padlock_sha256_final_nano()
461 padlock_sha256_update_nano(desc, padding, padlen); padlock_sha256_final_nano()
464 padlock_sha256_update_nano(desc, (const u8 *)&bits, sizeof(bits)); padlock_sha256_final_nano()
472 static int padlock_sha_export_nano(struct shash_desc *desc, padlock_sha_export_nano() argument
475 int statesize = crypto_shash_statesize(desc->tfm); padlock_sha_export_nano()
476 void *sctx = shash_desc_ctx(desc); padlock_sha_export_nano()
482 static int padlock_sha_import_nano(struct shash_desc *desc, padlock_sha_import_nano() argument
485 int statesize = crypto_shash_statesize(desc->tfm); padlock_sha_import_nano()
486 void *sctx = shash_desc_ctx(desc); padlock_sha_import_nano()
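Each padlock entry point receives a struct shash_desc that the crypto core sized and prepared for it (shash_desc_ctx() points at per-request state, desc->tfm at the transform). From the caller's side, a one-shot digest over the same API looks roughly like this, using the on-stack helper available in this tree:

    #include <crypto/hash.h>
    #include <linux/err.h>

    static int my_sha1(const u8 *data, unsigned int len, u8 *out)
    {
            struct crypto_shash *tfm;
            int err;

            tfm = crypto_alloc_shash("sha1", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            {
                    SHASH_DESC_ON_STACK(desc, tfm); /* room for descsize() state */

                    desc->tfm = tfm;
                    desc->flags = 0; /* or CRYPTO_TFM_REQ_MAY_SLEEP */
                    err = crypto_shash_digest(desc, data, len, out);
            }

            crypto_free_shash(tfm);
            return err;
    }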
/linux-4.4.14/drivers/net/ethernet/amd/xgbe/
H A DMakefile4 xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
/linux-4.4.14/drivers/platform/olpc/
H A Dolpc-ec.c69 struct ec_cmd_desc *desc = NULL; olpc_ec_worker() local
75 desc = list_first_entry(&ec->cmd_q, struct ec_cmd_desc, node); olpc_ec_worker()
76 list_del(&desc->node); olpc_ec_worker()
81 if (!desc) olpc_ec_worker()
86 desc->err = ec_driver->ec_cmd(desc->cmd, desc->inbuf, desc->inlen, olpc_ec_worker()
87 desc->outbuf, desc->outlen, ec_cb_arg); olpc_ec_worker()
91 complete(&desc->finished); olpc_ec_worker()
101 static void queue_ec_descriptor(struct ec_cmd_desc *desc, queue_ec_descriptor() argument
106 INIT_LIST_HEAD(&desc->node); queue_ec_descriptor()
109 list_add_tail(&desc->node, &ec->cmd_q); queue_ec_descriptor()
118 struct ec_cmd_desc desc; olpc_ec_cmd() local
133 desc.cmd = cmd; olpc_ec_cmd()
134 desc.inbuf = inbuf; olpc_ec_cmd()
135 desc.outbuf = outbuf; olpc_ec_cmd()
136 desc.inlen = inlen; olpc_ec_cmd()
137 desc.outlen = outlen; olpc_ec_cmd()
138 desc.err = 0; olpc_ec_cmd()
139 init_completion(&desc.finished); olpc_ec_cmd()
141 queue_ec_descriptor(&desc, ec); olpc_ec_cmd()
144 wait_for_completion(&desc.finished); olpc_ec_cmd()
147 return desc.err; olpc_ec_cmd()
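olpc_ec_cmd() is a textbook submit-and-wait queue: the caller builds an ec_cmd_desc on its stack, embeds a struct completion, queues the node, and sleeps until the worker fills desc->err and completes it. The handshake reduced to its parts, with hypothetical names:

    #include <linux/completion.h>
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct my_cmd_desc {
            struct list_head node;
            int err;                     /* filled in by the worker */
            struct completion finished;
    };

    static LIST_HEAD(my_cmd_q);
    static DEFINE_SPINLOCK(my_cmd_q_lock);

    static int my_submit_and_wait(struct my_cmd_desc *desc)
    {
            init_completion(&desc->finished);

            spin_lock(&my_cmd_q_lock);
            list_add_tail(&desc->node, &my_cmd_q);
            spin_unlock(&my_cmd_q_lock);
            /* ...wake the worker thread here... */

            wait_for_completion(&desc->finished); /* worker calls complete() */
            return desc->err;
    }

Stack allocation is safe precisely because wait_for_completion() pins the frame until the worker is done with the node.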
/linux-4.4.14/drivers/base/
H A Dplatform-msi.c46 static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc) platform_msi_calc_hwirq() argument
50 devid = desc->platform.msi_priv_data->devid; platform_msi_calc_hwirq()
52 return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index; platform_msi_calc_hwirq()
55 static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc) platform_msi_set_desc() argument
57 arg->desc = desc; platform_msi_set_desc()
58 arg->hwirq = platform_msi_calc_hwirq(desc); platform_msi_set_desc()
88 struct msi_desc *desc = irq_data_get_msi_desc(data); platform_msi_write_msg() local
91 priv_data = desc->platform.msi_priv_data; platform_msi_write_msg()
93 priv_data->write_msg(desc, msg); platform_msi_write_msg()
115 struct msi_desc *desc, *tmp; platform_msi_free_descs() local
117 list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) { list_for_each_entry_safe()
118 list_del(&desc->list); list_for_each_entry_safe()
119 free_msi_entry(desc); list_for_each_entry_safe()
130 struct msi_desc *desc; platform_msi_alloc_descs() local
132 desc = alloc_msi_entry(dev); platform_msi_alloc_descs()
133 if (!desc) platform_msi_alloc_descs()
136 desc->platform.msi_priv_data = data; platform_msi_alloc_descs()
137 desc->platform.msi_index = i; platform_msi_alloc_descs()
138 desc->nvec_used = 1; platform_msi_alloc_descs()
140 list_add_tail(&desc->list, dev_to_msi_list(dev)); platform_msi_alloc_descs()
256 struct msi_desc *desc; platform_msi_domain_free_irqs() local
258 desc = first_msi_entry(dev); platform_msi_domain_free_irqs()
259 if (desc) { platform_msi_domain_free_irqs()
262 data = desc->platform.msi_priv_data; platform_msi_domain_free_irqs()
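platform_msi_alloc_descs() above creates one msi_desc per vector and links it onto the device's MSI list; anything that later needs those descriptors walks the same list. A sketch of the consumer side using the iterator from <linux/msi.h> (the dev_info() payload is illustrative only):

    #include <linux/device.h>
    #include <linux/msi.h>

    static void my_dump_platform_msi(struct device *dev)
    {
            struct msi_desc *desc;

            for_each_msi_entry(desc, dev)
                    dev_info(dev, "msi index %u -> virq %u\n",
                             desc->platform.msi_index, desc->irq);
    }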
/linux-4.4.14/drivers/clk/mvebu/
H A Dcommon.c110 const struct coreclk_soc_desc *desc) mvebu_coreclk_setup()
123 clk_data.clk_num = 2 + desc->num_ratios; mvebu_coreclk_setup()
126 if (desc->get_refclk_freq) mvebu_coreclk_setup()
139 rate = desc->get_tclk_freq(base); mvebu_coreclk_setup()
147 rate = desc->get_cpu_freq(base); mvebu_coreclk_setup()
149 if (desc->is_sscg_enabled && desc->fix_sscg_deviation mvebu_coreclk_setup()
150 && desc->is_sscg_enabled(base)) mvebu_coreclk_setup()
151 rate = desc->fix_sscg_deviation(rate); mvebu_coreclk_setup()
158 for (n = 0; n < desc->num_ratios; n++) { mvebu_coreclk_setup()
159 const char *rclk_name = desc->ratios[n].name; mvebu_coreclk_setup()
164 desc->get_clk_ratio(base, desc->ratios[n].id, &mult, &div); mvebu_coreclk_setup()
171 if (desc->get_refclk_freq) { mvebu_coreclk_setup()
174 2 + desc->num_ratios, &name); mvebu_coreclk_setup()
175 rate = desc->get_refclk_freq(base); mvebu_coreclk_setup()
176 clk_data.clks[2 + desc->num_ratios] = mvebu_coreclk_setup()
179 WARN_ON(IS_ERR(clk_data.clks[2 + desc->num_ratios])); mvebu_coreclk_setup()
240 const struct clk_gating_soc_desc *desc) mvebu_clk_gating_setup()
272 for (n = 0; desc[n].name;) mvebu_clk_gating_setup()
283 (desc[n].parent) ? desc[n].parent : default_parent; mvebu_clk_gating_setup()
284 ctrl->gates[n] = clk_register_gate(NULL, desc[n].name, parent, mvebu_clk_gating_setup()
285 desc[n].flags, base, desc[n].bit_idx, mvebu_clk_gating_setup()
109 mvebu_coreclk_setup(struct device_node *np, const struct coreclk_soc_desc *desc) mvebu_coreclk_setup() argument
239 mvebu_clk_gating_setup(struct device_node *np, const struct clk_gating_soc_desc *desc) mvebu_clk_gating_setup() argument
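mvebu_clk_gating_setup() walks a null-terminated desc table and registers one gate clock per row via the generic helper; the row supplies name, optional parent, flags and bit index, while the register base and shared spinlock come from the caller. One row in isolation, with invented clock names and bit position:

    #include <linux/clk-provider.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(my_gating_lock);

    static struct clk *my_register_gate_row(void __iomem *base)
    {
            /* name, parent, flags, reg, bit_idx, gate_flags, lock */
            return clk_register_gate(NULL, "sata0", "tclk", 0,
                                     base, 15, 0, &my_gating_lock);
    }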
H A Dclk-corediv.c58 const struct clk_corediv_desc *desc; member in struct:clk_corediv
80 const struct clk_corediv_desc *desc = corediv->desc; clk_corediv_is_enabled() local
81 u32 enable_mask = BIT(desc->fieldbit) << soc_desc->enable_bit_offset; clk_corediv_is_enabled()
90 const struct clk_corediv_desc *desc = corediv->desc; clk_corediv_enable() local
97 reg |= (BIT(desc->fieldbit) << soc_desc->enable_bit_offset); clk_corediv_enable()
109 const struct clk_corediv_desc *desc = corediv->desc; clk_corediv_disable() local
116 reg &= ~(BIT(desc->fieldbit) << soc_desc->enable_bit_offset); clk_corediv_disable()
127 const struct clk_corediv_desc *desc = corediv->desc; clk_corediv_recalc_rate() local
131 div = (reg >> desc->offset) & desc->mask; clk_corediv_recalc_rate()
155 const struct clk_corediv_desc *desc = corediv->desc; clk_corediv_set_rate() local
165 reg &= ~(desc->mask << desc->offset); clk_corediv_set_rate()
166 reg |= (div & desc->mask) << desc->offset; clk_corediv_set_rate()
170 reg = readl(corediv->reg) | BIT(desc->fieldbit); clk_corediv_set_rate()
278 corediv[i].desc = soc_desc->descs + i; mvebu_corediv_clk_init()
/linux-4.4.14/arch/sh/boards/mach-x3proto/
H A Dsetup.c131 .desc = "key44",
136 .desc = "key43",
141 .desc = "key42",
145 .desc = "key41",
149 .desc = "key34",
153 .desc = "key33",
157 .desc = "key32",
161 .desc = "key31",
165 .desc = "key24",
169 .desc = "key23",
173 .desc = "key22",
177 .desc = "key21",
181 .desc = "key14",
185 .desc = "key13",
189 .desc = "key12",
193 .desc = "key11",
/linux-4.4.14/sound/usb/
H A Dhelper.h21 #define get_iface_desc(iface) (&(iface)->desc)
22 #define get_endpoint(alt,ep) (&(alt)->endpoint[ep].desc)
23 #define get_ep_desc(ep) (&(ep)->desc)
24 #define get_cfg_desc(cfg) (&(cfg)->desc)
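These four macros just reach through the USB core's host-side wrapper structs to the raw descriptors they embed. Expanded by hand against a struct usb_host_interface (alt is assumed to come from iface->cur_altsetting or similar):

    #include <linux/usb.h>

    static u8 my_first_ep_addr(struct usb_host_interface *alt)
    {
            /* get_endpoint(alt, 0)->bEndpointAddress, macro expanded */
            return alt->endpoint[0].desc.bEndpointAddress;
    }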
/linux-4.4.14/drivers/staging/lustre/lustre/libcfs/linux/
H A Dlinux-crypto-adler.c67 static int adler32_init(struct shash_desc *desc) adler32_init() argument
69 u32 *mctx = crypto_shash_ctx(desc->tfm); adler32_init()
70 u32 *cksump = shash_desc_ctx(desc); adler32_init()
77 static int adler32_update(struct shash_desc *desc, const u8 *data, adler32_update() argument
80 u32 *cksump = shash_desc_ctx(desc); adler32_update()
93 static int adler32_finup(struct shash_desc *desc, const u8 *data, adler32_finup() argument
96 return __adler32_finup(shash_desc_ctx(desc), data, len, out); adler32_finup()
99 static int adler32_final(struct shash_desc *desc, u8 *out) adler32_final() argument
101 u32 *cksump = shash_desc_ctx(desc); adler32_final()
107 static int adler32_digest(struct shash_desc *desc, const u8 *data, adler32_digest() argument
110 return __adler32_finup(crypto_shash_ctx(desc->tfm), data, len, adler32_digest()
/linux-4.4.14/drivers/gpu/drm/atmel-hlcdc/
H A Datmel_hlcdc_layer.c75 bitmap_clear(slot->updated_configs, 0, layer->desc->nconfigs); atmel_hlcdc_layer_update_reset()
77 sizeof(*slot->configs) * layer->desc->nconfigs); atmel_hlcdc_layer_update_reset()
88 const struct atmel_hlcdc_layer_desc *desc = layer->desc; atmel_hlcdc_layer_update_apply() local
103 for_each_set_bit(cfg, slot->updated_configs, layer->desc->nconfigs) { atmel_hlcdc_layer_update_apply()
105 desc->regs_offset + atmel_hlcdc_layer_update_apply()
125 desc->regs_offset + atmel_hlcdc_layer_update_apply()
129 desc->regs_offset + atmel_hlcdc_layer_update_apply()
133 desc->regs_offset + atmel_hlcdc_layer_update_apply()
149 desc->regs_offset + atmel_hlcdc_layer_update_apply()
169 desc->regs_offset + ATMEL_HLCDC_LAYER_CHER, atmel_hlcdc_layer_update_apply()
180 const struct atmel_hlcdc_layer_desc *desc = layer->desc; atmel_hlcdc_layer_irq() local
191 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IMR, &imr); atmel_hlcdc_layer_irq()
192 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr); atmel_hlcdc_layer_irq()
277 desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, atmel_hlcdc_layer_irq()
306 const struct atmel_hlcdc_layer_desc *desc = layer->desc; atmel_hlcdc_layer_disable() local
313 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, atmel_hlcdc_layer_disable()
318 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, &isr); atmel_hlcdc_layer_disable()
398 layer->desc->nconfigs * sizeof(u32)); atmel_hlcdc_layer_update_start()
401 DIV_ROUND_UP(layer->desc->nconfigs, atmel_hlcdc_layer_update_start()
414 layer->desc->regs_offset + atmel_hlcdc_layer_update_start()
417 layer->desc->nconfigs); atmel_hlcdc_layer_update_start()
486 if (cfg >= layer->desc->nconfigs) atmel_hlcdc_layer_update_cfg()
569 const struct atmel_hlcdc_layer_desc *desc) atmel_hlcdc_layer_update_init()
576 updated_size = DIV_ROUND_UP(desc->nconfigs, atmel_hlcdc_layer_update_init()
581 ((desc->nconfigs * sizeof(u32)) + atmel_hlcdc_layer_update_init()
591 buffer += desc->nconfigs * sizeof(u32); atmel_hlcdc_layer_update_init()
602 const struct atmel_hlcdc_layer_desc *desc) atmel_hlcdc_layer_init()
612 layer->desc = desc; atmel_hlcdc_layer_init()
614 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, atmel_hlcdc_layer_init()
616 for (i = 0; i < desc->formats->nformats; i++) { atmel_hlcdc_layer_init()
617 int nplanes = drm_format_num_planes(desc->formats->formats[i]); atmel_hlcdc_layer_init()
624 drm_flip_work_init(&layer->gc, desc->name, atmel_hlcdc_layer_init()
630 ret = atmel_hlcdc_layer_update_init(dev, layer, desc); atmel_hlcdc_layer_init()
635 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR, atmel_hlcdc_layer_init()
637 regmap_read(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_ISR, atmel_hlcdc_layer_init()
648 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IER, tmp); atmel_hlcdc_layer_init()
656 const struct atmel_hlcdc_layer_desc *desc = layer->desc; atmel_hlcdc_layer_cleanup() local
659 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_IDR, atmel_hlcdc_layer_cleanup()
661 regmap_write(regmap, desc->regs_offset + ATMEL_HLCDC_LAYER_CHDR, atmel_hlcdc_layer_cleanup()
567 atmel_hlcdc_layer_update_init(struct drm_device *dev, struct atmel_hlcdc_layer *layer, const struct atmel_hlcdc_layer_desc *desc) atmel_hlcdc_layer_update_init() argument
600 atmel_hlcdc_layer_init(struct drm_device *dev, struct atmel_hlcdc_layer *layer, const struct atmel_hlcdc_layer_desc *desc) atmel_hlcdc_layer_init() argument
/linux-4.4.14/arch/sparc/crypto/
H A Dcrc32c_glue.c43 static int crc32c_sparc64_init(struct shash_desc *desc) crc32c_sparc64_init() argument
45 u32 *mctx = crypto_shash_ctx(desc->tfm); crc32c_sparc64_init()
46 u32 *crcp = shash_desc_ctx(desc); crc32c_sparc64_init()
69 static int crc32c_sparc64_update(struct shash_desc *desc, const u8 *data, crc32c_sparc64_update() argument
72 u32 *crcp = shash_desc_ctx(desc); crc32c_sparc64_update()
90 static int crc32c_sparc64_finup(struct shash_desc *desc, const u8 *data, crc32c_sparc64_finup() argument
93 return __crc32c_sparc64_finup(shash_desc_ctx(desc), data, len, out); crc32c_sparc64_finup()
96 static int crc32c_sparc64_final(struct shash_desc *desc, u8 *out) crc32c_sparc64_final() argument
98 u32 *crcp = shash_desc_ctx(desc); crc32c_sparc64_final()
104 static int crc32c_sparc64_digest(struct shash_desc *desc, const u8 *data, crc32c_sparc64_digest() argument
107 return __crc32c_sparc64_finup(crypto_shash_ctx(desc->tfm), data, len, crc32c_sparc64_digest()
H A Dsha256_glue.c29 static int sha224_sparc64_init(struct shash_desc *desc) sha224_sparc64_init() argument
31 struct sha256_state *sctx = shash_desc_ctx(desc); sha224_sparc64_init()
45 static int sha256_sparc64_init(struct shash_desc *desc) sha256_sparc64_init() argument
47 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_init()
82 static int sha256_sparc64_update(struct shash_desc *desc, const u8 *data, sha256_sparc64_update() argument
85 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_update()
98 static int sha256_sparc64_final(struct shash_desc *desc, u8 *out) sha256_sparc64_final() argument
100 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_final()
131 static int sha224_sparc64_final(struct shash_desc *desc, u8 *hash) sha224_sparc64_final() argument
135 sha256_sparc64_final(desc, D); sha224_sparc64_final()
143 static int sha256_sparc64_export(struct shash_desc *desc, void *out) sha256_sparc64_export() argument
145 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_export()
151 static int sha256_sparc64_import(struct shash_desc *desc, const void *in) sha256_sparc64_import() argument
153 struct sha256_state *sctx = shash_desc_ctx(desc); sha256_sparc64_import()
H A Ddes_glue.c93 static int __ecb_crypt(struct blkcipher_desc *desc, __ecb_crypt() argument
97 struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ecb_crypt()
102 err = blkcipher_walk_virt(desc, &walk); __ecb_crypt()
103 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; __ecb_crypt()
118 err = blkcipher_walk_done(desc, &walk, nbytes); __ecb_crypt()
124 static int ecb_encrypt(struct blkcipher_desc *desc, ecb_encrypt() argument
128 return __ecb_crypt(desc, dst, src, nbytes, true); ecb_encrypt()
131 static int ecb_decrypt(struct blkcipher_desc *desc, ecb_decrypt() argument
135 return __ecb_crypt(desc, dst, src, nbytes, false); ecb_decrypt()
141 static int cbc_encrypt(struct blkcipher_desc *desc, cbc_encrypt() argument
145 struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_encrypt()
150 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
151 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_encrypt()
163 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_encrypt()
172 static int cbc_decrypt(struct blkcipher_desc *desc, cbc_decrypt() argument
176 struct des_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_decrypt()
181 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
182 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_decrypt()
194 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_decrypt()
261 static int __ecb3_crypt(struct blkcipher_desc *desc, __ecb3_crypt() argument
265 struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ecb3_crypt()
271 err = blkcipher_walk_virt(desc, &walk); __ecb3_crypt()
272 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; __ecb3_crypt()
289 err = blkcipher_walk_done(desc, &walk, nbytes); __ecb3_crypt()
295 static int ecb3_encrypt(struct blkcipher_desc *desc, ecb3_encrypt() argument
299 return __ecb3_crypt(desc, dst, src, nbytes, true); ecb3_encrypt()
302 static int ecb3_decrypt(struct blkcipher_desc *desc, ecb3_decrypt() argument
306 return __ecb3_crypt(desc, dst, src, nbytes, false); ecb3_decrypt()
313 static int cbc3_encrypt(struct blkcipher_desc *desc, cbc3_encrypt() argument
317 struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc3_encrypt()
323 err = blkcipher_walk_virt(desc, &walk); cbc3_encrypt()
324 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc3_encrypt()
339 err = blkcipher_walk_done(desc, &walk, nbytes); cbc3_encrypt()
349 static int cbc3_decrypt(struct blkcipher_desc *desc, cbc3_decrypt() argument
353 struct des3_ede_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc3_decrypt()
359 err = blkcipher_walk_virt(desc, &walk); cbc3_decrypt()
360 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc3_decrypt()
375 err = blkcipher_walk_done(desc, &walk, nbytes); cbc3_decrypt()
H A Dcamellia_glue.c85 static int __ecb_crypt(struct blkcipher_desc *desc, __ecb_crypt() argument
89 struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); __ecb_crypt()
100 err = blkcipher_walk_virt(desc, &walk); __ecb_crypt()
101 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; __ecb_crypt()
120 err = blkcipher_walk_done(desc, &walk, nbytes); __ecb_crypt()
126 static int ecb_encrypt(struct blkcipher_desc *desc, ecb_encrypt() argument
130 return __ecb_crypt(desc, dst, src, nbytes, true); ecb_encrypt()
133 static int ecb_decrypt(struct blkcipher_desc *desc, ecb_decrypt() argument
137 return __ecb_crypt(desc, dst, src, nbytes, false); ecb_decrypt()
148 static int cbc_encrypt(struct blkcipher_desc *desc, cbc_encrypt() argument
152 struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_encrypt()
163 err = blkcipher_walk_virt(desc, &walk); cbc_encrypt()
164 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_encrypt()
181 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_encrypt()
187 static int cbc_decrypt(struct blkcipher_desc *desc, cbc_decrypt() argument
191 struct camellia_sparc64_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_decrypt()
202 err = blkcipher_walk_virt(desc, &walk); cbc_decrypt()
203 desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; cbc_decrypt()
220 err = blkcipher_walk_done(desc, &walk, nbytes); cbc_decrypt()
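All four sparc64 glue files repeat the same blkcipher walk skeleton: map a chunk with blkcipher_walk_virt(), process the whole blocks in walk.nbytes, and return the remainder through blkcipher_walk_done(), which advances the walk. The loop in generic form (process_blocks() is a placeholder for the cipher primitive; 16 is an assumed block size):

    #include <crypto/algapi.h>

    void process_blocks(const u8 *src, u8 *dst, unsigned int len);

    static int my_ecb_crypt(struct blkcipher_desc *desc,
                            struct scatterlist *dst, struct scatterlist *src,
                            unsigned int nbytes)
    {
            struct blkcipher_walk walk;
            int err;

            blkcipher_walk_init(&walk, dst, src, nbytes);
            err = blkcipher_walk_virt(desc, &walk);

            while ((nbytes = walk.nbytes)) {
                    unsigned int chunk = nbytes & ~15U; /* whole blocks only */

                    if (chunk)
                            process_blocks(walk.src.virt.addr,
                                           walk.dst.virt.addr, chunk);
                    /* report leftover; the walk advances (or ends) here */
                    err = blkcipher_walk_done(desc, &walk, nbytes - chunk);
            }
            return err;
    }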
/linux-4.4.14/drivers/dma/dw/
H A Dcore.c85 struct dw_desc *desc, *_desc; dwc_desc_get() local
91 list_for_each_entry_safe(desc, _desc, &dwc->free_list, desc_node) { dwc_desc_get()
93 if (async_tx_test_ack(&desc->txd)) { dwc_desc_get()
94 list_del(&desc->desc_node); dwc_desc_get()
95 ret = desc; dwc_desc_get()
98 dev_dbg(chan2dev(&dwc->chan), "desc %p not ACKed\n", desc); dwc_desc_get()
109 * `desc' must not be on any lists.
111 static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc) dwc_desc_put() argument
115 if (desc) { dwc_desc_put()
119 list_for_each_entry(child, &desc->tx_list, desc_node) dwc_desc_put()
121 "moving child desc %p to freelist\n", dwc_desc_put()
123 list_splice_init(&desc->tx_list, &dwc->free_list); dwc_desc_put()
124 dev_vdbg(chan2dev(&dwc->chan), "moving desc %p to freelist\n", desc); dwc_desc_put()
125 list_add(&desc->desc_node, &dwc->free_list); dwc_desc_put()
191 struct dw_desc *desc) dwc_do_single_block()
200 ctllo = desc->lli.ctllo | DWC_CTLL_INT_EN; dwc_do_single_block()
202 channel_writel(dwc, SAR, desc->lli.sar); dwc_do_single_block()
203 channel_writel(dwc, DAR, desc->lli.dar); dwc_do_single_block()
205 channel_writel(dwc, CTL_HI, desc->lli.ctlhi); dwc_do_single_block()
260 struct dw_desc *desc; dwc_dostart_first_queued() local
266 desc = dwc_first_active(dwc); dwc_dostart_first_queued()
267 dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie); dwc_dostart_first_queued()
268 dwc_dostart(dwc, desc); dwc_dostart_first_queued()
274 dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc, dwc_descriptor_complete() argument
279 struct dma_async_tx_descriptor *txd = &desc->txd; dwc_descriptor_complete()
293 list_for_each_entry(child, &desc->tx_list, desc_node) dwc_descriptor_complete()
295 async_tx_ack(&desc->txd); dwc_descriptor_complete()
297 list_splice_init(&desc->tx_list, &dwc->free_list); dwc_descriptor_complete()
298 list_move(&desc->desc_node, &dwc->free_list); dwc_descriptor_complete()
309 struct dw_desc *desc, *_desc; dwc_complete_all() local
331 list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_complete_all()
332 dwc_descriptor_complete(dwc, desc, true); dwc_complete_all()
347 struct dw_desc *desc, *_desc; dwc_scan_descriptors() local
367 desc = dwc_first_active(dwc); dwc_scan_descriptors()
369 head = &desc->tx_list; dwc_scan_descriptors()
371 /* Update desc to reflect last sent one */ dwc_scan_descriptors()
373 desc = to_dw_desc(active->prev); dwc_scan_descriptors()
375 dwc->residue -= desc->len; dwc_scan_descriptors()
412 list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) { dwc_scan_descriptors()
414 dwc->residue = desc->total_len; dwc_scan_descriptors()
417 if (desc->txd.phys == llp) { dwc_scan_descriptors()
423 if (desc->lli.llp == llp) { dwc_scan_descriptors()
430 dwc->residue -= desc->len; dwc_scan_descriptors()
431 list_for_each_entry(child, &desc->tx_list, desc_node) { dwc_scan_descriptors()
446 dwc_descriptor_complete(dwc, desc, true); dwc_scan_descriptors()
462 dev_crit(chan2dev(&dwc->chan), " desc: s0x%x d0x%x l0x%x c0x%x:%x\n", dwc_dump_lli()
574 dwc_dump_lli(dwc, &dwc->cdesc->desc[i]->lli); dwc_handle_cyclic()
658 struct dw_desc *desc = txd_to_dw_desc(tx); dwc_tx_submit() local
672 dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n", __func__, desc->txd.cookie); dwc_tx_submit()
673 list_add_tail(&desc->desc_node, &dwc->queue); dwc_tx_submit()
686 struct dw_desc *desc; dwc_prep_dma_memcpy() local
725 desc = dwc_desc_get(dwc); dwc_prep_dma_memcpy()
726 if (!desc) dwc_prep_dma_memcpy()
729 desc->lli.sar = src + offset; dwc_prep_dma_memcpy()
730 desc->lli.dar = dest + offset; dwc_prep_dma_memcpy()
731 desc->lli.ctllo = ctllo; dwc_prep_dma_memcpy()
732 desc->lli.ctlhi = xfer_count; dwc_prep_dma_memcpy()
733 desc->len = xfer_count << src_width; dwc_prep_dma_memcpy()
736 first = desc; dwc_prep_dma_memcpy()
738 prev->lli.llp = desc->txd.phys; dwc_prep_dma_memcpy()
739 list_add_tail(&desc->desc_node, dwc_prep_dma_memcpy()
742 prev = desc; dwc_prep_dma_memcpy()
803 struct dw_desc *desc; for_each_sg() local
813 desc = dwc_desc_get(dwc); for_each_sg()
814 if (!desc) for_each_sg()
817 desc->lli.sar = mem; for_each_sg()
818 desc->lli.dar = reg; for_each_sg()
819 desc->lli.ctllo = ctllo | DWC_CTLL_SRC_WIDTH(mem_width); for_each_sg()
829 desc->lli.ctlhi = dlen >> mem_width; for_each_sg()
830 desc->len = dlen; for_each_sg()
833 first = desc; for_each_sg()
835 prev->lli.llp = desc->txd.phys; for_each_sg()
836 list_add_tail(&desc->desc_node, for_each_sg()
839 prev = desc; for_each_sg()
860 struct dw_desc *desc; for_each_sg() local
870 desc = dwc_desc_get(dwc); for_each_sg()
871 if (!desc) for_each_sg()
874 desc->lli.sar = reg; for_each_sg()
875 desc->lli.dar = mem; for_each_sg()
876 desc->lli.ctllo = ctllo | DWC_CTLL_DST_WIDTH(mem_width); for_each_sg()
885 desc->lli.ctlhi = dlen >> reg_width; for_each_sg()
886 desc->len = dlen; for_each_sg()
889 first = desc; for_each_sg()
891 prev->lli.llp = desc->txd.phys; for_each_sg()
892 list_add_tail(&desc->desc_node, for_each_sg()
895 prev = desc; for_each_sg()
1026 struct dw_desc *desc, *_desc; dwc_terminate_all() local
1045 list_for_each_entry_safe(desc, _desc, &list, desc_node) dwc_terminate_all()
1046 dwc_descriptor_complete(dwc, desc, false); dwc_terminate_all()
1131 struct dw_desc *desc; dwc_alloc_chan_resources() local
1171 desc = dma_pool_alloc(dw->desc_pool, GFP_ATOMIC, &phys); dwc_alloc_chan_resources()
1172 if (!desc) dwc_alloc_chan_resources()
1175 memset(desc, 0, sizeof(struct dw_desc)); dwc_alloc_chan_resources()
1177 INIT_LIST_HEAD(&desc->tx_list); dwc_alloc_chan_resources()
1178 dma_async_tx_descriptor_init(&desc->txd, chan); dwc_alloc_chan_resources()
1179 desc->txd.tx_submit = dwc_tx_submit; dwc_alloc_chan_resources()
1180 desc->txd.flags = DMA_CTRL_ACK; dwc_alloc_chan_resources()
1181 desc->txd.phys = phys; dwc_alloc_chan_resources()
1183 dwc_desc_put(dwc, desc); dwc_alloc_chan_resources()
1205 struct dw_desc *desc, *_desc; dwc_free_chan_resources() local
1242 list_for_each_entry_safe(desc, _desc, &list, desc_node) { dwc_free_chan_resources()
1243 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc); dwc_free_chan_resources() local
1244 dma_pool_free(dw->desc_pool, desc, desc->txd.phys); dwc_free_chan_resources()
1275 dwc_dostart(dwc, dwc->cdesc->desc[0]); dw_dma_cyclic_start()
1322 struct dw_desc *desc; dw_dma_cyclic_prep() local
1384 cdesc->desc = kzalloc(sizeof(struct dw_desc *) * periods, GFP_KERNEL); dw_dma_cyclic_prep()
1385 if (!cdesc->desc) dw_dma_cyclic_prep()
1389 desc = dwc_desc_get(dwc); dw_dma_cyclic_prep()
1390 if (!desc) dw_dma_cyclic_prep()
1395 desc->lli.dar = sconfig->dst_addr; dw_dma_cyclic_prep()
1396 desc->lli.sar = buf_addr + (period_len * i); dw_dma_cyclic_prep()
1397 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) dw_dma_cyclic_prep()
1404 desc->lli.ctllo |= sconfig->device_fc ? dw_dma_cyclic_prep()
1410 desc->lli.dar = buf_addr + (period_len * i); dw_dma_cyclic_prep()
1411 desc->lli.sar = sconfig->src_addr; dw_dma_cyclic_prep()
1412 desc->lli.ctllo = (DWC_DEFAULT_CTLLO(chan) dw_dma_cyclic_prep()
1419 desc->lli.ctllo |= sconfig->device_fc ? dw_dma_cyclic_prep()
1428 desc->lli.ctlhi = (period_len >> reg_width); dw_dma_cyclic_prep()
1429 cdesc->desc[i] = desc; dw_dma_cyclic_prep()
1432 last->lli.llp = desc->txd.phys; dw_dma_cyclic_prep()
1434 last = desc; dw_dma_cyclic_prep()
1438 last->lli.llp = cdesc->desc[0]->txd.phys; dw_dma_cyclic_prep()
1451 dwc_desc_put(dwc, cdesc->desc[i]); dw_dma_cyclic_prep()
1488 dwc_desc_put(dwc, cdesc->desc[i]); dw_dma_cyclic_free()
1490 kfree(cdesc->desc); dw_dma_cyclic_free()
190 dwc_do_single_block(struct dw_dma_chan *dwc, struct dw_desc *desc) dwc_do_single_block() argument
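The dw core hits trace the canonical linked-list build loop (dwc_prep_dma_memcpy() and both for_each_sg() bodies above): each new desc is chained to the hardware by writing its bus address into the previous desc's lli.llp, and to software by queueing it on the first desc's tx_list. The chain step isolated, with hypothetical field names:

    #include <linux/list.h>
    #include <linux/types.h>

    struct my_lli {
            u32 sar, dar, ctl, llp;   /* llp = bus address of next lli */
    };

    struct my_desc {
            struct my_lli lli;
            dma_addr_t phys;          /* bus address of this lli */
            struct list_head desc_node;
            struct list_head tx_list; /* children hang off the first desc */
    };

    /* Chain desc behind prev; first->tx_list must already be initialized. */
    static struct my_desc *my_chain(struct my_desc *first,
                                    struct my_desc *prev,
                                    struct my_desc *desc)
    {
            if (prev) {
                    prev->lli.llp = desc->phys;   /* hardware-visible link */
                    list_add_tail(&desc->desc_node, &first->tx_list);
            }
            return desc;                          /* becomes the new prev */
    }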
/linux-4.4.14/include/asm-generic/
H A Dmsi.h14 * @desc: Pointer to msi descriptor
22 struct msi_desc *desc; member in struct:msi_alloc_info
/linux-4.4.14/arch/sh/kernel/cpu/irq/
H A Dipr.c55 void register_ipr_controller(struct ipr_desc *desc) register_ipr_controller() argument
59 desc->chip.irq_mask = disable_ipr_irq; register_ipr_controller()
60 desc->chip.irq_unmask = enable_ipr_irq; register_ipr_controller()
62 for (i = 0; i < desc->nr_irqs; i++) { register_ipr_controller()
63 struct ipr_data *p = desc->ipr_data + i; register_ipr_controller()
66 BUG_ON(p->ipr_idx >= desc->nr_offsets); register_ipr_controller()
67 BUG_ON(!desc->ipr_offsets[p->ipr_idx]); register_ipr_controller()
77 irq_set_chip_and_handler_name(p->irq, &desc->chip, register_ipr_controller()
/linux-4.4.14/drivers/media/pci/solo6x10/
H A Dsolo6x10-p2m.c65 struct solo_p2m_desc *desc, dma_addr_t desc_dma, solo_p2m_dma_desc()
90 /* For 6010 with more than one desc, we can do a one-shot */ solo_p2m_dma_desc()
99 /* For single descriptors and 6110, we need to run each desc */ solo_p2m_dma_desc()
102 p2m_dev->descs = desc; solo_p2m_dma_desc()
105 desc[1].dma_addr); solo_p2m_dma_desc()
107 desc[1].ext_addr); solo_p2m_dma_desc()
109 desc[1].cfg); solo_p2m_dma_desc()
111 desc[1].ctrl); solo_p2m_dma_desc()
136 void solo_p2m_fill_desc(struct solo_p2m_desc *desc, int wr, solo_p2m_fill_desc() argument
143 desc->cfg = SOLO_P2M_COPY_SIZE(size >> 2); solo_p2m_fill_desc()
144 desc->ctrl = SOLO_P2M_BURST_SIZE(SOLO_P2M_BURST_256) | solo_p2m_fill_desc()
148 desc->cfg |= SOLO_P2M_EXT_INC(ext_size >> 2); solo_p2m_fill_desc()
149 desc->ctrl |= SOLO_P2M_PCI_INC(size >> 2) | solo_p2m_fill_desc()
153 desc->dma_addr = dma_addr; solo_p2m_fill_desc()
154 desc->ext_addr = ext_addr; solo_p2m_fill_desc()
161 struct solo_p2m_desc desc[2]; solo_p2m_dma_t() local
163 solo_p2m_fill_desc(&desc[1], wr, dma_addr, ext_addr, size, repeat, solo_p2m_dma_t()
167 return solo_p2m_dma_desc(solo_dev, desc, 0, 1); solo_p2m_dma_t()
173 struct solo_p2m_desc *desc; solo_p2m_isr() local
182 desc = &p2m_dev->descs[p2m_dev->desc_idx]; solo_p2m_isr()
185 solo_reg_write(solo_dev, SOLO_P2M_TAR_ADR(id), desc->dma_addr); solo_p2m_isr()
186 solo_reg_write(solo_dev, SOLO_P2M_EXT_ADR(id), desc->ext_addr); solo_p2m_isr()
187 solo_reg_write(solo_dev, SOLO_P2M_EXT_CFG(id), desc->cfg); solo_p2m_isr()
188 solo_reg_write(solo_dev, SOLO_P2M_CONTROL(id), desc->ctrl); solo_p2m_isr()
64 solo_p2m_dma_desc(struct solo_dev *solo_dev, struct solo_p2m_desc *desc, dma_addr_t desc_dma, int desc_cnt) solo_p2m_dma_desc() argument
/linux-4.4.14/lib/
H A Dcrc-t10dif.c27 } desc; crc_t10dif_update() local
33 desc.shash.tfm = crct10dif_tfm; crc_t10dif_update()
34 desc.shash.flags = 0; crc_t10dif_update()
35 *(__u16 *)desc.ctx = crc; crc_t10dif_update()
37 err = crypto_shash_update(&desc.shash, buffer, len); crc_t10dif_update()
40 return *(__u16 *)desc.ctx; crc_t10dif_update()
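crc_t10dif_update() hand-rolls an on-stack shash_desc with exactly two bytes of context - the CRC state itself - and seeds it through the ctx pointer before updating. The same thing written with the generic stack helper, which sizes the context from the tfm instead of hard-coding it:

    #include <crypto/hash.h>

    static __u16 my_crc_update(struct crypto_shash *tfm, __u16 crc,
                               const u8 *buf, unsigned int len)
    {
            SHASH_DESC_ON_STACK(desc, tfm);

            desc->tfm = tfm;
            desc->flags = 0;
            *(__u16 *)shash_desc_ctx(desc) = crc;  /* seed the state */

            if (crypto_shash_update(desc, buf, len))
                    return crc;                    /* error: return input */
            return *(__u16 *)shash_desc_ctx(desc);
    }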
/linux-4.4.14/sound/soc/intel/common/
H A Dsst-acpi.c60 struct sst_acpi_desc *desc; member in struct:sst_acpi_priv
70 struct sst_acpi_desc *desc = sst_acpi->desc; sst_acpi_fw_cb() local
81 platform_device_register_data(dev, desc->drv_name, -1, sst_acpi_fw_cb()
85 desc->drv_name, (int)PTR_ERR(sst_acpi->pdev_pcm)); sst_acpi_fw_cb()
120 struct sst_acpi_desc *desc; sst_acpi_probe() local
132 desc = (struct sst_acpi_desc *)id->driver_data; sst_acpi_probe()
133 mach = sst_acpi_find_machine(desc->machines); sst_acpi_probe()
140 sst_pdata->id = desc->sst_id; sst_acpi_probe()
142 sst_acpi->desc = desc; sst_acpi_probe()
145 sst_pdata->resindex_dma_base = desc->resindex_dma_base; sst_acpi_probe()
146 if (desc->resindex_dma_base >= 0) { sst_acpi_probe()
147 sst_pdata->dma_engine = desc->dma_engine; sst_acpi_probe()
148 sst_pdata->dma_base = desc->resindex_dma_base; sst_acpi_probe()
149 sst_pdata->dma_size = desc->dma_size; sst_acpi_probe()
152 if (desc->irqindex_host_ipc >= 0) sst_acpi_probe()
153 sst_pdata->irq = platform_get_irq(pdev, desc->irqindex_host_ipc); sst_acpi_probe()
155 if (desc->resindex_lpe_base >= 0) { sst_acpi_probe()
157 desc->resindex_lpe_base); sst_acpi_probe()
164 if (desc->resindex_pcicfg_base >= 0) { sst_acpi_probe()
166 desc->resindex_pcicfg_base); sst_acpi_probe()
173 if (desc->resindex_fw_base >= 0) { sst_acpi_probe()
175 desc->resindex_fw_base); sst_acpi_probe()
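sst_acpi_probe() shows the desc acting as a per-ACPI-ID resource map: indices from the desc feed platform_get_irq()/platform_get_resource(), and the PCM child is spawned under the desc's drv_name. The two library calls at its core, sketched with invented names:

    #include <linux/err.h>
    #include <linux/platform_device.h>

    static int my_spawn_pcm(struct platform_device *pdev, int irqindex)
    {
            struct platform_device *child;
            int irq = platform_get_irq(pdev, irqindex);

            if (irq < 0)
                    return irq;

            child = platform_device_register_data(&pdev->dev, "my-pcm", -1,
                                                  NULL, 0);
            return PTR_ERR_OR_ZERO(child);
    }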
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/
H A Dmxms.c101 u8 *desc = mxms + mxms_headerlen(mxm); mxms_foreach() local
102 u8 *fini = desc + mxms_structlen(mxm) - 1; mxms_foreach()
103 while (desc < fini) { mxms_foreach()
104 u8 type = desc[0] & 0x0f; mxms_foreach()
124 entries = (ROM32(desc[0]) & 0x01f00000) >> 20; mxms_foreach()
133 entries = (desc[1] & 0xf0) >> 4; mxms_foreach()
141 entries = desc[1] & 0x07; mxms_foreach()
153 u8 *dump = desc; mxms_foreach()
170 if (!exec(mxm, desc, info)) mxms_foreach()
174 desc += headerlen + (entries * recordlen); mxms_foreach()
181 mxms_output_device(struct nvkm_mxm *mxm, u8 *pdata, struct mxms_odev *desc) mxms_output_device() argument
187 desc->outp_type = (data & 0x00000000000000f0ULL) >> 4; mxms_output_device()
188 desc->ddc_port = (data & 0x0000000000000f00ULL) >> 8; mxms_output_device()
189 desc->conn_type = (data & 0x000000000001f000ULL) >> 12; mxms_output_device()
190 desc->dig_conn = (data & 0x0000000000780000ULL) >> 19; mxms_output_device()
H A Dnv50.c33 struct mxms_odev desc; member in struct:context
40 struct mxms_odev desc; mxm_match_tmds_partner() local
42 mxms_output_device(mxm, data, &desc); mxm_match_tmds_partner()
43 if (desc.outp_type == 2 && mxm_match_tmds_partner()
44 desc.dig_conn == ctx->desc.dig_conn) mxm_match_tmds_partner()
54 u64 desc = *(u64 *)data; mxm_match_dcb() local
56 mxms_output_device(mxm, data, &ctx->desc); mxm_match_dcb()
59 if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type) mxm_match_dcb()
66 if ((desc & 0x00000000000000f0) >= 0x20) { mxm_match_dcb()
68 u8 link = mxm_sor_map(bios, ctx->desc.dig_conn); mxm_match_dcb()
84 if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 && mxm_match_dcb()
116 i2cidx = mxm_ddc_map(bios, ctx.desc.ddc_port); mxm_dcb_sanitise_entry()
128 switch (ctx.desc.outp_type) { mxm_dcb_sanitise_entry()
133 link = mxm_sor_map(bios, ctx.desc.dig_conn) & 0x30; mxm_dcb_sanitise_entry()
149 switch (ctx.desc.conn_type) { mxm_dcb_sanitise_entry()
180 u64 desc = *(u64 *)data; mxm_show_unmatched() local
181 if ((desc & 0xf0) != 0xf0) mxm_show_unmatched()
182 nvkm_info(subdev, "unmatched output device %016llx\n", desc); mxm_show_unmatched()
/linux-4.4.14/drivers/net/wireless/p54/
H A Dp54pci.c150 struct p54p_desc *desc = &ring[i]; p54p_refill_rx_ring() local
152 if (!desc->host_addr) { p54p_refill_rx_ring()
171 desc->host_addr = cpu_to_le32(mapping); p54p_refill_rx_ring()
172 desc->device_addr = 0; // FIXME: necessary? p54p_refill_rx_ring()
173 desc->len = cpu_to_le16(priv->common.rx_mtu + 32); p54p_refill_rx_ring()
174 desc->flags = 0; p54p_refill_rx_ring()
193 struct p54p_desc *desc; p54p_check_rx_ring() local
203 desc = &ring[i]; p54p_check_rx_ring()
204 len = le16_to_cpu(desc->len); p54p_check_rx_ring()
220 dma_addr = le32_to_cpu(desc->host_addr); p54p_check_rx_ring()
229 desc->host_addr = cpu_to_le32(0); p54p_check_rx_ring()
234 desc->len = cpu_to_le16(priv->common.rx_mtu + 32); p54p_check_rx_ring()
250 struct p54p_desc *desc; p54p_check_tx_ring() local
259 desc = &ring[i]; p54p_check_tx_ring()
264 pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr), p54p_check_tx_ring()
265 le16_to_cpu(desc->len), PCI_DMA_TODEVICE); p54p_check_tx_ring()
267 desc->host_addr = 0; p54p_check_tx_ring()
268 desc->device_addr = 0; p54p_check_tx_ring()
269 desc->len = 0; p54p_check_tx_ring()
270 desc->flags = 0; p54p_check_tx_ring()
332 struct p54p_desc *desc; p54p_tx() local
350 desc = &ring_control->tx_data[i]; p54p_tx()
351 desc->host_addr = cpu_to_le32(mapping); p54p_tx()
352 desc->device_addr = ((struct p54_hdr *)skb->data)->req_id; p54p_tx()
353 desc->len = cpu_to_le16(skb->len); p54p_tx()
354 desc->flags = 0; p54p_tx()
369 struct p54p_desc *desc; p54p_stop() local
382 desc = &ring_control->rx_data[i]; p54p_stop()
383 if (desc->host_addr) p54p_stop()
385 le32_to_cpu(desc->host_addr), p54p_stop()
393 desc = &ring_control->rx_mgmt[i]; p54p_stop()
394 if (desc->host_addr) p54p_stop()
396 le32_to_cpu(desc->host_addr), p54p_stop()
404 desc = &ring_control->tx_data[i]; p54p_stop()
405 if (desc->host_addr) p54p_stop()
407 le32_to_cpu(desc->host_addr), p54p_stop()
408 le16_to_cpu(desc->len), p54p_stop()
416 desc = &ring_control->tx_mgmt[i]; p54p_stop()
417 if (desc->host_addr) p54p_stop()
419 le32_to_cpu(desc->host_addr), p54p_stop()
420 le16_to_cpu(desc->len), p54p_stop()
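p54p_refill_rx_ring() shows the usual PCI RX refill step: DMA-map a fresh skb and publish the mapping through the ring descriptor in device (little-endian) byte order - the desc->host_addr = cpu_to_le32(mapping) line above. One slot of that, condensed around a hypothetical descriptor layout:

    #include <linux/pci.h>
    #include <linux/skbuff.h>

    struct my_rx_desc {
            __le32 host_addr; /* device fetches the buffer from here */
            __le16 len;
            __le16 flags;
    };

    static int my_refill_one(struct pci_dev *pdev, struct my_rx_desc *desc,
                             struct sk_buff *skb, unsigned int buf_len)
    {
            dma_addr_t mapping = pci_map_single(pdev, skb_tail_pointer(skb),
                                                buf_len, PCI_DMA_FROMDEVICE);

            if (pci_dma_mapping_error(pdev, mapping))
                    return -ENOMEM;

            desc->host_addr = cpu_to_le32(mapping);
            desc->len = cpu_to_le16(buf_len);
            desc->flags = 0; /* hand the slot to the device last */
            return 0;
    }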
/linux-4.4.14/arch/arm/include/asm/hardware/
H A Diop3xx-adma.h199 iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_pq() argument
206 iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr) iop_desc_set_pq_addr() argument
212 iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx, iop_desc_set_pq_src_addr() argument
226 iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_pq_zero_sum() argument
233 iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) iop_desc_set_pq_zero_sum_byte_count() argument
241 iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx, iop_desc_set_pq_zero_sum_addr() argument
302 static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc, iop_desc_is_aligned() argument
306 return (desc->idx & (num_slots - 1)) ? 0 : 1; iop_desc_is_aligned()
396 static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc, iop_desc_get_byte_count() argument
399 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_get_byte_count()
429 static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc, iop_desc_get_src_addr() argument
433 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_get_src_addr()
461 iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags) iop_desc_init_memcpy() argument
463 struct iop3xx_desc_dma *hw_desc = desc->hw_desc; iop_desc_init_memcpy()
479 iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags) iop_desc_init_memset() argument
481 struct iop3xx_desc_aau *hw_desc = desc->hw_desc; iop_desc_init_memset()
552 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */ iop3xx_desc_init_xor()
564 iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_xor() argument
567 iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags); iop_desc_init_xor()
572 iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_zero_sum() argument
575 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; iop_desc_init_zero_sum()
583 hw_desc = desc->hw_desc; iop_desc_init_zero_sum()
601 (u32) (desc->async_tx.phys + (i << 5)); iop_desc_init_zero_sum()
609 iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt, iop_desc_init_null_xor() argument
612 struct iop3xx_desc_aau *hw_desc = desc->hw_desc; iop_desc_init_null_xor()
638 u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */ iop_desc_init_null_xor()
646 static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc, iop_desc_set_byte_count() argument
650 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_set_byte_count()
666 iop_desc_init_interrupt(struct iop_adma_desc_slot *desc, iop_desc_init_interrupt() argument
669 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_init_interrupt()
674 iop_desc_init_memcpy(desc, 1); iop_desc_init_interrupt()
680 iop_desc_init_null_xor(desc, 2, 1); iop_desc_init_interrupt()
692 iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len) iop_desc_set_zero_sum_byte_count() argument
694 int slots_per_op = desc->slots_per_op; iop_desc_set_zero_sum_byte_count()
695 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; iop_desc_set_zero_sum_byte_count()
713 static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc, iop_desc_set_dest_addr() argument
717 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_set_dest_addr()
732 static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc, iop_desc_set_memcpy_src_addr() argument
735 struct iop3xx_desc_dma *hw_desc = desc->hw_desc; iop_desc_set_memcpy_src_addr()
740 iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx, iop_desc_set_zero_sum_src_addr() argument
744 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; iop_desc_set_zero_sum_src_addr()
745 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; iop_desc_set_zero_sum_src_addr()
755 static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc, iop_desc_set_xor_src_addr() argument
759 struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter; iop_desc_set_xor_src_addr()
760 int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op; iop_desc_set_xor_src_addr()
770 static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc, iop_desc_set_next_desc() argument
774 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_set_next_desc()
780 static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc) iop_desc_get_next_desc() argument
783 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_get_next_desc()
787 static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc) iop_desc_clear_next_desc() argument
790 union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, }; iop_desc_clear_next_desc()
794 static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc, iop_desc_set_block_fill_val() argument
797 struct iop3xx_desc_aau *hw_desc = desc->hw_desc; iop_desc_set_block_fill_val()
802 iop_desc_get_zero_result(struct iop_adma_desc_slot *desc) iop_desc_get_zero_result() argument
804 struct iop3xx_desc_aau *hw_desc = desc->hw_desc; iop_desc_get_zero_result()
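
The iop3xx-adma.h hits repeat one trick: a single raw hw_desc pointer is viewed through union iop3xx_desc so the memcpy (DMA) and XOR (AAU) engine layouts can share accessors, and iop_desc_is_aligned() tests slot alignment with a power-of-two mask. A compilable sketch of both, with made-up field layouts:

    #include <stdio.h>

    struct dma_layout { unsigned int byte_count; unsigned int next_desc; };
    struct aau_layout { unsigned int byte_count; unsigned int desc_ctrl; };

    union desc_view {
        void *ptr;                /* raw descriptor memory */
        struct dma_layout *dma;   /* memcpy engine's view  */
        struct aau_layout *aau;   /* XOR engine's view     */
    };

    struct adma_slot {
        void *hw_desc;            /* points at hardware descriptor */
        int idx;                  /* slot index in the pool        */
    };

    static unsigned int slot_byte_count(struct adma_slot *slot)
    {
        union desc_view v = { .ptr = slot->hw_desc };
        return v.dma->byte_count;    /* same offset in both layouts */
    }

    /* idx must be a multiple of num_slots (a power of two) */
    static int slot_is_aligned(struct adma_slot *slot, int num_slots)
    {
        return (slot->idx & (num_slots - 1)) ? 0 : 1;
    }

    int main(void)
    {
        struct dma_layout raw = { .byte_count = 4096, .next_desc = 0 };
        struct adma_slot slot = { .hw_desc = &raw, .idx = 4 };

        printf("bytes=%u aligned=%d\n",
               slot_byte_count(&slot), slot_is_aligned(&slot, 4));
        return 0;
    }

The union costs nothing at runtime; it only tells the compiler which layout to apply to the same memory.
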
/linux-4.4.14/drivers/net/ethernet/xscale/
ixp4xx_eth.c
58 #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
177 struct desc *desc_tab; /* coherent */
198 struct desc { struct
236 (n) * sizeof(struct desc))
240 ((n) + RX_DESCS) * sizeof(struct desc))
612 static inline void debug_desc(u32 phys, struct desc *desc) debug_desc() argument
617 phys, desc->next, desc->buf_len, desc->pkt_len, debug_desc()
618 desc->data, desc->dest_id, desc->src_id, desc->flags, debug_desc()
619 desc->qos, desc->padlen, desc->vlan_tci, debug_desc()
620 desc->dst_mac_0, desc->dst_mac_1, desc->dst_mac_2, debug_desc()
621 desc->dst_mac_3, desc->dst_mac_4, desc->dst_mac_5, debug_desc()
622 desc->src_mac_0, desc->src_mac_1, desc->src_mac_2, debug_desc()
623 desc->src_mac_3, desc->src_mac_4, desc->src_mac_5); debug_desc()
631 struct desc *tab; queue_get_desc()
639 n_desc = (phys - tab_phys) / sizeof(struct desc); queue_get_desc()
647 struct desc *desc) queue_put_desc()
649 debug_desc(phys, desc); queue_put_desc()
657 static inline void dma_unmap_tx(struct port *port, struct desc *desc) dma_unmap_tx() argument
660 dma_unmap_single(&port->netdev->dev, desc->data, dma_unmap_tx()
661 desc->buf_len, DMA_TO_DEVICE); dma_unmap_tx()
663 dma_unmap_single(&port->netdev->dev, desc->data & ~3, dma_unmap_tx()
664 ALIGN((desc->data & 3) + desc->buf_len, 4), dma_unmap_tx()
695 struct desc *desc; eth_poll() local
726 desc = rx_desc_ptr(port, n); eth_poll()
739 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4)); eth_poll()
744 /* put the desc back on RX-ready queue */ eth_poll()
745 desc->buf_len = MAX_MRU; eth_poll()
746 desc->pkt_len = 0; eth_poll()
747 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); eth_poll()
755 dma_unmap_single(&dev->dev, desc->data - NET_IP_ALIGN, eth_poll()
758 dma_sync_single_for_cpu(&dev->dev, desc->data - NET_IP_ALIGN, eth_poll()
761 ALIGN(NET_IP_ALIGN + desc->pkt_len, 4) / 4); eth_poll()
764 skb_put(skb, desc->pkt_len); eth_poll()
777 desc->data = phys + NET_IP_ALIGN; eth_poll()
779 desc->buf_len = MAX_MRU; eth_poll()
780 desc->pkt_len = 0; eth_poll()
781 queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc); eth_poll()
802 struct desc *desc; eth_txdone_irq() local
810 n_desc = (phys - tx_desc_phys(port, 0)) / sizeof(struct desc); eth_txdone_irq()
812 desc = tx_desc_ptr(port, n_desc); eth_txdone_irq()
813 debug_desc(phys, desc); eth_txdone_irq()
817 port->netdev->stats.tx_bytes += desc->pkt_len; eth_txdone_irq()
819 dma_unmap_tx(port, desc); eth_txdone_irq()
829 queue_put_desc(port->plat->txreadyq, phys, desc); eth_txdone_irq()
847 struct desc *desc; eth_xmit() local
889 desc = tx_desc_ptr(port, n); eth_xmit()
896 desc->data = phys + offset; eth_xmit()
897 desc->buf_len = desc->pkt_len = len; eth_xmit()
901 queue_put_desc(TX_QUEUE(port->id), tx_desc_phys(port, n), desc); eth_xmit()
1142 struct desc *desc = rx_desc_ptr(port, i); init_queues() local
1154 desc->buf_len = MAX_MRU; init_queues()
1155 desc->data = dma_map_single(&port->netdev->dev, data, init_queues()
1157 if (dma_mapping_error(&port->netdev->dev, desc->data)) { init_queues()
1161 desc->data += NET_IP_ALIGN; init_queues()
1174 struct desc *desc = rx_desc_ptr(port, i); destroy_queues() local
1178 desc->data - NET_IP_ALIGN, destroy_queues()
1184 struct desc *desc = tx_desc_ptr(port, i); destroy_queues() local
1187 dma_unmap_tx(port, desc); destroy_queues()
1337 struct desc *desc; eth_close() local
1341 desc = tx_desc_ptr(port, n); eth_close()
1343 desc->buf_len = desc->pkt_len = 1; eth_close()
1345 queue_put_desc(TX_QUEUE(port->id), phys, desc); eth_close()
646 queue_put_desc(unsigned int queue, u32 phys, struct desc *desc) queue_put_desc() argument
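
ixp4xx_eth.c keeps all RX and TX descriptors in one coherent allocation (POOL_ALLOC_SIZE above), so addresses and indexes interconvert by plain offset arithmetic, as in queue_get_desc() and eth_txdone_irq(). A standalone model of that arithmetic with invented sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define RX_DESCS 16
    #define TX_DESCS 16

    struct desc {
        uint32_t next;
        uint16_t buf_len, pkt_len;
        uint32_t data;
    };

    #define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))

    /* physical address of RX descriptor n, from the table's base */
    static uint32_t rx_desc_phys(uint32_t tab_phys, int n)
    {
        return tab_phys + n * sizeof(struct desc);
    }

    /* TX descriptors start right after the RX block */
    static uint32_t tx_desc_phys(uint32_t tab_phys, int n)
    {
        return tab_phys + (n + RX_DESCS) * sizeof(struct desc);
    }

    /* invert the mapping for an address handed back by a queue */
    static int phys_to_index(uint32_t tab_phys, uint32_t phys)
    {
        return (phys - tab_phys) / sizeof(struct desc);
    }

    int main(void)
    {
        uint32_t tab_phys = 0x10000;

        printf("pool=%zu bytes, tx desc 3 is table slot %d, rx0 at 0x%x\n",
               POOL_ALLOC_SIZE,
               phys_to_index(tab_phys, tx_desc_phys(tab_phys, 3)),
               rx_desc_phys(tab_phys, 0));
        return 0;
    }
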
/linux-4.4.14/drivers/scsi/
ses.c
142 unsigned char *desc) ses_set_page2_descriptor()
159 memcpy(desc_ptr, desc, 4); ses_set_page2_descriptor()
202 unsigned char *desc; ses_get_fault() local
204 desc = ses_get_page2_descriptor(edev, ecomp); ses_get_fault()
205 if (desc) ses_get_fault()
206 ecomp->fault = (desc[3] & 0x60) >> 4; ses_get_fault()
213 unsigned char desc[4]; ses_set_fault() local
221 init_device_slot_control(desc, ecomp, desc_ptr); ses_set_fault()
225 desc[3] &= 0xdf; ses_set_fault()
228 desc[3] |= 0x20; ses_set_fault()
235 return ses_set_page2_descriptor(edev, ecomp, desc); ses_set_fault()
241 unsigned char *desc; ses_get_status() local
243 desc = ses_get_page2_descriptor(edev, ecomp); ses_get_status()
244 if (desc) ses_get_status()
245 ecomp->status = (desc[0] & 0x0f); ses_get_status()
251 unsigned char *desc; ses_get_locate() local
253 desc = ses_get_page2_descriptor(edev, ecomp); ses_get_locate()
254 if (desc) ses_get_locate()
255 ecomp->locate = (desc[2] & 0x02) ? 1 : 0; ses_get_locate()
262 unsigned char desc[4]; ses_set_locate() local
270 init_device_slot_control(desc, ecomp, desc_ptr); ses_set_locate()
274 desc[2] &= 0xfd; ses_set_locate()
277 desc[2] |= 0x02; ses_set_locate()
283 return ses_set_page2_descriptor(edev, ecomp, desc); ses_set_locate()
290 unsigned char desc[4]; ses_set_active() local
298 init_device_slot_control(desc, ecomp, desc_ptr); ses_set_active()
302 desc[2] &= 0x7f; ses_set_active()
306 desc[2] |= 0x80; ses_set_active()
313 return ses_set_page2_descriptor(edev, ecomp, desc); ses_set_active()
327 unsigned char *desc; ses_get_power_status() local
329 desc = ses_get_page2_descriptor(edev, ecomp); ses_get_power_status()
330 if (desc) ses_get_power_status()
331 ecomp->power_status = (desc[3] & 0x10) ? 0 : 1; ses_get_power_status()
338 unsigned char desc[4]; ses_set_power_status() local
346 init_device_slot_control(desc, ecomp, desc_ptr); ses_set_power_status()
351 desc[3] |= 0x10; ses_set_power_status()
354 desc[3] &= 0xef; ses_set_power_status()
360 return ses_set_page2_descriptor(edev, ecomp, desc); ses_set_power_status()
400 unsigned char *desc) ses_process_descriptor()
402 int eip = desc[0] & 0x10; ses_process_descriptor()
403 int invalid = desc[0] & 0x80; ses_process_descriptor()
404 enum scsi_protocol proto = desc[0] & 0x0f; ses_process_descriptor()
416 d = desc + 4; ses_process_descriptor()
422 d = desc + 4; ses_process_descriptor()
424 d = desc + 8; ses_process_descriptor()
426 d = desc + 4; ses_process_descriptor()
582 unsigned char *desc; ses_match_to_enclosure() local
592 desc = sdev->vpd_pg83 + 4; ses_match_to_enclosure()
593 while (desc < sdev->vpd_pg83 + sdev->vpd_pg83_len) { ses_match_to_enclosure()
594 enum scsi_protocol proto = desc[0] >> 4; ses_match_to_enclosure()
595 u8 code_set = desc[0] & 0x0f; ses_match_to_enclosure()
596 u8 piv = desc[1] & 0x80; ses_match_to_enclosure()
597 u8 assoc = (desc[1] & 0x30) >> 4; ses_match_to_enclosure()
598 u8 type = desc[1] & 0x0f; ses_match_to_enclosure()
599 u8 len = desc[3]; ses_match_to_enclosure()
603 efd.addr = get_unaligned_be64(&desc[4]); ses_match_to_enclosure()
605 desc += len + 4; ses_match_to_enclosure()
140 ses_set_page2_descriptor(struct enclosure_device *edev, struct enclosure_component *ecomp, unsigned char *desc) ses_set_page2_descriptor() argument
399 ses_process_descriptor(struct enclosure_component *ecomp, unsigned char *desc) ses_process_descriptor() argument
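
Every ses_set_*() helper above follows the same read-modify-write shape: build a local 4-byte desc, flip one bit with a fixed mask (0x20 for fault in byte 3, 0x02 for locate in byte 2), and push the block back via ses_set_page2_descriptor(). A sketch of that shape with the SCSI I/O stubbed out as a global page buffer:

    #include <stdio.h>
    #include <string.h>

    static unsigned char page2[4];   /* stand-in for the enclosure page */

    static void get_descriptor(unsigned char *desc)
    {
        memcpy(desc, page2, 4);
    }

    static void set_descriptor(const unsigned char *desc)
    {
        memcpy(page2, desc, 4);
    }

    /* mirrors ses_set_fault(): clear or set bit 5 of byte 3 */
    static void set_fault(int on)
    {
        unsigned char desc[4];

        get_descriptor(desc);
        if (on)
            desc[3] |= 0x20;
        else
            desc[3] &= 0xdf;     /* ~0x20 */
        set_descriptor(desc);
    }

    int main(void)
    {
        set_fault(1);
        printf("byte3=0x%02x\n", page2[3]);   /* 0x20 */
        set_fault(0);
        printf("byte3=0x%02x\n", page2[3]);   /* 0x00 */
        return 0;
    }
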
/linux-4.4.14/fs/ext2/
ialloc.c
48 struct ext2_group_desc *desc; read_inode_bitmap() local
51 desc = ext2_get_group_desc(sb, block_group, NULL); read_inode_bitmap()
52 if (!desc) read_inode_bitmap()
55 bh = sb_bread(sb, le32_to_cpu(desc->bg_inode_bitmap)); read_inode_bitmap()
60 block_group, le32_to_cpu(desc->bg_inode_bitmap)); read_inode_bitmap()
67 struct ext2_group_desc * desc; ext2_release_inode() local
70 desc = ext2_get_group_desc(sb, group, &bh); ext2_release_inode()
71 if (!desc) { ext2_release_inode()
78 le16_add_cpu(&desc->bg_free_inodes_count, 1); ext2_release_inode()
80 le16_add_cpu(&desc->bg_used_dirs_count, -1); ext2_release_inode()
208 struct ext2_group_desc *desc, *best_desc = NULL; find_group_dir() local
212 desc = ext2_get_group_desc (sb, group, NULL); find_group_dir()
213 if (!desc || !desc->bg_free_inodes_count) find_group_dir()
215 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) find_group_dir()
218 (le16_to_cpu(desc->bg_free_blocks_count) > find_group_dir()
221 best_desc = desc; find_group_dir()
273 struct ext2_group_desc *desc; find_group_orlov() local
291 desc = ext2_get_group_desc (sb, group, NULL); find_group_orlov()
292 if (!desc || !desc->bg_free_inodes_count) find_group_orlov()
294 if (le16_to_cpu(desc->bg_used_dirs_count) >= best_ndir) find_group_orlov()
296 if (le16_to_cpu(desc->bg_free_inodes_count) < avefreei) find_group_orlov()
298 if (le16_to_cpu(desc->bg_free_blocks_count) < avefreeb) find_group_orlov()
301 best_ndir = le16_to_cpu(desc->bg_used_dirs_count); find_group_orlov()
302 best_desc = desc; find_group_orlov()
305 desc = best_desc; find_group_orlov()
331 desc = ext2_get_group_desc (sb, group, NULL); find_group_orlov()
332 if (!desc || !desc->bg_free_inodes_count) find_group_orlov()
336 if (le16_to_cpu(desc->bg_used_dirs_count) >= max_dirs) find_group_orlov()
338 if (le16_to_cpu(desc->bg_free_inodes_count) < min_inodes) find_group_orlov()
340 if (le16_to_cpu(desc->bg_free_blocks_count) < min_blocks) find_group_orlov()
348 desc = ext2_get_group_desc (sb, group, NULL); find_group_orlov()
349 if (!desc || !desc->bg_free_inodes_count) find_group_orlov()
351 if (le16_to_cpu(desc->bg_free_inodes_count) >= avefreei) find_group_orlov()
374 struct ext2_group_desc *desc; find_group_other() local
381 desc = ext2_get_group_desc (sb, group, NULL); find_group_other()
382 if (desc && le16_to_cpu(desc->bg_free_inodes_count) && find_group_other()
383 le16_to_cpu(desc->bg_free_blocks_count)) find_group_other()
405 desc = ext2_get_group_desc (sb, group, NULL); find_group_other()
406 if (desc && le16_to_cpu(desc->bg_free_inodes_count) && find_group_other()
407 le16_to_cpu(desc->bg_free_blocks_count)) find_group_other()
419 desc = ext2_get_group_desc (sb, group, NULL); find_group_other()
420 if (desc && le16_to_cpu(desc->bg_free_inodes_count)) find_group_other()
620 struct ext2_group_desc *desc; ext2_count_free_inodes() local
633 desc = ext2_get_group_desc (sb, i, NULL); ext2_count_free_inodes()
634 if (!desc) ext2_count_free_inodes()
636 desc_count += le16_to_cpu(desc->bg_free_inodes_count); ext2_count_free_inodes()
644 i, le16_to_cpu(desc->bg_free_inodes_count), x); ext2_count_free_inodes()
655 desc = ext2_get_group_desc (sb, i, NULL); ext2_count_free_inodes()
656 if (!desc) ext2_count_free_inodes()
658 desc_count += le16_to_cpu(desc->bg_free_inodes_count); ext2_count_free_inodes()
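
The find_group_*() hits above are all variations of one scan: walk every block group descriptor, skip groups that fail a free-inode or free-block threshold, and remember the best candidate. A simplified, compilable version of the find_group_dir() variant (endianness conversion and ext2 specifics elided):

    #include <stdio.h>

    struct group_desc {
        int free_inodes_count;
        int free_blocks_count;
    };

    static int find_group_dir(struct group_desc *groups, int ngroups,
                              int total_free_inodes)
    {
        int avefreei = total_free_inodes / ngroups;
        struct group_desc *best = NULL;
        int group, best_group = -1;

        for (group = 0; group < ngroups; group++) {
            struct group_desc *desc = &groups[group];

            if (!desc->free_inodes_count)
                continue;                     /* nothing to allocate */
            if (desc->free_inodes_count < avefreei)
                continue;                     /* below-average group */
            if (!best || desc->free_blocks_count > best->free_blocks_count) {
                best = desc;
                best_group = group;
            }
        }
        return best_group;
    }

    int main(void)
    {
        struct group_desc g[3] = { {2, 10}, {8, 50}, {8, 40} };

        printf("chose group %d\n", find_group_dir(g, 3, 18));  /* 1 */
        return 0;
    }
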
/linux-4.4.14/net/xfrm/
xfrm_algo.c
41 .desc = {
60 .desc = {
79 .desc = {
98 .desc = {
117 .desc = {
136 .desc = {
155 .desc = {
189 .desc = {
209 .desc = {
229 .desc = {
249 .desc = {
268 .desc = {
287 .desc = {
307 .desc = {
326 .desc = {
362 .desc = {
383 .desc = {
404 .desc = {
425 .desc = {
446 .desc = {
467 .desc = {
488 .desc = {
509 .desc = {
530 .desc = {
550 .desc = {
568 .desc = { .sadb_alg_id = SADB_X_CALG_DEFLATE }
578 .desc = { .sadb_alg_id = SADB_X_CALG_LZS }
588 .desc = { .sadb_alg_id = SADB_X_CALG_LZJH }
674 return entry->desc.sadb_alg_id == (unsigned long)data; xfrm_alg_id_match()
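
The long run of `.desc = {` hits is a static table of algorithm entries, each embedding a descriptor; lookup walks the table with a match callback exactly like xfrm_alg_id_match() at the end, which compares entry->desc.sadb_alg_id against a value smuggled through a void pointer. A toy version of the same shape (ids and names invented):

    #include <stdio.h>

    struct alg_desc { unsigned long alg_id; };

    struct alg_entry {
        const char *name;
        struct alg_desc desc;
    };

    static struct alg_entry table[] = {
        { .name = "alg-a", .desc = { .alg_id = 2 } },
        { .name = "alg-b", .desc = { .alg_id = 3 } },
        { .name = "alg-c", .desc = { .alg_id = 9 } },
    };

    typedef int (*match_fn)(const struct alg_entry *entry, const void *data);

    /* same comparison trick as xfrm_alg_id_match() */
    static int alg_id_match(const struct alg_entry *entry, const void *data)
    {
        return entry->desc.alg_id == (unsigned long)data;
    }

    static const struct alg_entry *find_alg(match_fn match, const void *data)
    {
        unsigned int i;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if (match(&table[i], data))
                return &table[i];
        return NULL;
    }

    int main(void)
    {
        const struct alg_entry *e = find_alg(alg_id_match, (void *)3UL);

        printf("%s\n", e ? e->name : "none");   /* alg-b */
        return 0;
    }
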
/linux-4.4.14/drivers/devfreq/
devfreq-event.c
44 if (!edev || !edev->desc) devfreq_event_enable_edev()
48 if (edev->desc->ops && edev->desc->ops->enable devfreq_event_enable_edev()
50 ret = edev->desc->ops->enable(edev); devfreq_event_enable_edev()
76 if (!edev || !edev->desc) devfreq_event_disable_edev()
86 if (edev->desc->ops && edev->desc->ops->disable devfreq_event_disable_edev()
88 ret = edev->desc->ops->disable(edev); devfreq_event_disable_edev()
113 if (!edev || !edev->desc) devfreq_event_is_enabled()
138 if (!edev || !edev->desc) devfreq_event_set_event()
141 if (!edev->desc->ops || !edev->desc->ops->set_event) devfreq_event_set_event()
148 ret = edev->desc->ops->set_event(edev); devfreq_event_set_event()
168 if (!edev || !edev->desc) devfreq_event_get_event()
171 if (!edev->desc->ops || !edev->desc->ops->get_event) devfreq_event_get_event()
180 ret = edev->desc->ops->get_event(edev, edata); devfreq_event_get_event()
200 if (!edev || !edev->desc) devfreq_event_reset_event()
207 if (edev->desc->ops && edev->desc->ops->reset) devfreq_event_reset_event()
208 ret = edev->desc->ops->reset(edev); devfreq_event_reset_event()
243 if (!strcmp(edev->desc->name, node->name)) devfreq_event_get_edev_by_phandle()
301 * @desc : the devfreq-event device's descriptor, which includes essential
308 struct devfreq_event_desc *desc) devfreq_event_add_edev()
314 if (!dev || !desc) devfreq_event_add_edev()
317 if (!desc->name || !desc->ops) devfreq_event_add_edev()
320 if (!desc->ops->set_event || !desc->ops->get_event) devfreq_event_add_edev()
328 edev->desc = desc; devfreq_event_add_edev()
393 * @desc : the devfreq-event device's descriptor, which includes essential
401 struct devfreq_event_desc *desc) devm_devfreq_event_add_edev()
409 edev = devfreq_event_add_edev(dev, desc); devm_devfreq_event_add_edev()
446 if (!edev || !edev->desc) name_show()
449 return sprintf(buf, "%s\n", edev->desc->name); name_show()
458 if (!edev || !edev->desc) enable_count_show()
307 devfreq_event_add_edev(struct device *dev, struct devfreq_event_desc *desc) devfreq_event_add_edev() argument
400 devm_devfreq_event_add_edev(struct device *dev, struct devfreq_event_desc *desc) devm_devfreq_event_add_edev() argument
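
Nearly every devfreq-event entry point above opens with the same guards: reject a NULL edev or edev->desc, then check the specific desc->ops callback before calling it, so providers may leave optional hooks unset. A minimal compilable model of that contract:

    #include <stdio.h>
    #include <errno.h>

    struct edev;   /* forward declaration for the ops table */

    struct event_ops {
        int (*enable)(struct edev *edev);     /* optional  */
        int (*set_event)(struct edev *edev);  /* mandatory */
    };

    struct event_desc {
        const char *name;
        const struct event_ops *ops;
    };

    struct edev { const struct event_desc *desc; };

    static int event_set_event(struct edev *edev)
    {
        /* same guard ladder as devfreq_event_set_event() */
        if (!edev || !edev->desc)
            return -EINVAL;
        if (!edev->desc->ops || !edev->desc->ops->set_event)
            return -EINVAL;
        return edev->desc->ops->set_event(edev);
    }

    static int demo_set_event(struct edev *edev)
    {
        (void)edev;
        puts("event armed");
        return 0;
    }

    int main(void)
    {
        const struct event_ops ops = { .set_event = demo_set_event };
        const struct event_desc desc = { .name = "demo", .ops = &ops };
        struct edev edev = { .desc = &desc };

        printf("rc=%d\n", event_set_event(&edev));
        return 0;
    }
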
/linux-4.4.14/drivers/net/ethernet/ti/
cpmac.c
241 static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc) cpmac_dump_desc() argument
245 printk("%s: desc[%p]:", dev->name, desc); cpmac_dump_desc()
246 for (i = 0; i < sizeof(*desc) / 4; i++) cpmac_dump_desc()
247 printk(" %08x", ((u32 *)desc)[i]); cpmac_dump_desc()
369 struct cpmac_desc *desc) cpmac_rx_one()
374 cpmac_dump_desc(priv->dev, desc); cpmac_rx_one()
375 cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping); cpmac_rx_one()
376 if (unlikely(!desc->datalen)) { cpmac_rx_one()
385 skb_put(desc->skb, desc->datalen); cpmac_rx_one()
386 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); cpmac_rx_one()
387 skb_checksum_none_assert(desc->skb); cpmac_rx_one()
389 priv->dev->stats.rx_bytes += desc->datalen; cpmac_rx_one()
390 result = desc->skb; cpmac_rx_one()
391 dma_unmap_single(&priv->dev->dev, desc->data_mapping, cpmac_rx_one()
393 desc->skb = skb; cpmac_rx_one()
394 desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data, cpmac_rx_one()
397 desc->hw_data = (u32)desc->data_mapping; cpmac_rx_one()
410 desc->buflen = CPMAC_SKB_SIZE; cpmac_rx_one()
411 desc->dataflags = CPMAC_OWN; cpmac_rx_one()
419 struct cpmac_desc *desc, *restart; cpmac_poll() local
433 desc = priv->rx_head; cpmac_poll()
435 while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) { cpmac_poll()
438 if ((desc->dataflags & CPMAC_EOQ) != 0) { cpmac_poll()
448 restart, desc); cpmac_poll()
452 restart = desc->next; cpmac_poll()
455 skb = cpmac_rx_one(priv, desc); cpmac_poll()
460 desc = desc->next; cpmac_poll()
463 if (desc != priv->rx_head) { cpmac_poll()
467 desc->prev->hw_next = (u32)0; cpmac_poll()
507 priv->rx_head = desc; cpmac_poll()
553 struct cpmac_desc *desc; cpmac_start_xmit() local
566 desc = &priv->desc_ring[queue]; cpmac_start_xmit()
567 if (unlikely(desc->dataflags & CPMAC_OWN)) { cpmac_start_xmit()
576 desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN; cpmac_start_xmit()
577 desc->skb = skb; cpmac_start_xmit()
578 desc->data_mapping = dma_map_single(&dev->dev, skb->data, len, cpmac_start_xmit()
580 desc->hw_data = (u32)desc->data_mapping; cpmac_start_xmit()
581 desc->datalen = len; cpmac_start_xmit()
582 desc->buflen = len; cpmac_start_xmit()
586 cpmac_dump_desc(dev, desc); cpmac_start_xmit()
589 cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping); cpmac_start_xmit()
596 struct cpmac_desc *desc; cpmac_end_xmit() local
599 desc = &priv->desc_ring[queue]; cpmac_end_xmit()
600 cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping); cpmac_end_xmit()
601 if (likely(desc->skb)) { cpmac_end_xmit()
604 dev->stats.tx_bytes += desc->skb->len; cpmac_end_xmit()
606 dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len, cpmac_end_xmit()
611 desc->skb, desc->skb->len); cpmac_end_xmit()
613 dev_kfree_skb_irq(desc->skb); cpmac_end_xmit()
614 desc->skb = NULL; cpmac_end_xmit()
692 struct cpmac_desc *desc; cpmac_clear_rx() local
697 desc = priv->rx_head; cpmac_clear_rx()
699 if ((desc->dataflags & CPMAC_OWN) == 0) { cpmac_clear_rx()
703 cpmac_dump_desc(dev, desc); cpmac_clear_rx()
704 desc->dataflags = CPMAC_OWN; cpmac_clear_rx()
707 desc->hw_next = desc->next->mapping; cpmac_clear_rx()
708 desc = desc->next; cpmac_clear_rx()
953 struct cpmac_desc *desc; cpmac_open() local
985 priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i; cpmac_open()
988 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { cpmac_open()
994 desc->skb = skb; cpmac_open()
995 desc->data_mapping = dma_map_single(&dev->dev, skb->data, cpmac_open()
998 desc->hw_data = (u32)desc->data_mapping; cpmac_open()
999 desc->buflen = CPMAC_SKB_SIZE; cpmac_open()
1000 desc->dataflags = CPMAC_OWN; cpmac_open()
1001 desc->next = &priv->rx_head[(i + 1) % priv->ring_size]; cpmac_open()
1002 desc->next->prev = desc; cpmac_open()
1003 desc->hw_next = (u32)desc->next->mapping; cpmac_open()
368 cpmac_rx_one(struct cpmac_priv *priv, struct cpmac_desc *desc) cpmac_rx_one() argument
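
cpmac_poll() above spins on an ownership bit: hardware clears CPMAC_OWN when a descriptor holds a received frame, the driver consumes descriptors until it meets one still owned by hardware (or the NAPI budget runs out), and each consumed slot is handed back by setting CPMAC_OWN again. A reduced standalone version (frame handling is just a counter, and the ring head does not advance):

    #include <stdio.h>

    #define OWN  0x8000   /* descriptor belongs to the hardware */
    #define RING 4

    struct rx_desc {
        unsigned short dataflags;
        struct rx_desc *next;
    };

    static int rx_poll(struct rx_desc *head, int budget)
    {
        struct rx_desc *desc = head;
        int received = 0;

        while (((desc->dataflags & OWN) == 0) && (received < budget)) {
            received++;                 /* consume the frame here */
            desc->dataflags = OWN;      /* give the slot back     */
            desc = desc->next;
        }
        return received;
    }

    int main(void)
    {
        struct rx_desc ring[RING];
        int i;

        for (i = 0; i < RING; i++) {
            ring[i].dataflags = (i < 2) ? 0 : OWN;  /* 2 frames ready */
            ring[i].next = &ring[(i + 1) % RING];
        }
        printf("received %d\n", rx_poll(&ring[0], 8));   /* 2 */
        return 0;
    }
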
/linux-4.4.14/sound/soc/blackfin/
bf5xx-sport.c
127 static void setup_desc(struct dmasg *desc, void *buf, int fragcount, setup_desc() argument
135 desc[i].next_desc_addr = &(desc[i + 1]); setup_desc()
136 desc[i].start_addr = (unsigned long)buf + i*fragsize; setup_desc()
137 desc[i].cfg = cfg; setup_desc()
138 desc[i].x_count = x_count; setup_desc()
139 desc[i].x_modify = wdsize; setup_desc()
140 desc[i].y_count = ycount; setup_desc()
141 desc[i].y_modify = wdsize; setup_desc()
145 desc[fragcount-1].next_desc_addr = desc; setup_desc()
147 pr_debug("setup desc: desc0=%p, next0=%p, desc1=%p," setup_desc()
149 desc, desc[0].next_desc_addr, setup_desc()
150 desc+1, desc[1].next_desc_addr, setup_desc()
151 desc[0].x_count, desc[0].y_count, setup_desc()
152 desc[0].start_addr, desc[0].cfg); setup_desc()
179 struct dmasg *desc, temp_desc; sport_hook_rx_dummy() local
190 desc = get_dma_next_desc_ptr(sport->dma_rx_chan); sport_hook_rx_dummy()
192 temp_desc = *desc; sport_hook_rx_dummy()
193 desc->x_count = sport->dummy_count / 2; sport_hook_rx_dummy()
194 desc->y_count = 0; sport_hook_rx_dummy()
195 desc->next_desc_addr = sport->dummy_rx_desc; sport_hook_rx_dummy()
203 *desc = temp_desc; sport_hook_rx_dummy()
300 struct dmasg *desc, temp_desc; sport_hook_tx_dummy() local
311 desc = get_dma_next_desc_ptr(sport->dma_tx_chan); sport_hook_tx_dummy()
313 temp_desc = *desc; sport_hook_tx_dummy()
314 desc->x_count = sport->dummy_count / 2; sport_hook_tx_dummy()
315 desc->y_count = 0; sport_hook_tx_dummy()
316 desc->next_desc_addr = sport->dummy_tx_desc; sport_hook_tx_dummy()
324 *desc = temp_desc; sport_hook_tx_dummy()
437 pr_err("Failed to allocate memory for rx desc\n"); sport_config_rx_dma()
502 pr_err("Failed to allocate memory for tx desc\n"); sport_config_tx_dma()
526 struct dmasg *desc; sport_config_rx_dummy() local
531 desc = l1_data_sram_zalloc(2 * sizeof(*desc)); sport_config_rx_dummy()
534 desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0); sport_config_rx_dummy()
535 memset(desc, 0, 2 * sizeof(*desc)); sport_config_rx_dummy()
537 if (desc == NULL) { sport_config_rx_dummy()
538 pr_err("Failed to allocate memory for dummy rx desc\n"); sport_config_rx_dummy()
541 sport->dummy_rx_desc = desc; sport_config_rx_dummy()
542 desc->start_addr = (unsigned long)sport->dummy_buf; sport_config_rx_dummy()
545 desc->cfg = config; sport_config_rx_dummy()
546 desc->x_count = sport->dummy_count/sport->wdsize; sport_config_rx_dummy()
547 desc->x_modify = sport->wdsize; sport_config_rx_dummy()
548 desc->y_count = 0; sport_config_rx_dummy()
549 desc->y_modify = 0; sport_config_rx_dummy()
550 memcpy(desc+1, desc, sizeof(*desc)); sport_config_rx_dummy()
551 desc->next_desc_addr = desc + 1; sport_config_rx_dummy()
552 desc[1].next_desc_addr = desc; sport_config_rx_dummy()
558 struct dmasg *desc; sport_config_tx_dummy() local
564 desc = l1_data_sram_zalloc(2 * sizeof(*desc)); sport_config_tx_dummy()
567 desc = dma_alloc_coherent(NULL, 2 * sizeof(*desc), &addr, 0); sport_config_tx_dummy()
568 memset(desc, 0, 2 * sizeof(*desc)); sport_config_tx_dummy()
570 if (!desc) { sport_config_tx_dummy()
571 pr_err("Failed to allocate memory for dummy tx desc\n"); sport_config_tx_dummy()
574 sport->dummy_tx_desc = desc; sport_config_tx_dummy()
575 desc->start_addr = (unsigned long)sport->dummy_buf + \ sport_config_tx_dummy()
579 desc->cfg = config; sport_config_tx_dummy()
580 desc->x_count = sport->dummy_count/sport->wdsize; sport_config_tx_dummy()
581 desc->x_modify = sport->wdsize; sport_config_tx_dummy()
582 desc->y_count = 0; sport_config_tx_dummy()
583 desc->y_modify = 0; sport_config_tx_dummy()
584 memcpy(desc+1, desc, sizeof(*desc)); sport_config_tx_dummy()
585 desc->next_desc_addr = desc + 1; sport_config_tx_dummy()
586 desc[1].next_desc_addr = desc; sport_config_tx_dummy()
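
setup_desc() at the top of the bf5xx-sport.c hits builds the classic circular DMA chain: each dmasg points at the next and the last points back at the first, so the SPORT hardware loops over the fragments indefinitely. The same function, trimmed to the linkage fields and compilable on its own:

    #include <stdio.h>

    struct dmasg {
        struct dmasg *next_desc_addr;
        unsigned long start_addr;
        unsigned int x_count;
        unsigned int x_modify;
    };

    static void setup_desc(struct dmasg *desc, void *buf, int fragcount,
                           unsigned int fragsize, unsigned int x_count,
                           unsigned int wdsize)
    {
        int i;

        for (i = 0; i < fragcount; i++) {
            desc[i].next_desc_addr = &desc[i + 1];
            desc[i].start_addr = (unsigned long)buf + i * fragsize;
            desc[i].x_count = x_count;
            desc[i].x_modify = wdsize;
        }
        /* close the ring: last fragment chains back to the first */
        desc[fragcount - 1].next_desc_addr = desc;
    }

    int main(void)
    {
        static char buf[4 * 256];
        struct dmasg desc[4];

        setup_desc(desc, buf, 4, 256, 64, 4);
        printf("last -> first: %d\n", desc[3].next_desc_addr == desc);
        return 0;
    }
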
/linux-4.4.14/net/netfilter/
nft_cmp.c
75 struct nft_data_desc desc; nft_cmp_init() local
78 err = nft_data_init(NULL, &priv->data, sizeof(priv->data), &desc, nft_cmp_init()
83 err = nft_validate_register_load(priv->sreg, desc.len); nft_cmp_init()
88 priv->len = desc.len; nft_cmp_init()
124 struct nft_data_desc desc; nft_cmp_fast_init() local
129 err = nft_data_init(NULL, &data, sizeof(data), &desc, nft_cmp_fast_init()
134 err = nft_validate_register_load(priv->sreg, desc.len); nft_cmp_fast_init()
138 desc.len *= BITS_PER_BYTE; nft_cmp_fast_init()
139 mask = nft_cmp_fast_mask(desc.len); nft_cmp_fast_init()
142 priv->len = desc.len; nft_cmp_fast_init()
177 struct nft_data_desc desc; nft_cmp_select_ops() local
200 err = nft_data_init(NULL, &data, sizeof(data), &desc, nft_cmp_select_ops()
205 if (desc.len <= sizeof(u32) && op == NFT_CMP_EQ) nft_cmp_select_ops()
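
Both nft_cmp init paths above share one flow: nft_data_init() parses the comparison value and reports its length through a separate nft_data_desc, and that desc.len then validates the source register and is stored as the expression's length. A standalone model of the flow, with the netlink parsing faked by a memcpy:

    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    struct data_desc { unsigned int len; };

    struct cmp_expr {
        unsigned char data[16];
        unsigned int len;
        int sreg;
    };

    /* stand-in for nft_data_init(): copy and report the parsed length */
    static int data_init(void *dst, unsigned int dstsize,
                         struct data_desc *desc, const void *src,
                         unsigned int srclen)
    {
        if (srclen > dstsize)
            return -EINVAL;
        memcpy(dst, src, srclen);
        desc->len = srclen;
        return 0;
    }

    static int validate_register_load(int sreg, unsigned int len)
    {
        return (sreg >= 0 && len > 0 && len <= 16) ? 0 : -EINVAL;
    }

    static int cmp_init(struct cmp_expr *priv, const void *src,
                        unsigned int srclen)
    {
        struct data_desc desc;
        int err;

        err = data_init(priv->data, sizeof(priv->data), &desc, src, srclen);
        if (err < 0)
            return err;
        err = validate_register_load(priv->sreg, desc.len);
        if (err < 0)
            return err;
        priv->len = desc.len;         /* parsed length drives the cmp */
        return 0;
    }

    int main(void)
    {
        struct cmp_expr e = { .sreg = 1 };

        printf("rc=%d len=%u\n", cmp_init(&e, "abcd", 4), e.len);
        return 0;
    }
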
/linux-4.4.14/drivers/crypto/nx/
nx-aes-cbc.c
65 static int cbc_aes_nx_crypt(struct blkcipher_desc *desc, cbc_aes_nx_crypt() argument
71 struct nx_crypto_ctx *nx_ctx = crypto_blkcipher_ctx(desc->tfm); cbc_aes_nx_crypt()
87 rc = nx_build_sg_lists(nx_ctx, desc, dst, src, &to_process, cbc_aes_nx_crypt()
98 desc->flags & CRYPTO_TFM_REQ_MAY_SLEEP); cbc_aes_nx_crypt()
102 memcpy(desc->info, csbcpb->cpb.aes_cbc.cv, AES_BLOCK_SIZE); cbc_aes_nx_crypt()
114 static int cbc_aes_nx_encrypt(struct blkcipher_desc *desc, cbc_aes_nx_encrypt() argument
119 return cbc_aes_nx_crypt(desc, dst, src, nbytes, 1); cbc_aes_nx_encrypt()
122 static int cbc_aes_nx_decrypt(struct blkcipher_desc *desc, cbc_aes_nx_decrypt() argument
127 return cbc_aes_nx_crypt(desc, dst, src, nbytes, 0); cbc_aes_nx_decrypt()
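
One detail worth noticing in cbc_aes_nx_crypt(): because a request may be processed in chunks (to_process), the driver copies the updated chaining value back into desc->info after each pass, so the next chunk's IV is the previous chunk's last ciphertext block. A toy CBC with a 4-byte block and an XOR "cipher" showing why that copy-back matters:

    #include <stdio.h>
    #include <string.h>

    #define BLK 4

    static void cbc_xor_chunk(unsigned char *buf, unsigned int blocks,
                              unsigned char iv[BLK])
    {
        unsigned int b, i;

        for (b = 0; b < blocks; b++) {
            for (i = 0; i < BLK; i++)
                buf[i] = (buf[i] ^ iv[i]) ^ 0x5a;  /* "encrypt"        */
            memcpy(iv, buf, BLK);   /* chain: CV = last ciphertext blk */
            buf += BLK;
        }
    }

    int main(void)
    {
        unsigned char data[16] = "0123456789abcdef";
        unsigned char iv[BLK] = { 1, 2, 3, 4 };

        /* process 16 bytes as two 2-block chunks; iv carries across */
        cbc_xor_chunk(data, 2, iv);
        cbc_xor_chunk(data + 8, 2, iv);
        printf("last block starts 0x%02x\n", data[12]);
        return 0;
    }
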
/linux-4.4.14/include/uapi/linux/
mic_common.h
100 * @desc: Array of MIC virtio device descriptors.
104 struct mic_device_desc desc[0]; member in struct:mic_device_page
111 * (avail and desc rings)
131 * Max vring entries (power of 2) to ensure desc and avail rings
137 * Max size of the desc block in bytes: includes:
176 static inline unsigned mic_desc_size(const struct mic_device_desc *desc) mic_desc_size() argument
178 return sizeof(*desc) + desc->num_vq * sizeof(struct mic_vqconfig) mic_desc_size()
179 + desc->feature_len * 2 + desc->config_len; mic_desc_size()
183 mic_vq_config(const struct mic_device_desc *desc) mic_vq_config() argument
185 return (struct mic_vqconfig *)(desc + 1); mic_vq_config()
188 static inline __u8 *mic_vq_features(const struct mic_device_desc *desc) mic_vq_features() argument
190 return (__u8 *)(mic_vq_config(desc) + desc->num_vq); mic_vq_features()
193 static inline __u8 *mic_vq_configspace(const struct mic_device_desc *desc) mic_vq_configspace() argument
195 return mic_vq_features(desc) + desc->feature_len * 2; mic_vq_configspace()
197 static inline unsigned mic_total_desc_size(struct mic_device_desc *desc) mic_total_desc_size() argument
199 return mic_aligned_desc_size(desc) + sizeof(struct mic_device_ctrl); mic_total_desc_size()
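
mic_common.h is unusually self-contained here: the device page lays out each descriptor followed by its vring configs, a feature bitmap stored twice, then the config space, and every accessor is pointer arithmetic off the descriptor itself. The same math, compilable standalone (fields trimmed to what the arithmetic uses):

    #include <stdio.h>
    #include <stdlib.h>

    struct mic_vqconfig { unsigned int num; };

    struct mic_device_desc {
        unsigned char type;
        unsigned char num_vq;
        unsigned char feature_len;
        unsigned char config_len;
    };

    static size_t desc_size(const struct mic_device_desc *desc)
    {
        return sizeof(*desc) + desc->num_vq * sizeof(struct mic_vqconfig)
            + desc->feature_len * 2 + desc->config_len;
    }

    /* vring configs sit immediately after the descriptor */
    static struct mic_vqconfig *vq_config(const struct mic_device_desc *desc)
    {
        return (struct mic_vqconfig *)(desc + 1);
    }

    /* feature bitmap follows the last vring config */
    static unsigned char *vq_features(const struct mic_device_desc *desc)
    {
        return (unsigned char *)(vq_config(desc) + desc->num_vq);
    }

    /* config space follows the doubled feature bitmap */
    static unsigned char *vq_configspace(const struct mic_device_desc *desc)
    {
        return vq_features(desc) + desc->feature_len * 2;
    }

    int main(void)
    {
        struct mic_device_desc d = { .num_vq = 2, .feature_len = 4,
                                     .config_len = 8 };
        struct mic_device_desc *desc = malloc(desc_size(&d));

        *desc = d;
        printf("total=%zu, configspace at +%td\n", desc_size(desc),
               vq_configspace(desc) - (unsigned char *)desc);
        free(desc);
        return 0;
    }
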
/linux-4.4.14/drivers/mtd/ubi/
cdev.c
51 * @desc: volume descriptor
57 static int get_exclusive(struct ubi_volume_desc *desc) get_exclusive() argument
60 struct ubi_volume *vol = desc->vol; get_exclusive()
71 err = desc->mode; get_exclusive()
72 desc->mode = UBI_EXCLUSIVE; get_exclusive()
81 * @desc: volume descriptor
84 static void revoke_exclusive(struct ubi_volume_desc *desc, int mode) revoke_exclusive() argument
86 struct ubi_volume *vol = desc->vol; revoke_exclusive()
90 ubi_assert(vol->exclusive == 1 && desc->mode == UBI_EXCLUSIVE); revoke_exclusive()
102 desc->mode = mode; revoke_exclusive()
107 struct ubi_volume_desc *desc; vol_cdev_open() local
122 desc = ubi_open_volume(ubi_num, vol_id, mode); vol_cdev_open()
123 if (IS_ERR(desc)) vol_cdev_open()
124 return PTR_ERR(desc); vol_cdev_open()
126 file->private_data = desc; vol_cdev_open()
132 struct ubi_volume_desc *desc = file->private_data; vol_cdev_release() local
133 struct ubi_volume *vol = desc->vol; vol_cdev_release()
136 vol->ubi->ubi_num, vol->vol_id, desc->mode); vol_cdev_release()
152 ubi_close_volume(desc); vol_cdev_release()
158 struct ubi_volume_desc *desc = file->private_data; vol_cdev_llseek() local
159 struct ubi_volume *vol = desc->vol; vol_cdev_llseek()
173 struct ubi_volume_desc *desc = file->private_data; vol_cdev_fsync() local
174 struct ubi_device *ubi = desc->vol->ubi; vol_cdev_fsync()
187 struct ubi_volume_desc *desc = file->private_data; vol_cdev_read() local
188 struct ubi_volume *vol = desc->vol; vol_cdev_read()
264 struct ubi_volume_desc *desc = file->private_data; vol_cdev_direct_write() local
265 struct ubi_volume *vol = desc->vol; vol_cdev_direct_write()
340 struct ubi_volume_desc *desc = file->private_data; vol_cdev_write() local
341 struct ubi_volume *vol = desc->vol; vol_cdev_write()
366 revoke_exclusive(desc, UBI_READWRITE); vol_cdev_write()
381 revoke_exclusive(desc, UBI_READWRITE); vol_cdev_write()
391 struct ubi_volume_desc *desc = file->private_data; vol_cdev_ioctl() local
392 struct ubi_volume *vol = desc->vol; vol_cdev_ioctl()
413 if (desc->mode == UBI_READONLY) { vol_cdev_ioctl()
425 err = get_exclusive(desc); vol_cdev_ioctl()
432 revoke_exclusive(desc, UBI_READWRITE); vol_cdev_ioctl()
449 if (desc->mode == UBI_READONLY || vol_cdev_ioctl()
461 err = get_exclusive(desc); vol_cdev_ioctl()
467 revoke_exclusive(desc, UBI_READWRITE); vol_cdev_ioctl()
482 if (desc->mode == UBI_READONLY || vol_cdev_ioctl()
512 err = ubi_leb_map(desc, req.lnum); vol_cdev_ioctl()
526 err = ubi_leb_unmap(desc, lnum); vol_cdev_ioctl()
540 err = ubi_is_mapped(desc, lnum); vol_cdev_ioctl()
558 desc->vol->direct_writes = !!req.value; vol_cdev_ioctl()
573 ubi_get_volume_info(desc, &vi); vol_cdev_ioctl()
583 ubi_get_volume_info(desc, &vi); vol_cdev_ioctl()
738 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_METAONLY); rename_volumes()
739 if (IS_ERR(re->desc)) { rename_volumes()
740 err = PTR_ERR(re->desc); rename_volumes()
748 if (re->desc->vol->name_len == name_len && rename_volumes()
749 !memcmp(re->desc->vol->name, name, name_len)) { rename_volumes()
750 ubi_close_volume(re->desc); rename_volumes()
759 vol_id, re->desc->vol->name, name); rename_volumes()
767 struct ubi_volume_desc *desc; rename_volumes() local
777 if (re->new_name_len == re1->desc->vol->name_len && rename_volumes()
778 !memcmp(re->new_name, re1->desc->vol->name, rename_volumes()
779 re1->desc->vol->name_len)) { rename_volumes()
792 desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, rename_volumes()
794 if (IS_ERR(desc)) { rename_volumes()
795 err = PTR_ERR(desc); rename_volumes()
809 ubi_close_volume(desc); rename_volumes()
814 re1->desc = desc; rename_volumes()
817 re1->desc->vol->vol_id, re1->desc->vol->name); rename_volumes()
826 ubi_close_volume(re->desc); rename_volumes()
838 struct ubi_volume_desc *desc; ubi_cdev_ioctl() local
890 desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_EXCLUSIVE); ubi_cdev_ioctl()
891 if (IS_ERR(desc)) { ubi_cdev_ioctl()
892 err = PTR_ERR(desc); ubi_cdev_ioctl()
897 err = ubi_remove_volume(desc, 0); ubi_cdev_ioctl()
905 ubi_close_volume(desc); ubi_cdev_ioctl()
926 desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE); ubi_cdev_ioctl()
927 if (IS_ERR(desc)) { ubi_cdev_ioctl()
928 err = PTR_ERR(desc); ubi_cdev_ioctl()
932 pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, ubi_cdev_ioctl()
933 desc->vol->usable_leb_size); ubi_cdev_ioctl()
936 err = ubi_resize_volume(desc, pebs); ubi_cdev_ioctl()
938 ubi_close_volume(desc); ubi_cdev_ioctl()
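
The cdev.c hits show UBI's mode-upgrade protocol: destructive ioctls call get_exclusive() to temporarily promote an open volume descriptor to UBI_EXCLUSIVE, keep the returned previous mode, and restore it with revoke_exclusive() once the operation completes. A sketch with the volume's user counts reduced to plain fields and no locking:

    #include <stdio.h>
    #include <errno.h>

    enum mode { READONLY, READWRITE, EXCLUSIVE };

    struct volume { int readers, writers, exclusive; };

    struct volume_desc {
        struct volume *vol;
        enum mode mode;
    };

    /* returns the previous mode so the caller can restore it */
    static int get_exclusive(struct volume_desc *desc)
    {
        struct volume *vol = desc->vol;
        int old = desc->mode;

        if (vol->exclusive)
            return -EBUSY;
        vol->exclusive = 1;
        vol->readers = vol->writers = 0;
        desc->mode = EXCLUSIVE;
        return old;
    }

    static void revoke_exclusive(struct volume_desc *desc, enum mode mode)
    {
        desc->vol->exclusive = 0;
        if (mode == READWRITE)
            desc->vol->writers = 1;
        desc->mode = mode;
    }

    int main(void)
    {
        struct volume vol = { .writers = 1 };
        struct volume_desc desc = { .vol = &vol, .mode = READWRITE };
        int old = get_exclusive(&desc);

        printf("upgraded, old mode=%d\n", old);
        /* ... perform the destructive operation here ... */
        revoke_exclusive(&desc, old);
        printf("mode back to %d, exclusive=%d\n", desc.mode, vol.exclusive);
        return 0;
    }
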
/linux-4.4.14/drivers/tty/serial/8250/
8250_dma.c
72 struct dma_async_tx_descriptor *desc; serial8250_tx_dma() local
81 desc = dmaengine_prep_slave_single(dma->txchan, serial8250_tx_dma()
85 if (!desc) { serial8250_tx_dma()
91 desc->callback = __dma_tx_complete; serial8250_tx_dma()
92 desc->callback_param = p; serial8250_tx_dma()
94 dma->tx_cookie = dmaengine_submit(desc); serial8250_tx_dma()
116 struct dma_async_tx_descriptor *desc; serial8250_rx_dma() local
140 desc = dmaengine_prep_slave_single(dma->rxchan, dma->rx_addr, serial8250_rx_dma()
143 if (!desc) serial8250_rx_dma()
147 desc->callback = __dma_rx_complete; serial8250_rx_dma()
148 desc->callback_param = p; serial8250_rx_dma()
150 dma->rx_cookie = dmaengine_submit(desc); serial8250_rx_dma()
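
serial8250_tx_dma()/serial8250_rx_dma() follow the standard dmaengine sequence: prepare a descriptor, attach a completion callback and its parameter, then submit to obtain a cookie, bailing out (back to PIO) if preparation fails. A standalone model where the fake engine completes transfers inline instead of from an interrupt:

    #include <stdio.h>

    struct tx_descriptor {
        void (*callback)(void *param);
        void *callback_param;
        unsigned int len;
    };

    static struct tx_descriptor slot;   /* one descriptor, one channel */

    static struct tx_descriptor *prep_slave_single(unsigned int len)
    {
        slot.len = len;
        slot.callback = NULL;
        slot.callback_param = NULL;
        return &slot;
    }

    static int submit(struct tx_descriptor *desc)
    {
        static int cookie;

        /* pretend the transfer completed immediately */
        if (desc->callback)
            desc->callback(desc->callback_param);
        return ++cookie;
    }

    static void tx_complete(void *param)
    {
        printf("tx done for port %s\n", (const char *)param);
    }

    int main(void)
    {
        struct tx_descriptor *desc = prep_slave_single(64);
        int cookie;

        if (!desc)
            return 1;   /* the real prep can fail; fall back to PIO */
        desc->callback = tx_complete;
        desc->callback_param = "ttyS0";
        cookie = submit(desc);
        printf("cookie=%d\n", cookie);
        return 0;
    }
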
/linux-4.4.14/drivers/crypto/vmx/
aes_cbc.c
98 static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc, p8_aes_cbc_encrypt() argument
105 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); p8_aes_cbc_encrypt()
108 .info = desc->info, p8_aes_cbc_encrypt()
109 .flags = desc->flags p8_aes_cbc_encrypt()
122 ret = blkcipher_walk_virt(desc, &walk); p8_aes_cbc_encrypt()
129 ret = blkcipher_walk_done(desc, &walk, nbytes); p8_aes_cbc_encrypt()
139 static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc, p8_aes_cbc_decrypt() argument
146 crypto_tfm_ctx(crypto_blkcipher_tfm(desc->tfm)); p8_aes_cbc_decrypt()
149 .info = desc->info, p8_aes_cbc_decrypt()
150 .flags = desc->flags p8_aes_cbc_decrypt()
163 ret = blkcipher_walk_virt(desc, &walk); p8_aes_cbc_decrypt()
170 ret = blkcipher_walk_done(desc, &walk, nbytes); p8_aes_cbc_decrypt()
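
Both CBC drivers in these results (nx and vmx) drive the same blkcipher walk loop: start the walk over the request, process the contiguous span it exposes, report how much is left over, and repeat until nbytes reaches zero. A standalone imitation of that loop structure; the walk type and helpers are my own simplification, and the "cipher" is a byte XOR:

    #include <stdio.h>

    struct walk {
        unsigned char *data;    /* span exposed by this step */
        unsigned int nbytes;    /* size of that span         */
        unsigned int left;      /* bytes beyond this step    */
        unsigned int chunk;     /* max span per step         */
        unsigned char *next;
    };

    static void walk_advance(struct walk *w)
    {
        w->data = w->next;
        w->nbytes = w->left < w->chunk ? w->left : w->chunk;
    }

    static void walk_begin(struct walk *w, unsigned char *buf,
                           unsigned int total, unsigned int chunk)
    {
        w->next = buf;
        w->left = total;
        w->chunk = chunk;
        walk_advance(w);
    }

    /* like blkcipher_walk_done(): account for what the step consumed */
    static void walk_done(struct walk *w, unsigned int remain)
    {
        unsigned int consumed = w->nbytes - remain;

        w->next += consumed;
        w->left -= consumed;
        walk_advance(w);
    }

    int main(void)
    {
        unsigned char data[12] = "hello world";
        struct walk w;
        unsigned int nbytes, i;

        walk_begin(&w, data, sizeof(data), 4);
        while ((nbytes = w.nbytes)) {
            for (i = 0; i < nbytes; i++)   /* "encrypt" the span */
                w.data[i] ^= 0x55;
            walk_done(&w, 0);              /* nothing left over  */
        }
        printf("0x%02x\n", data[0]);       /* 'h' ^ 0x55 = 0x3d  */
        return 0;
    }
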
