Lines Matching refs:ctx
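
All matches below are lines that reference the identifier ctx in the FireWire OHCI host controller driver (drivers/firewire/ohci.c). Each entry shows the source file's line number, the matching line, and the function it belongs to; "argument" and "local" mark the places where ctx is declared rather than merely used.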

114 typedef int (*descriptor_callback_t)(struct context *ctx,
665 static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i) in ar_buffer_bus() argument
667 return page_private(ctx->pages[i]); in ar_buffer_bus()
670 static void ar_context_link_page(struct ar_context *ctx, unsigned int index) in ar_context_link_page() argument
674 d = &ctx->descriptors[index]; in ar_context_link_page()
680 d = &ctx->descriptors[ctx->last_buffer_index]; in ar_context_link_page()
683 ctx->last_buffer_index = index; in ar_context_link_page()
685 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in ar_context_link_page()
688 static void ar_context_release(struct ar_context *ctx) in ar_context_release() argument
692 vunmap(ctx->buffer); in ar_context_release()
695 if (ctx->pages[i]) { in ar_context_release()
696 dma_unmap_page(ctx->ohci->card.device, in ar_context_release()
697 ar_buffer_bus(ctx, i), in ar_context_release()
699 __free_page(ctx->pages[i]); in ar_context_release()
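
ar_buffer_bus() can be a one-liner because ar_context_init() stashes each receive page's DMA bus address in the page's private field, and ar_context_release() reads it back when unmapping. A minimal sketch of that alloc/map/stash pairing and its teardown, using only standard kernel page and DMA APIs (the helper names are hypothetical):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>

/* Allocate one receive page and remember its bus address in
 * page_private(), mirroring the sequence visible in ar_context_init(). */
static struct page *rx_page_alloc(struct device *dev)
{
	struct page *page = alloc_page(GFP_KERNEL | GFP_DMA32);
	dma_addr_t bus;

	if (!page)
		return NULL;

	bus = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, bus)) {
		__free_page(page);
		return NULL;
	}
	set_page_private(page, bus);	/* read back later via page_private() */
	return page;
}

/* Teardown in reverse order, as in ar_context_release(). */
static void rx_page_free(struct device *dev, struct page *page)
{
	dma_unmap_page(dev, page_private(page), PAGE_SIZE, DMA_FROM_DEVICE);
	__free_page(page);
}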
703 static void ar_context_abort(struct ar_context *ctx, const char *error_msg) in ar_context_abort() argument
705 struct fw_ohci *ohci = ctx->ohci; in ar_context_abort()
707 if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { in ar_context_abort()
708 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); in ar_context_abort()
721 static inline unsigned int ar_first_buffer_index(struct ar_context *ctx) in ar_first_buffer_index() argument
723 return ar_next_buffer_index(ctx->last_buffer_index); in ar_first_buffer_index()
730 static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, in ar_search_last_active_buffer() argument
733 unsigned int i, next_i, last = ctx->last_buffer_index; in ar_search_last_active_buffer()
736 i = ar_first_buffer_index(ctx); in ar_search_last_active_buffer()
737 res_count = ACCESS_ONCE(ctx->descriptors[i].res_count); in ar_search_last_active_buffer()
746 ctx->descriptors[next_i].res_count); in ar_search_last_active_buffer()
763 ctx->descriptors[next_i].res_count); in ar_search_last_active_buffer()
781 ar_context_abort(ctx, "corrupted descriptor"); in ar_search_last_active_buffer()
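
ar_first_buffer_index() and ar_search_last_active_buffer() treat the AR pages as a ring: the buffer after the most recently linked one is the oldest, and the scan walks forward reading each descriptor's res_count, which the controller updates by DMA. A small sketch of the two building blocks, with AR_BUFFERS assumed purely for illustration:

#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>

#define AR_BUFFERS	8	/* assumed ring size, for illustration only */

/* Wrap-around successor of a buffer index in the AR page ring. */
static unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

/* res_count is rewritten by the controller while the CPU polls it, so each
 * read goes through ACCESS_ONCE() (READ_ONCE() in later kernels) to force
 * exactly one fresh load per iteration of the scan. */
static u16 read_res_count(const __le16 *res_count)
{
	return le16_to_cpu(ACCESS_ONCE(*res_count));
}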
787 static void ar_sync_buffers_for_cpu(struct ar_context *ctx, in ar_sync_buffers_for_cpu() argument
793 i = ar_first_buffer_index(ctx); in ar_sync_buffers_for_cpu()
795 dma_sync_single_for_cpu(ctx->ohci->card.device, in ar_sync_buffers_for_cpu()
796 ar_buffer_bus(ctx, i), in ar_sync_buffers_for_cpu()
801 dma_sync_single_for_cpu(ctx->ohci->card.device, in ar_sync_buffers_for_cpu()
802 ar_buffer_bus(ctx, i), in ar_sync_buffers_for_cpu()
813 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) in handle_ar_packet() argument
815 struct fw_ohci *ohci = ctx->ohci; in handle_ar_packet()
847 ar_context_abort(ctx, "invalid packet length"); in handle_ar_packet()
860 ar_context_abort(ctx, "invalid tcode"); in handle_ar_packet()
902 } else if (ctx == &ohci->ar_request_ctx) { in handle_ar_packet()
911 static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end) in handle_ar_packets() argument
916 next = handle_ar_packet(ctx, p); in handle_ar_packets()
925 static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer) in ar_recycle_buffers() argument
929 i = ar_first_buffer_index(ctx); in ar_recycle_buffers()
931 dma_sync_single_for_device(ctx->ohci->card.device, in ar_recycle_buffers()
932 ar_buffer_bus(ctx, i), in ar_recycle_buffers()
934 ar_context_link_page(ctx, i); in ar_recycle_buffers()
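
ar_sync_buffers_for_cpu() and ar_recycle_buffers() are the two halves of the streaming-DMA ownership handshake: buffers are synced to the CPU before handle_ar_packets() parses them, and synced back to the device just before ar_context_link_page() hands them to the controller again. An illustration of that pairing (not the driver's code):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* The CPU is about to read data the controller wrote into this page. */
static void rx_buffer_for_cpu(struct device *dev, dma_addr_t bus, size_t bytes)
{
	dma_sync_single_for_cpu(dev, bus, bytes, DMA_FROM_DEVICE);
}

/* The page is about to be linked back into the ring for the controller. */
static void rx_buffer_for_device(struct device *dev, dma_addr_t bus)
{
	dma_sync_single_for_device(dev, bus, PAGE_SIZE, DMA_FROM_DEVICE);
}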
941 struct ar_context *ctx = (struct ar_context *)data; in ar_context_tasklet() local
945 p = ctx->pointer; in ar_context_tasklet()
949 end_buffer_index = ar_search_last_active_buffer(ctx, in ar_context_tasklet()
951 ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset); in ar_context_tasklet()
952 end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset; in ar_context_tasklet()
954 if (end_buffer_index < ar_first_buffer_index(ctx)) { in ar_context_tasklet()
961 void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE; in ar_context_tasklet()
962 p = handle_ar_packets(ctx, p, buffer_end); in ar_context_tasklet()
969 p = handle_ar_packets(ctx, p, end); in ar_context_tasklet()
972 ar_context_abort(ctx, "inconsistent descriptor"); in ar_context_tasklet()
976 ctx->pointer = p; in ar_context_tasklet()
977 ar_recycle_buffers(ctx, end_buffer_index); in ar_context_tasklet()
982 ctx->pointer = NULL; in ar_context_tasklet()
985 static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, in ar_context_init() argument
993 ctx->regs = regs; in ar_context_init()
994 ctx->ohci = ohci; in ar_context_init()
995 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); in ar_context_init()
998 ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32); in ar_context_init()
999 if (!ctx->pages[i]) in ar_context_init()
1001 dma_addr = dma_map_page(ohci->card.device, ctx->pages[i], in ar_context_init()
1004 __free_page(ctx->pages[i]); in ar_context_init()
1005 ctx->pages[i] = NULL; in ar_context_init()
1008 set_page_private(ctx->pages[i], dma_addr); in ar_context_init()
1012 pages[i] = ctx->pages[i]; in ar_context_init()
1014 pages[AR_BUFFERS + i] = ctx->pages[i]; in ar_context_init()
1015 ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL); in ar_context_init()
1016 if (!ctx->buffer) in ar_context_init()
1019 ctx->descriptors = ohci->misc_buffer + descriptors_offset; in ar_context_init()
1020 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset; in ar_context_init()
1023 d = &ctx->descriptors[i]; in ar_context_init()
1028 d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i)); in ar_context_init()
1029 d->branch_address = cpu_to_le32(ctx->descriptors_bus + in ar_context_init()
1036 ar_context_release(ctx); in ar_context_init()
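
The pages[AR_BUFFERS + i] assignment above is the notable trick in ar_context_init(): the same physical pages are mapped a second time directly after the ring, so a packet that crosses the end of the ring can still be parsed through one contiguous pointer (which is why ar_context_tasklet() can treat ctx->buffer + AR_BUFFERS * PAGE_SIZE as a simple boundary). A minimal sketch of that double mapping, mirroring every page for simplicity:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#define AR_BUFFERS	8	/* assumed ring size, for illustration only */

/* Map the ring's pages twice, back to back: addresses in the second half
 * alias the same physical pages as the first, so data that wraps past the
 * end of the ring stays virtually contiguous. */
static void *map_ring_with_mirror(struct page *ring_pages[AR_BUFFERS])
{
	struct page *pages[AR_BUFFERS * 2];
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++) {
		pages[i] = ring_pages[i];
		pages[AR_BUFFERS + i] = ring_pages[i];	/* the mirror */
	}

	return vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL);
}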
1041 static void ar_context_run(struct ar_context *ctx) in ar_context_run() argument
1046 ar_context_link_page(ctx, i); in ar_context_run()
1048 ctx->pointer = ctx->buffer; in ar_context_run()
1050 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1); in ar_context_run()
1051 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); in ar_context_run()
1069 struct context *ctx = (struct context *) data; in context_tasklet() local
1075 desc = list_entry(ctx->buffer_list.next, in context_tasklet()
1077 last = ctx->last; in context_tasklet()
1083 ctx->current_bus = address; in context_tasklet()
1094 if (!ctx->callback(ctx, d, last)) in context_tasklet()
1102 spin_lock_irqsave(&ctx->ohci->lock, flags); in context_tasklet()
1103 list_move_tail(&old_desc->list, &ctx->buffer_list); in context_tasklet()
1104 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in context_tasklet()
1106 ctx->last = last; in context_tasklet()
1114 static int context_add_buffer(struct context *ctx) in context_add_buffer() argument
1124 if (ctx->total_allocation >= 16*1024*1024) in context_add_buffer()
1127 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, in context_add_buffer()
1137 list_add_tail(&desc->list, &ctx->buffer_list); in context_add_buffer()
1138 ctx->total_allocation += PAGE_SIZE; in context_add_buffer()
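
context_add_buffer() grows a context's descriptor storage one coherent page at a time and strings the pages on ctx->buffer_list, refusing to grow past 16 MiB. A sketch of that allocate-and-queue step under assumed names and a simplified buffer layout (the driver's descriptor_buffer differs in detail); GFP_ATOMIC is assumed because the excerpted caller paths run under a spinlock:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>

/* Simplified stand-in for the driver's descriptor buffer (assumed layout). */
struct desc_buf {
	struct list_head list;
	dma_addr_t buffer_bus;		/* bus address of data[] */
	size_t buffer_size;
	size_t used;
	__le32 data[];			/* descriptors are carved out of this */
};

static int add_desc_buffer(struct device *dev, struct list_head *buffer_list,
			   size_t *total_allocation)
{
	struct desc_buf *buf;
	dma_addr_t bus;

	if (*total_allocation >= 16 * 1024 * 1024)
		return -ENOMEM;		/* hard cap, as in context_add_buffer() */

	buf = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_ATOMIC);
	if (!buf)
		return -ENOMEM;

	buf->buffer_bus = bus + offsetof(struct desc_buf, data);
	buf->buffer_size = PAGE_SIZE - offsetof(struct desc_buf, data);
	buf->used = 0;

	list_add_tail(&buf->list, buffer_list);
	*total_allocation += PAGE_SIZE;
	return 0;
}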
1143 static int context_init(struct context *ctx, struct fw_ohci *ohci, in context_init() argument
1146 ctx->ohci = ohci; in context_init()
1147 ctx->regs = regs; in context_init()
1148 ctx->total_allocation = 0; in context_init()
1150 INIT_LIST_HEAD(&ctx->buffer_list); in context_init()
1151 if (context_add_buffer(ctx) < 0) in context_init()
1154 ctx->buffer_tail = list_entry(ctx->buffer_list.next, in context_init()
1157 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); in context_init()
1158 ctx->callback = callback; in context_init()
1165 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); in context_init()
1166 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); in context_init()
1167 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); in context_init()
1168 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); in context_init()
1169 ctx->last = ctx->buffer_tail->buffer; in context_init()
1170 ctx->prev = ctx->buffer_tail->buffer; in context_init()
1171 ctx->prev_z = 1; in context_init()
1176 static void context_release(struct context *ctx) in context_release() argument
1178 struct fw_card *card = &ctx->ohci->card; in context_release()
1181 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) in context_release()
1188 static struct descriptor *context_get_descriptors(struct context *ctx, in context_get_descriptors() argument
1192 struct descriptor_buffer *desc = ctx->buffer_tail; in context_get_descriptors()
1201 if (desc->list.next == &ctx->buffer_list) { in context_get_descriptors()
1204 if (context_add_buffer(ctx) < 0) in context_get_descriptors()
1209 ctx->buffer_tail = desc; in context_get_descriptors()
1219 static void context_run(struct context *ctx, u32 extra) in context_run() argument
1221 struct fw_ohci *ohci = ctx->ohci; in context_run()
1223 reg_write(ohci, COMMAND_PTR(ctx->regs), in context_run()
1224 le32_to_cpu(ctx->last->branch_address)); in context_run()
1225 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); in context_run()
1226 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); in context_run()
1227 ctx->running = true; in context_run()
1231 static void context_append(struct context *ctx, in context_append() argument
1235 struct descriptor_buffer *desc = ctx->buffer_tail; in context_append()
1244 d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z); in context_append()
1256 if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) && in context_append()
1257 d_branch != ctx->prev && in context_append()
1258 (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) == in context_append()
1260 ctx->prev->branch_address = cpu_to_le32(d_bus | z); in context_append()
1263 ctx->prev = d; in context_append()
1264 ctx->prev_z = z; in context_append()
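
context_append() links the freshly written descriptor block into the DMA program by storing d_bus | z into the previous block's branch_address; context_run() and ar_context_run() use the same encoding when they write the first block's address into CommandPtr (ctx->descriptors_bus | 1 above). Descriptor blocks are 16-byte aligned, so the low four bits of the address are free and carry Z, the descriptor count of the block being branched to, with Z = 0 ending the program. A tiny sketch of the encoding:

#include <linux/types.h>
#include <asm/byteorder.h>

/* OHCI branch/CommandPtr word: 16-byte-aligned block address in bits 31:4,
 * Z (descriptor count of the target block, 0 = end of program) in bits 3:0. */
static __le32 branch_to(dma_addr_t block_bus, unsigned int z)
{
	/* block_bus must be 16-byte aligned and z in the range 0..15 */
	return cpu_to_le32((u32)block_bus | z);
}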
1267 static void context_stop(struct context *ctx) in context_stop() argument
1269 struct fw_ohci *ohci = ctx->ohci; in context_stop()
1273 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); in context_stop()
1274 ctx->running = false; in context_stop()
1277 reg = reg_read(ohci, CONTROL_SET(ctx->regs)); in context_stop()
1297 static int at_context_queue_packet(struct context *ctx, in at_context_queue_packet() argument
1300 struct fw_ohci *ohci = ctx->ohci; in at_context_queue_packet()
1307 d = context_get_descriptors(ctx, 4, &d_bus); in at_context_queue_packet()
1417 context_append(ctx, d, z, 4 - z); in at_context_queue_packet()
1419 if (ctx->running) in at_context_queue_packet()
1420 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in at_context_queue_packet()
1422 context_run(ctx, 0); in at_context_queue_packet()
1427 static void at_context_flush(struct context *ctx) in at_context_flush() argument
1429 tasklet_disable(&ctx->tasklet); in at_context_flush()
1431 ctx->flushing = true; in at_context_flush()
1432 context_tasklet((unsigned long)ctx); in at_context_flush()
1433 ctx->flushing = false; in at_context_flush()
1435 tasklet_enable(&ctx->tasklet); in at_context_flush()
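
at_context_flush() drives the completion handling synchronously: it disables the tasklet (which also waits for a concurrently running instance to finish), calls the handler by hand with ctx->flushing set, then re-enables the tasklet. The same disable/run-inline/enable shape appears again in ohci_cancel_packet() and ohci_flush_iso_completions() below. A self-contained sketch of the pattern with a stand-in context type:

#include <linux/interrupt.h>
#include <linux/types.h>

struct flushable_ctx {			/* stand-in for the driver's struct context */
	struct tasklet_struct tasklet;
	bool flushing;
};

/* Run the tasklet's work synchronously.  tasklet_disable() blocks new
 * schedules and waits for an in-flight run, so the direct call below
 * cannot race with the softirq path. */
static void context_flush(struct flushable_ctx *ctx,
			  void (*handler)(unsigned long))
{
	tasklet_disable(&ctx->tasklet);

	ctx->flushing = true;		/* consumed by the completion callback */
	handler((unsigned long)ctx);
	ctx->flushing = false;

	tasklet_enable(&ctx->tasklet);
}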
1600 static void handle_local_request(struct context *ctx, struct fw_packet *packet) in handle_local_request() argument
1604 if (ctx == &ctx->ohci->at_request_ctx) { in handle_local_request()
1606 packet->callback(packet, &ctx->ohci->card, packet->ack); in handle_local_request()
1617 handle_local_rom(ctx->ohci, packet, csr); in handle_local_request()
1623 handle_local_lock(ctx->ohci, packet, csr); in handle_local_request()
1626 if (ctx == &ctx->ohci->at_request_ctx) in handle_local_request()
1627 fw_core_handle_request(&ctx->ohci->card, packet); in handle_local_request()
1629 fw_core_handle_response(&ctx->ohci->card, packet); in handle_local_request()
1633 if (ctx == &ctx->ohci->at_response_ctx) { in handle_local_request()
1635 packet->callback(packet, &ctx->ohci->card, packet->ack); in handle_local_request()
1639 static void at_context_transmit(struct context *ctx, struct fw_packet *packet) in at_context_transmit() argument
1644 spin_lock_irqsave(&ctx->ohci->lock, flags); in at_context_transmit()
1646 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && in at_context_transmit()
1647 ctx->ohci->generation == packet->generation) { in at_context_transmit()
1648 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in at_context_transmit()
1649 handle_local_request(ctx, packet); in at_context_transmit()
1653 ret = at_context_queue_packet(ctx, packet); in at_context_transmit()
1654 spin_unlock_irqrestore(&ctx->ohci->lock, flags); in at_context_transmit()
1657 packet->callback(packet, &ctx->ohci->card, packet->ack); in at_context_transmit()
2556 struct context *ctx = &ohci->at_request_ctx; in ohci_cancel_packet() local
2560 tasklet_disable(&ctx->tasklet); in ohci_cancel_packet()
2575 tasklet_enable(&ctx->tasklet); in ohci_cancel_packet()
2734 static void flush_iso_completions(struct iso_context *ctx) in flush_iso_completions() argument
2736 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, in flush_iso_completions()
2737 ctx->header_length, ctx->header, in flush_iso_completions()
2738 ctx->base.callback_data); in flush_iso_completions()
2739 ctx->header_length = 0; in flush_iso_completions()
2742 static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr) in copy_iso_headers() argument
2746 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { in copy_iso_headers()
2747 if (ctx->base.drop_overflow_headers) in copy_iso_headers()
2749 flush_iso_completions(ctx); in copy_iso_headers()
2752 ctx_hdr = ctx->header + ctx->header_length; in copy_iso_headers()
2753 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); in copy_iso_headers()
2760 if (ctx->base.header_size > 0) in copy_iso_headers()
2762 if (ctx->base.header_size > 4) in copy_iso_headers()
2764 if (ctx->base.header_size > 8) in copy_iso_headers()
2765 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); in copy_iso_headers()
2766 ctx->header_length += ctx->base.header_size; in copy_iso_headers()
2773 struct iso_context *ctx = in handle_ir_packet_per_buffer() local
2795 copy_iso_headers(ctx, (u32 *) (last + 1)); in handle_ir_packet_per_buffer()
2798 flush_iso_completions(ctx); in handle_ir_packet_per_buffer()
2808 struct iso_context *ctx = in handle_ir_buffer_fill() local
2819 ctx->mc_buffer_bus = buffer_dma; in handle_ir_buffer_fill()
2820 ctx->mc_completed = completed; in handle_ir_buffer_fill()
2833 ctx->base.callback.mc(&ctx->base, in handle_ir_buffer_fill()
2835 ctx->base.callback_data); in handle_ir_buffer_fill()
2836 ctx->mc_completed = 0; in handle_ir_buffer_fill()
2842 static void flush_ir_buffer_fill(struct iso_context *ctx) in flush_ir_buffer_fill() argument
2844 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device, in flush_ir_buffer_fill()
2845 ctx->mc_buffer_bus & PAGE_MASK, in flush_ir_buffer_fill()
2846 ctx->mc_buffer_bus & ~PAGE_MASK, in flush_ir_buffer_fill()
2847 ctx->mc_completed, DMA_FROM_DEVICE); in flush_ir_buffer_fill()
2849 ctx->base.callback.mc(&ctx->base, in flush_ir_buffer_fill()
2850 ctx->mc_buffer_bus + ctx->mc_completed, in flush_ir_buffer_fill()
2851 ctx->base.callback_data); in flush_ir_buffer_fill()
2852 ctx->mc_completed = 0; in flush_ir_buffer_fill()
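
flush_ir_buffer_fill() has to sync only the completed part of the multichannel receive buffer, and dma_sync_single_range_for_cpu() wants that as a mapped base address plus an offset; splitting ctx->mc_buffer_bus at the page boundary with PAGE_MASK provides both. An illustration of that split (helper name hypothetical):

#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Sync 'len' bytes starting at an arbitrary bus address within a mapped
 * page, deriving the page-aligned base and the in-page offset from it. */
static void sync_partial_for_cpu(struct device *dev, dma_addr_t buffer_bus,
				 size_t len)
{
	dma_sync_single_range_for_cpu(dev,
				      buffer_bus & PAGE_MASK,	/* mapped base */
				      buffer_bus & ~PAGE_MASK,	/* offset into it */
				      len, DMA_FROM_DEVICE);
}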
2896 struct iso_context *ctx = in handle_it_packet() local
2910 if (ctx->header_length + 4 > PAGE_SIZE) { in handle_it_packet()
2911 if (ctx->base.drop_overflow_headers) in handle_it_packet()
2913 flush_iso_completions(ctx); in handle_it_packet()
2916 ctx_hdr = ctx->header + ctx->header_length; in handle_it_packet()
2917 ctx->last_timestamp = le16_to_cpu(last->res_count); in handle_it_packet()
2921 ctx->header_length += 4; in handle_it_packet()
2924 flush_iso_completions(ctx); in handle_it_packet()
2945 struct iso_context *uninitialized_var(ctx); in ohci_allocate_iso_context()
2961 ctx = &ohci->it_context_list[index]; in ohci_allocate_iso_context()
2974 ctx = &ohci->ir_context_list[index]; in ohci_allocate_iso_context()
2986 ctx = &ohci->ir_context_list[index]; in ohci_allocate_iso_context()
3000 memset(ctx, 0, sizeof(*ctx)); in ohci_allocate_iso_context()
3001 ctx->header_length = 0; in ohci_allocate_iso_context()
3002 ctx->header = (void *) __get_free_page(GFP_KERNEL); in ohci_allocate_iso_context()
3003 if (ctx->header == NULL) { in ohci_allocate_iso_context()
3007 ret = context_init(&ctx->context, ohci, regs, callback); in ohci_allocate_iso_context()
3013 ctx->mc_completed = 0; in ohci_allocate_iso_context()
3016 return &ctx->base; in ohci_allocate_iso_context()
3019 free_page((unsigned long)ctx->header); in ohci_allocate_iso_context()
3042 struct iso_context *ctx = container_of(base, struct iso_context, base); in ohci_start_iso() local
3043 struct fw_ohci *ohci = ctx->context.ohci; in ohci_start_iso()
3048 if (ctx->context.last->branch_address == 0) in ohci_start_iso()
3051 switch (ctx->base.type) { in ohci_start_iso()
3053 index = ctx - ohci->it_context_list; in ohci_start_iso()
3061 context_run(&ctx->context, match); in ohci_start_iso()
3068 index = ctx - ohci->ir_context_list; in ohci_start_iso()
3069 match = (tags << 28) | (sync << 8) | ctx->base.channel; in ohci_start_iso()
3077 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); in ohci_start_iso()
3078 context_run(&ctx->context, control); in ohci_start_iso()
3080 ctx->sync = sync; in ohci_start_iso()
3081 ctx->tags = tags; in ohci_start_iso()
3092 struct iso_context *ctx = container_of(base, struct iso_context, base); in ohci_stop_iso() local
3095 switch (ctx->base.type) { in ohci_stop_iso()
3097 index = ctx - ohci->it_context_list; in ohci_stop_iso()
3103 index = ctx - ohci->ir_context_list; in ohci_stop_iso()
3108 context_stop(&ctx->context); in ohci_stop_iso()
3109 tasklet_kill(&ctx->context.tasklet); in ohci_stop_iso()
3117 struct iso_context *ctx = container_of(base, struct iso_context, base); in ohci_free_iso_context() local
3122 context_release(&ctx->context); in ohci_free_iso_context()
3123 free_page((unsigned long)ctx->header); in ohci_free_iso_context()
3129 index = ctx - ohci->it_context_list; in ohci_free_iso_context()
3134 index = ctx - ohci->ir_context_list; in ohci_free_iso_context()
3140 index = ctx - ohci->ir_context_list; in ohci_free_iso_context()
3185 struct iso_context *ctx; in ohci_resume_iso_dma() local
3188 ctx = &ohci->ir_context_list[i]; in ohci_resume_iso_dma()
3189 if (ctx->context.running) in ohci_resume_iso_dma()
3190 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); in ohci_resume_iso_dma()
3194 ctx = &ohci->it_context_list[i]; in ohci_resume_iso_dma()
3195 if (ctx->context.running) in ohci_resume_iso_dma()
3196 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); in ohci_resume_iso_dma()
3201 static int queue_iso_transmit(struct iso_context *ctx, in queue_iso_transmit() argument
3236 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); in queue_iso_transmit()
3256 IT_HEADER_CHANNEL(ctx->base.channel) | in queue_iso_transmit()
3257 IT_HEADER_SPEED(ctx->base.speed)); in queue_iso_transmit()
3282 dma_sync_single_range_for_device(ctx->context.ohci->card.device, in queue_iso_transmit()
3300 context_append(&ctx->context, d, z, header_z); in queue_iso_transmit()
3305 static int queue_iso_packet_per_buffer(struct iso_context *ctx, in queue_iso_packet_per_buffer() argument
3310 struct device *device = ctx->context.ohci->card.device; in queue_iso_packet_per_buffer()
3321 packet_count = packet->header_length / ctx->base.header_size; in queue_iso_packet_per_buffer()
3322 header_size = max(ctx->base.header_size, (size_t)8); in queue_iso_packet_per_buffer()
3333 d = context_get_descriptors(&ctx->context, in queue_iso_packet_per_buffer()
3380 context_append(&ctx->context, d, z, header_z); in queue_iso_packet_per_buffer()
3386 static int queue_iso_buffer_fill(struct iso_context *ctx, in queue_iso_buffer_fill() argument
3406 d = context_get_descriptors(&ctx->context, 1, &d_bus); in queue_iso_buffer_fill()
3428 dma_sync_single_range_for_device(ctx->context.ohci->card.device, in queue_iso_buffer_fill()
3436 context_append(&ctx->context, d, 1, 0); in queue_iso_buffer_fill()
3447 struct iso_context *ctx = container_of(base, struct iso_context, base); in ohci_queue_iso() local
3451 spin_lock_irqsave(&ctx->context.ohci->lock, flags); in ohci_queue_iso()
3454 ret = queue_iso_transmit(ctx, packet, buffer, payload); in ohci_queue_iso()
3457 ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); in ohci_queue_iso()
3460 ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); in ohci_queue_iso()
3463 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); in ohci_queue_iso()
3470 struct context *ctx = in ohci_flush_queue_iso() local
3473 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); in ohci_flush_queue_iso()
3478 struct iso_context *ctx = container_of(base, struct iso_context, base); in ohci_flush_iso_completions() local
3481 tasklet_disable(&ctx->context.tasklet); in ohci_flush_iso_completions()
3483 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { in ohci_flush_iso_completions()
3484 context_tasklet((unsigned long)&ctx->context); in ohci_flush_iso_completions()
3489 if (ctx->header_length != 0) in ohci_flush_iso_completions()
3490 flush_iso_completions(ctx); in ohci_flush_iso_completions()
3493 if (ctx->mc_completed != 0) in ohci_flush_iso_completions()
3494 flush_ir_buffer_fill(ctx); in ohci_flush_iso_completions()
3500 clear_bit_unlock(0, &ctx->flushing_completions); in ohci_flush_iso_completions()
3504 tasklet_enable(&ctx->context.tasklet); in ohci_flush_iso_completions()
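
ohci_flush_iso_completions() wraps its manual context_tasklet() run in test_and_set_bit_lock()/clear_bit_unlock() on ctx->flushing_completions, so a nested call arriving from inside the completion callback bails out instead of flushing recursively. A sketch of that try-lock guard (the callback parameter stands in for the flush_iso_completions()/flush_ir_buffer_fill() work above):

#include <linux/bitops.h>

/* Bit 0 of *flushing acts as a non-sleeping "already flushing" try-lock. */
static void flush_completions_once(unsigned long *flushing,
				   void (*do_flush)(void *), void *arg)
{
	if (test_and_set_bit_lock(0, flushing))
		return;			/* a flush is already in progress */

	do_flush(arg);			/* e.g. run the tasklet handler inline */

	clear_bit_unlock(0, flushing);
}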