Searched refs:tail (Results 1 - 200 of 1033) sorted by relevance

/linux-4.1.27/include/linux/
circ_buf.h
11 int tail; member in struct:circ_buf
15 #define CIRC_CNT(head,tail,size) (((head) - (tail)) & ((size)-1))
18 as a completely full buffer has head == tail, which is the same as
20 #define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))
23 accessing head and tail more than once, so they can change
25 #define CIRC_CNT_TO_END(head,tail,size) \
26 ({int end = (size) - (tail); \
31 #define CIRC_SPACE_TO_END(head,tail,size) \
33 int n = (end + (tail)) & ((size)-1); \
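
For orientation, a minimal user-space sketch of how these two macros compose (the macro bodies are copied from the header above; the 16-slot buffer and payload are invented for the example, and size must be a power of two):

/* Illustrative only -- not from the kernel tree. */
#include <stdio.h>

#define CIRC_CNT(head,tail,size)   (((head) - (tail)) & ((size)-1))
#define CIRC_SPACE(head,tail,size) CIRC_CNT((tail),((head)+1),(size))

int main(void)
{
    enum { SIZE = 16 };
    char buf[SIZE];
    int head = 0, tail = 0;

    /* producer: fill until CIRC_SPACE reports the ring full */
    while (CIRC_SPACE(head, tail, SIZE) > 0) {
        buf[head] = 'x';
        head = (head + 1) & (SIZE - 1);
    }

    /* one slot stays unused, so head == tail always means "empty" */
    printf("used=%d free=%d\n",
           CIRC_CNT(head, tail, SIZE),    /* 15 */
           CIRC_SPACE(head, tail, SIZE)); /* 0  */
    return 0;
}
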
osq_lock.h
16 * Stores an encoded value of the CPU # of the tail node in the queue.
19 atomic_t tail; member in struct:optimistic_spin_queue
29 atomic_set(&lock->tail, OSQ_UNLOCKED_VAL); osq_lock_init()
bio.h
536 * fast access to the tail.
540 struct bio *tail; member in struct:bio_list
550 bl->head = bl->tail = NULL; bio_list_init()
573 if (bl->tail) bio_list_add()
574 bl->tail->bi_next = bio; bio_list_add()
578 bl->tail = bio; bio_list_add()
587 if (!bl->tail) bio_list_add_head()
588 bl->tail = bio; bio_list_add_head()
596 if (bl->tail) bio_list_merge()
597 bl->tail->bi_next = bl2->head; bio_list_merge()
601 bl->tail = bl2->tail; bio_list_merge()
611 bl2->tail->bi_next = bl->head; bio_list_merge_head()
613 bl->tail = bl2->tail; bio_list_merge_head()
630 bl->tail = NULL; bio_list_pop()
642 bl->head = bl->tail = NULL; bio_list_get()
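
The bio_list above is the common head+tail singly linked list: the extra tail pointer buys O(1) append. A generic sketch of the same pattern (hypothetical node/list types, not the bio API):

struct node { struct node *next; int val; };
struct list { struct node *head, *tail; };

static void list_add_tail(struct list *l, struct node *n)
{
    n->next = NULL;
    if (l->tail)
        l->tail->next = n;   /* non-empty: link after current tail */
    else
        l->head = n;         /* empty: the node is also the head */
    l->tail = n;
}

static struct node *list_pop(struct list *l)
{
    struct node *n = l->head;
    if (n) {
        l->head = n->next;
        if (!l->head)
            l->tail = NULL;  /* removed the last element */
    }
    return n;
}
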
tty_flip.h
19 struct tty_buffer *tb = port->buf.tail; tty_insert_flip_char()
string.h
157 const char *tail = strrchr(path, '/'); kbasename() local
158 return tail ? tail + 1 : path; kbasename()
hid-debug.h
44 int tail; member in struct:hid_debug_list
hidraw.h
38 int tail; member in struct:hidraw_list
vmw_vmci_defs.h
426 * a line in a store, for example, you walk up to the tail.
437 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
737 * Helper to add a given offset to a head or tail pointer. Wraps the
776 * vmci_qp_add_pointer() is used to manipulate the tail itself.
799 * Helper routine for getting the head and the tail pointer for a queue.
832 u64 tail; vmci_q_header_free_space() local
836 tail = vmci_q_header_producer_tail(produce_q_header); vmci_q_header_free_space()
839 if (tail >= produce_q_size || head >= produce_q_size) vmci_q_header_free_space()
843 * Deduct 1 to avoid tail becoming equal to head which causes vmci_q_header_free_space()
844 * ambiguity. If head and tail are equal it means that the vmci_q_header_free_space()
847 if (tail >= head) vmci_q_header_free_space()
848 free_space = produce_q_size - (tail - head) - 1; vmci_q_header_free_space()
850 free_space = head - tail - 1; vmci_q_header_free_space()
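
The same free-space rule works for any ring that stores head and tail as offsets into a buffer of 'size' bytes; a sketch of the two branches, with the deduct-1 convention called out in the comment above (illustrative helper, not the VMCI API):

#include <stdint.h>

static int64_t ring_free_space(uint64_t head, uint64_t tail, uint64_t size)
{
    if (tail >= size || head >= size)
        return -1;                        /* corrupt pointers */
    /* deduct 1 so a full ring never reaches tail == head, which
     * would be indistinguishable from an empty one */
    if (tail >= head)
        return size - (tail - head) - 1;
    return head - tail - 1;
}
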
uinput.h
68 unsigned char tail; member in struct:uinput_device
/linux-4.1.27/arch/sparc/include/asm/
intr_queue.h
7 #define INTRQ_CPU_MONDO_TAIL 0x3c8 /* CPU mondo tail */
9 #define INTRQ_DEVICE_MONDO_TAIL 0x3d8 /* Device mondo tail */
11 #define INTRQ_RESUM_MONDO_TAIL 0x3e8 /* Resumable error mondo tail */
/linux-4.1.27/drivers/staging/unisys/uislib/
uisqueue.c
57 unsigned int head, tail, nof; spar_signal_insert() local
64 /* capture current head and tail */ spar_signal_insert()
66 tail = readl(&pqhdr->tail); spar_signal_insert()
68 /* queue is full if (head + 1) % n equals tail */ spar_signal_insert()
69 if (((head + 1) % readl(&pqhdr->max_slots)) == tail) { spar_signal_insert()
115 unsigned int head, tail; spar_signal_remove() local
120 /* capture current head and tail */ spar_signal_remove()
122 tail = readl(&pqhdr->tail); spar_signal_remove()
124 /* queue is empty if the head index equals the tail index */ spar_signal_remove()
125 if (head == tail) { spar_signal_remove()
131 tail = (tail + 1) % readl(&pqhdr->max_slots); spar_signal_remove()
133 /* copy signal from tail location to the area pointed to by pSignal */ spar_signal_remove()
135 (tail * readl(&pqhdr->signal_size)); spar_signal_remove()
139 writel(tail, &pqhdr->tail); spar_signal_remove()
170 unsigned int head, tail, count = 0; spar_signal_remove_all() local
175 /* capture current head and tail */ spar_signal_remove_all()
177 tail = pqhdr->tail; spar_signal_remove_all()
179 /* queue is empty if the head index equals the tail index */ spar_signal_remove_all()
180 if (head == tail) spar_signal_remove_all()
183 while (head != tail) { spar_signal_remove_all()
185 tail = (tail + 1) % pqhdr->max_slots; spar_signal_remove_all()
187 /* copy signal from tail location to the area pointed spar_signal_remove_all()
192 (tail * pqhdr->signal_size); spar_signal_remove_all()
197 pqhdr->tail = tail; spar_signal_remove_all()
223 return readl(&pqhdr->head) == readl(&pqhdr->tail); spar_signalqueue_empty()
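
Because these paths use plain modulo arithmetic rather than power-of-two masking, max_slots can be any size; the full/empty tests reduce to this sketch (illustrative, not the driver's code):

static int ring_full(unsigned int head, unsigned int tail,
                     unsigned int slots)
{
    /* one slot is sacrificed so that "full" differs from "empty" */
    return ((head + 1) % slots) == tail;
}

static int ring_empty(unsigned int head, unsigned int tail)
{
    return head == tail;
}
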
/linux-4.1.27/arch/arm/kernel/
perf_callchain.c
30 * next frame tail.
33 user_backtrace(struct frame_tail __user *tail, user_backtrace() argument
39 if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) user_backtrace()
43 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); user_backtrace()
55 if (tail + 1 >= buftail.fp) user_backtrace()
64 struct frame_tail __user *tail; perf_callchain_user() local
76 tail = (struct frame_tail __user *)regs->ARM_fp - 1; perf_callchain_user()
79 tail && !((unsigned long)tail & 0x3)) perf_callchain_user()
80 tail = user_backtrace(tail, entry); perf_callchain_user()
/linux-4.1.27/lib/
list_sort.c
24 struct list_head head, *tail = &head; merge() local
29 tail->next = a; merge()
32 tail->next = b; merge()
35 tail = tail->next; merge()
37 tail->next = a?:b; merge()
54 struct list_head *tail = head; merge_and_restore_back_links() local
60 tail->next = a; merge_and_restore_back_links()
61 a->prev = tail; merge_and_restore_back_links()
64 tail->next = b; merge_and_restore_back_links()
65 b->prev = tail; merge_and_restore_back_links()
68 tail = tail->next; merge_and_restore_back_links()
70 tail->next = a ? : b; merge_and_restore_back_links()
80 (*cmp)(priv, tail->next, tail->next); merge_and_restore_back_links()
82 tail->next->prev = tail; merge_and_restore_back_links()
83 tail = tail->next; merge_and_restore_back_links()
84 } while (tail->next); merge_and_restore_back_links()
86 tail->next = head; merge_and_restore_back_links()
87 head->prev = tail; merge_and_restore_back_links()
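
Both routines above are the textbook tail-pointer merge: a dummy head on the stack, a tail cursor that links the smaller element, then the leftover run appended in one step. The same shape with a plain int key (generic sketch, not the list_head version):

struct snode { struct snode *next; int key; };

static struct snode *merge_sorted(struct snode *a, struct snode *b)
{
    struct snode head, *tail = &head;   /* dummy head on the stack */

    while (a && b) {
        if (a->key <= b->key) {
            tail->next = a;
            a = a->next;
        } else {
            tail->next = b;
            b = b->next;
        }
        tail = tail->next;
    }
    tail->next = a ? a : b;             /* splice the leftover run */
    return head.next;
}
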
/linux-4.1.27/drivers/staging/unisys/visorutil/
charqueue.c
26 #define IS_EMPTY(charqueue) (charqueue->head == charqueue->tail)
32 int head, tail; member in struct:charqueue
47 cq->tail = 0; visor_charqueue_create()
59 if (charqueue->head == charqueue->tail) visor_charqueue_enqueue()
61 charqueue->tail = (charqueue->tail+1) % alloc_slots; visor_charqueue_enqueue()
84 charqueue->tail = (charqueue->tail+1) % alloc_slots; charqueue_dequeue_1()
85 return charqueue->buf[charqueue->tail]; charqueue_dequeue_1()
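
Note the convention in charqueue_dequeue_1: tail points at the slot before the next element, so the dequeue increments first and reads second. Standalone (illustrative types):

struct cq { int head, tail, slots; unsigned char buf[128]; };

static unsigned char cq_dequeue(struct cq *q)
{
    /* caller has already checked the queue is not empty */
    q->tail = (q->tail + 1) % q->slots;
    return q->buf[q->tail];
}
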
/linux-4.1.27/arch/arm/oprofile/
common.c
86 static struct frame_tail* user_backtrace(struct frame_tail *tail) user_backtrace() argument
91 if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) user_backtrace()
93 if (__copy_from_user_inatomic(buftail, tail, sizeof(buftail))) user_backtrace()
100 if (tail + 1 >= buftail[0].fp) user_backtrace()
108 struct frame_tail *tail = ((struct frame_tail *) regs->ARM_fp) - 1; arm_backtrace() local
117 while (depth-- && tail && !((unsigned long) tail & 3)) arm_backtrace()
118 tail = user_backtrace(tail); arm_backtrace()
/linux-4.1.27/sound/oss/
msnd.c
104 f->tail = 0; msnd_fifo_alloc()
116 f->len = f->tail = f->head = 0; msnd_fifo_make_empty()
127 if (f->head <= f->tail) { msnd_fifo_write_io()
129 if (nwritten > f->n - f->tail) msnd_fifo_write_io()
130 nwritten = f->n - f->tail; msnd_fifo_write_io()
133 nwritten = f->head - f->tail; msnd_fifo_write_io()
138 memcpy_fromio(f->data + f->tail, buf, nwritten); msnd_fifo_write_io()
143 f->tail += nwritten; msnd_fifo_write_io()
144 f->tail %= f->n; msnd_fifo_write_io()
158 if (f->head <= f->tail) { msnd_fifo_write()
160 if (nwritten > f->n - f->tail) msnd_fifo_write()
161 nwritten = f->n - f->tail; msnd_fifo_write()
164 nwritten = f->head - f->tail; msnd_fifo_write()
169 memcpy(f->data + f->tail, buf, nwritten); msnd_fifo_write()
174 f->tail += nwritten; msnd_fifo_write()
175 f->tail %= f->n; msnd_fifo_write()
189 if (f->tail <= f->head) { msnd_fifo_read_io()
195 nread = f->tail - f->head; msnd_fifo_read_io()
220 if (f->tail <= f->head) { msnd_fifo_read()
226 nread = f->tail - f->head; msnd_fifo_read()
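
The two-branch clamping above is what keeps every memcpy contiguous: either write up to the physical end of the buffer (head <= tail) or up to the read head. Distilled into one helper (a sketch, not the msnd API):

static unsigned int fifo_contig_write(unsigned int head, unsigned int tail,
                                      unsigned int n, unsigned int want)
{
    unsigned int nwritten;

    if (head <= tail) {
        nwritten = want;
        if (nwritten > n - tail)
            nwritten = n - tail;    /* clamp at the wrap point */
    } else {
        nwritten = head - tail;     /* free gap below the head */
        if (nwritten > want)
            nwritten = want;
    }
    return nwritten;
}
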
midibuf.c
35 int len, head, tail; member in struct:midi_buf
66 q->queue[q->tail] = (data); \
67 q->len++; q->tail = (q->tail+1) % MAX_QUEUE_SIZE; \
188 midi_in_buf[dev]->len = midi_in_buf[dev]->head = midi_in_buf[dev]->tail = 0; MIDIbuf_open()
200 midi_out_buf[dev]->len = midi_out_buf[dev]->head = midi_out_buf[dev]->tail = 0; MIDIbuf_open()
350 should q->len,tail&head be atomic_t? */ MIDIbuf_read()
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_cq.c
68 if (unlikely(next == wc->tail)) { ipath_cq_enter()
135 u32 tail; ipath_poll_cq() local
146 tail = wc->tail; ipath_poll_cq()
147 if (tail > (u32) cq->ibcq.cqe) ipath_poll_cq()
148 tail = (u32) cq->ibcq.cqe; ipath_poll_cq()
150 if (tail == wc->head) ipath_poll_cq()
153 *entry = wc->kqueue[tail]; ipath_poll_cq()
154 if (tail >= cq->ibcq.cqe) ipath_poll_cq()
155 tail = 0; ipath_poll_cq()
157 tail++; ipath_poll_cq()
159 wc->tail = tail; ipath_poll_cq()
223 * Allocate the completion queue entries and head/tail pointers. ipath_create_cq()
289 wc->tail = 0; ipath_create_cq()
357 cq->queue->head != cq->queue->tail) ipath_req_notify_cq()
376 u32 head, tail, n; ipath_resize_cq() local
410 * Make sure head and tail are sane since they ipath_resize_cq()
417 tail = old_wc->tail; ipath_resize_cq()
418 if (tail > (u32) cq->ibcq.cqe) ipath_resize_cq()
419 tail = (u32) cq->ibcq.cqe; ipath_resize_cq()
420 if (head < tail) ipath_resize_cq()
421 n = cq->ibcq.cqe + 1 + head - tail; ipath_resize_cq()
423 n = head - tail; ipath_resize_cq()
428 for (n = 0; tail != head; n++) { ipath_resize_cq()
430 wc->uqueue[n] = old_wc->uqueue[tail]; ipath_resize_cq()
432 wc->kqueue[n] = old_wc->kqueue[tail]; ipath_resize_cq()
433 if (tail == (u32) cq->ibcq.cqe) ipath_resize_cq()
434 tail = 0; ipath_resize_cq()
436 tail++; ipath_resize_cq()
440 wc->tail = 0; ipath_resize_cq()
ipath_srq.c
72 if (next == wq->tail) { ipath_post_srq_receive()
176 srq->rq.wq->tail = 0; ipath_create_srq()
226 u32 sz, size, n, head, tail; ipath_modify_srq() local
271 tail = owq->tail; ipath_modify_srq()
272 if (tail >= srq->rq.size) ipath_modify_srq()
273 tail = 0; ipath_modify_srq()
275 if (n < tail) ipath_modify_srq()
276 n += srq->rq.size - tail; ipath_modify_srq()
278 n -= tail; ipath_modify_srq()
285 while (tail != head) { ipath_modify_srq()
289 wqe = get_rwqe_ptr(&srq->rq, tail); ipath_modify_srq()
296 if (++tail >= srq->rq.size) ipath_modify_srq()
297 tail = 0; ipath_modify_srq()
302 wq->tail = 0; ipath_modify_srq()
ipath_sdma.c
293 /* reset our notion of head and tail */ sdma_abort_task()
672 u16 tail; ipath_sdma_verbs_send() local
714 tail = dd->ipath_sdma_descq_tail; ipath_sdma_verbs_send()
715 descqp = &dd->ipath_sdma_descq[tail].qw[0]; ipath_sdma_verbs_send()
720 tx->txreq.start_idx = tail; ipath_sdma_verbs_send()
722 /* increment the tail */ ipath_sdma_verbs_send()
723 if (++tail == dd->ipath_sdma_descq_cnt) { ipath_sdma_verbs_send()
724 tail = 0; ipath_sdma_verbs_send()
753 /* increment the tail */ ipath_sdma_verbs_send()
754 if (++tail == dd->ipath_sdma_descq_cnt) { ipath_sdma_verbs_send()
755 tail = 0; ipath_sdma_verbs_send()
781 if (!tail) ipath_sdma_verbs_send()
791 /* Commit writes to memory and advance the tail on the chip */ ipath_sdma_verbs_send()
793 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail); ipath_sdma_verbs_send()
795 tx->txreq.next_descq_idx = tail; ipath_sdma_verbs_send()
797 dd->ipath_sdma_descq_tail = tail; ipath_sdma_verbs_send()
805 while (tail != dd->ipath_sdma_descq_tail) { ipath_sdma_verbs_send()
806 if (!tail) ipath_sdma_verbs_send()
807 tail = dd->ipath_sdma_descq_cnt - 1; ipath_sdma_verbs_send()
809 tail--; ipath_sdma_verbs_send()
810 unmap_desc(dd, tail); ipath_sdma_verbs_send()
ipath_ud.c
64 u32 tail; ipath_ud_loopback() local
119 * Note that it is safe to drop the lock after changing rq->tail ipath_ud_loopback()
124 tail = wq->tail; ipath_ud_loopback()
125 /* Validate tail before using it since it is user writable. */ ipath_ud_loopback()
126 if (tail >= rq->size) ipath_ud_loopback()
127 tail = 0; ipath_ud_loopback()
128 if (unlikely(tail == wq->head)) { ipath_ud_loopback()
133 wqe = get_rwqe_ptr(rq, tail); ipath_ud_loopback()
146 if (++tail >= rq->size) ipath_ud_loopback()
147 tail = 0; ipath_ud_loopback()
148 wq->tail = tail; ipath_ud_loopback()
160 if (n < tail) ipath_ud_loopback()
161 n += rq->size - tail; ipath_ud_loopback()
163 n -= tail; ipath_ud_loopback()
ipath_ruc.c
174 u32 tail; ipath_get_rwqe() local
194 tail = wq->tail; ipath_get_rwqe()
195 /* Validate tail before using it since it is user writable. */ ipath_get_rwqe()
196 if (tail >= rq->size) ipath_get_rwqe()
197 tail = 0; ipath_get_rwqe()
199 if (unlikely(tail == wq->head)) { ipath_get_rwqe()
205 wqe = get_rwqe_ptr(rq, tail); ipath_get_rwqe()
206 if (++tail >= rq->size) ipath_get_rwqe()
207 tail = 0; ipath_get_rwqe()
213 wq->tail = tail; ipath_get_rwqe()
227 if (n < tail) ipath_get_rwqe()
228 n += rq->size - tail; ipath_get_rwqe()
230 n -= tail; ipath_get_rwqe()
ipath_user_sdma.c
686 unsigned ofs, u16 tail) ipath_user_sdma_send_frag()
694 descqp = &dd->ipath_sdma_descq[tail].qw[0]; ipath_user_sdma_send_frag()
713 u16 tail; ipath_user_sdma_push_pkts() local
728 tail = dd->ipath_sdma_descq_tail; ipath_user_sdma_push_pkts()
735 u16 dtail = tail; ipath_user_sdma_push_pkts()
741 ipath_user_sdma_send_frag(dd, pkt, i, ofs, tail); ipath_user_sdma_push_pkts()
744 if (++tail == dd->ipath_sdma_descq_cnt) { ipath_user_sdma_push_pkts()
745 tail = 0; ipath_user_sdma_push_pkts()
778 /* advance the tail on the chip if necessary */ ipath_user_sdma_push_pkts()
779 if (dd->ipath_sdma_descq_tail != tail) { ipath_user_sdma_push_pkts()
781 ipath_write_kreg(dd, dd->ipath_kregs->kr_senddmatail, tail); ipath_user_sdma_push_pkts()
782 dd->ipath_sdma_descq_tail = tail; ipath_user_sdma_push_pkts()
684 ipath_user_sdma_send_frag(struct ipath_devdata *dd, struct ipath_user_sdma_pkt *pkt, int idx, unsigned ofs, u16 tail) ipath_user_sdma_send_frag() argument
ipath_qp.c
362 qp->r_rq.wq->tail = 0; ipath_reset_qp()
413 u32 tail; ipath_error_qp() local
422 tail = wq->tail; ipath_error_qp()
423 if (tail >= qp->r_rq.size) ipath_error_qp()
424 tail = 0; ipath_error_qp()
425 while (tail != head) { ipath_error_qp()
426 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; ipath_error_qp()
427 if (++tail >= qp->r_rq.size) ipath_error_qp()
428 tail = 0; ipath_error_qp()
431 wq->tail = tail; ipath_error_qp()
692 u32 tail; ipath_compute_aeth() local
698 tail = wq->tail; ipath_compute_aeth()
699 if (tail >= qp->r_rq.size) ipath_compute_aeth()
700 tail = 0; ipath_compute_aeth()
706 credits = head - tail; ipath_compute_aeth()
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_cq.c
71 if (unlikely(next == wc->tail)) { qib_cq_enter()
142 u32 tail; qib_poll_cq() local
153 tail = wc->tail; qib_poll_cq()
154 if (tail > (u32) cq->ibcq.cqe) qib_poll_cq()
155 tail = (u32) cq->ibcq.cqe; qib_poll_cq()
157 if (tail == wc->head) qib_poll_cq()
160 *entry = wc->kqueue[tail]; qib_poll_cq()
161 if (tail >= cq->ibcq.cqe) qib_poll_cq()
162 tail = 0; qib_poll_cq()
164 tail++; qib_poll_cq()
166 wc->tail = tail; qib_poll_cq()
238 * Allocate the completion queue entries and head/tail pointers. qib_create_cq()
305 wc->tail = 0; qib_create_cq()
373 cq->queue->head != cq->queue->tail) qib_req_notify_cq()
392 u32 head, tail, n; qib_resize_cq() local
426 * Make sure head and tail are sane since they qib_resize_cq()
433 tail = old_wc->tail; qib_resize_cq()
434 if (tail > (u32) cq->ibcq.cqe) qib_resize_cq()
435 tail = (u32) cq->ibcq.cqe; qib_resize_cq()
436 if (head < tail) qib_resize_cq()
437 n = cq->ibcq.cqe + 1 + head - tail; qib_resize_cq()
439 n = head - tail; qib_resize_cq()
444 for (n = 0; tail != head; n++) { qib_resize_cq()
446 wc->uqueue[n] = old_wc->uqueue[tail]; qib_resize_cq()
448 wc->kqueue[n] = old_wc->kqueue[tail]; qib_resize_cq()
449 if (tail == (u32) cq->ibcq.cqe) qib_resize_cq()
450 tail = 0; qib_resize_cq()
452 tail++; qib_resize_cq()
456 wc->tail = 0; qib_resize_cq()
qib_srq.c
72 if (next == wq->tail) { qib_post_srq_receive()
172 srq->rq.wq->tail = 0; qib_create_srq()
222 u32 sz, size, n, head, tail; qib_modify_srq() local
260 * validate head and tail pointer values and compute qib_modify_srq()
265 tail = owq->tail; qib_modify_srq()
266 if (head >= srq->rq.size || tail >= srq->rq.size) { qib_modify_srq()
271 if (n < tail) qib_modify_srq()
272 n += srq->rq.size - tail; qib_modify_srq()
274 n -= tail; qib_modify_srq()
281 while (tail != head) { qib_modify_srq()
285 wqe = get_rwqe_ptr(&srq->rq, tail); qib_modify_srq()
292 if (++tail >= srq->rq.size) qib_modify_srq()
293 tail = 0; qib_modify_srq()
298 wq->tail = 0; qib_modify_srq()
qib_ruc.c
146 u32 tail; qib_get_rwqe() local
166 tail = wq->tail; qib_get_rwqe()
167 /* Validate tail before using it since it is user writable. */ qib_get_rwqe()
168 if (tail >= rq->size) qib_get_rwqe()
169 tail = 0; qib_get_rwqe()
170 if (unlikely(tail == wq->head)) { qib_get_rwqe()
176 wqe = get_rwqe_ptr(rq, tail); qib_get_rwqe()
178 * Even though we update the tail index in memory, the verbs qib_get_rwqe()
182 if (++tail >= rq->size) qib_get_rwqe()
183 tail = 0; qib_get_rwqe()
184 wq->tail = tail; qib_get_rwqe()
203 if (n < tail) qib_get_rwqe()
204 n += rq->size - tail; qib_get_rwqe()
206 n -= tail; qib_get_rwqe()
qib_sdma.c
174 * Reset our notion of head and tail. sdma_sw_clean_up_task()
541 u16 tail; qib_sdma_verbs_send() local
572 tail = ppd->sdma_descq_tail; qib_sdma_verbs_send()
573 descqp = &ppd->sdma_descq[tail].qw[0]; qib_sdma_verbs_send()
577 /* increment the tail */ qib_sdma_verbs_send()
578 if (++tail == ppd->sdma_descq_cnt) { qib_sdma_verbs_send()
579 tail = 0; qib_sdma_verbs_send()
584 tx->txreq.start_idx = tail; qib_sdma_verbs_send()
611 /* increment the tail */ qib_sdma_verbs_send()
612 if (++tail == ppd->sdma_descq_cnt) { qib_sdma_verbs_send()
613 tail = 0; qib_sdma_verbs_send()
639 if (!tail) qib_sdma_verbs_send()
649 tx->txreq.next_descq_idx = tail; qib_sdma_verbs_send()
650 ppd->dd->f_sdma_update_tail(ppd, tail); qib_sdma_verbs_send()
657 if (!tail) qib_sdma_verbs_send()
658 tail = ppd->sdma_descq_cnt - 1; qib_sdma_verbs_send()
660 tail--; qib_sdma_verbs_send()
661 if (tail == ppd->sdma_descq_tail) qib_sdma_verbs_send()
663 unmap_desc(ppd, tail); qib_sdma_verbs_send()
728 u16 head, tail, cnt; dump_sdma_state() local
731 tail = ppd->sdma_descq_tail; dump_sdma_state()
738 "SDMA ppd->sdma_descq_tail: %u\n", tail); dump_sdma_state()
743 while (head != tail) { dump_sdma_state()
qib_qp.c
413 qp->r_rq.wq->tail = 0; qib_reset_qp()
532 u32 tail; qib_error_qp() local
541 tail = wq->tail; qib_error_qp()
542 if (tail >= qp->r_rq.size) qib_error_qp()
543 tail = 0; qib_error_qp()
544 while (tail != head) { qib_error_qp()
545 wc.wr_id = get_rwqe_ptr(&qp->r_rq, tail)->wr_id; qib_error_qp()
546 if (++tail >= qp->r_rq.size) qib_error_qp()
547 tail = 0; qib_error_qp()
550 wq->tail = tail; qib_error_qp()
926 u32 tail; qib_compute_aeth() local
932 tail = wq->tail; qib_compute_aeth()
933 if (tail >= qp->r_rq.size) qib_compute_aeth()
934 tail = 0; qib_compute_aeth()
940 credits = head - tail; qib_compute_aeth()
/linux-4.1.27/net/sunrpc/
xdr.c
136 struct kvec *tail = xdr->tail; xdr_inline_pages() local
146 tail->iov_base = buf + offset; xdr_inline_pages()
147 tail->iov_len = buflen - offset; xdr_inline_pages()
313 * moved into the inlined pages and/or the tail.
318 struct kvec *head, *tail; xdr_shrink_bufhead() local
322 tail = buf->tail; xdr_shrink_bufhead()
329 /* Shift the tail first */ xdr_shrink_bufhead()
330 if (tail->iov_len != 0) { xdr_shrink_bufhead()
331 if (tail->iov_len > len) { xdr_shrink_bufhead()
332 copy = tail->iov_len - len; xdr_shrink_bufhead()
333 memmove((char *)tail->iov_base + len, xdr_shrink_bufhead()
334 tail->iov_base, copy); xdr_shrink_bufhead()
336 /* Copy from the inlined pages into the tail */ xdr_shrink_bufhead()
341 if (offs >= tail->iov_len) xdr_shrink_bufhead()
343 else if (copy > tail->iov_len - offs) xdr_shrink_bufhead()
344 copy = tail->iov_len - offs; xdr_shrink_bufhead()
346 _copy_from_pages((char *)tail->iov_base + offs, xdr_shrink_bufhead()
350 /* Do we also need to copy data from the head into the tail ? */ xdr_shrink_bufhead()
353 if (copy > tail->iov_len) xdr_shrink_bufhead()
354 copy = tail->iov_len; xdr_shrink_bufhead()
355 memcpy(tail->iov_base, xdr_shrink_bufhead()
389 * moved into the tail.
394 struct kvec *tail; xdr_shrink_pagelen() local
399 tail = buf->tail; xdr_shrink_pagelen()
404 /* Shift the tail first */ xdr_shrink_pagelen()
406 unsigned int free_space = tailbuf_len - tail->iov_len; xdr_shrink_pagelen()
410 tail->iov_len += free_space; xdr_shrink_pagelen()
413 if (tail->iov_len > len) { xdr_shrink_pagelen()
414 char *p = (char *)tail->iov_base + len; xdr_shrink_pagelen()
415 memmove(p, tail->iov_base, tail->iov_len - len); xdr_shrink_pagelen()
417 copy = tail->iov_len; xdr_shrink_pagelen()
418 /* Copy from the inlined pages into the tail */ xdr_shrink_pagelen()
419 _copy_from_pages((char *)tail->iov_base, xdr_shrink_pagelen()
463 int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len; xdr_init_encode()
591 * head, tail, and page lengths are adjusted to correspond.
600 * simple case of truncating from one position in the tail to another.
607 struct kvec *tail = buf->tail; xdr_truncate_encode() local
617 fraglen = min_t(int, buf->len - len, tail->iov_len); xdr_truncate_encode()
618 tail->iov_len -= fraglen; xdr_truncate_encode()
620 if (tail->iov_len) { xdr_truncate_encode()
621 xdr->p = tail->iov_base + tail->iov_len; xdr_truncate_encode()
695 struct kvec *iov = buf->tail; xdr_write_pages()
770 xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len); xdr_set_next_page()
779 xdr_set_iov(xdr, xdr->buf->tail, xdr->buf->len); xdr_set_next_buffer()
925 /* Truncate page data and move it into the tail */ xdr_align_pages()
939 * bytes is moved into the XDR tail[].
956 xdr->iov = iov = buf->tail; xdr_read_pages()
963 * Position current pointer at beginning of tail, and xdr_read_pages()
981 * bytes is moved into the XDR tail[]. The current pointer is then
988 * Position current pointer at beginning of tail, and xdr_enter_page()
1002 buf->tail[0] = empty_iov; xdr_buf_from_iov()
1050 if (base < buf->tail[0].iov_len) { xdr_buf_subsegment()
1051 subbuf->tail[0].iov_base = buf->tail[0].iov_base + base; xdr_buf_subsegment()
1052 subbuf->tail[0].iov_len = min_t(unsigned int, len, xdr_buf_subsegment()
1053 buf->tail[0].iov_len - base); xdr_buf_subsegment()
1054 len -= subbuf->tail[0].iov_len; xdr_buf_subsegment()
1057 base -= buf->tail[0].iov_len; xdr_buf_subsegment()
1058 subbuf->tail[0].iov_len = 0; xdr_buf_subsegment()
1082 if (buf->tail[0].iov_len) { xdr_buf_trim()
1083 cur = min_t(size_t, buf->tail[0].iov_len, trim); xdr_buf_trim()
1084 buf->tail[0].iov_len -= cur; xdr_buf_trim()
1121 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); __read_bytes_from_xdr_buf()
1122 memcpy(obj, subbuf->tail[0].iov_base, this_len); __read_bytes_from_xdr_buf()
1152 this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len); __write_bytes_to_xdr_buf()
1153 memcpy(subbuf->tail[0].iov_base, obj, this_len); __write_bytes_to_xdr_buf()
1194 * entirely in the head or the tail, set object to point to it; otherwise
1195 * try to find space for it at the end of the tail, copy it there, and
1210 /* ..or is the obj contained entirely in the tail? */ xdr_buf_read_netobj()
1211 obj->data = subbuf.tail[0].iov_base; xdr_buf_read_netobj()
1212 if (subbuf.tail[0].iov_len == obj->len) xdr_buf_read_netobj()
1215 /* use end of tail as storage for obj: xdr_buf_read_netobj()
1219 * tail.) */ xdr_buf_read_netobj()
1222 if (buf->tail[0].iov_len != 0) xdr_buf_read_netobj()
1223 obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len; xdr_buf_read_netobj()
1388 base = buf->page_len; /* align to start of tail */ xdr_xcode_array2()
1391 /* process tail */ xdr_xcode_array2()
1394 c = buf->tail->iov_base + base; xdr_xcode_array2()
1442 buf->head->iov_len + buf->page_len + buf->tail->iov_len) xdr_encode_array2()
1501 if (offset < buf->tail[0].iov_len) { xdr_process_buf()
1502 thislen = buf->tail[0].iov_len - offset; xdr_process_buf()
1505 sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen); xdr_process_buf()
socklib.c
137 len = xdr->tail[0].iov_len; xdr_partial_copy_from_skb()
139 copied += copy_actor(desc, (char *)xdr->tail[0].iov_base + base, len - base); xdr_partial_copy_from_skb()
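
An xdr_buf is a three-part buffer -- head kvec, page array, tail kvec -- and most of the routines above are offset bookkeeping across those parts. The tail step of xdr_buf_subsegment (lines 1050-1058 above) reduces to this sketch (hypothetical kv type; 'base' is whatever offset survives the head and pages):

#include <stddef.h>

struct kv { char *base; size_t len; };

static size_t sub_tail(const struct kv *tail, struct kv *sub,
                       size_t base, size_t len)
{
    if (base < tail->len) {
        sub->base = tail->base + base;
        sub->len  = len < tail->len - base ? len : tail->len - base;
        return sub->len;               /* bytes satisfied by the tail */
    }
    sub->len = 0;                      /* subrange ends before the tail */
    return 0;
}
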
/linux-4.1.27/arch/x86/include/asm/
spinlock.h
76 old.tickets.tail = new.tickets.head + TICKET_LOCK_INC; __ticket_check_and_clear_slowpath()
77 new.tickets.tail = old.tickets.tail; __ticket_check_and_clear_slowpath()
86 return __tickets_equal(lock.tickets.head, lock.tickets.tail); arch_spin_value_unlocked()
91 * the queue, and the other indicating the current tail. The lock is acquired
92 * by atomically noting the tail and incrementing it by one (thus adding
94 becomes equal to the initial value of the tail.
96 * We use an xadd covering *both* parts of the lock, to increment the tail and
98 * issues and should be optimal for the uncontended case. Note the tail must be
104 register struct __raw_tickets inc = { .tail = TICKET_LOCK_INC }; arch_spin_lock()
107 if (likely(inc.head == inc.tail)) arch_spin_lock()
115 if (__tickets_equal(inc.head, inc.tail)) arch_spin_lock()
119 __ticket_lock_spinning(lock, inc.tail); arch_spin_lock()
132 if (!__tickets_equal(old.tickets.head, old.tickets.tail)) arch_spin_trylock()
164 return !__tickets_equal(tmp.tail, tmp.head); arch_spin_is_locked()
172 return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC; arch_spin_is_contended()
192 if (__tickets_equal(tmp.head, tmp.tail) || arch_spin_unlock_wait()
spinlock_types.h
30 __ticket_t head, tail; member in struct:arch_spinlock::__anon3085::__raw_tickets
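
head and tail here form a ticket lock: a locker atomically takes a ticket from tail, then spins until head reaches it; unlock advances head. A user-space sketch with C11 atomics (TICKET_LOCK_INC taken as 1; the kernel's paravirt slowpath flag is omitted):

#include <stdatomic.h>

struct ticket_lock {
    atomic_uint head;   /* ticket currently being served */
    atomic_uint tail;   /* next ticket to hand out */
};

static void ticket_lock(struct ticket_lock *l)
{
    unsigned int me = atomic_fetch_add(&l->tail, 1); /* take a ticket */
    while (atomic_load(&l->head) != me)
        ;                                            /* spin until served */
}

static void ticket_unlock(struct ticket_lock *l)
{
    atomic_fetch_add(&l->head, 1);                   /* serve the next */
}
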
/linux-4.1.27/drivers/of/
pdt.c
139 struct property *head, *tail; of_pdt_build_prop_list() local
141 head = tail = of_pdt_build_one_prop(node, NULL, of_pdt_build_prop_list()
144 tail->next = of_pdt_build_one_prop(node, NULL, NULL, NULL, 0); of_pdt_build_prop_list()
145 tail = tail->next; of_pdt_build_prop_list()
146 while(tail) { of_pdt_build_prop_list()
147 tail->next = of_pdt_build_one_prop(node, tail->name, of_pdt_build_prop_list()
149 tail = tail->next; of_pdt_build_prop_list()
overlay.c
388 /* add to the tail of the overlay list */ of_overlay_create()
455 * Newly applied overlays are inserted at the tail of the overlay list,
456 * so a top most overlay is the one that is closest to the tail.
460 * the one closest to the tail. If another overlay has affected this
461 device node and is closest to the tail, then removal is not permitted.
539 /* the tail of list is guaranteed to be safe to remove */ of_overlay_destroy_all()
/linux-4.1.27/fs/affs/
inode.c
20 struct affs_tail *tail; affs_iget() local
48 tail = AFFS_TAIL(sb, bh); affs_iget()
49 prot = be32_to_cpu(tail->protect); affs_iget()
74 id = be16_to_cpu(tail->uid); affs_iget()
82 id = be16_to_cpu(tail->gid); affs_iget()
90 switch (be32_to_cpu(tail->stype)) { affs_iget()
96 if (be32_to_cpu(tail->stype) == ST_USERDIR || affs_iget()
125 size = be32_to_cpu(tail->size); affs_iget()
134 if (tail->link_chain) affs_iget()
149 = (be32_to_cpu(tail->change.days) * (24 * 60 * 60) + affs_iget()
150 be32_to_cpu(tail->change.mins) * 60 + affs_iget()
151 be32_to_cpu(tail->change.ticks) / 50 + affs_iget()
170 struct affs_tail *tail; affs_write_inode() local
184 tail = AFFS_TAIL(sb, bh); affs_write_inode()
185 if (tail->stype == cpu_to_be32(ST_ROOT)) { affs_write_inode()
188 tail->protect = cpu_to_be32(AFFS_I(inode)->i_protect); affs_write_inode()
189 tail->size = cpu_to_be32(inode->i_size); affs_write_inode()
190 secs_to_datestamp(inode->i_mtime.tv_sec,&tail->change); affs_write_inode()
201 tail->uid = cpu_to_be16(uid); affs_write_inode()
203 tail->gid = cpu_to_be16(gid); affs_write_inode()
/linux-4.1.27/drivers/scsi/bfa/
bfa_cs.h
67 u32 tail; member in struct:bfa_trc_mod_s
100 trcm->head = trcm->tail = trcm->stopped = 0; bfa_trc_init()
113 int tail = trcm->tail; __bfa_trc() local
114 struct bfa_trc_s *trc = &trcm->trc[tail]; __bfa_trc()
124 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); __bfa_trc()
125 if (trcm->tail == trcm->head) __bfa_trc()
133 int tail = trcm->tail; __bfa_trc32() local
134 struct bfa_trc_s *trc = &trcm->trc[tail]; __bfa_trc32()
144 trcm->tail = (trcm->tail + 1) & (BFA_TRC_MAX - 1); __bfa_trc32()
145 if (trcm->tail == trcm->head) __bfa_trc32()
183 * bfa_q_deq_tail - dequeue an element from tail of the queue
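
This trace buffer is an overwrite-oldest ring: the writer always advances tail, and when tail catches head the oldest record is dropped. The pattern in isolation (illustrative; BFA_TRC_MAX is likewise a power of two):

#define TRC_MAX 256

struct trc_ring { unsigned int head, tail; int rec[TRC_MAX]; };

static void trc_write(struct trc_ring *r, int val)
{
    r->rec[r->tail] = val;
    r->tail = (r->tail + 1) & (TRC_MAX - 1);
    if (r->tail == r->head)                      /* ring wrapped: */
        r->head = (r->head + 1) & (TRC_MAX - 1); /* drop the oldest */
}
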
/linux-4.1.27/fs/reiserfs/
tail_conversion.c
12 * access to tail : when one is going to read tail it must make sure, that is
18 * tail. -ENOSPC if no disk space for conversion
195 * reads tail through page cache, insert direct item. When direct item
197 * what we expect from it (number of cut bytes). But when tail remains
213 char *tail; indirect2direct() local
215 loff_t pos, pos1; /* position of first byte of the tail */ indirect2direct()
239 * we are protected by i_mutex. The tail cannot disappear, not
241 * we are in truncate or packing tail in file_release indirect2direct()
244 tail = (char *)kmap(page); /* this can schedule */ indirect2direct()
259 reiserfs_panic(sb, "vs-5530", "tail position " indirect2direct()
270 * we want a pointer to the first byte of the tail in the page. indirect2direct()
274 tail = tail + (pos & (PAGE_CACHE_SIZE - 1)); indirect2direct()
281 /* Insert tail as new direct item in the tree */ indirect2direct()
283 tail ? tail : NULL) < 0) { indirect2direct()
ioctl.c
17 * 1) REISERFS_IOC_UNPACK - try to unpack tail from direct item into indirect
168 * Function tries to convert tail from direct item into indirect.
202 * we unpack by finding the page with the tail, and calling reiserfs_unpack()
204 * reiserfs_get_block to unpack the tail for us. reiserfs_unpack()
file.c
19 * This implies an unnecessary copy of the tail and an unnecessary indirect item
21 * It avoids unnecessary tail packings (balances) for files that are written in
26 * small enough to have a tail, and the tail is currently in an
27 * unformatted node, the tail is converted back into a direct item.
29 * We use reiserfs_truncate_file to pack the tail, since it already has
/linux-4.1.27/net/sched/
sch_choke.c
72 unsigned int tail; member in struct:choke_sched_data
82 return (q->tail - q->head) & q->tab_mask; choke_len()
102 if (q->head == q->tail) choke_zap_head_holes()
107 /* Move tail pointer backwards to reuse holes */ choke_zap_tail_holes()
111 q->tail = (q->tail - 1) & q->tab_mask; choke_zap_tail_holes()
112 if (q->head == q->tail) choke_zap_tail_holes()
114 } while (q->tab[q->tail] == NULL); choke_zap_tail_holes()
127 if (idx == q->tail) choke_drop_by_idx()
232 * Will return NULL if queue is empty (q->head == q->tail)
260 if (q->head == q->tail) choke_match_random()
332 q->tab[q->tail] = skb; choke_enqueue()
333 q->tail = (q->tail + 1) & q->tab_mask; choke_enqueue()
358 if (q->head == q->tail) { choke_dequeue()
451 unsigned int oqlen = sch->q.qlen, tail = 0; choke_change() local
453 while (q->head != q->tail) { choke_change()
459 if (tail < mask) { choke_change()
460 ntab[tail++] = skb; choke_change()
469 q->tail = tail; choke_change()
486 if (q->head == q->tail) choke_change()
612 return (q->head != q->tail) ? q->tab[q->head] : NULL; choke_peek_head()
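
choke drops packets out of the middle of its table, leaving NULL holes, and reclaims tail-side holes lazily, exactly as in choke_zap_tail_holes above. The same walk as a standalone helper (sketch):

static void zap_tail_holes(void **tab, unsigned int head,
                           unsigned int *tail, unsigned int mask)
{
    do {
        *tail = (*tail - 1) & mask;   /* step back over trailing holes */
        if (head == *tail)
            break;                    /* table became empty */
    } while (tab[*tail] == NULL);
}
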
sch_sfq.c
134 struct sfq_slot *tail; /* current slot in round */ member in struct:sfq_sched_data
278 /* remove one skb from tail of slot queue */ slot_dequeue_tail()
306 /* add skb to slot queue (tail add) */ slot_queue_add()
323 /* Queue is full! Find the longest slot and drop tail packet from it */ sfq_drop()
341 x = q->tail->next; sfq_drop()
343 q->tail->next = slot->next; sfq_drop()
469 if (q->tail == NULL) { /* It is the first flow */ sfq_enqueue()
472 slot->next = q->tail->next; sfq_enqueue()
473 q->tail->next = x; sfq_enqueue()
479 q->tail = slot; sfq_enqueue()
508 if (q->tail == NULL) sfq_dequeue()
512 a = q->tail->next; sfq_dequeue()
515 q->tail = slot; sfq_dequeue()
530 q->tail = NULL; /* no more active slots */ sfq_dequeue()
533 q->tail->next = next_a; sfq_dequeue()
579 q->tail = NULL; sfq_rehash()
609 if (q->tail == NULL) { /* It is the first flow */ sfq_rehash()
612 slot->next = q->tail->next; sfq_rehash()
613 q->tail->next = x; sfq_rehash()
615 q->tail = slot; sfq_rehash()
631 if (!q->filter_list && q->tail) sfq_perturbation()
749 q->tail = NULL; sfq_init()
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/
fm10k_mbx.c
34 fifo->tail = 0; fm10k_fifo_init()
45 return fifo->tail - fifo->head; fm10k_fifo_used()
56 return fifo->size + fifo->head - fifo->tail; fm10k_fifo_unused()
67 return fifo->head == fifo->tail; fm10k_fifo_empty()
83 * fm10k_fifo_tail_offset - returns indices of tail with given offset
85 * @offset: offset to add to tail
87 * This function returns the indices into the fifo based on tail + offset
91 return (fifo->tail + offset) & (fifo->size - 1); fm10k_fifo_tail_offset()
137 fifo->head = fifo->tail; fm10k_fifo_drop_all()
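
Unlike the CIRC_* macros, this FIFO lets head and tail run free as u16 counters and masks only when indexing, so the occupancy math needs no masking at all (sketch mirroring the three helpers above):

#include <stdint.h>

struct ffifo { uint16_t head, tail, size; };   /* size is a power of two */

static uint16_t ffifo_used(const struct ffifo *f)
{
    return f->tail - f->head;        /* u16 arithmetic wraps correctly */
}

static uint16_t ffifo_unused(const struct ffifo *f)
{
    return f->size + f->head - f->tail;
}

static uint16_t ffifo_slot(const struct ffifo *f, uint16_t idx)
{
    return idx & (f->size - 1);      /* mask only on access */
}
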
141 * fm10k_mbx_index_len - Convert a head/tail index into a length value
144 @tail: tail index
146 * This function takes the head and tail index and determines the length
149 static u16 fm10k_mbx_index_len(struct fm10k_mbx_info *mbx, u16 head, u16 tail) fm10k_mbx_index_len() argument
151 u16 len = tail - head; fm10k_mbx_index_len()
154 if (len > tail) fm10k_mbx_index_len()
161 * fm10k_mbx_tail_add - Determine new tail value with added offset
165 * This function takes the local tail index and recomputes it for
170 u16 tail = (mbx->tail + offset + 1) & ((mbx->mbmem_len << 1) - 1); fm10k_mbx_tail_add() local
173 return (tail > mbx->tail) ? --tail : ++tail; fm10k_mbx_tail_add()
177 * fm10k_mbx_tail_sub - Determine new tail value with subtracted offset
181 * This function takes the local tail index and recomputes it for
186 u16 tail = (mbx->tail - offset - 1) & ((mbx->mbmem_len << 1) - 1); fm10k_mbx_tail_sub() local
189 return (tail < mbx->tail) ? ++tail : --tail; fm10k_mbx_tail_sub()
229 * pushed onto the tail of the Rx queue.
233 u32 *tail = mbx->rx.buffer + fm10k_fifo_tail_offset(&mbx->rx, 0); fm10k_mbx_pushed_tail_len() local
235 /* pushed tail is only valid if pushed is set */ fm10k_mbx_pushed_tail_len()
239 return FM10K_TLV_DWORD_LEN(*tail); fm10k_mbx_pushed_tail_len()
246 * @tail_offset: additional offset to add to tail pointer
251 * the tail you can use tail_offset to adjust the pointer.
257 u32 *tail = fifo->buffer + end; fm10k_fifo_write_copy() local
269 memcpy(tail, msg, end << 2); fm10k_fifo_write_copy()
273 * fm10k_fifo_enqueue - Enqueues the message to the tail of the FIFO
278 * contained in the first DWORD of the message and will place at the tail
296 /* memory barrier to guarantee FIFO is written before tail update */ fm10k_fifo_enqueue()
299 /* Update Tx FIFO tail */ fm10k_fifo_enqueue()
300 fifo->tail += len; fm10k_fifo_enqueue()
343 * tail and len determines the length to copy.
351 u16 end, len, tail, mask; fm10k_mbx_write_copy() local
356 /* determine data length and mbmem tail index */ fm10k_mbx_write_copy()
359 tail = fm10k_mbx_tail_sub(mbx, len); fm10k_mbx_write_copy()
360 if (tail > mask) fm10k_mbx_write_copy()
361 tail++; fm10k_mbx_write_copy()
373 /* adjust tail to match offset for FIFO */ fm10k_mbx_write_copy()
374 tail &= mask; fm10k_mbx_write_copy()
375 if (!tail) fm10k_mbx_write_copy()
376 tail++; fm10k_mbx_write_copy()
379 fm10k_write_reg(hw, mbmem + tail++, *(head++)); fm10k_mbx_write_copy()
390 * This function will push the tail index forward based on the remote
398 u16 mbmem_len, len, ack = fm10k_mbx_index_len(mbx, head, mbx->tail); fm10k_mbx_pull_head()
410 /* update tail and record number of bytes in transit */ fm10k_mbx_pull_head()
411 mbx->tail = fm10k_mbx_tail_add(mbx, len - ack); fm10k_mbx_pull_head()
441 u32 *tail = fifo->buffer; fm10k_mbx_read_copy() local
452 tail += end; fm10k_mbx_read_copy()
455 for (end = fifo->size - end; len; tail = fifo->buffer) { fm10k_mbx_read_copy()
463 *(tail++) = fm10k_read_reg(hw, mbmem + head++); fm10k_mbx_read_copy()
467 /* memory barrier to guarantee FIFO is written before tail update */ fm10k_mbx_read_copy()
472 * fm10k_mbx_push_tail - Pushes up to 15 DWORDs on to tail of FIFO
475 * @tail: tail index of message
477 * This function will first validate the tail index and size for the
484 u16 tail) fm10k_mbx_push_tail()
487 u16 len, seq = fm10k_mbx_index_len(mbx, mbx->head, tail); fm10k_mbx_push_tail()
516 fifo->tail += len; fm10k_mbx_push_tail()
630 u16 len = mbx->tail_len - fm10k_mbx_index_len(mbx, head, mbx->tail); fm10k_mbx_update_local_crc()
731 memmove(fifo->buffer, fifo->buffer + fifo->tail, mbx->pushed << 2); fm10k_mbx_dequeue_rx()
733 /* shift head and tail based on the memory we moved */ fm10k_mbx_dequeue_rx()
734 fifo->tail -= fifo->head; fm10k_mbx_dequeue_rx()
741 * fm10k_mbx_enqueue_tx - Enqueues the message to the tail of the Tx FIFO
747 * contained in the first DWORD of the message and will place at the tail
865 FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) | fm10k_mbx_create_data_hdr()
891 FM10K_MSG_HDR_FIELD_SET(mbx->tail, TAIL) | fm10k_mbx_create_disconnect_hdr()
943 u16 type, rsvd0, head, tail, size; fm10k_mbx_validate_msg_hdr() local
948 tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); fm10k_mbx_validate_msg_hdr()
958 if (tail != mbx->head) fm10k_mbx_validate_msg_hdr()
966 if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) fm10k_mbx_validate_msg_hdr()
969 /* validate that tail is moving correctly */ fm10k_mbx_validate_msg_hdr()
970 if (!tail || (tail == FM10K_MSG_HDR_MASK(TAIL))) fm10k_mbx_validate_msg_hdr()
972 if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) fm10k_mbx_validate_msg_hdr()
985 /* neither create nor error include a tail offset */ fm10k_mbx_validate_msg_hdr()
986 if (tail) fm10k_mbx_validate_msg_hdr()
1057 mbx->rx.tail = 0; fm10k_mbx_reset_work()
1151 /* align our tail index to remote head index */ fm10k_mbx_process_connect()
1152 mbx->tail = head; fm10k_mbx_process_connect()
1169 u16 head, tail; fm10k_mbx_process_data() local
1174 tail = FM10K_MSG_HDR_FIELD_GET(*hdr, TAIL); fm10k_mbx_process_data()
1178 mbx->tail = head; fm10k_mbx_process_data()
1183 err = fm10k_mbx_push_tail(hw, mbx, tail); fm10k_mbx_process_data()
1221 /* we have already verified mbx->head == tail so we know this is 0 */ fm10k_mbx_process_disconnect()
1237 if (head != mbx->tail) fm10k_mbx_process_disconnect()
1282 /* reset tail index and size to prepare for reconnect */ fm10k_mbx_process_error()
1283 mbx->tail = head; fm10k_mbx_process_error()
1298 return fm10k_mbx_create_reply(hw, mbx, mbx->tail); fm10k_mbx_process_error()
1525 * evenly splitting it. In order to allow for easy masking of head/tail
1567 /* initialize tail and head */ fm10k_pfvf_mbx_init()
1568 mbx->tail = 1; fm10k_pfvf_mbx_init()
1608 mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) | fm10k_sm_mbx_create_data_hdr()
1625 mbx->mbx_hdr = FM10K_MSG_HDR_FIELD_SET(mbx->tail, SM_TAIL) | fm10k_sm_mbx_create_connect_hdr()
1646 /* initialize tail and head */ fm10k_sm_mbx_connect_reset()
1647 mbx->tail = 1; fm10k_sm_mbx_connect_reset()
1749 u16 tail, head, ver; fm10k_sm_mbx_validate_fifo_hdr() local
1751 tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL); fm10k_sm_mbx_validate_fifo_hdr()
1761 if (!tail || tail > FM10K_SM_MBX_FIFO_LEN) fm10k_sm_mbx_validate_fifo_hdr()
1763 if (mbx->tail < head) fm10k_sm_mbx_validate_fifo_hdr()
1765 if (tail < mbx->head) fm10k_sm_mbx_validate_fifo_hdr()
1766 tail += mbx->mbmem_len - 1; fm10k_sm_mbx_validate_fifo_hdr()
1767 if (fm10k_mbx_index_len(mbx, head, mbx->tail) > mbx->tail_len) fm10k_sm_mbx_validate_fifo_hdr()
1769 if (fm10k_mbx_index_len(mbx, mbx->head, tail) < mbx->mbmem_len) fm10k_sm_mbx_validate_fifo_hdr()
1853 u16 tail) fm10k_sm_mbx_receive()
1859 /* push tail in front of head */ fm10k_sm_mbx_receive()
1860 if (tail < mbx->head) fm10k_sm_mbx_receive()
1861 tail += mbmem_len; fm10k_sm_mbx_receive()
1864 err = fm10k_mbx_push_tail(hw, mbx, tail); fm10k_sm_mbx_receive()
1899 /* push head behind tail */ fm10k_sm_mbx_transmit()
1900 if (mbx->tail < head) fm10k_sm_mbx_transmit()
1914 mbx->tail = fm10k_mbx_tail_sub(mbx, mbx->tail_len - tail_len); fm10k_sm_mbx_transmit()
1919 if (mbx->tail > mbmem_len) fm10k_sm_mbx_transmit()
1920 mbx->tail -= mbmem_len; fm10k_sm_mbx_transmit()
1993 fm10k_sm_mbx_create_reply(hw, mbx, mbx->tail); fm10k_sm_mbx_process_reset()
2008 u16 head, tail; fm10k_sm_mbx_process_version_1() local
2012 tail = FM10K_MSG_HDR_FIELD_GET(*hdr, SM_TAIL); fm10k_sm_mbx_process_version_1()
2027 len = fm10k_sm_mbx_receive(hw, mbx, tail); fm10k_sm_mbx_process_version_1()
482 fm10k_mbx_push_tail(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, u16 tail) fm10k_mbx_push_tail() argument
1851 fm10k_sm_mbx_receive(struct fm10k_hw *hw, struct fm10k_mbx_info *mbx, u16 tail) fm10k_sm_mbx_receive() argument
fm10k_mbx.h
120 * To get the actual mailbox offset based on the tail it
124 * Head index follows the same format as the tail index.
247 u16 tail; member in struct:fm10k_mbx_fifo
281 u16 tail, tail_len, pulled; member in struct:fm10k_mbx_info
/linux-4.1.27/drivers/input/joystick/iforce/
iforce-serio.c
44 if (iforce->xmit.head == iforce->xmit.tail) { iforce_serial_xmit()
54 serio_write(iforce->serio, iforce->xmit.buf[iforce->xmit.tail]); iforce_serial_xmit()
55 cs ^= iforce->xmit.buf[iforce->xmit.tail]; iforce_serial_xmit()
56 XMIT_INC(iforce->xmit.tail, 1); iforce_serial_xmit()
58 for (i=iforce->xmit.buf[iforce->xmit.tail]; i >= 0; --i) { iforce_serial_xmit()
59 serio_write(iforce->serio, iforce->xmit.buf[iforce->xmit.tail]); iforce_serial_xmit()
60 cs ^= iforce->xmit.buf[iforce->xmit.tail]; iforce_serial_xmit()
61 XMIT_INC(iforce->xmit.tail, 1); iforce_serial_xmit()
iforce-usb.c
37 if (iforce->xmit.head == iforce->xmit.tail) { iforce_usb_xmit()
43 ((char *)iforce->out->transfer_buffer)[0] = iforce->xmit.buf[iforce->xmit.tail]; iforce_usb_xmit()
44 XMIT_INC(iforce->xmit.tail, 1); iforce_usb_xmit()
45 n = iforce->xmit.buf[iforce->xmit.tail]; iforce_usb_xmit()
46 XMIT_INC(iforce->xmit.tail, 1); iforce_usb_xmit()
52 c = CIRC_CNT_TO_END(iforce->xmit.head, iforce->xmit.tail, XMIT_SIZE); iforce_usb_xmit()
56 &iforce->xmit.buf[iforce->xmit.tail], iforce_usb_xmit()
63 XMIT_INC(iforce->xmit.tail, n); iforce_usb_xmit()
iforce-packets.c
55 int head, tail; iforce_send_packet() local
59 * Update head and tail of xmit buffer iforce_send_packet()
64 tail = iforce->xmit.tail; iforce_send_packet()
67 if (CIRC_SPACE(head, tail, XMIT_SIZE) < n+2) { iforce_send_packet()
74 empty = head == tail; iforce_send_packet()
85 c = CIRC_SPACE_TO_END(head, tail, XMIT_SIZE); iforce_send_packet()
/linux-4.1.27/sound/core/seq/
seq_fifo.c
56 f->tail = NULL; snd_seq_fifo_new()
133 if (f->tail != NULL) snd_seq_fifo_event_in()
134 f->tail->next = cell; snd_seq_fifo_event_in()
135 f->tail = cell; snd_seq_fifo_event_in()
159 /* reset tail if this was the last element */ fifo_cell_out()
160 if (f->tail == cell) fifo_cell_out()
161 f->tail = NULL; fifo_cell_out()
257 f->tail = NULL; snd_seq_fifo_resize()
seq_prioq.c
67 f->tail = NULL; snd_seq_prioq_new()
165 if (f->tail && !prior) { snd_seq_prioq_cell_in()
166 if (compare_timestamp(&cell->event, &f->tail->event)) { snd_seq_prioq_cell_in()
167 /* add new cell to tail of the fifo */ snd_seq_prioq_cell_in()
168 f->tail->next = cell; snd_seq_prioq_cell_in()
169 f->tail = cell; snd_seq_prioq_cell_in()
211 f->tail = cell; snd_seq_prioq_cell_in()
233 /* reset tail if this was the last element */ snd_seq_prioq_cell_out()
234 if (f->tail == cell) snd_seq_prioq_cell_out()
235 f->tail = NULL; snd_seq_prioq_cell_out()
309 if (cell == f->tail) snd_seq_prioq_leave()
310 f->tail = cell->next; snd_seq_prioq_leave()
423 if (cell == f->tail) snd_seq_prioq_remove_events()
424 f->tail = cell->next; snd_seq_prioq_remove_events()
seq_prioq.h
31 struct snd_seq_event_cell *tail; /* pointer to tail of prioq */ member in struct:snd_seq_prioq
seq_fifo.h
33 struct snd_seq_event_cell *tail; /* pointer to tail of fifo */ member in struct:snd_seq_fifo
/linux-4.1.27/drivers/staging/lustre/lustre/obdclass/
llog_swab.c
131 struct llog_rec_tail *tail = NULL; lustre_swab_llog_rec() local
146 tail = &lsc->lsc_tail; lustre_swab_llog_rec()
156 tail = &lur->lur_tail; lustre_swab_llog_rec()
166 tail = &lur->lur_tail; lustre_swab_llog_rec()
188 tail = &ext->cr_tail; lustre_swab_llog_rec()
190 tail = &cr->cr_tail; lustre_swab_llog_rec()
192 tail = (struct llog_rec_tail *)((char *)tail + lustre_swab_llog_rec()
203 tail = &cur->cur_tail; lustre_swab_llog_rec()
234 tail = &lsr->lsr_tail; lustre_swab_llog_rec()
250 tail = &llh->llh_tail; lustre_swab_llog_rec()
258 tail = &lid->lid_tail; lustre_swab_llog_rec()
267 tail = &lgr->lgr_tail; lustre_swab_llog_rec()
277 if (tail) { lustre_swab_llog_rec()
278 __swab32s(&tail->lrt_len); lustre_swab_llog_rec()
279 __swab32s(&tail->lrt_index); lustre_swab_llog_rec()
obd_mount.c
894 char *tail; lmd_parse_mgssec() local
902 tail = strchr(ptr, ','); lmd_parse_mgssec()
903 if (tail == NULL) lmd_parse_mgssec()
906 length = tail - ptr; lmd_parse_mgssec()
919 char *tail; lmd_parse_string() local
930 tail = strchr(ptr, ','); lmd_parse_string()
931 if (tail == NULL) lmd_parse_string()
934 length = tail - ptr; lmd_parse_string()
950 char *tail = *ptr; lmd_parse_mgs() local
956 while (class_parse_nid_quiet(tail, &nid, &tail) == 0) {} lmd_parse_mgs()
957 length = tail - *ptr; lmd_parse_mgs()
979 *ptr = tail; lmd_parse_mgs()
1095 char *tail = strchr(s1 + 6, ','); lmd_parse() local
1096 if (tail == NULL) lmd_parse()
1099 length = tail - s1; lmd_parse()
/linux-4.1.27/arch/alpha/lib/
clear_user.S
57 beq $1, $tail # .. e1 :
71 $tail:
72 bne $2, 1f # e1 : is there a tail to do?
86 and $1, 7, $2 # e1 : number of bytes in tail
/linux-4.1.27/drivers/tty/
bfin_jtag_comm.c
58 #define circ_empty(circ) ((circ)->head == (circ)->tail)
59 #define circ_free(circ) CIRC_SPACE((circ)->head, (circ)->tail, CIRC_SIZE)
60 #define circ_cnt(circ) CIRC_CNT((circ)->head, (circ)->tail, CIRC_SIZE)
86 inbound_len, bfin_jc_write_buf.tail, bfin_jc_write_buf.head); bfin_jc_emudat_manager()
117 int tail = bfin_jc_write_buf.tail; bfin_jc_emudat_manager() local
121 circ_byte(&bfin_jc_write_buf, tail + 0), bfin_jc_emudat_manager()
122 circ_byte(&bfin_jc_write_buf, tail + 1), bfin_jc_emudat_manager()
123 circ_byte(&bfin_jc_write_buf, tail + 2), bfin_jc_emudat_manager()
124 circ_byte(&bfin_jc_write_buf, tail + 3) bfin_jc_emudat_manager()
126 bfin_jc_write_buf.tail += ate; bfin_jc_emudat_manager()
233 bfin_jc_write_buf.head = bfin_jc_write_buf.tail = 0; bfin_jc_init()
n_tty.c
642 size_t tail; __process_echoes() local
647 tail = ldata->echo_tail; __process_echoes()
648 while (ldata->echo_commit != tail) { __process_echoes()
649 c = echo_buf(ldata, tail); __process_echoes()
659 op = echo_buf(ldata, tail + 1); __process_echoes()
665 num_chars = echo_buf(ldata, tail + 2); __process_echoes()
691 tail += 3; __process_echoes()
696 tail += 2; __process_echoes()
702 tail += 2; __process_echoes()
714 tail += 2; __process_echoes()
735 tail += 2; __process_echoes()
752 tail += 1; __process_echoes()
758 * data at the tail to prevent a subsequent overrun */ __process_echoes()
759 while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) { __process_echoes()
760 if (echo_buf(ldata, tail) == ECHO_OP_START) { __process_echoes()
761 if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB) __process_echoes()
762 tail += 3; __process_echoes()
764 tail += 2; __process_echoes()
766 tail++; __process_echoes()
769 ldata->echo_tail = tail; __process_echoes()
1049 size_t tail = ldata->read_head; eraser() local
1058 while (tail != ldata->canon_head) { eraser()
1059 tail--; eraser()
1060 c = read_buf(ldata, tail); eraser()
1335 size_t tail = ldata->canon_head; n_tty_receive_char_special() local
1340 while (tail != ldata->read_head) { n_tty_receive_char_special()
1341 echo_char(read_buf(ldata, tail), tty); n_tty_receive_char_special()
1342 tail++; n_tty_receive_char_special()
1728 size_t tail = smp_load_acquire(&ldata->read_tail); n_tty_receive_buf_common() local
1730 room = N_TTY_BUF_SIZE - (ldata->read_head - tail); n_tty_receive_buf_common()
1735 overflow = ldata->icanon && ldata->canon_head == tail; n_tty_receive_buf_common()
1974 * drain the space from the tail pointer to the (physical) end of the
1995 size_t tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); copy_from_read_buf() local
1998 n = min(head - ldata->read_tail, N_TTY_BUF_SIZE - tail); copy_from_read_buf()
2001 retval = copy_to_user(*b, read_buf_addr(ldata, tail), n); copy_from_read_buf()
2003 is_eof = n == 1 && read_buf(ldata, tail) == EOF_CHAR(tty); copy_from_read_buf()
2004 tty_audit_add_data(tty, read_buf_addr(ldata, tail), n, copy_from_read_buf()
2047 size_t tail; canon_copy_from_read_buf() local
2056 tail = ldata->read_tail & (N_TTY_BUF_SIZE - 1); canon_copy_from_read_buf()
2057 size = min_t(size_t, tail + n, N_TTY_BUF_SIZE); canon_copy_from_read_buf()
2059 n_tty_trace("%s: nr:%zu tail:%zu n:%zu size:%zu\n", canon_copy_from_read_buf()
2060 __func__, *nr, tail, n, size); canon_copy_from_read_buf()
2062 eol = find_next_bit(ldata->read_flags, size, tail); canon_copy_from_read_buf()
2063 more = n - (size - tail); canon_copy_from_read_buf()
2072 size = N_TTY_BUF_SIZE - tail; canon_copy_from_read_buf()
2073 n = eol - tail; canon_copy_from_read_buf()
2088 ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size); canon_copy_from_read_buf()
2093 ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n); canon_copy_from_read_buf()
2188 size_t tail; n_tty_read() local
2225 tail = ldata->read_tail; n_tty_read()
2317 if (tail != ldata->read_tail) n_tty_read()
2492 size_t nr, head, tail; inq_canon() local
2497 tail = ldata->read_tail; inq_canon()
2498 nr = head - tail; inq_canon()
2500 while (head != tail) { inq_canon()
2501 if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) && inq_canon()
2502 read_buf(ldata, tail) == __DISABLED_CHAR) inq_canon()
2504 tail++; inq_canon()
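
copy_from_read_buf (around line 1995 above) is the standard one-chunk drain of a masked ring: copy at most up to the wrap point, bump the free-running tail, and let the caller loop. As a plain-memory sketch (BUFSZ standing in for N_TTY_BUF_SIZE):

#include <string.h>

#define BUFSZ 4096   /* a power of two, like N_TTY_BUF_SIZE */

static size_t ring_copy_once(char *dst, const char *ring,
                             size_t head, size_t *tail, size_t want)
{
    size_t off = *tail & (BUFSZ - 1);
    size_t n = head - *tail;          /* bytes available in total */

    if (n > BUFSZ - off)
        n = BUFSZ - off;              /* stop at the wrap point */
    if (n > want)
        n = want;
    memcpy(dst, ring + off, n);
    *tail += n;                       /* indices run free; mask on use */
    return n;
}
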
tty_buffer.c
132 buf->tail = &buf->sentinel; tty_buffer_free_all()
255 b = buf->tail; __tty_buffer_request_room()
266 buf->tail = n; __tty_buffer_request_room()
307 struct tty_buffer *tb = port->buf.tail; tty_insert_flip_string_fixed_flag()
342 struct tty_buffer *tb = port->buf.tail; tty_insert_flip_string_flags()
371 buf->tail->commit = buf->tail->used; tty_schedule_flip()
394 struct tty_buffer *tb = port->buf.tail; tty_prepare_flip_string()
522 buf->tail = &buf->sentinel; tty_buffer_init()
moxa.c
1880 u16 head, tail, tx_mask, spage, epage; MoxaPortWriteData() local
1888 tail = readw(ofsAddr + TXwptr); MoxaPortWriteData()
1890 c = (head > tail) ? (head - tail - 1) : (head - tail + tx_mask); MoxaPortWriteData()
1899 if (head > tail) MoxaPortWriteData()
1900 len = head - tail - 1; MoxaPortWriteData()
1902 len = tx_mask + 1 - tail; MoxaPortWriteData()
1904 ofs = baseAddr + DynPage_addr + bufhead + tail; MoxaPortWriteData()
1907 tail = (tail + len) & tx_mask; MoxaPortWriteData()
1911 pageno = spage + (tail >> 13); MoxaPortWriteData()
1912 pageofs = tail & Page_mask; MoxaPortWriteData()
1926 tail = (tail + total) & tx_mask; MoxaPortWriteData()
1928 writew(tail, ofsAddr + TXwptr); MoxaPortWriteData()
1939 u16 tail, rx_mask, spage, epage; MoxaPortReadData() local
1945 tail = readw(ofsAddr + RXwptr); MoxaPortReadData()
1949 count = (tail >= head) ? (tail - head) : (tail - head + rx_mask + 1); MoxaPortReadData()
1960 len = (tail >= head) ? (tail - head) : MoxaPortReadData()
/linux-4.1.27/drivers/dma/ioat/
H A Ddma_v2.c60 "%s: head: %#x tail: %#x issued: %#x count: %#x\n", __ioat2_issue_pending()
61 __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); __ioat2_issue_pending()
99 dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", __ioat2_start_null_desc()
100 __func__, ioat->head, ioat->tail, ioat->issued); __ioat2_start_null_desc()
134 int idx = ioat->tail, i; __cleanup()
136 dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", __cleanup()
137 __func__, ioat->head, ioat->tail, ioat->issued); __cleanup()
158 smp_mb(); /* finish all descriptor reads before incrementing tail */ __cleanup()
159 ioat->tail = idx + i; __cleanup()
172 * ioat2_cleanup - clean finished descriptors (advance tail pointer)
201 /* set the tail to be re-issued */ __ioat2_restart_chan()
202 ioat->issued = ioat->tail; __ioat2_restart_chan()
208 "%s: head: %#x tail: %#x issued: %#x count: %#x\n", __ioat2_restart_chan()
209 __func__, ioat->head, ioat->tail, ioat->issued, ioat->dmacount); __ioat2_restart_chan()
214 desc = ioat2_get_ring_ent(ioat, ioat->tail); __ioat2_restart_chan()
553 ioat->tail = 0; ioat2_alloc_chan_resources()
615 u16 curr_idx = (ioat->tail+i) & (curr_size-1); reshape_ring()
616 u16 new_idx = (ioat->tail+i) & (new_size-1); reshape_ring()
624 u16 new_idx = (ioat->tail+i) & (new_size-1); reshape_ring()
629 u16 new_idx = (ioat->tail+i) & (new_size-1); reshape_ring()
641 u16 new_idx = (ioat->tail+i) & (new_size-1); reshape_ring()
655 u16 curr_idx = (ioat->tail+i) & (curr_size-1); reshape_ring()
656 u16 new_idx = (ioat->tail+i) & (new_size-1); reshape_ring()
666 ent = ioat2_get_ring_ent(ioat, ioat->tail+i); reshape_ring()
671 hw = ring[(ioat->tail+new_size-1) & (new_size-1)]->hw; reshape_ring()
672 next = ring[(ioat->tail+new_size) & (new_size-1)]; reshape_ring()
704 __func__, num_descs, ioat->head, ioat->tail, ioat->issued); ioat2_check_space_lock()
728 __func__, num_descs, ioat->head, ioat->tail, ioat->issued); ioat2_check_space_lock()
828 desc = ioat2_get_ring_ent(ioat, ioat->tail + i); ioat2_free_chan_resources()
dma_v2.h
46 * @tail: cleanup index
58 u16 tail; member in struct:ioat2_dma_chan
81 return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat)); ioat2_ring_active()
/linux-4.1.27/drivers/gpu/drm/i915/
H A Dintel_lrc.h46 * intel_logical_ring_advance() - advance the ringbuffer tail
49 * The tail is only updated in our logical ringbuffer struct.
53 ringbuf->tail &= ringbuf->size - 1; intel_logical_ring_advance()
63 iowrite32(data, ringbuf->virtual_start + ringbuf->tail); intel_logical_ring_emit()
64 ringbuf->tail += 4; intel_logical_ring_emit()
intel_lrc.c
48 * But, what about the ringbuffer control registers (head, tail, etc..)?
96 * for the appropriate context. The tail pointer in the hardware context is not
100 * tail after the request was written to the ring buffer and a pointer to the
131 * preemption, but just sampling the new tail pointer).
323 u32 tail) execlists_update_context()
331 reg_state[CTX_RING_TAIL+1] = tail; execlists_update_context()
384 * will update tail past first request's workload */ execlists_context_unqueue()
411 req0->tail += 8; execlists_context_unqueue()
412 req0->tail &= ringbuf->size - 1; execlists_context_unqueue()
418 execlists_submit_contexts(ring, req0->ctx, req0->tail, execlists_context_unqueue()
420 req1 ? req1->tail : 0); execlists_context_unqueue()
519 u32 tail, execlists_context_queue()
547 request->tail = tail; execlists_context_queue()
811 * intel_logical_ring_advance_and_submit() - advance the tail and submit the workload
814 * The tail is updated in our logical ringbuffer struct, not in the actual context. What
815 * really happens during submission is that the context and current tail will be placed
817 * point, the tail *inside* the context is updated and the ELSP written to.
831 execlists_context_queue(ring, ctx, ringbuf->tail, request); intel_logical_ring_advance_and_submit()
942 if (__intel_ring_space(request->tail, ringbuf->tail, logical_ring_wait_request()
1014 int rem = ringbuf->size - ringbuf->tail; logical_ring_wrap_buffer()
1023 virt = ringbuf->virtual_start + ringbuf->tail; logical_ring_wrap_buffer()
1028 ringbuf->tail = 0; logical_ring_wrap_buffer()
1039 if (unlikely(ringbuf->tail + bytes > ringbuf->effective_size)) { logical_ring_prepare()
1061 * be wrapped, or wait a bit for the tail to be updated). This function takes care of that
1972 ringbuf->tail = 0; intel_lr_context_deferred_create()
2069 ringbuf->tail = 0; for_each_ring()
321 execlists_update_context(struct drm_i915_gem_object *ctx_obj, struct drm_i915_gem_object *ring_obj, u32 tail) execlists_update_context() argument
517 execlists_context_queue(struct intel_engine_cs *ring, struct intel_context *to, u32 tail, struct drm_i915_gem_request *request) execlists_context_queue() argument
intel_ringbuffer.h
103 u32 tail; member in struct:intel_ringbuffer
406 iowrite32(data, ringbuf->virtual_start + ringbuf->tail); intel_ring_emit()
407 ringbuf->tail += 4; intel_ring_emit()
412 ringbuf->tail &= ringbuf->size - 1; intel_ring_advance()
414 int __intel_ring_space(int head, int tail, int size);
440 return ringbuf->tail; intel_ring_get_tail()
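The intel_ring_emit()/intel_ring_advance() hits above show the i915 pattern: write a dword at the tail, bump the tail, and fold it back into range only when the batch is closed. A hedged userspace stand-in (memcpy instead of iowrite32, and a simplified struct):

    #include <stdint.h>
    #include <string.h>

    struct ring {
            uint8_t *virtual_start;  /* mapped ring storage           */
            uint32_t tail;           /* byte offset of the next write */
            uint32_t size;           /* bytes, power of two           */
    };

    /* Append one 32-bit command word. The caller is assumed to have
     * reserved space first, as the prepare step in the hits does, so
     * no wrap can occur mid-batch. */
    static void ring_emit(struct ring *r, uint32_t data)
    {
            memcpy(r->virtual_start + r->tail, &data, sizeof(data));
            r->tail += 4;
    }

    /* Close the batch: wrap the tail back into [0, size). */
    static void ring_advance(struct ring *r)
    {
            r->tail &= r->size - 1;
    }

    int main(void)
    {
            uint8_t buf[64];
            struct ring r = { buf, 60, 64 };

            ring_emit(&r, 0xdeadbeef);  /* fills bytes 60..63 */
            ring_advance(&r);           /* tail wraps to 0    */
            return (int)r.tail;
    }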
/linux-4.1.27/drivers/crypto/caam/
H A Djr.c165 int hw_idx, sw_idx, i, head, tail; caam_jr_dequeue() local
178 sw_idx = tail = jrp->tail; caam_jr_dequeue()
181 for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) { caam_jr_dequeue()
182 sw_idx = (tail + i) & (JOBR_DEPTH - 1); caam_jr_dequeue()
189 BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0); caam_jr_dequeue()
213 * the tail. Otherwise, increment tail by 1 plus the caam_jr_dequeue()
216 if (sw_idx == tail) { caam_jr_dequeue()
218 tail = (tail + 1) & (JOBR_DEPTH - 1); caam_jr_dequeue()
219 } while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 && caam_jr_dequeue()
220 jrp->entinfo[tail].desc_addr_dma == 0); caam_jr_dequeue()
222 jrp->tail = tail; caam_jr_dequeue()
323 int head, tail, desc_size; caam_jr_enqueue() local
336 tail = ACCESS_ONCE(jrp->tail); caam_jr_enqueue()
339 CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) { caam_jr_enqueue()
417 jrp->tail = 0; caam_jr_init()
H A Dintern.h54 int inp_ring_write_index; /* Input index "tail" */
58 int out_ring_read_index; /* Output index "tail" */
59 int tail; /* entinfo (s/w ring) tail index */ member in struct:caam_drv_private_jr
/linux-4.1.27/tools/testing/selftests/powerpc/pmu/ebb/
H A Dtrace.c32 tb->tail = tb->data; trace_buffer_allocate()
66 p = tb->tail; trace_alloc()
67 newtail = tb->tail + bytes; trace_alloc()
71 tb->tail = newtail; trace_alloc()
277 printf(" tail %p\n", tb->tail); trace_buffer_print()
287 while (trace_check_bounds(tb, p) && p < tb->tail) { trace_buffer_print()
H A Dtrace.h28 void *tail; member in struct:trace_buffer
/linux-4.1.27/arch/ia64/hp/sim/
H A Dsimserial.c117 if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) { rs_put_char()
145 if (info->xmit.head == info->xmit.tail || tty->stopped) { transmit_chars()
147 printk("transmit_chars: head=%d, tail=%d, stopped=%d\n", transmit_chars()
148 info->xmit.head, info->xmit.tail, tty->stopped); transmit_chars()
156 * First from current to tail if possible. transmit_chars()
160 count = min(CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE), transmit_chars()
161 SERIAL_XMIT_SIZE - info->xmit.tail); transmit_chars()
162 console->write(console, info->xmit.buf+info->xmit.tail, count); transmit_chars()
164 info->xmit.tail = (info->xmit.tail+count) & (SERIAL_XMIT_SIZE-1); transmit_chars()
169 count = CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); transmit_chars()
172 info->xmit.tail += count; transmit_chars()
182 if (info->xmit.head == info->xmit.tail || tty->stopped || rs_flush_chars()
201 c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); rs_write()
218 if (CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) && rs_write()
229 return CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); rs_write_room()
236 return CIRC_CNT(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE); rs_chars_in_buffer()
245 info->xmit.head = info->xmit.tail = 0; rs_flush_buffer()
388 state->xmit.head = state->xmit.tail = 0; activate()
/linux-4.1.27/kernel/
H A Dsoftirq.c443 struct tasklet_struct **tail; member in struct:tasklet_head
455 *__this_cpu_read(tasklet_vec.tail) = t; __tasklet_schedule()
456 __this_cpu_write(tasklet_vec.tail, &(t->next)); __tasklet_schedule()
468 *__this_cpu_read(tasklet_hi_vec.tail) = t; __tasklet_hi_schedule()
469 __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); __tasklet_hi_schedule()
492 __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); tasklet_action()
514 *__this_cpu_read(tasklet_vec.tail) = t; tasklet_action()
515 __this_cpu_write(tasklet_vec.tail, &(t->next)); tasklet_action()
528 __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head)); tasklet_hi_action()
550 *__this_cpu_read(tasklet_hi_vec.tail) = t; tasklet_hi_action()
551 __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); tasklet_hi_action()
639 per_cpu(tasklet_vec, cpu).tail = for_each_possible_cpu()
641 per_cpu(tasklet_hi_vec, cpu).tail = for_each_possible_cpu()
694 /* If this was the tail element, move the tail ptr */ tasklet_kill_immediate()
696 per_cpu(tasklet_vec, cpu).tail = i; tasklet_kill_immediate()
709 if (&per_cpu(tasklet_vec, cpu).head != per_cpu(tasklet_vec, cpu).tail) { takeover_tasklets()
710 *__this_cpu_read(tasklet_vec.tail) = per_cpu(tasklet_vec, cpu).head; takeover_tasklets()
711 this_cpu_write(tasklet_vec.tail, per_cpu(tasklet_vec, cpu).tail); takeover_tasklets()
713 per_cpu(tasklet_vec, cpu).tail = &per_cpu(tasklet_vec, cpu).head; takeover_tasklets()
717 if (&per_cpu(tasklet_hi_vec, cpu).head != per_cpu(tasklet_hi_vec, cpu).tail) { takeover_tasklets()
718 *__this_cpu_read(tasklet_hi_vec.tail) = per_cpu(tasklet_hi_vec, cpu).head; takeover_tasklets()
719 __this_cpu_write(tasklet_hi_vec.tail, per_cpu(tasklet_hi_vec, cpu).tail); takeover_tasklets()
721 per_cpu(tasklet_hi_vec, cpu).tail = &per_cpu(tasklet_hi_vec, cpu).head; takeover_tasklets()
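The tasklet queues above keep tail as a pointer to the last next field (struct tasklet_struct **), so an append is O(1) and needs no special case for an empty list. A small sketch of that two-star idiom with illustrative types:

    #include <stdio.h>

    struct node {
            struct node *next;
            int val;
    };

    struct list {
            struct node *head;
            struct node **tail;  /* address of the last next pointer */
    };

    static void list_init(struct list *l)
    {
            l->head = NULL;
            l->tail = &l->head;  /* empty list: tail aims at head */
    }

    static void list_append(struct list *l, struct node *n)
    {
            n->next = NULL;
            *l->tail = n;        /* link after the current last node */
            l->tail = &n->next;  /* remember the new last next field */
    }

    int main(void)
    {
            struct node a = { NULL, 1 }, b = { NULL, 2 };
            struct list l;

            list_init(&l);
            list_append(&l, &a);
            list_append(&l, &b);
            for (struct node *n = l.head; n; n = n->next)
                    printf("%d\n", n->val);
            return 0;
    }

The takeover_tasklets() hits splice one CPU's pending list onto another in the same way: assign through *tail, then adopt the donor's tail pointer.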
/linux-4.1.27/arch/arm64/crypto/
H A Daes-ce-ccm-glue.c176 u32 tail = walk.nbytes % AES_BLOCK_SIZE; ccm_encrypt() local
179 tail = 0; ccm_encrypt()
182 walk.nbytes - tail, ctx->key_enc, ccm_encrypt()
185 len -= walk.nbytes - tail; ccm_encrypt()
186 err = blkcipher_walk_done(&desc, &walk, tail); ccm_encrypt()
232 u32 tail = walk.nbytes % AES_BLOCK_SIZE; ccm_decrypt() local
235 tail = 0; ccm_decrypt()
238 walk.nbytes - tail, ctx->key_enc, ccm_decrypt()
241 len -= walk.nbytes - tail; ccm_decrypt()
242 err = blkcipher_walk_done(&desc, &walk, tail); ccm_decrypt()
/linux-4.1.27/fs/9p/
H A Dvfs_dir.c47 * @tail: end offset of current dirread buffer
56 int tail; member in struct:p9_rdir
133 if (rdir->tail == rdir->head) { v9fs_dir_readdir()
145 rdir->tail = n; v9fs_dir_readdir()
147 while (rdir->head < rdir->tail) { v9fs_dir_readdir()
150 rdir->tail - rdir->head, &st); v9fs_dir_readdir()
194 if (rdir->tail == rdir->head) { v9fs_dir_readdir_dotl()
201 rdir->tail = err; v9fs_dir_readdir_dotl()
204 while (rdir->head < rdir->tail) { v9fs_dir_readdir_dotl()
207 rdir->tail - rdir->head, v9fs_dir_readdir_dotl()
/linux-4.1.27/drivers/mfd/
H A Dpcf50633-adc.c88 int head, tail; adc_enqueue_request() local
93 tail = adc->queue_tail; adc_enqueue_request()
95 if (adc->queue[tail]) { adc_enqueue_request()
101 adc->queue[tail] = req; adc_enqueue_request()
102 if (head == tail) adc_enqueue_request()
104 adc->queue_tail = (tail + 1) & (PCF50633_MAX_ADC_FIFO_DEPTH - 1); adc_enqueue_request()
/linux-4.1.27/drivers/net/wireless/b43/
H A Dpio.c346 u8 *tail = wl->pio_tailspace; tx_write_2byte_queue() local
352 tail[0] = data[data_len - 1]; tx_write_2byte_queue()
353 tail[1] = 0; tx_write_2byte_queue()
354 b43_block_write(dev, tail, 2, tx_write_2byte_queue()
400 u8 *tail = wl->pio_tailspace; tx_write_4byte_queue() local
403 memset(tail, 0, 4); tx_write_4byte_queue()
410 tail[0] = data[data_len - 3]; tx_write_4byte_queue()
411 tail[1] = data[data_len - 2]; tx_write_4byte_queue()
412 tail[2] = data[data_len - 1]; tx_write_4byte_queue()
416 tail[0] = data[data_len - 2]; tx_write_4byte_queue()
417 tail[1] = data[data_len - 1]; tx_write_4byte_queue()
420 tail[0] = data[data_len - 1]; tx_write_4byte_queue()
424 b43_block_write(dev, tail, 4, tx_write_4byte_queue()
722 u8 *tail = wl->pio_tailspace; pio_rx_frame() local
726 b43_block_read(dev, tail, 4, pio_rx_frame()
731 skb->data[len + padding - 3] = tail[0]; pio_rx_frame()
732 skb->data[len + padding - 2] = tail[1]; pio_rx_frame()
733 skb->data[len + padding - 1] = tail[2]; pio_rx_frame()
736 skb->data[len + padding - 2] = tail[0]; pio_rx_frame()
737 skb->data[len + padding - 1] = tail[1]; pio_rx_frame()
740 skb->data[len + padding - 1] = tail[0]; pio_rx_frame()
749 u8 *tail = wl->pio_tailspace; pio_rx_frame() local
753 b43_block_read(dev, tail, 2, pio_rx_frame()
756 skb->data[len + padding - 1] = tail[0]; pio_rx_frame()
/linux-4.1.27/sound/core/seq/oss/
H A Dseq_oss_readq.c62 q->head = q->tail = 0; snd_seq_oss_readq_new()
91 q->head = q->tail = 0; snd_seq_oss_readq_clear()
136 memcpy(&q->q[q->tail], ev, sizeof(*ev)); snd_seq_oss_readq_put_event()
137 q->tail = (q->tail + 1) % q->maxlen; snd_seq_oss_readq_put_event()
170 (q->qlen > 0 || q->head == q->tail), snd_seq_oss_readq_wait()
H A Dseq_oss_readq.h35 int head, tail; member in struct:seq_oss_readq
/linux-4.1.27/drivers/input/serio/
H A Dserio_raw.c33 unsigned int tail, head; member in struct:serio_raw
149 empty = serio_raw->head == serio_raw->tail; serio_raw_fetch_byte()
151 *c = serio_raw->queue[serio_raw->tail]; serio_raw_fetch_byte()
152 serio_raw->tail = (serio_raw->tail + 1) % SERIO_RAW_QUEUE_LEN; serio_raw_fetch_byte()
173 if (serio_raw->head == serio_raw->tail && serio_raw_read()
191 serio_raw->head != serio_raw->tail || serio_raw_read()
251 if (serio_raw->head != serio_raw->tail) serio_raw_poll()
283 if (likely(head != serio_raw->tail)) { serio_raw_interrupt()
H A Dsa1111ps2.c53 unsigned int tail; member in struct:ps2if
98 if (ps2if->head == ps2if->tail) { ps2_txint()
102 sa1111_writel(ps2if->buf[ps2if->tail], ps2if->base + PS2DATA); ps2_txint()
103 ps2if->tail = (ps2if->tail + 1) & (sizeof(ps2if->buf) - 1); ps2_txint()
128 if (ps2if->head == ps2if->tail) ps2_write()
131 if (head != ps2if->tail) { ps2_write()
/linux-4.1.27/drivers/staging/unisys/visorchannel/
H A Dvisorchannel_funcs.c39 spinlock_t remove_lock; /* protect tail writes in chan_hdr */
384 /* Choose 0 or max, maybe based on current tail value */ safe_sig_queue_validate()
390 punsafe_sqh->tail = *ptail; safe_sig_queue_validate()
404 if (sig_hdr.head == sig_hdr.tail) signalremove_inner()
407 sig_hdr.tail = (sig_hdr.tail + 1) % sig_hdr.max_slots; signalremove_inner()
408 if (!sig_read_data(channel, queue, &sig_hdr, sig_hdr.tail, msg)) { signalremove_inner()
417 if (!SIG_WRITE_FIELD(channel, queue, &sig_hdr, tail)) signalremove_inner()
450 if (sig_hdr.head == sig_hdr.tail) { signalinsert_inner()
502 u32 head, tail; visorchannel_signalqueue_slots_avail() local
507 tail = sig_hdr.tail; visorchannel_signalqueue_slots_avail()
508 if (head < tail) visorchannel_signalqueue_slots_avail()
510 slots_used = (head - tail); visorchannel_signalqueue_slots_avail()
548 seq_printf(seq, " Tail = %lu\n", (ulong)q->tail); sigqueue_debug()
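visorchannel_signalqueue_slots_avail() above normalizes a wrapped head before subtracting; the step between the two hits (adding max_slots to head when head < tail) is implied by the arithmetic. A hedged sketch of that computation, assuming indices in [0, max_slots) and the usual one-slot-free convention for telling full from empty:

    /* Slots currently queued, for indices that wrap at max_slots. */
    static unsigned slots_used(unsigned head, unsigned tail, unsigned max_slots)
    {
            if (head < tail)
                    head += max_slots;  /* undo the wrap before subtracting */
            return head - tail;
    }

    static unsigned slots_avail(unsigned head, unsigned tail, unsigned max_slots)
    {
            /* assumes one slot is sacrificed so head == tail means empty */
            return max_slots - 1 - slots_used(head, tail, max_slots);
    }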
/linux-4.1.27/drivers/acpi/
H A Dutils.c67 u8 *tail = NULL; acpi_extract_package() local
197 tail = buffer->pointer + tail_offset; acpi_extract_package()
222 *pointer = tail; acpi_extract_package()
223 *((u64 *) tail) = acpi_extract_package()
226 tail += sizeof(u64); acpi_extract_package()
228 *tail = (char)0; acpi_extract_package()
229 tail += sizeof(char); acpi_extract_package()
242 *pointer = tail; acpi_extract_package()
243 memcpy(tail, element->string.pointer, acpi_extract_package()
246 tail += element->string.length * sizeof(char); acpi_extract_package()
248 *tail = (char)0; acpi_extract_package()
249 tail += sizeof(char); acpi_extract_package()
253 *pointer = tail; acpi_extract_package()
254 memcpy(tail, element->buffer.pointer, acpi_extract_package()
257 tail += element->buffer.length; acpi_extract_package()
/linux-4.1.27/drivers/gpu/drm/mga/
H A Dmga_dma.c84 primary->tail = 0; mga_do_dma_reset()
106 u32 head, tail; mga_do_dma_flush() local
120 if (primary->tail == primary->last_flush) { mga_do_dma_flush()
125 tail = primary->tail + dev_priv->primary->offset; mga_do_dma_flush()
139 primary->last_flush = primary->tail; mga_do_dma_flush()
143 if (head <= tail) mga_do_dma_flush()
144 primary->space = primary->size - primary->tail; mga_do_dma_flush()
146 primary->space = head - tail; mga_do_dma_flush()
149 DRM_DEBUG(" tail = 0x%06lx\n", (unsigned long)(tail - dev_priv->primary->offset)); mga_do_dma_flush()
153 MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); mga_do_dma_flush()
161 u32 head, tail; mga_do_dma_wrap_start() local
173 tail = primary->tail + dev_priv->primary->offset; mga_do_dma_wrap_start()
175 primary->tail = 0; mga_do_dma_wrap_start()
187 DRM_DEBUG(" tail = 0x%06x\n", primary->tail); mga_do_dma_wrap_start()
192 MGA_WRITE(MGA_PRIMEND, tail | dev_priv->dma_access); mga_do_dma_wrap_start()
275 dev_priv->tail = entry; mga_freelist_init()
301 dev_priv->head = dev_priv->tail = NULL; mga_freelist_cleanup()
327 drm_mga_freelist_t *tail = dev_priv->tail; mga_freelist_get() local
334 DRM_DEBUG(" tail=0x%06lx %d\n", mga_freelist_get()
335 tail->age.head ? mga_freelist_get()
336 (unsigned long)(tail->age.head - dev_priv->primary->offset) : 0, mga_freelist_get()
337 tail->age.wrap); mga_freelist_get()
341 if (TEST_AGE(&tail->age, head, wrap)) { mga_freelist_get()
342 prev = dev_priv->tail->prev; mga_freelist_get()
343 next = dev_priv->tail; mga_freelist_get()
346 dev_priv->tail = prev; mga_freelist_get()
371 prev = dev_priv->tail; mga_freelist_put()
899 dev_priv->prim.tail = 0; mga_do_init_dma()
H A Dmga_drv.h54 u32 tail; member in struct:drm_mga_primary_buffer
84 drm_mga_freelist_t *tail; member in struct:drm_mga_private
273 write = dev_priv->prim.tail; \
283 write = dev_priv->prim.tail; \
288 dev_priv->prim.tail = write; \
290 DRM_INFO("ADVANCE_DMA() tail=0x%05x sp=0x%x\n", \
298 DRM_INFO(" tail=0x%06x head=0x%06lx\n", \
299 dev_priv->prim.tail, \
351 entry->age.head = (dev_priv->prim.tail + \
/linux-4.1.27/drivers/s390/block/
H A Ddasd_eer.c91 int tail; member in struct:eerbuffer
105 if (eerb->head < eerb->tail) dasd_eer_get_free_bytes()
106 return eerb->tail - eerb->head - 1; dasd_eer_get_free_bytes()
107 return eerb->buffersize - eerb->head + eerb->tail -1; dasd_eer_get_free_bytes()
117 if (eerb->head >= eerb->tail) dasd_eer_get_filled_bytes()
118 return eerb->head - eerb->tail; dasd_eer_get_filled_bytes()
119 return eerb->buffersize - eerb->tail + eerb->head; dasd_eer_get_filled_bytes()
166 tailindex = eerb->tail / PAGE_SIZE; dasd_eer_read_buffer()
167 localtail = eerb->tail % PAGE_SIZE; dasd_eer_read_buffer()
172 eerb->tail += len; dasd_eer_read_buffer()
173 if (eerb->tail == eerb->buffersize) dasd_eer_read_buffer()
174 eerb->tail = 0; /* wrap around */ dasd_eer_read_buffer()
175 BUG_ON(eerb->tail > eerb->buffersize); dasd_eer_read_buffer()
195 eerb->tail += eerb->residual; dasd_eer_start_record()
196 if (eerb->tail >= eerb->buffersize) dasd_eer_start_record()
197 eerb->tail -= eerb->buffersize; dasd_eer_start_record()
202 eerb->tail += tailcount; dasd_eer_start_record()
203 if (eerb->tail >= eerb->buffersize) dasd_eer_start_record()
204 eerb->tail -= eerb->buffersize; dasd_eer_start_record()
623 eerb->head != eerb->tail); dasd_eer_read()
659 if (eerb->head != eerb->tail) dasd_eer_poll()
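The dasd_eer_get_free_bytes()/dasd_eer_get_filled_bytes() hits above are the classic byte-ring accounting: head is the write offset, tail the read offset, and one byte stays unused so head == tail always means empty. The same two cases, restated as a standalone sketch:

    #include <assert.h>

    static int ring_free(int head, int tail, int size)
    {
            if (head < tail)
                    return tail - head - 1;
            return size - head + tail - 1;
    }

    static int ring_filled(int head, int tail, int size)
    {
            if (head >= tail)
                    return head - tail;
            return size - tail + head;
    }

    int main(void)
    {
            /* free + filled is always size - 1, the sacrificed byte */
            assert(ring_free(7, 3, 16) + ring_filled(7, 3, 16) == 15);
            assert(ring_free(3, 7, 16) + ring_filled(3, 7, 16) == 15);
            return 0;
    }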
/linux-4.1.27/drivers/crypto/qat/qat_common/
H A Dadf_transport_debug.c94 int head, tail, empty; adf_ring_show() local
98 tail = READ_CSR_RING_TAIL(csr, bank->bank_number, adf_ring_show()
107 seq_printf(sfile, "head %x, tail %x, empty: %d\n", adf_ring_show()
108 head, tail, (empty & 1 << ring->ring_number) adf_ring_show()
227 int head, tail, empty; adf_bank_show() local
234 tail = READ_CSR_RING_TAIL(csr, bank->bank_number, adf_bank_show()
239 "ring num %02d, head %04x, tail %04x, empty: %d\n", adf_bank_show()
240 ring->ring_number, head, tail, adf_bank_show()
/linux-4.1.27/drivers/gpu/drm/msm/
H A Dmsm_rd.c20 * tail -f /sys/kernel/debug/dri/<minor>/rd > logfile.rd
64 (CIRC_CNT((circ)->head, (circ)->tail, BUF_SZ))
66 (CIRC_CNT_TO_END((circ)->head, (circ)->tail, BUF_SZ))
69 (CIRC_SPACE((circ)->head, (circ)->tail, BUF_SZ))
71 (CIRC_SPACE_TO_END((circ)->head, (circ)->tail, BUF_SZ))
132 const char *fptr = &fifo->buf[fifo->tail]; rd_read()
147 fifo->tail = (fifo->tail + n) & (BUF_SZ - 1); rd_read()
/linux-4.1.27/arch/powerpc/lib/
H A Dvmx-helper.c44 * This function must return 0 because we tail call optimise when calling
66 * All calls to this function will be optimised into tail calls. We are
/linux-4.1.27/scripts/
H A Dextract-ikconfig22 tail -c+$(($pos+8)) "$1" | zcat > $tmp1 2> /dev/null
36 tail -c+$pos "$img" | $3 > $tmp2 2> /dev/null
H A Dextract-vmlinux33 tail -c+$pos "$img" | $3 > $tmp 2> /dev/null
H A Dcleanpatch220 my $tail = $5; # doesn't include the final newline
224 $tail);
/linux-4.1.27/arch/x86/platform/geode/
H A Dalix.c132 const char *tail; alix_present() local
150 tail = p + alix_sig_len; alix_present()
151 if ((tail[0] == '2' || tail[0] == '3' || tail[0] == '6')) { alix_present()
/linux-4.1.27/drivers/tty/serial/jsm/
H A Djsm_tty.c528 u16 tail; jsm_input() local
558 tail = ch->ch_r_tail & rmask; jsm_input()
560 data_len = (head - tail) & rmask; jsm_input()
578 ch->ch_r_head = tail; jsm_input()
593 "Port %d throttled, not reading any data. head: %x tail: %x\n", jsm_input()
594 ch->ch_portnum, head, tail); jsm_input()
609 s = ((head >= tail) ? head : RQUEUESIZE) - tail; jsm_input()
628 if (*(ch->ch_equeue +tail +i) & UART_LSR_BI) jsm_input()
629 tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_BREAK); jsm_input()
630 else if (*(ch->ch_equeue +tail +i) & UART_LSR_PE) jsm_input()
631 tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_PARITY); jsm_input()
632 else if (*(ch->ch_equeue +tail +i) & UART_LSR_FE) jsm_input()
633 tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_FRAME); jsm_input()
635 tty_insert_flip_char(port, *(ch->ch_rqueue +tail +i), TTY_NORMAL); jsm_input()
638 tty_insert_flip_string(port, ch->ch_rqueue + tail, s); jsm_input()
640 tail += s; jsm_input()
643 tail &= rmask; jsm_input()
646 ch->ch_r_tail = tail & rmask; jsm_input()
647 ch->ch_e_tail = tail & rmask; jsm_input()
H A Djsm_neo.c292 u16 tail; neo_copy_data_from_uart_to_queue() local
297 /* cache head and tail of queue */ neo_copy_data_from_uart_to_queue()
299 tail = ch->ch_r_tail & RQUEUEMASK; neo_copy_data_from_uart_to_queue()
306 if ((qleft = tail - head - 1) < 0) neo_copy_data_from_uart_to_queue()
455 ch->ch_rqueue[tail], ch->ch_equeue[tail]); neo_copy_data_from_uart_to_queue()
457 ch->ch_r_tail = tail = (tail + 1) & RQUEUEMASK; neo_copy_data_from_uart_to_queue()
489 u16 tail; neo_copy_data_from_queue_to_uart() local
518 writeb(circ->buf[circ->tail], &ch->ch_neo_uart->txrx); neo_copy_data_from_queue_to_uart()
520 "Tx data: %x\n", circ->buf[circ->tail]); neo_copy_data_from_queue_to_uart()
521 circ->tail = (circ->tail + 1) & (UART_XMIT_SIZE - 1); neo_copy_data_from_queue_to_uart()
535 /* cache head and tail of queue */ neo_copy_data_from_queue_to_uart()
537 tail = circ->tail & (UART_XMIT_SIZE - 1); neo_copy_data_from_queue_to_uart()
545 s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail; neo_copy_data_from_queue_to_uart()
551 memcpy_toio(&ch->ch_neo_uart->txrxburst, circ->buf + tail, s); neo_copy_data_from_queue_to_uart()
553 tail = (tail + s) & (UART_XMIT_SIZE - 1); neo_copy_data_from_queue_to_uart()
559 /* Update the final tail */ neo_copy_data_from_queue_to_uart()
560 circ->tail = tail & (UART_XMIT_SIZE - 1); neo_copy_data_from_queue_to_uart()
H A Djsm_cls.c365 u16 tail; cls_copy_data_from_uart_to_queue() local
373 /* cache head and tail of queue */ cls_copy_data_from_uart_to_queue()
375 tail = ch->ch_r_tail & RQUEUEMASK; cls_copy_data_from_uart_to_queue()
382 qleft = tail - head - 1; cls_copy_data_from_uart_to_queue()
425 tail = (tail + 1) & RQUEUEMASK; cls_copy_data_from_uart_to_queue()
426 ch->ch_r_tail = tail; cls_copy_data_from_uart_to_queue()
460 u16 tail; cls_copy_data_from_queue_to_uart() local
485 /* cache tail of queue */ cls_copy_data_from_queue_to_uart()
486 tail = circ->tail & (UART_XMIT_SIZE - 1); cls_copy_data_from_queue_to_uart()
493 writeb(circ->buf[tail], &ch->ch_cls_uart->txrx); cls_copy_data_from_queue_to_uart()
494 tail = (tail + 1) & (UART_XMIT_SIZE - 1); cls_copy_data_from_queue_to_uart()
500 /* Update the final tail */ cls_copy_data_from_queue_to_uart()
501 circ->tail = tail & (UART_XMIT_SIZE - 1); cls_copy_data_from_queue_to_uart()
/linux-4.1.27/fs/
H A Daio.c57 unsigned tail; member in struct:aio_ring
148 unsigned tail; member in struct:kioctx::__anon10748
437 /* Compensate for the ring buffer's head/tail overlap entry */ aio_setup_ring()
509 ring->head = ring->tail = 0; aio_setup_ring()
930 unsigned tail) refill_reqs_available()
936 if (head <= tail) refill_reqs_available()
937 events_in_ring = tail - head; refill_reqs_available()
939 events_in_ring = ctx->nr_events - (head - tail); refill_reqs_available()
968 * part is that head cannot pass tail since we prevent user_refill_reqs_available()
969 * aio_complete() from updating tail by holding user_refill_reqs_available()
978 refill_reqs_available(ctx, head, ctx->tail); user_refill_reqs_available()
1056 unsigned tail, pos, head; aio_complete() local
1078 * ctx->completion_lock to prevent other code from messing with the tail aio_complete()
1083 tail = ctx->tail; aio_complete()
1084 pos = tail + AIO_EVENTS_OFFSET; aio_complete()
1086 if (++tail >= ctx->nr_events) aio_complete()
1087 tail = 0; aio_complete()
1101 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, aio_complete()
1107 smp_wmb(); /* make event visible before updating tail */ aio_complete()
1109 ctx->tail = tail; aio_complete()
1113 ring->tail = tail; aio_complete()
1119 refill_reqs_available(ctx, head, tail); aio_complete()
1122 pr_debug("added to ring %p at [%u]\n", iocb, tail); aio_complete()
1136 * We have to order our ring_info tail store above and test aio_complete()
1157 unsigned head, tail, pos; aio_read_events_ring() local
1173 tail = ring->tail; aio_read_events_ring()
1177 * Ensure that once we've read the current tail pointer, that aio_read_events_ring()
1178 * we also see the events that were stored up to the tail. aio_read_events_ring()
1182 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); aio_read_events_ring()
1184 if (head == tail) aio_read_events_ring()
1188 tail %= ctx->nr_events; aio_read_events_ring()
1195 avail = (head <= tail ? tail : ctx->nr_events) - head; aio_read_events_ring()
1196 if (head == tail) aio_read_events_ring()
1227 pr_debug("%li h%u t%u\n", ret, head, tail); aio_read_events_ring()
929 refill_reqs_available(struct kioctx *ctx, unsigned head, unsigned tail) refill_reqs_available() argument
/linux-4.1.27/net/ipv4/
H A Dtcp_probe.c82 unsigned long head, tail; member in struct:__anon14123
88 return (tcp_probe.head - tcp_probe.tail) & (bufsize - 1); tcp_probe_used()
179 tcp_probe.head = tcp_probe.tail = 0; tcpprobe_open()
189 = tcp_probe.log + tcp_probe.tail; tcpprobe_sprint()
221 if (tcp_probe.head == tcp_probe.tail) { tcpprobe_read()
230 tcp_probe.tail = (tcp_probe.tail + 1) & (bufsize - 1); tcpprobe_read()
H A Desp4.c127 u8 *tail; esp_output() local
185 tail = skb_tail_pointer(trailer); esp_output()
187 memset(tail, 0, tfclen); esp_output()
188 tail += tfclen; esp_output()
193 tail[i] = i + 1; esp_output()
195 tail[plen - 2] = plen - 2; esp_output()
196 tail[plen - 1] = *skb_mac_header(skb); esp_output()
/linux-4.1.27/net/core/
H A Dgen_stats.c70 d->tail = (struct nlattr *)skb_tail_pointer(skb); __acquires()
75 if (d->tail) __acquires()
165 if (d->tail) { gnet_stats_copy_basic()
209 if (d->tail) { gnet_stats_copy_rate_est()
286 if (d->tail) gnet_stats_copy_queue()
317 if (d->tail) gnet_stats_copy_app()
344 if (d->tail) gnet_stats_finish_copy()
345 d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail; gnet_stats_finish_copy()
H A Dskbuff.c100 pr_emerg("%s: text:%p len:%d put:%d head:%p data:%p tail:%#lx end:%#lx dev:%s\n", skb_panic()
102 (unsigned long)skb->tail, (unsigned long)skb->end, skb_panic()
173 * the tail pointer in struct sk_buff! __alloc_skb_head()
175 memset(skb, 0, offsetof(struct sk_buff, tail)); __alloc_skb_head()
196 * tail room of at least size bytes. The object has a reference count
243 * the tail pointer in struct sk_buff! __alloc_skb()
245 memset(skb, 0, offsetof(struct sk_buff, tail)); __alloc_skb()
253 skb->end = skb->tail + size; __alloc_skb()
298 * MUST add room at tail (SKB_DATA_ALIGN(skb_shared_info))
315 memset(skb, 0, offsetof(struct sk_buff, tail)); __build_skb()
321 skb->end = skb->tail + size; __build_skb()
879 C(tail); __skb_clone()
1021 /* {transport,network,mac}_header and tail are relative to skb->head */ skb_headers_offset_update()
1076 /* Set the tail pointer and length */ skb_copy()
1116 /* Set the tail pointer and length */ __pskb_copy_fclone()
1155 * @ntail: room to add at tail
1229 skb->tail += off; pskb_expand_head()
1269 * @newtailroom: new free bytes at tail
1301 /* Set the tail pointer and length */ skb_copy_expand()
1325 * skb_pad - zero pad the tail of an skb
1347 ntail = skb->data_len + pad - (skb->end - skb->tail); skb_pad()
1371 * pskb_put - add data to the tail of a potentially fragmented buffer
1373 * @tail: tail fragment of the buffer to use
1377 * fragmented buffer. @tail must be the last fragment of @skb -- or
1383 unsigned char *pskb_put(struct sk_buff *skb, struct sk_buff *tail, int len) pskb_put() argument
1385 if (tail != skb) { pskb_put()
1389 return skb_put(tail, len); pskb_put()
1406 skb->tail += len; skb_put()
1408 if (unlikely(skb->tail > skb->end)) skb_put()
1454 * Cut the length of a buffer down by removing data from the tail. If
1552 * __pskb_pull_tail - advance tail of skb header
1554 * @delta: number of bytes to advance tail
1557 * it expands header moving its tail forward and copying necessary
1563 * or value of new tail of skb in the case of success.
1569 /* Moves tail of skb head forward, copying data from fragmented part,
1578 /* If skb has not enough free space at tail, get new one __pskb_pull_tail()
1580 * room at tail, reallocate without expansion only if skb is cloned. __pskb_pull_tail()
1582 int i, k, eat = (skb->tail + delta) - skb->end; __pskb_pull_tail()
1686 skb->tail += delta; __pskb_pull_tail()
2379 * skb_dequeue_tail - remove from the tail of the queue
2382 * Remove the tail of the list. The list lock is taken so the function
2383 * may be used safely with other locking list functions. The tail item is
2436 * skb_queue_tail - queue a buffer at the list tail
2440 * Queue a buffer at the tail of the list. This function takes the
2609 * @tgt: buffer into which tail data gets added
3003 struct sk_buff *tail = NULL; skb_segment() local
3104 tail->next = nskb; skb_segment()
3107 tail = nskb; skb_segment()
3206 segs->prev = tail; skb_segment()
3213 swap(tail->truesize, head_skb->truesize); skb_segment()
3214 swap(tail->destructor, head_skb->destructor); skb_segment()
3215 swap(tail->sk, head_skb->sk); skb_segment()
/linux-4.1.27/drivers/ptp/
H A Dptp_private.h37 int tail; member in struct:timestamp_event_queue
63 * that a writer might concurrently increment the tail does not
68 int cnt = q->tail - q->head; queue_cnt()
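queue_cnt() above works without taking the queue lock: with a single reader, a concurrent tail increment by the writer can only make the count momentarily stale, never wrong in a harmful direction. A sketch of the idea, assuming one producer, one consumer, and an illustrative capacity:

    #define TS_BUF 128  /* illustrative, stands in for the real queue depth */

    struct ts_queue {
            int head;  /* consumer index, advanced only by the reader */
            int tail;  /* producer index, advanced only by the writer */
    };

    /* Reader-side count. If the writer bumps tail mid-call we merely
     * under-count and the caller polls again; the fixup keeps the
     * result non-negative after index wrap. Sketch only. */
    static int queue_cnt(const struct ts_queue *q)
    {
            int cnt = q->tail - q->head;

            return cnt < 0 ? cnt + TS_BUF : cnt;
    }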
/linux-4.1.27/arch/sparc/kernel/
H A Dsignal_32.c220 void __user *tail; setup_frame() local
241 tail = sf + 1; setup_frame()
249 __siginfo_fpu_t __user *fp = tail; setup_frame()
250 tail += sizeof(*fp); setup_frame()
257 __siginfo_rwin_t __user *rwp = tail; setup_frame()
258 tail += sizeof(*rwp); setup_frame()
315 void __user *tail; setup_rt_frame() local
333 tail = sf + 1; setup_rt_frame()
345 __siginfo_fpu_t __user *fp = tail; setup_rt_frame()
346 tail += sizeof(*fp); setup_rt_frame()
353 __siginfo_rwin_t __user *rwp = tail; setup_rt_frame()
354 tail += sizeof(*rwp); setup_rt_frame()
H A Dsignal32.c409 void __user *tail; setup_frame32() local
434 tail = (sf + 1); setup_frame32()
459 __siginfo_fpu_t __user *fp = tail; setup_frame32()
460 tail += sizeof(*fp); setup_frame32()
467 __siginfo_rwin_t __user *rwp = tail; setup_frame32()
468 tail += sizeof(*rwp); setup_frame32()
540 void __user *tail; setup_rt_frame32() local
565 tail = (sf + 1); setup_rt_frame32()
590 __siginfo_fpu_t __user *fp = tail; setup_rt_frame32()
591 tail += sizeof(*fp); setup_rt_frame32()
598 __siginfo_rwin_t __user *rwp = tail; setup_rt_frame32()
599 tail += sizeof(*rwp); setup_rt_frame32()
H A Dsun4v_ivec.S14 /* Head offset in %g2, tail offset in %g4.
64 /* Head offset in %g2, tail offset in %g4. */
122 /* Head offset in %g2, tail offset in %g4. */
211 * the head equal to the tail. We'll just trap again otherwise.
233 /* Head offset in %g2, tail offset in %g4. */
322 * the head equal to the tail. We'll just trap again otherwise.
/linux-4.1.27/drivers/gpu/drm/r128/
H A Dr128_drv.h74 u32 tail; member in struct:drm_r128_ring_buffer
90 drm_r128_freelist_t *tail; member in struct:drm_r128_private
419 ring->space = (GET_RING_HEAD(dev_priv) - ring->tail) * sizeof(u32); r128_update_ring_snapshot()
488 write = dev_priv->ring.tail; \
501 DRM_INFO("ADVANCE_RING() wr=0x%06x tail=0x%06x\n", \
502 write, dev_priv->ring.tail); \
507 if (((dev_priv->ring.tail + _nr) & tail_mask) != write) \
510 ((dev_priv->ring.tail + _nr) & tail_mask), \
513 dev_priv->ring.tail = write; \
518 DRM_INFO("COMMIT_RING() tail=0x%06x\n", \
519 dev_priv->ring.tail); \
521 R128_WRITE(R128_PM4_BUFFER_DL_WPTR, dev_priv->ring.tail); \
/linux-4.1.27/drivers/hid/
H A Dhidraw.c55 if (list->head == list->tail) { hidraw_read()
59 while (list->head == list->tail) { hidraw_read()
87 len = list->buffer[list->tail].len > count ? hidraw_read()
88 count : list->buffer[list->tail].len; hidraw_read()
90 if (list->buffer[list->tail].value) { hidraw_read()
91 if (copy_to_user(buffer, list->buffer[list->tail].value, len)) { hidraw_read()
98 kfree(list->buffer[list->tail].value); hidraw_read()
99 list->buffer[list->tail].value = NULL; hidraw_read()
100 list->tail = (list->tail + 1) & (HIDRAW_BUFFER_SIZE - 1); hidraw_read()
262 if (list->head != list->tail) hidraw_poll()
496 if (new_head == list->tail) hidraw_report_event()
H A Duhid.c44 __u8 tail; member in struct:uhid_device
64 if (newhead != uhid->tail) { uhid_queue()
651 if (uhid->head == uhid->tail) uhid_char_read()
655 uhid->head != uhid->tail); uhid_char_read()
664 if (uhid->head == uhid->tail) { uhid_char_read()
669 if (copy_to_user(buffer, uhid->outq[uhid->tail], len)) { uhid_char_read()
672 kfree(uhid->outq[uhid->tail]); uhid_char_read()
673 uhid->outq[uhid->tail] = NULL; uhid_char_read()
676 uhid->tail = (uhid->tail + 1) % UHID_BUFSIZE; uhid_char_read()
746 if (uhid->head != uhid->tail) uhid_char_poll()
/linux-4.1.27/drivers/usb/host/
H A Duhci-debug.c145 goto tail; uhci_show_urbp()
160 tail: uhci_show_urbp()
216 goto tail; uhci_show_qh()
233 goto tail; uhci_show_qh()
251 goto tail; uhci_show_qh()
257 tail: uhci_show_qh()
395 goto tail; uhci_sprint_schedule()
408 goto tail; uhci_sprint_schedule()
448 goto tail; uhci_sprint_schedule()
489 goto tail; uhci_sprint_schedule()
515 goto tail; uhci_sprint_schedule()
544 tail: uhci_sprint_schedule()
/linux-4.1.27/drivers/isdn/gigaset/
H A Dser-gigaset.c694 unsigned tail, head, n; gigaset_tty_receive() local
706 tail = inbuf->tail; gigaset_tty_receive()
709 head, tail, count); gigaset_tty_receive()
711 if (head <= tail) { gigaset_tty_receive()
713 n = min_t(unsigned, count, RBUFSIZE - tail); gigaset_tty_receive()
714 memcpy(inbuf->data + tail, buf, n); gigaset_tty_receive()
715 tail = (tail + n) % RBUFSIZE; gigaset_tty_receive()
721 /* tail < head and some data left */ gigaset_tty_receive()
722 n = head - tail - 1; gigaset_tty_receive()
729 memcpy(inbuf->data + tail, buf, count); gigaset_tty_receive()
730 tail += count; gigaset_tty_receive()
733 gig_dbg(DEBUG_INTR, "setting tail to %u", tail); gigaset_tty_receive()
734 inbuf->tail = tail; gigaset_tty_receive()
H A Dcommon.c299 unsigned head, tail; clear_events() local
305 tail = cs->ev_tail; clear_events()
307 while (tail != head) { clear_events()
313 cs->ev_head = tail; clear_events()
337 unsigned next, tail; gigaset_add_event() local
344 tail = cs->ev_tail; gigaset_add_event()
345 next = (tail + 1) % MAX_EVENTS; gigaset_add_event()
349 event = cs->events + tail; gigaset_add_event()
553 inbuf->tail = 0; gigaset_inbuf_init()
569 unsigned n, head, tail, bytesleft; gigaset_fill_inbuf() local
577 tail = inbuf->tail; gigaset_fill_inbuf()
579 gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail); gigaset_fill_inbuf()
582 if (head > tail) gigaset_fill_inbuf()
583 n = head - 1 - tail; gigaset_fill_inbuf()
585 n = (RBUFSIZE - 1) - tail; gigaset_fill_inbuf()
587 n = RBUFSIZE - tail; gigaset_fill_inbuf()
596 memcpy(inbuf->data + tail, src, n); gigaset_fill_inbuf()
598 tail = (tail + n) % RBUFSIZE; gigaset_fill_inbuf()
601 gig_dbg(DEBUG_INTR, "setting tail to %u", tail); gigaset_fill_inbuf()
602 inbuf->tail = tail; gigaset_fill_inbuf()
836 cs->inbuf->tail = 0; cleanup_cs()
/linux-4.1.27/drivers/tty/serial/
H A Dbfin_uart.c157 xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); bfin_serial_stop_tx()
341 while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) { bfin_serial_tx_chars()
342 UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); bfin_serial_tx_chars()
343 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); bfin_serial_tx_chars()
393 uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); bfin_serial_dma_tx_chars()
394 if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) bfin_serial_dma_tx_chars()
395 uart->tx_count = UART_XMIT_SIZE - xmit->tail; bfin_serial_dma_tx_chars()
396 blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail), bfin_serial_dma_tx_chars()
397 (unsigned long)(xmit->buf+xmit->tail+uart->tx_count)); bfin_serial_dma_tx_chars()
404 set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail)); bfin_serial_dma_tx_chars()
421 CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, bfin_serial_dma_rx_chars()
448 for (i = uart->rx_dma_buf.tail; ; i++) { bfin_serial_dma_rx_chars()
476 * be smaller than current buffer tail, which causes garbage bfin_serial_rx_dma_timeout()
490 * current buffer tail and small. bfin_serial_rx_dma_timeout()
492 if (pos > uart->rx_dma_buf.tail || bfin_serial_rx_dma_timeout()
493 uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) { bfin_serial_rx_dma_timeout()
496 uart->rx_dma_buf.tail = uart->rx_dma_buf.head; bfin_serial_rx_dma_timeout()
521 if (!(xmit->tail == 0 && xmit->head == 0)) { bfin_serial_dma_tx_int()
522 xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); bfin_serial_dma_tx_int()
552 if (pos > uart->rx_dma_buf.tail || bfin_serial_dma_rx_int()
553 uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) { bfin_serial_dma_rx_int()
556 uart->rx_dma_buf.tail = uart->rx_dma_buf.head; bfin_serial_dma_rx_int()
615 uart->rx_dma_buf.tail = 0; bfin_serial_startup()
H A Datmel_serial.c604 if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE)) atmel_buffer_rx_char()
705 UART_PUT_CHAR(port, xmit->buf[xmit->tail]); atmel_tx_chars()
706 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); atmel_tx_chars()
732 xmit->tail += sg_dma_len(&atmel_port->sg_tx); atmel_complete_tx_dma()
733 xmit->tail &= UART_XMIT_SIZE - 1; atmel_complete_tx_dma()
748 * xmit->tail to the end of xmit->buf, now we have to transmit the atmel_complete_tx_dma()
799 sg->offset = xmit->tail & (UART_XMIT_SIZE - 1); atmel_tx_dma()
804 xmit->tail, atmel_tx_dma()
957 * ring->tail points to the beginning of data to be read by the atmel_rx_from_dma()
969 * However ring->tail must always point inside the dma buffer: atmel_rx_from_dma()
970 * 0 <= ring->tail <= sg_dma_len(&atmel_port->sg_rx) - 1 atmel_rx_from_dma()
973 * where head is lower than tail. In such a case, we first read from atmel_rx_from_dma()
974 * tail to the end of the buffer then reset tail. atmel_rx_from_dma()
976 if (ring->head < ring->tail) { atmel_rx_from_dma()
977 count = sg_dma_len(&atmel_port->sg_rx) - ring->tail; atmel_rx_from_dma()
979 tty_insert_flip_string(tport, ring->buf + ring->tail, count); atmel_rx_from_dma()
980 ring->tail = 0; atmel_rx_from_dma()
984 /* Finally we read data from tail to head */ atmel_rx_from_dma()
985 if (ring->tail < ring->head) { atmel_rx_from_dma()
986 count = ring->head - ring->tail; atmel_rx_from_dma()
988 tty_insert_flip_string(tport, ring->buf + ring->tail, count); atmel_rx_from_dma()
992 ring->tail = ring->head; atmel_rx_from_dma()
1262 xmit->tail += pdc->ofs; atmel_tx_pdc()
1263 xmit->tail &= UART_XMIT_SIZE - 1; atmel_tx_pdc()
1279 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); atmel_tx_pdc()
1282 UART_PUT_TPR(port, pdc->dma_addr + xmit->tail); atmel_tx_pdc()
1324 while (ring->head != ring->tail) { atmel_rx_from_ring()
1330 c = ((struct atmel_uart_char *)ring->buf)[ring->tail]; atmel_rx_from_ring()
1332 ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1); atmel_rx_from_ring()
1408 unsigned int tail; atmel_rx_from_pdc() local
1417 tail = pdc->ofs; atmel_rx_from_pdc()
1431 if (likely(head != tail)) { atmel_rx_from_pdc()
1438 * explicitly set tail to 0. So head will atmel_rx_from_pdc()
1439 * always be greater than tail. atmel_rx_from_pdc()
1441 count = head - tail; atmel_rx_from_pdc()
1910 atmel_port->rx_ring.tail = 0; atmel_shutdown()
H A Dsunhv.c49 long status = sun4v_con_putchar(xmit->buf[xmit->tail]); transmit_chars_putchar()
54 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); transmit_chars_putchar()
62 unsigned long ra = __pa(xmit->buf + xmit->tail); transmit_chars_write()
65 len = CIRC_CNT_TO_END(xmit->head, xmit->tail, transmit_chars_write()
70 xmit->tail = (xmit->tail + sent) & (UART_XMIT_SIZE - 1); transmit_chars_write()
H A Dsn_console.c530 int xmit_count, tail, head, loops, ii; sn_transmit_chars() local
558 tail = xmit->tail; sn_transmit_chars()
559 start = &xmit->buf[tail]; sn_transmit_chars()
561 /* twice around gets the tail to the end of the buffer and sn_transmit_chars()
563 loops = (head < tail) ? 2 : 1; sn_transmit_chars()
566 xmit_count = (head < tail) ? sn_transmit_chars()
567 (UART_XMIT_SIZE - tail) : (head - tail); sn_transmit_chars()
584 tail += result; sn_transmit_chars()
585 tail &= UART_XMIT_SIZE - 1; sn_transmit_chars()
586 xmit->tail = tail; sn_transmit_chars()
587 start = &xmit->buf[tail]; sn_transmit_chars()
928 int ltail = port->sc_port.state->xmit.tail; sn_sal_console_write()
933 * lock. We wait ~20 secs after the head and tail ptrs sn_sal_console_write()
955 port->sc_port.state->xmit.tail)) { sn_sal_console_write()
959 port->sc_port.state->xmit.tail; sn_sal_console_write()
H A Dserial-tegra.c367 tegra_uart_write(tup, xmit->buf[xmit->tail], UART_TX); tegra_uart_fill_tx_fifo()
368 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); tegra_uart_fill_tx_fifo()
397 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); tegra_uart_tx_dma_complete()
415 tx_phys_addr = tup->tx_dma_buf_phys + xmit->tail; tegra_uart_start_tx_dma()
435 unsigned long tail; tegra_uart_start_next_tx() local
439 tail = (unsigned long)&xmit->buf[xmit->tail]; tegra_uart_start_next_tx()
440 count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); tegra_uart_start_next_tx()
446 else if (BYTES_TO_ALIGN(tail) > 0) tegra_uart_start_next_tx()
447 tegra_uart_start_pio_tx(tup, BYTES_TO_ALIGN(tail)); tegra_uart_start_next_tx()
492 xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1); tegra_uart_stop_tx()
H A Dtilegx.c156 ch = xmit->buf[xmit->tail]; handle_transmit()
159 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); handle_transmit()
282 ch = xmit->buf[xmit->tail]; tilegx_start_tx()
285 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); tilegx_start_tx()
H A Dmen_z135_uart.c307 int tail; men_z135_handle_tx() local
353 tail = xmit->tail & (UART_XMIT_SIZE - 1); men_z135_handle_tx()
355 s = ((head >= tail) ? head : UART_XMIT_SIZE) - tail; men_z135_handle_tx()
358 memcpy_toio(port->membase + MEN_Z135_TX_RAM, &xmit->buf[xmit->tail], n); men_z135_handle_tx()
359 xmit->tail = (xmit->tail + n) & (UART_XMIT_SIZE - 1); men_z135_handle_tx()
/linux-4.1.27/drivers/crypto/
H A Dn2_core.h179 * RET1: queue tail offset
187 * ARG1: New tail offset
222 unsigned long *tail);
224 unsigned long tail);
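The n2_core.h hits describe a hypervisor queue interface in terms of offsets: one call returns the current tail, another publishes a new tail. A heavily hedged sketch of the producer side; hv_queue_get()/hv_queue_set_tail() are hypothetical stand-ins, not the real hypervisor calls, and the geometry is invented:

    #define Q_ENTRY_SIZE 64UL
    #define Q_SIZE       8192UL  /* bytes, power of two, illustrative */

    /* Hypothetical accessors for the head/tail offsets. */
    extern int hv_queue_get(unsigned long q, unsigned long *head,
                            unsigned long *tail);
    extern int hv_queue_set_tail(unsigned long q, unsigned long new_tail);

    static int submit_entry(unsigned long q)
    {
            unsigned long head, tail, new_tail;

            if (hv_queue_get(q, &head, &tail))
                    return -1;
            new_tail = (tail + Q_ENTRY_SIZE) % Q_SIZE;
            if (new_tail == head)
                    return -1;  /* full: one slot kept free */
            /* ... write the descriptor at offset 'tail' here ... */
            return hv_queue_set_tail(q, new_tail);
    }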
/linux-4.1.27/drivers/tty/serial/8250/
H A D8250_dma.c33 xmit->tail += dma->tx_size; __dma_tx_complete()
34 xmit->tail &= UART_XMIT_SIZE - 1; __dma_tx_complete()
82 dma->tx_size = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); serial8250_tx_dma()
85 dma->tx_addr + xmit->tail, serial8250_tx_dma()
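serial8250_tx_dma() above sizes each transfer with CIRC_CNT_TO_END so the DMA engine only ever sees one contiguous chunk; __dma_tx_complete() then advances the tail under a mask. A sketch of that split for a power-of-two buffer, with a memcpy standing in for the descriptor setup:

    #include <string.h>

    #define XMIT_SIZE 4096  /* power of two, as the real UART_XMIT_SIZE is */

    /* Bytes readable without wrapping: bounded by head or by the
     * physical end of the buffer, whichever comes first. */
    static int cnt_to_end(int head, int tail, int size)
    {
            int end = size - tail;
            int n = (head + end) & (size - 1);

            return n < end ? n : end;
    }

    static int start_tx(char *dma_dst, const char *buf, int head, int *tail)
    {
            int tx_size = cnt_to_end(head, *tail, XMIT_SIZE);

            memcpy(dma_dst, buf + *tail, tx_size);        /* contiguous only */
            *tail = (*tail + tx_size) & (XMIT_SIZE - 1);  /* completion step */
            return tx_size;
    }

In the driver the tail advance happens in the completion callback, not inline; the sketch folds the two steps together for brevity.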
/linux-4.1.27/arch/blackfin/kernel/
H A Dentry.S24 * Hw IRQs are off on entry, and we don't want the scheduling tail
/linux-4.1.27/kernel/locking/
H A Dosq_lock.c52 if (atomic_read(&lock->tail) == curr && osq_wait_next()
53 atomic_cmpxchg(&lock->tail, curr, old) == curr) { osq_wait_next()
95 old = atomic_xchg(&lock->tail, curr); osq_lock()
187 if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr)) osq_unlock()
/linux-4.1.27/mm/
H A Dswap.c93 * and could skip the tail refcounting (in _mapcount).
101 * So if we see PageHeadHuge set, and we have the tail page pin,
107 * tail pin cannot be the last reference left on the head page,
110 * there's any tail pin left. In turn all tail pinsmust be always
114 * So if we see PageSlab set, and we have the tail page pin,
121 * If @page is a THP tail, we must read the tail page put_unrefcounted_compound_page()
137 * If this is the tail of a slab THP page, put_unrefcounted_compound_page()
138 * the tail pin must not be the last reference put_unrefcounted_compound_page()
140 * be cleared before all tail pins (which skips put_unrefcounted_compound_page()
141 * the _mapcount tail refcounting) have been put_unrefcounted_compound_page()
144 * If this is the tail of a hugetlbfs page, put_unrefcounted_compound_page()
145 * the tail pin may be the last reference on put_unrefcounted_compound_page()
156 * @page was a THP tail. The split @page_head put_unrefcounted_compound_page()
189 * tail page. That is because we put_refcounted_compound_page()
191 * split THP tail and page_head was put_refcounted_compound_page()
258 * 1. a tail hugetlbfs page, or put_compound_page()
259 * 2. a tail THP page, or put_compound_page()
288 * This takes care of get_page() if run on a tail page __get_page_tail()
315 * tail. The split page_head has been __get_page_tail()
473 * reclaim. If it still appears to be reclaimable, move it to the tail of the
736 * head of the list, rather than the tail, to give the flusher
747 * 5. inactive, clean -> inactive, tail
789 * We move the page to the tail of the inactive list. lru_deactivate_file_fn()
H A Dgup.c1062 * For a futex to be placed on a THP tail page, get_futex_key requires a
1076 struct page *head, *page, *tail; gup_huge_pmd() local
1085 tail = page; gup_huge_pmd()
1107 * Any tail pages need their mapcount reference taken before we gup_huge_pmd()
1112 if (PageTail(tail)) gup_huge_pmd()
1113 get_huge_page_tail(tail); gup_huge_pmd()
1114 tail++; gup_huge_pmd()
1123 struct page *head, *page, *tail; gup_huge_pud() local
1132 tail = page; gup_huge_pud()
1154 if (PageTail(tail)) gup_huge_pud()
1155 get_huge_page_tail(tail); gup_huge_pud()
1156 tail++; gup_huge_pud()
1167 struct page *head, *page, *tail; gup_huge_pgd() local
1175 tail = page; gup_huge_pgd()
1197 if (PageTail(tail)) gup_huge_pgd()
1198 get_huge_page_tail(tail); gup_huge_pgd()
1199 tail++; gup_huge_pgd()
/linux-4.1.27/fs/adfs/
H A Ddir_f.h40 * Directory tail
/linux-4.1.27/drivers/staging/rtl8712/
H A Drtl871x_event.h99 /*volatile*/ int tail; member in struct:c2hevent_queue
108 /*volatile*/ int tail; member in struct:network_queue
H A Drtl8712_recv.h122 tail ----->
124 len = (unsigned int )(tail - data);
/linux-4.1.27/drivers/staging/rtl8188eu/include/
H A Drtw_event.h102 int tail; member in struct:c2hevent_queue
111 int tail; member in struct:network_queue
/linux-4.1.27/drivers/thunderbolt/
H A Dnhi_regs.h47 * 08: ring tail (set by NHI)
57 * 10: ring tail (set by NHI)
H A Dnhi.c120 return ((ring->head + 1) % ring->size) == ring->tail; ring_full()
125 return ring->head == ring->tail; ring_empty()
181 if (!(ring->descriptors[ring->tail].flags ring_work()
188 frame->size = ring->descriptors[ring->tail].length; ring_work()
189 frame->eof = ring->descriptors[ring->tail].eof; ring_work()
190 frame->sof = ring->descriptors[ring->tail].sof; ring_work()
191 frame->flags = ring->descriptors[ring->tail].flags; ring_work()
210 ring->tail = (ring->tail + 1) % ring->size; ring_work()
274 ring->tail = 0; ring_alloc()
374 ring->tail = 0; ring_stop()
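ring_full()/ring_empty() above use the standard index-ring convention: empty when head equals tail, full when advancing head would land on tail, which deliberately wastes one slot so the two states stay distinguishable. A tiny sketch:

    #include <assert.h>
    #include <stdbool.h>

    struct idx_ring {
            int head;  /* next slot the driver fills    */
            int tail;  /* next slot awaiting completion */
            int size;
    };

    static bool ring_full(const struct idx_ring *r)
    {
            return ((r->head + 1) % r->size) == r->tail;
    }

    static bool ring_empty(const struct idx_ring *r)
    {
            return r->head == r->tail;
    }

    int main(void)
    {
            struct idx_ring r = { 7, 0, 8 };

            assert(ring_full(&r) && !ring_empty(&r));
            return 0;
    }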
/linux-4.1.27/security/selinux/
H A Dnetlink.c83 tmp = skb->tail; selnl_notify()
88 nlh->nlmsg_len = skb->tail - tmp; selnl_notify()
H A Dnetnode.c176 struct sel_netnode *tail; sel_netnode_insert() local
177 tail = list_entry( sel_netnode_insert()
181 list_del_rcu(&tail->list); sel_netnode_insert()
182 kfree_rcu(tail, rcu); sel_netnode_insert()
H A Dnetport.c124 struct sel_netport *tail; sel_netport_insert() local
125 tail = list_entry( sel_netport_insert()
130 list_del_rcu(&tail->list); sel_netport_insert()
131 kfree_rcu(tail, rcu); sel_netport_insert()
/linux-4.1.27/arch/sparc/mm/
H A Dgup.c73 struct page *head, *page, *tail; gup_huge_pmd() local
85 tail = page; gup_huge_pmd()
106 /* Any tail pages need their mapcount reference taken before we gup_huge_pmd()
110 if (PageTail(tail)) gup_huge_pmd()
111 get_huge_page_tail(tail); gup_huge_pmd()
112 tail++; gup_huge_pmd()
/linux-4.1.27/arch/s390/mm/
H A Dgup.c55 struct page *head, *page, *tail; gup_huge_pmd() local
67 tail = page; gup_huge_pmd()
89 * Any tail pages need their mapcount reference taken before we gup_huge_pmd()
93 if (PageTail(tail)) gup_huge_pmd()
94 get_huge_page_tail(tail); gup_huge_pmd()
95 tail++; gup_huge_pmd()
/linux-4.1.27/drivers/input/
H A Devdev.c53 unsigned int tail; member in struct:evdev_client
55 spinlock_t buffer_lock; /* protects access to buffer, head and tail */
75 head = client->tail; __evdev_flush_queue()
76 client->packet_head = client->tail; __evdev_flush_queue()
81 for (i = client->tail; i != client->head; i = (i + 1) & mask) { __evdev_flush_queue()
130 if (unlikely(client->head == client->tail)) { __evdev_queue_syn_dropped()
132 client->tail = (client->head - 1) & (client->bufsize - 1); __evdev_queue_syn_dropped()
133 client->packet_head = client->tail; __evdev_queue_syn_dropped()
174 if (client->head != client->tail) { evdev_set_clk_type()
175 client->packet_head = client->head = client->tail; evdev_set_clk_type()
190 if (unlikely(client->head == client->tail)) { __pass_event()
195 client->tail = (client->head - 2) & (client->bufsize - 1); __pass_event()
197 client->buffer[client->tail].time = event->time; __pass_event()
198 client->buffer[client->tail].type = EV_SYN; __pass_event()
199 client->buffer[client->tail].code = SYN_DROPPED; __pass_event()
200 client->buffer[client->tail].value = 0; __pass_event()
202 client->packet_head = client->tail; __pass_event()
518 have_event = client->packet_head != client->tail; evdev_fetch_next_event()
520 *event = client->buffer[client->tail++]; evdev_fetch_next_event()
521 client->tail &= client->bufsize - 1; evdev_fetch_next_event()
545 if (client->packet_head == client->tail && evdev_read()
570 client->packet_head != client->tail || evdev_read()
594 if (client->packet_head != client->tail) evdev_poll()
H A Djoydev.c66 int tail; member in struct:joydev_client
68 spinlock_t buffer_lock; /* protects access to buffer, head and tail */
109 if (client->tail == client->head) joydev_pass_event()
319 have_event = client->head != client->tail; joydev_fetch_next_event()
321 *event = client->buffer[client->tail++]; joydev_fetch_next_event()
322 client->tail &= JOYDEV_BUFFER_SIZE - 1; joydev_fetch_next_event()
357 client->tail = client->head; joydev_0x_read()
373 client->head != client->tail; joydev_data_pending()
/linux-4.1.27/drivers/video/fbdev/i810/
H A Di810_accel.c62 u32 head, count = WAIT_COUNT, tail; wait_for_space() local
65 tail = par->cur_tail; wait_for_space()
68 if ((tail == head) || wait_for_space()
69 (tail > head && wait_for_space()
70 (par->iring.size - tail + head) >= space) || wait_for_space()
71 (tail < head && (head - tail) >= space)) { wait_for_space()
116 * space. Returns the tail of the buffer
132 * This advances the tail of the ringbuffer, effectively
433 * the head and tail pointers = 0
/linux-4.1.27/drivers/net/ethernet/apm/xgene/
H A Dxgene_enet_main.c54 u32 tail = buf_pool->tail; xgene_enet_refill_bufpool() local
65 raw_desc = &buf_pool->raw_desc16[tail]; xgene_enet_refill_bufpool()
70 buf_pool->rx_skb[tail] = skb; xgene_enet_refill_bufpool()
82 tail = (tail + 1) & slots; xgene_enet_refill_bufpool()
86 buf_pool->tail = tail; xgene_enet_refill_bufpool()
120 u32 tail = buf_pool->tail; xgene_enet_delete_bufpool() local
126 tail = (tail - 1) & slots; xgene_enet_delete_bufpool()
127 raw_desc = &buf_pool->raw_desc16[tail]; xgene_enet_delete_bufpool()
135 buf_pool->tail = tail; xgene_enet_delete_bufpool()
233 u16 tail = tx_ring->tail; xgene_enet_setup_tx_desc() local
236 raw_desc = &tx_ring->raw_desc[tail]; xgene_enet_setup_tx_desc()
246 raw_desc->m0 = cpu_to_le64(tail); xgene_enet_setup_tx_desc()
253 tx_ring->cp_ring->cp_skb[tail] = skb; xgene_enet_setup_tx_desc()
281 tx_ring->tail = (tx_ring->tail + 1) & (tx_ring->slots - 1); xgene_enet_start_xmit()
/linux-4.1.27/arch/um/drivers/
H A Dline.c42 n = line->head - line->tail; write_room()
97 line->tail = line->buffer; buffer_data()
103 end = line->buffer + LINE_BUFSIZE - line->tail; buffer_data()
106 memcpy(line->tail, buf, len); buffer_data()
107 line->tail += len; buffer_data()
111 memcpy(line->tail, buf, end); buffer_data()
114 line->tail = line->buffer + len - end; buffer_data()
133 if ((line->buffer == NULL) || (line->head == line->tail)) flush_buffer()
136 if (line->tail < line->head) { flush_buffer()
147 * must flush only from the beginning to ->tail. flush_buffer()
156 count = line->tail - line->head; flush_buffer()
164 return line->head == line->tail; flush_buffer()
198 if (line->head != line->tail) line_write()
265 line->tail = line->buffer; line_write_interrupt()
H A Dline.h49 * LINE_BUFSIZE, head to the start of the ring, tail to the end.*/
52 char *tail; member in struct:line
/linux-4.1.27/net/bluetooth/cmtp/
H A Dcore.c214 unsigned int size, tail; cmtp_process_transmit() local
227 tail = session->mtu - nskb->len; cmtp_process_transmit()
228 if (tail < 5) { cmtp_process_transmit()
231 tail = session->mtu; cmtp_process_transmit()
234 size = min_t(uint, ((tail < 258) ? (tail - 2) : (tail - 3)), skb->len); cmtp_process_transmit()
/linux-4.1.27/arch/arm64/kernel/
H A Dperf_event.c1424 * next frame tail.
1427 user_backtrace(struct frame_tail __user *tail, user_backtrace() argument
1434 if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) user_backtrace()
1438 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); user_backtrace()
1450 if (tail >= buftail.fp) user_backtrace()
1472 compat_user_backtrace(struct compat_frame_tail __user *tail, compat_user_backtrace() argument
1479 if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) compat_user_backtrace()
1483 err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail)); compat_user_backtrace()
1495 if (tail + 1 >= (struct compat_frame_tail __user *) compat_user_backtrace()
1515 struct frame_tail __user *tail; perf_callchain_user() local
1517 tail = (struct frame_tail __user *)regs->regs[29]; perf_callchain_user()
1520 tail && !((unsigned long)tail & 0xf)) perf_callchain_user()
1521 tail = user_backtrace(tail, entry); perf_callchain_user()
1525 struct compat_frame_tail __user *tail; perf_callchain_user() local
1527 tail = (struct compat_frame_tail __user *)regs->compat_fp - 1; perf_callchain_user()
1530 tail && !((unsigned long)tail & 0x3)) perf_callchain_user()
1531 tail = compat_user_backtrace(tail, entry); perf_callchain_user()
/linux-4.1.27/drivers/net/wimax/i2400m/
H A Dtx.c104 * until the tail and continue at the head of it.
233 * i2400m_tx_skip_tail() Marks unusable FIFO tail space
296 * Calculate how much tail room is available
303 * | tail room |
345 * @try_head: specify either to allocate head room or tail room space
348 * The caller must always try to allocate tail room space first by
350 * is not enough tail room space but there is enough head room space,
357 * space. TAIL_FULL if there is no space at the tail but there is at
366 * | tail room | | data |
380 * A, we only try from the tail room; if it is not enough, we just
382 * skip the tail room and try to allocate from the head.
389 * N ___________ tail room is zero
400 * During such a time, where tail room is zero in the TX FIFO and if there
415 * is no tail room to accommodate the payload and calls
416 * i2400m_tx_skip_tail() to skip the tail space. Now i2400m_tx() calls
418 * i2400m_tx_fifo_push() that returns TAIL_FULL, since there is no tail space
448 /* Is there space at the tail? */ i2400m_tx_fifo_push()
452 * If the tail room space is not enough to push the message i2400m_tx_fifo_push()
457 * in tail room of the TX FIFO to accommodate the message. i2400m_tx_fifo_push()
465 d_printf(2, dev, "fifo push %zu/%zu: tail full\n", i2400m_tx_fifo_push()
483 * Mark the tail of the FIFO buffer as 'to-skip'
515 d_printf(2, dev, "skip tail: skipping %zu bytes @%zu\n", i2400m_tx_skip_tail()
582 d_printf(2, dev, "new TX message: tail full, trying head\n"); i2400m_tx_new()
627 tail (and taking padding into consideration). */ i2400m_tx_close()
635 * to move it so the tail is next to the payloads, move it and i2400m_tx_close()
771 d_printf(2, dev, "pl append: tail full\n"); i2400m_tx()
/linux-4.1.27/net/sunrpc/xprtrdma/
H A Drpc_rdma.c125 if (xdrbuf->tail[0].iov_len) { rpcrdma_convert_iovs()
128 if (xdrbuf->tail[0].iov_len < 4 && xprt_rdma_pad_optimize) rpcrdma_convert_iovs()
134 seg[n].mr_offset = xdrbuf->tail[0].iov_base; rpcrdma_convert_iovs()
135 seg[n].mr_len = xdrbuf->tail[0].iov_len; rpcrdma_convert_iovs()
329 if (rqst->rq_snd_buf.tail[0].iov_len) { rpcrdma_inline_pullup()
330 curlen = rqst->rq_snd_buf.tail[0].iov_len; rpcrdma_inline_pullup()
331 if (destp + copy_len != rqst->rq_snd_buf.tail[0].iov_base) { rpcrdma_inline_pullup()
333 rqst->rq_snd_buf.tail[0].iov_base, curlen); rpcrdma_inline_pullup()
336 dprintk("RPC: %s: tail destp 0x%p len %d\n", rpcrdma_inline_pullup()
661 if (copy_len && rqst->rq_rcv_buf.tail[0].iov_len) { rpcrdma_inline_fixup()
663 if (curlen > rqst->rq_rcv_buf.tail[0].iov_len) rpcrdma_inline_fixup()
664 curlen = rqst->rq_rcv_buf.tail[0].iov_len; rpcrdma_inline_fixup()
665 if (rqst->rq_rcv_buf.tail[0].iov_base != srcp) rpcrdma_inline_fixup()
666 memmove(rqst->rq_rcv_buf.tail[0].iov_base, srcp, curlen); rpcrdma_inline_fixup()
667 dprintk("RPC: %s: tail srcp 0x%p len %d curlen %d\n", rpcrdma_inline_fixup()
669 rqst->rq_rcv_buf.tail[0].iov_len = curlen; rpcrdma_inline_fixup()
672 rqst->rq_rcv_buf.tail[0].iov_len = 0; rpcrdma_inline_fixup()
676 unsigned char *p = rqst->rq_rcv_buf.tail[0].iov_base; rpcrdma_inline_fixup()
678 p[rqst->rq_rcv_buf.tail[0].iov_len++] = 0; rpcrdma_inline_fixup()
H A Dsvc_rdma_recvfrom.c113 /* Set up tail */ rdma_build_arg_xdr()
114 rqstp->rq_arg.tail[0].iov_base = NULL; rdma_build_arg_xdr()
115 rqstp->rq_arg.tail[0].iov_len = 0; rdma_build_arg_xdr()
384 dprintk("svcrdma: large tail unsupported\n"); rdma_copy_tail()
388 /* Fit as much of the tail on the current page as possible */ rdma_copy_tail()
444 head->arg.tail[0] = rqstp->rq_arg.tail[0]; rdma_read_chunks()
549 /* Rebuild rq_arg head and tail. */ rdma_read_complete()
551 rqstp->rq_arg.tail[0] = head->arg.tail[0]; rdma_read_complete()
564 + rqstp->rq_arg.tail[0].iov_len; rdma_read_complete()
651 + rqstp->rq_arg.tail[0].iov_len; svc_rdma_recvfrom()
H A Dsvc_rdma_sendto.c64 (xdr->head[0].iov_len + xdr->page_len + xdr->tail[0].iov_len)) { map_xdr()
94 if (xdr->tail[0].iov_len) { map_xdr()
95 vec->sge[sge_no].iov_base = xdr->tail[0].iov_base; map_xdr()
96 vec->sge[sge_no].iov_len = xdr->tail[0].iov_len; map_xdr()
103 xdr->head[0].iov_len, xdr->tail[0].iov_len); map_xdr()
127 /* This offset is in the tail */ dma_map_xdr()
130 xdr->tail[0].iov_base & ~PAGE_MASK; dma_map_xdr()
131 page = virt_to_page(xdr->tail[0].iov_base); dma_map_xdr()
238 u32 xfer_len = rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len; send_write_chunks()
292 return rqstp->rq_res.page_len + rqstp->rq_res.tail[0].iov_len; send_write_chunks()
460 * xdr_buf.tail gets a separate sge, but resides in the send_reply()
/linux-4.1.27/drivers/scsi/be2iscsi/
H A Dbe.h43 u16 tail, head; member in struct:be_queue_info
71 return q->dma_mem.va + q->tail * q->entry_size; queue_tail_node()
81 index_inc(&q->tail, q->len); queue_tail_inc()
/linux-4.1.27/drivers/hid/usbhid/
H A Dhiddev.c62 int tail; member in struct:hiddev_list
360 if (list->head == list->tail) { hiddev_read()
363 while (list->head == list->tail) { hiddev_read()
396 while (list->head != list->tail && hiddev_read()
399 if (list->buffer[list->tail].field_index != HID_FIELD_INDEX_NONE) { hiddev_read()
402 event.hid = list->buffer[list->tail].usage_code; hiddev_read()
403 event.value = list->buffer[list->tail].value; hiddev_read()
411 if (list->buffer[list->tail].field_index != HID_FIELD_INDEX_NONE || hiddev_read()
414 if (copy_to_user(buffer + retval, list->buffer + list->tail, sizeof(struct hiddev_usage_ref))) { hiddev_read()
421 list->tail = (list->tail + 1) & (HIDDEV_BUFFER_SIZE - 1); hiddev_read()
439 if (list->head != list->tail) hiddev_poll()
H A Dusbhid.h77 unsigned char ctrlhead, ctrltail; /* Control fifo head & tail */
84 unsigned char outhead, outtail; /* Output pipe fifo head & tail */
/linux-4.1.27/net/tipc/
H A Dmsg.c124 struct sk_buff *tail = NULL; tipc_buf_append() local
145 TIPC_SKB_CB(head)->tail = NULL; tipc_buf_append()
147 skb_walk_frags(head, tail) { skb_walk_frags()
148 TIPC_SKB_CB(head)->tail = tail; skb_walk_frags()
162 tail = TIPC_SKB_CB(head)->tail;
166 tail->next = frag;
170 TIPC_SKB_CB(head)->tail = frag;
178 TIPC_SKB_CB(head)->tail = NULL;
339 * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
424 * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
/linux-4.1.27/sound/isa/msnd/
H A Dmsnd_midi.c87 u16 tail; snd_msndmidi_input_drop() local
89 tail = readw(mpu->dev->MIDQ + JQS_wTail); snd_msndmidi_input_drop()
90 writew(tail, mpu->dev->MIDQ + JQS_wHead); snd_msndmidi_input_drop()
/linux-4.1.27/drivers/net/wireless/mwifiex/
H A Die.c320 /* This function parses head and tail IEs, from cfg80211_beacon_data and sets
340 if (info->tail && info->tail_len) { mwifiex_uap_set_head_tail_ies()
342 info->tail, info->tail_len); mwifiex_uap_set_head_tail_ies()
351 info->tail, mwifiex_uap_set_head_tail_ies()
362 info->tail, info->tail_len); mwifiex_uap_set_head_tail_ies()
384 /* This function parses different IEs-head & tail IEs, beacon IEs,
/linux-4.1.27/net/caif/
H A Dcfpkt_skbuff.c190 /* Check whether we need to add space at the tail */ cfpkt_add_body()
198 /* Check whether we need to change the SKB before writing to the tail */ cfpkt_add_body()
318 if (dst->tail + neededtailspace > dst->end) { cfpkt_append()
319 /* Create a duplicate of 'dst' with more tail space */ cfpkt_append()
335 dst->tail += addlen; cfpkt_append()
372 skb2->tail += len2nd; cfpkt_split()
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
H A Ddynack.h28 * @t_rb: ring buffer tail
49 * @t_rb: ring buffer tail
/linux-4.1.27/block/
H A Dblk-exec.c41 * @at_head: insert request at head or tail of queue
103 * @at_head: insert request at head or tail of queue
/linux-4.1.27/net/decnet/netfilter/
H A Ddn_rtmsg.c49 old_tail = skb->tail; dnrmg_build_message()
60 nlh->nlmsg_len = skb->tail - old_tail; dnrmg_build_message()
/linux-4.1.27/drivers/gpu/drm/savage/
H A Dsavage_bci.c215 dev_priv->head.next = &dev_priv->tail; savage_freelist_init()
219 dev_priv->tail.next = NULL; savage_freelist_init()
220 dev_priv->tail.prev = &dev_priv->head; savage_freelist_init()
221 dev_priv->tail.buf = NULL; savage_freelist_init()
242 drm_savage_buf_priv_t *tail = dev_priv->tail.prev; savage_freelist_get() local
256 DRM_DEBUG(" tail=0x%04x %d\n", tail->age.event, tail->age.wrap); savage_freelist_get()
259 if (tail->buf && (TEST_AGE(&tail->age, event, wrap) || event == 0)) { savage_freelist_get()
260 drm_savage_buf_priv_t *next = tail->next; savage_freelist_get()
261 drm_savage_buf_priv_t *prev = tail->prev; savage_freelist_get()
264 tail->next = tail->prev = NULL; savage_freelist_get()
265 return tail->buf; savage_freelist_get()
268 DRM_DEBUG("returning NULL, tail->buf=%p!\n", tail->buf); savage_freelist_get()
/linux-4.1.27/net/mac80211/
H A Dwpa.c38 int tail; ieee80211_tx_h_michael_mic_add() local
65 tail = MICHAEL_MIC_LEN; ieee80211_tx_h_michael_mic_add()
67 tail += IEEE80211_TKIP_ICV_LEN; ieee80211_tx_h_michael_mic_add()
69 if (WARN(skb_tailroom(skb) < tail || ieee80211_tx_h_michael_mic_add()
71 "mmic: not enough head/tail (%d/%d,%d/%d)\n", ieee80211_tx_h_michael_mic_add()
73 skb_tailroom(skb), tail)) ieee80211_tx_h_michael_mic_add()
190 int len, tail; tkip_encrypt_skb() local
204 tail = 0; tkip_encrypt_skb()
206 tail = IEEE80211_TKIP_ICV_LEN; tkip_encrypt_skb()
208 if (WARN_ON(skb_tailroom(skb) < tail || tkip_encrypt_skb()
404 int hdrlen, len, tail; ccmp_encrypt_skb() local
428 tail = 0; ccmp_encrypt_skb()
430 tail = mic_len; ccmp_encrypt_skb()
432 if (WARN_ON(skb_tailroom(skb) < tail || ccmp_encrypt_skb()
629 int hdrlen, len, tail; gcmp_encrypt_skb() local
652 tail = 0; gcmp_encrypt_skb()
654 tail = IEEE80211_GCMP_MIC_LEN; gcmp_encrypt_skb()
656 if (WARN_ON(skb_tailroom(skb) < tail || gcmp_encrypt_skb()
/linux-4.1.27/drivers/scsi/aic7xxx/
H A Dqueue.h35 * singly-linked tail queues, lists, tail queues, and circular queues.
47 * A singly-linked tail queue is headed by a pair of pointers, one to the
48 * head of the list and the other to the tail of the list. The elements are
52 * end of the list. Elements being removed from the head of the tail queue
54 * A singly-linked tail queue may only be traversed in the forward direction.
55 * Singly-linked tail queues are ideal for applications with large datasets
65 * A tail queue is headed by a pair of pointers, one to the head of the
66 * list and the other to the tail of the list. The elements are doubly
70 * the list. A tail queue may be traversed in either direction.
73 * list and the other to the tail of the list. The elements are doubly
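
The queue.h block above is descriptive prose; a minimal hand-rolled equivalent of the singly-linked tail queue it describes (a sketch, not the BSD macros themselves) keeps a head pointer plus a pointer to the last next-field, so tail insertion is O(1) while traversal stays forward-only:

struct node {
        struct node *next;
        int value;
};

struct stailq {
        struct node *first;     /* head of the list */
        struct node **lastp;    /* address of the final next pointer */
};

static void stailq_init(struct stailq *q)
{
        q->first = NULL;
        q->lastp = &q->first;
}

static void stailq_insert_tail(struct stailq *q, struct node *n)
{
        n->next = NULL;         /* new element terminates the list */
        *q->lastp = n;          /* link it after the current tail */
        q->lastp = &n->next;    /* it is now the tail */
}

static struct node *stailq_remove_head(struct stailq *q)
{
        struct node *n = q->first;

        if (n) {
                q->first = n->next;
                if (!q->first)  /* queue went empty: reset the tail */
                        q->lastp = &q->first;
        }
        return n;
}
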
/linux-4.1.27/crypto/
H A Dauthenc.c46 char tail[]; member in struct:authenc_request_ctx
129 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); authenc_geniv_ahash_update_done()
158 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); authenc_geniv_ahash_done()
181 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); authenc_verify_ahash_update_done()
230 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); authenc_verify_ahash_done()
265 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); crypto_authenc_ahash_fb()
266 u8 *hash = areq_ctx->tail; crypto_authenc_ahash_fb()
304 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); crypto_authenc_ahash()
305 u8 *hash = areq_ctx->tail; crypto_authenc_ahash()
384 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail crypto_authenc_encrypt_done()
402 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail crypto_authenc_encrypt()
H A Deseqiv.c33 char tail[]; member in struct:eseqiv_request_ctx
47 memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail, eseqiv_complete2()
85 subreq = (void *)(reqctx->tail + ctx->reqoff); eseqiv_givencrypt()
102 giv = PTR_ALIGN((u8 *)reqctx->tail, eseqiv_givencrypt()
H A Dauthencesn.c50 char tail[]; member in struct:authenc_esn_request_ctx
103 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); authenc_esn_geniv_ahash_update_done()
143 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); authenc_esn_geniv_ahash_update_done2()
174 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); authenc_esn_geniv_ahash_done()
198 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); authenc_esn_verify_ahash_update_done()
259 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); authenc_esn_verify_ahash_update_done2()
309 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); authenc_esn_verify_ahash_done()
345 struct ahash_request *ahreq = (void *)(areq_ctx->tail + ctx->reqoff); crypto_authenc_esn_ahash()
346 u8 *hash = areq_ctx->tail; crypto_authenc_esn_ahash()
478 struct ablkcipher_request *abreq = (void *)(areq_ctx->tail crypto_authenc_esn_encrypt()
/linux-4.1.27/fs/xfs/libxfs/
H A Dxfs_dir2_block.c337 xfs_dir2_block_tail_t *btp; /* block tail */ xfs_dir2_block_addname()
459 * Update the tail (entry count). xfs_dir2_block_addname()
552 * Clean up the bestfree array and log the header, tail, and entry. xfs_dir2_block_addname()
585 * Log the block tail.
611 xfs_dir2_block_tail_t *btp; /* block tail */ xfs_dir2_block_lookup()
659 xfs_dir2_block_tail_t *btp; /* block tail */ xfs_dir2_block_lookup_int()
761 xfs_dir2_block_tail_t *btp; /* block tail */ xfs_dir2_block_removename()
800 * Fix up the block tail. xfs_dir2_block_removename()
841 xfs_dir2_block_tail_t *btp; /* block tail */ xfs_dir2_block_replace()
905 xfs_dir2_block_tail_t *btp; /* block tail */ xfs_dir2_leaf_to_block()
912 xfs_dir2_leaf_tail_t *ltp; /* leaf tail structure */ xfs_dir2_leaf_to_block()
998 * Initialize the block tail. xfs_dir2_leaf_to_block()
1050 xfs_dir2_block_tail_t *btp; /* block tail pointer */ xfs_dir2_sf_to_block()
1124 * Compute size of block "tail" area. xfs_dir2_sf_to_block()
1130 * Say we're using the leaf and tail area. xfs_dir2_sf_to_block()
1138 * Fill in the tail. xfs_dir2_sf_to_block()
1246 * Log the leaf entry area and tail. xfs_dir2_sf_to_block()
/linux-4.1.27/arch/s390/kernel/
H A Dperf_cpum_sf.c77 unsigned long *tail; /* last sample-data-block-table */ member in struct:sf_buffer
188 unsigned long *new, *tail; realloc_sampling_buffer() local
190 if (!sfb->sdbt || !sfb->tail) realloc_sampling_buffer()
193 if (!is_link_entry(sfb->tail)) realloc_sampling_buffer()
198 * The tail variable always points to the "tail" (last and table-link) realloc_sampling_buffer()
201 tail = sfb->tail; realloc_sampling_buffer()
206 if (sfb->sdbt != get_next_sdbt(tail)) { realloc_sampling_buffer()
209 "tail=%p\n", realloc_sampling_buffer()
210 (void *) sfb->sdbt, (void *) tail); realloc_sampling_buffer()
218 if (require_table_link(tail)) { realloc_sampling_buffer()
225 /* Link current page to tail of chain */ realloc_sampling_buffer()
226 *tail = (unsigned long)(void *) new + 1; realloc_sampling_buffer()
227 tail = new; realloc_sampling_buffer()
235 rc = alloc_sample_data_block(tail, gfp_flags); realloc_sampling_buffer()
239 tail++; realloc_sampling_buffer()
243 *tail = (unsigned long) sfb->sdbt + 1; realloc_sampling_buffer()
244 sfb->tail = tail; realloc_sampling_buffer()
280 sfb->tail = sfb->sdbt; alloc_sampling_buffer()
281 *sfb->tail = (unsigned long)(void *) sfb->sdbt + 1; alloc_sampling_buffer()
/linux-4.1.27/drivers/usb/musb/
H A Dcppi_dma.c114 c->tail = NULL; cppi_pool_init()
655 tx->tail = bd; cppi_next_tx_segment()
766 struct cppi_descriptor *bd, *tail; cppi_next_rx_segment() local
836 for (i = 0, tail = NULL; bd && i < n_bds; i++, tail = bd) { cppi_next_rx_segment()
843 tail->next = bd; cppi_next_rx_segment()
844 tail->hw_next = bd->dma; cppi_next_rx_segment()
866 if (!tail) { cppi_next_rx_segment()
872 tail->next = NULL; cppi_next_rx_segment()
873 tail->hw_next = 0; cppi_next_rx_segment()
876 rx->tail = tail; cppi_next_rx_segment()
884 tail->hw_options |= CPPI_EOP_SET; cppi_next_rx_segment()
890 tail = rx->last_processed; cppi_next_rx_segment()
891 if (tail) { cppi_next_rx_segment()
892 tail->next = bd; cppi_next_rx_segment()
893 tail->hw_next = bd->dma; cppi_next_rx_segment()
1106 rx->head, rx->tail, cppi_rx_scan()
1135 rx->tail = NULL; cppi_rx_scan()
1242 tx_ch->tail = NULL; cppi_interrupt()
1407 cppi_ch->tail = NULL; cppi_channel_abort()
/linux-4.1.27/drivers/net/ethernet/intel/i40e/
H A Di40e_adminq.c53 /* set head and tail registers in our local struct */ i40e_adminq_init_regs()
55 hw->aq.asq.tail = I40E_VF_ATQT1; i40e_adminq_init_regs()
60 hw->aq.arq.tail = I40E_VF_ARQT1; i40e_adminq_init_regs()
66 hw->aq.asq.tail = I40E_PF_ATQT; i40e_adminq_init_regs()
71 hw->aq.arq.tail = I40E_PF_ARQT; i40e_adminq_init_regs()
309 wr32(hw, hw->aq.asq.tail, 0); i40e_config_asq_regs()
338 wr32(hw, hw->aq.arq.tail, 0); i40e_config_arq_regs()
346 /* Update tail in the HW to post pre-allocated buffers */ i40e_config_arq_regs()
347 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); i40e_config_arq_regs()
490 wr32(hw, hw->aq.asq.tail, 0); i40e_shutdown_asq()
523 wr32(hw, hw->aq.arq.tail, 0); i40e_shutdown_arq()
838 /* bump the tail */ i40e_asq_send_command()
846 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); i40e_asq_send_command()
997 /* set tail = the last cleaned desc index. */ i40e_clean_arq_element()
998 wr32(hw, hw->aq.arq.tail, ntc); i40e_clean_arq_element()
999 /* ntc is updated to tail + 1 */ i40e_clean_arq_element()
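
The i40e admin-queue hits follow the usual descriptor-ring convention: software fills the slot at next_to_use, advances the index with wrap, and only then writes the new index to the hardware tail register to hand the descriptor to the device ("bump the tail" above). A hedged sketch of that post step; the register and field names here are illustrative, not the real i40e ones:

#define RING_DEPTH 64                   /* illustrative ring size */

struct aq_ring {
        unsigned int next_to_use;       /* next free descriptor index */
        volatile unsigned int *tail_reg;/* MMIO tail doorbell (assumed) */
};

/* Post one filled descriptor. The tail write must come last so the
 * device never sees an index pointing at a half-written descriptor. */
static void aq_post(struct aq_ring *r)
{
        r->next_to_use++;
        if (r->next_to_use == RING_DEPTH)
                r->next_to_use = 0;
        *r->tail_reg = r->next_to_use;  /* like wr32(hw, ...asq.tail, ...) */
}
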
/linux-4.1.27/drivers/net/ethernet/intel/i40evf/
H A Di40e_adminq.c51 /* set head and tail registers in our local struct */ i40e_adminq_init_regs()
53 hw->aq.asq.tail = I40E_VF_ATQT1; i40e_adminq_init_regs()
58 hw->aq.arq.tail = I40E_VF_ARQT1; i40e_adminq_init_regs()
64 hw->aq.asq.tail = I40E_PF_ATQT; i40e_adminq_init_regs()
69 hw->aq.arq.tail = I40E_PF_ARQT; i40e_adminq_init_regs()
307 wr32(hw, hw->aq.asq.tail, 0); i40e_config_asq_regs()
336 wr32(hw, hw->aq.arq.tail, 0); i40e_config_arq_regs()
344 /* Update tail in the HW to post pre-allocated buffers */ i40e_config_arq_regs()
345 wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1); i40e_config_arq_regs()
488 wr32(hw, hw->aq.asq.tail, 0); i40e_shutdown_asq()
521 wr32(hw, hw->aq.arq.tail, 0); i40e_shutdown_arq()
789 /* bump the tail */ i40evf_asq_send_command()
797 wr32(hw, hw->aq.asq.tail, hw->aq.asq.next_to_use); i40evf_asq_send_command()
949 /* set tail = the last cleaned desc index. */ i40evf_clean_arq_element()
950 wr32(hw, hw->aq.arq.tail, ntc); i40evf_clean_arq_element()
951 /* ntc is updated to tail + 1 */ i40evf_clean_arq_element()
/linux-4.1.27/drivers/net/irda/
H A Dbfin_sir.h35 int tail; member in struct:dma_rx_buf
H A Dbfin_sir.c317 for (i = port->rx_dma_buf.head; i < port->rx_dma_buf.tail; i++) bfin_sir_dma_rx_chars()
335 if (pos > port->rx_dma_buf.tail) { bfin_sir_rx_dma_timeout()
336 port->rx_dma_buf.tail = pos; bfin_sir_rx_dma_timeout()
338 port->rx_dma_buf.head = port->rx_dma_buf.tail; bfin_sir_rx_dma_timeout()
353 port->rx_dma_buf.tail = DMA_SIR_RX_XCNT * port->rx_dma_nrows; bfin_sir_dma_rx_int()
357 port->rx_dma_buf.tail = 0; bfin_sir_dma_rx_int()
359 port->rx_dma_buf.head = port->rx_dma_buf.tail; bfin_sir_dma_rx_int()
395 port->rx_dma_buf.tail = 0; bfin_sir_startup()
/linux-4.1.27/drivers/net/wireless/ath/wil6210/
H A Dwmi.c230 r->tail = ioread32(wil->csr + HOST_MBOX + __wmi_send()
231 offsetof(struct wil6210_mbox_ctl, tx.tail)); __wmi_send()
232 if (next_head != r->tail) __wmi_send()
236 if (next_head == r->tail) { __wmi_send()
733 if (r->tail == r->head) wmi_recv_cmd()
736 wil_dbg_wmi(wil, "Mbox head %08x tail %08x\n", wmi_recv_cmd()
737 r->head, r->tail); wmi_recv_cmd()
738 /* read cmd descriptor from tail */ wmi_recv_cmd()
739 wil_memcpy_fromio_32(&d_tail, wil->csr + HOSTADDR(r->tail), wmi_recv_cmd()
770 iowrite32(0, wil->csr + HOSTADDR(r->tail) + wmi_recv_cmd()
787 /* advance tail */ wmi_recv_cmd()
788 r->tail = r->base + ((r->tail - r->base + wmi_recv_cmd()
790 iowrite32(r->tail, wil->csr + HOST_MBOX + wmi_recv_cmd()
791 offsetof(struct wil6210_mbox_ctl, rx.tail)); wmi_recv_cmd()
1125 wil_dbg_misc(wil, "Rx init: status %d tail 0x%08x\n", wmi_rx_chain_add()
/linux-4.1.27/drivers/infiniband/hw/mlx4/
H A Dcq.c571 unsigned tail, struct mlx4_cqe *cqe, int is_eth) use_tunnel_data()
576 qp->sqp_proxy_rcv[tail].map, use_tunnel_data()
579 hdr = (struct mlx4_ib_proxy_sqp_hdr *) (qp->sqp_proxy_rcv[tail].addr); use_tunnel_data()
606 cur = wq->head - wq->tail; mlx4_ib_qp_sw_comp()
612 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; mlx4_ib_qp_sw_comp()
615 wq->tail++; mlx4_ib_qp_sw_comp()
661 unsigned tail = 0; mlx4_ib_poll_one() local
743 wq->tail += (u16) (wqe_ctr - (u16) wq->tail); mlx4_ib_poll_one()
745 wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; mlx4_ib_poll_one()
746 ++wq->tail; mlx4_ib_poll_one()
759 tail = wq->tail & (wq->wqe_cnt - 1); mlx4_ib_poll_one()
760 wc->wr_id = wq->wrid[tail]; mlx4_ib_poll_one()
761 ++wq->tail; mlx4_ib_poll_one()
850 return use_tunnel_data(*cur_qp, cq, wc, tail, mlx4_ib_poll_one()
570 use_tunnel_data(struct mlx4_ib_qp *qp, struct mlx4_ib_cq *cq, struct ib_wc *wc, unsigned tail, struct mlx4_cqe *cqe, int is_eth) use_tunnel_data() argument
H A Dsrq.c150 srq->tail = srq->msrq.max - 1; mlx4_ib_create_srq()
302 next = get_wqe(srq, srq->tail); mlx4_ib_free_srq_wqe()
304 srq->tail = wqe_index; mlx4_ib_free_srq_wqe()
336 if (unlikely(srq->head == srq->tail)) { mlx4_ib_post_srq_recv()
/linux-4.1.27/arch/x86/lib/
H A Dusercopy_64.c69 * it is not necessary to optimize tail handling.
H A Dmemcpy_32.c112 * Calculate copy position to tail. memmove()
/linux-4.1.27/arch/sparc/boot/
H A Dpiggyback.c191 int image, tail; main() local
262 if ((tail = open(argv[4], O_RDONLY)) < 0) main()
264 while ((i = read(tail, buffer, 1024)) > 0) main()
269 if (close(tail) < 0) main()
/linux-4.1.27/fs/xfs/
H A Dxfs_aops.h48 struct buffer_head *io_buffer_tail;/* buffer linked list tail */
/linux-4.1.27/include/net/
H A Dgen_stats.h17 struct nlattr * tail; member in struct:gnet_dump
/linux-4.1.27/arch/parisc/kernel/
H A Dreal2.S173 mtctl %r0, %cr17 /* Clear IIASQ tail */
177 mtctl %r1, %cr18 /* IIAOQ tail */
207 mtctl %r0, %cr17 /* Clear IIASQ tail */
211 mtctl %r1, %cr18 /* IIAOQ tail */
/linux-4.1.27/arch/powerpc/boot/
H A Dgunzip_util.h27 * - Finally use gunzip_finish() to extract the tail of the
/linux-4.1.27/arch/avr32/lib/
H A Dcsum_partial_copy_generic.S57 /* handle additional bytes at the tail */
/linux-4.1.27/arch/arc/lib/
H A Dmemset.S58 b memset ;tail call so need to tinker with blink
/linux-4.1.27/arch/ia64/lib/
H A Ddo_csum.S34 // The code hereafter also takes care of the "tail" part of the buffer
36 // allows us to commute operations. So we do the "head" and "tail"
38 // tail values, we feed them into the pipeline, very handy initialization.
44 // possible load latency and also to accommodate the head and tail.
145 mov tmask=-1 // initialize tail mask
169 shr.u tmask=tmask,tmp1 // build tail mask, mask off ]8,lastoff]
174 (p8) and hmask=hmask,tmask // apply tail mask to head mask if 1 word only
H A Dcopy_user.S135 // the tail by copying byte-by-byte.
141 // failure_in3 does. If the byte-by-byte at the tail fails, it is
147 // tail of the 1st of the destination.
296 // To fix that, we simply copy the tail byte by byte.
453 // at the top we still need to fill the body and tail.
466 // Here we handle the head & tail part when we check for alignment.
512 // either go for the 16byte copy loop OR the ld8 in the tail part.
530 // executed any of the previous (tail) ones, so we don't need to do
580 cmp.ne p6,p0=dst1,enddst // Do we need to finish the tail ?
591 cmp.ne p6,p0=dst1,enddst // Do we need to finish the tail ?
/linux-4.1.27/net/ipv6/
H A Desp6.c164 u8 *tail; esp6_output() local
212 tail = skb_tail_pointer(trailer); esp6_output()
214 memset(tail, 0, tfclen); esp6_output()
215 tail += tfclen; esp6_output()
220 tail[i] = i + 1; esp6_output()
222 tail[plen - 2] = plen - 2; esp6_output()
223 tail[plen - 1] = *skb_mac_header(skb); esp6_output()
/linux-4.1.27/drivers/firewire/
H A Dnosy.c75 struct packet *head, *tail; member in struct:packet_buffer
133 buffer->tail = (struct packet *) buffer->data; packet_buffer_init()
206 buffer->tail->length = length; packet_buffer_put()
208 if (&buffer->tail->data[length] < end) { packet_buffer_put()
209 memcpy(buffer->tail->data, data, length); packet_buffer_put()
210 buffer->tail = (struct packet *) &buffer->tail->data[length]; packet_buffer_put()
212 size_t split = end - buffer->tail->data; packet_buffer_put()
214 memcpy(buffer->tail->data, data, split); packet_buffer_put()
216 buffer->tail = (struct packet *) &buffer->data[length - split]; packet_buffer_put()
/linux-4.1.27/fs/jbd2/
H A Drecovery.c180 struct jbd2_journal_block_tail *tail; jbd2_descr_block_csum_verify() local
187 tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize - jbd2_descr_block_csum_verify()
189 provided = tail->t_checksum; jbd2_descr_block_csum_verify()
190 tail->t_checksum = 0; jbd2_descr_block_csum_verify()
192 tail->t_checksum = provided; jbd2_descr_block_csum_verify()
821 struct jbd2_journal_revoke_tail *tail; jbd2_revoke_block_csum_verify() local
828 tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize - jbd2_revoke_block_csum_verify()
830 provided = tail->r_checksum; jbd2_revoke_block_csum_verify()
831 tail->r_checksum = 0; jbd2_revoke_block_csum_verify()
833 tail->r_checksum = provided; jbd2_revoke_block_csum_verify()
/linux-4.1.27/arch/powerpc/oprofile/cell/
H A Dspu_task_sync.c50 * in the queue, i.e. head and tail can't be equal. spu_buff_add()
55 * is called to lock the buffer, head and tail. spu_buff_add()
59 if (spu_buff[spu].head >= spu_buff[spu].tail) { spu_buff_add()
60 if ((spu_buff[spu].head - spu_buff[spu].tail) spu_buff_add()
64 } else if (spu_buff[spu].tail > spu_buff[spu].head) { spu_buff_add()
65 if ((spu_buff[spu].tail - spu_buff[spu].head) spu_buff_add()
104 /* Hold the lock to make sure the head/tail sync_spu_buff()
117 spu_buff[spu].tail, sync_spu_buff()
121 spu_buff[spu].tail = curr_head; sync_spu_buff()
471 spu_buff[spu].tail = 0; oprofile_spu_buff_create()
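
spu_buff_add() above has to handle both orderings of head and tail when it checks for a full ring, and the buffer keeps one slot unused so that head == tail can only mean "empty". The same test in a compact, generic form (a sketch, not the oprofile buffer itself):

/* Used entries in a ring of 'size' slots: 'head' is the producer index,
 * 'tail' the consumer index. */
static unsigned int ring_count(unsigned int head, unsigned int tail,
                               unsigned int size)
{
        if (head >= tail)
                return head - tail;
        return size - (tail - head);
}

/* Full at size - 1 entries: the sacrificed slot keeps head != tail. */
static int ring_full(unsigned int head, unsigned int tail, unsigned int size)
{
        return ring_count(head, tail, size) == size - 1;
}
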
/linux-4.1.27/drivers/scsi/arcmsr/
H A Darcmsr_attr.c83 unsigned int tail = acb->rqbuf_getIndex; arcmsr_sysfs_iop_message_read() local
85 unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER); arcmsr_sysfs_iop_message_read()
87 allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER); arcmsr_sysfs_iop_message_read()
92 memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len); arcmsr_sysfs_iop_message_read()
94 memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end); arcmsr_sysfs_iop_message_read()
/linux-4.1.27/arch/tile/include/arch/
H A Dmpipe.h287 * For writes, this specifies the current ring tail pointer prior to any
290 * masked based on the ring size. The new tail pointer after this post
304 * current view of the state of the tail pointer.
314 * For writes, this specifies the generation number of the tail being
315 * posted. Note that if tail+cnt wraps to the beginning of the ring, the
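
The mPIPE description above pairs the posted tail index with a generation number that inverts whenever the tail wraps, so hardware and software agree on which lap of the ring an index belongs to. A hedged sketch of that bookkeeping (field names are illustrative):

struct gen_ring {
        unsigned int tail;      /* ring index, always < size */
        unsigned int gen;       /* generation bit, flips on wrap */
        unsigned int size;      /* number of slots in the ring */
};

/* Advance the tail by cnt entries (cnt <= size assumed); if tail + cnt
 * wraps to the start of the ring, the generation number must be
 * flipped along with it. */
static void post_tail(struct gen_ring *r, unsigned int cnt)
{
        r->tail += cnt;
        if (r->tail >= r->size) {
                r->tail -= r->size;
                r->gen ^= 1;
        }
}
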
/linux-4.1.27/drivers/pnp/
H A Dquirks.c141 struct pnp_option *tail = NULL, *first_new_option = NULL; pnp_clone_dependent_set() local
147 tail = option; pnp_clone_dependent_set()
149 if (!tail) { pnp_clone_dependent_set()
171 list_add(&new_option->list, &tail->list); pnp_clone_dependent_set()
172 tail = new_option; pnp_clone_dependent_set()
/linux-4.1.27/net/sunrpc/auth_gss/
H A Dsvcauth_gss.c871 return buf->head[0].iov_len + buf->page_len + buf->tail[0].iov_len; total_buf_len()
1600 if (resbuf->tail[0].iov_base == NULL) { svcauth_gss_wrap_resp_integ()
1603 resbuf->tail[0].iov_base = resbuf->head[0].iov_base svcauth_gss_wrap_resp_integ()
1605 resbuf->tail[0].iov_len = 0; svcauth_gss_wrap_resp_integ()
1607 resv = &resbuf->tail[0]; svcauth_gss_wrap_resp_integ()
1646 * If there is currently tail data, make sure there is svcauth_gss_wrap_resp_priv()
1647 * room for the head, tail, and 2 * RPC_MAX_AUTH_SIZE in svcauth_gss_wrap_resp_priv()
1648 * the page, and move the current tail data such that svcauth_gss_wrap_resp_priv()
1650 * both the head and tail. svcauth_gss_wrap_resp_priv()
1652 if (resbuf->tail[0].iov_base) { svcauth_gss_wrap_resp_priv()
1653 BUG_ON(resbuf->tail[0].iov_base >= resbuf->head[0].iov_base svcauth_gss_wrap_resp_priv()
1655 BUG_ON(resbuf->tail[0].iov_base < resbuf->head[0].iov_base); svcauth_gss_wrap_resp_priv()
1656 if (resbuf->tail[0].iov_len + resbuf->head[0].iov_len svcauth_gss_wrap_resp_priv()
1659 memmove(resbuf->tail[0].iov_base + RPC_MAX_AUTH_SIZE, svcauth_gss_wrap_resp_priv()
1660 resbuf->tail[0].iov_base, svcauth_gss_wrap_resp_priv()
1661 resbuf->tail[0].iov_len); svcauth_gss_wrap_resp_priv()
1662 resbuf->tail[0].iov_base += RPC_MAX_AUTH_SIZE; svcauth_gss_wrap_resp_priv()
1665 * If there is no current tail data, make sure there is svcauth_gss_wrap_resp_priv()
1667 * allotted page, and set up tail information such that there svcauth_gss_wrap_resp_priv()
1669 * head and tail. svcauth_gss_wrap_resp_priv()
1671 if (resbuf->tail[0].iov_base == NULL) { svcauth_gss_wrap_resp_priv()
1674 resbuf->tail[0].iov_base = resbuf->head[0].iov_base svcauth_gss_wrap_resp_priv()
1676 resbuf->tail[0].iov_len = 0; svcauth_gss_wrap_resp_priv()
1682 p = (__be32 *)(resbuf->tail[0].iov_base + resbuf->tail[0].iov_len); svcauth_gss_wrap_resp_priv()
1684 resbuf->tail[0].iov_len += pad; svcauth_gss_wrap_resp_priv()
H A Dgss_krb5_crypto.c411 * of page 2, tail. Anything more is a bug. */ encryptor()
503 * of page 2, tail. Anything more is a bug. */ decryptor()
562 * The client auth_gss code moves any existing tail data into a
565 * tail have slack space of RPC_MAX_AUTH_SIZE before calling gss_wrap.
678 if (buf->tail[0].iov_base != NULL) { gss_krb5_aes_encrypt()
679 ecptr = buf->tail[0].iov_base + buf->tail[0].iov_len; gss_krb5_aes_encrypt()
681 buf->tail[0].iov_base = buf->head[0].iov_base gss_krb5_aes_encrypt()
683 buf->tail[0].iov_len = 0; gss_krb5_aes_encrypt()
684 ecptr = buf->tail[0].iov_base; gss_krb5_aes_encrypt()
689 buf->tail[0].iov_len += GSS_KRB5_TOK_HDR_LEN; gss_krb5_aes_encrypt()
694 hmac.data = buf->tail[0].iov_base + buf->tail[0].iov_len; gss_krb5_aes_encrypt()
750 buf->tail[0].iov_len += kctx->gk5e->cksumlength; gss_krb5_aes_encrypt()
H A Dgss_krb5_wrap.c55 if (buf->page_len || buf->tail[0].iov_len) gss_krb5_add_padding()
56 iov = &buf->tail[0]; gss_krb5_add_padding()
91 BUG_ON(len > buf->tail[0].iov_len); gss_krb5_remove_padding()
92 pad = *(u8 *)(buf->tail[0].iov_base + len - 1); gss_krb5_remove_padding()
101 * easier on the server if we adjust head and tail length in tandem. gss_krb5_remove_padding()
103 * tail lengths, though--at worst badly formed xdr might lead the gss_krb5_remove_padding()
149 /* Assumptions: the head and tail of inbuf are ours to play with.
573 * the tail, and we really don't need to deal with it. gss_unwrap_kerberos_v2()
/linux-4.1.27/drivers/net/ethernet/toshiba/
H A Dspider_net.c354 chain->tail = chain->ring; spider_net_init_chain()
448 * spider_net_enable_rxchtails - sets RX dmac chain tail addresses
451 * spider_net_enable_rxchtails sets the RX DMAC chain tail addresses in the
460 card->rx_chain.tail->bus_addr); spider_net_enable_rxchtails()
531 struct spider_net_descr *start = chain->tail; spider_net_alloc_rx_skbs()
678 if (descr->next == chain->tail->prev) { spider_net_prepare_tx_descr()
717 struct spider_net_descr *descr = card->tx_chain.tail; spider_net_set_low_watermark()
739 descr = card->tx_chain.tail; spider_net_set_low_watermark()
783 if (chain->tail == chain->head) { spider_net_release_tx_chain()
787 descr = chain->tail; spider_net_release_tx_chain()
824 chain->tail = descr->next; spider_net_release_tx_chain()
861 descr = card->tx_chain.tail; spider_net_kick_tx_dma()
914 * packets, including updating the queue tail pointer.
994 struct spider_net_descr *start= chain->tail; show_rx_chain()
1008 dev_info(dev, "Chain tail located at descr=%d, status=0x%x\n", show_rx_chain()
1122 /* Advance tail pointer past any empty and reaped descrs */ spider_net_resync_tail_ptr()
1123 descr = chain->tail; spider_net_resync_tail_ptr()
1132 chain->tail = descr; spider_net_resync_tail_ptr()
1155 struct spider_net_descr *descr = chain->tail; spider_net_decode_one_descr()
1167 /* descriptor definitively used -- move on tail */ spider_net_decode_one_descr()
1168 chain->tail = descr->next; spider_net_decode_one_descr()
1465 if (card->tx_chain.tail != card->tx_chain.head) spider_net_handle_error_irq()
1789 /* set chain tail address for RX chains and spider_net_enable_card()
1858 * tail -q -n +2 Seq_code1_0x088.txt Seq_code2_0x090.txt \
/linux-4.1.27/arch/tile/gxio/
H A Dmpipe.c270 unsigned int head = list->tail; gxio_mpipe_rules_begin()
331 list->tail = list->head + rule->size; gxio_mpipe_rules_begin()
351 if (list->tail == 0) gxio_mpipe_rules_add_channel()
369 if (list->tail == 0) gxio_mpipe_rules_set_headroom()
383 offsetof(gxio_mpipe_rules_list_t, rules) + list->tail; gxio_mpipe_rules_commit()
408 /* Initialize the "tail". */ gxio_mpipe_iqueue_init()
/linux-4.1.27/fs/nfsd/
H A Dnfsxdr.c442 /* need to pad the tail */ nfssvc_encode_readlinkres()
443 rqstp->rq_res.tail[0].iov_base = p; nfssvc_encode_readlinkres()
445 rqstp->rq_res.tail[0].iov_len = 4 - (resp->len&3); nfssvc_encode_readlinkres()
461 /* need to pad the tail */ nfssvc_encode_readres()
462 rqstp->rq_res.tail[0].iov_base = p; nfssvc_encode_readres()
464 rqstp->rq_res.tail[0].iov_len = 4 - (resp->count&3); nfssvc_encode_readres()
/linux-4.1.27/net/vmw_vsock/
H A Dvmci_transport_notify.c193 u64 tail; send_waiting_read() local
209 vmci_qpair_get_consume_indexes(vmci_trans(vsk)->qpair, &tail, &head); send_waiting_read()
235 u64 tail; send_waiting_write() local
245 vmci_qpair_get_produce_indexes(vmci_trans(vsk)->qpair, &tail, &head); send_waiting_write()
246 room_left = vmci_trans(vsk)->produce_size - tail; send_waiting_write()
252 waiting_info.offset = tail + room_needed + 1; send_waiting_write()
/linux-4.1.27/arch/x86/kernel/
H A Dtsc.c48 * writing a new data entry and a reader advances the tail when it observes a
72 struct cyc2ns_data *tail; /* 56 + 8 = 64 */ member in struct:cyc2ns
99 * If we're the outer most nested read; update the tail pointer cyc2ns_read_end()
112 this_cpu_write(cyc2ns.tail, head); cyc2ns_read_end()
134 * When we observe the tail write from cyc2ns_read_end(), cyc2ns_write_begin()
138 while (c2n->tail == data) cyc2ns_write_begin()
198 c2n->tail = c2n->data; cyc2ns_init()
203 struct cyc2ns_data *data, *tail; cycles_2_ns() local
209 * Notably, it allows us to do only the __count and tail update cycles_2_ns()
215 tail = this_cpu_read(cyc2ns.tail); cycles_2_ns()
217 if (likely(data == tail)) { cycles_2_ns()
231 this_cpu_write(cyc2ns.tail, data); cycles_2_ns()
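
The tsc.c hits sketch a small single-writer latch: the reader consumes the entry published in head and, when its outermost read section ends, stores that pointer into tail to mark the entry as no longer in use; the writer spins while tail still points at the slot it wants to reuse. A heavily simplified two-slot sketch of the protocol in C11 atomics (the kernel version is per-CPU and must also cope with NMI nesting, both omitted here):

#include <stdatomic.h>

struct scale { unsigned long mult; };

static struct scale slot[2];
static _Atomic(struct scale *) head = &slot[0];
static _Atomic(struct scale *) tail = &slot[0];

/* Reader: use the published entry, then retire it by advancing tail. */
static unsigned long read_mult(void)
{
        struct scale *e = atomic_load(&head);
        unsigned long mult = e->mult;
        atomic_store(&tail, e);         /* outermost read end */
        return mult;
}

/* Writer: take the slot not currently published, wait until no reader
 * is still on it, fill it, then publish it via head. Assumes a reader
 * eventually runs and retires the old slot. */
static void write_mult(unsigned long mult)
{
        struct scale *cur = atomic_load(&head);
        struct scale *next = (cur == &slot[0]) ? &slot[1] : &slot[0];

        while (atomic_load(&tail) == next)
                ;                       /* a reader may still use it */
        next->mult = mult;
        atomic_store(&head, next);
}
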
/linux-4.1.27/drivers/net/ethernet/cadence/
H A Dmacb.c506 unsigned int tail; macb_tx_error_task() local
537 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) { macb_tx_error_task()
540 desc = macb_tx_desc(queue, tail); macb_tx_error_task()
542 tx_skb = macb_tx_skb(queue, tail); macb_tx_error_task()
549 tail++; macb_tx_error_task()
550 tx_skb = macb_tx_skb(queue, tail); macb_tx_error_task()
559 macb_tx_ring_wrap(tail), skb->data); macb_tx_error_task()
606 unsigned int tail; macb_tx_interrupt() local
622 for (tail = queue->tx_tail; tail != head; tail++) { macb_tx_interrupt()
628 desc = macb_tx_desc(queue, tail); macb_tx_interrupt()
642 for (;; tail++) { macb_tx_interrupt()
643 tx_skb = macb_tx_skb(queue, tail); macb_tx_interrupt()
649 macb_tx_ring_wrap(tail), skb->data); macb_tx_interrupt()
666 queue->tx_tail = tail; macb_tx_interrupt()
722 netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n", gem_rx_refill()
905 unsigned int tail; macb_rx() local
908 for (tail = bp->rx_tail; budget > 0; tail++) { macb_rx()
909 struct macb_dma_desc *desc = macb_rx_desc(bp, tail); macb_rx()
923 discard_partial_frame(bp, first_frag, tail); macb_rx()
924 first_frag = tail; macb_rx()
931 dropped = macb_rx_frame(bp, first_frag, tail); macb_rx()
943 bp->rx_tail = tail; macb_rx()
1253 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n", macb_start_xmit()
2038 unsigned int tail, head; macb_get_regs() local
2044 tail = macb_tx_ring_wrap(bp->queues[0].tx_tail); macb_get_regs()
2056 regs_buff[8] = tail; macb_get_regs()
2058 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail); macb_get_regs()
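
macb_tx_interrupt() above is the standard transmit-completion walk: scan from the driver's tail up to head, stop at the first descriptor the hardware still owns, free the completed skbs, and record where the scan stopped as the new tail. A stripped-down sketch of the loop (descriptor layout is illustrative):

#define TX_RING_SIZE 128                /* power of two, illustrative */
#define tx_wrap(i)   ((i) & (TX_RING_SIZE - 1))

struct tx_desc { unsigned int used; };  /* set by hardware when sent */

struct tx_queue {
        struct tx_desc desc[TX_RING_SIZE];
        void *skb[TX_RING_SIZE];
        unsigned int head;              /* producer index */
        unsigned int tail;              /* first unreclaimed index */
};

static void tx_complete(struct tx_queue *q, void (*free_skb)(void *))
{
        unsigned int tail;

        for (tail = q->tail; tail != q->head; tail++) {
                struct tx_desc *d = &q->desc[tx_wrap(tail)];

                if (!d->used)           /* hardware not done with it yet */
                        break;
                free_skb(q->skb[tx_wrap(tail)]);
                q->skb[tx_wrap(tail)] = NULL;
        }
        q->tail = tail;                 /* everything before it is free */
}
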
/linux-4.1.27/drivers/dma/
H A Dmic_x100_dma.c91 u32 tail; mic_dma_cleanup() local
95 tail = mic_dma_read_cmp_cnt(ch); mic_dma_cleanup()
102 for (last_tail = ch->last_tail; tail != last_tail;) { mic_dma_cleanup()
113 /* finish all completion callbacks before incrementing tail */ mic_dma_cleanup()
119 static u32 mic_dma_ring_count(u32 head, u32 tail) mic_dma_ring_count() argument
123 if (head >= tail) mic_dma_ring_count()
124 count = (tail - 0) + (MIC_DMA_DESC_RX_SIZE - head); mic_dma_ring_count()
126 count = tail - head; mic_dma_ring_count()
/linux-4.1.27/fs/ubifs/
H A Dlog.c349 * @ltail_lnum: return new log tail LEB number
451 * @ltail_lnum: new log tail LEB number
454 * moves log tail to new position and updates the master node so that it stores
455 * the new log tail LEB number. Returns zero in case of success and a negative
469 dbg_log("old tail was LEB %d:0, new tail is LEB %d:0", ubifs_log_end_commit()
497 * @old_ltail_lnum: old log tail LEB number
641 dbg_rcvry("log tail LEB %d, log head LEB %d", c->ltail_lnum, ubifs_consolidate_log()
/linux-4.1.27/arch/s390/oprofile/
H A Dhwsampler.c247 unsigned long *tail; allocate_sdbt() local
257 tail = sdbt; allocate_sdbt()
274 /* link current page to tail of chain */ allocate_sdbt()
275 if (tail) allocate_sdbt()
276 *tail = (unsigned long)(void *)sdbt + 1; allocate_sdbt()
299 tail = sdbt; allocate_sdbt()
306 if (tail) allocate_sdbt()
307 *tail = (unsigned long) allocate_sdbt()
/linux-4.1.27/drivers/net/ethernet/qualcomm/
H A Dqca_spi.h59 u16 tail; member in struct:tx_ring
