Searched refs: q (Results 1 – 200 of 951), sorted by relevance

/linux-4.1.27/crypto/
gf128mul.c:56 #define gf128mul_dat(q) { \ argument
57 q(0x00), q(0x01), q(0x02), q(0x03), q(0x04), q(0x05), q(0x06), q(0x07),\
58 q(0x08), q(0x09), q(0x0a), q(0x0b), q(0x0c), q(0x0d), q(0x0e), q(0x0f),\
59 q(0x10), q(0x11), q(0x12), q(0x13), q(0x14), q(0x15), q(0x16), q(0x17),\
60 q(0x18), q(0x19), q(0x1a), q(0x1b), q(0x1c), q(0x1d), q(0x1e), q(0x1f),\
61 q(0x20), q(0x21), q(0x22), q(0x23), q(0x24), q(0x25), q(0x26), q(0x27),\
62 q(0x28), q(0x29), q(0x2a), q(0x2b), q(0x2c), q(0x2d), q(0x2e), q(0x2f),\
63 q(0x30), q(0x31), q(0x32), q(0x33), q(0x34), q(0x35), q(0x36), q(0x37),\
64 q(0x38), q(0x39), q(0x3a), q(0x3b), q(0x3c), q(0x3d), q(0x3e), q(0x3f),\
65 q(0x40), q(0x41), q(0x42), q(0x43), q(0x44), q(0x45), q(0x46), q(0x47),\
[all …]
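
The gf128mul_dat(q) macro above is an "X-macro": the caller passes a per-entry macro as q, and the table body applies it to every byte value 0x00-0xff, so one layout can generate several 256-entry lookup tables. A minimal userspace sketch of the pattern, assuming a 4-entry table and made-up IDENT/DOUBLE generators (illustrative, not the kernel's actual expansions):

#include <stdio.h>
#include <stdint.h>

/* Apply a caller-supplied per-entry macro q to each index; the
 * kernel's gf128mul_dat() does this for all 256 byte values. */
#define TABLE_DAT(q) { q(0x00), q(0x01), q(0x02), q(0x03) }

/* Two example per-entry computations (hypothetical). */
#define IDENT(i)  (i)
#define DOUBLE(i) ((uint8_t)((i) << 1))

static const uint8_t tab_ident[4]  = TABLE_DAT(IDENT);
static const uint8_t tab_double[4] = TABLE_DAT(DOUBLE);

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("%d -> %d, %d\n", i, tab_ident[i], tab_double[i]);
	return 0;
}
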
algapi.c:178 struct crypto_alg *q; in __crypto_register_alg() local
192 list_for_each_entry(q, &crypto_alg_list, cra_list) { in __crypto_register_alg()
193 if (q == alg) in __crypto_register_alg()
196 if (crypto_is_moribund(q)) in __crypto_register_alg()
199 if (crypto_is_larval(q)) { in __crypto_register_alg()
200 if (!strcmp(alg->cra_driver_name, q->cra_driver_name)) in __crypto_register_alg()
205 if (!strcmp(q->cra_driver_name, alg->cra_name) || in __crypto_register_alg()
206 !strcmp(q->cra_name, alg->cra_driver_name)) in __crypto_register_alg()
242 struct crypto_alg *q; in crypto_alg_tested() local
246 list_for_each_entry(q, &crypto_alg_list, cra_list) { in crypto_alg_tested()
[all …]
/linux-4.1.27/drivers/media/v4l2-core/
videobuf-core.c:52 #define CALL(q, f, arg...) \ argument
53 ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
54 #define CALLPTR(q, f, arg...) \ argument
55 ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
57 struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q) in videobuf_alloc_vb() argument
61 BUG_ON(q->msize < sizeof(*vb)); in videobuf_alloc_vb()
63 if (!q->int_ops || !q->int_ops->alloc_vb) { in videobuf_alloc_vb()
68 vb = q->int_ops->alloc_vb(q->msize); in videobuf_alloc_vb()
78 static int is_state_active_or_queued(struct videobuf_queue *q, struct videobuf_buffer *vb) in is_state_active_or_queued() argument
83 spin_lock_irqsave(q->irqlock, flags); in is_state_active_or_queued()
[all …]
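
The CALL()/CALLPTR() macros above guard every dispatch through the queue's int_ops table, substituting 0 or NULL when a hook is unset. A minimal sketch of that NULL-tolerant op-table dispatch; the struct and hook names (ops, sync) are made up, only the macro shape mirrors videobuf-core.c:

#include <stdio.h>
#include <stddef.h>

struct ops {
	int (*sync)(int arg);	/* optional hook; may be NULL */
};

struct queue {
	const struct ops *int_ops;
};

/* Call the hook if present, else report success (0), as CALL() does. */
#define CALL(q, f, ...) \
	((q)->int_ops->f ? (q)->int_ops->f(__VA_ARGS__) : 0)

static int do_sync(int arg) { return arg * 2; }

int main(void)
{
	struct ops with = { .sync = do_sync };
	struct ops without = { .sync = NULL };
	struct queue a = { .int_ops = &with };
	struct queue b = { .int_ops = &without };

	printf("%d %d\n", CALL(&a, sync, 21), CALL(&b, sync, 21)); /* 42 0 */
	return 0;
}
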
videobuf2-core.c:91 #define log_qop(q, op) \ argument
92 dprintk(2, "call_qop(%p, %s)%s\n", q, #op, \
93 (q)->ops->op ? "" : " (nop)")
95 #define call_qop(q, op, args...) \ argument
99 log_qop(q, op); \
100 err = (q)->ops->op ? (q)->ops->op(args) : 0; \
102 (q)->cnt_ ## op++; \
106 #define call_void_qop(q, op, args...) \ argument
108 log_qop(q, op); \
109 if ((q)->ops->op) \
[all …]
videobuf-dma-contig.c:70 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", in videobuf_vm_open()
79 struct videobuf_queue *q = map->q; in videobuf_vm_close() local
82 dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", in videobuf_vm_close()
89 dev_dbg(q->dev, "munmap %p q=%p\n", map, q); in videobuf_vm_close()
90 videobuf_queue_lock(q); in videobuf_vm_close()
93 if (q->streaming) in videobuf_vm_close()
94 videobuf_queue_cancel(q); in videobuf_vm_close()
97 if (NULL == q->bufs[i]) in videobuf_vm_close()
100 if (q->bufs[i]->map != map) in videobuf_vm_close()
103 mem = q->bufs[i]->priv; in videobuf_vm_close()
[all …]
videobuf-vmalloc.c:67 struct videobuf_queue *q = map->q; in videobuf_vm_close() local
77 dprintk(1, "munmap %p q=%p\n", map, q); in videobuf_vm_close()
78 videobuf_queue_lock(q); in videobuf_vm_close()
81 if (q->streaming) in videobuf_vm_close()
82 videobuf_queue_cancel(q); in videobuf_vm_close()
85 if (NULL == q->bufs[i]) in videobuf_vm_close()
88 if (q->bufs[i]->map != map) in videobuf_vm_close()
91 mem = q->bufs[i]->priv; in videobuf_vm_close()
111 q->bufs[i]->map = NULL; in videobuf_vm_close()
112 q->bufs[i]->baddr = 0; in videobuf_vm_close()
[all …]
videobuf-dma-sg.c:397 struct videobuf_queue *q = map->q; in videobuf_vm_close() local
406 dprintk(1, "munmap %p q=%p\n", map, q); in videobuf_vm_close()
407 videobuf_queue_lock(q); in videobuf_vm_close()
409 if (NULL == q->bufs[i]) in videobuf_vm_close()
411 mem = q->bufs[i]->priv; in videobuf_vm_close()
417 if (q->bufs[i]->map != map) in videobuf_vm_close()
419 q->bufs[i]->map = NULL; in videobuf_vm_close()
420 q->bufs[i]->baddr = 0; in videobuf_vm_close()
421 q->ops->buf_release(q, q->bufs[i]); in videobuf_vm_close()
423 videobuf_queue_unlock(q); in videobuf_vm_close()
[all …]
/linux-4.1.27/sound/core/seq/oss/
seq_oss_readq.c:48 struct seq_oss_readq *q; in snd_seq_oss_readq_new() local
50 q = kzalloc(sizeof(*q), GFP_KERNEL); in snd_seq_oss_readq_new()
51 if (!q) in snd_seq_oss_readq_new()
54 q->q = kcalloc(maxlen, sizeof(union evrec), GFP_KERNEL); in snd_seq_oss_readq_new()
55 if (!q->q) { in snd_seq_oss_readq_new()
56 kfree(q); in snd_seq_oss_readq_new()
60 q->maxlen = maxlen; in snd_seq_oss_readq_new()
61 q->qlen = 0; in snd_seq_oss_readq_new()
62 q->head = q->tail = 0; in snd_seq_oss_readq_new()
63 init_waitqueue_head(&q->midi_sleep); in snd_seq_oss_readq_new()
[all …]
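
snd_seq_oss_readq_new() above allocates the queue header with kzalloc(), then the maxlen-element event ring with kcalloc(), freeing the header again if the second allocation fails, and starts with head == tail == qlen == 0. A userspace sketch of the same allocate-then-roll-back setup, with calloc() standing in for the kernel allocators and int standing in for union evrec:

#include <stdlib.h>

struct readq {
	int *q;			/* event ring, maxlen entries */
	int maxlen, qlen;	/* capacity and current fill   */
	int head, tail;		/* consumer / producer indices */
};

static struct readq *readq_new(int maxlen)
{
	struct readq *q = calloc(1, sizeof(*q));
	if (!q)
		return NULL;

	q->q = calloc(maxlen, sizeof(*q->q));
	if (!q->q) {		/* roll back the first allocation */
		free(q);
		return NULL;
	}
	q->maxlen = maxlen;	/* qlen/head/tail already zeroed */
	return q;
}

int main(void)
{
	struct readq *q = readq_new(64);
	if (q) {
		free(q->q);
		free(q);
	}
	return 0;
}
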
seq_oss_writeq.c:39 struct seq_oss_writeq *q; in snd_seq_oss_writeq_new() local
42 if ((q = kzalloc(sizeof(*q), GFP_KERNEL)) == NULL) in snd_seq_oss_writeq_new()
44 q->dp = dp; in snd_seq_oss_writeq_new()
45 q->maxlen = maxlen; in snd_seq_oss_writeq_new()
46 spin_lock_init(&q->sync_lock); in snd_seq_oss_writeq_new()
47 q->sync_event_put = 0; in snd_seq_oss_writeq_new()
48 q->sync_time = 0; in snd_seq_oss_writeq_new()
49 init_waitqueue_head(&q->sync_sleep); in snd_seq_oss_writeq_new()
58 return q; in snd_seq_oss_writeq_new()
65 snd_seq_oss_writeq_delete(struct seq_oss_writeq *q) in snd_seq_oss_writeq_delete() argument
[all …]
seq_oss_event.c:34 static int extended_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
39 static int old_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev);
54 snd_seq_oss_process_event(struct seq_oss_devinfo *dp, union evrec *q, struct snd_seq_event *ev) in snd_seq_oss_process_event() argument
56 switch (q->s.code) { in snd_seq_oss_process_event()
58 return extended_event(dp, q, ev); in snd_seq_oss_process_event()
61 return chn_voice_event(dp, q, ev); in snd_seq_oss_process_event()
64 return chn_common_event(dp, q, ev); in snd_seq_oss_process_event()
67 return timing_event(dp, q, ev); in snd_seq_oss_process_event()
70 return local_event(dp, q, ev); in snd_seq_oss_process_event()
73 return snd_seq_oss_synth_sysex(dp, q->x.dev, q->x.buf, ev); in snd_seq_oss_process_event()
[all …]
seq_oss_readq.h:32 union evrec *q; member
43 void snd_seq_oss_readq_delete(struct seq_oss_readq *q);
49 int snd_seq_oss_readq_pick(struct seq_oss_readq *q, union evrec *rec);
50 void snd_seq_oss_readq_wait(struct seq_oss_readq *q);
51 void snd_seq_oss_readq_free(struct seq_oss_readq *q);
53 #define snd_seq_oss_readq_lock(q, flags) spin_lock_irqsave(&(q)->lock, flags) argument
54 #define snd_seq_oss_readq_unlock(q, flags) spin_unlock_irqrestore(&(q)->lock, flags) argument
seq_oss_writeq.h:42 void snd_seq_oss_writeq_delete(struct seq_oss_writeq *q);
43 void snd_seq_oss_writeq_clear(struct seq_oss_writeq *q);
44 int snd_seq_oss_writeq_sync(struct seq_oss_writeq *q);
45 void snd_seq_oss_writeq_wakeup(struct seq_oss_writeq *q, abstime_t time);
46 int snd_seq_oss_writeq_get_free_size(struct seq_oss_writeq *q);
47 void snd_seq_oss_writeq_set_output(struct seq_oss_writeq *q, int size);
/linux-4.1.27/drivers/isdn/hardware/eicon/
dqueue.c:17 diva_data_q_init(diva_um_idi_data_queue_t *q, in diva_data_q_init() argument
22 q->max_length = max_length; in diva_data_q_init()
23 q->segments = max_segments; in diva_data_q_init()
25 for (i = 0; i < q->segments; i++) { in diva_data_q_init()
26 q->data[i] = NULL; in diva_data_q_init()
27 q->length[i] = 0; in diva_data_q_init()
29 q->read = q->write = q->count = q->segment_pending = 0; in diva_data_q_init()
31 for (i = 0; i < q->segments; i++) { in diva_data_q_init()
32 if (!(q->data[i] = diva_os_malloc(0, q->max_length))) { in diva_data_q_init()
33 diva_data_q_finit(q); in diva_data_q_init()
[all …]
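
diva_data_q_init() above NULLs every segment pointer before allocating any of them, so the diva_data_q_finit() call on a failed allocation can safely free whichever segments did succeed. A sketch of that NULL-first, clean-up-on-partial-failure idiom; the structure, fixed segment count, and malloc() in place of diva_os_malloc() are simplifications:

#include <stdlib.h>

#define SEGMENTS 4

struct data_q {
	void *data[SEGMENTS];
	int length[SEGMENTS];
	int segments, max_length;
	int read, write, count;
};

/* Safe to call with any mix of allocated and NULL segments. */
static void data_q_finit(struct data_q *q)
{
	for (int i = 0; i < q->segments; i++) {
		free(q->data[i]);
		q->data[i] = NULL;
	}
}

static int data_q_init(struct data_q *q, int max_length)
{
	q->max_length = max_length;
	q->segments = SEGMENTS;
	for (int i = 0; i < q->segments; i++) {	/* NULL everything first */
		q->data[i] = NULL;
		q->length[i] = 0;
	}
	q->read = q->write = q->count = 0;
	for (int i = 0; i < q->segments; i++) {	/* ... then allocate */
		q->data[i] = malloc(q->max_length);
		if (!q->data[i]) {
			data_q_finit(q);	/* frees the ones that worked */
			return -1;
		}
	}
	return 0;
}

int main(void)
{
	struct data_q q;
	if (data_q_init(&q, 256))
		return 1;
	data_q_finit(&q);
	return 0;
}
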
dqueue.h:19 int diva_data_q_init(diva_um_idi_data_queue_t *q,
21 int diva_data_q_finit(diva_um_idi_data_queue_t *q);
22 int diva_data_q_get_max_length(const diva_um_idi_data_queue_t *q);
23 void *diva_data_q_get_segment4write(diva_um_idi_data_queue_t *q);
24 void diva_data_q_ack_segment4write(diva_um_idi_data_queue_t *q,
27 q);
28 int diva_data_q_get_segment_length(const diva_um_idi_data_queue_t *q);
29 void diva_data_q_ack_segment4read(diva_um_idi_data_queue_t *q);
/linux-4.1.27/sound/core/seq/
seq_queue.c:63 static int queue_list_add(struct snd_seq_queue *q) in queue_list_add() argument
71 queue_list[i] = q; in queue_list_add()
72 q->queue = i; in queue_list_add()
84 struct snd_seq_queue *q; in queue_list_remove() local
88 q = queue_list[id]; in queue_list_remove()
89 if (q) { in queue_list_remove()
90 spin_lock(&q->owner_lock); in queue_list_remove()
91 if (q->owner == client) { in queue_list_remove()
93 q->klocked = 1; in queue_list_remove()
94 spin_unlock(&q->owner_lock); in queue_list_remove()
[all …]
seq_queue.h:95 #define queuefree(q) snd_use_lock_free(&(q)->use_lock) argument
101 void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop);
120 #define udiv_qrnnd(q, r, n1, n0, d) \ argument
122 : "=a" ((u32)(q)), \
128 #define u64_div(x,y,q) do {u32 __tmp; udiv_qrnnd(q, __tmp, (x)>>32, x, y);} while (0) argument
129 #define u64_mod(x,y,r) do {u32 __tmp; udiv_qrnnd(__tmp, q, (x)>>32, x, y);} while (0)
130 #define u64_divmod(x,y,q,r) udiv_qrnnd(q, r, (x)>>32, x, y) argument
133 #define u64_div(x,y,q) ((q) = (u32)((u64)(x) / (u64)(y))) argument
135 #define u64_divmod(x,y,q,r) (u64_div(x,y,q), u64_mod(x,y,r)) argument
seq_timer.c:140 struct snd_seq_queue *q = timeri->callback_data; in snd_seq_timer_interrupt() local
143 if (q == NULL) in snd_seq_timer_interrupt()
145 tmr = q->timer; in snd_seq_timer_interrupt()
173 snd_seq_check_queue(q, 1, 0); in snd_seq_timer_interrupt()
270 int snd_seq_timer_open(struct snd_seq_queue *q) in snd_seq_timer_open() argument
277 tmr = q->timer; in snd_seq_timer_open()
282 sprintf(str, "sequencer queue %i", q->queue); in snd_seq_timer_open()
287 err = snd_timer_open(&t, str, &tmr->alsa_id, q->queue); in snd_seq_timer_open()
297 err = snd_timer_open(&t, str, &tid, q->queue); in snd_seq_timer_open()
305 t->callback_data = q; in snd_seq_timer_open()
[all …]
/linux-4.1.27/drivers/infiniband/hw/amso1100/
c2_mq.c:36 void *c2_mq_alloc(struct c2_mq *q) in c2_mq_alloc() argument
38 BUG_ON(q->magic != C2_MQ_MAGIC); in c2_mq_alloc()
39 BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); in c2_mq_alloc()
41 if (c2_mq_full(q)) { in c2_mq_alloc()
46 (struct c2wr_hdr *) (q->msg_pool.host + q->priv * q->msg_size); in c2_mq_alloc()
53 return q->msg_pool.host + q->priv * q->msg_size; in c2_mq_alloc()
58 void c2_mq_produce(struct c2_mq *q) in c2_mq_produce() argument
60 BUG_ON(q->magic != C2_MQ_MAGIC); in c2_mq_produce()
61 BUG_ON(q->type != C2_MQ_ADAPTER_TARGET); in c2_mq_produce()
63 if (!c2_mq_full(q)) { in c2_mq_produce()
[all …]
c2_mq.h:86 static __inline__ int c2_mq_empty(struct c2_mq *q) in c2_mq_empty() argument
88 return q->priv == be16_to_cpu(*q->shared); in c2_mq_empty()
91 static __inline__ int c2_mq_full(struct c2_mq *q) in c2_mq_full() argument
93 return q->priv == (be16_to_cpu(*q->shared) + q->q_size - 1) % q->q_size; in c2_mq_full()
96 extern void c2_mq_lconsume(struct c2_mq *q, u32 wqe_count);
97 extern void *c2_mq_alloc(struct c2_mq *q);
98 extern void c2_mq_produce(struct c2_mq *q);
99 extern void *c2_mq_consume(struct c2_mq *q);
100 extern void c2_mq_free(struct c2_mq *q);
101 extern void c2_mq_req_init(struct c2_mq *q, u32 index, u32 q_size, u32 msg_size,
[all …]
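
c2_mq_empty()/c2_mq_full() above are the classic one-slot-reserved ring test: empty when the private index equals the shared one, full when one more produce would make them collide, so one slot is sacrificed to tell the two states apart. A host-only sketch without the driver's be16_to_cpu() conversion (there is no device on the other side here):

#include <stdio.h>
#include <stdint.h>

struct mq {
	uint16_t priv;		/* producer's private index            */
	uint16_t shared;	/* consumer index (device-owned and    */
				/* big-endian in the real driver)      */
	uint16_t q_size;
};

static int mq_empty(const struct mq *q)
{
	return q->priv == q->shared;
}

/* Full when advancing priv once more would equal the consumer index. */
static int mq_full(const struct mq *q)
{
	return q->priv == (uint16_t)((q->shared + q->q_size - 1) % q->q_size);
}

int main(void)
{
	struct mq q = { .priv = 0, .shared = 1, .q_size = 8 };
	/* (1 + 8 - 1) % 8 == 0 == priv, so the ring is full, not empty */
	printf("empty=%d full=%d\n", mq_empty(&q), mq_full(&q)); /* 0 1 */
	return 0;
}
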
/linux-4.1.27/drivers/s390/cio/
qdio_main.c:99 static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) in qdio_check_ccq() argument
111 DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); in qdio_check_ccq()
126 static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, in qdio_do_eqbs() argument
129 int rc, tmp_count = count, tmp_start = start, nr = q->nr, retried = 0; in qdio_do_eqbs()
132 qperf_inc(q, eqbs); in qdio_do_eqbs()
134 if (!q->is_input_q) in qdio_do_eqbs()
135 nr += q->irq_ptr->nr_input_qs; in qdio_do_eqbs()
137 ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, in qdio_do_eqbs()
139 rc = qdio_check_ccq(q, ccq); in qdio_do_eqbs()
144 DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); in qdio_do_eqbs()
[all …]
qdio_debug.c:115 struct qdio_q *q = m->private; in qstat_show() local
118 if (!q) in qstat_show()
122 q->timestamp, last_ai_time); in qstat_show()
124 atomic_read(&q->nr_buf_used), in qstat_show()
125 q->first_to_check, q->last_move); in qstat_show()
126 if (q->is_input_q) { in qstat_show()
128 q->u.in.polling, q->u.in.ack_start, in qstat_show()
129 q->u.in.ack_count); in qstat_show()
131 *(u32 *)q->irq_ptr->dsci, in qstat_show()
133 &q->u.in.queue_irq_state)); in qstat_show()
[all …]
qdio_setup.c:107 struct qdio_q *q; in set_impl_params() local
121 for_each_input_queue(irq_ptr, q, i) { in set_impl_params()
123 q->slib->slibe[j].parms = in set_impl_params()
130 for_each_output_queue(irq_ptr, q, i) { in set_impl_params()
132 q->slib->slibe[j].parms = in set_impl_params()
139 struct qdio_q *q; in __qdio_allocate_qs() local
143 q = kmem_cache_alloc(qdio_q_cache, GFP_KERNEL); in __qdio_allocate_qs()
144 if (!q) in __qdio_allocate_qs()
147 q->slib = (struct slib *) __get_free_page(GFP_KERNEL); in __qdio_allocate_qs()
148 if (!q->slib) { in __qdio_allocate_qs()
[all …]
qdio.h:314 #define queue_type(q) q->irq_ptr->qib.qfmt argument
315 #define SCH_NO(q) (q->irq_ptr->schid.sch_no) argument
330 static inline void account_sbals_error(struct qdio_q *q, int count) in account_sbals_error() argument
332 q->q_stats.nr_sbal_error += count; in account_sbals_error()
333 q->q_stats.nr_sbal_total += count; in account_sbals_error()
337 static inline int multicast_outbound(struct qdio_q *q) in multicast_outbound() argument
339 return (q->irq_ptr->nr_output_qs > 1) && in multicast_outbound()
340 (q->nr == q->irq_ptr->nr_output_qs - 1); in multicast_outbound()
343 #define pci_out_supported(q) \ argument
344 (q->irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)
[all …]
qdio_thinint.c:88 struct qdio_q *q; in tiqdio_remove_input_queues() local
90 q = irq_ptr->input_qs[0]; in tiqdio_remove_input_queues()
92 if (!q || !q->entry.prev || !q->entry.next) in tiqdio_remove_input_queues()
96 list_del_rcu(&q->entry); in tiqdio_remove_input_queues()
147 struct qdio_q *q; in tiqdio_call_inq_handlers() local
150 for_each_input_queue(irq, q, i) { in tiqdio_call_inq_handlers()
153 xchg(q->irq_ptr->dsci, 0); in tiqdio_call_inq_handlers()
155 if (q->u.in.queue_start_poll) { in tiqdio_call_inq_handlers()
158 &q->u.in.queue_irq_state)) { in tiqdio_call_inq_handlers()
159 qperf_inc(q, int_discarded); in tiqdio_call_inq_handlers()
[all …]
/linux-4.1.27/net/sched/
sch_choke.c:80 static unsigned int choke_len(const struct choke_sched_data *q) in choke_len() argument
82 return (q->tail - q->head) & q->tab_mask; in choke_len()
86 static int use_ecn(const struct choke_sched_data *q) in use_ecn() argument
88 return q->flags & TC_RED_ECN; in use_ecn()
92 static int use_harddrop(const struct choke_sched_data *q) in use_harddrop() argument
94 return q->flags & TC_RED_HARDDROP; in use_harddrop()
98 static void choke_zap_head_holes(struct choke_sched_data *q) in choke_zap_head_holes() argument
101 q->head = (q->head + 1) & q->tab_mask; in choke_zap_head_holes()
102 if (q->head == q->tail) in choke_zap_head_holes()
104 } while (q->tab[q->head] == NULL); in choke_zap_head_holes()
[all …]
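
choke_len() above measures occupancy as (tail - head) & tab_mask, which stays correct for a power-of-two table even after the unsigned indices wrap around. A worked wrap-around case:

#include <stdio.h>

int main(void)
{
	unsigned int tab_mask = 7;		/* table size 8, mask = size - 1 */
	unsigned int head = 6, tail = 2;	/* tail has wrapped past head    */

	/* Unsigned subtraction wraps modulo 2^32; masking then reduces it
	 * modulo the table size: (2 - 6) & 7 == 4 entries in flight. */
	unsigned int len = (tail - head) & tab_mask;
	printf("%u\n", len);	/* 4 */
	return 0;
}
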
sch_sfq.c:152 static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val) in sfq_dep_head() argument
155 return &q->slots[val].dep; in sfq_dep_head()
156 return &q->dep[val - SFQ_MAX_FLOWS]; in sfq_dep_head()
173 static unsigned int sfq_hash(const struct sfq_sched_data *q, in sfq_hash() argument
181 (__force u32)keys->ports, q->perturbation); in sfq_hash()
182 return hash & (q->divisor - 1); in sfq_hash()
188 struct sfq_sched_data *q = qdisc_priv(sch); in sfq_classify() local
195 TC_H_MIN(skb->priority) <= q->divisor) in sfq_classify()
198 fl = rcu_dereference_bh(q->filter_list); in sfq_classify()
201 return sfq_hash(q, skb) + 1; in sfq_classify()
[all …]
sch_pie.c:100 struct pie_sched_data *q = qdisc_priv(sch); in drop_early() local
102 u32 local_prob = q->vars.prob; in drop_early()
106 if (q->vars.burst_time > 0) in drop_early()
112 if ((q->vars.qdelay < q->params.target / 2) in drop_early()
113 && (q->vars.prob < MAX_PROB / 5)) in drop_early()
125 if (q->params.bytemode && packet_size <= mtu) in drop_early()
128 local_prob = q->vars.prob; in drop_early()
139 struct pie_sched_data *q = qdisc_priv(sch); in pie_qdisc_enqueue() local
143 q->stats.overlimit++; in pie_qdisc_enqueue()
149 } else if (q->params.ecn && (q->vars.prob <= MAX_PROB / 10) && in pie_qdisc_enqueue()
[all …]
sch_sfb.c:126 static void increment_one_qlen(u32 sfbhash, u32 slot, struct sfb_sched_data *q) in increment_one_qlen() argument
129 struct sfb_bucket *b = &q->bins[slot].bins[0][0]; in increment_one_qlen()
141 static void increment_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) in increment_qlen() argument
147 increment_one_qlen(sfbhash, 0, q); in increment_qlen()
151 increment_one_qlen(sfbhash, 1, q); in increment_qlen()
155 struct sfb_sched_data *q) in decrement_one_qlen() argument
158 struct sfb_bucket *b = &q->bins[slot].bins[0][0]; in decrement_one_qlen()
170 static void decrement_qlen(const struct sk_buff *skb, struct sfb_sched_data *q) in decrement_qlen() argument
176 decrement_one_qlen(sfbhash, 0, q); in decrement_qlen()
180 decrement_one_qlen(sfbhash, 1, q); in decrement_qlen()
[all …]
sch_red.c:49 static inline int red_use_ecn(struct red_sched_data *q) in red_use_ecn() argument
51 return q->flags & TC_RED_ECN; in red_use_ecn()
54 static inline int red_use_harddrop(struct red_sched_data *q) in red_use_harddrop() argument
56 return q->flags & TC_RED_HARDDROP; in red_use_harddrop()
61 struct red_sched_data *q = qdisc_priv(sch); in red_enqueue() local
62 struct Qdisc *child = q->qdisc; in red_enqueue()
65 q->vars.qavg = red_calc_qavg(&q->parms, in red_enqueue()
66 &q->vars, in red_enqueue()
69 if (red_is_idling(&q->vars)) in red_enqueue()
70 red_end_of_idle_period(&q->vars); in red_enqueue()
[all …]
sch_netem.c:197 static bool loss_4state(struct netem_sched_data *q) in loss_4state() argument
199 struct clgstate *clg = &q->clg; in loss_4state()
262 static bool loss_gilb_ell(struct netem_sched_data *q) in loss_gilb_ell() argument
264 struct clgstate *clg = &q->clg; in loss_gilb_ell()
283 static bool loss_event(struct netem_sched_data *q) in loss_event() argument
285 switch (q->loss_model) { in loss_event()
288 return q->loss && q->loss >= get_crandom(&q->loss_cor); in loss_event()
296 return loss_4state(q); in loss_event()
304 return loss_gilb_ell(q); in loss_event()
342 static psched_time_t packet_len_2_sched_time(unsigned int len, struct netem_sched_data *q) in packet_len_2_sched_time() argument
[all …]
sch_multiq.c:42 struct multiq_sched_data *q = qdisc_priv(sch); in multiq_classify() local
45 struct tcf_proto *fl = rcu_dereference_bh(q->filter_list); in multiq_classify()
61 if (band >= q->bands) in multiq_classify()
62 return q->queues[0]; in multiq_classify()
64 return q->queues[band]; in multiq_classify()
86 sch->q.qlen++; in multiq_enqueue()
96 struct multiq_sched_data *q = qdisc_priv(sch); in multiq_dequeue() local
101 for (band = 0; band < q->bands; band++) { in multiq_dequeue()
103 q->curband++; in multiq_dequeue()
104 if (q->curband >= q->bands) in multiq_dequeue()
[all …]
sch_tbf.c:160 struct tbf_sched_data *q = qdisc_priv(sch); in tbf_segment() local
175 ret = qdisc_enqueue(segs, q->qdisc); in tbf_segment()
184 sch->q.qlen += nb; in tbf_segment()
193 struct tbf_sched_data *q = qdisc_priv(sch); in tbf_enqueue() local
196 if (qdisc_pkt_len(skb) > q->max_size) { in tbf_enqueue()
197 if (skb_is_gso(skb) && skb_gso_mac_seglen(skb) <= q->max_size) in tbf_enqueue()
201 ret = qdisc_enqueue(skb, q->qdisc); in tbf_enqueue()
208 sch->q.qlen++; in tbf_enqueue()
214 struct tbf_sched_data *q = qdisc_priv(sch); in tbf_drop() local
217 if (q->qdisc->ops->drop && (len = q->qdisc->ops->drop(q->qdisc)) != 0) { in tbf_drop()
[all …]
sch_fq.c:129 static void fq_flow_set_throttled(struct fq_sched_data *q, struct fq_flow *f) in fq_flow_set_throttled() argument
131 struct rb_node **p = &q->delayed.rb_node, *parent = NULL; in fq_flow_set_throttled()
144 rb_insert_color(&f->rate_node, &q->delayed); in fq_flow_set_throttled()
145 q->throttled_flows++; in fq_flow_set_throttled()
146 q->stat_throttled++; in fq_flow_set_throttled()
149 if (q->time_next_delayed_flow > f->time_next_packet) in fq_flow_set_throttled()
150 q->time_next_delayed_flow = f->time_next_packet; in fq_flow_set_throttled()
176 static void fq_gc(struct fq_sched_data *q, in fq_gc() argument
205 q->flows -= fcnt; in fq_gc()
206 q->inactive_flows -= fcnt; in fq_gc()
[all …]
sch_fq_codel.c:70 static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q, in fq_codel_hash() argument
79 (__force u32)keys.ports, q->perturbation); in fq_codel_hash()
81 return reciprocal_scale(hash, q->flows_cnt); in fq_codel_hash()
87 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_classify() local
94 TC_H_MIN(skb->priority) <= q->flows_cnt) in fq_codel_classify()
97 filter = rcu_dereference_bh(q->filter_list); in fq_codel_classify()
99 return fq_codel_hash(q, skb) + 1; in fq_codel_classify()
113 if (TC_H_MIN(res.classid) <= q->flows_cnt) in fq_codel_classify()
145 struct fq_codel_sched_data *q = qdisc_priv(sch); in fq_codel_drop() local
155 for (i = 0; i < q->flows_cnt; i++) { in fq_codel_drop()
[all …]
sch_hhf.c:179 static unsigned int skb_hash(const struct hhf_sched_data *q, in skb_hash() argument
191 (__force u32)keys.ports, q->perturbation); in skb_hash()
198 struct hhf_sched_data *q) in seek_list() argument
207 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in seek_list()
217 q->hh_flows_current_cnt--; in seek_list()
229 struct hhf_sched_data *q) in alloc_new_hh() argument
237 u32 prev = flow->hit_timestamp + q->hhf_evict_timeout; in alloc_new_hh()
244 if (q->hh_flows_current_cnt >= q->hh_flows_limit) { in alloc_new_hh()
245 q->hh_flows_overlimit++; in alloc_new_hh()
253 q->hh_flows_current_cnt++; in alloc_new_hh()
[all …]
sch_cbq.c:115 struct Qdisc *q; /* Elementary queueing discipline */ member
178 cbq_class_lookup(struct cbq_sched_data *q, u32 classid) in cbq_class_lookup() argument
182 clc = qdisc_class_find(&q->clhash, classid); in cbq_class_lookup()
219 struct cbq_sched_data *q = qdisc_priv(sch); in cbq_classify() local
220 struct cbq_class *head = &q->link; in cbq_classify()
231 (cl = cbq_class_lookup(q, prio)) != NULL) in cbq_classify()
250 cl = cbq_class_lookup(q, res.classid); in cbq_classify()
303 struct cbq_sched_data *q = qdisc_priv(cl->qdisc); in cbq_activate_class() local
307 cl_tail = q->active[prio]; in cbq_activate_class()
308 q->active[prio] = cl; in cbq_activate_class()
[all …]
sch_qfq.c:212 struct qfq_sched *q = qdisc_priv(sch); in qfq_find_class() local
215 clc = qdisc_class_find(&q->clhash, classid); in qfq_find_class()
223 unsigned int len = cl->qdisc->q.qlen; in qfq_purge_queue()
265 static void qfq_init_agg(struct qfq_sched *q, struct qfq_aggregate *agg, in qfq_init_agg() argument
269 hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); in qfq_init_agg()
275 static struct qfq_aggregate *qfq_find_agg(struct qfq_sched *q, in qfq_find_agg() argument
280 hlist_for_each_entry(agg, &q->nonfull_aggs, nonfull_next) in qfq_find_agg()
289 static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg, in qfq_update_agg() argument
294 if (new_num_classes == q->max_agg_classes) in qfq_update_agg()
298 new_num_classes == q->max_agg_classes - 1) /* agg no more full */ in qfq_update_agg()
[all …]
sch_gred.c:99 struct gred_sched_data *q = table->tab[i]; in gred_wred_mode_check() local
102 if (q == NULL) in gred_wred_mode_check()
106 if (table->tab[n] && table->tab[n]->prio == q->prio) in gred_wred_mode_check()
114 struct gred_sched_data *q, in gred_backlog() argument
120 return q->backlog; in gred_backlog()
129 struct gred_sched_data *q) in gred_load_wred_set() argument
131 q->vars.qavg = table->wred_set.qavg; in gred_load_wred_set()
132 q->vars.qidlestart = table->wred_set.qidlestart; in gred_load_wred_set()
136 struct gred_sched_data *q) in gred_store_wred_set() argument
138 table->wred_set.qavg = q->vars.qavg; in gred_store_wred_set()
[all …]
sch_prio.c:36 struct prio_sched_data *q = qdisc_priv(sch); in prio_classify() local
44 fl = rcu_dereference_bh(q->filter_list); in prio_classify()
58 return q->queues[q->prio2band[band & TC_PRIO_MAX]]; in prio_classify()
63 if (band >= q->bands) in prio_classify()
64 return q->queues[q->prio2band[0]]; in prio_classify()
66 return q->queues[band]; in prio_classify()
88 sch->q.qlen++; in prio_enqueue()
98 struct prio_sched_data *q = qdisc_priv(sch); in prio_peek() local
101 for (prio = 0; prio < q->bands; prio++) { in prio_peek()
102 struct Qdisc *qdisc = q->queues[prio]; in prio_peek()
[all …]
sch_plug.c:91 struct plug_sched_data *q = qdisc_priv(sch); in plug_enqueue() local
93 if (likely(sch->qstats.backlog + skb->len <= q->limit)) { in plug_enqueue()
94 if (!q->unplug_indefinite) in plug_enqueue()
95 q->pkts_current_epoch++; in plug_enqueue()
104 struct plug_sched_data *q = qdisc_priv(sch); in plug_dequeue() local
109 if (!q->unplug_indefinite) { in plug_dequeue()
110 if (!q->pkts_to_release) { in plug_dequeue()
117 q->pkts_to_release--; in plug_dequeue()
125 struct plug_sched_data *q = qdisc_priv(sch); in plug_init() local
127 q->pkts_current_epoch = 0; in plug_init()
[all …]
sch_codel.c:69 struct sk_buff *skb = __skb_dequeue(&sch->q); in dequeue()
77 struct codel_sched_data *q = qdisc_priv(sch); in codel_qdisc_dequeue() local
80 skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats, dequeue); in codel_qdisc_dequeue()
85 if (q->stats.drop_count && sch->q.qlen) { in codel_qdisc_dequeue()
86 qdisc_tree_decrease_qlen(sch, q->stats.drop_count); in codel_qdisc_dequeue()
87 q->stats.drop_count = 0; in codel_qdisc_dequeue()
96 struct codel_sched_data *q; in codel_qdisc_enqueue() local
102 q = qdisc_priv(sch); in codel_qdisc_enqueue()
103 q->drop_overlimit++; in codel_qdisc_enqueue()
116 struct codel_sched_data *q = qdisc_priv(sch); in codel_change() local
[all …]
sch_htb.c:131 struct Qdisc *q; member
183 struct htb_sched *q = qdisc_priv(sch); in htb_find() local
186 clc = qdisc_class_find(&q->clhash, handle); in htb_find()
209 struct htb_sched *q = qdisc_priv(sch); in htb_classify() local
228 tcf = rcu_dereference_bh(q->filter_list); in htb_classify()
257 cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch); in htb_classify()
295 static void htb_add_to_wait_tree(struct htb_sched *q, in htb_add_to_wait_tree() argument
298 struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL; in htb_add_to_wait_tree()
300 cl->pq_key = q->now + delay; in htb_add_to_wait_tree()
301 if (cl->pq_key == q->now) in htb_add_to_wait_tree()
[all …]
sch_teql.c:70 struct sk_buff_head q; member
73 #define NEXT_SLAVE(q) (((struct teql_sched_data *)qdisc_priv(q))->next) argument
83 struct teql_sched_data *q = qdisc_priv(sch); in teql_enqueue() local
85 if (q->q.qlen < dev->tx_queue_len) { in teql_enqueue()
86 __skb_queue_tail(&q->q, skb); in teql_enqueue()
99 struct Qdisc *q; in teql_dequeue() local
101 skb = __skb_dequeue(&dat->q); in teql_dequeue()
103 q = rcu_dereference_bh(dat_queue->qdisc); in teql_dequeue()
106 struct net_device *m = qdisc_dev(q); in teql_dequeue()
114 sch->q.qlen = dat->q.qlen + q->q.qlen; in teql_dequeue()
[all …]
sch_api.c:42 struct nlmsghdr *n, struct Qdisc *q,
142 struct Qdisc_ops *q, **qp; in register_qdisc() local
146 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) in register_qdisc()
147 if (!strcmp(qops->id, q->id)) in register_qdisc()
186 struct Qdisc_ops *q, **qp; in unregister_qdisc() local
190 for (qp = &qdisc_base; (q = *qp) != NULL; qp = &q->next) in unregister_qdisc()
191 if (q == qops) in unregister_qdisc()
193 if (q) { in unregister_qdisc()
194 *qp = q->next; in unregister_qdisc()
195 q->next = NULL; in unregister_qdisc()
[all …]
sch_drr.c:44 struct drr_sched *q = qdisc_priv(sch); in drr_find_class() local
47 clc = qdisc_class_find(&q->clhash, classid); in drr_find_class()
55 unsigned int len = cl->qdisc->q.qlen; in drr_purge_queue()
68 struct drr_sched *q = qdisc_priv(sch); in drr_change_class() local
131 qdisc_class_hash_insert(&q->clhash, &cl->common); in drr_change_class()
134 qdisc_class_hash_grow(sch, &q->clhash); in drr_change_class()
149 struct drr_sched *q = qdisc_priv(sch); in drr_delete_class() local
158 qdisc_class_hash_remove(&q->clhash, &cl->common); in drr_delete_class()
191 struct drr_sched *q = qdisc_priv(sch); in drr_tcf_chain() local
196 return &q->filter_list; in drr_tcf_chain()
[all …]
sch_generic.c:48 static inline int dev_requeue_skb(struct sk_buff *skb, struct Qdisc *q) in dev_requeue_skb() argument
50 q->gso_skb = skb; in dev_requeue_skb()
51 q->qstats.requeues++; in dev_requeue_skb()
52 q->q.qlen++; /* it's still part of the queue */ in dev_requeue_skb()
53 __netif_schedule(q); in dev_requeue_skb()
58 static void try_bulk_dequeue_skb(struct Qdisc *q, in try_bulk_dequeue_skb() argument
66 struct sk_buff *nskb = q->dequeue(q); in try_bulk_dequeue_skb()
82 static struct sk_buff *dequeue_skb(struct Qdisc *q, bool *validate, in dequeue_skb() argument
85 struct sk_buff *skb = q->gso_skb; in dequeue_skb()
86 const struct netdev_queue *txq = q->dev_queue; in dequeue_skb()
[all …]
sch_fifo.c:32 if (likely(skb_queue_len(&sch->q) < sch->limit)) in pfifo_enqueue()
40 if (likely(skb_queue_len(&sch->q) < sch->limit)) in pfifo_tail_enqueue()
44 __qdisc_queue_drop_head(sch, &sch->q); in pfifo_tail_enqueue()
141 int fifo_set_limit(struct Qdisc *q, unsigned int limit) in fifo_set_limit() argument
147 if (strncmp(q->ops->id + 1, "fifo", 4) != 0) in fifo_set_limit()
156 ret = q->ops->change(q, nla); in fifo_set_limit()
166 struct Qdisc *q; in fifo_create_dflt() local
169 q = qdisc_create_dflt(sch->dev_queue, ops, TC_H_MAKE(sch->handle, 1)); in fifo_create_dflt()
170 if (q) { in fifo_create_dflt()
171 err = fifo_set_limit(q, limit); in fifo_create_dflt()
[all …]
sch_hfsc.c:227 eltree_get_mindl(struct hfsc_sched *q, u64 cur_time) in eltree_get_mindl() argument
232 for (n = rb_first(&q->eligible); n != NULL; n = rb_next(n)) { in eltree_get_mindl()
244 eltree_get_minel(struct hfsc_sched *q) in eltree_get_minel() argument
248 n = rb_first(&q->eligible); in eltree_get_minel()
770 if (cl->qdisc->q.qlen == 0 && cl->cl_flags & HFSC_FSC) in update_vf()
897 unsigned int len = cl->qdisc->q.qlen; in hfsc_purge_queue()
922 struct hfsc_sched *q = qdisc_priv(sch); in hfsc_find_class() local
925 clc = qdisc_class_find(&q->clhash, classid); in hfsc_find_class()
972 struct hfsc_sched *q = qdisc_priv(sch); in hfsc_change_class() local
1035 if (cl->qdisc->q.qlen != 0) { in hfsc_change_class()
[all …]
sch_dsmark.c:39 struct Qdisc *q; member
71 *old = p->q; in dsmark_graft()
72 p->q = new; in dsmark_graft()
73 qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); in dsmark_graft()
83 return p->q; in dsmark_leaf()
258 err = qdisc_enqueue(skb, p->q); in dsmark_enqueue()
265 sch->q.qlen++; in dsmark_enqueue()
282 skb = p->q->ops->dequeue(p->q); in dsmark_dequeue()
287 sch->q.qlen--; in dsmark_dequeue()
322 return p->q->ops->peek(p->q); in dsmark_peek()
[all …]
sch_atm.c:43 struct Qdisc *q; /* FIFO, TBF, etc. */ member
94 *old = flow->q; in atm_tc_graft()
95 flow->q = new; in atm_tc_graft()
106 return flow ? flow->q : NULL; in atm_tc_leaf()
143 pr_debug("atm_tc_put: qdisc %p\n", flow->q); in atm_tc_put()
144 qdisc_destroy(flow->q); in atm_tc_put()
277 flow->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, classid); in atm_tc_change()
278 if (!flow->q) in atm_tc_change()
279 flow->q = &noop_qdisc; in atm_tc_change()
280 pr_debug("atm_tc_change: qdisc %p\n", flow->q); in atm_tc_change()
[all …]
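
Nearly every scheduler in this directory opens with struct xxx_sched_data *q = qdisc_priv(sch): the scheduler's private state lives in the same allocation, directly after the common struct Qdisc, and qdisc_priv() simply steps past the header. A simplified sketch of that header-plus-private-area layout (the real qdisc_priv() also rounds the offset up for cacheline alignment, which is omitted here):

#include <stdio.h>
#include <stdlib.h>

struct Qdisc {			/* common header */
	int handle;
	/* scheduler-private data follows in the same allocation */
};

static void *qdisc_priv(struct Qdisc *sch)
{
	return (char *)sch + sizeof(struct Qdisc);
}

struct fifo_sched_data {	/* one scheduler's private state */
	unsigned int limit;
};

int main(void)
{
	struct Qdisc *sch = calloc(1, sizeof(*sch) + sizeof(struct fifo_sched_data));
	if (!sch)
		return 1;

	struct fifo_sched_data *q = qdisc_priv(sch);
	q->limit = 100;
	printf("limit=%u\n", q->limit);
	free(sch);
	return 0;
}
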
/linux-4.1.27/block/
blk-core.c:66 void blk_queue_congestion_threshold(struct request_queue *q) in blk_queue_congestion_threshold() argument
70 nr = q->nr_requests - (q->nr_requests / 8) + 1; in blk_queue_congestion_threshold()
71 if (nr > q->nr_requests) in blk_queue_congestion_threshold()
72 nr = q->nr_requests; in blk_queue_congestion_threshold()
73 q->nr_congestion_on = nr; in blk_queue_congestion_threshold()
75 nr = q->nr_requests - (q->nr_requests / 8) - (q->nr_requests / 16) - 1; in blk_queue_congestion_threshold()
78 q->nr_congestion_off = nr; in blk_queue_congestion_threshold()
91 struct request_queue *q = bdev_get_queue(bdev); in blk_get_backing_dev_info() local
93 return &q->backing_dev_info; in blk_get_backing_dev_info()
97 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
[all …]
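
blk_queue_congestion_threshold() above places the congestion-on mark at roughly 7/8 of nr_requests and the congestion-off mark lower, near 13/16, so the congested state has hysteresis instead of flapping at a single watermark. Checking the arithmetic for nr_requests = 128:

#include <stdio.h>

int main(void)
{
	unsigned long nr_requests = 128;

	/* congested once usage reaches ~7/8 of the pool (plus one) */
	unsigned long on = nr_requests - nr_requests / 8 + 1;	/* 113 */

	/* not uncongested until usage drops to ~13/16 (minus one) */
	unsigned long off = nr_requests - nr_requests / 8
			  - nr_requests / 16 - 1;		/* 103 */

	printf("on=%lu off=%lu\n", on, off);
	return 0;
}
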
blk-sysfs.c:43 static ssize_t queue_requests_show(struct request_queue *q, char *page) in queue_requests_show() argument
45 return queue_var_show(q->nr_requests, (page)); in queue_requests_show()
49 queue_requests_store(struct request_queue *q, const char *page, size_t count) in queue_requests_store() argument
54 if (!q->request_fn && !q->mq_ops) in queue_requests_store()
64 if (q->request_fn) in queue_requests_store()
65 err = blk_update_nr_requests(q, nr); in queue_requests_store()
67 err = blk_mq_update_nr_requests(q, nr); in queue_requests_store()
75 static ssize_t queue_ra_show(struct request_queue *q, char *page) in queue_ra_show() argument
77 unsigned long ra_kb = q->backing_dev_info.ra_pages << in queue_ra_show()
84 queue_ra_store(struct request_queue *q, const char *page, size_t count) in queue_ra_store() argument
[all …]
elevator.c:58 struct request_queue *q = rq->q; in elv_iosched_allow_merge() local
59 struct elevator_queue *e = q->elevator; in elv_iosched_allow_merge()
62 return e->type->ops.elevator_allow_merge_fn(q, rq, bio); in elv_iosched_allow_merge()
153 struct elevator_queue *elevator_alloc(struct request_queue *q, in elevator_alloc() argument
158 eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node); in elevator_alloc()
180 int elevator_init(struct request_queue *q, char *name) in elevator_init() argument
189 lockdep_assert_held(&q->sysfs_lock); in elevator_init()
191 if (unlikely(q->elevator)) in elevator_init()
194 INIT_LIST_HEAD(&q->queue_head); in elevator_init()
195 q->last_merge = NULL; in elevator_init()
[all …]
blk.h:34 struct request_queue *q, struct blk_mq_ctx *ctx) in blk_get_flush_queue() argument
38 if (!q->mq_ops) in blk_get_flush_queue()
39 return q->fq; in blk_get_flush_queue()
41 hctx = q->mq_ops->map_queue(q, ctx->cpu); in blk_get_flush_queue()
46 static inline void __blk_get_queue(struct request_queue *q) in __blk_get_queue() argument
48 kobject_get(&q->kobj); in __blk_get_queue()
51 struct blk_flush_queue *blk_alloc_flush_queue(struct request_queue *q,
53 void blk_free_flush_queue(struct blk_flush_queue *q);
55 int blk_init_rl(struct request_list *rl, struct request_queue *q,
59 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
[all …]
blk-settings.c:33 void blk_queue_prep_rq(struct request_queue *q, prep_rq_fn *pfn) in blk_queue_prep_rq() argument
35 q->prep_rq_fn = pfn; in blk_queue_prep_rq()
50 void blk_queue_unprep_rq(struct request_queue *q, unprep_rq_fn *ufn) in blk_queue_unprep_rq() argument
52 q->unprep_rq_fn = ufn; in blk_queue_unprep_rq()
72 void blk_queue_merge_bvec(struct request_queue *q, merge_bvec_fn *mbfn) in blk_queue_merge_bvec() argument
74 q->merge_bvec_fn = mbfn; in blk_queue_merge_bvec()
78 void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn) in blk_queue_softirq_done() argument
80 q->softirq_done_fn = fn; in blk_queue_softirq_done()
84 void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout) in blk_queue_rq_timeout() argument
86 q->rq_timeout = timeout; in blk_queue_rq_timeout()
[all …]
blk-mq.c:80 static int blk_mq_queue_enter(struct request_queue *q, gfp_t gfp) in blk_mq_queue_enter() argument
85 if (percpu_ref_tryget_live(&q->mq_usage_counter)) in blk_mq_queue_enter()
91 ret = wait_event_interruptible(q->mq_freeze_wq, in blk_mq_queue_enter()
92 !q->mq_freeze_depth || blk_queue_dying(q)); in blk_mq_queue_enter()
93 if (blk_queue_dying(q)) in blk_mq_queue_enter()
100 static void blk_mq_queue_exit(struct request_queue *q) in blk_mq_queue_exit() argument
102 percpu_ref_put(&q->mq_usage_counter); in blk_mq_queue_exit()
107 struct request_queue *q = in blk_mq_usage_counter_release() local
110 wake_up_all(&q->mq_freeze_wq); in blk_mq_usage_counter_release()
113 void blk_mq_freeze_queue_start(struct request_queue *q) in blk_mq_freeze_queue_start() argument
[all …]
blk-merge.c:12 static unsigned int __blk_recalc_rq_segments(struct request_queue *q, in __blk_recalc_rq_segments() argument
36 cluster = blk_queue_cluster(q); in __blk_recalc_rq_segments()
54 high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q); in __blk_recalc_rq_segments()
57 > queue_max_segment_size(q)) in __blk_recalc_rq_segments()
61 if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv)) in __blk_recalc_rq_segments()
92 &rq->q->queue_flags); in blk_recalc_rq_segments()
94 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, in blk_recalc_rq_segments()
98 void blk_recount_segments(struct request_queue *q, struct bio *bio) in blk_recount_segments() argument
108 if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) && in blk_recount_segments()
109 (seg_cnt < queue_max_segments(q))) in blk_recount_segments()
[all …]
blk-flush.c:94 static bool blk_kick_flush(struct request_queue *q,
134 if (rq->q->mq_ops) { in blk_flush_queue_rq()
135 struct request_queue *q = rq->q; in blk_flush_queue_rq() local
138 blk_mq_kick_requeue_list(q); in blk_flush_queue_rq()
142 list_add(&rq->queuelist, &rq->q->queue_head); in blk_flush_queue_rq()
144 list_add_tail(&rq->queuelist, &rq->q->queue_head); in blk_flush_queue_rq()
169 struct request_queue *q = rq->q; in blk_flush_complete_seq() local
205 if (q->mq_ops) in blk_flush_complete_seq()
215 kicked = blk_kick_flush(q, fq); in blk_flush_complete_seq()
221 struct request_queue *q = flush_rq->q; in flush_end_io() local
[all …]
blk-cgroup.c:35 static bool blkcg_policy_enabled(struct request_queue *q, in blkcg_policy_enabled() argument
38 return pol && test_bit(pol->plid, q->blkcg_pols); in blkcg_policy_enabled()
69 static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q, in blkg_alloc() argument
76 blkg = kzalloc_node(sizeof(*blkg), gfp_mask, q->node); in blkg_alloc()
80 blkg->q = q; in blkg_alloc()
87 if (blk_init_rl(&blkg->rl, q, gfp_mask)) in blkg_alloc()
96 if (!blkcg_policy_enabled(q, pol)) in blkg_alloc()
100 pd = kzalloc_node(pol->pd_size, gfp_mask, q->node); in blkg_alloc()
127 struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q, in __blkg_lookup() argument
133 if (blkg && blkg->q == q) in __blkg_lookup()
[all …]
blk-timeout.c:22 int blk_should_fake_timeout(struct request_queue *q) in blk_should_fake_timeout() argument
24 if (!test_bit(QUEUE_FLAG_FAIL_IO, &q->queue_flags)) in blk_should_fake_timeout()
56 struct request_queue *q = disk->queue; in part_timeout_store() local
60 spin_lock_irq(q->queue_lock); in part_timeout_store()
62 queue_flag_set(QUEUE_FLAG_FAIL_IO, q); in part_timeout_store()
64 queue_flag_clear(QUEUE_FLAG_FAIL_IO, q); in part_timeout_store()
65 spin_unlock_irq(q->queue_lock); in part_timeout_store()
85 struct request_queue *q = req->q; in blk_rq_timed_out() local
88 if (q->rq_timed_out_fn) in blk_rq_timed_out()
89 ret = q->rq_timed_out_fn(req); in blk_rq_timed_out()
[all …]
blk-tag.c:23 struct request *blk_queue_find_tag(struct request_queue *q, int tag) in blk_queue_find_tag() argument
25 return blk_map_queue_find_tag(q->queue_tags, tag); in blk_queue_find_tag()
61 void __blk_queue_free_tags(struct request_queue *q) in __blk_queue_free_tags() argument
63 struct blk_queue_tag *bqt = q->queue_tags; in __blk_queue_free_tags()
70 q->queue_tags = NULL; in __blk_queue_free_tags()
71 queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); in __blk_queue_free_tags()
82 void blk_queue_free_tags(struct request_queue *q) in blk_queue_free_tags() argument
84 queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q); in blk_queue_free_tags()
89 init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth) in init_tag_map() argument
95 if (q && depth > q->nr_requests * 2) { in init_tag_map()
[all …]
blk-ioc.c:41 struct elevator_type *et = icq->q->elevator->type; in ioc_exit_icq()
56 struct request_queue *q = icq->q; in ioc_destroy_icq() local
57 struct elevator_type *et = q->elevator->type; in ioc_destroy_icq()
60 lockdep_assert_held(q->queue_lock); in ioc_destroy_icq()
62 radix_tree_delete(&ioc->icq_tree, icq->q->id); in ioc_destroy_icq()
105 struct request_queue *q = icq->q; in ioc_release_fn() local
107 if (spin_trylock(q->queue_lock)) { in ioc_release_fn()
109 spin_unlock(q->queue_lock); in ioc_release_fn()
185 if (spin_trylock(icq->q->queue_lock)) { in put_io_context_active()
187 spin_unlock(icq->q->queue_lock); in put_io_context_active()
[all …]
bsg-lib.c:100 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); in bsg_map_buffer()
113 struct request_queue *q = req->q; in bsg_create_job() local
119 job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL); in bsg_create_job()
125 if (q->bsg_job_size) in bsg_create_job()
163 void bsg_request_fn(struct request_queue *q) in bsg_request_fn() argument
165 struct device *dev = q->queuedata; in bsg_request_fn()
174 req = blk_fetch_request(q); in bsg_request_fn()
177 spin_unlock_irq(q->queue_lock); in bsg_request_fn()
183 spin_lock_irq(q->queue_lock); in bsg_request_fn()
188 ret = q->bsg_job_fn(job); in bsg_request_fn()
[all …]
blk-mq-sysfs.c:37 struct request_queue *q; in blk_mq_sysfs_show() local
42 q = ctx->queue; in blk_mq_sysfs_show()
48 mutex_lock(&q->sysfs_lock); in blk_mq_sysfs_show()
49 if (!blk_queue_dying(q)) in blk_mq_sysfs_show()
51 mutex_unlock(&q->sysfs_lock); in blk_mq_sysfs_show()
60 struct request_queue *q; in blk_mq_sysfs_store() local
65 q = ctx->queue; in blk_mq_sysfs_store()
71 mutex_lock(&q->sysfs_lock); in blk_mq_sysfs_store()
72 if (!blk_queue_dying(q)) in blk_mq_sysfs_store()
74 mutex_unlock(&q->sysfs_lock); in blk_mq_sysfs_store()
[all …]
blk-mq.h:30 void blk_mq_freeze_queue(struct request_queue *q);
31 void blk_mq_free_queue(struct request_queue *q);
34 int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
35 void blk_mq_wake_waiters(struct request_queue *q);
60 extern int blk_mq_sysfs_register(struct request_queue *q);
61 extern void blk_mq_sysfs_unregister(struct request_queue *q);
65 void blk_mq_release(struct request_queue *q);
76 static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, in __blk_mq_get_ctx() argument
79 return per_cpu_ptr(q->queue_ctx, cpu); in __blk_mq_get_ctx()
88 static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) in blk_mq_get_ctx() argument
[all …]
noop-iosched.c:15 static void noop_merged_requests(struct request_queue *q, struct request *rq, in noop_merged_requests() argument
21 static int noop_dispatch(struct request_queue *q, int force) in noop_dispatch() argument
23 struct noop_data *nd = q->elevator->elevator_data; in noop_dispatch()
29 elv_dispatch_sort(q, rq); in noop_dispatch()
35 static void noop_add_request(struct request_queue *q, struct request *rq) in noop_add_request() argument
37 struct noop_data *nd = q->elevator->elevator_data; in noop_add_request()
43 noop_former_request(struct request_queue *q, struct request *rq) in noop_former_request() argument
45 struct noop_data *nd = q->elevator->elevator_data; in noop_former_request()
53 noop_latter_request(struct request_queue *q, struct request *rq) in noop_latter_request() argument
55 struct noop_data *nd = q->elevator->elevator_data; in noop_latter_request()
[all …]
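
noop-iosched.c above is the minimal elevator: add_request appends to one list, dispatch pops from the front, and nothing is ever reordered. A userspace sketch of that FIFO core; the kernel version strings struct request onto a struct list_head instead of the hand-rolled pointers used here:

#include <stdio.h>
#include <stddef.h>

struct request {
	int id;
	struct request *next;
};

struct noop_data {		/* elevator private state: one FIFO */
	struct request *head, *tail;
};

static void noop_add_request(struct noop_data *nd, struct request *rq)
{
	rq->next = NULL;
	if (nd->tail)
		nd->tail->next = rq;
	else
		nd->head = rq;
	nd->tail = rq;
}

static struct request *noop_dispatch(struct noop_data *nd)
{
	struct request *rq = nd->head;
	if (rq) {
		nd->head = rq->next;
		if (!nd->head)
			nd->tail = NULL;
	}
	return rq;		/* NULL when the queue is empty */
}

int main(void)
{
	struct noop_data nd = { NULL, NULL };
	struct request a = { .id = 1 }, b = { .id = 2 };

	noop_add_request(&nd, &a);
	noop_add_request(&nd, &b);
	struct request *first = noop_dispatch(&nd);
	struct request *second = noop_dispatch(&nd);
	printf("%d %d\n", first->id, second->id);	/* 1 2 */
	return 0;
}
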
blk-map.c:12 int blk_rq_append_bio(struct request_queue *q, struct request *rq, in blk_rq_append_bio() argument
16 blk_rq_bio_prep(q, rq, bio); in blk_rq_append_bio()
17 else if (!ll_back_merge_fn(q, rq, bio)) in blk_rq_append_bio()
63 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, in blk_rq_map_user_iov() argument
84 if (uaddr & queue_dma_alignment(q)) in blk_rq_map_user_iov()
88 if (unaligned || (q->dma_pad_mask & iter->count) || map_data) in blk_rq_map_user_iov()
89 bio = bio_copy_user_iov(q, map_data, iter, gfp_mask); in blk_rq_map_user_iov()
91 bio = bio_map_user_iov(q, iter, gfp_mask); in blk_rq_map_user_iov()
114 blk_queue_bounce(q, &bio); in blk_rq_map_user_iov()
116 blk_rq_bio_prep(q, rq, bio); in blk_rq_map_user_iov()
[all …]
scsi_ioctl.c:60 static int scsi_get_idlun(struct request_queue *q, int __user *p) in scsi_get_idlun() argument
65 static int scsi_get_bus(struct request_queue *q, int __user *p) in scsi_get_bus() argument
70 static int sg_get_timeout(struct request_queue *q) in sg_get_timeout() argument
72 return jiffies_to_clock_t(q->sg_timeout); in sg_get_timeout()
75 static int sg_set_timeout(struct request_queue *q, int __user *p) in sg_set_timeout() argument
80 q->sg_timeout = clock_t_to_jiffies(timeout); in sg_set_timeout()
85 static int max_sectors_bytes(struct request_queue *q) in max_sectors_bytes() argument
87 unsigned int max_sectors = queue_max_sectors(q); in max_sectors_bytes()
94 static int sg_get_reserved_size(struct request_queue *q, int __user *p) in sg_get_reserved_size() argument
96 int val = min_t(int, q->sg_reserved_size, max_sectors_bytes(q)); in sg_get_reserved_size()
[all …]
blk-exec.c:51 void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, in blk_execute_rq_nowait() argument
68 if (q->mq_ops) { in blk_execute_rq_nowait()
79 spin_lock_irq(q->queue_lock); in blk_execute_rq_nowait()
81 if (unlikely(blk_queue_dying(q))) { in blk_execute_rq_nowait()
85 spin_unlock_irq(q->queue_lock); in blk_execute_rq_nowait()
89 __elv_add_request(q, rq, where); in blk_execute_rq_nowait()
90 __blk_run_queue(q); in blk_execute_rq_nowait()
93 __blk_run_queue_uncond(q); in blk_execute_rq_nowait()
94 spin_unlock_irq(q->queue_lock); in blk_execute_rq_nowait()
109 int blk_execute_rq(struct request_queue *q, struct gendisk *bd_disk, in blk_execute_rq() argument
[all …]
blk-cgroup.h:93 struct request_queue *q; member
138 struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q);
140 struct request_queue *q);
141 int blkcg_init_queue(struct request_queue *q);
142 void blkcg_drain_queue(struct request_queue *q);
143 void blkcg_exit_queue(struct request_queue *q);
148 int blkcg_activate_policy(struct request_queue *q,
150 void blkcg_deactivate_policy(struct request_queue *q,
279 struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
297 (p_blkg)->q, false)))
[all …]
deadline-iosched.c:99 deadline_add_request(struct request_queue *q, struct request *rq) in deadline_add_request() argument
101 struct deadline_data *dd = q->elevator->elevator_data; in deadline_add_request()
116 static void deadline_remove_request(struct request_queue *q, struct request *rq) in deadline_remove_request() argument
118 struct deadline_data *dd = q->elevator->elevator_data; in deadline_remove_request()
125 deadline_merge(struct request_queue *q, struct request **req, struct bio *bio) in deadline_merge() argument
127 struct deadline_data *dd = q->elevator->elevator_data; in deadline_merge()
154 static void deadline_merged_request(struct request_queue *q, in deadline_merged_request() argument
157 struct deadline_data *dd = q->elevator->elevator_data; in deadline_merged_request()
169 deadline_merged_requests(struct request_queue *q, struct request *req, in deadline_merged_requests() argument
186 deadline_remove_request(q, next); in deadline_merged_requests()
[all …]
bounce.c:182 static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio) in must_snapshot_stable_pages() argument
187 if (!bdi_cap_stable_pages_required(&q->backing_dev_info)) in must_snapshot_stable_pages()
193 static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio) in must_snapshot_stable_pages() argument
199 static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig, in __blk_queue_bounce() argument
211 if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q)) in __blk_queue_bounce()
221 if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force) in __blk_queue_bounce()
224 to->bv_page = mempool_alloc(pool, q->bounce_gfp); in __blk_queue_bounce()
239 trace_block_bio_bounce(q, *bio_orig); in __blk_queue_bounce()
257 void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig) in blk_queue_bounce() argument
268 must_bounce = must_snapshot_stable_pages(q, *bio_orig); in blk_queue_bounce()
[all …]
blk-lib.c:44 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_discard() local
53 if (!q) in blkdev_issue_discard()
56 if (!blk_queue_discard(q)) in blkdev_issue_discard()
60 granularity = max(q->limits.discard_granularity >> 9, 1U); in blkdev_issue_discard()
67 max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9); in blkdev_issue_discard()
75 if (!blk_queue_secdiscard(q)) in blkdev_issue_discard()
160 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_write_same() local
166 if (!q) in blkdev_issue_write_same()
169 max_write_same_sectors = q->limits.max_write_same_sectors; in blkdev_issue_write_same()
304 struct request_queue *q = bdev_get_queue(bdev); in blkdev_issue_zeroout() local
[all …]
bsg.c:139 static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, in blk_fill_sgv4_hdr_rq() argument
166 rq->timeout = q->sg_timeout; in blk_fill_sgv4_hdr_rq()
179 bsg_validate_sgv4_hdr(struct request_queue *q, struct sg_io_v4 *hdr, int *rw) in bsg_validate_sgv4_hdr() argument
211 struct request_queue *q = bd->queue; in bsg_map_hdr() local
216 struct bsg_class_device *bcd = &q->bsg_dev; in bsg_map_hdr()
229 ret = bsg_validate_sgv4_hdr(q, hdr, &rw); in bsg_map_hdr()
236 rq = blk_get_request(q, rw, GFP_KERNEL); in bsg_map_hdr()
241 ret = blk_fill_sgv4_hdr_rq(q, rq, hdr, bd, has_write_perm); in bsg_map_hdr()
246 if (!test_bit(QUEUE_FLAG_BIDI, &q->queue_flags)) { in bsg_map_hdr()
251 next_rq = blk_get_request(q, READ, GFP_KERNEL); in bsg_map_hdr()
[all …]
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
kfd_queue.c:27 void print_queue_properties(struct queue_properties *q) in print_queue_properties() argument
29 if (!q) in print_queue_properties()
33 pr_debug("Queue Type: %u\n", q->type); in print_queue_properties()
34 pr_debug("Queue Size: %llu\n", q->queue_size); in print_queue_properties()
35 pr_debug("Queue percent: %u\n", q->queue_percent); in print_queue_properties()
36 pr_debug("Queue Address: 0x%llX\n", q->queue_address); in print_queue_properties()
37 pr_debug("Queue Id: %u\n", q->queue_id); in print_queue_properties()
38 pr_debug("Queue Process Vmid: %u\n", q->vmid); in print_queue_properties()
39 pr_debug("Queue Read Pointer: 0x%p\n", q->read_ptr); in print_queue_properties()
40 pr_debug("Queue Write Pointer: 0x%p\n", q->write_ptr); in print_queue_properties()
[all …]
kfd_mqd_manager_cik.c:38 struct queue_properties *q) in init_mqd() argument
44 BUG_ON(!mm || !q || !mqd); in init_mqd()
97 if (q->format == KFD_QUEUE_FORMAT_AQL) in init_mqd()
103 retval = mm->update_mqd(mm, m, q); in init_mqd()
110 struct queue_properties *q) in init_mqd_sdma() argument
132 retval = mm->update_mqd(mm, m, q); in init_mqd_sdma()
166 struct queue_properties *q) in update_mqd() argument
170 BUG_ON(!mm || !q || !mqd); in update_mqd()
182 m->cp_hqd_pq_control |= ffs(q->queue_size / sizeof(unsigned int)) in update_mqd()
184 m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); in update_mqd()
[all …]
kfd_device_queue_manager.c:44 struct queue *q,
51 struct queue *q,
95 struct queue *q) in allocate_vmid() argument
109 q->properties.vmid = allocated_vmid; in allocate_vmid()
111 set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid); in allocate_vmid()
119 struct queue *q) in deallocate_vmid() argument
128 q->properties.vmid = 0; in deallocate_vmid()
132 struct queue *q, in create_queue_nocpsch() argument
138 BUG_ON(!dqm || !q || !qpd || !allocated_vmid); in create_queue_nocpsch()
141 print_queue(q); in create_queue_nocpsch()
[all …]
kfd_process_queue_manager.c:38 if (pqn->q && pqn->q->properties.queue_id == qid) in get_queue_by_qid()
100 (pqn->q != NULL) ? in pqm_uninit()
101 pqn->q->properties.queue_id : in pqm_uninit()
114 struct kfd_dev *dev, struct queue **q, in create_cp_queue() argument
132 retval = init_queue(q, *q_properties); in create_cp_queue()
136 (*q)->device = dev; in create_cp_queue()
137 (*q)->process = pqm->process; in create_cp_queue()
158 struct queue *q; in pqm_create_queue() local
166 q = NULL; in pqm_create_queue()
202 retval = create_cp_queue(pqm, dev, &q, &q_properties, f, *qid); in pqm_create_queue()
[all …]
/linux-4.1.27/drivers/mtd/spi-nor/
fsl-quadspi.c:238 static inline int is_vybrid_qspi(struct fsl_qspi *q) in is_vybrid_qspi() argument
240 return q->devtype_data->devtype == FSL_QUADSPI_VYBRID; in is_vybrid_qspi()
243 static inline int is_imx6sx_qspi(struct fsl_qspi *q) in is_imx6sx_qspi() argument
245 return q->devtype_data->devtype == FSL_QUADSPI_IMX6SX; in is_imx6sx_qspi()
252 static inline u32 fsl_qspi_endian_xchg(struct fsl_qspi *q, u32 a) in fsl_qspi_endian_xchg() argument
254 return is_vybrid_qspi(q) ? __swab32(a) : a; in fsl_qspi_endian_xchg()
257 static inline void fsl_qspi_unlock_lut(struct fsl_qspi *q) in fsl_qspi_unlock_lut() argument
259 writel(QUADSPI_LUTKEY_VALUE, q->iobase + QUADSPI_LUTKEY); in fsl_qspi_unlock_lut()
260 writel(QUADSPI_LCKER_UNLOCK, q->iobase + QUADSPI_LCKCR); in fsl_qspi_unlock_lut()
263 static inline void fsl_qspi_lock_lut(struct fsl_qspi *q) in fsl_qspi_lock_lut() argument
[all …]
/linux-4.1.27/drivers/net/wireless/b43/
pio.c:37 static u16 generate_cookie(struct b43_pio_txqueue *q, in generate_cookie() argument
50 cookie = (((u16)q->index + 1) << 12); in generate_cookie()
62 struct b43_pio_txqueue *q = NULL; in parse_cookie() local
67 q = pio->tx_queue_AC_BK; in parse_cookie()
70 q = pio->tx_queue_AC_BE; in parse_cookie()
73 q = pio->tx_queue_AC_VI; in parse_cookie()
76 q = pio->tx_queue_AC_VO; in parse_cookie()
79 q = pio->tx_queue_mcast; in parse_cookie()
82 if (B43_WARN_ON(!q)) in parse_cookie()
85 if (B43_WARN_ON(pack_index >= ARRAY_SIZE(q->packets))) in parse_cookie()
[all …]
pio.h:108 static inline u16 b43_piotx_read16(struct b43_pio_txqueue *q, u16 offset) in b43_piotx_read16() argument
110 return b43_read16(q->dev, q->mmio_base + offset); in b43_piotx_read16()
113 static inline u32 b43_piotx_read32(struct b43_pio_txqueue *q, u16 offset) in b43_piotx_read32() argument
115 return b43_read32(q->dev, q->mmio_base + offset); in b43_piotx_read32()
118 static inline void b43_piotx_write16(struct b43_pio_txqueue *q, in b43_piotx_write16() argument
121 b43_write16(q->dev, q->mmio_base + offset, value); in b43_piotx_write16()
124 static inline void b43_piotx_write32(struct b43_pio_txqueue *q, in b43_piotx_write32() argument
127 b43_write32(q->dev, q->mmio_base + offset, value); in b43_piotx_write32()
131 static inline u16 b43_piorx_read16(struct b43_pio_rxqueue *q, u16 offset) in b43_piorx_read16() argument
133 return b43_read16(q->dev, q->mmio_base + offset); in b43_piorx_read16()
[all …]
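
The b43_piotx_read16()-style helpers above make every queue register access relative to that queue's mmio_base, so the same code can drive several identical register windows. A sketch of the base-plus-offset accessor pattern, with an ordinary byte array standing in for device MMIO (the real helpers go through the bus read/write routines):

#include <stdio.h>
#include <stdint.h>

static uint8_t fake_regs[0x200];	/* stand-in for a device register space */

static uint16_t bus_read16(uint16_t addr)
{
	/* little-endian 16-bit read from the fake register space */
	return (uint16_t)(fake_regs[addr] | (fake_regs[addr + 1] << 8));
}

struct txqueue {
	uint16_t mmio_base;	/* this queue's register window */
};

/* All queue register accesses are offset from the queue's base. */
static uint16_t piotx_read16(const struct txqueue *q, uint16_t offset)
{
	return bus_read16(q->mmio_base + offset);
}

int main(void)
{
	struct txqueue q = { .mmio_base = 0x100 };
	fake_regs[0x104] = 0x34;
	fake_regs[0x105] = 0x12;
	printf("0x%04x\n", piotx_read16(&q, 4));	/* 0x1234 */
	return 0;
}
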
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/
sge.c:227 static inline unsigned int txq_avail(const struct sge_txq *q) in txq_avail() argument
229 return q->size - 1 - q->in_use; in txq_avail()
322 const struct ulptx_sgl *sgl, const struct sge_txq *q) in unmap_sgl() argument
341 if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) { in unmap_sgl()
347 } else if ((u8 *)p == (u8 *)q->stat) { in unmap_sgl()
348 p = (const struct ulptx_sge_pair *)q->desc; in unmap_sgl()
350 } else if ((u8 *)p + 8 == (u8 *)q->stat) { in unmap_sgl()
351 const __be64 *addr = (const __be64 *)q->desc; in unmap_sgl()
359 const __be64 *addr = (const __be64 *)q->desc; in unmap_sgl()
371 if ((u8 *)p == (u8 *)q->stat) in unmap_sgl()
[all …]
cxgb4.h:475 typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
574 struct sge_txq q; member
586 struct sge_txq q; member
595 struct sge_txq q; member
924 static inline void cxgb_busy_poll_init_lock(struct sge_rspq *q) in cxgb_busy_poll_init_lock() argument
926 spin_lock_init(&q->bpoll_lock); in cxgb_busy_poll_init_lock()
927 q->bpoll_state = CXGB_POLL_STATE_IDLE; in cxgb_busy_poll_init_lock()
930 static inline bool cxgb_poll_lock_napi(struct sge_rspq *q) in cxgb_poll_lock_napi() argument
934 spin_lock(&q->bpoll_lock); in cxgb_poll_lock_napi()
935 if (q->bpoll_state & CXGB_POLL_LOCKED) { in cxgb_poll_lock_napi()
[all …]
/linux-4.1.27/arch/sh/kernel/cpu/sh5/
Dswitchto.S66 st.q r0, ( 9*8), r9
67 st.q r0, (10*8), r10
68 st.q r0, (11*8), r11
69 st.q r0, (12*8), r12
70 st.q r0, (13*8), r13
71 st.q r0, (14*8), r14 ! for unwind, want to look as though we took a trap at
74 st.q r0, (16*8), r16
76 st.q r0, (24*8), r24
77 st.q r0, (25*8), r25
78 st.q r0, (26*8), r26
[all …]
Dentry.S261 st.q SP, SAVED_R2, r2
262 st.q SP, SAVED_R3, r3
263 st.q SP, SAVED_R4, r4
264 st.q SP, SAVED_R5, r5
265 st.q SP, SAVED_R6, r6
266 st.q SP, SAVED_R18, r18
268 st.q SP, SAVED_TR0, r3
302 st.q SP, TLB_SAVED_R0 , r0
303 st.q SP, TLB_SAVED_R1 , r1
304 st.q SP, SAVED_R2 , r2
[all …]
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb3/
Dsge.c167 static inline struct sge_qset *fl_to_qset(const struct sge_fl *q, int qidx) in fl_to_qset() argument
169 return container_of(q, struct sge_qset, fl[qidx]); in fl_to_qset()
172 static inline struct sge_qset *rspq_to_qset(const struct sge_rspq *q) in rspq_to_qset() argument
174 return container_of(q, struct sge_qset, rspq); in rspq_to_qset()
177 static inline struct sge_qset *txq_to_qset(const struct sge_txq *q, int qidx) in txq_to_qset() argument
179 return container_of(q, struct sge_qset, txq[qidx]); in txq_to_qset()
192 const struct sge_rspq *q, unsigned int credits) in refill_rspq() argument
196 V_RSPQ(q->cntxt_id) | V_CREDITS(credits)); in refill_rspq()
236 static inline void unmap_skb(struct sk_buff *skb, struct sge_txq *q, in unmap_skb() argument
240 struct tx_sw_desc *d = &q->sdesc[cidx]; in unmap_skb()
[all …]
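
fl_to_qset(), rspq_to_qset() and txq_to_qset() above all recover the enclosing sge_qset from a pointer to one of its embedded members via container_of(), which is nothing more than offsetof() arithmetic. A self-contained illustration (simplified macro, toy structs):

    #include <assert.h>
    #include <stddef.h>

    /* The kernel's definition minus its type-checking sugar. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rspq { int cidx; };

    struct qset {
        int id;
        struct rspq rspq;
    };

    static struct qset *rspq_to_qset(struct rspq *q)
    {
        return container_of(q, struct qset, rspq);
    }

    int main(void)
    {
        struct qset qs = { .id = 7 };

        assert(rspq_to_qset(&qs.rspq)->id == 7);
        return 0;
    }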
/linux-4.1.27/include/media/
Dvideobuf-core.h53 struct videobuf_queue *q; member
106 int (*buf_setup)(struct videobuf_queue *q,
108 int (*buf_prepare)(struct videobuf_queue *q,
111 void (*buf_queue)(struct videobuf_queue *q,
113 void (*buf_release)(struct videobuf_queue *q,
125 int (*iolock) (struct videobuf_queue *q,
128 int (*sync) (struct videobuf_queue *q,
130 int (*mmap_mapper) (struct videobuf_queue *q,
165 static inline void videobuf_queue_lock(struct videobuf_queue *q) in videobuf_queue_lock() argument
167 if (!q->ext_lock) in videobuf_queue_lock()
[all …]
Dvideobuf2-core.h313 int (*queue_setup)(struct vb2_queue *q, const struct v4l2_format *fmt,
317 void (*wait_prepare)(struct vb2_queue *q);
318 void (*wait_finish)(struct vb2_queue *q);
325 int (*start_streaming)(struct vb2_queue *q, unsigned int count);
326 void (*stop_streaming)(struct vb2_queue *q);
447 void vb2_discard_done(struct vb2_queue *q);
448 int vb2_wait_for_all_buffers(struct vb2_queue *q);
450 int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b);
451 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req);
453 int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create);
[all …]
/linux-4.1.27/include/linux/
Dblkdev.h57 struct request_queue *q; /* the queue this rl belongs to */ member
107 struct request_queue *q; member
236 typedef void (request_fn_proc) (struct request_queue *q);
237 typedef void (make_request_fn) (struct request_queue *q, struct bio *bio);
252 typedef int (lld_busy_fn) (struct request_queue *q);
526 static inline void queue_lockdep_assert_held(struct request_queue *q) in queue_lockdep_assert_held() argument
528 if (q->queue_lock) in queue_lockdep_assert_held()
529 lockdep_assert_held(q->queue_lock); in queue_lockdep_assert_held()
533 struct request_queue *q) in queue_flag_set_unlocked() argument
535 __set_bit(flag, &q->queue_flags); in queue_flag_set_unlocked()
[all …]
Dblk-mq.h168 struct request_queue *q);
169 void blk_mq_finish_init(struct request_queue *q);
182 struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
213 void blk_mq_cancel_requeue_work(struct request_queue *q);
214 void blk_mq_kick_requeue_list(struct request_queue *q);
215 void blk_mq_abort_requeue_list(struct request_queue *q);
220 void blk_mq_stop_hw_queues(struct request_queue *q);
221 void blk_mq_start_hw_queues(struct request_queue *q);
222 void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
223 void blk_mq_run_hw_queues(struct request_queue *q, bool async);
[all …]
Dblktrace_api.h33 extern int do_blk_trace_setup(struct request_queue *q, char *name,
52 #define blk_add_trace_msg(q, fmt, ...) \ argument
54 struct blk_trace *bt = (q)->blk_trace; \
60 extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
62 extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
65 extern int blk_trace_startstop(struct request_queue *q, int start);
66 extern int blk_trace_remove(struct request_queue *q);
74 # define blk_trace_shutdown(q) do { } while (0) argument
75 # define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY) argument
76 # define blk_add_driver_data(q, rq, data, len) do {} while (0) argument
[all …]
Dquicklist.h35 struct quicklist *q; in quicklist_alloc() local
38 q =&get_cpu_var(quicklist)[nr]; in quicklist_alloc()
39 p = q->page; in quicklist_alloc()
41 q->page = p[0]; in quicklist_alloc()
43 q->nr_pages--; in quicklist_alloc()
58 struct quicklist *q; in __quicklist_free() local
60 q = &get_cpu_var(quicklist)[nr]; in __quicklist_free()
61 *(void **)p = q->page; in __quicklist_free()
62 q->page = p; in __quicklist_free()
63 q->nr_pages++; in __quicklist_free()
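
quicklist_alloc()/__quicklist_free() above keep spare pages on a per-CPU stack by threading the next pointer through the first word of each free page itself (*(void **)p = q->page), so the list needs no bookkeeping memory of its own. The same intrusive free-list trick in user space (a toy, not the kernel code):

    #include <assert.h>
    #include <stdlib.h>

    struct quicklist {
        void *page;     /* top of the free stack */
        long nr_pages;
    };

    /* Push: the freed block's first word becomes the link. */
    static void ql_free(struct quicklist *q, void *p)
    {
        *(void **)p = q->page;
        q->page = p;
        q->nr_pages++;
    }

    /* Pop: follow the link stored inside the block we hand out. */
    static void *ql_alloc(struct quicklist *q, size_t size)
    {
        void *p = q->page;

        if (p) {
            q->page = *(void **)p;
            q->nr_pages--;
            return p;
        }
        return malloc(size);
    }

    int main(void)
    {
        struct quicklist q = { 0 };
        void *a = ql_alloc(&q, 64);

        ql_free(&q, a);
        assert(ql_alloc(&q, 64) == a); /* recycled, not reallocated */
        free(a);
        return 0;
    }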
Dwait.h72 extern void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *);
74 #define init_waitqueue_head(q) \ argument
78 __init_waitqueue_head((q), #q, &__key); \
90 static inline void init_waitqueue_entry(wait_queue_t *q, struct task_struct *p) in init_waitqueue_entry() argument
92 q->flags = 0; in init_waitqueue_entry()
93 q->private = p; in init_waitqueue_entry()
94 q->func = default_wake_function; in init_waitqueue_entry()
98 init_waitqueue_func_entry(wait_queue_t *q, wait_queue_func_t func) in init_waitqueue_func_entry() argument
100 q->flags = 0; in init_waitqueue_func_entry()
101 q->private = NULL; in init_waitqueue_func_entry()
[all …]
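
The wait.h excerpt shows the two halves of the API: a head per event source (init_waitqueue_head() wires up the lock and task_list) and an entry per waiter (init_waitqueue_entry() points it at a task and default_wake_function). A minimal kernel-style usage sketch, assuming module context; wait_event()/wake_up() wrap the add/schedule/remove dance so drivers rarely touch the entries directly:

    #include <linux/sched.h>
    #include <linux/wait.h>

    static DECLARE_WAIT_QUEUE_HEAD(my_wq); /* static-init variant */
    static int condition;

    /* Sleeps until the condition becomes true. */
    static void consumer(void)
    {
        wait_event(my_wq, condition != 0);
    }

    static void producer(void)
    {
        condition = 1;
        wake_up(&my_wq); /* walks my_wq.task_list, calls each q->func */
    }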
/linux-4.1.27/kernel/sched/
Dwait.c14 void __init_waitqueue_head(wait_queue_head_t *q, const char *name, struct lock_class_key *key) in __init_waitqueue_head() argument
16 spin_lock_init(&q->lock); in __init_waitqueue_head()
17 lockdep_set_class_and_name(&q->lock, key, name); in __init_waitqueue_head()
18 INIT_LIST_HEAD(&q->task_list); in __init_waitqueue_head()
23 void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) in add_wait_queue() argument
28 spin_lock_irqsave(&q->lock, flags); in add_wait_queue()
29 __add_wait_queue(q, wait); in add_wait_queue()
30 spin_unlock_irqrestore(&q->lock, flags); in add_wait_queue()
34 void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait) in add_wait_queue_exclusive() argument
39 spin_lock_irqsave(&q->lock, flags); in add_wait_queue_exclusive()
[all …]
/linux-4.1.27/drivers/net/
Dmacvtap.c52 static inline u16 macvtap16_to_cpu(struct macvtap_queue *q, __virtio16 val) in macvtap16_to_cpu() argument
54 return __virtio16_to_cpu(q->flags & MACVTAP_VNET_LE, val); in macvtap16_to_cpu()
57 static inline __virtio16 cpu_to_macvtap16(struct macvtap_queue *q, u16 val) in cpu_to_macvtap16() argument
59 return __cpu_to_virtio16(q->flags & MACVTAP_VNET_LE, val); in cpu_to_macvtap16()
110 struct macvtap_queue *q) in macvtap_enable_queue() argument
117 if (q->enabled) in macvtap_enable_queue()
121 rcu_assign_pointer(vlan->taps[vlan->numvtaps], q); in macvtap_enable_queue()
122 q->queue_index = vlan->numvtaps; in macvtap_enable_queue()
123 q->enabled = true; in macvtap_enable_queue()
132 struct macvtap_queue *q) in macvtap_set_queue() argument
[all …]
/linux-4.1.27/drivers/scsi/arm/
Dqueue.c42 #define SET_MAGIC(q,m) ((q)->magic = (m)) argument
43 #define BAD_MAGIC(q,m) ((q)->magic != (m)) argument
45 #define SET_MAGIC(q,m) do { } while (0) argument
46 #define BAD_MAGIC(q,m) (0) argument
61 QE_t *q; in queue_initialise() local
73 queue->alloc = q = kmalloc(sizeof(QE_t) * nqueues, GFP_KERNEL); in queue_initialise()
74 if (q) { in queue_initialise()
75 for (; nqueues; q++, nqueues--) { in queue_initialise()
76 SET_MAGIC(q, QUEUE_MAGIC_FREE); in queue_initialise()
77 q->SCpnt = NULL; in queue_initialise()
[all …]
/linux-4.1.27/lib/raid6/
Dsse2.c44 u8 *p, *q; in raid6_sse21_gen_syndrome() local
49 q = dptr[z0+2]; /* RS syndrome */ in raid6_sse21_gen_syndrome()
83 asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); in raid6_sse21_gen_syndrome()
96 u8 *p, *q; in raid6_sse21_xor_syndrome() local
101 q = dptr[disks-1]; /* RS syndrome */ in raid6_sse21_xor_syndrome()
130 asm volatile("pxor %0,%%xmm4" : : "m" (q[d])); in raid6_sse21_xor_syndrome()
132 asm volatile("movdqa %%xmm4,%0" : "=m" (q[d])); in raid6_sse21_xor_syndrome()
154 u8 *p, *q; in raid6_sse22_gen_syndrome() local
159 q = dptr[z0+2]; /* RS syndrome */ in raid6_sse22_gen_syndrome()
195 asm volatile("movntdq %%xmm4,%0" : "=m" (q[d])); in raid6_sse22_gen_syndrome()
[all …]
Drecov_avx2.c25 u8 *p, *q, *dp, *dq; in raid6_2data_recov_avx2() local
31 q = (u8 *)ptrs[disks-1]; in raid6_2data_recov_avx2()
49 ptrs[disks-1] = q; in raid6_2data_recov_avx2()
63 asm volatile("vmovdqa %0, %%ymm1" : : "m" (q[0])); in raid6_2data_recov_avx2()
64 asm volatile("vmovdqa %0, %%ymm9" : : "m" (q[32])); in raid6_2data_recov_avx2()
136 q += 64; in raid6_2data_recov_avx2()
140 asm volatile("vmovdqa %0, %%ymm1" : : "m" (*q)); in raid6_2data_recov_avx2()
183 q += 32; in raid6_2data_recov_avx2()
195 u8 *p, *q, *dq; in raid6_datap_recov_avx2() local
200 q = (u8 *)ptrs[disks-1]; in raid6_datap_recov_avx2()
[all …]
Drecov_ssse3.c25 u8 *p, *q, *dp, *dq; in raid6_2data_recov_ssse3() local
33 q = (u8 *)ptrs[disks-1]; in raid6_2data_recov_ssse3()
51 ptrs[disks-1] = q; in raid6_2data_recov_ssse3()
73 asm volatile("movdqa %0,%%xmm1" : : "m" (q[0])); in raid6_2data_recov_ssse3()
74 asm volatile("movdqa %0,%%xmm9" : : "m" (q[16])); in raid6_2data_recov_ssse3()
140 q += 32; in raid6_2data_recov_ssse3()
144 asm volatile("movdqa %0,%%xmm1" : : "m" (*q)); in raid6_2data_recov_ssse3()
187 q += 16; in raid6_2data_recov_ssse3()
200 u8 *p, *q, *dq; in raid6_datap_recov_ssse3() local
207 q = (u8 *)ptrs[disks-1]; in raid6_datap_recov_ssse3()
[all …]
Davx2.c45 u8 *p, *q; in raid6_avx21_gen_syndrome() local
50 q = dptr[z0+2]; /* RS syndrome */ in raid6_avx21_gen_syndrome()
82 asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); in raid6_avx21_gen_syndrome()
104 u8 *p, *q; in raid6_avx22_gen_syndrome() local
109 q = dptr[z0+2]; /* RS syndrome */ in raid6_avx22_gen_syndrome()
144 asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); in raid6_avx22_gen_syndrome()
145 asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32])); in raid6_avx22_gen_syndrome()
168 u8 *p, *q; in raid6_avx24_gen_syndrome() local
173 q = dptr[z0+2]; /* RS syndrome */ in raid6_avx24_gen_syndrome()
231 asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d])); in raid6_avx24_gen_syndrome()
[all …]
Drecov.c28 u8 *p, *q, *dp, *dq; in raid6_2data_recov_intx1() local
34 q = (u8 *)ptrs[disks-1]; in raid6_2data_recov_intx1()
52 ptrs[disks-1] = q; in raid6_2data_recov_intx1()
61 qx = qmul[*q ^ *dq]; in raid6_2data_recov_intx1()
64 p++; q++; in raid6_2data_recov_intx1()
72 u8 *p, *q, *dq; in raid6_datap_recov_intx1() local
76 q = (u8 *)ptrs[disks-1]; in raid6_datap_recov_intx1()
88 ptrs[disks-1] = q; in raid6_datap_recov_intx1()
95 *p++ ^= *dq = qmul[*q ^ *dq]; in raid6_datap_recov_intx1()
96 q++; dq++; in raid6_datap_recov_intx1()
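
raid6_datap_recov_intx1() above rebuilds a lost data block plus P from the surviving Q, and the whole algebra is in the loop body *p++ ^= *dq = qmul[*q ^ *dq]. My reading of it (an interpretation, not text from the source): over GF(2^8) with generator g the syndromes are

    \[ P = \bigoplus_i D_i, \qquad Q = \bigoplus_i g^{\,i} D_i . \]

gen_syndrome() is rerun with the dead block z swapped for a zero page, leaving the partial results P' in the p buffer and Q' in dq. Only the z term differs, so

    \[ Q \oplus Q' = g^{\,z} D_z \;\Longrightarrow\; D_z = g^{-z}(Q \oplus Q'), \qquad P = P' \oplus D_z , \]

which is the loop exactly: qmul is the precomputed multiply-by-g^{-z} table, the assignment into *dq rewrites Q' as the recovered D_z in place, and the XOR into *p turns the partial parity P' into P.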
Dmmx.c43 u8 *p, *q; in raid6_mmx1_gen_syndrome() local
48 q = dptr[z0+2]; /* RS syndrome */ in raid6_mmx1_gen_syndrome()
70 asm volatile("movq %%mm4,%0" : "=m" (q[d])); in raid6_mmx1_gen_syndrome()
91 u8 *p, *q; in raid6_mmx2_gen_syndrome() local
96 q = dptr[z0+2]; /* RS syndrome */ in raid6_mmx2_gen_syndrome()
129 asm volatile("movq %%mm4,%0" : "=m" (q[d])); in raid6_mmx2_gen_syndrome()
130 asm volatile("movq %%mm6,%0" : "=m" (q[d+8])); in raid6_mmx2_gen_syndrome()
Dsse1.c48 u8 *p, *q; in raid6_sse11_gen_syndrome() local
53 q = dptr[z0+2]; /* RS syndrome */ in raid6_sse11_gen_syndrome()
86 asm volatile("movntq %%mm4,%0" : "=m" (q[d])); in raid6_sse11_gen_syndrome()
107 u8 *p, *q; in raid6_sse12_gen_syndrome() local
112 q = dptr[z0+2]; /* RS syndrome */ in raid6_sse12_gen_syndrome()
148 asm volatile("movntq %%mm4,%0" : "=m" (q[d])); in raid6_sse12_gen_syndrome()
149 asm volatile("movntq %%mm6,%0" : "=m" (q[d+8])); in raid6_sse12_gen_syndrome()
/linux-4.1.27/net/sctp/
Doutqueue.c56 static void sctp_check_transmitted(struct sctp_outq *q,
63 static void sctp_mark_missing(struct sctp_outq *q,
69 static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);
71 static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
74 static inline void sctp_outq_head_data(struct sctp_outq *q, in sctp_outq_head_data() argument
77 list_add(&ch->list, &q->out_chunk_list); in sctp_outq_head_data()
78 q->out_qlen += ch->skb->len; in sctp_outq_head_data()
82 static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q) in sctp_outq_dequeue_data() argument
86 if (!list_empty(&q->out_chunk_list)) { in sctp_outq_dequeue_data()
87 struct list_head *entry = q->out_chunk_list.next; in sctp_outq_dequeue_data()
[all …]
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmutil/
Dutils.c61 struct sk_buff_head *q; in brcmu_pktq_penq() local
66 q = &pq->q[prec].skblist; in brcmu_pktq_penq()
67 skb_queue_tail(q, p); in brcmu_pktq_penq()
80 struct sk_buff_head *q; in brcmu_pktq_penq_head() local
85 q = &pq->q[prec].skblist; in brcmu_pktq_penq_head()
86 skb_queue_head(q, p); in brcmu_pktq_penq_head()
98 struct sk_buff_head *q; in brcmu_pktq_pdeq() local
101 q = &pq->q[prec].skblist; in brcmu_pktq_pdeq()
102 p = skb_dequeue(q); in brcmu_pktq_pdeq()
121 struct sk_buff_head *q; in brcmu_pktq_pdeq_match() local
[all …]
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb/
Dsge.c480 struct cmdQ *q = &sge->cmdQ[0]; in sched_skb() local
481 clear_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); in sched_skb()
482 if (test_and_set_bit(CMDQ_STAT_RUNNING, &q->status) == 0) { in sched_skb()
483 set_bit(CMDQ_STAT_LAST_PKT_DB, &q->status); in sched_skb()
505 static void free_freelQ_buffers(struct pci_dev *pdev, struct freelQ *q) in free_freelQ_buffers() argument
507 unsigned int cidx = q->cidx; in free_freelQ_buffers()
509 while (q->credits--) { in free_freelQ_buffers()
510 struct freelQ_ce *ce = &q->centries[cidx]; in free_freelQ_buffers()
517 if (++cidx == q->size) in free_freelQ_buffers()
537 struct freelQ *q = &sge->freelQ[i]; in free_rx_resources() local
[all …]
/linux-4.1.27/net/ipv4/
Dip_fragment.c70 struct inet_frag_queue q; member
111 static unsigned int ip4_hashfn(const struct inet_frag_queue *q) in ip4_hashfn() argument
115 ipq = container_of(q, struct ipq, q); in ip4_hashfn()
119 static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a) in ip4_frag_match() argument
124 qp = container_of(q, struct ipq, q); in ip4_frag_match()
132 static void ip4_frag_init(struct inet_frag_queue *q, const void *a) in ip4_frag_init() argument
134 struct ipq *qp = container_of(q, struct ipq, q); in ip4_frag_init()
135 struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4, in ip4_frag_init()
151 static void ip4_frag_free(struct inet_frag_queue *q) in ip4_frag_free() argument
155 qp = container_of(q, struct ipq, q); in ip4_frag_free()
[all …]
Dinet_fragment.c56 inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q) in inet_frag_hashfn() argument
58 return f->hashfn(q) & (INETFRAGS_HASHSZ - 1); in inet_frag_hashfn()
80 struct inet_frag_queue *q; in inet_frag_secret_rebuild() local
86 hlist_for_each_entry_safe(q, n, &hb->chain, list) { in inet_frag_secret_rebuild()
87 unsigned int hval = inet_frag_hashfn(f, q); in inet_frag_secret_rebuild()
92 hlist_del(&q->list); in inet_frag_secret_rebuild()
107 hlist_add_head(&q->list, &hb_dest->chain); in inet_frag_secret_rebuild()
120 static bool inet_fragq_should_evict(const struct inet_frag_queue *q) in inet_fragq_should_evict() argument
122 return q->net->low_thresh == 0 || in inet_fragq_should_evict()
123 frag_mem_limit(q->net) >= q->net->low_thresh; in inet_fragq_should_evict()
[all …]
/linux-4.1.27/arch/powerpc/lib/
Dcode-patching.c348 unsigned int *p, *q; in test_translate_branch() local
361 q = p + 1; in test_translate_branch()
362 patch_instruction(q, translate_branch(q, p)); in test_translate_branch()
363 check(instr_is_branch_to_addr(q, addr)); in test_translate_branch()
369 q = buf + 0x2000000; in test_translate_branch()
370 patch_instruction(q, translate_branch(q, p)); in test_translate_branch()
372 check(instr_is_branch_to_addr(q, addr)); in test_translate_branch()
373 check(*q == 0x4a000000); in test_translate_branch()
379 q = buf + 4; in test_translate_branch()
380 patch_instruction(q, translate_branch(q, p)); in test_translate_branch()
[all …]
/linux-4.1.27/arch/x86/kernel/
Dpci-iommu_table.c12 struct iommu_table_entry *q) in find_dependents_of() argument
16 if (!q) in find_dependents_of()
20 if (p->detect == q->depend) in find_dependents_of()
30 struct iommu_table_entry *p, *q, tmp; in sort_iommu_table() local
34 q = find_dependents_of(start, finish, p); in sort_iommu_table()
38 if (q > p) { in sort_iommu_table()
40 memmove(p, q, sizeof(*p)); in sort_iommu_table()
41 *q = tmp; in sort_iommu_table()
52 struct iommu_table_entry *p, *q, *x; in check_iommu_entries() local
56 q = find_dependents_of(start, finish, p); in check_iommu_entries()
[all …]
/linux-4.1.27/drivers/scsi/csiostor/
Dcsio_wr.c190 struct csio_q *q, *flq; in csio_wr_alloc_q() local
233 q = wrm->q_arr[free_idx]; in csio_wr_alloc_q()
235 q->vstart = pci_zalloc_consistent(hw->pdev, qsz, &q->pstart); in csio_wr_alloc_q()
236 if (!q->vstart) { in csio_wr_alloc_q()
243 q->type = type; in csio_wr_alloc_q()
244 q->owner = owner; in csio_wr_alloc_q()
245 q->pidx = q->cidx = q->inc_idx = 0; in csio_wr_alloc_q()
246 q->size = qsz; in csio_wr_alloc_q()
247 q->wr_sz = wrsize; /* If using fixed size WRs */ in csio_wr_alloc_q()
253 q->un.iq.genbit = 1; in csio_wr_alloc_q()
[all …]
/linux-4.1.27/net/ieee802154/6lowpan/
Dreassembly.c62 static unsigned int lowpan_hashfn(const struct inet_frag_queue *q) in lowpan_hashfn() argument
66 fq = container_of(q, struct lowpan_frag_queue, q); in lowpan_hashfn()
70 static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a) in lowpan_frag_match() argument
75 fq = container_of(q, struct lowpan_frag_queue, q); in lowpan_frag_match()
81 static void lowpan_frag_init(struct inet_frag_queue *q, const void *a) in lowpan_frag_init() argument
86 fq = container_of(q, struct lowpan_frag_queue, q); in lowpan_frag_init()
99 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); in lowpan_frag_expire()
100 net = container_of(fq->q.net, struct net, ieee802154_lowpan.frags); in lowpan_frag_expire()
102 spin_lock(&fq->q.lock); in lowpan_frag_expire()
104 if (fq->q.flags & INET_FRAG_COMPLETE) in lowpan_frag_expire()
[all …]
/linux-4.1.27/arch/sh/lib64/
Dmemcpy.S46 #define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
47 #define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
93 stlo.q r2, 0, r0
95 sthi.q r5, -1, r6
96 stlo.q r5, -8, r6
121 ldlo.q r6, -8, r7
123 sthi.q r2, 7, r0
124 ldhi.q r6, -1, r6
131 sthi.q r2, 7, r0
133 sthi.q r2, 15, r8
[all …]
Dcopy_user_memcpy.S61 #define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
62 #define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
108 stlo.q r2, 0, r0
110 sthi.q r5, -1, r6
111 stlo.q r5, -8, r6
136 ldlo.q r6, -8, r7
138 sthi.q r2, 7, r0
139 ldhi.q r6, -1, r6
146 sthi.q r2, 7, r0
148 sthi.q r2, 15, r8
[all …]
Dcopy_page.S44 ld.q r3, 0x00, r63
45 ld.q r3, 0x20, r63
46 ld.q r3, 0x40, r63
47 ld.q r3, 0x60, r63
71 ldx.q r2, r22, r63 ! prefetch 4 lines hence
78 ldx.q r2, r60, r36
79 ldx.q r2, r61, r37
80 ldx.q r2, r62, r38
81 ldx.q r2, r23, r39
82 st.q r2, 0, r36
[all …]
Dstrcpy.S25 ldlo.q r3,0,r4
37 ldx.q r0, r21, r5
46 ldlo.q r2, 0, r9
50 stlo.q r2, 0, r9
59 stlo.q r2, 0, r4
61 sthi.q r0, -1, r4
81 stlo.q r0, 0, r5
82 ldx.q r0, r20, r4
84 sthi.q r0, -9, r5
87 ldx.q r0, r21, r5
[all …]
/linux-4.1.27/drivers/net/wireless/iwlwifi/pcie/
Dtx.c68 static int iwl_queue_space(const struct iwl_queue *q) in iwl_queue_space() argument
79 if (q->n_window < TFD_QUEUE_SIZE_MAX) in iwl_queue_space()
80 max = q->n_window; in iwl_queue_space()
88 used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); in iwl_queue_space()
99 static int iwl_queue_init(struct iwl_queue *q, int slots_num, u32 id) in iwl_queue_init() argument
101 q->n_window = slots_num; in iwl_queue_init()
102 q->id = id; in iwl_queue_init()
109 q->low_mark = q->n_window / 4; in iwl_queue_init()
110 if (q->low_mark < 4) in iwl_queue_init()
111 q->low_mark = 4; in iwl_queue_init()
[all …]
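
iwl_queue_space() above measures occupancy as (write_ptr - read_ptr) & (TFD_QUEUE_SIZE_MAX - 1). Because the queue size is a power of two, unsigned wraparound plus the mask yields the right answer even after the raw indices overflow. A standalone check of that identity:

    #include <assert.h>

    #define QSIZE 256u /* must be a power of two */

    /* Unsigned subtraction wraps mod 2^32; the mask reduces the
     * difference mod QSIZE, so index overflow is harmless. */
    static unsigned used(unsigned write_ptr, unsigned read_ptr)
    {
        return (write_ptr - read_ptr) & (QSIZE - 1);
    }

    int main(void)
    {
        assert(used(10, 5) == 5);
        /* read_ptr just below 2^32, write_ptr already wrapped */
        assert(used(3, 0xfffffffeu) == 5);
        return 0;
    }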
Dinternal.h227 struct iwl_queue q; member
449 if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) { in iwl_wake_queue()
450 IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id); in iwl_wake_queue()
451 iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id); in iwl_wake_queue()
460 if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) { in iwl_stop_queue()
461 iwl_op_mode_queue_full(trans->op_mode, txq->q.id); in iwl_stop_queue()
462 IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id); in iwl_stop_queue()
465 txq->q.id); in iwl_stop_queue()
468 static inline bool iwl_queue_used(const struct iwl_queue *q, int i) in iwl_queue_used() argument
470 return q->write_ptr >= q->read_ptr ? in iwl_queue_used()
[all …]
/linux-4.1.27/net/ipv6/
Dreassembly.c94 static unsigned int ip6_hashfn(const struct inet_frag_queue *q) in ip6_hashfn() argument
98 fq = container_of(q, struct frag_queue, q); in ip6_hashfn()
102 bool ip6_frag_match(const struct inet_frag_queue *q, const void *a) in ip6_frag_match() argument
107 fq = container_of(q, struct frag_queue, q); in ip6_frag_match()
118 void ip6_frag_init(struct inet_frag_queue *q, const void *a) in ip6_frag_init() argument
120 struct frag_queue *fq = container_of(q, struct frag_queue, q); in ip6_frag_init()
136 spin_lock(&fq->q.lock); in ip6_expire_frag_queue()
138 if (fq->q.flags & INET_FRAG_COMPLETE) in ip6_expire_frag_queue()
141 inet_frag_kill(&fq->q, frags); in ip6_expire_frag_queue()
150 if (fq->q.flags & INET_FRAG_EVICTED) in ip6_expire_frag_queue()
[all …]
/linux-4.1.27/net/netfilter/
Dxt_quota.c28 struct xt_quota_info *q = (void *)par->matchinfo; in quota_mt() local
29 struct xt_quota_priv *priv = q->master; in quota_mt()
30 bool ret = q->flags & XT_QUOTA_INVERT; in quota_mt()
47 struct xt_quota_info *q = par->matchinfo; in quota_mt_check() local
49 if (q->flags & ~XT_QUOTA_MASK) in quota_mt_check()
52 q->master = kmalloc(sizeof(*q->master), GFP_KERNEL); in quota_mt_check()
53 if (q->master == NULL) in quota_mt_check()
56 spin_lock_init(&q->master->lock); in quota_mt_check()
57 q->master->quota = q->quota; in quota_mt_check()
63 const struct xt_quota_info *q = par->matchinfo; in quota_mt_destroy() local
[all …]
Dnfnetlink_queue_core.c99 instance_lookup(struct nfnl_queue_net *q, u_int16_t queue_num) in instance_lookup() argument
104 head = &q->instance_table[instance_hashfn(queue_num)]; in instance_lookup()
113 instance_create(struct nfnl_queue_net *q, u_int16_t queue_num, u32 portid) in instance_create() argument
119 spin_lock(&q->instances_lock); in instance_create()
120 if (instance_lookup(q, queue_num)) { in instance_create()
145 hlist_add_head_rcu(&inst->hlist, &q->instance_table[h]); in instance_create()
147 spin_unlock(&q->instances_lock); in instance_create()
154 spin_unlock(&q->instances_lock); in instance_create()
180 instance_destroy(struct nfnl_queue_net *q, struct nfqnl_instance *inst) in instance_destroy() argument
182 spin_lock(&q->instances_lock); in instance_destroy()
[all …]
/linux-4.1.27/arch/alpha/include/asm/
Dcore_wildfire.h226 #define WILDFIRE_QBB(q) ((~((long)(q)) & WILDFIRE_QBB_MASK) << 36) argument
229 #define WILDFIRE_QBB_IO(q) (WILDFIRE_BASE | WILDFIRE_QBB(q)) argument
230 #define WILDFIRE_QBB_HOSE(q,h) (WILDFIRE_QBB_IO(q) | WILDFIRE_HOSE(h)) argument
232 #define WILDFIRE_MEM(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x000000000UL) argument
233 #define WILDFIRE_CONF(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FE000000UL) argument
234 #define WILDFIRE_IO(q,h) (WILDFIRE_QBB_HOSE(q,h) | 0x1FF000000UL) argument
236 #define WILDFIRE_qsd(q) \ argument
237 ((wildfire_qsd *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSD_ENTITY_SLOW|(((1UL<<13)-1)<<23)))
242 #define WILDFIRE_qsa(q) \ argument
243 ((wildfire_qsa *)(WILDFIRE_QBB_IO(q)|WILDFIRE_QSA_ENTITY|(((1UL<<13)-1)<<23)))
[all …]
/linux-4.1.27/drivers/net/wireless/ath/ath9k/
Dmac.c46 u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q) in ath9k_hw_gettxbuf() argument
48 return REG_READ(ah, AR_QTXDP(q)); in ath9k_hw_gettxbuf()
52 void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp) in ath9k_hw_puttxbuf() argument
54 REG_WRITE(ah, AR_QTXDP(q), txdp); in ath9k_hw_puttxbuf()
58 void ath9k_hw_txstart(struct ath_hw *ah, u32 q) in ath9k_hw_txstart() argument
60 ath_dbg(ath9k_hw_common(ah), QUEUE, "Enable TXE on queue: %u\n", q); in ath9k_hw_txstart()
61 REG_WRITE(ah, AR_Q_TXE, 1 << q); in ath9k_hw_txstart()
65 u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) in ath9k_hw_numtxpending() argument
69 npend = REG_READ(ah, AR_QSTS(q)) & AR_Q_STS_PEND_FR_CNT; in ath9k_hw_numtxpending()
72 if (REG_READ(ah, AR_Q_TXE) & (1 << q)) in ath9k_hw_numtxpending()
[all …]
/linux-4.1.27/drivers/tty/vt/
Dconsolemap.c190 unsigned char *q; in set_inverse_transl() local
193 q = p->inverse_translations[i]; in set_inverse_transl()
195 if (!q) { in set_inverse_transl()
196 q = p->inverse_translations[i] = kmalloc(MAX_GLYPH, GFP_KERNEL); in set_inverse_transl()
197 if (!q) return; in set_inverse_transl()
199 memset(q, 0, MAX_GLYPH); in set_inverse_transl()
203 if (glyph >= 0 && glyph < MAX_GLYPH && q[glyph] < 32) { in set_inverse_transl()
205 q[glyph] = j; in set_inverse_transl()
215 u16 *q; in set_inverse_trans_unicode() local
218 q = p->inverse_trans_unicode; in set_inverse_trans_unicode()
[all …]
/linux-4.1.27/drivers/scsi/be2iscsi/
Dbe.h59 static inline void *queue_head_node(struct be_queue_info *q) in queue_head_node() argument
61 return q->dma_mem.va + q->head * q->entry_size; in queue_head_node()
64 static inline void *queue_get_wrb(struct be_queue_info *q, unsigned int wrb_num) in queue_get_wrb() argument
66 return q->dma_mem.va + wrb_num * q->entry_size; in queue_get_wrb()
69 static inline void *queue_tail_node(struct be_queue_info *q) in queue_tail_node() argument
71 return q->dma_mem.va + q->tail * q->entry_size; in queue_tail_node()
74 static inline void queue_head_inc(struct be_queue_info *q) in queue_head_inc() argument
76 index_inc(&q->head, q->len); in queue_head_inc()
79 static inline void queue_tail_inc(struct be_queue_info *q) in queue_tail_inc() argument
81 index_inc(&q->tail, q->len); in queue_tail_inc()
[all …]
/linux-4.1.27/drivers/net/fddi/skfp/
Dqueue.c36 smc->q.ev_put = smc->q.ev_get = smc->q.ev_queue ; in ev_init()
45 smc->q.ev_put->class = class ; in queue_event()
46 smc->q.ev_put->event = event ; in queue_event()
47 if (++smc->q.ev_put == &smc->q.ev_queue[MAX_EVENT]) in queue_event()
48 smc->q.ev_put = smc->q.ev_queue ; in queue_event()
50 if (smc->q.ev_put == smc->q.ev_get) { in queue_event()
78 ev = smc->q.ev_get ; in ev_dispatcher()
79 PRINTF("dispatch get %x put %x\n",ev,smc->q.ev_put) ; in ev_dispatcher()
80 while (ev != smc->q.ev_put) { in ev_dispatcher()
112 if (++ev == &smc->q.ev_queue[MAX_EVENT]) in ev_dispatcher()
[all …]
/linux-4.1.27/lib/
Dts_kmp.c49 unsigned int i, q = 0, text_len, consumed = state->offset; in kmp_find() local
60 while (q > 0 && kmp->pattern[q] in kmp_find()
62 q = kmp->prefix_tbl[q - 1]; in kmp_find()
63 if (kmp->pattern[q] in kmp_find()
65 q++; in kmp_find()
66 if (unlikely(q == kmp->pattern_len)) { in kmp_find()
81 unsigned int k, q; in compute_prefix_tbl() local
84 for (k = 0, q = 1; q < len; q++) { in compute_prefix_tbl()
86 != (icase ? toupper(pattern[q]) : pattern[q])) in compute_prefix_tbl()
89 == (icase ? toupper(pattern[q]) : pattern[q])) in compute_prefix_tbl()
[all …]
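
compute_prefix_tbl() above is the classic Knuth-Morris-Pratt failure function: prefix_tbl[q] is the length of the longest proper prefix of the pattern that is also a suffix of pattern[0..q], which is what lets kmp_find() resume matching without rescanning consumed text. The same loop in user space, minus the case-folding option:

    #include <stdio.h>
    #include <string.h>

    static void compute_prefix_tbl(const char *pat, unsigned *tbl)
    {
        unsigned k = 0, q, len = strlen(pat);

        tbl[0] = 0;
        for (q = 1; q < len; q++) {
            while (k > 0 && pat[k] != pat[q])
                k = tbl[k - 1];      /* fall back to shorter border */
            if (pat[k] == pat[q])
                k++;                 /* extend the current border */
            tbl[q] = k;
        }
    }

    int main(void)
    {
        const char *pat = "ababaca";
        unsigned tbl[16], i;

        compute_prefix_tbl(pat, tbl);
        for (i = 0; i < strlen(pat); i++)
            printf("%u ", tbl[i]);   /* prints: 0 0 1 2 3 0 1 */
        printf("\n");
        return 0;
    }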
Dstring_helpers.c130 char *p = *dst, *q = *src; in unescape_space() local
132 switch (*q) { in unescape_space()
158 char *p = *dst, *q = *src; in unescape_octal() local
161 if (isodigit(*q) == 0) in unescape_octal()
164 num = (*q++) & 7; in unescape_octal()
165 while (num < 32 && isodigit(*q) && (q - *src < 3)) { in unescape_octal()
167 num += (*q++) & 7; in unescape_octal()
171 *src = q; in unescape_octal()
177 char *p = *dst, *q = *src; in unescape_hex() local
181 if (*q++ != 'x') in unescape_hex()
[all …]
Dcordic.c64 coord.q = 0; in cordic_calc_iq()
82 valtmp = coord.i - (coord.q >> iter); in cordic_calc_iq()
83 coord.q += (coord.i >> iter); in cordic_calc_iq()
86 valtmp = coord.i + (coord.q >> iter); in cordic_calc_iq()
87 coord.q -= (coord.i >> iter); in cordic_calc_iq()
94 coord.q *= signx; in cordic_calc_iq()
Dcrc32.c61 # define DO_CRC4 (t3[(q) & 255] ^ t2[(q >> 8) & 255] ^ \
62 t1[(q >> 16) & 255] ^ t0[(q >> 24) & 255])
63 # define DO_CRC8 (t7[(q) & 255] ^ t6[(q >> 8) & 255] ^ \
64 t5[(q >> 16) & 255] ^ t4[(q >> 24) & 255])
67 # define DO_CRC4 (t0[(q) & 255] ^ t1[(q >> 8) & 255] ^ \
68 t2[(q >> 16) & 255] ^ t3[(q >> 24) & 255])
69 # define DO_CRC8 (t4[(q) & 255] ^ t5[(q >> 8) & 255] ^ \
70 t6[(q >> 16) & 255] ^ t7[(q >> 24) & 255])
81 u32 q; local
105 q = crc ^ *++b; /* use pre increment for speed */
[all …]
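
The DO_CRC4/DO_CRC8 macros above implement "slice-by-4" and "slice-by-8" CRC32: q collects four or eight message bytes, and the parallel tables t0..t7 fold them all into the CRC in one step. The principle is easier to see in the one-byte-per-step table method those macros generalize; a minimal sketch for the reflected CRC-32 polynomial, with the table generated at startup so it runs standalone:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint32_t tab[256];

    static void crc32_init(void)
    {
        uint32_t i, j, c;

        for (i = 0; i < 256; i++) {
            c = i;
            for (j = 0; j < 8; j++)
                c = (c >> 1) ^ (0xedb88320u & (-(int32_t)(c & 1)));
            tab[i] = c;
        }
    }

    /* One lookup per byte; slice-by-N adds N-1 extra tables so
     * several bytes can be folded per load. */
    static uint32_t crc32(uint32_t crc, const void *buf, size_t len)
    {
        const uint8_t *p = buf;

        crc = ~crc;
        while (len--)
            crc = tab[(crc ^ *p++) & 0xff] ^ (crc >> 8);
        return ~crc;
    }

    int main(void)
    {
        crc32_init();
        printf("%08x\n", crc32(0, "123456789", 9)); /* cbf43926 */
        return 0;
    }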
/linux-4.1.27/net/ipv6/netfilter/
Dnf_conntrack_reasm.c165 static unsigned int nf_hashfn(const struct inet_frag_queue *q) in nf_hashfn() argument
169 nq = container_of(q, struct frag_queue, q); in nf_hashfn()
184 fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q); in nf_ct_frag6_expire()
185 net = container_of(fq->q.net, struct net, nf_frag.frags); in nf_ct_frag6_expire()
195 struct inet_frag_queue *q; in fq_find() local
209 q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash); in fq_find()
211 if (IS_ERR_OR_NULL(q)) { in fq_find()
212 inet_frag_maybe_warn_overflow(q, pr_fmt()); in fq_find()
215 return container_of(q, struct frag_queue, q); in fq_find()
227 if (fq->q.flags & INET_FRAG_COMPLETE) { in nf_ct_frag6_queue()
[all …]
/linux-4.1.27/kernel/
Dfutex.c1078 static void __unqueue_futex(struct futex_q *q) in __unqueue_futex() argument
1082 if (WARN_ON_SMP(!q->lock_ptr || !spin_is_locked(q->lock_ptr)) in __unqueue_futex()
1083 || WARN_ON(plist_node_empty(&q->list))) in __unqueue_futex()
1086 hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); in __unqueue_futex()
1087 plist_del(&q->list, &hb->chain); in __unqueue_futex()
1095 static void wake_futex(struct futex_q *q) in wake_futex() argument
1097 struct task_struct *p = q->task; in wake_futex()
1099 if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) in wake_futex()
1111 __unqueue_futex(q); in wake_futex()
1119 q->lock_ptr = NULL; in wake_futex()
[all …]
Dlatencytop.c105 int q, same = 1; in account_global_scheduler_latency() local
113 for (q = 0; q < LT_BACKTRACEDEPTH; q++) { in account_global_scheduler_latency()
114 unsigned long record = lat->backtrace[q]; in account_global_scheduler_latency()
116 if (latency_record[i].backtrace[q] != record) { in account_global_scheduler_latency()
176 int i, q; in __account_scheduler_latency() local
203 for (q = 0; q < LT_BACKTRACEDEPTH; q++) { in __account_scheduler_latency()
204 unsigned long record = lat.backtrace[q]; in __account_scheduler_latency()
206 if (mylat->backtrace[q] != record) { in __account_scheduler_latency()
248 int q; in lstats_show() local
251 for (q = 0; q < LT_BACKTRACEDEPTH; q++) { in lstats_show()
[all …]
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/
Dfm10k_common.c341 struct fm10k_hw_stats_q *q, in fm10k_update_hw_stats_tx_q() argument
353 &q->tx_packets); in fm10k_update_hw_stats_tx_q()
358 &q->tx_bytes); in fm10k_update_hw_stats_tx_q()
370 if (q->tx_stats_idx == id_tx) { in fm10k_update_hw_stats_tx_q()
371 q->tx_packets.count += tx_packets; in fm10k_update_hw_stats_tx_q()
372 q->tx_bytes.count += tx_bytes; in fm10k_update_hw_stats_tx_q()
376 fm10k_update_hw_base_32b(&q->tx_packets, tx_packets); in fm10k_update_hw_stats_tx_q()
377 fm10k_update_hw_base_48b(&q->tx_bytes, tx_bytes); in fm10k_update_hw_stats_tx_q()
379 q->tx_stats_idx = id_tx; in fm10k_update_hw_stats_tx_q()
392 struct fm10k_hw_stats_q *q, in fm10k_update_hw_stats_rx_q() argument
[all …]
/linux-4.1.27/mm/
Dquicklist.c50 static long min_pages_to_free(struct quicklist *q, in min_pages_to_free() argument
55 pages_to_free = q->nr_pages - max_pages(min_pages); in min_pages_to_free()
67 struct quicklist *q; in quicklist_trim() local
69 q = &get_cpu_var(quicklist)[nr]; in quicklist_trim()
70 if (q->nr_pages > min_pages) { in quicklist_trim()
71 pages_to_free = min_pages_to_free(q, min_pages, max_free); in quicklist_trim()
93 struct quicklist *ql, *q; in quicklist_total_size() local
97 for (q = ql; q < ql + CONFIG_NR_QUICK; q++) in quicklist_total_size()
98 count += q->nr_pages; in quicklist_total_size()
/linux-4.1.27/include/trace/events/
Dblock.h66 TP_PROTO(struct request_queue *q, struct request *rq),
68 TP_ARGS(q, rq),
110 TP_PROTO(struct request_queue *q, struct request *rq),
112 TP_ARGS(q, rq)
126 TP_PROTO(struct request_queue *q, struct request *rq),
128 TP_ARGS(q, rq)
145 TP_PROTO(struct request_queue *q, struct request *rq,
148 TP_ARGS(q, rq, nr_bytes),
178 TP_PROTO(struct request_queue *q, struct request *rq),
180 TP_ARGS(q, rq),
[all …]
/linux-4.1.27/drivers/media/common/saa7146/
Dsaa7146_fops.c51 void saa7146_dma_free(struct saa7146_dev *dev,struct videobuf_queue *q, in saa7146_dma_free() argument
59 videobuf_waiton(q, &buf->vb, 0, 0); in saa7146_dma_free()
60 videobuf_dma_unmap(q->dev, dma); in saa7146_dma_free()
70 struct saa7146_dmaqueue *q, in saa7146_buffer_queue() argument
74 DEB_EE("dev:%p, dmaq:%p, buf:%p\n", dev, q, buf); in saa7146_buffer_queue()
76 BUG_ON(!q); in saa7146_buffer_queue()
78 if (NULL == q->curr) { in saa7146_buffer_queue()
79 q->curr = buf; in saa7146_buffer_queue()
83 list_add_tail(&buf->vb.queue,&q->queue); in saa7146_buffer_queue()
92 struct saa7146_dmaqueue *q, in saa7146_buffer_finish() argument
[all …]
/linux-4.1.27/drivers/media/pci/cx88/
Dcx88-vbi.c47 struct cx88_dmaqueue *q, in cx8800_start_vbi_dma() argument
62 q->count = 1; in cx8800_start_vbi_dma()
94 struct cx88_dmaqueue *q) in cx8800_restart_vbi_queue() argument
98 if (list_empty(&q->active)) in cx8800_restart_vbi_queue()
101 buf = list_entry(q->active.next, struct cx88_buffer, list); in cx8800_restart_vbi_queue()
104 cx8800_start_vbi_dma(dev, q, buf); in cx8800_restart_vbi_queue()
105 list_for_each_entry(buf, &q->active, list) in cx8800_restart_vbi_queue()
106 buf->count = q->count++; in cx8800_restart_vbi_queue()
112 static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, in queue_setup() argument
116 struct cx8800_dev *dev = q->drv_priv; in queue_setup()
[all …]
/linux-4.1.27/include/net/
Dinet_frag.h97 bool (*match)(const struct inet_frag_queue *q,
99 void (*constructor)(struct inet_frag_queue *q,
114 void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
115 void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
119 void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
122 static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f) in inet_frag_put() argument
124 if (atomic_dec_and_test(&q->refcnt)) in inet_frag_put()
125 inet_frag_destroy(q, f); in inet_frag_put()
142 static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) in sub_frag_mem_limit() argument
144 __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); in sub_frag_mem_limit()
[all …]
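
inet_frag_put() above is the standard last-reference idiom: atomically drop the count and let whichever context observes the transition to zero run the destructor, with no lock held. The same shape with C11 atomics (toy object, not the fragment queue):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        atomic_int refcnt;
    };

    static void obj_destroy(struct obj *o)
    {
        printf("destroying %p\n", (void *)o);
        free(o);
    }

    /* Exactly one caller sees the 1 -> 0 transition, so the
     * destructor runs once. */
    static void obj_put(struct obj *o)
    {
        if (atomic_fetch_sub(&o->refcnt, 1) == 1)
            obj_destroy(o);
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));

        atomic_init(&o->refcnt, 2);
        obj_put(o); /* still alive */
        obj_put(o); /* freed here */
        return 0;
    }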
Dpkt_sched.h19 static inline void *qdisc_priv(struct Qdisc *q) in qdisc_priv() argument
21 return (char *) q + QDISC_ALIGN(sizeof(struct Qdisc)); in qdisc_priv()
83 int fifo_set_limit(struct Qdisc *q, unsigned int limit);
92 void qdisc_list_add(struct Qdisc *q);
93 void qdisc_list_del(struct Qdisc *q);
101 int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
105 void __qdisc_run(struct Qdisc *q);
107 static inline void qdisc_run(struct Qdisc *q) in qdisc_run() argument
109 if (qdisc_run_begin(q)) in qdisc_run()
110 __qdisc_run(q); in qdisc_run()
Dsch_generic.h74 struct Qdisc *q);
94 struct sk_buff_head q; member
248 struct Qdisc *q; member
270 static inline int qdisc_qlen(const struct Qdisc *q) in qdisc_qlen() argument
272 return q->q.qlen; in qdisc_qlen()
282 return &qdisc->q.lock; in qdisc_lock()
287 struct Qdisc *q = rcu_dereference_rtnl(qdisc->dev_queue->qdisc); in qdisc_root() local
289 return q; in qdisc_root()
329 static inline void sch_tree_lock(const struct Qdisc *q) in sch_tree_lock() argument
331 spin_lock_bh(qdisc_root_sleeping_lock(q)); in sch_tree_lock()
[all …]
/linux-4.1.27/kernel/trace/
Dblktrace.c310 int blk_trace_remove(struct request_queue *q) in blk_trace_remove() argument
314 bt = xchg(&q->blk_trace, NULL); in blk_trace_remove()
436 int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev, in do_blk_trace_setup() argument
524 old_bt = xchg(&q->blk_trace, bt); in do_blk_trace_setup()
526 (void) xchg(&q->blk_trace, old_bt); in do_blk_trace_setup()
539 int blk_trace_setup(struct request_queue *q, char *name, dev_t dev, in blk_trace_setup() argument
550 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); in blk_trace_setup()
555 blk_trace_remove(q); in blk_trace_setup()
563 static int compat_blk_trace_setup(struct request_queue *q, char *name, in compat_blk_trace_setup() argument
583 ret = do_blk_trace_setup(q, name, dev, bdev, &buts); in compat_blk_trace_setup()
[all …]
/linux-4.1.27/drivers/scsi/aacraid/
Ddpcsup.c54 unsigned int aac_response_normal(struct aac_queue * q) in aac_response_normal() argument
56 struct aac_dev * dev = q->dev; in aac_response_normal()
63 spin_lock_irqsave(q->lock, flags); in aac_response_normal()
70 while(aac_consumer_get(dev, q, &entry)) in aac_response_normal()
78 aac_consumer_free(dev, q, HostNormRespQueue); in aac_response_normal()
90 spin_unlock_irqrestore(q->lock, flags); in aac_response_normal()
93 spin_lock_irqsave(q->lock, flags); in aac_response_normal()
96 spin_unlock_irqrestore(q->lock, flags); in aac_response_normal()
150 spin_lock_irqsave(q->lock, flags); in aac_response_normal()
158 spin_unlock_irqrestore(q->lock, flags); in aac_response_normal()
[all …]
/linux-4.1.27/drivers/firewire/
Dcore-topology.c37 #define SELF_ID_PHY_ID(q) (((q) >> 24) & 0x3f) argument
38 #define SELF_ID_EXTENDED(q) (((q) >> 23) & 0x01) argument
39 #define SELF_ID_LINK_ON(q) (((q) >> 22) & 0x01) argument
40 #define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f) argument
41 #define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03) argument
42 #define SELF_ID_CONTENDER(q) (((q) >> 11) & 0x01) argument
43 #define SELF_ID_PHY_INITIATOR(q) (((q) >> 1) & 0x01) argument
44 #define SELF_ID_MORE_PACKETS(q) (((q) >> 0) & 0x01) argument
46 #define SELF_ID_EXT_SEQUENCE(q) (((q) >> 20) & 0x07) argument
55 u32 q; in count_ports() local
[all …]
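
The SELF_ID_* macros above unpack a FireWire self-ID quadlet with plain shift-and-mask pairs; the field layout is fixed by IEEE 1394, the macros just name the offsets. A quick standalone decode (the quadlet value is invented for the demonstration):

    #include <stdint.h>
    #include <stdio.h>

    #define SELF_ID_PHY_ID(q)    (((q) >> 24) & 0x3f)
    #define SELF_ID_LINK_ON(q)   (((q) >> 22) & 0x01)
    #define SELF_ID_GAP_COUNT(q) (((q) >> 16) & 0x3f)
    #define SELF_ID_PHY_SPEED(q) (((q) >> 14) & 0x03)

    int main(void)
    {
        uint32_t q = (5u << 24) | (1u << 22) | (3u << 16) | (2u << 14);

        printf("phy %u link %u gap %u speed %u\n",
               (unsigned)SELF_ID_PHY_ID(q),
               (unsigned)SELF_ID_LINK_ON(q),
               (unsigned)SELF_ID_GAP_COUNT(q),
               (unsigned)SELF_ID_PHY_SPEED(q));
        /* prints: phy 5 link 1 gap 3 speed 2 */
        return 0;
    }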
/linux-4.1.27/lib/mpi/
Dmpih-div.c117 mpi_limb_t q; in mpihelp_divrem() local
129 q = ~(mpi_limb_t) 0; in mpihelp_divrem()
135 qp[i] = q; in mpihelp_divrem()
141 udiv_qrnnd(q, r, n1, n0, d1); in mpihelp_divrem()
142 umul_ppmm(n1, n0, d0, q); in mpihelp_divrem()
149 q--; in mpihelp_divrem()
156 qp[i] = q; in mpihelp_divrem()
184 mpi_limb_t q; in mpihelp_divrem() local
200 q = ~(mpi_limb_t) 0; in mpihelp_divrem()
204 udiv_qrnnd(q, r, n0, np[dsize - 1], dX); in mpihelp_divrem()
[all …]
/linux-4.1.27/fs/jffs2/
Dcompr_rubin.c39 unsigned long q; member
92 rs->q = 0; in init_rubin()
108 while ((rs->q >= UPPER_BIT_RUBIN) || in encode()
109 ((rs->p + rs->q) <= UPPER_BIT_RUBIN)) { in encode()
112 ret = pushbit(&rs->pp, (rs->q & UPPER_BIT_RUBIN) ? 1 : 0, 0); in encode()
115 rs->q &= LOWER_BITS_RUBIN; in encode()
116 rs->q <<= 1; in encode()
132 rs->q += i0; in encode()
144 pushbit(&rs->pp, (UPPER_BIT_RUBIN & rs->q) ? 1 : 0, 1); in end_rubin()
145 rs->q &= LOWER_BITS_RUBIN; in end_rubin()
[all …]
/linux-4.1.27/drivers/scsi/
Dscsi_lib.c122 struct request_queue *q = cmd->request->q; in scsi_mq_requeue_cmd() local
125 blk_mq_kick_requeue_list(q); in scsi_mq_requeue_cmd()
144 struct request_queue *q = device->request_queue; in __scsi_queue_insert() local
166 if (q->mq_ops) { in __scsi_queue_insert()
170 spin_lock_irqsave(q->queue_lock, flags); in __scsi_queue_insert()
171 blk_requeue_request(q, cmd->request); in __scsi_queue_insert()
173 spin_unlock_irqrestore(q->queue_lock, flags); in __scsi_queue_insert()
244 blk_execute_rq(req->q, NULL, req, 1); in scsi_execute()
328 static void scsi_kick_queue(struct request_queue *q) in scsi_kick_queue() argument
330 if (q->mq_ops) in scsi_kick_queue()
[all …]
Dgvp11.c213 unsigned char q, qq; in check_wd33c93() local
232 q = *sasr_3393; /* read it */ in check_wd33c93()
233 if (q & 0x08) /* bit 3 should always be clear */ in check_wd33c93()
240 if (*sasr_3393 != q) { /* should still read the same */ in check_wd33c93()
244 if (*scmd_3393 != q) /* and so should the image at 0x1f */ in check_wd33c93()
254 q = *scmd_3393; in check_wd33c93()
256 *scmd_3393 = ~q; in check_wd33c93()
260 *scmd_3393 = q; in check_wd33c93()
261 if (qq != q) /* should be read only */ in check_wd33c93()
264 q = *scmd_3393; in check_wd33c93()
[all …]
/linux-4.1.27/sound/core/
Dmisc.c121 const struct snd_pci_quirk *q; in snd_pci_quirk_lookup_id() local
123 for (q = list; q->subvendor; q++) { in snd_pci_quirk_lookup_id()
124 if (q->subvendor != vendor) in snd_pci_quirk_lookup_id()
126 if (!q->subdevice || in snd_pci_quirk_lookup_id()
127 (device & q->subdevice_mask) == q->subdevice) in snd_pci_quirk_lookup_id()
128 return q; in snd_pci_quirk_lookup_id()
/linux-4.1.27/drivers/mmc/card/
Dqueue.c29 static int mmc_prep_request(struct request_queue *q, struct request *req) in mmc_prep_request() argument
31 struct mmc_queue *mq = q->queuedata; in mmc_prep_request()
52 struct request_queue *q = mq->queue; in mmc_queue_thread() local
62 spin_lock_irq(q->queue_lock); in mmc_queue_thread()
64 req = blk_fetch_request(q); in mmc_queue_thread()
66 spin_unlock_irq(q->queue_lock); in mmc_queue_thread()
113 static void mmc_request_fn(struct request_queue *q) in mmc_request_fn() argument
115 struct mmc_queue *mq = q->queuedata; in mmc_request_fn()
121 while ((req = blk_fetch_request(q)) != NULL) { in mmc_request_fn()
160 static void mmc_queue_setup_discard(struct request_queue *q, in mmc_queue_setup_discard() argument
[all …]
/linux-4.1.27/include/crypto/
Db128ops.h64 static inline void u128_xor(u128 *r, const u128 *p, const u128 *q) in u128_xor() argument
66 r->a = p->a ^ q->a; in u128_xor()
67 r->b = p->b ^ q->b; in u128_xor()
70 static inline void be128_xor(be128 *r, const be128 *p, const be128 *q) in be128_xor() argument
72 u128_xor((u128 *)r, (u128 *)p, (u128 *)q); in be128_xor()
75 static inline void le128_xor(le128 *r, const le128 *p, const le128 *q) in le128_xor() argument
77 u128_xor((u128 *)r, (u128 *)p, (u128 *)q); in le128_xor()
/linux-4.1.27/drivers/pcmcia/
Dcistpl.c666 u_char *p, *q; in parse_device() local
669 q = p + tuple->TupleDataLen; in parse_device()
695 if (++p == q) in parse_device()
699 if (++p == q) in parse_device()
706 if (++p == q) in parse_device()
715 if (++p == q) in parse_device()
764 static int parse_strings(u_char *p, u_char *q, int max, in parse_strings() argument
769 if (p == q) in parse_strings()
781 if (++p == q) in parse_strings()
784 if ((*p == 0xff) || (++p == q)) in parse_strings()
[all …]
Drsrc_nonstatic.c113 struct resource_map *p, *q; in add_interval() local
123 q = kmalloc(sizeof(struct resource_map), GFP_KERNEL); in add_interval()
124 if (!q) { in add_interval()
128 q->base = base; q->num = num; in add_interval()
129 q->next = p->next; p->next = q; in add_interval()
137 struct resource_map *p, *q; in sub_interval() local
139 for (p = map; ; p = q) { in sub_interval()
140 q = p->next; in sub_interval()
141 if (q == map) in sub_interval()
143 if ((q->base+q->num > base) && (base+num > q->base)) { in sub_interval()
[all …]
/linux-4.1.27/drivers/md/
Ddm-cache-policy-mq.c140 static void queue_init(struct queue *q) in queue_init() argument
144 q->nr_elts = 0; in queue_init()
145 q->current_writeback_sentinels = false; in queue_init()
146 q->next_writeback = 0; in queue_init()
148 INIT_LIST_HEAD(q->qs + i); in queue_init()
149 INIT_LIST_HEAD(q->sentinels + i); in queue_init()
150 INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i); in queue_init()
151 INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i); in queue_init()
155 static unsigned queue_size(struct queue *q) in queue_size() argument
157 return q->nr_elts; in queue_size()
[all …]
Ddm-table.c280 struct request_queue *q; in device_area_is_invalid() local
294 q = bdev_get_queue(bdev); in device_area_is_invalid()
295 if (!q || !q->make_request_fn) { in device_area_is_invalid()
424 struct request_queue *q = bdev_get_queue(bdev); in dm_set_device_limits() local
427 if (unlikely(!q)) { in dm_set_device_limits()
438 q->limits.physical_block_size, in dm_set_device_limits()
439 q->limits.logical_block_size, in dm_set_device_limits()
440 q->limits.alignment_offset, in dm_set_device_limits()
448 if (dm_queue_merge_is_compulsory(q) && !ti->type->merge) in dm_set_device_limits()
889 struct request_queue *q = bdev_get_queue(dd->dm_dev->bdev); in dm_table_set_type() local
[all …]
/linux-4.1.27/drivers/media/pci/cx18/
Dcx18-queue.c50 void cx18_queue_init(struct cx18_queue *q) in cx18_queue_init() argument
52 INIT_LIST_HEAD(&q->list); in cx18_queue_init()
53 atomic_set(&q->depth, 0); in cx18_queue_init()
54 q->bytesused = 0; in cx18_queue_init()
58 struct cx18_queue *q, int to_front) in _cx18_enqueue() argument
61 if (q != &s->q_full) { in _cx18_enqueue()
70 if (q == &s->q_busy && in _cx18_enqueue()
71 atomic_read(&q->depth) >= CX18_MAX_FW_MDLS_PER_STREAM) in _cx18_enqueue()
72 q = &s->q_free; in _cx18_enqueue()
74 spin_lock(&q->lock); in _cx18_enqueue()
[all …]
Dcx18-queue.h70 struct cx18_queue *q, int to_front);
74 struct cx18_queue *q) in cx18_enqueue() argument
76 return _cx18_enqueue(s, mdl, q, 0); /* FIFO */ in cx18_enqueue()
81 struct cx18_queue *q) in cx18_push() argument
83 return _cx18_enqueue(s, mdl, q, 1); /* LIFO */ in cx18_push()
86 void cx18_queue_init(struct cx18_queue *q);
87 struct cx18_mdl *cx18_dequeue(struct cx18_stream *s, struct cx18_queue *q);
/linux-4.1.27/sound/oss/
Dmidibuf.c58 #define DATA_AVAIL(q) (q->len) argument
59 #define SPACE_AVAIL(q) (MAX_QUEUE_SIZE - q->len) argument
61 #define QUEUE_BYTE(q, data) \ argument
62 if (SPACE_AVAIL(q)) \
66 q->queue[q->tail] = (data); \
67 q->len++; q->tail = (q->tail+1) % MAX_QUEUE_SIZE; \
71 #define REMOVE_BYTE(q, data) \ argument
72 if (DATA_AVAIL(q)) \
76 data = q->queue[q->head]; \
77 q->len--; q->head = (q->head+1) % MAX_QUEUE_SIZE; \
Dsequencer.c360 static int extended_event(unsigned char *q) in extended_event() argument
362 int dev = q[2]; in extended_event()
370 switch (q[1]) in extended_event()
373 synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]); in extended_event()
377 if (q[4] > 127 && q[4] != 255) in extended_event()
380 if (q[5] == 0) in extended_event()
382 synth_devs[dev]->kill_note(dev, q[3], q[4], q[5]); in extended_event()
385 synth_devs[dev]->start_note(dev, q[3], q[4], q[5]); in extended_event()
389 synth_devs[dev]->set_instr(dev, q[3], q[4]); in extended_event()
393 synth_devs[dev]->aftertouch(dev, q[3], q[4]); in extended_event()
[all …]
/linux-4.1.27/arch/mips/math-emu/
Dsp_sqrt.c26 int ix, s, q, m, t, i; in ieee754sp_sqrt() local
85 q = s = 0; /* q = sqrt(x) */ in ieee754sp_sqrt()
93 q += r; in ieee754sp_sqrt()
103 q += 2; in ieee754sp_sqrt()
106 q += (q & 1); in ieee754sp_sqrt()
110 ix = (q >> 1) + 0x3f000000; in ieee754sp_sqrt()
/linux-4.1.27/fs/xfs/
Dxfs_qm_syscalls.c55 struct xfs_quotainfo *q = mp->m_quotainfo; in xfs_qm_scall_quotaoff() local
78 ASSERT(q); in xfs_qm_scall_quotaoff()
79 mutex_lock(&q->qi_quotaofflock); in xfs_qm_scall_quotaoff()
90 mutex_unlock(&q->qi_quotaofflock); in xfs_qm_scall_quotaoff()
195 mutex_unlock(&q->qi_quotaofflock); in xfs_qm_scall_quotaoff()
203 if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) { in xfs_qm_scall_quotaoff()
204 IRELE(q->qi_uquotaip); in xfs_qm_scall_quotaoff()
205 q->qi_uquotaip = NULL; in xfs_qm_scall_quotaoff()
207 if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) { in xfs_qm_scall_quotaoff()
208 IRELE(q->qi_gquotaip); in xfs_qm_scall_quotaoff()
[all …]
Dxfs_quotaops.c39 struct xfs_quotainfo *q = mp->m_quotainfo; in xfs_qm_fill_state() local
53 tstate->spc_timelimit = q->qi_btimelimit; in xfs_qm_fill_state()
54 tstate->ino_timelimit = q->qi_itimelimit; in xfs_qm_fill_state()
55 tstate->rt_spc_timelimit = q->qi_rtbtimelimit; in xfs_qm_fill_state()
56 tstate->spc_warnlimit = q->qi_bwarnlimit; in xfs_qm_fill_state()
57 tstate->ino_warnlimit = q->qi_iwarnlimit; in xfs_qm_fill_state()
58 tstate->rt_spc_warnlimit = q->qi_rtbwarnlimit; in xfs_qm_fill_state()
73 struct xfs_quotainfo *q = mp->m_quotainfo; in xfs_fs_get_quota_state() local
78 state->s_incoredqs = q->qi_dquots; in xfs_fs_get_quota_state()
92 xfs_qm_fill_state(&state->s_state[USRQUOTA], mp, q->qi_uquotaip, in xfs_fs_get_quota_state()
[all …]
/linux-4.1.27/net/sunrpc/
Dsched.c103 struct list_head *q = &queue->tasks[queue->priority]; in rpc_rotate_queue_owner() local
106 if (!list_empty(q)) { in rpc_rotate_queue_owner()
107 task = list_first_entry(q, struct rpc_task, u.tk_wait.list); in rpc_rotate_queue_owner()
109 list_move_tail(&task->u.tk_wait.list, q); in rpc_rotate_queue_owner()
141 struct list_head *q; in __rpc_add_wait_queue_priority() local
149 q = &queue->tasks[queue_priority]; in __rpc_add_wait_queue_priority()
150 list_for_each_entry(t, q, u.tk_wait.list) { in __rpc_add_wait_queue_priority()
156 list_add_tail(&task->u.tk_wait.list, q); in __rpc_add_wait_queue_priority()
352 static void __rpc_sleep_on_priority(struct rpc_wait_queue *q, in __rpc_sleep_on_priority() argument
358 task->tk_pid, rpc_qname(q), jiffies); in __rpc_sleep_on_priority()
[all …]
/linux-4.1.27/drivers/gpu/drm/radeon/
Dradeon_mem.c118 struct mem_block *q = p->next; in free_block() local
119 p->size += q->size; in free_block()
120 p->next = q->next; in free_block()
122 kfree(q); in free_block()
126 struct mem_block *q = p->prev; in free_block() local
127 q->size += p->size; in free_block()
128 q->next = p->next; in free_block()
129 q->next->prev = q; in free_block()
178 struct mem_block *q = p->next; in radeon_mem_release() local
179 p->size += q->size; in radeon_mem_release()
[all …]
/linux-4.1.27/drivers/infiniband/hw/mthca/
Dmthca_mad.c287 int p, q; in mthca_create_agents() local
293 for (q = 0; q <= 1; ++q) { in mthca_create_agents()
295 q ? IB_QPT_GSI : IB_QPT_SMI, in mthca_create_agents()
302 dev->send_agent[p][q] = agent; in mthca_create_agents()
319 for (q = 0; q <= 1; ++q) in mthca_create_agents()
320 if (dev->send_agent[p][q]) in mthca_create_agents()
321 ib_unregister_mad_agent(dev->send_agent[p][q]); in mthca_create_agents()
329 int p, q; in mthca_free_agents() local
332 for (q = 0; q <= 1; ++q) { in mthca_free_agents()
333 agent = dev->send_agent[p][q]; in mthca_free_agents()
[all …]
/linux-4.1.27/drivers/scsi/device_handler/
Dscsi_dh.c410 int scsi_dh_activate(struct request_queue *q, activate_complete fn, void *data) in scsi_dh_activate() argument
418 spin_lock_irqsave(q->queue_lock, flags); in scsi_dh_activate()
419 sdev = q->queuedata; in scsi_dh_activate()
421 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_dh_activate()
437 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_dh_activate()
463 int scsi_dh_set_params(struct request_queue *q, const char *params) in scsi_dh_set_params() argument
470 spin_lock_irqsave(q->queue_lock, flags); in scsi_dh_set_params()
471 sdev = q->queuedata; in scsi_dh_set_params()
476 spin_unlock_irqrestore(q->queue_lock, flags); in scsi_dh_set_params()
503 int scsi_dh_attach(struct request_queue *q, const char *name) in scsi_dh_attach() argument
[all …]
/linux-4.1.27/drivers/media/pci/ivtv/
Divtv-queue.c44 void ivtv_queue_init(struct ivtv_queue *q) in ivtv_queue_init() argument
46 INIT_LIST_HEAD(&q->list); in ivtv_queue_init()
47 q->buffers = 0; in ivtv_queue_init()
48 q->length = 0; in ivtv_queue_init()
49 q->bytesused = 0; in ivtv_queue_init()
52 void ivtv_enqueue(struct ivtv_stream *s, struct ivtv_buffer *buf, struct ivtv_queue *q) in ivtv_enqueue() argument
57 if (q == &s->q_free) { in ivtv_enqueue()
64 list_add_tail(&buf->list, &q->list); in ivtv_enqueue()
65 q->buffers++; in ivtv_enqueue()
66 q->length += s->buf_size; in ivtv_enqueue()
[all …]
/linux-4.1.27/tools/power/cpupower/utils/helpers/
Dbitmask.c104 static const char *nexttoken(const char *q, int sep) in nexttoken() argument
106 if (q) in nexttoken()
107 q = strchr(q, sep); in nexttoken()
108 if (q) in nexttoken()
109 q++; in nexttoken()
110 return q; in nexttoken()
193 const char *p, *q; in bitmask_parselist() local
197 q = buf; in bitmask_parselist()
198 while (p = q, q = nexttoken(q, ','), p) { in bitmask_parselist()
/linux-4.1.27/drivers/block/
Dnull_blk.c33 struct request_queue *q; member
313 static void null_queue_bio(struct request_queue *q, struct bio *bio) in null_queue_bio() argument
315 struct nullb *nullb = q->queuedata; in null_queue_bio()
325 static int null_rq_prep_fn(struct request_queue *q, struct request *req) in null_rq_prep_fn() argument
327 struct nullb *nullb = q->queuedata; in null_rq_prep_fn()
341 static void null_request_fn(struct request_queue *q) in null_request_fn() argument
345 while ((rq = blk_fetch_request(q)) != NULL) { in null_request_fn()
348 spin_unlock_irq(q->queue_lock); in null_request_fn()
350 spin_lock_irq(q->queue_lock); in null_request_fn()
402 blk_cleanup_queue(nullb->q); in null_del_dev()
[all …]
Dosdblk.c107 struct request_queue *q; member
295 static void osdblk_rq_fn(struct request_queue *q) in osdblk_rq_fn() argument
297 struct osdblk_device *osdev = q->queuedata; in osdblk_rq_fn()
307 rq = blk_fetch_request(q); in osdblk_rq_fn()
368 blk_requeue_request(q, rq); in osdblk_rq_fn()
398 struct request_queue *q; in osdblk_init_disk() local
419 q = blk_init_queue(osdblk_rq_fn, &osdev->lock); in osdblk_init_disk()
420 if (!q) { in osdblk_init_disk()
426 rc = blk_queue_init_tags(q, OSDBLK_MAX_REQ, NULL, BLK_TAG_ALLOC_FIFO); in osdblk_init_disk()
428 blk_cleanup_queue(q); in osdblk_init_disk()
[all …]
/linux-4.1.27/net/8021q/
DMakefile5 obj-$(CONFIG_VLAN_8021Q) += 8021q.o
7 8021q-y := vlan.o vlan_dev.o vlan_netlink.o
8 8021q-$(CONFIG_VLAN_8021Q_GVRP) += vlan_gvrp.o
9 8021q-$(CONFIG_VLAN_8021Q_MVRP) += vlan_mvrp.o
10 8021q-$(CONFIG_PROC_FS) += vlanproc.o
/linux-4.1.27/scripts/coccinelle/misc/
Dsimple_return.cocci63 position q,s1.p;
67 * t i@q;
86 q << s2.q;
89 cocci.print_main("decl",q)
96 q << s2.q;
99 cocci.print_main("decl",q)
134 q << s2.q;
137 msg = "WARNING: end returns can be simpified and declaration on line %s can be dropped" % (q[0].lin…
144 q << s2.q
148 …ns may be simpified if negative or 0 value and declaration on line %s can be dropped" % (q[0].line)
/linux-4.1.27/ipc/
Dsem.c208 struct sem_queue *q, *tq; in unmerge_queues() local
218 list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { in unmerge_queues()
220 curr = &sma->sem_base[q->sops[0].sem_num]; in unmerge_queues()
222 list_add_tail(&q->list, &curr->pending_alter); in unmerge_queues()
613 static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) in perform_atomic_semop() argument
621 sops = q->sops; in perform_atomic_semop()
622 nsops = q->nsops; in perform_atomic_semop()
623 un = q->undo; in perform_atomic_semop()
651 pid = q->pid; in perform_atomic_semop()
664 q->blocking = sop; in perform_atomic_semop()
[all …]
/linux-4.1.27/drivers/media/pci/cx23885/
Dcx23885-vbi.c96 struct cx23885_dmaqueue *q, in cx23885_start_vbi_dma() argument
109 q->count = 0; in cx23885_start_vbi_dma()
124 static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, in queue_setup() argument
128 struct cx23885_dev *dev = q->drv_priv; in queue_setup()
196 struct cx23885_dmaqueue *q = &dev->vbiq; in buffer_queue() local
204 if (list_empty(&q->active)) { in buffer_queue()
206 list_add_tail(&buf->queue, &q->active); in buffer_queue()
213 prev = list_entry(q->active.prev, struct cx23885_buffer, in buffer_queue()
216 list_add_tail(&buf->queue, &q->active); in buffer_queue()
224 static int cx23885_start_streaming(struct vb2_queue *q, unsigned int count) in cx23885_start_streaming() argument
[all …]
Dcx23885-video.c98 struct cx23885_dmaqueue *q, u32 count) in cx23885_video_wakeup() argument
102 if (list_empty(&q->active)) in cx23885_video_wakeup()
104 buf = list_entry(q->active.next, in cx23885_video_wakeup()
107 buf->vb.v4l2_buf.sequence = q->count++; in cx23885_video_wakeup()
110 count, q->count); in cx23885_video_wakeup()
291 struct cx23885_dmaqueue *q, in cx23885_start_video_dma() argument
305 q->count = 0; in cx23885_start_video_dma()
318 static int queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, in queue_setup() argument
322 struct cx23885_dev *dev = q->drv_priv; in queue_setup()
445 struct cx23885_dmaqueue *q = &dev->vidq; in buffer_queue() local
[all …]
/linux-4.1.27/drivers/tty/hvc/
Dhvc_beat.c44 static unsigned char q[sizeof(unsigned long) * 2] in hvc_beat_get_chars() local
52 memcpy(buf, q, cnt); in hvc_beat_get_chars()
54 memmove(q + cnt, q, qlen); in hvc_beat_get_chars()
59 memcpy(buf, q, qlen); in hvc_beat_get_chars()
66 ((u64 *)q), ((u64 *)q) + 1) == 0) { in hvc_beat_get_chars()
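hvc_beat_get_chars() drains a static stash q[] before asking the hypervisor for another fixed-size chunk. A runnable sketch of that stash pattern (fill16() is a hypothetical stand-in for the hcall, which always yields 16 bytes):

#include <stdio.h>
#include <string.h>

static unsigned char q[16];
static int qlen;

static void fill16(unsigned char *dst)		/* stand-in for the hcall */
{
	memset(dst, 'x', 16);
}

static int get_chars(char *buf, int cnt)
{
	if (qlen == 0) {			/* stash empty: refill */
		fill16(q);
		qlen = 16;
	}
	if (cnt > qlen)
		cnt = qlen;
	memcpy(buf, q, cnt);
	qlen -= cnt;
	memmove(q, q + cnt, qlen);		/* keep the remainder at the front */
	return cnt;
}

int main(void)
{
	char buf[5];

	printf("got %d bytes\n", get_chars(buf, 5));
	return 0;
}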
/linux-4.1.27/drivers/scsi/lpfc/
Dlpfc_debugfs.h290 lpfc_debug_dump_qe(struct lpfc_queue *q, uint32_t idx) in lpfc_debug_dump_qe() argument
297 if (!q) in lpfc_debug_dump_qe()
299 if (idx >= q->entry_count) in lpfc_debug_dump_qe()
302 esize = q->entry_size; in lpfc_debug_dump_qe()
304 pword = q->qe[idx].address; in lpfc_debug_dump_qe()
338 lpfc_debug_dump_q(struct lpfc_queue *q) in lpfc_debug_dump_q() argument
343 if (!q) in lpfc_debug_dump_q()
346 dev_printk(KERN_ERR, &(((q->phba))->pcidev)->dev, in lpfc_debug_dump_q()
350 (q->phba)->brd_no, in lpfc_debug_dump_q()
351 q->queue_id, q->type, q->subtype, in lpfc_debug_dump_q()
[all …]
/linux-4.1.27/drivers/hid/usbhid/
Dhid-quirks.c174 struct quirks_list_struct *q; in usbhid_exists_dquirk() local
177 list_for_each_entry(q, &dquirks_list, node) { in usbhid_exists_dquirk()
178 if (q->hid_bl_item.idVendor == idVendor && in usbhid_exists_dquirk()
179 q->hid_bl_item.idProduct == idProduct) { in usbhid_exists_dquirk()
180 bl_entry = &q->hid_bl_item; in usbhid_exists_dquirk()
210 struct quirks_list_struct *q_new, *q; in usbhid_modify_dquirk() local
230 list_for_each_entry(q, &dquirks_list, node) { in usbhid_modify_dquirk()
232 if (q->hid_bl_item.idVendor == idVendor && in usbhid_modify_dquirk()
233 q->hid_bl_item.idProduct == idProduct) { in usbhid_modify_dquirk()
235 list_replace(&q->node, &q_new->node); in usbhid_modify_dquirk()
[all …]
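A kernel-context sketch of the lookup pattern in usbhid_exists_dquirk() above: walk a driver-private list and return the first (vendor, product) match. The entry type and list head are hypothetical stand-ins; only the <linux/list.h> primitives are assumed.

struct quirk_entry {
	u16 vendor, product;
	u32 quirks;
	struct list_head node;
};

static LIST_HEAD(quirk_list);

static u32 lookup_quirks(u16 vendor, u16 product)
{
	struct quirk_entry *e;

	list_for_each_entry(e, &quirk_list, node)
		if (e->vendor == vendor && e->product == product)
			return e->quirks;
	return 0;				/* no quirk known */
}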
/linux-4.1.27/drivers/net/usb/
Dcatc.c190 void (*callback)(struct catc *catc, struct ctrl_queue *q);
483 struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail; in catc_ctrl_run() local
489 dr->bRequest = q->request; in catc_ctrl_run()
490 dr->bRequestType = 0x40 | q->dir; in catc_ctrl_run()
491 dr->wValue = cpu_to_le16(q->value); in catc_ctrl_run()
492 dr->wIndex = cpu_to_le16(q->index); in catc_ctrl_run()
493 dr->wLength = cpu_to_le16(q->len); in catc_ctrl_run()
495 urb->pipe = q->dir ? usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0); in catc_ctrl_run()
496 urb->transfer_buffer_length = q->len; in catc_ctrl_run()
501 if (!q->dir && q->buf && q->len) in catc_ctrl_run()
[all …]
/linux-4.1.27/drivers/media/platform/vivid/
Dvivid-core.c650 struct vb2_queue *q; in vivid_create_instance() local
1017 q = &dev->vb_vid_cap_q; in vivid_create_instance()
1018 q->type = dev->multiplanar ? V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE : in vivid_create_instance()
1020 q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ; in vivid_create_instance()
1021 q->drv_priv = dev; in vivid_create_instance()
1022 q->buf_struct_size = sizeof(struct vivid_buffer); in vivid_create_instance()
1023 q->ops = &vivid_vid_cap_qops; in vivid_create_instance()
1024 q->mem_ops = &vb2_vmalloc_memops; in vivid_create_instance()
1025 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; in vivid_create_instance()
1026 q->min_buffers_needed = 2; in vivid_create_instance()
[all …]
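A hedged sketch of the vb2_queue setup sequence shown above, condensed to one capture queue; my_dev, my_buffer and my_qops are hypothetical. The fields mirror what vivid_create_instance() fills in before handing the queue to vb2_queue_init().

	q = &my_dev->queue;
	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
	q->drv_priv = my_dev;
	q->buf_struct_size = sizeof(struct my_buffer);
	q->ops = &my_qops;
	q->mem_ops = &vb2_vmalloc_memops;
	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	q->min_buffers_needed = 2;

	ret = vb2_queue_init(q);
	if (ret)
		return ret;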
/linux-4.1.27/arch/x86/lib/
Dmsr.c42 m->q = val; in msr_read()
55 return wrmsrl_safe(msr, m->q); in msr_write()
72 m1.q |= BIT_64(bit); in __flip_bit()
74 m1.q &= ~BIT_64(bit); in __flip_bit()
76 if (m1.q == m.q) in __flip_bit()
Dmsr-smp.c50 int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) in rdmsrl_on_cpu() argument
59 *q = rv.reg.q; in rdmsrl_on_cpu()
81 int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) in wrmsrl_on_cpu() argument
89 rv.reg.q = q; in wrmsrl_on_cpu()
193 int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q) in wrmsrl_safe_on_cpu() argument
201 rv.reg.q = q; in wrmsrl_safe_on_cpu()
209 int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) in rdmsrl_safe_on_cpu() argument
218 *q = rv.reg.q; in rdmsrl_safe_on_cpu()
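A kernel-context sketch of the cross-CPU MSR helpers above: read a 64-bit MSR on CPU 1, set a bit, and write it back. MSR_EXAMPLE is hypothetical; both helpers return 0 on success.

	u64 val;
	int err;

	err = rdmsrl_on_cpu(1, MSR_EXAMPLE, &val);
	if (!err)
		err = wrmsrl_on_cpu(1, MSR_EXAMPLE, val | BIT_64(0));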
/linux-4.1.27/drivers/char/
Dapm-emulation.c172 static inline int queue_empty(struct apm_queue *q) in queue_empty() argument
174 return q->event_head == q->event_tail; in queue_empty()
177 static inline apm_event_t queue_get_event(struct apm_queue *q) in queue_get_event() argument
179 q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; in queue_get_event()
180 return q->events[q->event_tail]; in queue_get_event()
183 static void queue_add_event(struct apm_queue *q, apm_event_t event) in queue_add_event() argument
185 q->event_head = (q->event_head + 1) % APM_MAX_EVENTS; in queue_add_event()
186 if (q->event_head == q->event_tail) { in queue_add_event()
191 q->event_tail = (q->event_tail + 1) % APM_MAX_EVENTS; in queue_add_event()
193 q->events[q->event_head] = event; in queue_add_event()
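A runnable sketch of the fixed-size event ring above: when the head index catches up with the tail the oldest event is dropped, exactly as queue_add_event() does.

#include <stdio.h>

#define MAX_EVENTS 4

static int events[MAX_EVENTS];
static unsigned head, tail;		/* last-written / last-read slots */

static int queue_empty(void)
{
	return head == tail;
}

static void queue_add(int ev)
{
	head = (head + 1) % MAX_EVENTS;
	if (head == tail)		/* full: drop the oldest event */
		tail = (tail + 1) % MAX_EVENTS;
	events[head] = ev;
}

static int queue_get(void)
{
	tail = (tail + 1) % MAX_EVENTS;
	return events[tail];
}

int main(void)
{
	for (int i = 1; i <= 5; i++)	/* more than the ring can hold */
		queue_add(i);
	while (!queue_empty())
		printf("%d\n", queue_get());	/* prints 3 4 5 */
	return 0;
}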
/linux-4.1.27/drivers/staging/media/dt3155v4l/
Ddt3155v4l.c221 dt3155_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, in dt3155_queue_setup() argument
226 struct dt3155_priv *pd = vb2_get_drv_priv(q); in dt3155_queue_setup()
233 if (pd->q->alloc_ctx[0]) in dt3155_queue_setup()
238 pd->q->alloc_ctx[0] = ret; in dt3155_queue_setup()
243 dt3155_wait_prepare(struct vb2_queue *q) in dt3155_wait_prepare() argument
245 struct dt3155_priv *pd = vb2_get_drv_priv(q); in dt3155_wait_prepare()
251 dt3155_wait_finish(struct vb2_queue *q) in dt3155_wait_finish() argument
253 struct dt3155_priv *pd = vb2_get_drv_priv(q); in dt3155_wait_finish()
266 dt3155_stop_streaming(struct vb2_queue *q) in dt3155_stop_streaming() argument
268 struct dt3155_priv *pd = vb2_get_drv_priv(q); in dt3155_stop_streaming()
[all …]
/linux-4.1.27/tools/testing/selftests/timers/
Dmqueue-lat.c72 mqd_t q; in mqueue_lat_test() local
77 q = mq_open("/foo", O_CREAT | O_RDONLY, 0666, NULL); in mqueue_lat_test()
78 if (q < 0) { in mqueue_lat_test()
82 mq_getattr(q, &attr); in mqueue_lat_test()
95 ret = mq_timedreceive(q, buf, sizeof(buf), NULL, &target); in mqueue_lat_test()
103 mq_close(q); in mqueue_lat_test()
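A runnable sketch of the mq_timedreceive() pattern used by the selftest above: open a queue, wait up to one second against an absolute CLOCK_REALTIME deadline, then clean up. Link with -lrt on older glibc.

#include <stdio.h>
#include <fcntl.h>
#include <time.h>
#include <mqueue.h>

int main(void)
{
	char buf[8192];			/* >= the default mq_msgsize */
	struct timespec to;
	mqd_t q;

	q = mq_open("/demo", O_CREAT | O_RDONLY, 0666, NULL);
	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}

	clock_gettime(CLOCK_REALTIME, &to);
	to.tv_sec += 1;			/* absolute deadline, one second out */
	if (mq_timedreceive(q, buf, sizeof(buf), NULL, &to) < 0)
		perror("mq_timedreceive");	/* ETIMEDOUT if queue stays empty */

	mq_close(q);
	mq_unlink("/demo");
	return 0;
}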
/linux-4.1.27/arch/x86/include/asm/
Dmsr.h18 u64 q; member
223 int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
224 int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
229 int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q);
230 int wrmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 q);
244 static inline int rdmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) in rdmsrl_on_cpu() argument
246 rdmsrl(msr_no, *q); in rdmsrl_on_cpu()
249 static inline int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q) in wrmsrl_on_cpu() argument
251 wrmsrl(msr_no, q); in wrmsrl_on_cpu()
273 static inline int rdmsrl_safe_on_cpu(unsigned int cpu, u32 msr_no, u64 *q) in rdmsrl_safe_on_cpu() argument
[all …]
/linux-4.1.27/drivers/media/pci/saa7134/
Dsaa7134-core.c270 struct saa7134_dmaqueue *q, in saa7134_buffer_queue() argument
278 if (NULL == q->curr) { in saa7134_buffer_queue()
279 if (!q->need_two) { in saa7134_buffer_queue()
280 q->curr = buf; in saa7134_buffer_queue()
282 } else if (list_empty(&q->queue)) { in saa7134_buffer_queue()
283 list_add_tail(&buf->entry, &q->queue); in saa7134_buffer_queue()
285 next = list_entry(q->queue.next, struct saa7134_buf, in saa7134_buffer_queue()
287 q->curr = buf; in saa7134_buffer_queue()
291 list_add_tail(&buf->entry, &q->queue); in saa7134_buffer_queue()
298 struct saa7134_dmaqueue *q, in saa7134_buffer_finish() argument
[all …]
Dsaa7134-empress.c255 struct vb2_queue *q; in empress_init() local
281 q = &dev->empress_vbq; in empress_init()
282 q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; in empress_init()
288 q->io_modes = VB2_MMAP | VB2_READ; in empress_init()
289 q->drv_priv = &dev->ts_q; in empress_init()
290 q->ops = &saa7134_empress_qops; in empress_init()
291 q->gfp_flags = GFP_DMA32; in empress_init()
292 q->mem_ops = &vb2_dma_sg_memops; in empress_init()
293 q->buf_struct_size = sizeof(struct saa7134_buf); in empress_init()
294 q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; in empress_init()
[all …]
/linux-4.1.27/arch/mips/bmips/
Dsetup.c143 const struct bmips_quirk *q; in plat_mem_setup() local
160 for (q = bmips_quirk_list; q->quirk_fn; q++) { in plat_mem_setup()
162 q->compatible)) { in plat_mem_setup()
163 q->quirk_fn(); in plat_mem_setup()
/linux-4.1.27/fs/hpfs/
Dalloc.c121 unsigned i, q; in alloc_in_bmp() local
137 q = nr + n; b = 0; in alloc_in_bmp()
138 while ((a = tstbits(bmp, q, n + forward)) != 0) { in alloc_in_bmp()
139 q += a; in alloc_in_bmp()
140 if (n != 1) q = ((q-1)&~(n-1))+n; in alloc_in_bmp()
142 if (q>>5 != nr>>5) { in alloc_in_bmp()
144 q = nr & 0x1f; in alloc_in_bmp()
146 } else if (q > nr) break; in alloc_in_bmp()
149 ret = bs + q; in alloc_in_bmp()
158 q = i<<5; in alloc_in_bmp()
[all …]
/linux-4.1.27/drivers/staging/unisys/visorchannel/
Dvisorchannel_funcs.c295 #define SIG_QUEUE_OFFSET(chan_hdr, q) \ argument
297 ((q) * sizeof(struct signal_queue_header)))
302 #define SIG_DATA_OFFSET(chan_hdr, q, sig_hdr, slot) \ argument
303 (SIG_QUEUE_OFFSET(chan_hdr, q) + (sig_hdr)->sig_base_offset + \
528 sigqueue_debug(struct signal_queue_header *q, int which, struct seq_file *seq) in sigqueue_debug() argument
531 seq_printf(seq, " VersionId = %lu\n", (ulong)q->version); in sigqueue_debug()
532 seq_printf(seq, " Type = %lu\n", (ulong)q->chtype); in sigqueue_debug()
534 (long long)q->sig_base_offset); in sigqueue_debug()
535 seq_printf(seq, " SignalSize = %lu\n", (ulong)q->signal_size); in sigqueue_debug()
537 (ulong)q->max_slots); in sigqueue_debug()
[all …]
/linux-4.1.27/include/linux/sunrpc/
Dsched.h201 #define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0) argument
255 static inline const char * rpc_qname(const struct rpc_wait_queue *q) in rpc_qname() argument
257 return ((q && q->name) ? q->name : "unknown"); in rpc_qname()
260 static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, in rpc_assign_waitqueue_name() argument
263 q->name = name; in rpc_assign_waitqueue_name()
266 static inline void rpc_assign_waitqueue_name(struct rpc_wait_queue *q, in rpc_assign_waitqueue_name() argument
/linux-4.1.27/arch/x86/math-emu/
Dfpu_trig.c21 unsigned long long st1, unsigned long long q, int n);
37 unsigned long long q; in trig_arg() local
55 q = significand(&tmp); in trig_arg()
56 if (q) { in trig_arg()
60 q, exponent(st0_ptr) - exponent(&CONST_PI2)); in trig_arg()
66 if ((even && !(q & 1)) || (!even && (q & 1))) { in trig_arg()
77 || (q > 1)) { in trig_arg()
81 significand(&tmp) = q + 1; in trig_arg()
97 q++; in trig_arg()
108 if (((q > 0) in trig_arg()
[all …]
/linux-4.1.27/arch/x86/xen/
Dplatform-pci-unplug.c187 char *p, *q; in parse_xen_emul_unplug() local
190 for (p = arg; p; p = q) { in parse_xen_emul_unplug()
191 q = strchr(p, ','); in parse_xen_emul_unplug()
192 if (q) { in parse_xen_emul_unplug()
193 l = q - p; in parse_xen_emul_unplug()
194 q++; in parse_xen_emul_unplug()
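A runnable sketch of the comma-separated option walk in parse_xen_emul_unplug() above: p tracks the current token, q the character after the next comma, l the token length.

#include <stdio.h>
#include <string.h>

static void parse(const char *arg)
{
	const char *p, *q;
	size_t l;

	for (p = arg; p; p = q) {
		q = strchr(p, ',');
		if (q) {
			l = (size_t)(q - p);
			q++;			/* step past the comma */
		} else {
			l = strlen(p);
		}
		printf("token: %.*s\n", (int)l, p);
	}
}

int main(void)
{
	parse("ide-disks,nics,never");	/* hypothetical option string */
	return 0;
}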
/linux-4.1.27/drivers/staging/vt6655/
Dtmacro.h48 #define LODWORD(q) ((q).u.dwLowDword) argument
49 #define HIDWORD(q) ((q).u.dwHighDword) argument
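A runnable sketch of the LODWORD()/HIDWORD() idea above: expose the halves of a 64-bit value through a union (little-endian layout assumed, matching the hardware this header targets).

#include <stdio.h>
#include <stdint.h>

typedef union {
	uint64_t q;
	struct {
		uint32_t lo;		/* low dword first on little-endian */
		uint32_t hi;
	} u;
} qword_t;

#define LODWORD(x) ((x).u.lo)
#define HIDWORD(x) ((x).u.hi)

int main(void)
{
	qword_t v = { .q = 0x1122334455667788ULL };

	printf("lo=0x%08x hi=0x%08x\n", LODWORD(v), HIDWORD(v));
	return 0;
}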
/linux-4.1.27/drivers/video/fbdev/aty/
Dmach64_ct.c210 u32 q; in aty_valid_pll_ct() local
215 q = par->ref_clk_per * pll->pll_ref_div * 4 / vclk_per; in aty_valid_pll_ct()
216 if (q < 16*8 || q > 255*8) { in aty_valid_pll_ct()
220 pll->vclk_post_div = (q < 128*8); in aty_valid_pll_ct()
221 pll->vclk_post_div += (q < 64*8); in aty_valid_pll_ct()
222 pll->vclk_post_div += (q < 32*8); in aty_valid_pll_ct()
226 pll->vclk_fb_div = q * pll->vclk_post_div_real / 8; in aty_valid_pll_ct()
403 u32 q, memcntl, trp; in aty_init_pll_ct() local
526 q = par->ref_clk_per * pll->ct.pll_ref_div * 8 / in aty_init_pll_ct()
529 if (q < 16*8 || q > 255*8) { in aty_init_pll_ct()
[all …]
/linux-4.1.27/drivers/ide/
Dide-timings.c109 static void ide_timing_quantize(struct ide_timing *t, struct ide_timing *q, in ide_timing_quantize() argument
112 q->setup = EZ(t->setup * 1000, T); in ide_timing_quantize()
113 q->act8b = EZ(t->act8b * 1000, T); in ide_timing_quantize()
114 q->rec8b = EZ(t->rec8b * 1000, T); in ide_timing_quantize()
115 q->cyc8b = EZ(t->cyc8b * 1000, T); in ide_timing_quantize()
116 q->active = EZ(t->active * 1000, T); in ide_timing_quantize()
117 q->recover = EZ(t->recover * 1000, T); in ide_timing_quantize()
118 q->cycle = EZ(t->cycle * 1000, T); in ide_timing_quantize()
119 q->udma = EZ(t->udma * 1000, UT); in ide_timing_quantize()
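A runnable sketch of the quantization above, on the assumption that EZ() is a round-up division: timings in nanoseconds are scaled to picoseconds (* 1000) and divided by the clock period T, rounding partial cycles up.

#include <stdio.h>

static unsigned ez(unsigned v, unsigned unit)	/* ceil(v / unit) */
{
	return v / unit + (v % unit ? 1 : 0);
}

int main(void)
{
	unsigned T = 30000;		/* 30 ns clock period, in picoseconds */
	unsigned setup_ns = 70;

	/* 70 ns = 70000 ps -> 3 periods of 30000 ps */
	printf("setup = %u cycles\n", ez(setup_ns * 1000, T));
	return 0;
}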
/linux-4.1.27/drivers/net/wireless/brcm80211/include/
Dbrcmu_utils.h82 struct pktq_prec q[PKTQ_MAX_PREC]; member
89 return pq->q[prec].skblist.qlen; in pktq_plen()
94 return pq->q[prec].max - pq->q[prec].skblist.qlen; in pktq_pavail()
99 return pq->q[prec].skblist.qlen >= pq->q[prec].max; in pktq_pfull()
104 return skb_queue_empty(&pq->q[prec].skblist); in pktq_pempty()
109 return skb_peek(&pq->q[prec].skblist); in pktq_ppeek()
114 return skb_peek_tail(&pq->q[prec].skblist); in pktq_ppeek_tail()
/linux-4.1.27/scripts/basic/
Dfixdep.c241 const char *p, *q; in parse_config_file() local
254 for (q = p + 7; q < map + len; q++) { in parse_config_file()
255 if (!(isalnum(*q) || *q == '_')) in parse_config_file()
261 if (!memcmp(q - 7, "_MODULE", 7)) in parse_config_file()
262 q -= 7; in parse_config_file()
263 if( (q-p-7) < 0 ) in parse_config_file()
265 use_config(p+7, q-p-7); in parse_config_file()
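A runnable sketch of the CONFIG_ scan in parse_config_file() above: find "CONFIG_" in a buffer, take the [A-Za-z0-9_] run that follows, and fold a trailing "_MODULE" back onto the base symbol, as fixdep does.

#include <stdio.h>
#include <string.h>
#include <ctype.h>

static void scan(const char *map)
{
	const char *p, *q, *e;

	for (p = strstr(map, "CONFIG_"); p; p = strstr(q, "CONFIG_")) {
		for (q = p + 7; *q && (isalnum((unsigned char)*q) || *q == '_'); q++)
			;
		e = q;
		if (e - (p + 7) >= 7 && !memcmp(e - 7, "_MODULE", 7))
			e -= 7;		/* CONFIG_FOO_MODULE -> FOO */
		if (e > p + 7)
			printf("uses CONFIG %.*s\n", (int)(e - p - 7), p + 7);
	}
}

int main(void)
{
	scan("#ifdef CONFIG_FOO_MODULE\n#if defined(CONFIG_BAR)\n");
	return 0;
}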
/linux-4.1.27/arch/powerpc/platforms/powermac/
Dbootx_init.c50 const char *p, *q, *s; in bootx_printf() local
55 for (p = format; *p != 0; p = q) { in bootx_printf()
56 for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) in bootx_printf()
58 if (q > p) in bootx_printf()
59 btext_drawtext(p, q - p); in bootx_printf()
60 if (*q == 0) in bootx_printf()
62 if (*q == '\n') { in bootx_printf()
63 ++q; in bootx_printf()
69 ++q; in bootx_printf()
70 if (*q == 0) in bootx_printf()
[all …]
