
Searched refs:rq (Results 1 – 200 of 422) sorted by relevance


/linux-4.4.14/kernel/sched/
Ddeadline.c28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq()
30 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq()
36 struct rq *rq = task_rq(p); in dl_rq_of_se() local
38 return &rq->dl; in dl_rq_of_se()
90 static inline int dl_overloaded(struct rq *rq) in dl_overloaded() argument
92 return atomic_read(&rq->rd->dlo_count); in dl_overloaded()
95 static inline void dl_set_overload(struct rq *rq) in dl_set_overload() argument
97 if (!rq->online) in dl_set_overload()
100 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); in dl_set_overload()
108 atomic_inc(&rq->rd->dlo_count); in dl_set_overload()
[all …]
Dstats.h8 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument
10 if (rq) { in rq_sched_info_arrive()
11 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive()
12 rq->rq_sched_info.pcount++; in rq_sched_info_arrive()
20 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument
22 if (rq) in rq_sched_info_depart()
23 rq->rq_cpu_time += delta; in rq_sched_info_depart()
27 rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeued() argument
29 if (rq) in rq_sched_info_dequeued()
30 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeued()
[all …]
Drt.c123 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
125 return rt_rq->rq; in rq_of_rt_rq()
133 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
137 return rt_rq->rq; in rq_of_rt_se()
162 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local
166 rt_rq->rq = rq; in init_tg_rt_entry()
176 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry()
234 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq()
236 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq()
239 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se()
[all …]
Dsched.h17 struct rq;
29 extern void calc_global_load_tick(struct rq *this_rq);
30 extern long calc_load_fold_active(struct rq *this_rq);
33 extern void update_cpu_load_active(struct rq *this_rq);
35 static inline void update_cpu_load_active(struct rq *this_rq) { } in update_cpu_load_active()
399 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ member
471 struct rq *rq; member
559 struct rq { struct
693 static inline int cpu_of(struct rq *rq) in cpu_of() argument
696 return rq->cpu; in cpu_of()
[all …]
Dstop_task.c21 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_stop() argument
27 pick_next_task_stop(struct rq *rq, struct task_struct *prev) in pick_next_task_stop() argument
29 struct task_struct *stop = rq->stop; in pick_next_task_stop()
34 put_prev_task(rq, prev); in pick_next_task_stop()
36 stop->se.exec_start = rq_clock_task(rq); in pick_next_task_stop()
42 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_stop() argument
44 add_nr_running(rq, 1); in enqueue_task_stop()
48 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_stop() argument
50 sub_nr_running(rq, 1); in dequeue_task_stop()
53 static void yield_task_stop(struct rq *rq) in yield_task_stop() argument
[all …]
Didle_task.c21 static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_idle() argument
23 resched_curr(rq); in check_preempt_curr_idle()
27 pick_next_task_idle(struct rq *rq, struct task_struct *prev) in pick_next_task_idle() argument
29 put_prev_task(rq, prev); in pick_next_task_idle()
31 schedstat_inc(rq, sched_goidle); in pick_next_task_idle()
32 return rq->idle; in pick_next_task_idle()
40 dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_idle() argument
42 raw_spin_unlock_irq(&rq->lock); in dequeue_task_idle()
45 raw_spin_lock_irq(&rq->lock); in dequeue_task_idle()
48 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) in put_prev_task_idle() argument
[all …]
Dcore.c94 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
96 static void update_rq_clock_task(struct rq *rq, s64 delta);
98 void update_rq_clock(struct rq *rq) in update_rq_clock() argument
102 lockdep_assert_held(&rq->lock); in update_rq_clock()
104 if (rq->clock_skip_update & RQCF_ACT_SKIP) in update_rq_clock()
107 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock()
110 rq->clock += delta; in update_rq_clock()
111 update_rq_clock_task(rq, delta); in update_rq_clock()
293 static struct rq *this_rq_lock(void) in this_rq_lock()
294 __acquires(rq->lock) in this_rq_lock()
[all …]
Dfair.c249 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
251 return cfs_rq->rq; in rq_of()
317 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
318 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
374 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of()
376 return container_of(cfs_rq, struct rq, cfs); in rq_of()
392 struct rq *rq = task_rq(p); in cfs_rq_of() local
394 return &rq->cfs; in cfs_rq_of()
411 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument
412 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL)
[all …]
Dstats.c23 struct rq *rq; in show_schedstat() local
29 rq = cpu_rq(cpu); in show_schedstat()
34 cpu, rq->yld_count, in show_schedstat()
35 rq->sched_count, rq->sched_goidle, in show_schedstat()
36 rq->ttwu_count, rq->ttwu_local, in show_schedstat()
37 rq->rq_cpu_time, in show_schedstat()
38 rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); in show_schedstat()
Ddebug.c112 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) in print_task() argument
114 if (rq->curr == p) in print_task()
145 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) in print_rq() argument
161 print_task(m, rq, p); in print_rq()
170 struct rq *rq = cpu_rq(cpu); in print_cfs_rq() local
182 raw_spin_lock_irqsave(&rq->lock, flags); in print_cfs_rq()
190 raw_spin_unlock_irqrestore(&rq->lock, flags); in print_cfs_rq()
269 struct rq *rq = cpu_rq(cpu); in print_cpu() local
285 if (sizeof(rq->x) == 4) \ in print_cpu()
286 SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x)); \ in print_cpu()
[all …]
Dcputime.c249 struct rq *rq = this_rq(); in account_idle_time() local
251 if (atomic_read(&rq->nr_iowait) > 0) in account_idle_time()
340 struct rq *rq, int ticks) in irqtime_account_process_tick() argument
365 } else if (p == rq->idle) { in irqtime_account_process_tick()
376 struct rq *rq = this_rq(); in irqtime_account_idle_ticks() local
378 irqtime_account_process_tick(current, 0, rq, ticks); in irqtime_account_idle_ticks()
383 struct rq *rq, int nr_ticks) {} in irqtime_account_process_tick() argument
467 struct rq *rq = this_rq(); in account_process_tick() local
473 irqtime_account_process_tick(p, user_tick, rq, 1); in account_process_tick()
482 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) in account_process_tick()
Dloadavg.c81 long calc_load_fold_active(struct rq *this_rq) in calc_load_fold_active()
184 struct rq *this_rq = this_rq(); in calc_load_enter_idle()
201 struct rq *this_rq = this_rq(); in calc_load_exit_idle()
385 void calc_global_load_tick(struct rq *this_rq) in calc_global_load_tick()
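
The scheduler hits above all revolve around the per-CPU struct rq: rq_of_dl_rq(), rq_of_rt_rq() and rq_of() each map a class-specific sub-runqueue (dl_rq, rt_rq, cfs_rq) back to the struct rq it is embedded in via container_of(). Below is a minimal, self-contained sketch of that pattern; the struct layouts are simplified stand-ins for illustration, not the kernel's definitions.

/* Minimal sketch of the container_of pattern behind rq_of_dl_rq()/rq_of_rt_rq()/rq_of():
 * the class-specific runqueue is embedded in struct rq, so a pointer to the member can
 * be mapped back to its containing struct rq. Simplified stand-in types. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct cfs_rq { int nr_running; };
struct rt_rq  { int rt_nr_running; };

struct rq {
        int cpu;
        struct cfs_rq cfs;      /* embedded per-class runqueues */
        struct rt_rq  rt;
};

/* Same shape as rq_of() in fair.c: recover the owning rq from its cfs_rq member. */
static inline struct rq *rq_of(struct cfs_rq *cfs_rq)
{
        return container_of(cfs_rq, struct rq, cfs);
}

int main(void)
{
        struct rq runqueue = { .cpu = 3 };
        struct cfs_rq *cfs = &runqueue.cfs;

        printf("cpu of owning rq: %d\n", rq_of(cfs)->cpu);
        return 0;
}
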
/linux-4.4.14/drivers/scsi/fnic/
Dvnic_rq.c27 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
31 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
34 vdev = rq->vdev; in vnic_rq_alloc_bufs()
37 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs()
38 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs()
45 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
48 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
49 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
51 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
54 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
[all …]
Dvnic_rq.h105 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
108 return rq->ring.desc_avail; in vnic_rq_desc_avail()
111 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
117 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
119 return rq->to_use->desc; in vnic_rq_next_desc()
122 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
124 return rq->to_use->index; in vnic_rq_next_index()
127 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument
129 return rq->buf_index++; in vnic_rq_next_buf_index()
[all …]
Dfnic.h309 ____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX]; member
335 int fnic_alloc_rq_frame(struct vnic_rq *rq);
336 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
Dfnic_res.h223 static inline void fnic_queue_rq_desc(struct vnic_rq *rq, in fnic_queue_rq_desc() argument
227 struct rq_enet_desc *desc = vnic_rq_next_desc(rq); in fnic_queue_rq_desc()
234 vnic_rq_post(rq, os_buf, 0, dma_addr, len); in fnic_queue_rq_desc()
Dfnic_fcs.c790 static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc in fnic_rq_cmpl_frame_recv() argument
795 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_rq_cmpl_frame_recv()
902 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index, in fnic_rq_cmpl_handler_cont()
919 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); in fnic_rq_cmpl_handler()
936 int fnic_alloc_rq_frame(struct vnic_rq *rq) in fnic_alloc_rq_frame() argument
938 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_alloc_rq_frame()
963 fnic_queue_rq_desc(rq, skb, pa, len); in fnic_alloc_rq_frame()
971 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) in fnic_free_rq_buf() argument
974 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_free_rq_buf()
Dfnic_res.c221 vnic_rq_free(&fnic->rq[i]); in fnic_free_vnic_resources()
275 err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i, in fnic_alloc_vnic_resources()
357 vnic_rq_init(&fnic->rq[i], in fnic_alloc_vnic_resources()
Dfnic_main.c342 error_status = ioread32(&fnic->rq[i].ctrl->error_status); in fnic_log_q_error()
477 err = vnic_rq_disable(&fnic->rq[i]); in fnic_cleanup()
501 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); in fnic_cleanup()
806 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); in fnic_probe()
881 vnic_rq_enable(&fnic->rq[i]); in fnic_probe()
910 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); in fnic_probe()
/linux-4.4.14/drivers/net/ethernet/cisco/enic/
Dvnic_rq.c31 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument
34 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs()
38 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC); in vnic_rq_alloc_bufs()
39 if (!rq->bufs[i]) in vnic_rq_alloc_bufs()
44 buf = rq->bufs[i]; in vnic_rq_alloc_bufs()
47 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs()
48 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs()
50 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs()
53 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs()
61 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs()
[all …]
Dvnic_rq.h100 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument
103 return rq->ring.desc_avail; in vnic_rq_desc_avail()
106 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument
109 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used()
112 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument
114 return rq->to_use->desc; in vnic_rq_next_desc()
117 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument
119 return rq->to_use->index; in vnic_rq_next_index()
122 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument
127 struct vnic_rq_buf *buf = rq->to_use; in vnic_rq_post()
[all …]
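
Both the fnic and enic copies of vnic_rq.h account for ring descriptors the same way: vnic_rq_desc_used() is desc_count - desc_avail - 1, presumably keeping one slot unused so a completely full ring can be told apart from an empty one. A small stand-alone sketch of that bookkeeping, with illustrative field names:

/* Sketch of the descriptor accounting in vnic_rq_desc_used()/vnic_rq_desc_avail().
 * Field names are illustrative stand-ins. */
#include <assert.h>
#include <stdio.h>

struct ring {
        unsigned int desc_count;        /* total descriptors in the ring */
        unsigned int desc_avail;        /* descriptors free to be posted */
};

static unsigned int ring_desc_used(const struct ring *r)
{
        return r->desc_count - r->desc_avail - 1;       /* one slot kept unused */
}

int main(void)
{
        struct ring r = { .desc_count = 64, .desc_avail = 63 };

        assert(ring_desc_used(&r) == 0);        /* freshly initialized ring */

        r.desc_avail -= 10;                     /* post ten buffers */
        printf("used=%u avail=%u\n", ring_desc_used(&r), r.desc_avail);
        return 0;
}
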
Denic_main.c262 error_status = vnic_rq_error_status(&enic->rq[i]); in enic_log_q_error()
1030 static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) in enic_free_rq_buf() argument
1032 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_free_rq_buf()
1043 static int enic_rq_alloc_buf(struct vnic_rq *rq) in enic_rq_alloc_buf() argument
1045 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_alloc_buf()
1051 struct vnic_rq_buf *buf = rq->to_use; in enic_rq_alloc_buf()
1054 enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, in enic_rq_alloc_buf()
1070 enic_queue_rq_desc(rq, skb, os_buf_index, in enic_rq_alloc_buf()
1104 static void enic_rq_indicate_buf(struct vnic_rq *rq, in enic_rq_indicate_buf() argument
1108 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_indicate_buf()
[all …]
Denic.h176 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; member
220 static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) in enic_cq_rq() argument
222 return rq; in enic_cq_rq()
246 unsigned int rq) in enic_msix_rq_intr() argument
248 return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset; in enic_msix_rq_intr()
Denic_res.h122 static inline void enic_queue_rq_desc(struct vnic_rq *rq, in enic_queue_rq_desc() argument
126 struct rq_enet_desc *desc = vnic_rq_next_desc(rq); in enic_queue_rq_desc()
135 vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid); in enic_queue_rq_desc()
Denic_res.c189 vnic_rq_free(&enic->rq[i]); in enic_free_vnic_resources()
244 vnic_rq_init(&enic->rq[i], in enic_init_vnic_resources()
341 err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i, in enic_alloc_vnic_resources()
Denic_clsf.c20 int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq) in enic_addfltr_5t() argument
43 res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data); in enic_addfltr_5t()
45 res = (res == 0) ? rq : res; in enic_addfltr_5t()
Denic_clsf.h9 int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq);
/linux-4.4.14/drivers/scsi/esas2r/
Desas2r_disc.c49 struct esas2r_request *rq);
51 struct esas2r_request *rq);
55 struct esas2r_request *rq);
59 struct esas2r_request *rq);
61 struct esas2r_request *rq);
63 struct esas2r_request *rq);
65 struct esas2r_request *rq);
67 struct esas2r_request *rq);
69 struct esas2r_request *rq);
71 struct esas2r_request *rq);
[all …]
Desas2r_vda.c59 static void clear_vda_request(struct esas2r_request *rq);
62 struct esas2r_request *rq);
67 struct esas2r_request *rq, in esas2r_process_vda_ioctl() argument
93 clear_vda_request(rq); in esas2r_process_vda_ioctl()
95 rq->vrq->scsi.function = vi->function; in esas2r_process_vda_ioctl()
96 rq->interrupt_cb = esas2r_complete_vda_ioctl; in esas2r_process_vda_ioctl()
97 rq->interrupt_cx = vi; in esas2r_process_vda_ioctl()
112 rq->vrq->flash.length = cpu_to_le32(datalen); in esas2r_process_vda_ioctl()
113 rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; in esas2r_process_vda_ioctl()
115 memcpy(rq->vrq->flash.data.file.file_name, in esas2r_process_vda_ioctl()
[all …]
Desas2r_int.c173 struct esas2r_request *rq, in esas2r_handle_outbound_rsp_err() argument
181 if (unlikely(rq->req_stat != RS_SUCCESS)) { in esas2r_handle_outbound_rsp_err()
182 memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp)); in esas2r_handle_outbound_rsp_err()
184 if (rq->req_stat == RS_ABORTED) { in esas2r_handle_outbound_rsp_err()
185 if (rq->timeout > RQ_MAX_TIMEOUT) in esas2r_handle_outbound_rsp_err()
186 rq->req_stat = RS_TIMEOUT; in esas2r_handle_outbound_rsp_err()
187 } else if (rq->req_stat == RS_SCSI_ERROR) { in esas2r_handle_outbound_rsp_err()
188 u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat; in esas2r_handle_outbound_rsp_err()
197 rq->req_stat = RS_SUCCESS; in esas2r_handle_outbound_rsp_err()
198 rq->func_rsp.scsi_rsp.scsi_stat = in esas2r_handle_outbound_rsp_err()
[all …]
Desas2r_io.c46 void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) in esas2r_start_request() argument
49 struct esas2r_request *startrq = rq; in esas2r_start_request()
54 if (rq->vrq->scsi.function == VDA_FUNC_SCSI) in esas2r_start_request()
55 rq->req_stat = RS_SEL2; in esas2r_start_request()
57 rq->req_stat = RS_DEGRADED; in esas2r_start_request()
58 } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) { in esas2r_start_request()
59 t = a->targetdb + rq->target_id; in esas2r_start_request()
63 rq->req_stat = RS_SEL; in esas2r_start_request()
66 rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id); in esas2r_start_request()
75 rq->req_stat = RS_SEL; in esas2r_start_request()
[all …]
Desas2r_ioctl.c83 struct esas2r_request *rq) in complete_fm_api_req() argument
111 struct esas2r_request *rq; in do_fm_api() local
118 rq = esas2r_alloc_request(a); in do_fm_api()
119 if (rq == NULL) { in do_fm_api()
151 rq->comp_cb = complete_fm_api_req; in do_fm_api()
155 if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq, in do_fm_api()
174 esas2r_free_request(a, (struct esas2r_request *)rq); in do_fm_api()
182 struct esas2r_request *rq) in complete_nvr_req() argument
199 struct esas2r_request *rq) in complete_buffered_ioctl_req() argument
208 struct esas2r_request *rq; in handle_buffered_ioctl() local
[all …]
Desas2r_main.c145 struct esas2r_request *rq; in write_live_nvram() local
148 rq = esas2r_alloc_request(a); in write_live_nvram()
149 if (rq == NULL) in write_live_nvram()
152 if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf)) in write_live_nvram()
155 esas2r_free_request(a, rq); in write_live_nvram()
881 struct esas2r_request *rq; in esas2r_queuecommand() local
894 rq = esas2r_alloc_request(a); in esas2r_queuecommand()
895 if (unlikely(rq == NULL)) { in esas2r_queuecommand()
900 rq->cmd = cmd; in esas2r_queuecommand()
905 rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD); in esas2r_queuecommand()
[all …]
Desas2r_flash.c134 struct esas2r_request *rq) in esas2r_fmapi_callback() argument
136 struct atto_vda_flash_req *vrq = &rq->vrq->flash; in esas2r_fmapi_callback()
138 (struct esas2r_flash_context *)rq->interrupt_cx; in esas2r_fmapi_callback()
140 if (rq->req_stat == RS_SUCCESS) { in esas2r_fmapi_callback()
148 rq->req_stat = RS_PENDING; in esas2r_fmapi_callback()
154 rq->req_stat = RS_PENDING; in esas2r_fmapi_callback()
155 rq->interrupt_cb = fc->interrupt_cb; in esas2r_fmapi_callback()
163 if (rq->req_stat != RS_PENDING) in esas2r_fmapi_callback()
169 (*fc->interrupt_cb)(a, rq); in esas2r_fmapi_callback()
177 struct esas2r_request *rq) in build_flash_msg() argument
[all …]
Desas2r.h406 struct esas2r_request *rq);
967 int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq,
1005 bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq,
1010 struct esas2r_request *rq);
1016 void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1023 void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq);
1037 struct esas2r_request *rq,
1043 struct esas2r_request *rq,
1049 void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq);
1051 struct esas2r_request *rq,
[all …]
Desas2r_init.c104 struct esas2r_request *rq) in alloc_vda_req() argument
126 rq->vrq_md = memdesc; in alloc_vda_req()
127 rq->vrq = (union atto_vda_req *)memdesc->virt_addr; in alloc_vda_req()
128 rq->vrq->scsi.handle = a->num_vrqs; in alloc_vda_req()
840 struct esas2r_request *rq; in esas2r_init_adapter_struct() local
990 for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++, in esas2r_init_adapter_struct()
992 INIT_LIST_HEAD(&rq->req_list); in esas2r_init_adapter_struct()
993 if (!alloc_vda_req(a, rq)) { in esas2r_init_adapter_struct()
999 esas2r_rq_init_request(rq, a); in esas2r_init_adapter_struct()
1002 rq->comp_cb = esas2r_ae_complete; in esas2r_init_adapter_struct()
[all …]
/linux-4.4.14/block/
Dblk-flush.c98 static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) in blk_flush_policy() argument
102 if (blk_rq_sectors(rq)) in blk_flush_policy()
106 if (rq->cmd_flags & REQ_FLUSH) in blk_flush_policy()
108 if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) in blk_flush_policy()
114 static unsigned int blk_flush_cur_seq(struct request *rq) in blk_flush_cur_seq() argument
116 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq()
119 static void blk_flush_restore_request(struct request *rq) in blk_flush_restore_request() argument
126 rq->bio = rq->biotail; in blk_flush_restore_request()
129 rq->cmd_flags &= ~REQ_FLUSH_SEQ; in blk_flush_restore_request()
130 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request()
[all …]
Delevator.c50 #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) argument
56 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) in elv_iosched_allow_merge() argument
58 struct request_queue *q = rq->q; in elv_iosched_allow_merge()
62 return e->type->ops.elevator_allow_merge_fn(q, rq, bio); in elv_iosched_allow_merge()
70 bool elv_rq_merge_ok(struct request *rq, struct bio *bio) in elv_rq_merge_ok() argument
72 if (!blk_rq_merge_ok(rq, bio)) in elv_rq_merge_ok()
75 if (!elv_iosched_allow_merge(rq, bio)) in elv_rq_merge_ok()
245 static inline void __elv_rqhash_del(struct request *rq) in __elv_rqhash_del() argument
247 hash_del(&rq->hash); in __elv_rqhash_del()
248 rq->cmd_flags &= ~REQ_HASHED; in __elv_rqhash_del()
[all …]
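
The elevator.c hit at line 50 defines rq_hash_key() as blk_rq_pos(rq) + blk_rq_sectors(rq), i.e. a request is keyed by the first sector after its end, which is what makes a bio starting exactly there a back-merge candidate. A plain-arithmetic sketch of that idea, using simplified stand-in types rather than the kernel's struct request:

/* Sketch of the end-sector keying in rq_hash_key(); stand-in types, not kernel code. */
#include <stdio.h>

typedef unsigned long long sector_t;

struct req { sector_t pos; sector_t sectors; };

static sector_t rq_hash_key(const struct req *rq)
{
        return rq->pos + rq->sectors;   /* first sector after the request */
}

int main(void)
{
        struct req rq = { .pos = 1000, .sectors = 8 };
        sector_t bio_start = 1008;

        if (rq_hash_key(&rq) == bio_start)
                printf("a bio starting at %llu can be back-merged\n", bio_start);
        return 0;
}
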
Dblk-exec.c23 static void blk_end_sync_rq(struct request *rq, int error) in blk_end_sync_rq() argument
25 struct completion *waiting = rq->end_io_data; in blk_end_sync_rq()
27 rq->end_io_data = NULL; in blk_end_sync_rq()
52 struct request *rq, int at_head, in blk_execute_rq_nowait() argument
58 WARN_ON(rq->cmd_type == REQ_TYPE_FS); in blk_execute_rq_nowait()
60 rq->rq_disk = bd_disk; in blk_execute_rq_nowait()
61 rq->end_io = done; in blk_execute_rq_nowait()
68 blk_mq_insert_request(rq, at_head, true, false); in blk_execute_rq_nowait()
75 rq->cmd_flags |= REQ_QUIET; in blk_execute_rq_nowait()
76 rq->errors = -ENXIO; in blk_execute_rq_nowait()
[all …]
Dblk-mq.c162 struct request *rq, unsigned int rw_flags) in blk_mq_rq_ctx_init() argument
167 INIT_LIST_HEAD(&rq->queuelist); in blk_mq_rq_ctx_init()
169 rq->q = q; in blk_mq_rq_ctx_init()
170 rq->mq_ctx = ctx; in blk_mq_rq_ctx_init()
171 rq->cmd_flags |= rw_flags; in blk_mq_rq_ctx_init()
173 rq->cpu = -1; in blk_mq_rq_ctx_init()
174 INIT_HLIST_NODE(&rq->hash); in blk_mq_rq_ctx_init()
175 RB_CLEAR_NODE(&rq->rb_node); in blk_mq_rq_ctx_init()
176 rq->rq_disk = NULL; in blk_mq_rq_ctx_init()
177 rq->part = NULL; in blk_mq_rq_ctx_init()
[all …]
Dblk-core.c122 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument
124 memset(rq, 0, sizeof(*rq)); in blk_rq_init()
126 INIT_LIST_HEAD(&rq->queuelist); in blk_rq_init()
127 INIT_LIST_HEAD(&rq->timeout_list); in blk_rq_init()
128 rq->cpu = -1; in blk_rq_init()
129 rq->q = q; in blk_rq_init()
130 rq->__sector = (sector_t) -1; in blk_rq_init()
131 INIT_HLIST_NODE(&rq->hash); in blk_rq_init()
132 RB_CLEAR_NODE(&rq->rb_node); in blk_rq_init()
133 rq->cmd = rq->__cmd; in blk_rq_init()
[all …]
Dscsi_ioctl.c227 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, in blk_fill_sghdr_rq() argument
230 if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) in blk_fill_sghdr_rq()
232 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE)) in blk_fill_sghdr_rq()
238 rq->cmd_len = hdr->cmd_len; in blk_fill_sghdr_rq()
240 rq->timeout = msecs_to_jiffies(hdr->timeout); in blk_fill_sghdr_rq()
241 if (!rq->timeout) in blk_fill_sghdr_rq()
242 rq->timeout = q->sg_timeout; in blk_fill_sghdr_rq()
243 if (!rq->timeout) in blk_fill_sghdr_rq()
244 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; in blk_fill_sghdr_rq()
245 if (rq->timeout < BLK_MIN_SG_TIMEOUT) in blk_fill_sghdr_rq()
[all …]
Ddeadline-iosched.c57 deadline_rb_root(struct deadline_data *dd, struct request *rq) in deadline_rb_root() argument
59 return &dd->sort_list[rq_data_dir(rq)]; in deadline_rb_root()
66 deadline_latter_request(struct request *rq) in deadline_latter_request() argument
68 struct rb_node *node = rb_next(&rq->rb_node); in deadline_latter_request()
77 deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) in deadline_add_rq_rb() argument
79 struct rb_root *root = deadline_rb_root(dd, rq); in deadline_add_rq_rb()
81 elv_rb_add(root, rq); in deadline_add_rq_rb()
85 deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) in deadline_del_rq_rb() argument
87 const int data_dir = rq_data_dir(rq); in deadline_del_rq_rb()
89 if (dd->next_rq[data_dir] == rq) in deadline_del_rq_rb()
[all …]
Dblk.h65 void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
67 int blk_rq_append_bio(struct request_queue *q, struct request *rq,
71 void blk_dequeue_request(struct request *rq);
73 bool __blk_end_bidi_request(struct request *rq, int error,
127 static inline int blk_mark_rq_complete(struct request *rq) in blk_mark_rq_complete() argument
129 return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); in blk_mark_rq_complete()
132 static inline void blk_clear_rq_complete(struct request *rq) in blk_clear_rq_complete() argument
134 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); in blk_clear_rq_complete()
140 #define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED) argument
142 void blk_insert_flush(struct request *rq);
[all …]
Dbsg.c83 struct request *rq; member
139 static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, in blk_fill_sgv4_hdr_rq() argument
144 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); in blk_fill_sgv4_hdr_rq()
145 if (!rq->cmd) in blk_fill_sgv4_hdr_rq()
149 if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request, in blk_fill_sgv4_hdr_rq()
154 if (blk_verify_command(rq->cmd, has_write_perm)) in blk_fill_sgv4_hdr_rq()
162 rq->cmd_len = hdr->request_len; in blk_fill_sgv4_hdr_rq()
164 rq->timeout = msecs_to_jiffies(hdr->timeout); in blk_fill_sgv4_hdr_rq()
165 if (!rq->timeout) in blk_fill_sgv4_hdr_rq()
166 rq->timeout = q->sg_timeout; in blk_fill_sgv4_hdr_rq()
[all …]
Dnoop-iosched.c15 static void noop_merged_requests(struct request_queue *q, struct request *rq, in noop_merged_requests() argument
24 struct request *rq; in noop_dispatch() local
26 rq = list_first_entry_or_null(&nd->queue, struct request, queuelist); in noop_dispatch()
27 if (rq) { in noop_dispatch()
28 list_del_init(&rq->queuelist); in noop_dispatch()
29 elv_dispatch_sort(q, rq); in noop_dispatch()
35 static void noop_add_request(struct request_queue *q, struct request *rq) in noop_add_request() argument
39 list_add_tail(&rq->queuelist, &nd->queue); in noop_add_request()
43 noop_former_request(struct request_queue *q, struct request *rq) in noop_former_request() argument
47 if (rq->queuelist.prev == &nd->queue) in noop_former_request()
[all …]
Dblk-map.c30 int blk_rq_append_bio(struct request_queue *q, struct request *rq, in blk_rq_append_bio() argument
33 if (!rq->bio) in blk_rq_append_bio()
34 blk_rq_bio_prep(q, rq, bio); in blk_rq_append_bio()
35 else if (!ll_back_merge_fn(q, rq, bio)) in blk_rq_append_bio()
38 rq->biotail->bi_next = bio; in blk_rq_append_bio()
39 rq->biotail = bio; in blk_rq_append_bio()
41 rq->__data_len += bio->bi_iter.bi_size; in blk_rq_append_bio()
81 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, in blk_rq_map_user_iov() argument
134 rq->cmd_flags |= REQ_COPY_USER; in blk_rq_map_user_iov()
138 blk_rq_bio_prep(q, rq, bio); in blk_rq_map_user_iov()
[all …]
Dblk-merge.c266 void blk_recalc_rq_segments(struct request *rq) in blk_recalc_rq_segments() argument
269 &rq->q->queue_flags); in blk_recalc_rq_segments()
271 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, in blk_recalc_rq_segments()
424 int blk_rq_map_sg(struct request_queue *q, struct request *rq, in blk_rq_map_sg() argument
430 if (rq->bio) in blk_rq_map_sg()
431 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); in blk_rq_map_sg()
433 if (unlikely(rq->cmd_flags & REQ_COPY_USER) && in blk_rq_map_sg()
434 (blk_rq_bytes(rq) & q->dma_pad_mask)) { in blk_rq_map_sg()
436 (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; in blk_rq_map_sg()
439 rq->extra_len += pad_len; in blk_rq_map_sg()
[all …]
Dblk-softirq.c31 struct request *rq; in blk_done_softirq() local
33 rq = list_entry(local_list.next, struct request, ipi_list); in blk_done_softirq()
34 list_del_init(&rq->ipi_list); in blk_done_softirq()
35 rq->q->softirq_done_fn(rq); in blk_done_softirq()
42 struct request *rq = data; in trigger_softirq() local
48 list_add_tail(&rq->ipi_list, list); in trigger_softirq()
50 if (list->next == &rq->ipi_list) in trigger_softirq()
59 static int raise_blk_irq(int cpu, struct request *rq) in raise_blk_irq() argument
62 struct call_single_data *data = &rq->csd; in raise_blk_irq()
65 data->info = rq; in raise_blk_irq()
[all …]
Dblk-tag.c265 void blk_queue_end_tag(struct request_queue *q, struct request *rq) in blk_queue_end_tag() argument
268 unsigned tag = rq->tag; /* negative tags invalid */ in blk_queue_end_tag()
272 list_del_init(&rq->queuelist); in blk_queue_end_tag()
273 rq->cmd_flags &= ~REQ_QUEUED; in blk_queue_end_tag()
274 rq->tag = -1; in blk_queue_end_tag()
313 int blk_queue_start_tag(struct request_queue *q, struct request *rq) in blk_queue_start_tag() argument
319 if (unlikely((rq->cmd_flags & REQ_QUEUED))) { in blk_queue_start_tag()
322 __func__, rq, in blk_queue_start_tag()
323 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); in blk_queue_start_tag()
335 if (!rq_is_sync(rq) && max_depth > 1) { in blk_queue_start_tag()
[all …]
Dblk-timeout.c113 static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, in blk_rq_check_expired() argument
116 if (time_after_eq(jiffies, rq->deadline)) { in blk_rq_check_expired()
117 list_del_init(&rq->timeout_list); in blk_rq_check_expired()
122 if (!blk_mark_rq_complete(rq)) in blk_rq_check_expired()
123 blk_rq_timed_out(rq); in blk_rq_check_expired()
124 } else if (!*next_set || time_after(*next_timeout, rq->deadline)) { in blk_rq_check_expired()
125 *next_timeout = rq->deadline; in blk_rq_check_expired()
134 struct request *rq, *tmp; in blk_rq_timed_out_timer() local
139 list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) in blk_rq_timed_out_timer()
140 blk_rq_check_expired(rq, &next, &next_set); in blk_rq_timed_out_timer()
Dcfq-iosched.c57 #define RQ_CIC(rq) icq_to_cic((rq)->elv.icq) argument
58 #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0]) argument
59 #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1]) argument
2397 static void cfq_del_rq_rb(struct request *rq) in cfq_del_rq_rb() argument
2399 struct cfq_queue *cfqq = RQ_CFQQ(rq); in cfq_del_rq_rb()
2400 const int sync = rq_is_sync(rq); in cfq_del_rq_rb()
2405 elv_rb_del(&cfqq->sort_list, rq); in cfq_del_rq_rb()
2420 static void cfq_add_rq_rb(struct request *rq) in cfq_add_rq_rb() argument
2422 struct cfq_queue *cfqq = RQ_CFQQ(rq); in cfq_add_rq_rb()
2426 cfqq->queued[rq_is_sync(rq)]++; in cfq_add_rq_rb()
[all …]
Dblk-mq-tag.c427 struct request *rq; in bt_for_each() local
436 rq = hctx->tags->rqs[off + bit]; in bt_for_each()
437 if (rq->q == hctx->queue) in bt_for_each()
438 fn(hctx, rq, data, reserved); in bt_for_each()
449 struct request *rq; in bt_tags_for_each() local
460 rq = tags->rqs[off + bit]; in bt_tags_for_each()
461 fn(rq, data, reserved); in bt_tags_for_each()
686 u32 blk_mq_unique_tag(struct request *rq) in blk_mq_unique_tag() argument
688 struct request_queue *q = rq->q; in blk_mq_unique_tag()
693 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_unique_tag()
[all …]
Dbsg-lib.c82 static void bsg_softirq_done(struct request *rq) in bsg_softirq_done() argument
84 struct bsg_job *job = rq->special; in bsg_softirq_done()
86 blk_end_request_all(rq, rq->errors); in bsg_softirq_done()
Dblk-mq-tag.h101 unsigned int tag, struct request *rq) in blk_mq_tag_set_rq() argument
103 hctx->tags->rqs[tag] = rq; in blk_mq_tag_set_rq()
Dblk-mq-sysfs.c144 struct request *rq; in sysfs_list_show() local
147 list_for_each_entry(rq, list, queuelist) { in sysfs_list_show()
148 const int rq_len = 2 * sizeof(rq) + 2; in sysfs_list_show()
160 "\t%p\n", rq); in sysfs_list_show()
/linux-4.4.14/drivers/ide/
Dide-pm.c10 struct request *rq; in generic_ide_suspend() local
21 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); in generic_ide_suspend()
22 rq->cmd_type = REQ_TYPE_ATA_PM_SUSPEND; in generic_ide_suspend()
23 rq->special = &rqpm; in generic_ide_suspend()
29 ret = blk_execute_rq(drive->queue, NULL, rq, 0); in generic_ide_suspend()
30 blk_put_request(rq); in generic_ide_suspend()
41 static void ide_end_sync_rq(struct request *rq, int error) in ide_end_sync_rq() argument
43 complete(rq->end_io_data); in ide_end_sync_rq()
46 static int ide_pm_execute_rq(struct request *rq) in ide_pm_execute_rq() argument
48 struct request_queue *q = rq->q; in ide_pm_execute_rq()
[all …]
Dide-io.c57 int ide_end_rq(ide_drive_t *drive, struct request *rq, int error, in ide_end_rq() argument
70 return blk_end_request(rq, error, nr_bytes); in ide_end_rq()
78 struct request *rq = cmd->rq; in ide_complete_cmd() local
105 if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { in ide_complete_cmd()
106 struct ide_cmd *orig_cmd = rq->special; in ide_complete_cmd()
118 struct request *rq = hwif->rq; in ide_complete_rq() local
125 if (blk_noretry_request(rq) && error <= 0) in ide_complete_rq()
126 nr_bytes = blk_rq_sectors(rq) << 9; in ide_complete_rq()
128 rc = ide_end_rq(drive, rq, error, nr_bytes); in ide_complete_rq()
130 hwif->rq = NULL; in ide_complete_rq()
[all …]
Dide-cd.c96 static int cdrom_log_sense(ide_drive_t *drive, struct request *rq) in cdrom_log_sense() argument
101 if (!sense || !rq || (rq->cmd_flags & REQ_QUIET)) in cdrom_log_sense()
124 if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24) in cdrom_log_sense()
210 static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) in ide_cd_complete_failed_rq() argument
218 struct request *failed = (struct request *)rq->special; in ide_cd_complete_failed_rq()
219 void *sense = bio_data(rq->bio); in ide_cd_complete_failed_rq()
229 failed->sense_len = rq->sense_len; in ide_cd_complete_failed_rq()
247 static int ide_cd_breathe(ide_drive_t *drive, struct request *rq) in ide_cd_breathe() argument
252 if (!rq->errors) in ide_cd_breathe()
255 rq->errors = 1; in ide_cd_breathe()
[all …]
Dide-eh.c7 static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, in ide_ata_error() argument
15 rq->errors |= ERROR_RESET; in ide_ata_error()
28 rq->errors = ERROR_MAX; in ide_ata_error()
31 rq->errors |= ERROR_RECAL; in ide_ata_error()
35 if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ && in ide_ata_error()
42 if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) { in ide_ata_error()
43 ide_kill_rq(drive, rq); in ide_ata_error()
48 rq->errors |= ERROR_RESET; in ide_ata_error()
50 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { in ide_ata_error()
51 ++rq->errors; in ide_ata_error()
[all …]
Dide-floppy.c66 struct request *rq = pc->rq; in ide_floppy_callback() local
75 rq->cmd_type == REQ_TYPE_BLOCK_PC) in ide_floppy_callback()
79 u8 *buf = bio_data(rq->bio); in ide_floppy_callback()
100 if (rq->cmd_type == REQ_TYPE_DRV_PRIV) in ide_floppy_callback()
101 rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; in ide_floppy_callback()
136 unsigned int done = blk_rq_bytes(drive->hwif->rq); in ide_floppy_issue_pc()
191 struct ide_atapi_pc *pc, struct request *rq, in idefloppy_create_rw_cmd() argument
196 int blocks = blk_rq_sectors(rq) / floppy->bs_factor; in idefloppy_create_rw_cmd()
197 int cmd = rq_data_dir(rq); in idefloppy_create_rw_cmd()
206 memcpy(rq->cmd, pc->c, 12); in idefloppy_create_rw_cmd()
[all …]
Dide-atapi.c92 struct request *rq; in ide_queue_pc_tail() local
95 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); in ide_queue_pc_tail()
96 rq->cmd_type = REQ_TYPE_DRV_PRIV; in ide_queue_pc_tail()
97 rq->special = (char *)pc; in ide_queue_pc_tail()
100 error = blk_rq_map_kern(drive->queue, rq, buf, bufflen, in ide_queue_pc_tail()
106 memcpy(rq->cmd, pc->c, 12); in ide_queue_pc_tail()
108 rq->cmd[13] = REQ_IDETAPE_PC1; in ide_queue_pc_tail()
109 error = blk_execute_rq(drive->queue, disk, rq, 0); in ide_queue_pc_tail()
111 blk_put_request(rq); in ide_queue_pc_tail()
171 void ide_prep_sense(ide_drive_t *drive, struct request *rq) in ide_prep_sense() argument
[all …]
Dide-park.c13 struct request *rq; in issue_park_cmd() local
34 rq = blk_get_request(q, READ, __GFP_RECLAIM); in issue_park_cmd()
35 rq->cmd[0] = REQ_PARK_HEADS; in issue_park_cmd()
36 rq->cmd_len = 1; in issue_park_cmd()
37 rq->cmd_type = REQ_TYPE_DRV_PRIV; in issue_park_cmd()
38 rq->special = &timeout; in issue_park_cmd()
39 rc = blk_execute_rq(q, NULL, rq, 1); in issue_park_cmd()
40 blk_put_request(rq); in issue_park_cmd()
48 rq = blk_get_request(q, READ, GFP_NOWAIT); in issue_park_cmd()
49 if (IS_ERR(rq)) in issue_park_cmd()
[all …]
Dide-devsets.c162 struct request *rq; in ide_devset_execute() local
168 rq = blk_get_request(q, READ, __GFP_RECLAIM); in ide_devset_execute()
169 rq->cmd_type = REQ_TYPE_DRV_PRIV; in ide_devset_execute()
170 rq->cmd_len = 5; in ide_devset_execute()
171 rq->cmd[0] = REQ_DEVSET_EXEC; in ide_devset_execute()
172 *(int *)&rq->cmd[1] = arg; in ide_devset_execute()
173 rq->special = setting->set; in ide_devset_execute()
175 if (blk_execute_rq(q, NULL, rq, 0)) in ide_devset_execute()
176 ret = rq->errors; in ide_devset_execute()
177 blk_put_request(rq); in ide_devset_execute()
[all …]
Dide-ioctls.c126 struct request *rq; in ide_cmd_ioctl() local
128 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); in ide_cmd_ioctl()
129 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; in ide_cmd_ioctl()
130 err = blk_execute_rq(drive->queue, NULL, rq, 0); in ide_cmd_ioctl()
131 blk_put_request(rq); in ide_cmd_ioctl()
221 struct request *rq; in generic_drive_reset() local
224 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); in generic_drive_reset()
225 rq->cmd_type = REQ_TYPE_DRV_PRIV; in generic_drive_reset()
226 rq->cmd_len = 1; in generic_drive_reset()
227 rq->cmd[0] = REQ_DRIVE_RESET; in generic_drive_reset()
[all …]
Dide-disk.c81 static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, in __ide_do_rw_disk() argument
85 u16 nsectors = (u16)blk_rq_sectors(rq); in __ide_do_rw_disk()
93 if (block + blk_rq_sectors(rq) > 1ULL << 28) in __ide_do_rw_disk()
151 if (rq_data_dir(rq)) in __ide_do_rw_disk()
155 cmd.rq = rq; in __ide_do_rw_disk()
181 static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, in ide_do_rw_disk() argument
187 BUG_ON(rq->cmd_type != REQ_TYPE_FS); in ide_do_rw_disk()
192 drive->name, rq_data_dir(rq) == READ ? "read" : "writ", in ide_do_rw_disk()
193 (unsigned long long)block, blk_rq_sectors(rq)); in ide_do_rw_disk()
196 hwif->rw_disk(drive, rq); in ide_do_rw_disk()
[all …]
Dide-tape.c272 struct request *rq = drive->hwif->rq; in idetape_analyze_error() local
273 u8 *sense = bio_data(rq->bio); in idetape_analyze_error()
281 rq->cmd[0], tape->sense_key, tape->asc, tape->ascq); in idetape_analyze_error()
285 rq->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]); in idetape_analyze_error()
319 (blk_rq_bytes(rq) - rq->resid_len)) in idetape_analyze_error()
330 struct request *rq = drive->hwif->rq; in ide_tape_callback() local
334 ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x, dsc: %d, err: %d", rq->cmd[0], in ide_tape_callback()
351 (blk_rq_bytes(rq) - rq->resid_len) / tape->blk_size; in ide_tape_callback()
369 rq->errors = err; in ide_tape_callback()
383 drive->hwif->rq->cmd[0], tape->dsc_poll_freq); in ide_tape_stall_queue()
[all …]
Dide-taskfile.c187 struct request *rq = hwif->rq; in task_no_data_intr() local
189 if (ata_pm_request(rq)) in task_no_data_intr()
190 ide_complete_pm_rq(drive, rq); in task_no_data_intr()
289 cmd->rq->errors = 0; in ide_pio_datablock()
326 struct request *rq = drive->hwif->rq; in ide_finish_cmd() local
331 rq->errors = err; in ide_finish_cmd()
338 ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq)); in ide_finish_cmd()
396 ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9); in task_pio_intr()
429 struct request *rq; in ide_raw_taskfile() local
433 rq = blk_get_request(drive->queue, rw, __GFP_RECLAIM); in ide_raw_taskfile()
[all …]
Dide-lib.c94 struct request *rq = drive->hwif->rq; in ide_dump_ata_error() local
98 if (rq) in ide_dump_ata_error()
100 (unsigned long long)blk_rq_pos(rq)); in ide_dump_ata_error()
Dide-cd_ioctl.c303 struct request *rq; in ide_cdrom_reset() local
306 rq = blk_get_request(drive->queue, READ, __GFP_RECLAIM); in ide_cdrom_reset()
307 rq->cmd_type = REQ_TYPE_DRV_PRIV; in ide_cdrom_reset()
308 rq->cmd_flags = REQ_QUIET; in ide_cdrom_reset()
309 ret = blk_execute_rq(drive->queue, cd->disk, rq, 0); in ide_cdrom_reset()
310 blk_put_request(rq); in ide_cdrom_reset()
Dpdc202xx_old.c152 struct request *rq = hwif->rq; in pdc202xx_dma_start() local
159 word_count = (blk_rq_sectors(rq) << 8); in pdc202xx_dma_start()
160 word_count = (rq_data_dir(rq) == READ) ? in pdc202xx_dma_start()
Dide-dma.c108 blk_rq_sectors(cmd->rq) << 9); in ide_dma_intr()
492 if (hwif->rq) in ide_dma_timeout_retry()
493 hwif->rq->errors = 0; in ide_dma_timeout_retry()
/linux-4.4.14/drivers/scsi/device_handler/
Dscsi_dh_alua.c106 struct request *rq; in get_alua_req() local
109 rq = blk_get_request(q, rw, GFP_NOIO); in get_alua_req()
111 if (IS_ERR(rq)) { in get_alua_req()
116 blk_rq_set_block_pc(rq); in get_alua_req()
118 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { in get_alua_req()
119 blk_put_request(rq); in get_alua_req()
125 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | in get_alua_req()
127 rq->retries = ALUA_FAILOVER_RETRIES; in get_alua_req()
128 rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ; in get_alua_req()
130 return rq; in get_alua_req()
[all …]
Dscsi_dh_emc.c265 struct request *rq; in get_req() local
268 rq = blk_get_request(sdev->request_queue, in get_req()
270 if (IS_ERR(rq)) { in get_req()
275 blk_rq_set_block_pc(rq); in get_req()
276 rq->cmd_len = COMMAND_SIZE(cmd); in get_req()
277 rq->cmd[0] = cmd; in get_req()
282 rq->cmd[1] = 0x10; in get_req()
283 rq->cmd[4] = len; in get_req()
287 rq->cmd[1] = 0x10; in get_req()
288 rq->cmd[8] = len; in get_req()
[all …]
Dscsi_dh_rdac.c265 struct request *rq; in get_rdac_req() local
268 rq = blk_get_request(q, rw, GFP_NOIO); in get_rdac_req()
270 if (IS_ERR(rq)) { in get_rdac_req()
275 blk_rq_set_block_pc(rq); in get_rdac_req()
277 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { in get_rdac_req()
278 blk_put_request(rq); in get_rdac_req()
284 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | in get_rdac_req()
286 rq->retries = RDAC_RETRIES; in get_rdac_req()
287 rq->timeout = RDAC_TIMEOUT; in get_rdac_req()
289 return rq; in get_rdac_req()
[all …]
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx5/core/
Den_rx.c38 static inline int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, in mlx5e_alloc_rx_wqe() argument
44 skb = netdev_alloc_skb(rq->netdev, rq->wqe_sz); in mlx5e_alloc_rx_wqe()
48 dma_addr = dma_map_single(rq->pdev, in mlx5e_alloc_rx_wqe()
52 rq->wqe_sz, in mlx5e_alloc_rx_wqe()
55 if (unlikely(dma_mapping_error(rq->pdev, dma_addr))) in mlx5e_alloc_rx_wqe()
63 rq->skb[ix] = skb; in mlx5e_alloc_rx_wqe()
73 bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq) in mlx5e_post_rx_wqes() argument
75 struct mlx5_wq_ll *wq = &rq->wq; in mlx5e_post_rx_wqes()
77 if (unlikely(!test_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state))) in mlx5e_post_rx_wqes()
83 if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, wq->head))) in mlx5e_post_rx_wqes()
[all …]
Den_main.c54 struct mlx5e_rq_param rq; member
155 rq_stats = &priv->channel[i]->rq.stats; in mlx5e_update_stats()
311 struct mlx5e_rq *rq) in mlx5e_create_rq() argument
323 err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq, in mlx5e_create_rq()
324 &rq->wq_ctrl); in mlx5e_create_rq()
328 rq->wq.db = &rq->wq.db[MLX5_RCV_DBR]; in mlx5e_create_rq()
330 wq_sz = mlx5_wq_ll_get_size(&rq->wq); in mlx5e_create_rq()
331 rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL, in mlx5e_create_rq()
333 if (!rq->skb) { in mlx5e_create_rq()
338 rq->wqe_sz = (priv->params.lro_en) ? priv->params.lro_wqe_sz : in mlx5e_create_rq()
[all …]
Den_txrx.c64 busy |= mlx5e_poll_rx_cq(&c->rq.cq, budget); in mlx5e_napi_poll()
66 busy |= mlx5e_post_rx_wqes(&c->rq); in mlx5e_napi_poll()
81 mlx5e_cq_arm(&c->rq.cq); in mlx5e_napi_poll()
/linux-4.4.14/include/trace/events/
Dblock.h66 TP_PROTO(struct request_queue *q, struct request *rq),
68 TP_ARGS(q, rq),
76 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
80 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
81 __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
82 0 : blk_rq_pos(rq);
83 __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ?
84 0 : blk_rq_sectors(rq);
85 __entry->errors = rq->errors;
87 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
[all …]
/linux-4.4.14/drivers/staging/rdma/hfi1/
Dsrq.c78 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in hfi1_post_srq_receive()
84 spin_lock_irqsave(&srq->rq.lock, flags); in hfi1_post_srq_receive()
85 wq = srq->rq.wq; in hfi1_post_srq_receive()
87 if (next >= srq->rq.size) in hfi1_post_srq_receive()
90 spin_unlock_irqrestore(&srq->rq.lock, flags); in hfi1_post_srq_receive()
96 wqe = get_rwqe_ptr(&srq->rq, wq->head); in hfi1_post_srq_receive()
104 spin_unlock_irqrestore(&srq->rq.lock, flags); in hfi1_post_srq_receive()
149 srq->rq.size = srq_init_attr->attr.max_wr + 1; in hfi1_create_srq()
150 srq->rq.max_sge = srq_init_attr->attr.max_sge; in hfi1_create_srq()
151 sz = sizeof(struct ib_sge) * srq->rq.max_sge + in hfi1_create_srq()
[all …]
Druc.c160 struct hfi1_rq *rq; in hfi1_get_rwqe() local
171 rq = &srq->rq; in hfi1_get_rwqe()
175 rq = &qp->r_rq; in hfi1_get_rwqe()
178 spin_lock_irqsave(&rq->lock, flags); in hfi1_get_rwqe()
184 wq = rq->wq; in hfi1_get_rwqe()
187 if (tail >= rq->size) in hfi1_get_rwqe()
195 wqe = get_rwqe_ptr(rq, tail); in hfi1_get_rwqe()
201 if (++tail >= rq->size) in hfi1_get_rwqe()
220 if (n >= rq->size) in hfi1_get_rwqe()
223 n += rq->size - tail; in hfi1_get_rwqe()
[all …]
/linux-4.4.14/drivers/infiniband/hw/qib/
Dqib_srq.c61 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in qib_post_srq_receive()
67 spin_lock_irqsave(&srq->rq.lock, flags); in qib_post_srq_receive()
68 wq = srq->rq.wq; in qib_post_srq_receive()
70 if (next >= srq->rq.size) in qib_post_srq_receive()
73 spin_unlock_irqrestore(&srq->rq.lock, flags); in qib_post_srq_receive()
79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in qib_post_srq_receive()
87 spin_unlock_irqrestore(&srq->rq.lock, flags); in qib_post_srq_receive()
132 srq->rq.size = srq_init_attr->attr.max_wr + 1; in qib_create_srq()
133 srq->rq.max_sge = srq_init_attr->attr.max_sge; in qib_create_srq()
134 sz = sizeof(struct ib_sge) * srq->rq.max_sge + in qib_create_srq()
[all …]
Dqib_ruc.c142 struct qib_rq *rq; in qib_get_rwqe() local
153 rq = &srq->rq; in qib_get_rwqe()
157 rq = &qp->r_rq; in qib_get_rwqe()
160 spin_lock_irqsave(&rq->lock, flags); in qib_get_rwqe()
166 wq = rq->wq; in qib_get_rwqe()
169 if (tail >= rq->size) in qib_get_rwqe()
177 wqe = get_rwqe_ptr(rq, tail); in qib_get_rwqe()
183 if (++tail >= rq->size) in qib_get_rwqe()
202 if (n >= rq->size) in qib_get_rwqe()
205 n += rq->size - tail; in qib_get_rwqe()
[all …]
/linux-4.4.14/drivers/staging/rdma/ipath/
Dipath_srq.c61 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in ipath_post_srq_receive()
67 spin_lock_irqsave(&srq->rq.lock, flags); in ipath_post_srq_receive()
68 wq = srq->rq.wq; in ipath_post_srq_receive()
70 if (next >= srq->rq.size) in ipath_post_srq_receive()
73 spin_unlock_irqrestore(&srq->rq.lock, flags); in ipath_post_srq_receive()
79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in ipath_post_srq_receive()
87 spin_unlock_irqrestore(&srq->rq.lock, flags); in ipath_post_srq_receive()
135 srq->rq.size = srq_init_attr->attr.max_wr + 1; in ipath_create_srq()
136 srq->rq.max_sge = srq_init_attr->attr.max_sge; in ipath_create_srq()
137 sz = sizeof(struct ib_sge) * srq->rq.max_sge + in ipath_create_srq()
[all …]
Dipath_ud.c55 struct ipath_rq *rq; in ipath_ud_loopback() local
109 rq = &srq->rq; in ipath_ud_loopback()
113 rq = &qp->r_rq; in ipath_ud_loopback()
121 spin_lock_irqsave(&rq->lock, flags); in ipath_ud_loopback()
122 wq = rq->wq; in ipath_ud_loopback()
125 if (tail >= rq->size) in ipath_ud_loopback()
128 spin_unlock_irqrestore(&rq->lock, flags); in ipath_ud_loopback()
132 wqe = get_rwqe_ptr(rq, tail); in ipath_ud_loopback()
135 spin_unlock_irqrestore(&rq->lock, flags); in ipath_ud_loopback()
141 spin_unlock_irqrestore(&rq->lock, flags); in ipath_ud_loopback()
[all …]
Dipath_ruc.c168 struct ipath_rq *rq; in ipath_get_rwqe() local
179 rq = &srq->rq; in ipath_get_rwqe()
183 rq = &qp->r_rq; in ipath_get_rwqe()
186 spin_lock_irqsave(&rq->lock, flags); in ipath_get_rwqe()
192 wq = rq->wq; in ipath_get_rwqe()
195 if (tail >= rq->size) in ipath_get_rwqe()
204 wqe = get_rwqe_ptr(rq, tail); in ipath_get_rwqe()
205 if (++tail >= rq->size) in ipath_get_rwqe()
224 if (n >= rq->size) in ipath_get_rwqe()
227 n += rq->size - tail; in ipath_get_rwqe()
[all …]
Dipath_verbs.h328 struct ipath_rq rq; member
499 static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq, in get_rwqe_ptr() argument
503 ((char *) rq->wq->wq + in get_rwqe_ptr()
505 rq->max_sge * sizeof(struct ib_sge)) * n); in get_rwqe_ptr()
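
get_rwqe_ptr() above (and its qib/hfi1 twins earlier in the results) indexes a ring of variable-sized receive WQEs: each entry is a fixed header followed by max_sge scatter/gather entries, so entry n sits at base + n * stride. A self-contained sketch of that layout arithmetic, with simplified stand-in types:

/* Sketch of the stride arithmetic in get_rwqe_ptr(); simplified stand-in types. */
#include <stdio.h>
#include <stdlib.h>

struct sge  { unsigned long addr; unsigned int length; unsigned int key; };
struct rwqe { unsigned long wr_id; unsigned int num_sge; /* SGEs follow in memory */ };

/* Entry n of a ring whose entries are a header plus max_sge scatter/gather elements. */
static struct rwqe *get_rwqe_ptr(void *base, unsigned int max_sge, unsigned int n)
{
        size_t stride = sizeof(struct rwqe) + max_sge * sizeof(struct sge);

        return (struct rwqe *)((char *)base + stride * n);
}

int main(void)
{
        unsigned int max_sge = 4, size = 8;
        void *ring = calloc(size, sizeof(struct rwqe) + max_sge * sizeof(struct sge));

        get_rwqe_ptr(ring, max_sge, 5)->wr_id = 0xabcd;
        printf("wr_id at slot 5: 0x%lx\n", get_rwqe_ptr(ring, max_sge, 5)->wr_id);
        free(ring);
        return 0;
}
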
/linux-4.4.14/drivers/s390/char/
Draw3270.c136 struct raw3270_request *rq; in raw3270_request_alloc() local
139 rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA); in raw3270_request_alloc()
140 if (!rq) in raw3270_request_alloc()
145 rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA); in raw3270_request_alloc()
146 if (!rq->buffer) { in raw3270_request_alloc()
147 kfree(rq); in raw3270_request_alloc()
151 rq->size = size; in raw3270_request_alloc()
152 INIT_LIST_HEAD(&rq->list); in raw3270_request_alloc()
157 rq->ccw.cda = __pa(rq->buffer); in raw3270_request_alloc()
158 rq->ccw.flags = CCW_FLAG_SLI; in raw3270_request_alloc()
[all …]
Dfs3270.c47 fs3270_wake_up(struct raw3270_request *rq, void *data) in fs3270_wake_up() argument
63 fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq) in fs3270_do_io() argument
69 rq->callback = fs3270_wake_up; in fs3270_do_io()
70 rq->callback_data = &fp->wait; in fs3270_do_io()
80 rc = raw3270_start(view, rq); in fs3270_do_io()
83 wait_event(fp->wait, raw3270_request_final(rq)); in fs3270_do_io()
93 fs3270_reset_callback(struct raw3270_request *rq, void *data) in fs3270_reset_callback() argument
97 fp = (struct fs3270 *) rq->view; in fs3270_reset_callback()
98 raw3270_request_reset(rq); in fs3270_reset_callback()
103 fs3270_restore_callback(struct raw3270_request *rq, void *data) in fs3270_restore_callback() argument
[all …]
Dcon3270.c192 con3270_write_callback(struct raw3270_request *rq, void *data) in con3270_write_callback() argument
194 raw3270_request_reset(rq); in con3270_write_callback()
195 xchg(&((struct con3270 *) rq->view)->write, rq); in con3270_write_callback()
350 con3270_read_callback(struct raw3270_request *rq, void *data) in con3270_read_callback() argument
352 raw3270_get_view(rq->view); in con3270_read_callback()
354 tasklet_schedule(&((struct con3270 *) rq->view)->readlet); in con3270_read_callback()
404 con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb) in con3270_irq() argument
410 if (rq) { in con3270_irq()
412 rq->rc = -EIO; in con3270_irq()
415 rq->rescnt = irb->scsw.cmd.count; in con3270_irq()
Draw3270.h121 raw3270_request_final(struct raw3270_request *rq) in raw3270_request_final() argument
123 return list_empty(&rq->list); in raw3270_request_final()
Dtty3270.c325 tty3270_write_callback(struct raw3270_request *rq, void *data) in tty3270_write_callback() argument
327 struct tty3270 *tp = container_of(rq->view, struct tty3270, view); in tty3270_write_callback()
329 if (rq->rc != 0) { in tty3270_write_callback()
334 raw3270_request_reset(rq); in tty3270_write_callback()
335 xchg(&tp->write, rq); in tty3270_write_callback()
588 tty3270_read_callback(struct raw3270_request *rq, void *data) in tty3270_read_callback() argument
590 struct tty3270 *tp = container_of(rq->view, struct tty3270, view); in tty3270_read_callback()
591 raw3270_get_view(rq->view); in tty3270_read_callback()
646 tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) in tty3270_irq() argument
656 if (rq) { in tty3270_irq()
[all …]
/linux-4.4.14/drivers/usb/misc/
Duss720.c95 struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count); in destroy_async() local
96 struct parport_uss720_private *priv = rq->priv; in destroy_async()
99 if (likely(rq->urb)) in destroy_async()
100 usb_free_urb(rq->urb); in destroy_async()
101 kfree(rq->dr); in destroy_async()
103 list_del_init(&rq->asynclist); in destroy_async()
105 kfree(rq); in destroy_async()
113 struct uss720_async_request *rq; in async_complete() local
118 rq = urb->context; in async_complete()
119 priv = rq->priv; in async_complete()
[all …]
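
The uss720.c hits show the async request's lifetime managed by a reference count whose final put runs destroy_async() to free the URB, its associated allocations and the request itself. A much-simplified, single-threaded sketch of that get/put-with-release pattern (a plain counter stands in for struct kref, no locking), for illustration only:

/* Sketch of the get/put lifetime pattern around destroy_async(); a plain counter
 * stands in for struct kref, single-threaded for illustration. */
#include <stdio.h>
#include <stdlib.h>

struct async_request {
        int ref_count;
        char *payload;          /* stands in for the urb/setup buffers */
};

static void destroy_async(struct async_request *rq)
{
        free(rq->payload);      /* release owned resources, then the request */
        free(rq);
        printf("request destroyed\n");
}

static void async_get(struct async_request *rq) { rq->ref_count++; }

static void async_put(struct async_request *rq)
{
        if (--rq->ref_count == 0)
                destroy_async(rq);
}

int main(void)
{
        struct async_request *rq = calloc(1, sizeof(*rq));

        rq->ref_count = 1;      /* submitter's reference */
        rq->payload = malloc(16);

        async_get(rq);          /* completion path takes its own reference */
        async_put(rq);          /* completion path done */
        async_put(rq);          /* submitter done: count hits zero, request freed */
        return 0;
}
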
/linux-4.4.14/drivers/char/
Draw.c211 struct raw_config_request rq; in raw_ctl_ioctl() local
217 if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) in raw_ctl_ioctl()
220 return bind_set(rq.raw_minor, rq.block_major, rq.block_minor); in raw_ctl_ioctl()
223 if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) in raw_ctl_ioctl()
226 err = bind_get(rq.raw_minor, &dev); in raw_ctl_ioctl()
230 rq.block_major = MAJOR(dev); in raw_ctl_ioctl()
231 rq.block_minor = MINOR(dev); in raw_ctl_ioctl()
233 if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) in raw_ctl_ioctl()
253 struct raw32_config_request rq; in raw_ctl_compat_ioctl() local
259 if (copy_from_user(&rq, user_req, sizeof(rq))) in raw_ctl_compat_ioctl()
[all …]
/linux-4.4.14/drivers/gpu/drm/amd/scheduler/
Dgpu_scheduler.c40 static void amd_sched_rq_init(struct amd_sched_rq *rq) in amd_sched_rq_init() argument
42 spin_lock_init(&rq->lock); in amd_sched_rq_init()
43 INIT_LIST_HEAD(&rq->entities); in amd_sched_rq_init()
44 rq->current_entity = NULL; in amd_sched_rq_init()
47 static void amd_sched_rq_add_entity(struct amd_sched_rq *rq, in amd_sched_rq_add_entity() argument
50 spin_lock(&rq->lock); in amd_sched_rq_add_entity()
51 list_add_tail(&entity->list, &rq->entities); in amd_sched_rq_add_entity()
52 spin_unlock(&rq->lock); in amd_sched_rq_add_entity()
55 static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq, in amd_sched_rq_remove_entity() argument
58 spin_lock(&rq->lock); in amd_sched_rq_remove_entity()
[all …]
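
The gpu_scheduler.c hits describe amd_sched_rq as a lock-protected list of entities plus a current_entity cursor, which suggests a round-robin pick that resumes just after the previous selection. A much-simplified sketch of that selection policy (a fixed array instead of a linked list, no locking), purely for illustration:

/* Sketch of a round-robin pick with a current_entity cursor; array instead of a
 * linked list, no locking, illustrative only. */
#include <stdio.h>

#define NUM_ENTITIES 3

struct entity { const char *name; int has_job; };

struct sched_rq {
        struct entity *entities[NUM_ENTITIES];
        int current_entity;     /* index of the last entity that was picked */
};

static struct entity *rq_select_entity(struct sched_rq *rq)
{
        for (int i = 1; i <= NUM_ENTITIES; i++) {
                int idx = (rq->current_entity + i) % NUM_ENTITIES;

                if (rq->entities[idx]->has_job) {
                        rq->current_entity = idx;
                        return rq->entities[idx];
                }
        }
        return NULL;            /* nothing runnable */
}

int main(void)
{
        struct entity a = { "a", 1 }, b = { "b", 0 }, c = { "c", 1 };
        struct sched_rq rq = { { &a, &b, &c }, 0 };

        for (int i = 0; i < 4; i++) {
                struct entity *e = rq_select_entity(&rq);
                printf("pick %d: %s\n", i, e ? e->name : "(idle)");
        }
        return 0;
}
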
Dgpu_scheduler.h46 struct amd_sched_rq *rq; member
132 struct amd_sched_rq *rq,
/linux-4.4.14/drivers/net/
Dvirtio_net.c105 struct receive_queue *rq; member
193 static void give_pages(struct receive_queue *rq, struct page *page) in give_pages() argument
199 end->private = (unsigned long)rq->pages; in give_pages()
200 rq->pages = page; in give_pages()
203 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) in get_a_page() argument
205 struct page *p = rq->pages; in get_a_page()
208 rq->pages = (struct page *)p->private; in get_a_page()
247 struct receive_queue *rq, in page_to_skb() argument
315 give_pages(rq, page); in page_to_skb()
332 struct receive_queue *rq, in receive_big() argument
[all …]
Difb.c46 struct sk_buff_head rq; member
76 skb_queue_splice_tail_init(&txp->rq, &txp->tq); in ifb_ri_tasklet()
114 skb = skb_peek(&txp->rq); in ifb_ri_tasklet()
177 __skb_queue_head_init(&txp->rq); in ifb_dev_init()
210 __skb_queue_purge(&txp->rq); in ifb_dev_free()
255 if (skb_queue_len(&txp->rq) >= dev->tx_queue_len) in ifb_xmit()
258 __skb_queue_tail(&txp->rq, skb); in ifb_xmit()
/linux-4.4.14/drivers/infiniband/hw/cxgb4/
Dt4.h338 struct t4_rq rq; member
346 return wq->rq.in_use; in t4_rqes_posted()
351 return wq->rq.in_use == 0; in t4_rq_empty()
356 return wq->rq.in_use == (wq->rq.size - 1); in t4_rq_full()
361 return wq->rq.size - 1 - wq->rq.in_use; in t4_rq_avail()
366 wq->rq.in_use++; in t4_rq_produce()
367 if (++wq->rq.pidx == wq->rq.size) in t4_rq_produce()
368 wq->rq.pidx = 0; in t4_rq_produce()
369 wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); in t4_rq_produce()
370 if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS) in t4_rq_produce()
[all …]
Dqp.c157 wq->rq.memsize, wq->rq.queue, in destroy_qp()
158 dma_unmap_addr(&wq->rq, mapping)); in destroy_qp()
160 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); in destroy_qp()
161 kfree(wq->rq.sw_rq); in destroy_qp()
163 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in destroy_qp()
212 wq->rq.qid = c4iw_get_qpid(rdev, uctx); in create_qp()
213 if (!wq->rq.qid) { in create_qp()
226 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq, in create_qp()
228 if (!wq->rq.sw_rq) { in create_qp()
237 wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16)); in create_qp()
[all …]
Ddevice.c138 le.qid = wq->rq.qid; in c4iw_log_wr_stats()
140 le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts; in c4iw_log_wr_stats()
141 le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts; in c4iw_log_wr_stats()
257 qp->wq.sq.qid, qp->wq.rq.qid, in dump_qp()
281 qp->wq.sq.qid, qp->wq.rq.qid, in dump_qp()
295 qp->wq.sq.qid, qp->wq.rq.qid, in dump_qp()
806 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start, in c4iw_rdev_open()
807 rdev->lldi.vr->rq.size, in c4iw_rdev_open()
827 rdev->stats.rqt.total = rdev->lldi.vr->rq.size; in c4iw_rdev_open()
918 infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && in rdma_supported()
[all …]
Dcq.c203 int in_use = wq->rq.in_use - count; in c4iw_flush_rq()
207 wq, cq, wq->rq.in_use, count); in c4iw_flush_rq()
582 if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { in poll_cq()
645 PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx); in poll_cq()
646 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id; in poll_cq()
/linux-4.4.14/include/linux/
Dblkdev.h581 #define blk_noretry_request(rq) \ argument
582 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
585 #define blk_account_rq(rq) \ argument
586 (((rq)->cmd_flags & REQ_STARTED) && \
587 ((rq)->cmd_type == REQ_TYPE_FS))
589 #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) argument
590 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) argument
592 #define blk_queued_rq(rq) (!list_empty(&(rq)->queuelist)) argument
596 #define rq_data_dir(rq) ((int)((rq)->cmd_flags & 1)) argument
620 static inline bool rq_is_sync(struct request *rq) in rq_is_sync() argument
[all …]
Dblk-mq.h85 struct request *rq; member
188 void blk_mq_free_request(struct request *rq);
189 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
201 u32 blk_mq_unique_tag(struct request *rq);
216 int blk_mq_request_started(struct request *rq);
217 void blk_mq_start_request(struct request *rq);
218 void blk_mq_end_request(struct request *rq, int error);
219 void __blk_mq_end_request(struct request *rq, int error);
221 void blk_mq_requeue_request(struct request *rq);
222 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
[all …]
Dblktrace_api.h60 extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
76 # define blk_add_driver_data(q, rq, data, len) do {} while (0) argument
106 static inline int blk_cmd_buf_len(struct request *rq) in blk_cmd_buf_len() argument
108 return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1; in blk_cmd_buf_len()
111 extern void blk_dump_cmd(char *buf, struct request *rq);
Delevator.h139 extern int elv_set_request(struct request_queue *q, struct request *rq,
203 #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) argument
207 #define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist) argument
Dblk-cgroup.h477 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) in blk_rq_set_rl() argument
479 rq->rl = rl; in blk_rq_set_rl()
488 static inline struct request_list *blk_rq_rl(struct request *rq) in blk_rq_rl() argument
490 return rq->rl; in blk_rq_rl()
775 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } in blk_rq_set_rl() argument
776 static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } in blk_rq_rl() argument
Dide.h51 #define ata_pm_request(rq) \ argument
52 ((rq)->cmd_type == REQ_TYPE_ATA_PM_SUSPEND || \
53 (rq)->cmd_type == REQ_TYPE_ATA_PM_RESUME)
322 struct request *rq; /* copy of request */ member
350 struct request *rq; member
501 struct request *rq; /* current request */ member
767 struct request *rq; member
1147 void ide_prep_sense(ide_drive_t *drive, struct request *rq);
1181 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
Dmii.h45 static inline struct mii_ioctl_data *if_mii(struct ifreq *rq) in if_mii() argument
47 return (struct mii_ioctl_data *) &rq->ifr_ifru; in if_mii()
Disdn_ppp.h162 struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */ member
/linux-4.4.14/drivers/isdn/mISDN/
Dstack.c429 struct channel_req rq; in connect_layer1() local
445 rq.protocol = protocol; in connect_layer1()
446 rq.adr.channel = adr->channel; in connect_layer1()
447 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); in connect_layer1()
466 struct channel_req rq, rq2; in connect_Bstack() local
478 rq.protocol = protocol; in connect_Bstack()
479 rq.adr = *adr; in connect_Bstack()
480 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); in connect_Bstack()
483 ch->recv = rq.ch->send; in connect_Bstack()
484 ch->peer = rq.ch; in connect_Bstack()
[all …]
Dl1oip_core.c994 open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq) in open_dchannel() argument
999 if (rq->protocol == ISDN_P_NONE) in open_dchannel()
1002 (dch->dev.D.protocol != rq->protocol)) { in open_dchannel()
1005 __func__, dch->dev.D.protocol, rq->protocol); in open_dchannel()
1007 if (dch->dev.D.protocol != rq->protocol) in open_dchannel()
1008 dch->dev.D.protocol = rq->protocol; in open_dchannel()
1014 rq->ch = &dch->dev.D; in open_dchannel()
1021 open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq) in open_bchannel() argument
1026 if (!test_channelmap(rq->adr.channel, dch->dev.channelmap)) in open_bchannel()
1028 if (rq->protocol == ISDN_P_NONE) in open_bchannel()
[all …]
/linux-4.4.14/drivers/block/
Dnull_blk.c17 struct request *rq; member
222 if (cmd->rq) in end_cmd()
223 q = cmd->rq->q; in end_cmd()
227 blk_mq_end_request(cmd->rq, 0); in end_cmd()
230 INIT_LIST_HEAD(&cmd->rq->queuelist); in end_cmd()
231 blk_end_request_all(cmd->rq, 0); in end_cmd()
264 static void null_softirq_done_fn(struct request *rq) in null_softirq_done_fn() argument
267 end_cmd(blk_mq_rq_to_pdu(rq)); in null_softirq_done_fn()
269 end_cmd(rq->special); in null_softirq_done_fn()
279 blk_mq_complete_request(cmd->rq, cmd->rq->errors); in null_handle_cmd()
[all …]
Dosdblk.c97 struct request *rq; /* blk layer request */ member
249 __blk_end_request_all(orq->rq, ret); in osdblk_osd_complete()
300 struct request *rq; in osdblk_rq_fn() local
307 rq = blk_fetch_request(q); in osdblk_rq_fn()
308 if (!rq) in osdblk_rq_fn()
312 if (rq->cmd_type != REQ_TYPE_FS) { in osdblk_rq_fn()
313 blk_end_request_all(rq, 0); in osdblk_rq_fn()
324 do_flush = rq->cmd_flags & REQ_FLUSH; in osdblk_rq_fn()
325 do_write = (rq_data_dir(rq) == WRITE); in osdblk_rq_fn()
329 bio = bio_chain_clone(rq->bio, GFP_ATOMIC); in osdblk_rq_fn()
[all …]
Dloop.c283 static int lo_write_simple(struct loop_device *lo, struct request *rq, in lo_write_simple() argument
290 rq_for_each_segment(bvec, rq, iter) { in lo_write_simple()
305 static int lo_write_transfer(struct loop_device *lo, struct request *rq, in lo_write_transfer() argument
317 rq_for_each_segment(bvec, rq, iter) { in lo_write_transfer()
335 static int lo_read_simple(struct loop_device *lo, struct request *rq, in lo_read_simple() argument
343 rq_for_each_segment(bvec, rq, iter) { in lo_read_simple()
354 __rq_for_each_bio(bio, rq) in lo_read_simple()
364 static int lo_read_transfer(struct loop_device *lo, struct request *rq, in lo_read_transfer() argument
378 rq_for_each_segment(bvec, rq, iter) { in lo_read_transfer()
402 __rq_for_each_bio(bio, rq) in lo_read_transfer()
[all …]
Dsx8.c262 struct request *rq; member
554 struct request *rq; in carm_get_special() local
570 rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL); in carm_get_special()
571 if (IS_ERR(rq)) { in carm_get_special()
578 crq->rq = rq; in carm_get_special()
623 crq->rq->cmd_type = REQ_TYPE_DRV_PRIV; in carm_array_info()
624 crq->rq->special = crq; in carm_array_info()
625 blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); in carm_array_info()
664 crq->rq->cmd_type = REQ_TYPE_DRV_PRIV; in carm_send_special()
665 crq->rq->special = crq; in carm_send_special()
[all …]
Dxen-blkfront.c140 struct request_queue *rq; member
750 struct blkfront_info *info = qd->rq->rq_disk->private_data; in blkif_queue_rq()
752 blk_mq_start_request(qd->rq); in blkif_queue_rq()
757 if (blkif_request_flush_invalid(qd->rq, info)) in blkif_queue_rq()
760 if (blkif_queue_request(qd->rq)) in blkif_queue_rq()
786 struct request_queue *rq; in xlvbd_init_blk_queue() local
800 rq = blk_mq_init_queue(&info->tag_set); in xlvbd_init_blk_queue()
801 if (IS_ERR(rq)) { in xlvbd_init_blk_queue()
806 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); in xlvbd_init_blk_queue()
809 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); in xlvbd_init_blk_queue()
[all …]
Dcciss.c1828 static void cciss_softirq_done(struct request *rq) in cciss_softirq_done() argument
1830 CommandList_struct *c = rq->completion_data; in cciss_softirq_done()
1859 dev_dbg(&h->pdev->dev, "Done with %p\n", rq); in cciss_softirq_done()
1862 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) in cciss_softirq_done()
1863 rq->resid_len = c->err_info->ResidualCnt; in cciss_softirq_done()
1865 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); in cciss_softirq_done()
3085 if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) in evaluate_target_status()
3094 if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) in evaluate_target_status()
3105 (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)) in evaluate_target_status()
3109 *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC); in evaluate_target_status()
[all …]
Damiflop.c1344 struct request *rq = NULL; in set_next_request() local
1361 rq = blk_fetch_request(q); in set_next_request()
1362 if (rq) in set_next_request()
1370 return rq; in set_next_request()
1375 struct request *rq; in redo_fd_request() local
1384 rq = set_next_request(); in redo_fd_request()
1385 if (!rq) { in redo_fd_request()
1390 floppy = rq->rq_disk->private_data; in redo_fd_request()
1395 for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) { in redo_fd_request()
1398 blk_rq_pos(rq), cnt, in redo_fd_request()
[all …]
Dpktcdvd.c703 struct request *rq; in pkt_generic_packet() local
706 rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? in pkt_generic_packet()
708 if (IS_ERR(rq)) in pkt_generic_packet()
709 return PTR_ERR(rq); in pkt_generic_packet()
710 blk_rq_set_block_pc(rq); in pkt_generic_packet()
713 ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, in pkt_generic_packet()
719 rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]); in pkt_generic_packet()
720 memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); in pkt_generic_packet()
722 rq->timeout = 60*HZ; in pkt_generic_packet()
724 rq->cmd_flags |= REQ_QUIET; in pkt_generic_packet()
[all …]
Dloop.h70 struct request *rq; member
Drbd.c301 struct request *rq; /* block request */ member
2179 img_request->rq = NULL; in rbd_img_request_create()
2321 rbd_assert(img_request->rq != NULL); in rbd_img_obj_end_request()
2323 more = blk_update_request(img_request->rq, result, xferred); in rbd_img_obj_end_request()
2325 __blk_mq_end_request(img_request->rq, result); in rbd_img_obj_end_request()
3359 struct request *rq = blk_mq_rq_from_pdu(work); in rbd_queue_workfn() local
3360 struct rbd_device *rbd_dev = rq->q->queuedata; in rbd_queue_workfn()
3363 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; in rbd_queue_workfn()
3364 u64 length = blk_rq_bytes(rq); in rbd_queue_workfn()
3369 if (rq->cmd_type != REQ_TYPE_FS) { in rbd_queue_workfn()
[all …]
/linux-4.4.14/drivers/mtd/
Dmtd_blkdevs.c47 blk_cleanup_queue(dev->rq); in blktrans_dev_release()
129 struct request_queue *rq = dev->rq; in mtd_blktrans_work() local
133 spin_lock_irq(rq->queue_lock); in mtd_blktrans_work()
139 if (!req && !(req = blk_fetch_request(rq))) { in mtd_blktrans_work()
141 spin_unlock_irq(rq->queue_lock); in mtd_blktrans_work()
145 spin_lock_irq(rq->queue_lock); in mtd_blktrans_work()
156 spin_unlock_irq(rq->queue_lock); in mtd_blktrans_work()
162 spin_lock_irq(rq->queue_lock); in mtd_blktrans_work()
170 spin_unlock_irq(rq->queue_lock); in mtd_blktrans_work()
173 static void mtd_blktrans_request(struct request_queue *rq) in mtd_blktrans_request() argument
[all …]
/linux-4.4.14/drivers/net/vmxnet3/
Dvmxnet3_drv.c561 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, in vmxnet3_rq_alloc_rx_buf() argument
565 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; in vmxnet3_rq_alloc_rx_buf()
566 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; in vmxnet3_rq_alloc_rx_buf()
582 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
593 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
607 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
617 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf()
1158 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, in vmxnet3_rx_error() argument
1161 rq->stats.drop_err++; in vmxnet3_rx_error()
1163 rq->stats.drop_fcs++; in vmxnet3_rx_error()
[all …]
Dvmxnet3_ethtool.c413 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i]; in vmxnet3_get_regs() local
420 buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[0].basePA); in vmxnet3_get_regs()
421 buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[0].basePA); in vmxnet3_get_regs()
422 buf[j++] = rq->rx_ring[0].size; in vmxnet3_get_regs()
423 buf[j++] = rq->rx_ring[0].next2fill; in vmxnet3_get_regs()
424 buf[j++] = rq->rx_ring[0].next2comp; in vmxnet3_get_regs()
425 buf[j++] = rq->rx_ring[0].gen; in vmxnet3_get_regs()
427 buf[j++] = VMXNET3_GET_ADDR_LO(rq->rx_ring[1].basePA); in vmxnet3_get_regs()
428 buf[j++] = VMXNET3_GET_ADDR_HI(rq->rx_ring[1].basePA); in vmxnet3_get_regs()
429 buf[j++] = rq->rx_ring[1].size; in vmxnet3_get_regs()
[all …]
Dvmxnet3_int.h384 #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ argument
385 ((rq)->rx_ring[ring_idx].size >> 3)
/linux-4.4.14/drivers/scsi/
Dhpsa.h462 struct reply_queue_buffer *rq = &h->reply_queue[q]; in SA5_performant_completed() local
478 if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) { in SA5_performant_completed()
479 register_value = rq->head[rq->current_entry]; in SA5_performant_completed()
480 rq->current_entry++; in SA5_performant_completed()
486 if (rq->current_entry == h->max_commands) { in SA5_performant_completed()
487 rq->current_entry = 0; in SA5_performant_completed()
488 rq->wraparound ^= 1; in SA5_performant_completed()
556 struct reply_queue_buffer *rq = &h->reply_queue[q]; in SA5_ioaccel_mode1_completed() local
560 register_value = rq->head[rq->current_entry]; in SA5_ioaccel_mode1_completed()
562 rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED; in SA5_ioaccel_mode1_completed()
[all …]
Dsd.c707 struct request *rq = cmd->request; in sd_setup_discard_cmnd() local
709 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); in sd_setup_discard_cmnd()
710 sector_t sector = blk_rq_pos(rq); in sd_setup_discard_cmnd()
711 unsigned int nr_sectors = blk_rq_sectors(rq); in sd_setup_discard_cmnd()
712 unsigned int nr_bytes = blk_rq_bytes(rq); in sd_setup_discard_cmnd()
768 rq->completion_data = page; in sd_setup_discard_cmnd()
769 rq->timeout = SD_TIMEOUT; in sd_setup_discard_cmnd()
782 blk_add_request_payload(rq, page, len); in sd_setup_discard_cmnd()
784 rq->__data_len = nr_bytes; in sd_setup_discard_cmnd()
832 struct request *rq = cmd->request; in sd_setup_write_same_cmnd() local
[all …]
Dsg.c146 struct request *rq; member
191 static void sg_rq_end_io(struct request *rq, int uptodate);
792 if (srp->rq->cmd != srp->rq->__cmd) in sg_common_write()
793 kfree(srp->rq->cmd); in sg_common_write()
795 blk_end_request_all(srp->rq, -EIO); in sg_common_write()
796 srp->rq = NULL; in sg_common_write()
810 srp->rq->timeout = timeout; in sg_common_write()
813 srp->rq, at_head, sg_rq_end_io); in sg_common_write()
1286 sg_rq_end_io(struct request *rq, int uptodate) in sg_rq_end_io() argument
1288 struct sg_request *srp = rq->end_io_data; in sg_rq_end_io()
[all …]
Dsr.c390 struct request *rq = SCpnt->request; in sr_init_command() local
396 SCpnt = rq->special; in sr_init_command()
397 cd = scsi_cd(rq->rq_disk); in sr_init_command()
408 "Finishing %u sectors\n", blk_rq_sectors(rq))); in sr_init_command()
440 if (rq_data_dir(rq) == WRITE) { in sr_init_command()
445 } else if (rq_data_dir(rq) == READ) { in sr_init_command()
448 blk_dump_rq_flags(rq, "Unknown sr command"); in sr_init_command()
471 if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) || in sr_init_command()
482 (rq_data_dir(rq) == WRITE) ? in sr_init_command()
484 this_count, blk_rq_sectors(rq))); in sr_init_command()
[all …]
Dscsi_lib.c1119 struct request *rq = cmd->request; in scsi_init_io() local
1120 bool is_mq = (rq->mq_ctx != NULL); in scsi_init_io()
1123 BUG_ON(!rq->nr_phys_segments); in scsi_init_io()
1125 error = scsi_init_sgtable(rq, &cmd->sdb); in scsi_init_io()
1129 if (blk_bidi_rq(rq)) { in scsi_init_io()
1130 if (!rq->q->mq_ops) { in scsi_init_io()
1138 rq->next_rq->special = bidi_sdb; in scsi_init_io()
1141 error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special); in scsi_init_io()
1146 if (blk_integrity_rq(rq)) { in scsi_init_io()
1161 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); in scsi_init_io()
[all …]
/linux-4.4.14/drivers/md/
Ddm.c109 union map_info *dm_get_rq_mapinfo(struct request *rq) in dm_get_rq_mapinfo() argument
111 if (rq && rq->end_io_data) in dm_get_rq_mapinfo()
112 return &((struct dm_rq_target_io *)rq->end_io_data)->info; in dm_get_rq_mapinfo()
662 static void free_clone_request(struct mapped_device *md, struct request *rq) in free_clone_request() argument
664 mempool_free(rq, md->rq_pool); in free_clone_request()
1077 static struct dm_rq_target_io *tio_from_request(struct request *rq) in tio_from_request() argument
1079 return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); in tio_from_request()
1154 struct request *rq = tio->orig; in dm_end_request() local
1156 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { in dm_end_request()
1157 rq->errors = clone->errors; in dm_end_request()
[all …]
/linux-4.4.14/Documentation/locking/
Dlockstat.txt140 34 &rq->lock: 13128 13128 0.43 190.5…
142 36 &rq->lock 645 [<ffffffff8103bfc4>] task_rq_lock+…
143 37 &rq->lock 297 [<ffffffff8104ba65>] try_to_wake_u…
144 38 &rq->lock 360 [<ffffffff8103c4c5>] select_task_r…
145 39 &rq->lock 428 [<ffffffff81045f98>] scheduler_tic…
147 41 &rq->lock 77 [<ffffffff8103bfc4>] task_rq_lock+…
148 42 &rq->lock 174 [<ffffffff8104ba65>] try_to_wake_u…
149 43 &rq->lock 4715 [<ffffffff8103ed4b>] double_rq_loc…
150 44 &rq->lock 893 [<ffffffff81340524>] schedule+0x15…
154 48 &rq->lock/1: 1526 11488 0.33 388.7…
[all …]
/linux-4.4.14/drivers/infiniband/hw/mthca/
Dmthca_qp.c210 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); in get_recv_wqe()
212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + in get_recv_wqe()
213 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); in get_recv_wqe()
499 qp_attr->cap.max_recv_wr = qp->rq.max; in mthca_query_qp()
501 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mthca_query_qp()
598 if (qp->rq.max) in __mthca_modify_qp()
599 qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; in __mthca_modify_qp()
600 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mthca_modify_qp()
761 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); in __mthca_modify_qp()
827 mthca_wq_reset(&qp->rq); in __mthca_modify_qp()
[all …]
/linux-4.4.14/drivers/nvme/host/
Dlightnvm.c439 static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd, in nvme_nvm_rqtocmd() argument
453 static void nvme_nvm_end_io(struct request *rq, int error) in nvme_nvm_end_io() argument
455 struct nvm_rq *rqd = rq->end_io_data; in nvme_nvm_end_io()
460 rq->errors, (unsigned long)rq->special); in nvme_nvm_end_io()
462 kfree(rq->cmd); in nvme_nvm_end_io()
463 blk_mq_free_request(rq); in nvme_nvm_end_io()
470 struct request *rq; in nvme_nvm_submit_io() local
474 rq = blk_mq_alloc_request(q, bio_rw(bio), GFP_KERNEL, 0); in nvme_nvm_submit_io()
475 if (IS_ERR(rq)) in nvme_nvm_submit_io()
480 blk_mq_free_request(rq); in nvme_nvm_submit_io()
[all …]
/linux-4.4.14/Documentation/DocBook/
Dtracepoint.xml.db10 API-trace-block-rq-abort
11 API-trace-block-rq-requeue
12 API-trace-block-rq-complete
13 API-trace-block-rq-insert
14 API-trace-block-rq-issue
26 API-trace-block-rq-remap
Dkernel-api.xml.db608 API-blk-rq-set-block-pc
615 API-blk-rq-err-bytes
629 API-rq-flush-dcache-pages
631 API-blk-rq-unprep-clone
632 API-blk-rq-prep-clone
640 API-rq-ioc
644 API-blk-cloned-rq-check-limits
647 API-blk-rq-map-user-iov
648 API-blk-rq-unmap-user
649 API-blk-rq-map-kern
[all …]
/linux-4.4.14/samples/bpf/
Dtracex3_kern.c26 long rq = PT_REGS_PARM1(ctx); in bpf_prog1() local
29 bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY); in bpf_prog1()
54 long rq = PT_REGS_PARM1(ctx); in bpf_prog2() local
58 value = bpf_map_lookup_elem(&my_map, &rq); in bpf_prog2()
65 bpf_map_delete_elem(&my_map, &rq); in bpf_prog2()
/linux-4.4.14/drivers/s390/block/
Dscm_blk.c246 blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY); in scm_ensure_queue_restart()
256 blk_requeue_request(bdev->rq, scmrq->request[i]); in scm_request_requeue()
295 static void scm_blk_request(struct request_queue *rq) in scm_blk_request() argument
297 struct scm_device *scmdev = rq->queuedata; in scm_blk_request()
302 while ((req = blk_peek_request(rq))) { in scm_blk_request()
465 blk_run_queue(bdev->rq); in scm_blk_tasklet()
474 struct request_queue *rq; in scm_blk_dev_setup() local
495 rq = blk_init_queue(scm_blk_request, &bdev->rq_lock); in scm_blk_dev_setup()
496 if (!rq) in scm_blk_dev_setup()
499 bdev->rq = rq; in scm_blk_dev_setup()
[all …]
Dscm_blk.h18 struct request_queue *rq; member
48 #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data) argument
/linux-4.4.14/kernel/trace/
Dblktrace.c707 static void blk_add_trace_rq(struct request_queue *q, struct request *rq, in blk_add_trace_rq() argument
715 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { in blk_add_trace_rq()
717 __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags, in blk_add_trace_rq()
718 what, rq->errors, rq->cmd_len, rq->cmd); in blk_add_trace_rq()
721 __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, in blk_add_trace_rq()
722 rq->cmd_flags, what, rq->errors, 0, NULL); in blk_add_trace_rq()
727 struct request_queue *q, struct request *rq) in blk_add_trace_rq_abort() argument
729 blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT); in blk_add_trace_rq_abort()
733 struct request_queue *q, struct request *rq) in blk_add_trace_rq_insert() argument
735 blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT); in blk_add_trace_rq_insert()
[all …]
/linux-4.4.14/drivers/net/ethernet/cavium/thunder/
Dnicvf_queues.c504 struct rcv_queue *rq; in nicvf_rcv_queue_config() local
507 rq = &qs->rq[qidx]; in nicvf_rcv_queue_config()
508 rq->enable = enable; in nicvf_rcv_queue_config()
513 if (!rq->enable) { in nicvf_rcv_queue_config()
518 rq->cq_qs = qs->vnic_id; in nicvf_rcv_queue_config()
519 rq->cq_idx = qidx; in nicvf_rcv_queue_config()
520 rq->start_rbdr_qs = qs->vnic_id; in nicvf_rcv_queue_config()
521 rq->start_qs_rbdr_idx = qs->rbdr_cnt - 1; in nicvf_rcv_queue_config()
522 rq->cont_rbdr_qs = qs->vnic_id; in nicvf_rcv_queue_config()
523 rq->cont_qs_rbdr_idx = qs->rbdr_cnt - 1; in nicvf_rcv_queue_config()
[all …]
Dnic_main.c682 (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | in nic_handle_mbx_intr()
683 (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); in nic_handle_mbx_intr()
684 nic_reg_write(nic, reg_addr, mbx.rq.cfg); in nic_handle_mbx_intr()
688 (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | in nic_handle_mbx_intr()
689 (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); in nic_handle_mbx_intr()
690 nic_reg_write(nic, reg_addr, mbx.rq.cfg); in nic_handle_mbx_intr()
697 (mbx.rq.qs_num << NIC_QS_ID_SHIFT) | in nic_handle_mbx_intr()
698 (mbx.rq.rq_num << NIC_Q_NUM_SHIFT); in nic_handle_mbx_intr()
699 nic_reg_write(nic, reg_addr, mbx.rq.cfg); in nic_handle_mbx_intr()
/linux-4.4.14/drivers/ptp/
Dptp_chardev.c31 struct ptp_clock_request rq; in ptp_disable_pinfunc() local
34 memset(&rq, 0, sizeof(rq)); in ptp_disable_pinfunc()
40 rq.type = PTP_CLK_REQ_EXTTS; in ptp_disable_pinfunc()
41 rq.extts.index = chan; in ptp_disable_pinfunc()
42 err = ops->enable(ops, &rq, 0); in ptp_disable_pinfunc()
45 rq.type = PTP_CLK_REQ_PEROUT; in ptp_disable_pinfunc()
46 rq.perout.index = chan; in ptp_disable_pinfunc()
47 err = ops->enable(ops, &rq, 0); in ptp_disable_pinfunc()
Dptp_ixp46x.c218 struct ptp_clock_request *rq, int on) in ptp_ixp_enable() argument
222 switch (rq->type) { in ptp_ixp_enable()
224 switch (rq->extts.index) { in ptp_ixp_enable()
/linux-4.4.14/drivers/block/aoe/
Daoedev.c163 struct request *rq; in aoe_failip() local
169 rq = d->ip.rq; in aoe_failip()
170 if (rq == NULL) in aoe_failip()
175 n = (unsigned long) rq->special; in aoe_failip()
176 rq->special = (void *) --n; in aoe_failip()
178 if ((unsigned long) rq->special == 0) in aoe_failip()
179 aoe_end_request(d, rq, 0); in aoe_failip()
201 struct request *rq; in aoedev_downdev() local
229 while ((rq = blk_peek_request(d->blkq))) { in aoedev_downdev()
230 blk_start_request(rq); in aoedev_downdev()
[all …]
Daoecmd.c896 bufinit(struct buf *buf, struct request *rq, struct bio *bio) in bufinit() argument
899 buf->rq = rq; in bufinit()
908 struct request *rq; in nextbuf() local
918 rq = d->ip.rq; in nextbuf()
919 if (rq == NULL) { in nextbuf()
920 rq = blk_peek_request(q); in nextbuf()
921 if (rq == NULL) in nextbuf()
923 blk_start_request(rq); in nextbuf()
924 d->ip.rq = rq; in nextbuf()
925 d->ip.nxbio = rq->bio; in nextbuf()
[all …]
Daoe.h105 struct request *rq; member
177 struct request *rq; member
Daoeblk.c281 struct request *rq; in aoeblk_request() local
287 while ((rq = blk_peek_request(q))) { in aoeblk_request()
288 blk_start_request(rq); in aoeblk_request()
289 aoe_end_request(d, rq, 1); in aoeblk_request()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/
Dgk104.c28 gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) in gk104_aux_stat() argument
33 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) { in gk104_aux_stat()
36 if ((stat & (4 << (i * 4)))) *rq |= 1 << i; in gk104_aux_stat()
Dg94.c28 g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) in g94_aux_stat() argument
33 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) { in g94_aux_stat()
36 if ((stat & (4 << (i * 4)))) *rq |= 1 << i; in g94_aux_stat()
Dbase.c133 u32 hi, lo, rq, tx; in nvkm_i2c_intr() local
138 i2c->func->aux_stat(i2c, &hi, &lo, &rq, &tx); in nvkm_i2c_intr()
139 if (!hi && !lo && !rq && !tx) in nvkm_i2c_intr()
146 if (rq & aux->intr) mask |= NVKM_I2C_IRQ; in nvkm_i2c_intr()
/linux-4.4.14/drivers/infiniband/hw/mlx5/
Dqp.c91 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
121 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; in mlx5_ib_read_user_wqe()
226 qp->rq.max_gs = 0; in set_rq_size()
227 qp->rq.wqe_cnt = 0; in set_rq_size()
228 qp->rq.wqe_shift = 0; in set_rq_size()
231 qp->rq.wqe_cnt = ucmd->rq_wqe_count; in set_rq_size()
232 qp->rq.wqe_shift = ucmd->rq_wqe_shift; in set_rq_size()
233 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; in set_rq_size()
234 qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
241 qp->rq.wqe_cnt = wq_size / wqe_size; in set_rq_size()
[all …]
/linux-4.4.14/drivers/mtd/ubi/
Dblock.c91 struct request_queue *rq; member
322 struct request *req = bd->rq; in ubiblock_queue_rq()
422 dev->rq = blk_mq_init_queue(&dev->tag_set); in ubiblock_create()
423 if (IS_ERR(dev->rq)) { in ubiblock_create()
425 ret = PTR_ERR(dev->rq); in ubiblock_create()
428 blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT); in ubiblock_create()
430 dev->rq->queuedata = dev; in ubiblock_create()
431 dev->gd->queue = dev->rq; in ubiblock_create()
454 blk_cleanup_queue(dev->rq); in ubiblock_create()
474 blk_cleanup_queue(dev->rq); in ubiblock_cleanup()
/linux-4.4.14/drivers/gpu/drm/i915/
Di915_guc_submission.c517 struct drm_i915_gem_request *rq) in guc_add_workqueue_item() argument
519 enum intel_ring_id ring_id = rq->ring->id; in guc_add_workqueue_item()
553 wqi->context_desc = (u32)intel_lr_context_descriptor(rq->ctx, rq->ring); in guc_add_workqueue_item()
556 tail = rq->ringbuf->tail >> 3; in guc_add_workqueue_item()
568 static void lr_context_update(struct drm_i915_gem_request *rq) in lr_context_update() argument
570 enum intel_ring_id ring_id = rq->ring->id; in lr_context_update()
571 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring_id].state; in lr_context_update()
572 struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj; in lr_context_update()
597 struct drm_i915_gem_request *rq) in i915_guc_submit() argument
600 enum intel_ring_id ring_id = rq->ring->id; in i915_guc_submit()
[all …]
Dintel_lrc.c223 static int intel_lr_context_pin(struct drm_i915_gem_request *rq);
357 static int execlists_update_context(struct drm_i915_gem_request *rq) in execlists_update_context() argument
359 struct intel_engine_cs *ring = rq->ring; in execlists_update_context()
360 struct i915_hw_ppgtt *ppgtt = rq->ctx->ppgtt; in execlists_update_context()
361 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; in execlists_update_context()
362 struct drm_i915_gem_object *rb_obj = rq->ringbuf->obj; in execlists_update_context()
373 reg_state[CTX_RING_TAIL+1] = rq->tail; in execlists_update_context()
1042 static int intel_lr_context_pin(struct drm_i915_gem_request *rq) in intel_lr_context_pin() argument
1045 struct intel_engine_cs *ring = rq->ring; in intel_lr_context_pin()
1046 struct drm_i915_gem_object *ctx_obj = rq->ctx->engine[ring->id].state; in intel_lr_context_pin()
[all …]
Dintel_guc.h120 struct drm_i915_gem_request *rq);
/linux-4.4.14/drivers/char/agp/
Disoch.c75 u32 rq; in agp_3_5_isochronous_node_enable() member
126 target.rq = (tstatus >> 24) & 0xff; in agp_3_5_isochronous_node_enable()
216 master[cdev].rq = master[cdev].n; in agp_3_5_isochronous_node_enable()
218 master[cdev].rq *= (1 << (master[cdev].y - 1)); in agp_3_5_isochronous_node_enable()
220 tot_rq += master[cdev].rq; in agp_3_5_isochronous_node_enable()
227 rq_async = target.rq - rq_isoch; in agp_3_5_isochronous_node_enable()
254 master[cdev].rq += (cdev == ndevs - 1) in agp_3_5_isochronous_node_enable()
266 mcmd |= master[cdev].rq << 24; in agp_3_5_isochronous_node_enable()
/linux-4.4.14/net/sunrpc/xprtrdma/
Dxprt_rdma.h331 #define RPCRDMA_INLINE_READ_THRESHOLD(rq) \ argument
332 (rpcx_to_rdmad(rq->rq_xprt).inline_rsize)
334 #define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\ argument
335 (rpcx_to_rdmad(rq->rq_xprt).inline_wsize)
337 #define RPCRDMA_INLINE_PAD_VALUE(rq)\ argument
338 rpcx_to_rdmad(rq->rq_xprt).padding
/linux-4.4.14/drivers/net/ethernet/intel/igb/
Digb_ptp.c475 struct ptp_clock_request *rq, int on) in igb_ptp_feature_enable_i210() argument
486 switch (rq->type) { in igb_ptp_feature_enable_i210()
490 rq->extts.index); in igb_ptp_feature_enable_i210()
494 if (rq->extts.index == 1) { in igb_ptp_feature_enable_i210()
505 igb_pin_extts(igb, rq->extts.index, pin); in igb_ptp_feature_enable_i210()
520 rq->perout.index); in igb_ptp_feature_enable_i210()
524 ts.tv_sec = rq->perout.period.sec; in igb_ptp_feature_enable_i210()
525 ts.tv_nsec = rq->perout.period.nsec; in igb_ptp_feature_enable_i210()
534 if (rq->perout.index == 1) { in igb_ptp_feature_enable_i210()
560 if (rq->perout.index == 1) { in igb_ptp_feature_enable_i210()
[all …]
/linux-4.4.14/drivers/infiniband/hw/mlx4/
Dqp.c191 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe()
390 qp->rq.wqe_cnt = qp->rq.max_gs = 0; in set_rq_size()
396 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); in set_rq_size()
397 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); in set_rq_size()
398 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); in set_rq_size()
403 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size()
404 cap->max_recv_sge = qp->rq.max_gs; in set_rq_size()
406 cap->max_recv_wr = qp->rq.max_post = in set_rq_size()
407 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); in set_rq_size()
408 cap->max_recv_sge = min(qp->rq.max_gs, in set_rq_size()
[all …]
/linux-4.4.14/fs/ncpfs/
Dsock.c217 struct ncp_request_reply *rq; in __ncptcp_try_send() local
222 rq = server->tx.creq; in __ncptcp_try_send()
223 if (!rq) in __ncptcp_try_send()
227 memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0])); in __ncptcp_try_send()
228 result = do_send(server->ncp_sock, iovc, rq->tx_iovlen, in __ncptcp_try_send()
229 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT); in __ncptcp_try_send()
236 __ncp_abort_request(server, rq, result); in __ncptcp_try_send()
239 if (result >= rq->tx_totallen) { in __ncptcp_try_send()
240 server->rcv.creq = rq; in __ncptcp_try_send()
244 rq->tx_totallen -= result; in __ncptcp_try_send()
[all …]
/linux-4.4.14/drivers/infiniband/hw/ocrdma/
Docrdma_verbs.c1305 uresp.rq_dbid = qp->rq.dbid; in ocrdma_copy_qp_uresp()
1307 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len); in ocrdma_copy_qp_uresp()
1308 uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va); in ocrdma_copy_qp_uresp()
1309 uresp.num_rqe_allocated = qp->rq.max_cnt; in ocrdma_copy_qp_uresp()
1372 kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL); in ocrdma_alloc_wr_id_tbl()
1392 qp->rq.max_sges = attrs->cap.max_recv_sge; in ocrdma_set_qp_init_params()
1602 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1; in ocrdma_query_qp()
1604 qp_attr->cap.max_recv_sge = qp->rq.max_sges; in ocrdma_query_qp()
1679 return (qp->rq.tail == qp->rq.head); in is_hw_rq_empty()
1744 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> in ocrdma_discard_cqes()
[all …]
Docrdma_hw.c2133 qp->rq.head = 0; in ocrdma_init_hwq_ptr()
2134 qp->rq.tail = 0; in ocrdma_init_hwq_ptr()
2260 qp->rq.max_cnt = max_rqe_allocated; in ocrdma_set_create_qp_rq_cmd()
2263 qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); in ocrdma_set_create_qp_rq_cmd()
2264 if (!qp->rq.va) in ocrdma_set_create_qp_rq_cmd()
2266 memset(qp->rq.va, 0, len); in ocrdma_set_create_qp_rq_cmd()
2267 qp->rq.pa = pa; in ocrdma_set_create_qp_rq_cmd()
2268 qp->rq.len = len; in ocrdma_set_create_qp_rq_cmd()
2269 qp->rq.entry_size = dev->attr.rqe_size; in ocrdma_set_create_qp_rq_cmd()
2280 cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) << in ocrdma_set_create_qp_rq_cmd()
[all …]
/linux-4.4.14/include/linux/sunrpc/
Dsvcauth.h119 int (*accept)(struct svc_rqst *rq, __be32 *authp);
120 int (*release)(struct svc_rqst *rq);
122 int (*set_client)(struct svc_rqst *rq);
/linux-4.4.14/drivers/block/mtip32xx/
Dmtip32xx.c174 struct request *rq; in mtip_get_int_command() local
179 rq = blk_mq_alloc_request(dd->queue, 0, __GFP_RECLAIM, true); in mtip_get_int_command()
180 if (IS_ERR(rq)) in mtip_get_int_command()
183 return blk_mq_rq_to_pdu(rq); in mtip_get_int_command()
205 struct request *rq = mtip_rq_from_tag(dd, tag); in mtip_cmd_from_tag() local
207 return blk_mq_rq_to_pdu(rq); in mtip_cmd_from_tag()
232 struct request *rq; in mtip_async_complete() local
242 rq = mtip_rq_from_tag(dd, tag); in mtip_async_complete()
244 blk_mq_complete_request(rq, status); in mtip_async_complete()
2381 static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, in mtip_hw_submit_io() argument
[all …]
/linux-4.4.14/net/sunrpc/
Dcache.c767 struct cache_request *rq; in cache_read() local
791 rq = container_of(rp->q.list.next, struct cache_request, q.list); in cache_read()
792 WARN_ON_ONCE(rq->q.reader); in cache_read()
794 rq->readers++; in cache_read()
797 if (rq->len == 0) { in cache_read()
798 err = cache_request(cd, rq); in cache_read()
801 rq->len = err; in cache_read()
804 if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) { in cache_read()
807 list_move(&rp->q.list, &rq->q.list); in cache_read()
810 if (rp->offset + count > rq->len) in cache_read()
[all …]
/linux-4.4.14/drivers/net/ethernet/intel/fm10k/
Dfm10k_ptp.c323 struct ptp_clock_request *rq, in fm10k_ptp_enable() argument
326 struct ptp_clock_time *t = &rq->perout.period; in fm10k_ptp_enable()
333 if (rq->type != PTP_CLK_REQ_PEROUT) in fm10k_ptp_enable()
337 if (rq->perout.index >= ptp->n_per_out) in fm10k_ptp_enable()
368 fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index), in fm10k_ptp_enable()
/linux-4.4.14/net/bridge/
Dbr_ioctl.c113 static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) in old_dev_ioctl() argument
118 if (copy_from_user(args, rq->ifr_data, sizeof(args))) in old_dev_ioctl()
378 int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) in br_dev_ioctl() argument
384 return old_dev_ioctl(dev, rq, cmd); in br_dev_ioctl()
388 return add_del_if(br, rq->ifr_ifindex, cmd == SIOCBRADDIF); in br_dev_ioctl()
/linux-4.4.14/drivers/isdn/hardware/mISDN/
Dw6692.c1008 open_bchannel(struct w6692_hw *card, struct channel_req *rq) in open_bchannel() argument
1012 if (rq->adr.channel == 0 || rq->adr.channel > 2) in open_bchannel()
1014 if (rq->protocol == ISDN_P_NONE) in open_bchannel()
1016 bch = &card->bc[rq->adr.channel - 1].bch; in open_bchannel()
1019 bch->ch.protocol = rq->protocol; in open_bchannel()
1020 rq->ch = &bch->ch; in open_bchannel()
1179 open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller) in open_dchannel() argument
1183 if (rq->protocol != ISDN_P_TE_S0) in open_dchannel()
1185 if (rq->adr.channel == 1) in open_dchannel()
1188 rq->ch = &card->dch.dev.D; in open_dchannel()
[all …]
Dhfcsusb.c425 struct channel_req *rq) in open_dchannel() argument
431 hw->name, __func__, hw->dch.dev.id, rq->adr.channel, in open_dchannel()
433 if (rq->protocol == ISDN_P_NONE) in open_dchannel()
441 if (rq->adr.channel == 1) { in open_dchannel()
452 hw->protocol = rq->protocol; in open_dchannel()
453 if (rq->protocol == ISDN_P_TE_S0) { in open_dchannel()
459 ch->protocol = rq->protocol; in open_dchannel()
462 if (rq->protocol != ch->protocol) in open_dchannel()
470 rq->ch = ch; in open_dchannel()
478 open_bchannel(struct hfcsusb *hw, struct channel_req *rq) in open_bchannel() argument
[all …]
DmISDNipac.c757 open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller) in open_dchannel_caller() argument
761 if (rq->protocol != ISDN_P_TE_S0) in open_dchannel_caller()
763 if (rq->adr.channel == 1) in open_dchannel_caller()
766 rq->ch = &isac->dch.dev.D; in open_dchannel_caller()
767 rq->ch->protocol = rq->protocol; in open_dchannel_caller()
769 _queue_data(rq->ch, PH_ACTIVATE_IND, MISDN_ID_ANY, in open_dchannel_caller()
775 open_dchannel(struct isac_hw *isac, struct channel_req *rq) in open_dchannel() argument
777 return open_dchannel_caller(isac, rq, __builtin_return_address(0)); in open_dchannel()
1496 open_bchannel(struct ipac_hw *ipac, struct channel_req *rq) in open_bchannel() argument
1500 if (rq->adr.channel == 0 || rq->adr.channel > 2) in open_bchannel()
[all …]
Davmfritz.c908 open_bchannel(struct fritzcard *fc, struct channel_req *rq) in open_bchannel() argument
912 if (rq->adr.channel == 0 || rq->adr.channel > 2) in open_bchannel()
914 if (rq->protocol == ISDN_P_NONE) in open_bchannel()
916 bch = &fc->bch[rq->adr.channel - 1]; in open_bchannel()
919 bch->ch.protocol = rq->protocol; in open_bchannel()
920 rq->ch = &bch->ch; in open_bchannel()
933 struct channel_req *rq; in avm_dctrl() local
939 rq = arg; in avm_dctrl()
940 if (rq->protocol == ISDN_P_TE_S0) in avm_dctrl()
941 err = fc->isac.open(&fc->isac, rq); in avm_dctrl()
[all …]
Dhfcpci.c1890 struct channel_req *rq) in open_dchannel() argument
1897 if (rq->protocol == ISDN_P_NONE) in open_dchannel()
1899 if (rq->adr.channel == 1) { in open_dchannel()
1904 if (rq->protocol == ISDN_P_TE_S0) { in open_dchannel()
1909 hc->hw.protocol = rq->protocol; in open_dchannel()
1910 ch->protocol = rq->protocol; in open_dchannel()
1915 if (rq->protocol != ch->protocol) { in open_dchannel()
1918 if (rq->protocol == ISDN_P_TE_S0) { in open_dchannel()
1923 hc->hw.protocol = rq->protocol; in open_dchannel()
1924 ch->protocol = rq->protocol; in open_dchannel()
[all …]
Dspeedfax.c254 struct channel_req *rq; in sfax_dctrl() local
260 rq = arg; in sfax_dctrl()
261 if (rq->protocol == ISDN_P_TE_S0) in sfax_dctrl()
262 err = sf->isac.open(&sf->isac, rq); in sfax_dctrl()
264 err = sf->isar.open(&sf->isar, rq); in sfax_dctrl()
Dnetjet.c863 open_bchannel(struct tiger_hw *card, struct channel_req *rq) in open_bchannel() argument
867 if (rq->adr.channel == 0 || rq->adr.channel > 2) in open_bchannel()
869 if (rq->protocol == ISDN_P_NONE) in open_bchannel()
871 bch = &card->bc[rq->adr.channel - 1].bch; in open_bchannel()
875 bch->ch.protocol = rq->protocol; in open_bchannel()
876 rq->ch = &bch->ch; in open_bchannel()
889 struct channel_req *rq; in nj_dctrl() local
895 rq = arg; in nj_dctrl()
896 if (rq->protocol == ISDN_P_TE_S0) in nj_dctrl()
897 err = card->isac.open(&card->isac, rq); in nj_dctrl()
[all …]
/linux-4.4.14/tools/perf/scripts/python/
Dsched-migration.py269 rq = ts.rqs[cpu]
271 raw += "Last event : %s\n" % rq.event.__repr__()
274 raw += "Load = %d\n" % rq.load()
275 for t in rq.tasks:
281 rq = slice.rqs[cpu]
284 load_rate = rq.load() / float(slice.total_load)
294 top_color = rq.event.color()
/linux-4.4.14/drivers/block/paride/
Dpd.c723 struct request *rq; in pd_special_command() local
726 rq = blk_get_request(disk->gd->queue, READ, __GFP_RECLAIM); in pd_special_command()
727 if (IS_ERR(rq)) in pd_special_command()
728 return PTR_ERR(rq); in pd_special_command()
730 rq->cmd_type = REQ_TYPE_DRV_PRIV; in pd_special_command()
731 rq->special = func; in pd_special_command()
733 err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0); in pd_special_command()
735 blk_put_request(rq); in pd_special_command()
/linux-4.4.14/drivers/gpu/drm/amd/amdgpu/
Damdgpu_ctx.c44 struct amd_sched_rq *rq; in amdgpu_ctx_init() local
46 rq = &adev->rings[i]->sched.kernel_rq; in amdgpu_ctx_init()
48 rq = &adev->rings[i]->sched.sched_rq; in amdgpu_ctx_init()
51 rq, amdgpu_sched_jobs); in amdgpu_ctx_init()
/linux-4.4.14/net/key/
Daf_key.c1888 parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) in parse_ipsecrequest() argument
1897 if (rq->sadb_x_ipsecrequest_mode == 0) in parse_ipsecrequest()
1900 t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */ in parse_ipsecrequest()
1901 if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0) in parse_ipsecrequest()
1904 if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) in parse_ipsecrequest()
1906 else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) { in parse_ipsecrequest()
1907 t->reqid = rq->sadb_x_ipsecrequest_reqid; in parse_ipsecrequest()
1916 u8 *sa = (u8 *) (rq + 1); in parse_ipsecrequest()
1943 struct sadb_x_ipsecrequest *rq = (void*)(pol+1); in parse_ipsecrequests() local
1949 if ((err = parse_ipsecrequest(xp, rq)) < 0) in parse_ipsecrequests()
[all …]
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/fb/
Dgddr5.c39 int rq = ram->freq < 1000000; /* XXX */ in nvkm_gddr5_calc() local
96 ram->mr[3] |= (rq & 0x01) << 5; in nvkm_gddr5_calc()
/linux-4.4.14/drivers/staging/octeon/
Dethernet-mdio.h30 int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
Dethernet-mdio.c89 int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) in cvm_oct_ioctl() argument
99 return phy_mii_ioctl(priv->phydev, rq, cmd); in cvm_oct_ioctl()
/linux-4.4.14/drivers/staging/rtl8712/
Dosdep_intf.h42 int r871x_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
/linux-4.4.14/arch/powerpc/platforms/cell/spufs/
Dsched.c114 BUG_ON(!list_empty(&ctx->rq)); in __spu_update_sched_info()
508 if (list_empty(&ctx->rq)) { in __spu_add_to_rq()
509 list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); in __spu_add_to_rq()
527 if (!list_empty(&ctx->rq)) { in __spu_del_from_rq()
530 list_del_init(&ctx->rq); in __spu_del_from_rq()
845 struct list_head *rq = &spu_prio->runq[best]; in grab_runnable_context() local
847 list_for_each_entry(ctx, rq, rq) { in grab_runnable_context()
Dcontext.c61 INIT_LIST_HEAD(&ctx->rq); in alloc_spu_context()
93 BUG_ON(!list_empty(&ctx->rq)); in destroy_spu_context()
/linux-4.4.14/drivers/atm/
Dfirestream.c679 long rq; in process_return_queue() local
683 while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { in process_return_queue()
684 fs_dprintk (FS_DEBUG_QUEUE, "reaping return queue entry at %lx\n", rq); in process_return_queue()
685 qe = bus_to_virt (rq); in process_return_queue()
705 long rq; in process_txdone_queue() local
711 while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { in process_txdone_queue()
712 fs_dprintk (FS_DEBUG_QUEUE, "reaping txdone entry at %lx\n", rq); in process_txdone_queue()
713 qe = bus_to_virt (rq); in process_txdone_queue()
775 long rq; in process_incoming() local
782 while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { in process_incoming()
[all …]
/linux-4.4.14/drivers/cdrom/
Dcdrom.c2164 struct request *rq; in cdrom_read_cdda_bpc() local
2183 rq = blk_get_request(q, READ, GFP_KERNEL); in cdrom_read_cdda_bpc()
2184 if (IS_ERR(rq)) { in cdrom_read_cdda_bpc()
2185 ret = PTR_ERR(rq); in cdrom_read_cdda_bpc()
2188 blk_rq_set_block_pc(rq); in cdrom_read_cdda_bpc()
2190 ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL); in cdrom_read_cdda_bpc()
2192 blk_put_request(rq); in cdrom_read_cdda_bpc()
2196 rq->cmd[0] = GPCMD_READ_CD; in cdrom_read_cdda_bpc()
2197 rq->cmd[1] = 1 << 2; in cdrom_read_cdda_bpc()
2198 rq->cmd[2] = (lba >> 24) & 0xff; in cdrom_read_cdda_bpc()
[all …]
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
Dservice.c927 struct ptlrpc_request *rq = NULL; in ptlrpc_at_add_timed() local
946 list_for_each_entry_reverse(rq, in ptlrpc_at_add_timed()
949 if (req->rq_deadline >= rq->rq_deadline) { in ptlrpc_at_add_timed()
951 &rq->rq_timed_list); in ptlrpc_at_add_timed()
1124 struct ptlrpc_request *rq, *n; in ptlrpc_at_check_timed() local
1162 list_for_each_entry_safe(rq, n, in ptlrpc_at_check_timed()
1165 if (rq->rq_deadline > now + at_early_margin) { in ptlrpc_at_check_timed()
1168 rq->rq_deadline < deadline) in ptlrpc_at_check_timed()
1169 deadline = rq->rq_deadline; in ptlrpc_at_check_timed()
1173 ptlrpc_at_remove_timed(rq); in ptlrpc_at_check_timed()
[all …]
/linux-4.4.14/drivers/staging/rtl8188eu/include/
Dosdep_intf.h37 int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
/linux-4.4.14/arch/alpha/include/asm/
Dagp_backend.h14 u32 rq : 8; member
/linux-4.4.14/fs/nfsd/
Dnfsd.h108 static inline int nfsd_v4client(struct svc_rqst *rq) in nfsd_v4client() argument
110 return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4; in nfsd_v4client()
/linux-4.4.14/drivers/scsi/bnx2fc/
Dbnx2fc_tgt.c703 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, in bnx2fc_alloc_session_resc()
705 if (!tgt->rq) { in bnx2fc_alloc_session_resc()
710 memset(tgt->rq, 0, tgt->rq_mem_size); in bnx2fc_alloc_session_resc()
883 if (tgt->rq) { in bnx2fc_free_session_resc()
885 tgt->rq, tgt->rq_dma); in bnx2fc_free_session_resc()
886 tgt->rq = NULL; in bnx2fc_free_session_resc()
/linux-4.4.14/net/9p/
Dtrans_fd.c143 struct work_struct rq; member
303 m = container_of(work, struct p9_conn, rq); in p9_read_work()
395 schedule_work(&m->rq); in p9_read_work()
591 INIT_WORK(&m->rq, p9_read_work); in p9_conn_create()
634 schedule_work(&m->rq); in p9_poll_mux()
852 cancel_work_sync(&m->rq); in p9_conn_destroy()
/linux-4.4.14/include/linux/mtd/
Dblktrans.h49 struct request_queue *rq; member
/linux-4.4.14/drivers/net/ppp/
Dppp_generic.c83 struct sk_buff_head rq; /* receive queue for pppd */ member
433 skb = skb_dequeue(&pf->rq); in ppp_read()
528 if (skb_peek(&pf->rq)) in ppp_poll()
1338 if (ppp->file.rq.qlen > PPP_MAX_RQLEN) in ppp_send_frame()
1340 skb_queue_tail(&ppp->file.rq, skb); in ppp_send_frame()
1719 skb_queue_tail(&pch->file.rq, skb); in ppp_input()
1721 while (pch->file.rq.qlen > PPP_MAX_RQLEN && in ppp_input()
1722 (skb = skb_dequeue(&pch->file.rq))) in ppp_input()
1869 skb_queue_tail(&ppp->file.rq, skb); in ppp_receive_nonmp_frame()
1871 while (ppp->file.rq.qlen > PPP_MAX_RQLEN && in ppp_receive_nonmp_frame()
[all …]
/linux-4.4.14/drivers/scsi/osd/
Dosd_initiator.c442 static void _put_request(struct request *rq) in _put_request() argument
450 if (unlikely(rq->bio)) in _put_request()
451 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq)); in _put_request()
453 blk_put_request(rq); in _put_request()
458 struct request *rq = or->request; in osd_end_request() local
460 if (rq) { in osd_end_request()
461 if (rq->next_rq) { in osd_end_request()
462 _put_request(rq->next_rq); in osd_end_request()
463 rq->next_rq = NULL; in osd_end_request()
466 _put_request(rq); in osd_end_request()
/linux-4.4.14/drivers/video/fbdev/
Dmx3fb.c1529 struct dma_chan_request *rq = arg; in chan_filter() local
1536 if (!rq) in chan_filter()
1539 dev = rq->mx3fb->dev; in chan_filter()
1542 return rq->id == chan->chan_id && in chan_filter()
1564 struct dma_chan_request rq; in mx3fb_probe() local
1594 rq.mx3fb = mx3fb; in mx3fb_probe()
1599 rq.id = IDMAC_SDC_0; in mx3fb_probe()
1600 chan = dma_request_channel(mask, chan_filter, &rq); in mx3fb_probe()
/linux-4.4.14/drivers/media/platform/soc_camera/
Dmx3_camera.c617 struct dma_chan_request *rq = arg; in chan_filter() local
623 if (!rq) in chan_filter()
626 pdata = rq->mx3_cam->soc_host.v4l2_dev.dev->platform_data; in chan_filter()
628 return rq->id == chan->chan_id && in chan_filter()
771 struct dma_chan_request rq = {.mx3_cam = mx3_cam, in acquire_dma_channel() local
777 chan = dma_request_channel(mask, chan_filter, &rq); in acquire_dma_channel()
/linux-4.4.14/drivers/isdn/i4l/
Disdn_ppp.c317 is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */ in isdn_ppp_open()
318 is->last = is->rq; in isdn_ppp_open()
375 kfree(is->rq[i].buf); in isdn_ppp_release()
376 is->rq[i].buf = NULL; in isdn_ppp_release()
378 is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */ in isdn_ppp_release()
379 is->last = is->rq; in isdn_ppp_release()
917 ippp_table[i]->first = ippp_table[i]->rq + NUM_RCV_BUFFS - 1; in isdn_ppp_init()
918 ippp_table[i]->last = ippp_table[i]->rq; in isdn_ppp_init()
921 ippp_table[i]->rq[j].buf = NULL; in isdn_ppp_init()
922 ippp_table[i]->rq[j].last = ippp_table[i]->rq + in isdn_ppp_init()
[all …]
