/linux-4.1.27/kernel/sched/ |
D | deadline.c | 28 static inline struct rq *rq_of_dl_rq(struct dl_rq *dl_rq) in rq_of_dl_rq() 30 return container_of(dl_rq, struct rq, dl); in rq_of_dl_rq() 36 struct rq *rq = task_rq(p); in dl_rq_of_se() local 38 return &rq->dl; in dl_rq_of_se() 90 static inline int dl_overloaded(struct rq *rq) in dl_overloaded() argument 92 return atomic_read(&rq->rd->dlo_count); in dl_overloaded() 95 static inline void dl_set_overload(struct rq *rq) in dl_set_overload() argument 97 if (!rq->online) in dl_set_overload() 100 cpumask_set_cpu(rq->cpu, rq->rd->dlo_mask); in dl_set_overload() 108 atomic_inc(&rq->rd->dlo_count); in dl_set_overload() [all …]
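The `rq_of_dl_rq()` and `rq_of_rt_rq()` helpers above recover the enclosing per-CPU `struct rq` from a pointer to one of its embedded sub-runqueues via `container_of`. Below is a minimal, self-contained userspace sketch of that pattern; the struct layout is a simplified stand-in for illustration, not the kernel's real `struct rq`.

```c
/* Minimal userspace sketch of the container_of pattern behind
 * rq_of_dl_rq()/rq_of_rt_rq(). Simplified stand-in structs, not the
 * kernel definitions. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dl_rq { int nr_running; };

struct rq {
	int cpu;
	struct dl_rq dl;	/* embedded deadline sub-runqueue */
};

static struct rq *rq_of_dl_rq(struct dl_rq *dl_rq)
{
	/* dl_rq points inside a struct rq, so subtract the member offset */
	return container_of(dl_rq, struct rq, dl);
}

int main(void)
{
	struct rq rq = { .cpu = 3 };

	printf("cpu = %d\n", rq_of_dl_rq(&rq.dl)->cpu);	/* prints 3 */
	return 0;
}
```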
|
D | stats.h | 8 rq_sched_info_arrive(struct rq *rq, unsigned long long delta) in rq_sched_info_arrive() argument 10 if (rq) { in rq_sched_info_arrive() 11 rq->rq_sched_info.run_delay += delta; in rq_sched_info_arrive() 12 rq->rq_sched_info.pcount++; in rq_sched_info_arrive() 20 rq_sched_info_depart(struct rq *rq, unsigned long long delta) in rq_sched_info_depart() argument 22 if (rq) in rq_sched_info_depart() 23 rq->rq_cpu_time += delta; in rq_sched_info_depart() 27 rq_sched_info_dequeued(struct rq *rq, unsigned long long delta) in rq_sched_info_dequeued() argument 29 if (rq) in rq_sched_info_dequeued() 30 rq->rq_sched_info.run_delay += delta; in rq_sched_info_dequeued() [all …]
|
D | rt.c | 119 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() 121 return rt_rq->rq; in rq_of_rt_rq() 129 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se() 133 return rt_rq->rq; in rq_of_rt_se() 158 struct rq *rq = cpu_rq(cpu); in init_tg_rt_entry() local 162 rt_rq->rq = rq; in init_tg_rt_entry() 172 rt_se->rt_rq = &rq->rt; in init_tg_rt_entry() 230 static inline struct rq *rq_of_rt_rq(struct rt_rq *rt_rq) in rq_of_rt_rq() 232 return container_of(rt_rq, struct rq, rt); in rq_of_rt_rq() 235 static inline struct rq *rq_of_rt_se(struct sched_rt_entity *rt_se) in rq_of_rt_se() [all …]
|
D | stop_task.c | 21 check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_stop() argument 27 pick_next_task_stop(struct rq *rq, struct task_struct *prev) in pick_next_task_stop() argument 29 struct task_struct *stop = rq->stop; in pick_next_task_stop() 34 put_prev_task(rq, prev); in pick_next_task_stop() 36 stop->se.exec_start = rq_clock_task(rq); in pick_next_task_stop() 42 enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) in enqueue_task_stop() argument 44 add_nr_running(rq, 1); in enqueue_task_stop() 48 dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_stop() argument 50 sub_nr_running(rq, 1); in dequeue_task_stop() 53 static void yield_task_stop(struct rq *rq) in yield_task_stop() argument [all …]
|
D | idle_task.c | 21 static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags) in check_preempt_curr_idle() argument 23 resched_curr(rq); in check_preempt_curr_idle() 27 pick_next_task_idle(struct rq *rq, struct task_struct *prev) in pick_next_task_idle() argument 29 put_prev_task(rq, prev); in pick_next_task_idle() 31 schedstat_inc(rq, sched_goidle); in pick_next_task_idle() 32 return rq->idle; in pick_next_task_idle() 40 dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags) in dequeue_task_idle() argument 42 raw_spin_unlock_irq(&rq->lock); in dequeue_task_idle() 45 raw_spin_lock_irq(&rq->lock); in dequeue_task_idle() 48 static void put_prev_task_idle(struct rq *rq, struct task_struct *prev) in put_prev_task_idle() argument [all …]
|
D | sched.h | 17 struct rq; 29 extern long calc_load_fold_active(struct rq *this_rq); 30 extern void update_cpu_load_active(struct rq *this_rq); 396 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ member 468 struct rq *rq; member 556 struct rq { struct 691 static inline int cpu_of(struct rq *rq) in cpu_of() argument 694 return rq->cpu; in cpu_of() 700 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 708 static inline u64 __rq_clock_broken(struct rq *rq) in __rq_clock_broken() argument [all …]
|
D | core.c | 114 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 116 static void update_rq_clock_task(struct rq *rq, s64 delta); 118 void update_rq_clock(struct rq *rq) in update_rq_clock() argument 122 lockdep_assert_held(&rq->lock); in update_rq_clock() 124 if (rq->clock_skip_update & RQCF_ACT_SKIP) in update_rq_clock() 127 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; in update_rq_clock() 130 rq->clock += delta; in update_rq_clock() 131 update_rq_clock_task(rq, delta); in update_rq_clock() 315 static struct rq *this_rq_lock(void) in this_rq_lock() 316 __acquires(rq->lock) in this_rq_lock() [all …]
|
D | fair.c | 249 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() 251 return cfs_rq->rq; in rq_of() 322 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument 323 list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list) 379 static inline struct rq *rq_of(struct cfs_rq *cfs_rq) in rq_of() 381 return container_of(cfs_rq, struct rq, cfs); in rq_of() 397 struct rq *rq = task_rq(p); in cfs_rq_of() local 399 return &rq->cfs; in cfs_rq_of() 416 #define for_each_leaf_cfs_rq(rq, cfs_rq) \ argument 417 for (cfs_rq = &rq->cfs; cfs_rq; cfs_rq = NULL) [all …]
|
D | stats.c | 23 struct rq *rq; in show_schedstat() local 29 rq = cpu_rq(cpu); in show_schedstat() 34 cpu, rq->yld_count, in show_schedstat() 35 rq->sched_count, rq->sched_goidle, in show_schedstat() 36 rq->ttwu_count, rq->ttwu_local, in show_schedstat() 37 rq->rq_cpu_time, in show_schedstat() 38 rq->rq_sched_info.run_delay, rq->rq_sched_info.pcount); in show_schedstat()
|
D | proc.c | 79 long calc_load_fold_active(struct rq *this_rq) in calc_load_fold_active() 182 struct rq *this_rq = this_rq(); in calc_load_enter_idle() 198 struct rq *this_rq = this_rq(); in calc_load_exit_idle() 379 static void calc_load_account_active(struct rq *this_rq) in calc_load_account_active() 469 static void __update_cpu_load(struct rq *this_rq, unsigned long this_load, in __update_cpu_load() 501 static inline unsigned long get_rq_runnable_load(struct rq *rq) in get_rq_runnable_load() argument 503 return rq->cfs.runnable_load_avg; in get_rq_runnable_load() 506 static inline unsigned long get_rq_runnable_load(struct rq *rq) in get_rq_runnable_load() argument 508 return rq->load.weight; in get_rq_runnable_load() 530 void update_idle_cpu_load(struct rq *this_rq) in update_idle_cpu_load() [all …]
|
D | debug.c | 121 print_task(struct seq_file *m, struct rq *rq, struct task_struct *p) in print_task() argument 123 if (rq->curr == p) in print_task() 152 static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu) in print_rq() argument 168 print_task(m, rq, p); in print_rq() 177 struct rq *rq = cpu_rq(cpu); in print_cfs_rq() local 189 raw_spin_lock_irqsave(&rq->lock, flags); in print_cfs_rq() 197 raw_spin_unlock_irqrestore(&rq->lock, flags); in print_cfs_rq() 278 struct rq *rq = cpu_rq(cpu); in print_cpu() local 294 if (sizeof(rq->x) == 4) \ in print_cpu() 295 SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x)); \ in print_cpu() [all …]
|
D | cputime.c | 249 struct rq *rq = this_rq(); in account_idle_time() local 251 if (atomic_read(&rq->nr_iowait) > 0) in account_idle_time() 340 struct rq *rq, int ticks) in irqtime_account_process_tick() argument 365 } else if (p == rq->idle) { in irqtime_account_process_tick() 376 struct rq *rq = this_rq(); in irqtime_account_idle_ticks() local 378 irqtime_account_process_tick(current, 0, rq, ticks); in irqtime_account_idle_ticks() 383 struct rq *rq, int nr_ticks) {} in irqtime_account_process_tick() argument 466 struct rq *rq = this_rq(); in account_process_tick() local 472 irqtime_account_process_tick(p, user_tick, rq, 1); in account_process_tick() 481 else if ((p != rq->idle) || (irq_count() != HARDIRQ_OFFSET)) in account_process_tick()
|
/linux-4.1.27/drivers/scsi/fnic/ |
D | vnic_rq.c | 27 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument 31 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs() 34 vdev = rq->vdev; in vnic_rq_alloc_bufs() 37 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ, GFP_ATOMIC); in vnic_rq_alloc_bufs() 38 if (!rq->bufs[i]) { in vnic_rq_alloc_bufs() 45 buf = rq->bufs[i]; in vnic_rq_alloc_bufs() 48 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs() 49 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs() 51 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs() 54 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs() [all …]
|
D | vnic_rq.h | 105 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument 108 return rq->ring.desc_avail; in vnic_rq_desc_avail() 111 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument 114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used() 117 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument 119 return rq->to_use->desc; in vnic_rq_next_desc() 122 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument 124 return rq->to_use->index; in vnic_rq_next_index() 127 static inline unsigned int vnic_rq_next_buf_index(struct vnic_rq *rq) in vnic_rq_next_buf_index() argument 129 return rq->buf_index++; in vnic_rq_next_buf_index() [all …]
|
D | fnic_res.h | 223 static inline void fnic_queue_rq_desc(struct vnic_rq *rq, in fnic_queue_rq_desc() argument 227 struct rq_enet_desc *desc = vnic_rq_next_desc(rq); in fnic_queue_rq_desc() 234 vnic_rq_post(rq, os_buf, 0, dma_addr, len); in fnic_queue_rq_desc()
|
D | fnic.h | 309 ____cacheline_aligned struct vnic_rq rq[FNIC_RQ_MAX]; member 335 int fnic_alloc_rq_frame(struct vnic_rq *rq); 336 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf);
|
D | fnic_fcs.c | 790 static void fnic_rq_cmpl_frame_recv(struct vnic_rq *rq, struct cq_desc in fnic_rq_cmpl_frame_recv() argument 795 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_rq_cmpl_frame_recv() 902 vnic_rq_service(&fnic->rq[q_number], cq_desc, completed_index, in fnic_rq_cmpl_handler_cont() 919 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); in fnic_rq_cmpl_handler() 936 int fnic_alloc_rq_frame(struct vnic_rq *rq) in fnic_alloc_rq_frame() argument 938 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_alloc_rq_frame() 955 fnic_queue_rq_desc(rq, skb, pa, len); in fnic_alloc_rq_frame() 959 void fnic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) in fnic_free_rq_buf() argument 962 struct fnic *fnic = vnic_dev_priv(rq->vdev); in fnic_free_rq_buf()
|
D | fnic_res.c | 221 vnic_rq_free(&fnic->rq[i]); in fnic_free_vnic_resources() 275 err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i, in fnic_alloc_vnic_resources() 357 vnic_rq_init(&fnic->rq[i], in fnic_alloc_vnic_resources()
|
D | fnic_main.c | 343 error_status = ioread32(&fnic->rq[i].ctrl->error_status); in fnic_log_q_error() 478 err = vnic_rq_disable(&fnic->rq[i]); in fnic_cleanup() 502 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); in fnic_cleanup() 814 err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame); in fnic_probe() 889 vnic_rq_enable(&fnic->rq[i]); in fnic_probe() 918 vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf); in fnic_probe()
|
/linux-4.1.27/drivers/net/ethernet/cisco/enic/ |
D | vnic_rq.c | 30 static int vnic_rq_alloc_bufs(struct vnic_rq *rq) in vnic_rq_alloc_bufs() argument 33 unsigned int i, j, count = rq->ring.desc_count; in vnic_rq_alloc_bufs() 37 rq->bufs[i] = kzalloc(VNIC_RQ_BUF_BLK_SZ(count), GFP_ATOMIC); in vnic_rq_alloc_bufs() 38 if (!rq->bufs[i]) in vnic_rq_alloc_bufs() 43 buf = rq->bufs[i]; in vnic_rq_alloc_bufs() 46 buf->desc = (u8 *)rq->ring.descs + in vnic_rq_alloc_bufs() 47 rq->ring.desc_size * buf->index; in vnic_rq_alloc_bufs() 49 buf->next = rq->bufs[0]; in vnic_rq_alloc_bufs() 52 buf->next = rq->bufs[i + 1]; in vnic_rq_alloc_bufs() 60 rq->to_use = rq->to_clean = rq->bufs[0]; in vnic_rq_alloc_bufs() [all …]
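`vnic_rq_alloc_bufs()` above allocates the receive-queue buffer metadata in fixed-size blocks and chains every entry into one circular list: the last buffer points back at `bufs[0]`, and `to_use`/`to_clean` both start there. The following is a hedged userspace sketch of that linking scheme; block size, counts, and field set are made up for illustration and cleanup is omitted.

```c
/* Userspace sketch of the enic/fnic-style ring setup: metadata allocated
 * in blocks, all entries chained into one circular list. Sizes here are
 * illustrative, not the driver's constants. */
#include <stdio.h>
#include <stdlib.h>

#define BUF_BLK_ENTRIES 4	/* entries per allocation block (made up) */

struct rq_buf {
	struct rq_buf *next;
	unsigned int index;
};

struct rq {
	unsigned int desc_count;
	struct rq_buf *bufs[8];		/* blocks of entries */
	struct rq_buf *to_use;
	struct rq_buf *to_clean;
};

static int rq_alloc_bufs(struct rq *rq)
{
	unsigned int count = rq->desc_count;
	unsigned int blks = (count + BUF_BLK_ENTRIES - 1) / BUF_BLK_ENTRIES;
	unsigned int i, j;
	struct rq_buf *buf;

	for (i = 0; i < blks; i++) {
		rq->bufs[i] = calloc(BUF_BLK_ENTRIES, sizeof(*buf));
		if (!rq->bufs[i])
			return -1;
	}

	for (i = 0; i < blks; i++) {
		buf = rq->bufs[i];
		for (j = 0; j < BUF_BLK_ENTRIES; j++, buf++) {
			buf->index = i * BUF_BLK_ENTRIES + j;
			if (buf->index == count - 1) {
				buf->next = rq->bufs[0];	/* close the ring */
				break;
			} else if (j + 1 == BUF_BLK_ENTRIES) {
				buf->next = rq->bufs[i + 1];	/* jump to next block */
			} else {
				buf->next = buf + 1;
			}
		}
	}

	rq->to_use = rq->to_clean = rq->bufs[0];
	return 0;
}

int main(void)
{
	struct rq rq = { .desc_count = 10 };
	struct rq_buf *b;
	unsigned int n;

	if (rq_alloc_bufs(&rq))
		return 1;
	/* walk the ring once past the end to show it wraps back to index 0 */
	for (n = 0, b = rq.to_use; n <= rq.desc_count; n++, b = b->next)
		printf("%u ", b->index);
	printf("\n");
	return 0;
}
```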
|
D | vnic_rq.h | 105 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) in vnic_rq_desc_avail() argument 108 return rq->ring.desc_avail; in vnic_rq_desc_avail() 111 static inline unsigned int vnic_rq_desc_used(struct vnic_rq *rq) in vnic_rq_desc_used() argument 114 return rq->ring.desc_count - rq->ring.desc_avail - 1; in vnic_rq_desc_used() 117 static inline void *vnic_rq_next_desc(struct vnic_rq *rq) in vnic_rq_next_desc() argument 119 return rq->to_use->desc; in vnic_rq_next_desc() 122 static inline unsigned int vnic_rq_next_index(struct vnic_rq *rq) in vnic_rq_next_index() argument 124 return rq->to_use->index; in vnic_rq_next_index() 127 static inline void vnic_rq_post(struct vnic_rq *rq, in vnic_rq_post() argument 132 struct vnic_rq_buf *buf = rq->to_use; in vnic_rq_post() [all …]
|
D | enic_main.c | 194 error_status = vnic_rq_error_status(&enic->rq[i]); in enic_log_q_error() 960 static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf) in enic_free_rq_buf() argument 962 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_free_rq_buf() 973 static int enic_rq_alloc_buf(struct vnic_rq *rq) in enic_rq_alloc_buf() argument 975 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_alloc_buf() 981 struct vnic_rq_buf *buf = rq->to_use; in enic_rq_alloc_buf() 984 enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr, in enic_rq_alloc_buf() 1000 enic_queue_rq_desc(rq, skb, os_buf_index, in enic_rq_alloc_buf() 1034 static void enic_rq_indicate_buf(struct vnic_rq *rq, in enic_rq_indicate_buf() argument 1038 struct enic *enic = vnic_dev_priv(rq->vdev); in enic_rq_indicate_buf() [all …]
|
D | enic.h | 174 ____cacheline_aligned struct vnic_rq rq[ENIC_RQ_MAX]; member 199 static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq) in enic_cq_rq() argument 201 return rq; in enic_cq_rq() 225 unsigned int rq) in enic_msix_rq_intr() argument 227 return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset; in enic_msix_rq_intr()
|
D | enic_res.h | 122 static inline void enic_queue_rq_desc(struct vnic_rq *rq, in enic_queue_rq_desc() argument 126 struct rq_enet_desc *desc = vnic_rq_next_desc(rq); in enic_queue_rq_desc() 135 vnic_rq_post(rq, os_buf, os_buf_index, dma_addr, len, wrid); in enic_queue_rq_desc()
|
D | enic_clsf.c | 20 int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq) in enic_addfltr_5t() argument 43 res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data); in enic_addfltr_5t() 45 res = (res == 0) ? rq : res; in enic_addfltr_5t()
|
D | enic_res.c | 189 vnic_rq_free(&enic->rq[i]); in enic_free_vnic_resources() 244 vnic_rq_init(&enic->rq[i], in enic_init_vnic_resources() 341 err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i, in enic_alloc_vnic_resources()
|
D | enic_clsf.h | 9 int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq);
|
/linux-4.1.27/block/ |
D | blk-flush.c | 97 static unsigned int blk_flush_policy(unsigned int fflags, struct request *rq) in blk_flush_policy() argument 101 if (blk_rq_sectors(rq)) in blk_flush_policy() 105 if (rq->cmd_flags & REQ_FLUSH) in blk_flush_policy() 107 if (!(fflags & REQ_FUA) && (rq->cmd_flags & REQ_FUA)) in blk_flush_policy() 113 static unsigned int blk_flush_cur_seq(struct request *rq) in blk_flush_cur_seq() argument 115 return 1 << ffz(rq->flush.seq); in blk_flush_cur_seq() 118 static void blk_flush_restore_request(struct request *rq) in blk_flush_restore_request() argument 125 rq->bio = rq->biotail; in blk_flush_restore_request() 128 rq->cmd_flags &= ~REQ_FLUSH_SEQ; in blk_flush_restore_request() 129 rq->end_io = rq->flush.saved_end_io; in blk_flush_restore_request() [all …]
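`blk_flush_cur_seq()` above derives the next flush stage from a bitmask of already-completed stages: take the lowest zero bit and turn it into a single-bit stage value. A small userspace sketch of that bit trick follows; `ffz()` is open-coded since it is a kernel helper, and the stage constants are illustrative rather than the real REQ_FSEQ_* values.

```c
/* Sketch of the "next stage = lowest unfinished bit" trick behind
 * blk_flush_cur_seq(). ffz() is open-coded; the stage values are
 * illustrative constants. */
#include <stdio.h>

#define SEQ_PREFLUSH  (1u << 0)
#define SEQ_DATA      (1u << 1)
#define SEQ_POSTFLUSH (1u << 2)
#define SEQ_DONE      (1u << 3)

static unsigned int ffz(unsigned int x)
{
	return __builtin_ctz(~x);	/* index of lowest zero bit */
}

static unsigned int flush_cur_seq(unsigned int done_mask)
{
	return 1u << ffz(done_mask);
}

int main(void)
{
	unsigned int seq = 0;

	while (!(seq & SEQ_DONE)) {
		unsigned int cur = flush_cur_seq(seq);

		printf("running stage 0x%x\n", cur);
		seq |= cur;		/* mark the stage finished */
	}
	return 0;
}
```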
|
D | elevator.c | 50 #define rq_hash_key(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) argument 56 static int elv_iosched_allow_merge(struct request *rq, struct bio *bio) in elv_iosched_allow_merge() argument 58 struct request_queue *q = rq->q; in elv_iosched_allow_merge() 62 return e->type->ops.elevator_allow_merge_fn(q, rq, bio); in elv_iosched_allow_merge() 70 bool elv_rq_merge_ok(struct request *rq, struct bio *bio) in elv_rq_merge_ok() argument 72 if (!blk_rq_merge_ok(rq, bio)) in elv_rq_merge_ok() 75 if (!elv_iosched_allow_merge(rq, bio)) in elv_rq_merge_ok() 245 static inline void __elv_rqhash_del(struct request *rq) in __elv_rqhash_del() argument 247 hash_del(&rq->hash); in __elv_rqhash_del() 248 rq->cmd_flags &= ~REQ_HASHED; in __elv_rqhash_del() [all …]
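The `rq_hash_key()` macro above is "position + sectors", i.e. the sector just past the request's end: the elevator hashes requests by where they end so that a new bio can be looked up by its start sector to find a back-merge candidate. A simplified userspace sketch of that lookup (the table and types are stand-ins, not the elevator's hashtable):

```c
/* Userspace sketch of why rq_hash_key() is pos + sectors: requests are
 * hashed by the sector at which they END, so a bio's START sector keys a
 * back-merge lookup. Simplified stand-in types. */
#include <stdio.h>

#define HASH_SIZE 64

struct request {
	unsigned long long pos;		/* first sector */
	unsigned int sectors;		/* length in sectors */
	struct request *hash_next;
};

static struct request *hash_tbl[HASH_SIZE];

static unsigned long long rq_hash_key(const struct request *rq)
{
	return rq->pos + rq->sectors;	/* sector just past the request */
}

static void rq_hash_add(struct request *rq)
{
	unsigned int b = rq_hash_key(rq) % HASH_SIZE;

	rq->hash_next = hash_tbl[b];
	hash_tbl[b] = rq;
}

static struct request *find_back_merge(unsigned long long bio_start)
{
	struct request *rq = hash_tbl[bio_start % HASH_SIZE];

	for (; rq; rq = rq->hash_next)
		if (rq_hash_key(rq) == bio_start)
			return rq;	/* bio starts right where rq ends */
	return NULL;
}

int main(void)
{
	struct request r = { .pos = 2048, .sectors = 8 };

	rq_hash_add(&r);
	printf("merge candidate %s\n", find_back_merge(2056) ? "found" : "missing");
	return 0;
}
```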
|
D | blk-exec.c | 23 static void blk_end_sync_rq(struct request *rq, int error) in blk_end_sync_rq() argument 25 struct completion *waiting = rq->end_io_data; in blk_end_sync_rq() 27 rq->end_io_data = NULL; in blk_end_sync_rq() 52 struct request *rq, int at_head, in blk_execute_rq_nowait() argument 59 WARN_ON(rq->cmd_type == REQ_TYPE_FS); in blk_execute_rq_nowait() 61 rq->rq_disk = bd_disk; in blk_execute_rq_nowait() 62 rq->end_io = done; in blk_execute_rq_nowait() 69 blk_mq_insert_request(rq, at_head, true, false); in blk_execute_rq_nowait() 77 is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME; in blk_execute_rq_nowait() 82 rq->cmd_flags |= REQ_QUIET; in blk_execute_rq_nowait() [all …]
|
D | blk-mq.c | 183 struct request *rq, unsigned int rw_flags) in blk_mq_rq_ctx_init() argument 188 INIT_LIST_HEAD(&rq->queuelist); in blk_mq_rq_ctx_init() 190 rq->q = q; in blk_mq_rq_ctx_init() 191 rq->mq_ctx = ctx; in blk_mq_rq_ctx_init() 192 rq->cmd_flags |= rw_flags; in blk_mq_rq_ctx_init() 194 rq->cpu = -1; in blk_mq_rq_ctx_init() 195 INIT_HLIST_NODE(&rq->hash); in blk_mq_rq_ctx_init() 196 RB_CLEAR_NODE(&rq->rb_node); in blk_mq_rq_ctx_init() 197 rq->rq_disk = NULL; in blk_mq_rq_ctx_init() 198 rq->part = NULL; in blk_mq_rq_ctx_init() [all …]
|
D | blk-core.c | 97 void blk_rq_init(struct request_queue *q, struct request *rq) in blk_rq_init() argument 99 memset(rq, 0, sizeof(*rq)); in blk_rq_init() 101 INIT_LIST_HEAD(&rq->queuelist); in blk_rq_init() 102 INIT_LIST_HEAD(&rq->timeout_list); in blk_rq_init() 103 rq->cpu = -1; in blk_rq_init() 104 rq->q = q; in blk_rq_init() 105 rq->__sector = (sector_t) -1; in blk_rq_init() 106 INIT_HLIST_NODE(&rq->hash); in blk_rq_init() 107 RB_CLEAR_NODE(&rq->rb_node); in blk_rq_init() 108 rq->cmd = rq->__cmd; in blk_rq_init() [all …]
|
D | scsi_ioctl.c | 227 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq, in blk_fill_sghdr_rq() argument 230 if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len)) in blk_fill_sghdr_rq() 232 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE)) in blk_fill_sghdr_rq() 238 rq->cmd_len = hdr->cmd_len; in blk_fill_sghdr_rq() 240 rq->timeout = msecs_to_jiffies(hdr->timeout); in blk_fill_sghdr_rq() 241 if (!rq->timeout) in blk_fill_sghdr_rq() 242 rq->timeout = q->sg_timeout; in blk_fill_sghdr_rq() 243 if (!rq->timeout) in blk_fill_sghdr_rq() 244 rq->timeout = BLK_DEFAULT_SG_TIMEOUT; in blk_fill_sghdr_rq() 245 if (rq->timeout < BLK_MIN_SG_TIMEOUT) in blk_fill_sghdr_rq() [all …]
|
D | deadline-iosched.c | 57 deadline_rb_root(struct deadline_data *dd, struct request *rq) in deadline_rb_root() argument 59 return &dd->sort_list[rq_data_dir(rq)]; in deadline_rb_root() 66 deadline_latter_request(struct request *rq) in deadline_latter_request() argument 68 struct rb_node *node = rb_next(&rq->rb_node); in deadline_latter_request() 77 deadline_add_rq_rb(struct deadline_data *dd, struct request *rq) in deadline_add_rq_rb() argument 79 struct rb_root *root = deadline_rb_root(dd, rq); in deadline_add_rq_rb() 81 elv_rb_add(root, rq); in deadline_add_rq_rb() 85 deadline_del_rq_rb(struct deadline_data *dd, struct request *rq) in deadline_del_rq_rb() argument 87 const int data_dir = rq_data_dir(rq); in deadline_del_rq_rb() 89 if (dd->next_rq[data_dir] == rq) in deadline_del_rq_rb() [all …]
|
D | blk.h | 59 void blk_rq_bio_prep(struct request_queue *q, struct request *rq, 61 int blk_rq_append_bio(struct request_queue *q, struct request *rq, 65 void blk_dequeue_request(struct request *rq); 67 bool __blk_end_bidi_request(struct request *rq, int error, 99 static inline int blk_mark_rq_complete(struct request *rq) in blk_mark_rq_complete() argument 101 return test_and_set_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); in blk_mark_rq_complete() 104 static inline void blk_clear_rq_complete(struct request *rq) in blk_clear_rq_complete() argument 106 clear_bit(REQ_ATOM_COMPLETE, &rq->atomic_flags); in blk_clear_rq_complete() 112 #define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED) argument 114 void blk_insert_flush(struct request *rq); [all …]
|
D | bsg.c | 83 struct request *rq; member 139 static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq, in blk_fill_sgv4_hdr_rq() argument 144 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL); in blk_fill_sgv4_hdr_rq() 145 if (!rq->cmd) in blk_fill_sgv4_hdr_rq() 149 if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request, in blk_fill_sgv4_hdr_rq() 154 if (blk_verify_command(rq->cmd, has_write_perm)) in blk_fill_sgv4_hdr_rq() 162 rq->cmd_len = hdr->request_len; in blk_fill_sgv4_hdr_rq() 164 rq->timeout = msecs_to_jiffies(hdr->timeout); in blk_fill_sgv4_hdr_rq() 165 if (!rq->timeout) in blk_fill_sgv4_hdr_rq() 166 rq->timeout = q->sg_timeout; in blk_fill_sgv4_hdr_rq() [all …]
|
D | blk-merge.c | 89 void blk_recalc_rq_segments(struct request *rq) in blk_recalc_rq_segments() argument 92 &rq->q->queue_flags); in blk_recalc_rq_segments() 94 rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio, in blk_recalc_rq_segments() 247 int blk_rq_map_sg(struct request_queue *q, struct request *rq, in blk_rq_map_sg() argument 253 if (rq->bio) in blk_rq_map_sg() 254 nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg); in blk_rq_map_sg() 256 if (unlikely(rq->cmd_flags & REQ_COPY_USER) && in blk_rq_map_sg() 257 (blk_rq_bytes(rq) & q->dma_pad_mask)) { in blk_rq_map_sg() 259 (q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; in blk_rq_map_sg() 262 rq->extra_len += pad_len; in blk_rq_map_sg() [all …]
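The padding arithmetic in `blk_rq_map_sg()` above, `(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1`, is the number of bytes needed to reach the next (mask+1)-aligned boundary, and the kernel only applies it when the length is not already aligned. A short userspace sketch verifying that identity (the mask value is illustrative):

```c
/* Sketch of the padding arithmetic in blk_rq_map_sg(): with a power-of-two
 * alignment expressed as a mask (align - 1), the bytes needed to reach the
 * next boundary are (mask & ~len) + 1, applied only when len is not
 * already aligned. */
#include <assert.h>
#include <stdio.h>

static unsigned int pad_to_mask(unsigned int len, unsigned int mask)
{
	return (mask & ~len) + 1;
}

int main(void)
{
	unsigned int mask = 0x7;	/* pad to 8-byte multiples (illustrative) */
	unsigned int len;

	for (len = 1; len <= 32; len++) {
		unsigned int pad;

		if (!(len & mask))
			continue;	/* already aligned: no padding applied */
		pad = pad_to_mask(len, mask);
		assert((len + pad) % (mask + 1) == 0);
		printf("len %2u -> pad %u\n", len, pad);
	}
	return 0;
}
```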
|
D | blk-map.c | 12 int blk_rq_append_bio(struct request_queue *q, struct request *rq, in blk_rq_append_bio() argument 15 if (!rq->bio) in blk_rq_append_bio() 16 blk_rq_bio_prep(q, rq, bio); in blk_rq_append_bio() 17 else if (!ll_back_merge_fn(q, rq, bio)) in blk_rq_append_bio() 20 rq->biotail->bi_next = bio; in blk_rq_append_bio() 21 rq->biotail = bio; in blk_rq_append_bio() 23 rq->__data_len += bio->bi_iter.bi_size; in blk_rq_append_bio() 63 int blk_rq_map_user_iov(struct request_queue *q, struct request *rq, in blk_rq_map_user_iov() argument 112 rq->cmd_flags |= REQ_COPY_USER; in blk_rq_map_user_iov() 116 blk_rq_bio_prep(q, rq, bio); in blk_rq_map_user_iov() [all …]
|
D | noop-iosched.c | 15 static void noop_merged_requests(struct request_queue *q, struct request *rq, in noop_merged_requests() argument 26 struct request *rq; in noop_dispatch() local 27 rq = list_entry(nd->queue.next, struct request, queuelist); in noop_dispatch() 28 list_del_init(&rq->queuelist); in noop_dispatch() 29 elv_dispatch_sort(q, rq); in noop_dispatch() 35 static void noop_add_request(struct request_queue *q, struct request *rq) in noop_add_request() argument 39 list_add_tail(&rq->queuelist, &nd->queue); in noop_add_request() 43 noop_former_request(struct request_queue *q, struct request *rq) in noop_former_request() argument 47 if (rq->queuelist.prev == &nd->queue) in noop_former_request() 49 return list_entry(rq->queuelist.prev, struct request, queuelist); in noop_former_request() [all …]
|
D | blk-softirq.c | 31 struct request *rq; in blk_done_softirq() local 33 rq = list_entry(local_list.next, struct request, ipi_list); in blk_done_softirq() 34 list_del_init(&rq->ipi_list); in blk_done_softirq() 35 rq->q->softirq_done_fn(rq); in blk_done_softirq() 42 struct request *rq = data; in trigger_softirq() local 48 list_add_tail(&rq->ipi_list, list); in trigger_softirq() 50 if (list->next == &rq->ipi_list) in trigger_softirq() 59 static int raise_blk_irq(int cpu, struct request *rq) in raise_blk_irq() argument 62 struct call_single_data *data = &rq->csd; in raise_blk_irq() 65 data->info = rq; in raise_blk_irq() [all …]
|
D | blk-tag.c | 265 void blk_queue_end_tag(struct request_queue *q, struct request *rq) in blk_queue_end_tag() argument 268 unsigned tag = rq->tag; /* negative tags invalid */ in blk_queue_end_tag() 272 list_del_init(&rq->queuelist); in blk_queue_end_tag() 273 rq->cmd_flags &= ~REQ_QUEUED; in blk_queue_end_tag() 274 rq->tag = -1; in blk_queue_end_tag() 313 int blk_queue_start_tag(struct request_queue *q, struct request *rq) in blk_queue_start_tag() argument 319 if (unlikely((rq->cmd_flags & REQ_QUEUED))) { in blk_queue_start_tag() 322 __func__, rq, in blk_queue_start_tag() 323 rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag); in blk_queue_start_tag() 335 if (!rq_is_sync(rq) && max_depth > 1) { in blk_queue_start_tag() [all …]
|
D | blk-timeout.c | 113 static void blk_rq_check_expired(struct request *rq, unsigned long *next_timeout, in blk_rq_check_expired() argument 116 if (time_after_eq(jiffies, rq->deadline)) { in blk_rq_check_expired() 117 list_del_init(&rq->timeout_list); in blk_rq_check_expired() 122 if (!blk_mark_rq_complete(rq)) in blk_rq_check_expired() 123 blk_rq_timed_out(rq); in blk_rq_check_expired() 124 } else if (!*next_set || time_after(*next_timeout, rq->deadline)) { in blk_rq_check_expired() 125 *next_timeout = rq->deadline; in blk_rq_check_expired() 134 struct request *rq, *tmp; in blk_rq_timed_out_timer() local 139 list_for_each_entry_safe(rq, tmp, &q->timeout_list, timeout_list) in blk_rq_timed_out_timer() 140 blk_rq_check_expired(rq, &next, &next_set); in blk_rq_timed_out_timer()
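`blk_rq_check_expired()` above walks the pending requests, times out anything whose deadline has passed, and otherwise only remembers the earliest future deadline so the timer can be rearmed once. A userspace sketch of that expire-or-track-minimum scan; jiffies and the request list are mocked, and the wrap-safe `time_after_eq()` comparison is simplified to a plain one.

```c
/* Sketch of the expire-or-track-minimum scan done by the block timeout
 * timer. "jiffies" is a plain counter here; the kernel uses wrap-safe
 * time_after_eq() instead of a direct comparison. */
#include <stdio.h>

struct req { const char *name; unsigned long deadline; };

int main(void)
{
	unsigned long jiffies = 100;
	struct req reqs[] = {
		{ "rq-a",  90 }, { "rq-b", 130 }, { "rq-c", 110 },
	};
	unsigned long next = 0;
	int next_set = 0;
	unsigned int i;

	for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		struct req *rq = &reqs[i];

		if (jiffies >= rq->deadline) {
			printf("%s timed out\n", rq->name);
		} else if (!next_set || rq->deadline < next) {
			next = rq->deadline;	/* earliest pending deadline */
			next_set = 1;
		}
	}
	if (next_set)
		printf("rearm timer for %lu\n", next);
	return 0;
}
```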
|
D | cfq-iosched.c | 57 #define RQ_CIC(rq) icq_to_cic((rq)->elv.icq) argument 58 #define RQ_CFQQ(rq) (struct cfq_queue *) ((rq)->elv.priv[0]) argument 59 #define RQ_CFQG(rq) (struct cfq_group *) ((rq)->elv.priv[1]) argument 2220 static void cfq_del_rq_rb(struct request *rq) in cfq_del_rq_rb() argument 2222 struct cfq_queue *cfqq = RQ_CFQQ(rq); in cfq_del_rq_rb() 2223 const int sync = rq_is_sync(rq); in cfq_del_rq_rb() 2228 elv_rb_del(&cfqq->sort_list, rq); in cfq_del_rq_rb() 2243 static void cfq_add_rq_rb(struct request *rq) in cfq_add_rq_rb() argument 2245 struct cfq_queue *cfqq = RQ_CFQQ(rq); in cfq_add_rq_rb() 2249 cfqq->queued[rq_is_sync(rq)]++; in cfq_add_rq_rb() [all …]
|
D | bsg-lib.c | 82 static void bsg_softirq_done(struct request *rq) in bsg_softirq_done() argument 84 struct bsg_job *job = rq->special; in bsg_softirq_done() 86 blk_end_request_all(rq, rq->errors); in bsg_softirq_done()
|
D | blk-cgroup.h | 377 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) in blk_rq_set_rl() argument 379 rq->rl = rl; in blk_rq_set_rl() 388 static inline struct request_list *blk_rq_rl(struct request *rq) in blk_rq_rl() argument 390 return rq->rl; in blk_rq_rl() 596 static inline void blk_rq_set_rl(struct request *rq, struct request_list *rl) { } in blk_rq_set_rl() argument 597 static inline struct request_list *blk_rq_rl(struct request *rq) { return &rq->q->root_rl; } in blk_rq_rl() argument
|
D | blk-mq-tag.c | 423 struct request *rq; in bt_for_each() local 432 rq = blk_mq_tag_to_rq(hctx->tags, off + bit); in bt_for_each() 433 if (rq->q == hctx->queue) in bt_for_each() 434 fn(hctx, rq, data, reserved); in bt_for_each() 630 u32 blk_mq_unique_tag(struct request *rq) in blk_mq_unique_tag() argument 632 struct request_queue *q = rq->q; in blk_mq_unique_tag() 637 hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu); in blk_mq_unique_tag() 642 (rq->tag & BLK_MQ_UNIQUE_TAG_MASK); in blk_mq_unique_tag()
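`blk_mq_unique_tag()` above packs the hardware-queue index and the per-queue tag into one 32-bit value: queue index in the high bits, tag in the low bits. The sketch below shows the packing and the matching unpack; the 16/16 split mirrors `BLK_MQ_UNIQUE_TAG_BITS` but is hard-coded here purely for illustration.

```c
/* Sketch of the blk_mq_unique_tag() packing: hardware queue index in the
 * high bits, per-queue tag in the low bits. The 16/16 split is assumed
 * for illustration. */
#include <stdint.h>
#include <stdio.h>

#define UNIQUE_TAG_BITS 16
#define UNIQUE_TAG_MASK ((1u << UNIQUE_TAG_BITS) - 1)

static uint32_t unique_tag(uint16_t hwq, uint16_t tag)
{
	return ((uint32_t)hwq << UNIQUE_TAG_BITS) | (tag & UNIQUE_TAG_MASK);
}

static uint16_t unique_tag_to_hwq(uint32_t u) { return u >> UNIQUE_TAG_BITS; }
static uint16_t unique_tag_to_tag(uint32_t u) { return u & UNIQUE_TAG_MASK; }

int main(void)
{
	uint32_t u = unique_tag(3, 42);

	printf("unique=0x%08x hwq=%u tag=%u\n",
	       u, unique_tag_to_hwq(u), unique_tag_to_tag(u));
	return 0;
}
```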
|
D | blk-mq-sysfs.c | 144 struct request *rq; in sysfs_list_show() local 147 list_for_each_entry(rq, list, queuelist) { in sysfs_list_show() 148 const int rq_len = 2 * sizeof(rq) + 2; in sysfs_list_show() 160 "\t%p\n", rq); in sysfs_list_show()
|
D | blk-mq.h | 28 void __blk_mq_complete_request(struct request *rq);
|
/linux-4.1.27/drivers/scsi/esas2r/ |
D | esas2r_disc.c | 49 struct esas2r_request *rq); 51 struct esas2r_request *rq); 55 struct esas2r_request *rq); 59 struct esas2r_request *rq); 61 struct esas2r_request *rq); 63 struct esas2r_request *rq); 65 struct esas2r_request *rq); 67 struct esas2r_request *rq); 69 struct esas2r_request *rq); 71 struct esas2r_request *rq); [all …]
|
D | esas2r_vda.c | 59 static void clear_vda_request(struct esas2r_request *rq); 62 struct esas2r_request *rq); 67 struct esas2r_request *rq, in esas2r_process_vda_ioctl() argument 93 clear_vda_request(rq); in esas2r_process_vda_ioctl() 95 rq->vrq->scsi.function = vi->function; in esas2r_process_vda_ioctl() 96 rq->interrupt_cb = esas2r_complete_vda_ioctl; in esas2r_process_vda_ioctl() 97 rq->interrupt_cx = vi; in esas2r_process_vda_ioctl() 112 rq->vrq->flash.length = cpu_to_le32(datalen); in esas2r_process_vda_ioctl() 113 rq->vrq->flash.sub_func = vi->cmd.flash.sub_func; in esas2r_process_vda_ioctl() 115 memcpy(rq->vrq->flash.data.file.file_name, in esas2r_process_vda_ioctl() [all …]
|
D | esas2r_int.c | 173 struct esas2r_request *rq, in esas2r_handle_outbound_rsp_err() argument 181 if (unlikely(rq->req_stat != RS_SUCCESS)) { in esas2r_handle_outbound_rsp_err() 182 memcpy(&rq->func_rsp, &rsp->func_rsp, sizeof(rsp->func_rsp)); in esas2r_handle_outbound_rsp_err() 184 if (rq->req_stat == RS_ABORTED) { in esas2r_handle_outbound_rsp_err() 185 if (rq->timeout > RQ_MAX_TIMEOUT) in esas2r_handle_outbound_rsp_err() 186 rq->req_stat = RS_TIMEOUT; in esas2r_handle_outbound_rsp_err() 187 } else if (rq->req_stat == RS_SCSI_ERROR) { in esas2r_handle_outbound_rsp_err() 188 u8 scsistatus = rq->func_rsp.scsi_rsp.scsi_stat; in esas2r_handle_outbound_rsp_err() 197 rq->req_stat = RS_SUCCESS; in esas2r_handle_outbound_rsp_err() 198 rq->func_rsp.scsi_rsp.scsi_stat = in esas2r_handle_outbound_rsp_err() [all …]
|
D | esas2r_io.c | 46 void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq) in esas2r_start_request() argument 49 struct esas2r_request *startrq = rq; in esas2r_start_request() 54 if (rq->vrq->scsi.function == VDA_FUNC_SCSI) in esas2r_start_request() 55 rq->req_stat = RS_SEL2; in esas2r_start_request() 57 rq->req_stat = RS_DEGRADED; in esas2r_start_request() 58 } else if (likely(rq->vrq->scsi.function == VDA_FUNC_SCSI)) { in esas2r_start_request() 59 t = a->targetdb + rq->target_id; in esas2r_start_request() 63 rq->req_stat = RS_SEL; in esas2r_start_request() 66 rq->vrq->scsi.target_id = cpu_to_le16(t->virt_targ_id); in esas2r_start_request() 75 rq->req_stat = RS_SEL; in esas2r_start_request() [all …]
|
D | esas2r_ioctl.c | 83 struct esas2r_request *rq) in complete_fm_api_req() argument 111 struct esas2r_request *rq; in do_fm_api() local 118 rq = esas2r_alloc_request(a); in do_fm_api() 119 if (rq == NULL) { in do_fm_api() 151 rq->comp_cb = complete_fm_api_req; in do_fm_api() 155 if (!esas2r_fm_api(a, (struct esas2r_flash_img *)a->save_offset, rq, in do_fm_api() 174 esas2r_free_request(a, (struct esas2r_request *)rq); in do_fm_api() 182 struct esas2r_request *rq) in complete_nvr_req() argument 199 struct esas2r_request *rq) in complete_buffered_ioctl_req() argument 208 struct esas2r_request *rq; in handle_buffered_ioctl() local [all …]
|
D | esas2r_main.c | 145 struct esas2r_request *rq; in write_live_nvram() local 148 rq = esas2r_alloc_request(a); in write_live_nvram() 149 if (rq == NULL) in write_live_nvram() 152 if (esas2r_write_params(a, rq, (struct esas2r_sas_nvram *)buf)) in write_live_nvram() 155 esas2r_free_request(a, rq); in write_live_nvram() 882 struct esas2r_request *rq; in esas2r_queuecommand() local 895 rq = esas2r_alloc_request(a); in esas2r_queuecommand() 896 if (unlikely(rq == NULL)) { in esas2r_queuecommand() 901 rq->cmd = cmd; in esas2r_queuecommand() 906 rq->vrq->scsi.flags |= cpu_to_le32(FCP_CMND_WRD); in esas2r_queuecommand() [all …]
|
D | esas2r_flash.c | 134 struct esas2r_request *rq) in esas2r_fmapi_callback() argument 136 struct atto_vda_flash_req *vrq = &rq->vrq->flash; in esas2r_fmapi_callback() 138 (struct esas2r_flash_context *)rq->interrupt_cx; in esas2r_fmapi_callback() 140 if (rq->req_stat == RS_SUCCESS) { in esas2r_fmapi_callback() 148 rq->req_stat = RS_PENDING; in esas2r_fmapi_callback() 154 rq->req_stat = RS_PENDING; in esas2r_fmapi_callback() 155 rq->interrupt_cb = fc->interrupt_cb; in esas2r_fmapi_callback() 163 if (rq->req_stat != RS_PENDING) in esas2r_fmapi_callback() 169 (*fc->interrupt_cb)(a, rq); in esas2r_fmapi_callback() 177 struct esas2r_request *rq) in build_flash_msg() argument [all …]
|
D | esas2r.h | 406 struct esas2r_request *rq); 967 int esas2r_write_params(struct esas2r_adapter *a, struct esas2r_request *rq, 1005 bool esas2r_nvram_write(struct esas2r_adapter *a, struct esas2r_request *rq, 1010 struct esas2r_request *rq); 1016 void esas2r_free_request(struct esas2r_adapter *a, struct esas2r_request *rq); 1023 void esas2r_start_request(struct esas2r_adapter *a, struct esas2r_request *rq); 1037 struct esas2r_request *rq, 1043 struct esas2r_request *rq, 1049 void esas2r_build_ae_req(struct esas2r_adapter *a, struct esas2r_request *rq); 1051 struct esas2r_request *rq, [all …]
|
D | esas2r_init.c | 104 struct esas2r_request *rq) in alloc_vda_req() argument 126 rq->vrq_md = memdesc; in alloc_vda_req() 127 rq->vrq = (union atto_vda_req *)memdesc->virt_addr; in alloc_vda_req() 128 rq->vrq->scsi.handle = a->num_vrqs; in alloc_vda_req() 840 struct esas2r_request *rq; in esas2r_init_adapter_struct() local 990 for (rq = a->first_ae_req, i = 0; i < num_ae_requests; rq++, in esas2r_init_adapter_struct() 992 INIT_LIST_HEAD(&rq->req_list); in esas2r_init_adapter_struct() 993 if (!alloc_vda_req(a, rq)) { in esas2r_init_adapter_struct() 999 esas2r_rq_init_request(rq, a); in esas2r_init_adapter_struct() 1002 rq->comp_cb = esas2r_ae_complete; in esas2r_init_adapter_struct() [all …]
|
/linux-4.1.27/drivers/ide/ |
D | ide-io.c | 57 int ide_end_rq(ide_drive_t *drive, struct request *rq, int error, in ide_end_rq() argument 70 return blk_end_request(rq, error, nr_bytes); in ide_end_rq() 78 struct request *rq = cmd->rq; in ide_complete_cmd() local 105 if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) { in ide_complete_cmd() 106 struct ide_cmd *orig_cmd = rq->special; in ide_complete_cmd() 118 struct request *rq = hwif->rq; in ide_complete_rq() local 125 if (blk_noretry_request(rq) && error <= 0) in ide_complete_rq() 126 nr_bytes = blk_rq_sectors(rq) << 9; in ide_complete_rq() 128 rc = ide_end_rq(drive, rq, error, nr_bytes); in ide_complete_rq() 130 hwif->rq = NULL; in ide_complete_rq() [all …]
|
D | ide-eh.c | 7 static ide_startstop_t ide_ata_error(ide_drive_t *drive, struct request *rq, in ide_ata_error() argument 15 rq->errors |= ERROR_RESET; in ide_ata_error() 28 rq->errors = ERROR_MAX; in ide_ata_error() 31 rq->errors |= ERROR_RECAL; in ide_ata_error() 35 if ((stat & ATA_DRQ) && rq_data_dir(rq) == READ && in ide_ata_error() 42 if (rq->errors >= ERROR_MAX || blk_noretry_request(rq)) { in ide_ata_error() 43 ide_kill_rq(drive, rq); in ide_ata_error() 48 rq->errors |= ERROR_RESET; in ide_ata_error() 50 if ((rq->errors & ERROR_RESET) == ERROR_RESET) { in ide_ata_error() 51 ++rq->errors; in ide_ata_error() [all …]
|
D | ide-cd.c | 96 static int cdrom_log_sense(ide_drive_t *drive, struct request *rq) in cdrom_log_sense() argument 101 if (!sense || !rq || (rq->cmd_flags & REQ_QUIET)) in cdrom_log_sense() 124 if (rq->cmd[0] == GPCMD_START_STOP_UNIT && sense->asc == 0x24) in cdrom_log_sense() 210 static void ide_cd_complete_failed_rq(ide_drive_t *drive, struct request *rq) in ide_cd_complete_failed_rq() argument 218 struct request *failed = (struct request *)rq->special; in ide_cd_complete_failed_rq() 219 void *sense = bio_data(rq->bio); in ide_cd_complete_failed_rq() 229 failed->sense_len = rq->sense_len; in ide_cd_complete_failed_rq() 247 static int ide_cd_breathe(ide_drive_t *drive, struct request *rq) in ide_cd_breathe() argument 252 if (!rq->errors) in ide_cd_breathe() 255 rq->errors = 1; in ide_cd_breathe() [all …]
|
D | ide-pm.c | 10 struct request *rq; in generic_ide_suspend() local 21 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in generic_ide_suspend() 22 rq->cmd_type = REQ_TYPE_PM_SUSPEND; in generic_ide_suspend() 23 rq->special = &rqpm; in generic_ide_suspend() 29 ret = blk_execute_rq(drive->queue, NULL, rq, 0); in generic_ide_suspend() 30 blk_put_request(rq); in generic_ide_suspend() 46 struct request *rq; in generic_ide_resume() local 61 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in generic_ide_resume() 62 rq->cmd_type = REQ_TYPE_PM_RESUME; in generic_ide_resume() 63 rq->cmd_flags |= REQ_PREEMPT; in generic_ide_resume() [all …]
|
D | ide-floppy.c | 66 struct request *rq = pc->rq; in ide_floppy_callback() local 75 rq->cmd_type == REQ_TYPE_BLOCK_PC) in ide_floppy_callback() 79 u8 *buf = bio_data(rq->bio); in ide_floppy_callback() 100 if (rq->cmd_type == REQ_TYPE_SPECIAL) in ide_floppy_callback() 101 rq->errors = uptodate ? 0 : IDE_DRV_ERROR_GENERAL; in ide_floppy_callback() 136 unsigned int done = blk_rq_bytes(drive->hwif->rq); in ide_floppy_issue_pc() 191 struct ide_atapi_pc *pc, struct request *rq, in idefloppy_create_rw_cmd() argument 196 int blocks = blk_rq_sectors(rq) / floppy->bs_factor; in idefloppy_create_rw_cmd() 197 int cmd = rq_data_dir(rq); in idefloppy_create_rw_cmd() 206 memcpy(rq->cmd, pc->c, 12); in idefloppy_create_rw_cmd() [all …]
|
D | ide-atapi.c | 92 struct request *rq; in ide_queue_pc_tail() local 95 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in ide_queue_pc_tail() 96 rq->cmd_type = REQ_TYPE_SPECIAL; in ide_queue_pc_tail() 97 rq->special = (char *)pc; in ide_queue_pc_tail() 100 error = blk_rq_map_kern(drive->queue, rq, buf, bufflen, in ide_queue_pc_tail() 106 memcpy(rq->cmd, pc->c, 12); in ide_queue_pc_tail() 108 rq->cmd[13] = REQ_IDETAPE_PC1; in ide_queue_pc_tail() 109 error = blk_execute_rq(drive->queue, disk, rq, 0); in ide_queue_pc_tail() 111 blk_put_request(rq); in ide_queue_pc_tail() 171 void ide_prep_sense(ide_drive_t *drive, struct request *rq) in ide_prep_sense() argument [all …]
|
D | ide-park.c | 13 struct request *rq; in issue_park_cmd() local 34 rq = blk_get_request(q, READ, __GFP_WAIT); in issue_park_cmd() 35 rq->cmd[0] = REQ_PARK_HEADS; in issue_park_cmd() 36 rq->cmd_len = 1; in issue_park_cmd() 37 rq->cmd_type = REQ_TYPE_SPECIAL; in issue_park_cmd() 38 rq->special = &timeout; in issue_park_cmd() 39 rc = blk_execute_rq(q, NULL, rq, 1); in issue_park_cmd() 40 blk_put_request(rq); in issue_park_cmd() 48 rq = blk_get_request(q, READ, GFP_NOWAIT); in issue_park_cmd() 49 if (IS_ERR(rq)) in issue_park_cmd() [all …]
|
D | ide-devsets.c | 162 struct request *rq; in ide_devset_execute() local 168 rq = blk_get_request(q, READ, __GFP_WAIT); in ide_devset_execute() 169 rq->cmd_type = REQ_TYPE_SPECIAL; in ide_devset_execute() 170 rq->cmd_len = 5; in ide_devset_execute() 171 rq->cmd[0] = REQ_DEVSET_EXEC; in ide_devset_execute() 172 *(int *)&rq->cmd[1] = arg; in ide_devset_execute() 173 rq->special = setting->set; in ide_devset_execute() 175 if (blk_execute_rq(q, NULL, rq, 0)) in ide_devset_execute() 176 ret = rq->errors; in ide_devset_execute() 177 blk_put_request(rq); in ide_devset_execute() [all …]
|
D | ide-ioctls.c | 126 struct request *rq; in ide_cmd_ioctl() local 128 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in ide_cmd_ioctl() 129 rq->cmd_type = REQ_TYPE_ATA_TASKFILE; in ide_cmd_ioctl() 130 err = blk_execute_rq(drive->queue, NULL, rq, 0); in ide_cmd_ioctl() 131 blk_put_request(rq); in ide_cmd_ioctl() 221 struct request *rq; in generic_drive_reset() local 224 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in generic_drive_reset() 225 rq->cmd_type = REQ_TYPE_SPECIAL; in generic_drive_reset() 226 rq->cmd_len = 1; in generic_drive_reset() 227 rq->cmd[0] = REQ_DRIVE_RESET; in generic_drive_reset() [all …]
|
D | ide-disk.c | 81 static ide_startstop_t __ide_do_rw_disk(ide_drive_t *drive, struct request *rq, in __ide_do_rw_disk() argument 85 u16 nsectors = (u16)blk_rq_sectors(rq); in __ide_do_rw_disk() 93 if (block + blk_rq_sectors(rq) > 1ULL << 28) in __ide_do_rw_disk() 151 if (rq_data_dir(rq)) in __ide_do_rw_disk() 155 cmd.rq = rq; in __ide_do_rw_disk() 181 static ide_startstop_t ide_do_rw_disk(ide_drive_t *drive, struct request *rq, in ide_do_rw_disk() argument 187 BUG_ON(rq->cmd_type != REQ_TYPE_FS); in ide_do_rw_disk() 192 drive->name, rq_data_dir(rq) == READ ? "read" : "writ", in ide_do_rw_disk() 193 (unsigned long long)block, blk_rq_sectors(rq)); in ide_do_rw_disk() 196 hwif->rw_disk(drive, rq); in ide_do_rw_disk() [all …]
|
D | ide-tape.c | 272 struct request *rq = drive->hwif->rq; in idetape_analyze_error() local 273 u8 *sense = bio_data(rq->bio); in idetape_analyze_error() 281 rq->cmd[0], tape->sense_key, tape->asc, tape->ascq); in idetape_analyze_error() 285 rq->resid_len = tape->blk_size * get_unaligned_be32(&sense[3]); in idetape_analyze_error() 319 (blk_rq_bytes(rq) - rq->resid_len)) in idetape_analyze_error() 330 struct request *rq = drive->hwif->rq; in ide_tape_callback() local 334 ide_debug_log(IDE_DBG_FUNC, "cmd: 0x%x, dsc: %d, err: %d", rq->cmd[0], in ide_tape_callback() 351 (blk_rq_bytes(rq) - rq->resid_len) / tape->blk_size; in ide_tape_callback() 369 rq->errors = err; in ide_tape_callback() 383 drive->hwif->rq->cmd[0], tape->dsc_poll_freq); in ide_tape_stall_queue() [all …]
|
D | ide-taskfile.c | 187 struct request *rq = hwif->rq; in task_no_data_intr() local 189 if (blk_pm_request(rq)) in task_no_data_intr() 190 ide_complete_pm_rq(drive, rq); in task_no_data_intr() 289 cmd->rq->errors = 0; in ide_pio_datablock() 326 struct request *rq = drive->hwif->rq; in ide_finish_cmd() local 331 rq->errors = err; in ide_finish_cmd() 338 ide_complete_rq(drive, err ? -EIO : 0, blk_rq_bytes(rq)); in ide_finish_cmd() 396 ide_complete_rq(drive, 0, blk_rq_sectors(cmd->rq) << 9); in task_pio_intr() 429 struct request *rq; in ide_raw_taskfile() local 433 rq = blk_get_request(drive->queue, rw, __GFP_WAIT); in ide_raw_taskfile() [all …]
|
D | ide-lib.c | 94 struct request *rq = drive->hwif->rq; in ide_dump_ata_error() local 98 if (rq) in ide_dump_ata_error() 100 (unsigned long long)blk_rq_pos(rq)); in ide_dump_ata_error()
|
D | ide-cd_ioctl.c | 303 struct request *rq; in ide_cdrom_reset() local 306 rq = blk_get_request(drive->queue, READ, __GFP_WAIT); in ide_cdrom_reset() 307 rq->cmd_type = REQ_TYPE_SPECIAL; in ide_cdrom_reset() 308 rq->cmd_flags = REQ_QUIET; in ide_cdrom_reset() 309 ret = blk_execute_rq(drive->queue, cd->disk, rq, 0); in ide_cdrom_reset() 310 blk_put_request(rq); in ide_cdrom_reset()
|
D | pdc202xx_old.c | 152 struct request *rq = hwif->rq; in pdc202xx_dma_start() local 159 word_count = (blk_rq_sectors(rq) << 8); in pdc202xx_dma_start() 160 word_count = (rq_data_dir(rq) == READ) ? in pdc202xx_dma_start()
|
D | ide-dma.c | 108 blk_rq_sectors(cmd->rq) << 9); in ide_dma_intr() 492 if (hwif->rq) in ide_dma_timeout_retry() 493 hwif->rq->errors = 0; in ide_dma_timeout_retry()
|
D | ide-gd.c | 160 struct request *rq, sector_t sector) argument 162 return drive->disk_ops->do_request(drive, rq, sector);
|
/linux-4.1.27/include/trace/events/ |
D | block.h | 66 TP_PROTO(struct request_queue *q, struct request *rq), 68 TP_ARGS(q, rq), 76 __dynamic_array( char, cmd, blk_cmd_buf_len(rq) ) 80 __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0; 81 __entry->sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 82 0 : blk_rq_pos(rq); 83 __entry->nr_sector = (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? 84 0 : blk_rq_sectors(rq); 85 __entry->errors = rq->errors; 87 blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq)); [all …]
|
/linux-4.1.27/drivers/infiniband/hw/ipath/ |
D | ipath_srq.c | 61 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in ipath_post_srq_receive() 67 spin_lock_irqsave(&srq->rq.lock, flags); in ipath_post_srq_receive() 68 wq = srq->rq.wq; in ipath_post_srq_receive() 70 if (next >= srq->rq.size) in ipath_post_srq_receive() 73 spin_unlock_irqrestore(&srq->rq.lock, flags); in ipath_post_srq_receive() 79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in ipath_post_srq_receive() 87 spin_unlock_irqrestore(&srq->rq.lock, flags); in ipath_post_srq_receive() 135 srq->rq.size = srq_init_attr->attr.max_wr + 1; in ipath_create_srq() 136 srq->rq.max_sge = srq_init_attr->attr.max_sge; in ipath_create_srq() 137 sz = sizeof(struct ib_sge) * srq->rq.max_sge + in ipath_create_srq() [all …]
|
D | ipath_ud.c | 56 struct ipath_rq *rq; in ipath_ud_loopback() local 110 rq = &srq->rq; in ipath_ud_loopback() 114 rq = &qp->r_rq; in ipath_ud_loopback() 122 spin_lock_irqsave(&rq->lock, flags); in ipath_ud_loopback() 123 wq = rq->wq; in ipath_ud_loopback() 126 if (tail >= rq->size) in ipath_ud_loopback() 129 spin_unlock_irqrestore(&rq->lock, flags); in ipath_ud_loopback() 133 wqe = get_rwqe_ptr(rq, tail); in ipath_ud_loopback() 136 spin_unlock_irqrestore(&rq->lock, flags); in ipath_ud_loopback() 142 spin_unlock_irqrestore(&rq->lock, flags); in ipath_ud_loopback() [all …]
|
D | ipath_ruc.c | 169 struct ipath_rq *rq; in ipath_get_rwqe() local 180 rq = &srq->rq; in ipath_get_rwqe() 184 rq = &qp->r_rq; in ipath_get_rwqe() 187 spin_lock_irqsave(&rq->lock, flags); in ipath_get_rwqe() 193 wq = rq->wq; in ipath_get_rwqe() 196 if (tail >= rq->size) in ipath_get_rwqe() 205 wqe = get_rwqe_ptr(rq, tail); in ipath_get_rwqe() 206 if (++tail >= rq->size) in ipath_get_rwqe() 225 if (n >= rq->size) in ipath_get_rwqe() 228 n += rq->size - tail; in ipath_get_rwqe() [all …]
|
D | ipath_verbs.h | 322 struct ipath_rq rq; member 493 static inline struct ipath_rwqe *get_rwqe_ptr(struct ipath_rq *rq, in get_rwqe_ptr() argument 497 ((char *) rq->wq->wq + in get_rwqe_ptr() 499 rq->max_sge * sizeof(struct ib_sge)) * n); in get_rwqe_ptr()
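`get_rwqe_ptr()` above indexes a flat buffer of receive WQEs whose size depends on the queue's `max_sge`: element n sits at `base + (header + max_sge * sizeof(sge)) * n`. A userspace sketch of that variable-stride indexing follows; the types are simplified stand-ins, not the ipath/qib definitions.

```c
/* Userspace sketch of the variable-stride indexing in get_rwqe_ptr():
 * each receive WQE is a fixed header followed by max_sge scatter/gather
 * entries, so the stride is fixed at queue-create time. Stand-in types. */
#include <stdio.h>
#include <stdlib.h>

struct sge  { unsigned long addr; unsigned int length; };
struct rwqe { unsigned long long wr_id; unsigned char num_sge; /* sge[] follows */ };

struct rq {
	char *wq;		/* flat buffer of WQEs */
	unsigned int max_sge;
};

static struct rwqe *get_rwqe_ptr(struct rq *rq, unsigned int n)
{
	size_t stride = sizeof(struct rwqe) + rq->max_sge * sizeof(struct sge);

	return (struct rwqe *)(rq->wq + stride * n);
}

int main(void)
{
	struct rq rq = { .max_sge = 4 };
	size_t stride = sizeof(struct rwqe) + rq.max_sge * sizeof(struct sge);

	rq.wq = calloc(8, stride);	/* room for 8 WQEs */
	get_rwqe_ptr(&rq, 5)->wr_id = 0x1234;
	printf("wqe5 wr_id=%#llx stride=%zu\n",
	       get_rwqe_ptr(&rq, 5)->wr_id, stride);
	free(rq.wq);
	return 0;
}
```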
|
/linux-4.1.27/drivers/infiniband/hw/qib/ |
D | qib_srq.c | 61 if ((unsigned) wr->num_sge > srq->rq.max_sge) { in qib_post_srq_receive() 67 spin_lock_irqsave(&srq->rq.lock, flags); in qib_post_srq_receive() 68 wq = srq->rq.wq; in qib_post_srq_receive() 70 if (next >= srq->rq.size) in qib_post_srq_receive() 73 spin_unlock_irqrestore(&srq->rq.lock, flags); in qib_post_srq_receive() 79 wqe = get_rwqe_ptr(&srq->rq, wq->head); in qib_post_srq_receive() 87 spin_unlock_irqrestore(&srq->rq.lock, flags); in qib_post_srq_receive() 132 srq->rq.size = srq_init_attr->attr.max_wr + 1; in qib_create_srq() 133 srq->rq.max_sge = srq_init_attr->attr.max_sge; in qib_create_srq() 134 sz = sizeof(struct ib_sge) * srq->rq.max_sge + in qib_create_srq() [all …]
|
D | qib_ruc.c | 141 struct qib_rq *rq; in qib_get_rwqe() local 152 rq = &srq->rq; in qib_get_rwqe() 156 rq = &qp->r_rq; in qib_get_rwqe() 159 spin_lock_irqsave(&rq->lock, flags); in qib_get_rwqe() 165 wq = rq->wq; in qib_get_rwqe() 168 if (tail >= rq->size) in qib_get_rwqe() 176 wqe = get_rwqe_ptr(rq, tail); in qib_get_rwqe() 182 if (++tail >= rq->size) in qib_get_rwqe() 201 if (n >= rq->size) in qib_get_rwqe() 204 n += rq->size - tail; in qib_get_rwqe() [all …]
|
D | qib_verbs.h | 384 struct qib_rq rq; member 622 static inline struct qib_rwqe *get_rwqe_ptr(struct qib_rq *rq, unsigned n) in get_rwqe_ptr() argument 625 ((char *) rq->wq->wq + in get_rwqe_ptr() 627 rq->max_sge * sizeof(struct ib_sge)) * n); in get_rwqe_ptr()
|
/linux-4.1.27/drivers/scsi/device_handler/ |
D | scsi_dh_alua.c | 112 struct request *rq; in get_alua_req() local 115 rq = blk_get_request(q, rw, GFP_NOIO); in get_alua_req() 117 if (IS_ERR(rq)) { in get_alua_req() 122 blk_rq_set_block_pc(rq); in get_alua_req() 124 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { in get_alua_req() 125 blk_put_request(rq); in get_alua_req() 131 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | in get_alua_req() 133 rq->retries = ALUA_FAILOVER_RETRIES; in get_alua_req() 134 rq->timeout = ALUA_FAILOVER_TIMEOUT * HZ; in get_alua_req() 136 return rq; in get_alua_req() [all …]
|
D | scsi_dh_emc.c | 273 struct request *rq; in get_req() local 276 rq = blk_get_request(sdev->request_queue, in get_req() 278 if (IS_ERR(rq)) { in get_req() 283 blk_rq_set_block_pc(rq); in get_req() 284 rq->cmd_len = COMMAND_SIZE(cmd); in get_req() 285 rq->cmd[0] = cmd; in get_req() 290 rq->cmd[1] = 0x10; in get_req() 291 rq->cmd[4] = len; in get_req() 295 rq->cmd[1] = 0x10; in get_req() 296 rq->cmd[8] = len; in get_req() [all …]
|
D | scsi_dh_rdac.c | 271 struct request *rq; in get_rdac_req() local 274 rq = blk_get_request(q, rw, GFP_NOIO); in get_rdac_req() 276 if (IS_ERR(rq)) { in get_rdac_req() 281 blk_rq_set_block_pc(rq); in get_rdac_req() 283 if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { in get_rdac_req() 284 blk_put_request(rq); in get_rdac_req() 290 rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | in get_rdac_req() 292 rq->retries = RDAC_RETRIES; in get_rdac_req() 293 rq->timeout = RDAC_TIMEOUT; in get_rdac_req() 295 return rq; in get_rdac_req() [all …]
|
/linux-4.1.27/drivers/s390/char/ |
D | raw3270.c | 136 struct raw3270_request *rq; in raw3270_request_alloc() local 139 rq = kzalloc(sizeof(struct raw3270_request), GFP_KERNEL | GFP_DMA); in raw3270_request_alloc() 140 if (!rq) in raw3270_request_alloc() 145 rq->buffer = kmalloc(size, GFP_KERNEL | GFP_DMA); in raw3270_request_alloc() 146 if (!rq->buffer) { in raw3270_request_alloc() 147 kfree(rq); in raw3270_request_alloc() 151 rq->size = size; in raw3270_request_alloc() 152 INIT_LIST_HEAD(&rq->list); in raw3270_request_alloc() 157 rq->ccw.cda = __pa(rq->buffer); in raw3270_request_alloc() 158 rq->ccw.flags = CCW_FLAG_SLI; in raw3270_request_alloc() [all …]
|
D | fs3270.c | 47 fs3270_wake_up(struct raw3270_request *rq, void *data) in fs3270_wake_up() argument 63 fs3270_do_io(struct raw3270_view *view, struct raw3270_request *rq) in fs3270_do_io() argument 69 rq->callback = fs3270_wake_up; in fs3270_do_io() 70 rq->callback_data = &fp->wait; in fs3270_do_io() 80 rc = raw3270_start(view, rq); in fs3270_do_io() 83 wait_event(fp->wait, raw3270_request_final(rq)); in fs3270_do_io() 93 fs3270_reset_callback(struct raw3270_request *rq, void *data) in fs3270_reset_callback() argument 97 fp = (struct fs3270 *) rq->view; in fs3270_reset_callback() 98 raw3270_request_reset(rq); in fs3270_reset_callback() 103 fs3270_restore_callback(struct raw3270_request *rq, void *data) in fs3270_restore_callback() argument [all …]
|
D | con3270.c | 192 con3270_write_callback(struct raw3270_request *rq, void *data) in con3270_write_callback() argument 194 raw3270_request_reset(rq); in con3270_write_callback() 195 xchg(&((struct con3270 *) rq->view)->write, rq); in con3270_write_callback() 350 con3270_read_callback(struct raw3270_request *rq, void *data) in con3270_read_callback() argument 352 raw3270_get_view(rq->view); in con3270_read_callback() 354 tasklet_schedule(&((struct con3270 *) rq->view)->readlet); in con3270_read_callback() 404 con3270_irq(struct con3270 *cp, struct raw3270_request *rq, struct irb *irb) in con3270_irq() argument 410 if (rq) { in con3270_irq() 412 rq->rc = -EIO; in con3270_irq() 415 rq->rescnt = irb->scsw.cmd.count; in con3270_irq()
|
D | raw3270.h | 121 raw3270_request_final(struct raw3270_request *rq) in raw3270_request_final() argument 123 return list_empty(&rq->list); in raw3270_request_final()
|
D | tty3270.c | 325 tty3270_write_callback(struct raw3270_request *rq, void *data) in tty3270_write_callback() argument 327 struct tty3270 *tp = container_of(rq->view, struct tty3270, view); in tty3270_write_callback() 329 if (rq->rc != 0) { in tty3270_write_callback() 334 raw3270_request_reset(rq); in tty3270_write_callback() 335 xchg(&tp->write, rq); in tty3270_write_callback() 588 tty3270_read_callback(struct raw3270_request *rq, void *data) in tty3270_read_callback() argument 590 struct tty3270 *tp = container_of(rq->view, struct tty3270, view); in tty3270_read_callback() 591 raw3270_get_view(rq->view); in tty3270_read_callback() 646 tty3270_irq(struct tty3270 *tp, struct raw3270_request *rq, struct irb *irb) in tty3270_irq() argument 656 if (rq) { in tty3270_irq() [all …]
|
/linux-4.1.27/drivers/usb/misc/ |
D | uss720.c | 95 struct uss720_async_request *rq = container_of(kref, struct uss720_async_request, ref_count); in destroy_async() local 96 struct parport_uss720_private *priv = rq->priv; in destroy_async() 99 if (likely(rq->urb)) in destroy_async() 100 usb_free_urb(rq->urb); in destroy_async() 101 kfree(rq->dr); in destroy_async() 103 list_del_init(&rq->asynclist); in destroy_async() 105 kfree(rq); in destroy_async() 113 struct uss720_async_request *rq; in async_complete() local 118 rq = urb->context; in async_complete() 119 priv = rq->priv; in async_complete() [all …]
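The async-request teardown above hangs off a reference count: `destroy_async()` runs on the last `kref_put()`, recovers the request with `container_of`, and frees its URB and list entry. Below is a minimal userspace sketch of that embed-a-refcount-and-release pattern; it uses a plain counter and callback, not the kernel's kref API, and all names are stand-ins.

```c
/* Sketch of the embedded-refcount pattern behind destroy_async(): the
 * object embeds a counter plus a release callback, and the last put frees
 * it. Plain userspace stand-in, not the kernel kref API. */
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref_lite {
	int count;
	void (*release)(struct kref_lite *);
};

struct async_request {
	struct kref_lite ref;
	int id;
};

static void destroy_async(struct kref_lite *ref)
{
	struct async_request *rq = container_of(ref, struct async_request, ref);

	printf("freeing request %d\n", rq->id);
	free(rq);
}

static void put(struct kref_lite *ref)
{
	if (--ref->count == 0)
		ref->release(ref);
}

int main(void)
{
	struct async_request *rq = malloc(sizeof(*rq));

	rq->ref = (struct kref_lite){ .count = 2, .release = destroy_async };
	rq->id = 7;
	put(&rq->ref);	/* still referenced elsewhere */
	put(&rq->ref);	/* last put -> destroy_async() runs */
	return 0;
}
```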
|
/linux-4.1.27/drivers/char/ |
D | raw.c | 210 struct raw_config_request rq; in raw_ctl_ioctl() local 216 if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) in raw_ctl_ioctl() 219 return bind_set(rq.raw_minor, rq.block_major, rq.block_minor); in raw_ctl_ioctl() 222 if (copy_from_user(&rq, (void __user *) arg, sizeof(rq))) in raw_ctl_ioctl() 225 err = bind_get(rq.raw_minor, &dev); in raw_ctl_ioctl() 229 rq.block_major = MAJOR(dev); in raw_ctl_ioctl() 230 rq.block_minor = MINOR(dev); in raw_ctl_ioctl() 232 if (copy_to_user((void __user *)arg, &rq, sizeof(rq))) in raw_ctl_ioctl() 252 struct raw32_config_request rq; in raw_ctl_compat_ioctl() local 258 if (copy_from_user(&rq, user_req, sizeof(rq))) in raw_ctl_compat_ioctl() [all …]
|
/linux-4.1.27/drivers/net/ |
D | virtio_net.c | 105 struct receive_queue *rq; member 187 static void give_pages(struct receive_queue *rq, struct page *page) in give_pages() argument 193 end->private = (unsigned long)rq->pages; in give_pages() 194 rq->pages = page; in give_pages() 197 static struct page *get_a_page(struct receive_queue *rq, gfp_t gfp_mask) in get_a_page() argument 199 struct page *p = rq->pages; in get_a_page() 202 rq->pages = (struct page *)p->private; in get_a_page() 241 struct receive_queue *rq, in page_to_skb() argument 309 give_pages(rq, page); in page_to_skb() 326 struct receive_queue *rq, in receive_big() argument [all …]
|
D | ifb.c | 46 struct sk_buff_head rq; member 73 skb_queue_splice_tail_init(&dp->rq, &dp->tq); in ri_tasklet() 115 if ((skb = skb_peek(&dp->rq)) == NULL) { in ri_tasklet() 209 if (skb_queue_len(&dp->rq) >= dev->tx_queue_len) { in ifb_xmit() 213 __skb_queue_tail(&dp->rq, skb); in ifb_xmit() 228 __skb_queue_purge(&dp->rq); in ifb_close() 238 __skb_queue_head_init(&dp->rq); in ifb_open()
|
/linux-4.1.27/include/linux/ |
D | blkdev.h | 605 #define blk_noretry_request(rq) \ argument 606 ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \ 609 #define blk_account_rq(rq) \ argument 610 (((rq)->cmd_flags & REQ_STARTED) && \ 611 ((rq)->cmd_type == REQ_TYPE_FS)) 613 #define blk_pm_request(rq) \ argument 614 ((rq)->cmd_type == REQ_TYPE_PM_SUSPEND || \ 615 (rq)->cmd_type == REQ_TYPE_PM_RESUME) 617 #define blk_rq_cpu_valid(rq) ((rq)->cpu != -1) argument 618 #define blk_bidi_rq(rq) ((rq)->next_rq != NULL) argument [all …]
|
D | blk-mq.h | 82 struct request *rq; member 179 void blk_mq_free_request(struct request *rq); 180 void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq); 191 u32 blk_mq_unique_tag(struct request *rq); 206 int blk_mq_request_started(struct request *rq); 207 void blk_mq_start_request(struct request *rq); 208 void blk_mq_end_request(struct request *rq, int error); 209 void __blk_mq_end_request(struct request *rq, int error); 211 void blk_mq_requeue_request(struct request *rq); 212 void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); [all …]
|
D | blktrace_api.h | 60 extern void blk_add_driver_data(struct request_queue *q, struct request *rq, 76 # define blk_add_driver_data(q, rq, data, len) do {} while (0) argument 106 static inline int blk_cmd_buf_len(struct request *rq) in blk_cmd_buf_len() argument 108 return (rq->cmd_type == REQ_TYPE_BLOCK_PC) ? rq->cmd_len * 3 : 1; in blk_cmd_buf_len() 111 extern void blk_dump_cmd(char *buf, struct request *rq);
|
D | elevator.h | 137 extern int elv_set_request(struct request_queue *q, struct request *rq, 201 #define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq)) argument 205 #define rq_fifo_clear(rq) list_del_init(&(rq)->queuelist) argument
|
D | mii.h | 45 static inline struct mii_ioctl_data *if_mii(struct ifreq *rq) in if_mii() argument 47 return (struct mii_ioctl_data *) &rq->ifr_ifru; in if_mii()
|
D | ide.h | 309 struct request *rq; /* copy of request */ member 337 struct request *rq; member 488 struct request *rq; /* current request */ member 754 struct request *rq; member 1134 void ide_prep_sense(ide_drive_t *drive, struct request *rq); 1168 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
|
D | isdn_ppp.h | 162 struct ippp_buf_queue rq[NUM_RCV_BUFFS]; /* packet queue for isdn_ppp_read() */ member
|
D | device-mapper.h | 52 struct request *rq, 410 union map_info *dm_get_rq_mapinfo(struct request *rq);
|
/linux-4.1.27/drivers/infiniband/hw/cxgb4/ |
D | t4.h | 333 struct t4_rq rq; member 342 return wq->rq.in_use; in t4_rqes_posted() 347 return wq->rq.in_use == 0; in t4_rq_empty() 352 return wq->rq.in_use == (wq->rq.size - 1); in t4_rq_full() 357 return wq->rq.size - 1 - wq->rq.in_use; in t4_rq_avail() 362 wq->rq.in_use++; in t4_rq_produce() 363 if (++wq->rq.pidx == wq->rq.size) in t4_rq_produce() 364 wq->rq.pidx = 0; in t4_rq_produce() 365 wq->rq.wq_pidx += DIV_ROUND_UP(len16*16, T4_EQ_ENTRY_SIZE); in t4_rq_produce() 366 if (wq->rq.wq_pidx >= wq->rq.size * T4_RQ_NUM_SLOTS) in t4_rq_produce() [all …]
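t4_rq_avail()/t4_rq_produce() above are plain producer-index bookkeeping on a fixed-size receive ring. A generic restatement with invented names (struct demo_ring), keeping one slot unused so a full ring stays distinguishable from an empty one:

struct demo_ring {
	unsigned int size;	/* total slots in the ring */
	unsigned int in_use;	/* entries currently posted */
	unsigned int pidx;	/* next slot the producer will fill */
};

static inline int demo_ring_avail(struct demo_ring *r)
{
	return r->size - 1 - r->in_use;
}

static inline void demo_ring_produce(struct demo_ring *r)
{
	r->in_use++;
	if (++r->pidx == r->size)
		r->pidx = 0;		/* wrap the producer index */
}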
|
D | qp.c | 157 wq->rq.memsize, wq->rq.queue, in destroy_qp() 158 dma_unmap_addr(&wq->rq, mapping)); in destroy_qp() 160 c4iw_rqtpool_free(rdev, wq->rq.rqt_hwaddr, wq->rq.rqt_size); in destroy_qp() 161 kfree(wq->rq.sw_rq); in destroy_qp() 163 c4iw_put_qpid(rdev, wq->rq.qid, uctx); in destroy_qp() 185 wq->rq.qid = c4iw_get_qpid(rdev, uctx); in create_qp() 186 if (!wq->rq.qid) { in create_qp() 199 wq->rq.sw_rq = kzalloc(wq->rq.size * sizeof *wq->rq.sw_rq, in create_qp() 201 if (!wq->rq.sw_rq) { in create_qp() 210 wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16)); in create_qp() [all …]
|
D | device.c | 138 le.qid = wq->rq.qid; in c4iw_log_wr_stats() 140 le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts; in c4iw_log_wr_stats() 141 le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts; in c4iw_log_wr_stats() 257 qp->wq.sq.qid, qp->wq.rq.qid, in dump_qp() 281 qp->wq.sq.qid, qp->wq.rq.qid, in dump_qp() 295 qp->wq.sq.qid, qp->wq.rq.qid, in dump_qp() 812 rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start, in c4iw_rdev_open() 813 rdev->lldi.vr->rq.size, in c4iw_rdev_open() 835 rdev->stats.rqt.total = rdev->lldi.vr->rq.size; in c4iw_rdev_open() 926 infop->vr->rq.size > 0 && infop->vr->qp.size > 0 && in rdma_supported() [all …]
|
D | cq.c | 205 int in_use = wq->rq.in_use - count; in c4iw_flush_rq() 209 wq, cq, wq->rq.in_use, count); in c4iw_flush_rq() 584 if (unlikely((CQE_WRID_MSN(hw_cqe) != (wq->rq.msn)))) { in poll_cq() 647 PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx); in poll_cq() 648 *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id; in poll_cq()
|
/linux-4.1.27/drivers/isdn/mISDN/ |
D | stack.c | 429 struct channel_req rq; in connect_layer1() local 445 rq.protocol = protocol; in connect_layer1() 446 rq.adr.channel = adr->channel; in connect_layer1() 447 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); in connect_layer1() 466 struct channel_req rq, rq2; in connect_Bstack() local 478 rq.protocol = protocol; in connect_Bstack() 479 rq.adr = *adr; in connect_Bstack() 480 err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq); in connect_Bstack() 483 ch->recv = rq.ch->send; in connect_Bstack() 484 ch->peer = rq.ch; in connect_Bstack() [all …]
|
D | l1oip_core.c | 994 open_dchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq) in open_dchannel() argument 999 if (rq->protocol == ISDN_P_NONE) in open_dchannel() 1002 (dch->dev.D.protocol != rq->protocol)) { in open_dchannel() 1005 __func__, dch->dev.D.protocol, rq->protocol); in open_dchannel() 1007 if (dch->dev.D.protocol != rq->protocol) in open_dchannel() 1008 dch->dev.D.protocol = rq->protocol; in open_dchannel() 1014 rq->ch = &dch->dev.D; in open_dchannel() 1021 open_bchannel(struct l1oip *hc, struct dchannel *dch, struct channel_req *rq) in open_bchannel() argument 1026 if (!test_channelmap(rq->adr.channel, dch->dev.channelmap)) in open_bchannel() 1028 if (rq->protocol == ISDN_P_NONE) in open_bchannel() [all …]
|
/linux-4.1.27/drivers/mtd/ |
D | mtd_blkdevs.c | 47 blk_cleanup_queue(dev->rq); in blktrans_dev_release() 133 struct request_queue *rq = dev->rq; in mtd_blktrans_work() local 137 spin_lock_irq(rq->queue_lock); in mtd_blktrans_work() 143 if (!req && !(req = blk_fetch_request(rq))) { in mtd_blktrans_work() 145 spin_unlock_irq(rq->queue_lock); in mtd_blktrans_work() 149 spin_lock_irq(rq->queue_lock); in mtd_blktrans_work() 160 spin_unlock_irq(rq->queue_lock); in mtd_blktrans_work() 166 spin_lock_irq(rq->queue_lock); in mtd_blktrans_work() 174 spin_unlock_irq(rq->queue_lock); in mtd_blktrans_work() 177 static void mtd_blktrans_request(struct request_queue *rq) in mtd_blktrans_request() argument [all …]
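mtd_blktrans_work() above is the classic single-queue pattern: fetch a request while holding queue_lock, drop the lock for the transfer, retake it and end the request. A stripped-down sketch of that loop (demo_request_fn is invented and the transfer itself is left as a stub):

#include <linux/blkdev.h>

/* Assumes q->queue_lock is held on entry, as the block layer guarantees
 * for a request_fn. */
static void demo_request_fn(struct request_queue *q)
{
	struct request *req;

	while ((req = blk_fetch_request(q)) != NULL) {
		int err = 0;

		spin_unlock_irq(q->queue_lock);		/* do the work unlocked */
		/* ... perform the transfer described by req ... */
		spin_lock_irq(q->queue_lock);

		__blk_end_request_all(req, err);	/* lock held: __ variant */
	}
}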
|
/linux-4.1.27/drivers/net/vmxnet3/ |
D | vmxnet3_drv.c | 561 vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx, in vmxnet3_rq_alloc_rx_buf() argument 565 struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx]; in vmxnet3_rq_alloc_rx_buf() 566 struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx]; in vmxnet3_rq_alloc_rx_buf() 582 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf() 601 rq->stats.rx_buf_alloc_failure++; in vmxnet3_rq_alloc_rx_buf() 1137 vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd, in vmxnet3_rx_error() argument 1140 rq->stats.drop_err++; in vmxnet3_rx_error() 1142 rq->stats.drop_fcs++; in vmxnet3_rx_error() 1144 rq->stats.drop_total++; in vmxnet3_rx_error() 1164 vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, in vmxnet3_rq_rx_complete() argument [all …]
|
D | vmxnet3_int.h | 380 #define VMXNET3_RX_ALLOC_THRESHOLD(rq, ring_idx, adapter) \ argument 381 ((rq)->rx_ring[ring_idx].size >> 3)
|
/linux-4.1.27/drivers/scsi/ |
D | hpsa.h | 410 struct reply_queue_buffer *rq = &h->reply_queue[q]; in SA5_performant_completed() local 426 if ((((u32) rq->head[rq->current_entry]) & 1) == rq->wraparound) { in SA5_performant_completed() 427 register_value = rq->head[rq->current_entry]; in SA5_performant_completed() 428 rq->current_entry++; in SA5_performant_completed() 434 if (rq->current_entry == h->max_commands) { in SA5_performant_completed() 435 rq->current_entry = 0; in SA5_performant_completed() 436 rq->wraparound ^= 1; in SA5_performant_completed() 504 struct reply_queue_buffer *rq = &h->reply_queue[q]; in SA5_ioaccel_mode1_completed() local 508 register_value = rq->head[rq->current_entry]; in SA5_ioaccel_mode1_completed() 510 rq->head[rq->current_entry] = IOACCEL_MODE1_REPLY_UNUSED; in SA5_ioaccel_mode1_completed() [all …]
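SA5_performant_completed() above consumes a reply ring using a one-bit phase toggle: an entry is valid only while its low bit matches rq->wraparound, and the expected phase flips each time the consumer wraps. A hedged generic sketch of that consumer (struct demo_reply_ring and the ~0 "empty" return are invented):

#include <linux/types.h>

struct demo_reply_ring {
	u32 *head;		/* ring memory the device writes into */
	unsigned int size;	/* number of entries */
	unsigned int current_entry;
	u8 wraparound;		/* phase bit the host currently expects */
};

static u32 demo_reply_pop(struct demo_reply_ring *rq)
{
	u32 val;

	if ((rq->head[rq->current_entry] & 1) != rq->wraparound)
		return ~0u;			/* nothing new from the device */

	val = rq->head[rq->current_entry];
	if (++rq->current_entry == rq->size) {
		rq->current_entry = 0;
		rq->wraparound ^= 1;		/* next lap expects the other phase */
	}
	return val;
}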
|
D | sd.c | 692 struct request *rq = cmd->request; in sd_setup_discard_cmnd() local 694 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk); in sd_setup_discard_cmnd() 695 sector_t sector = blk_rq_pos(rq); in sd_setup_discard_cmnd() 696 unsigned int nr_sectors = blk_rq_sectors(rq); in sd_setup_discard_cmnd() 697 unsigned int nr_bytes = blk_rq_bytes(rq); in sd_setup_discard_cmnd() 753 rq->completion_data = page; in sd_setup_discard_cmnd() 754 rq->timeout = SD_TIMEOUT; in sd_setup_discard_cmnd() 767 blk_add_request_payload(rq, page, len); in sd_setup_discard_cmnd() 769 rq->__data_len = nr_bytes; in sd_setup_discard_cmnd() 817 struct request *rq = cmd->request; in sd_setup_write_same_cmnd() local [all …]
|
D | sg.c | 146 struct request *rq; member 191 static void sg_rq_end_io(struct request *rq, int uptodate); 792 blk_end_request_all(srp->rq, -EIO); in sg_common_write() 804 srp->rq->timeout = timeout; in sg_common_write() 807 srp->rq, at_head, sg_rq_end_io); in sg_common_write() 1280 sg_rq_end_io(struct request *rq, int uptodate) in sg_rq_end_io() argument 1282 struct sg_request *srp = rq->end_io_data; in sg_rq_end_io() 1301 sense = rq->sense; in sg_rq_end_io() 1302 result = rq->errors; in sg_rq_end_io() 1303 resid = rq->resid_len; in sg_rq_end_io() [all …]
|
D | sr.c | 390 struct request *rq = SCpnt->request; in sr_init_command() local 396 SCpnt = rq->special; in sr_init_command() 397 cd = scsi_cd(rq->rq_disk); in sr_init_command() 408 "Finishing %u sectors\n", blk_rq_sectors(rq))); in sr_init_command() 440 if (rq_data_dir(rq) == WRITE) { in sr_init_command() 445 } else if (rq_data_dir(rq) == READ) { in sr_init_command() 448 blk_dump_rq_flags(rq, "Unknown sr command"); in sr_init_command() 471 if (((unsigned int)blk_rq_pos(rq) % (s_size >> 9)) || in sr_init_command() 482 (rq_data_dir(rq) == WRITE) ? in sr_init_command() 484 this_count, blk_rq_sectors(rq))); in sr_init_command() [all …]
|
D | scsi_lib.c | 1118 struct request *rq = cmd->request; in scsi_init_io() local 1119 bool is_mq = (rq->mq_ctx != NULL); in scsi_init_io() 1122 BUG_ON(!rq->nr_phys_segments); in scsi_init_io() 1124 error = scsi_init_sgtable(rq, &cmd->sdb); in scsi_init_io() 1128 if (blk_bidi_rq(rq)) { in scsi_init_io() 1129 if (!rq->q->mq_ops) { in scsi_init_io() 1137 rq->next_rq->special = bidi_sdb; in scsi_init_io() 1140 error = scsi_init_sgtable(rq->next_rq, rq->next_rq->special); in scsi_init_io() 1145 if (blk_integrity_rq(rq)) { in scsi_init_io() 1160 ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); in scsi_init_io() [all …]
|
D | virtio_scsi.c | 508 struct request *rq = sc->request; in virtio_scsi_init_hdr_pi() local 513 if (!rq || !scsi_prot_sg_count(sc)) in virtio_scsi_init_hdr_pi() 516 bi = blk_get_integrity(rq->rq_disk); in virtio_scsi_init_hdr_pi() 520 blk_rq_sectors(rq) * in virtio_scsi_init_hdr_pi() 524 blk_rq_sectors(rq) * in virtio_scsi_init_hdr_pi()
|
/linux-4.1.27/drivers/md/ |
D | dm.c | 105 union map_info *dm_get_rq_mapinfo(struct request *rq) in dm_get_rq_mapinfo() argument 107 if (rq && rq->end_io_data) in dm_get_rq_mapinfo() 108 return &((struct dm_rq_target_io *)rq->end_io_data)->info; in dm_get_rq_mapinfo() 629 static void free_clone_request(struct mapped_device *md, struct request *rq) in free_clone_request() argument 631 mempool_free(rq, md->rq_pool); in free_clone_request() 1044 static struct dm_rq_target_io *tio_from_request(struct request *rq) in tio_from_request() argument 1046 return (rq->q->mq_ops ? blk_mq_rq_to_pdu(rq) : rq->special); in tio_from_request() 1110 struct request *rq = tio->orig; in dm_end_request() local 1112 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { in dm_end_request() 1113 rq->errors = clone->errors; in dm_end_request() [all …]
|
D | dm-target.c | 140 static int io_err_clone_and_map_rq(struct dm_target *ti, struct request *rq, in io_err_clone_and_map_rq() argument
|
/linux-4.1.27/drivers/block/ |
D | osdblk.c | 97 struct request *rq; /* blk layer request */ member 249 __blk_end_request_all(orq->rq, ret); in osdblk_osd_complete() 300 struct request *rq; in osdblk_rq_fn() local 307 rq = blk_fetch_request(q); in osdblk_rq_fn() 308 if (!rq) in osdblk_rq_fn() 312 if (rq->cmd_type != REQ_TYPE_FS) { in osdblk_rq_fn() 313 blk_end_request_all(rq, 0); in osdblk_rq_fn() 324 do_flush = rq->cmd_flags & REQ_FLUSH; in osdblk_rq_fn() 325 do_write = (rq_data_dir(rq) == WRITE); in osdblk_rq_fn() 329 bio = bio_chain_clone(rq->bio, GFP_ATOMIC); in osdblk_rq_fn() [all …]
|
D | null_blk.c | 16 struct request *rq; member 218 blk_mq_end_request(cmd->rq, 0); in end_cmd() 221 INIT_LIST_HEAD(&cmd->rq->queuelist); in end_cmd() 222 blk_end_request_all(cmd->rq, 0); in end_cmd() 266 static void null_softirq_done_fn(struct request *rq) in null_softirq_done_fn() argument 269 end_cmd(blk_mq_rq_to_pdu(rq)); in null_softirq_done_fn() 271 end_cmd(rq->special); in null_softirq_done_fn() 281 blk_mq_complete_request(cmd->rq); in null_handle_cmd() 284 blk_complete_request(cmd->rq); in null_handle_cmd() 333 cmd->rq = req; in null_rq_prep_fn() [all …]
|
D | sx8.c | 262 struct request *rq; member 554 struct request *rq; in carm_get_special() local 570 rq = blk_get_request(host->oob_q, WRITE /* bogus */, GFP_KERNEL); in carm_get_special() 571 if (IS_ERR(rq)) { in carm_get_special() 578 crq->rq = rq; in carm_get_special() 623 crq->rq->cmd_type = REQ_TYPE_SPECIAL; in carm_array_info() 624 crq->rq->special = crq; in carm_array_info() 625 blk_execute_rq_nowait(host->oob_q, NULL, crq->rq, true, NULL); in carm_array_info() 664 crq->rq->cmd_type = REQ_TYPE_SPECIAL; in carm_send_special() 665 crq->rq->special = crq; in carm_send_special() [all …]
|
D | xen-blkfront.c | 120 struct request_queue *rq; member 610 static void do_blkif_request(struct request_queue *rq) in do_blkif_request() argument 620 while ((req = blk_peek_request(rq)) != NULL) { in do_blkif_request() 640 blk_requeue_request(rq, req); in do_blkif_request() 643 blk_stop_queue(rq); in do_blkif_request() 658 struct request_queue *rq; in xlvbd_init_blk_queue() local 661 rq = blk_init_queue(do_blkif_request, &info->io_lock); in xlvbd_init_blk_queue() 662 if (rq == NULL) in xlvbd_init_blk_queue() 665 queue_flag_set_unlocked(QUEUE_FLAG_VIRT, rq); in xlvbd_init_blk_queue() 668 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, rq); in xlvbd_init_blk_queue() [all …]
|
D | loop.c | 227 static int lo_write_simple(struct loop_device *lo, struct request *rq, in lo_write_simple() argument 234 rq_for_each_segment(bvec, rq, iter) { in lo_write_simple() 249 static int lo_write_transfer(struct loop_device *lo, struct request *rq, in lo_write_transfer() argument 261 rq_for_each_segment(bvec, rq, iter) { in lo_write_transfer() 279 static int lo_read_simple(struct loop_device *lo, struct request *rq, in lo_read_simple() argument 287 rq_for_each_segment(bvec, rq, iter) { in lo_read_simple() 298 __rq_for_each_bio(bio, rq) in lo_read_simple() 308 static int lo_read_transfer(struct loop_device *lo, struct request *rq, in lo_read_transfer() argument 322 rq_for_each_segment(bvec, rq, iter) { in lo_read_transfer() 346 __rq_for_each_bio(bio, rq) in lo_read_transfer() [all …]
|
D | cciss.c | 1812 static void cciss_softirq_done(struct request *rq) in cciss_softirq_done() argument 1814 CommandList_struct *c = rq->completion_data; in cciss_softirq_done() 1843 dev_dbg(&h->pdev->dev, "Done with %p\n", rq); in cciss_softirq_done() 1846 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) in cciss_softirq_done() 1847 rq->resid_len = c->err_info->ResidualCnt; in cciss_softirq_done() 1849 blk_end_request_all(rq, (rq->errors == 0) ? 0 : -EIO); in cciss_softirq_done() 3069 if (cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC) in evaluate_target_status() 3078 if (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC) in evaluate_target_status() 3089 (cmd->rq->cmd_type != REQ_TYPE_BLOCK_PC)) in evaluate_target_status() 3093 *retry_cmd = !(cmd->rq->cmd_type == REQ_TYPE_BLOCK_PC); in evaluate_target_status() [all …]
|
D | amiflop.c | 1344 struct request *rq = NULL; in set_next_request() local 1361 rq = blk_fetch_request(q); in set_next_request() 1362 if (rq) in set_next_request() 1370 return rq; in set_next_request() 1375 struct request *rq; in redo_fd_request() local 1384 rq = set_next_request(); in redo_fd_request() 1385 if (!rq) { in redo_fd_request() 1390 floppy = rq->rq_disk->private_data; in redo_fd_request() 1395 for (cnt = 0, err = 0; cnt < blk_rq_cur_sectors(rq); cnt++) { in redo_fd_request() 1398 blk_rq_pos(rq), cnt, in redo_fd_request() [all …]
|
D | pktcdvd.c | 702 struct request *rq; in pkt_generic_packet() local 705 rq = blk_get_request(q, (cgc->data_direction == CGC_DATA_WRITE) ? in pkt_generic_packet() 707 if (IS_ERR(rq)) in pkt_generic_packet() 708 return PTR_ERR(rq); in pkt_generic_packet() 709 blk_rq_set_block_pc(rq); in pkt_generic_packet() 712 ret = blk_rq_map_kern(q, rq, cgc->buffer, cgc->buflen, in pkt_generic_packet() 718 rq->cmd_len = COMMAND_SIZE(cgc->cmd[0]); in pkt_generic_packet() 719 memcpy(rq->cmd, cgc->cmd, CDROM_PACKET_SIZE); in pkt_generic_packet() 721 rq->timeout = 60*HZ; in pkt_generic_packet() 723 rq->cmd_flags |= REQ_QUIET; in pkt_generic_packet() [all …]
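pkt_generic_packet() above shows the 4.1-era way to push a packet command through the block layer: allocate a request, mark it BLOCK_PC, fill the CDB, execute, free. A hedged sketch of the same sequence (demo_test_unit_ready and the choice of TEST UNIT READY are illustrative):

#include <linux/blkdev.h>
#include <scsi/scsi.h>

static int demo_test_unit_ready(struct request_queue *q, struct gendisk *disk)
{
	struct request *rq;
	int err;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	blk_rq_set_block_pc(rq);		/* SCSI pass-through request */
	rq->cmd[0] = TEST_UNIT_READY;		/* 6-byte CDB, rest stays zero */
	rq->cmd_len = 6;
	rq->timeout = 30 * HZ;

	err = blk_execute_rq(q, disk, rq, 0);	/* wait for completion */
	blk_put_request(rq);
	return err;
}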
|
D | loop.h | 71 struct request *rq; member
|
D | rbd.c | 301 struct request *rq; /* block request */ member 2158 img_request->rq = NULL; in rbd_img_request_create() 2300 rbd_assert(img_request->rq != NULL); in rbd_img_obj_end_request() 2302 more = blk_update_request(img_request->rq, result, xferred); in rbd_img_obj_end_request() 2304 __blk_mq_end_request(img_request->rq, result); in rbd_img_obj_end_request() 3334 struct request *rq = blk_mq_rq_from_pdu(work); in rbd_queue_workfn() local 3335 struct rbd_device *rbd_dev = rq->q->queuedata; in rbd_queue_workfn() 3338 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT; in rbd_queue_workfn() 3339 u64 length = blk_rq_bytes(rq); in rbd_queue_workfn() 3344 if (rq->cmd_type != REQ_TYPE_FS) { in rbd_queue_workfn() [all …]
|
/linux-4.1.27/Documentation/locking/ |
D | lockstat.txt | 140 34 &rq->lock: 13128 13128 0.43 190.5… 142 36 &rq->lock 645 [<ffffffff8103bfc4>] task_rq_lock+… 143 37 &rq->lock 297 [<ffffffff8104ba65>] try_to_wake_u… 144 38 &rq->lock 360 [<ffffffff8103c4c5>] select_task_r… 145 39 &rq->lock 428 [<ffffffff81045f98>] scheduler_tic… 147 41 &rq->lock 77 [<ffffffff8103bfc4>] task_rq_lock+… 148 42 &rq->lock 174 [<ffffffff8104ba65>] try_to_wake_u… 149 43 &rq->lock 4715 [<ffffffff8103ed4b>] double_rq_loc… 150 44 &rq->lock 893 [<ffffffff81340524>] schedule+0x15… 154 48 &rq->lock/1: 1526 11488 0.33 388.7… [all …]
|
/linux-4.1.27/drivers/infiniband/hw/mthca/ |
D | mthca_qp.c | 210 return qp->queue.direct.buf + (n << qp->rq.wqe_shift); in get_recv_wqe() 212 return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf + in get_recv_wqe() 213 ((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1)); in get_recv_wqe() 499 qp_attr->cap.max_recv_wr = qp->rq.max; in mthca_query_qp() 501 qp_attr->cap.max_recv_sge = qp->rq.max_gs; in mthca_query_qp() 598 if (qp->rq.max) in __mthca_modify_qp() 599 qp_context->rq_size_stride = ilog2(qp->rq.max) << 3; in __mthca_modify_qp() 600 qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; in __mthca_modify_qp() 761 qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index); in __mthca_modify_qp() 827 mthca_wq_reset(&qp->rq); in __mthca_modify_qp() [all …]
|
/linux-4.1.27/samples/bpf/ |
D | tracex3_kern.c | 26 long rq = ctx->di; in bpf_prog1() local 29 bpf_map_update_elem(&my_map, &rq, &val, BPF_ANY); in bpf_prog1() 54 long rq = ctx->di; in bpf_prog2() local 58 value = bpf_map_lookup_elem(&my_map, &rq); in bpf_prog2() 65 bpf_map_delete_elem(&my_map, &rq); in bpf_prog2()
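tracex3_kern.c above times block requests by keying a BPF hash map on the struct request pointer taken from ctx->di. A hedged sketch of that two-probe flow, assuming the samples/bpf bpf_helpers.h conventions; the map name, function names and the probed symbols (blk_mq_start_request / blk_mq_end_request) are illustrative, and a real tool would aggregate the measured latency into a histogram map as the sample does:

#include <linux/types.h>
#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"	/* from samples/bpf: SEC(), bpf_map_* helper stubs */

struct bpf_map_def SEC("maps") start_ts = {
	.type = BPF_MAP_TYPE_HASH,
	.key_size = sizeof(long),
	.value_size = sizeof(u64),
	.max_entries = 4096,
};

SEC("kprobe/blk_mq_start_request")
int probe_start(struct pt_regs *ctx)
{
	long rq = ctx->di;		/* x86-64: first argument lives in %rdi */
	u64 now = bpf_ktime_get_ns();

	bpf_map_update_elem(&start_ts, &rq, &now, BPF_ANY);
	return 0;
}

SEC("kprobe/blk_mq_end_request")
int probe_end(struct pt_regs *ctx)
{
	long rq = ctx->di;
	u64 *tsp = bpf_map_lookup_elem(&start_ts, &rq);

	if (!tsp)
		return 0;
	/* bpf_ktime_get_ns() - *tsp is the request latency in ns; a real
	 * program would record it before dropping the entry. */
	bpf_map_delete_elem(&start_ts, &rq);
	return 0;
}

char _license[] SEC("license") = "GPL";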
|
/linux-4.1.27/drivers/s390/block/ |
D | scm_blk.c | 246 blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY); in scm_ensure_queue_restart() 256 blk_requeue_request(bdev->rq, scmrq->request[i]); in scm_request_requeue() 295 static void scm_blk_request(struct request_queue *rq) in scm_blk_request() argument 297 struct scm_device *scmdev = rq->queuedata; in scm_blk_request() 302 while ((req = blk_peek_request(rq))) { in scm_blk_request() 465 blk_run_queue(bdev->rq); in scm_blk_tasklet() 474 struct request_queue *rq; in scm_blk_dev_setup() local 495 rq = blk_init_queue(scm_blk_request, &bdev->rq_lock); in scm_blk_dev_setup() 496 if (!rq) in scm_blk_dev_setup() 499 bdev->rq = rq; in scm_blk_dev_setup() [all …]
|
D | scm_blk.h | 18 struct request_queue *rq; member 48 #define to_aobrq(rq) container_of((void *) rq, struct aob_rq_header, data) argument
|
/linux-4.1.27/kernel/trace/ |
D | blktrace.c | 712 static void blk_add_trace_rq(struct request_queue *q, struct request *rq, in blk_add_trace_rq() argument 720 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) { in blk_add_trace_rq() 722 __blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags, in blk_add_trace_rq() 723 what, rq->errors, rq->cmd_len, rq->cmd); in blk_add_trace_rq() 726 __blk_add_trace(bt, blk_rq_pos(rq), nr_bytes, in blk_add_trace_rq() 727 rq->cmd_flags, what, rq->errors, 0, NULL); in blk_add_trace_rq() 732 struct request_queue *q, struct request *rq) in blk_add_trace_rq_abort() argument 734 blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT); in blk_add_trace_rq_abort() 738 struct request_queue *q, struct request *rq) in blk_add_trace_rq_insert() argument 740 blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT); in blk_add_trace_rq_insert() [all …]
|
/linux-4.1.27/drivers/ptp/ |
D | ptp_chardev.c | 31 struct ptp_clock_request rq; in ptp_disable_pinfunc() local 34 memset(&rq, 0, sizeof(rq)); in ptp_disable_pinfunc() 40 rq.type = PTP_CLK_REQ_EXTTS; in ptp_disable_pinfunc() 41 rq.extts.index = chan; in ptp_disable_pinfunc() 42 err = ops->enable(ops, &rq, 0); in ptp_disable_pinfunc() 45 rq.type = PTP_CLK_REQ_PEROUT; in ptp_disable_pinfunc() 46 rq.perout.index = chan; in ptp_disable_pinfunc() 47 err = ops->enable(ops, &rq, 0); in ptp_disable_pinfunc()
|
D | ptp_ixp46x.c | 218 struct ptp_clock_request *rq, int on) in ptp_ixp_enable() argument 222 switch (rq->type) { in ptp_ixp_enable() 224 switch (rq->extts.index) { in ptp_ixp_enable()
|
D | ptp_pch.c | 488 struct ptp_clock_request *rq, int on) in ptp_pch_enable() argument 492 switch (rq->type) { in ptp_pch_enable() 494 switch (rq->extts.index) { in ptp_pch_enable()
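The three PTP entries above (ptp_chardev.c, ptp_ixp46x.c, ptp_pch.c) all revolve around the same driver callback: enable(ptp, rq, on) switches on rq->type and programs the requested external-timestamp or periodic-output channel. A skeleton of such a callback with the hardware programming left out (demo_ptp_enable is an invented name):

#include <linux/ptp_clock_kernel.h>

static int demo_ptp_enable(struct ptp_clock_info *ptp,
			   struct ptp_clock_request *rq, int on)
{
	switch (rq->type) {
	case PTP_CLK_REQ_EXTTS:
		/* 'on' arms or disarms timestamping on external input
		 * rq->extts.index, honouring rq->extts.flags */
		return 0;
	case PTP_CLK_REQ_PEROUT:
		/* program periodic output rq->perout.index from
		 * rq->perout.start and rq->perout.period */
		return -EOPNOTSUPP;	/* not implemented in this sketch */
	case PTP_CLK_REQ_PPS:
		return -EOPNOTSUPP;
	default:
		return -EINVAL;
	}
}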
|
/linux-4.1.27/drivers/block/aoe/ |
D | aoedev.c | 163 struct request *rq; in aoe_failip() local 169 rq = d->ip.rq; in aoe_failip() 170 if (rq == NULL) in aoe_failip() 175 n = (unsigned long) rq->special; in aoe_failip() 176 rq->special = (void *) --n; in aoe_failip() 178 if ((unsigned long) rq->special == 0) in aoe_failip() 179 aoe_end_request(d, rq, 0); in aoe_failip() 201 struct request *rq; in aoedev_downdev() local 229 while ((rq = blk_peek_request(d->blkq))) { in aoedev_downdev() 230 blk_start_request(rq); in aoedev_downdev() [all …]
|
D | aoecmd.c | 896 bufinit(struct buf *buf, struct request *rq, struct bio *bio) in bufinit() argument 899 buf->rq = rq; in bufinit() 908 struct request *rq; in nextbuf() local 918 rq = d->ip.rq; in nextbuf() 919 if (rq == NULL) { in nextbuf() 920 rq = blk_peek_request(q); in nextbuf() 921 if (rq == NULL) in nextbuf() 923 blk_start_request(rq); in nextbuf() 924 d->ip.rq = rq; in nextbuf() 925 d->ip.nxbio = rq->bio; in nextbuf() [all …]
|
D | aoe.h | 105 struct request *rq; member 177 struct request *rq; member
|
D | aoeblk.c | 281 struct request *rq; in aoeblk_request() local 287 while ((rq = blk_peek_request(q))) { in aoeblk_request() 288 blk_start_request(rq); in aoeblk_request() 289 aoe_end_request(d, rq, 1); in aoeblk_request()
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/i2c/ |
D | gk104.c | 27 gk104_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) in gk104_aux_stat() argument 31 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) { in gk104_aux_stat() 34 if ((stat & (4 << (i * 4)))) *rq |= 1 << i; in gk104_aux_stat()
|
D | g94.c | 27 g94_aux_stat(struct nvkm_i2c *i2c, u32 *hi, u32 *lo, u32 *rq, u32 *tx) in g94_aux_stat() argument 31 for (i = 0, *hi = *lo = *rq = *tx = 0; i < 8; i++) { in g94_aux_stat() 34 if ((stat & (4 << (i * 4)))) *rq |= 1 << i; in g94_aux_stat()
|
D | base.c | 364 u32 hi, lo, rq, tx, e; in nvkm_i2c_intr() local 367 impl->aux_stat(i2c, &hi, &lo, &rq, &tx); in nvkm_i2c_intr() 368 if (hi || lo || rq || tx) { in nvkm_i2c_intr() 375 if (rq & (1 << port->aux)) e |= NVKM_I2C_IRQ; in nvkm_i2c_intr()
|
/linux-4.1.27/drivers/infiniband/hw/mlx5/ |
D | qp.c | 96 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe() 126 struct mlx5_ib_wq *wq = send ? &qp->sq : &qp->rq; in mlx5_ib_read_user_wqe() 233 qp->rq.max_gs = 0; in set_rq_size() 234 qp->rq.wqe_cnt = 0; in set_rq_size() 235 qp->rq.wqe_shift = 0; in set_rq_size() 238 qp->rq.wqe_cnt = ucmd->rq_wqe_count; in set_rq_size() 239 qp->rq.wqe_shift = ucmd->rq_wqe_shift; in set_rq_size() 240 qp->rq.max_gs = (1 << qp->rq.wqe_shift) / sizeof(struct mlx5_wqe_data_seg) - qp->wq_sig; in set_rq_size() 241 qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size() 248 qp->rq.wqe_cnt = wq_size / wqe_size; in set_rq_size() [all …]
|
/linux-4.1.27/drivers/mtd/ubi/ |
D | block.c | 90 struct request_queue *rq; member 321 struct request *req = bd->rq; in ubiblock_queue_rq() 413 dev->rq = blk_mq_init_queue(&dev->tag_set); in ubiblock_create() 414 if (IS_ERR(dev->rq)) { in ubiblock_create() 416 ret = PTR_ERR(dev->rq); in ubiblock_create() 419 blk_queue_max_segments(dev->rq, UBI_MAX_SG_COUNT); in ubiblock_create() 421 dev->rq->queuedata = dev; in ubiblock_create() 422 dev->gd->queue = dev->rq; in ubiblock_create() 445 blk_cleanup_queue(dev->rq); in ubiblock_create() 463 blk_cleanup_queue(dev->rq); in ubiblock_cleanup()
|
/linux-4.1.27/net/sunrpc/xprtrdma/ |
D | xprt_rdma.h | 312 #define RPCRDMA_INLINE_READ_THRESHOLD(rq) \ argument 313 (rpcx_to_rdmad(rq->rq_xprt).inline_rsize) 315 #define RPCRDMA_INLINE_WRITE_THRESHOLD(rq)\ argument 316 (rpcx_to_rdmad(rq->rq_xprt).inline_wsize) 318 #define RPCRDMA_INLINE_PAD_VALUE(rq)\ argument 319 rpcx_to_rdmad(rq->rq_xprt).padding
|
/linux-4.1.27/drivers/char/agp/ |
D | isoch.c | 75 u32 rq; in agp_3_5_isochronous_node_enable() member 126 target.rq = (tstatus >> 24) & 0xff; in agp_3_5_isochronous_node_enable() 216 master[cdev].rq = master[cdev].n; in agp_3_5_isochronous_node_enable() 218 master[cdev].rq *= (1 << (master[cdev].y - 1)); in agp_3_5_isochronous_node_enable() 220 tot_rq += master[cdev].rq; in agp_3_5_isochronous_node_enable() 227 rq_async = target.rq - rq_isoch; in agp_3_5_isochronous_node_enable() 254 master[cdev].rq += (cdev == ndevs - 1) in agp_3_5_isochronous_node_enable() 266 mcmd |= master[cdev].rq << 24; in agp_3_5_isochronous_node_enable()
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/linux/ |
D | lustre_compat25.h | 139 #define queue_max_phys_segments(rq) queue_max_segments(rq) argument 140 #define queue_max_hw_segments(rq) queue_max_segments(rq) argument
|
/linux-4.1.27/fs/ncpfs/ |
D | sock.c | 217 struct ncp_request_reply *rq; in __ncptcp_try_send() local 222 rq = server->tx.creq; in __ncptcp_try_send() 223 if (!rq) in __ncptcp_try_send() 227 memcpy(iovc, rq->tx_ciov, rq->tx_iovlen * sizeof(iov[0])); in __ncptcp_try_send() 228 result = do_send(server->ncp_sock, iovc, rq->tx_iovlen, in __ncptcp_try_send() 229 rq->tx_totallen, MSG_NOSIGNAL | MSG_DONTWAIT); in __ncptcp_try_send() 236 __ncp_abort_request(server, rq, result); in __ncptcp_try_send() 239 if (result >= rq->tx_totallen) { in __ncptcp_try_send() 240 server->rcv.creq = rq; in __ncptcp_try_send() 244 rq->tx_totallen -= result; in __ncptcp_try_send() [all …]
|
/linux-4.1.27/drivers/net/ethernet/intel/igb/ |
D | igb_ptp.c | 461 struct ptp_clock_request *rq, int on) in igb_ptp_feature_enable_i210() argument 472 switch (rq->type) { in igb_ptp_feature_enable_i210() 476 rq->extts.index); in igb_ptp_feature_enable_i210() 480 if (rq->extts.index == 1) { in igb_ptp_feature_enable_i210() 491 igb_pin_extts(igb, rq->extts.index, pin); in igb_ptp_feature_enable_i210() 506 rq->perout.index); in igb_ptp_feature_enable_i210() 510 ts.tv_sec = rq->perout.period.sec; in igb_ptp_feature_enable_i210() 511 ts.tv_nsec = rq->perout.period.nsec; in igb_ptp_feature_enable_i210() 519 if (rq->perout.index == 1) { in igb_ptp_feature_enable_i210() 534 int i = rq->perout.index; in igb_ptp_feature_enable_i210() [all …]
|
/linux-4.1.27/drivers/infiniband/hw/ocrdma/ |
D | ocrdma_verbs.c | 1234 uresp.rq_dbid = qp->rq.dbid; in ocrdma_copy_qp_uresp() 1236 uresp.rq_page_size = PAGE_ALIGN(qp->rq.len); in ocrdma_copy_qp_uresp() 1237 uresp.rq_page_addr[0] = virt_to_phys(qp->rq.va); in ocrdma_copy_qp_uresp() 1238 uresp.num_rqe_allocated = qp->rq.max_cnt; in ocrdma_copy_qp_uresp() 1301 kzalloc(sizeof(u64) * qp->rq.max_cnt, GFP_KERNEL); in ocrdma_alloc_wr_id_tbl() 1321 qp->rq.max_sges = attrs->cap.max_recv_sge; in ocrdma_set_qp_init_params() 1531 qp_attr->cap.max_recv_wr = qp->rq.max_cnt - 1; in ocrdma_query_qp() 1533 qp_attr->cap.max_recv_sge = qp->rq.max_sges; in ocrdma_query_qp() 1608 return (qp->rq.tail == qp->rq.head); in is_hw_rq_empty() 1673 wqe_idx = (le32_to_cpu(cqe->rq.buftag_qpn) >> in ocrdma_discard_cqes() [all …]
|
D | ocrdma_hw.c | 2067 qp->rq.head = 0; in ocrdma_init_hwq_ptr() 2068 qp->rq.tail = 0; in ocrdma_init_hwq_ptr() 2194 qp->rq.max_cnt = max_rqe_allocated; in ocrdma_set_create_qp_rq_cmd() 2197 qp->rq.va = dma_alloc_coherent(&pdev->dev, len, &pa, GFP_KERNEL); in ocrdma_set_create_qp_rq_cmd() 2198 if (!qp->rq.va) in ocrdma_set_create_qp_rq_cmd() 2200 memset(qp->rq.va, 0, len); in ocrdma_set_create_qp_rq_cmd() 2201 qp->rq.pa = pa; in ocrdma_set_create_qp_rq_cmd() 2202 qp->rq.len = len; in ocrdma_set_create_qp_rq_cmd() 2203 qp->rq.entry_size = dev->attr.rqe_size; in ocrdma_set_create_qp_rq_cmd() 2214 cmd->max_wqe_rqe |= (ilog2(qp->rq.max_cnt) << in ocrdma_set_create_qp_rq_cmd() [all …]
|
/linux-4.1.27/drivers/infiniband/hw/mlx4/ |
D | qp.c | 190 return get_wqe(qp, qp->rq.offset + (n << qp->rq.wqe_shift)); in get_recv_wqe() 389 qp->rq.wqe_cnt = qp->rq.max_gs = 0; in set_rq_size() 395 qp->rq.wqe_cnt = roundup_pow_of_two(max(1U, cap->max_recv_wr)); in set_rq_size() 396 qp->rq.max_gs = roundup_pow_of_two(max(1U, cap->max_recv_sge)); in set_rq_size() 397 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); in set_rq_size() 402 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; in set_rq_size() 403 cap->max_recv_sge = qp->rq.max_gs; in set_rq_size() 405 cap->max_recv_wr = qp->rq.max_post = in set_rq_size() 406 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt); in set_rq_size() 407 cap->max_recv_sge = min(qp->rq.max_gs, in set_rq_size() [all …]
|
/linux-4.1.27/include/linux/sunrpc/ |
D | svcauth.h | 119 int (*accept)(struct svc_rqst *rq, __be32 *authp); 120 int (*release)(struct svc_rqst *rq); 122 int (*set_client)(struct svc_rqst *rq);
|
/linux-4.1.27/net/sunrpc/ |
D | cache.c | 760 struct cache_request *rq; in cache_read() local 784 rq = container_of(rp->q.list.next, struct cache_request, q.list); in cache_read() 785 WARN_ON_ONCE(rq->q.reader); in cache_read() 787 rq->readers++; in cache_read() 790 if (rq->len == 0) { in cache_read() 791 err = cache_request(cd, rq); in cache_read() 794 rq->len = err; in cache_read() 797 if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) { in cache_read() 800 list_move(&rp->q.list, &rq->q.list); in cache_read() 803 if (rp->offset + count > rq->len) in cache_read() [all …]
|
/linux-4.1.27/drivers/block/mtip32xx/ |
D | mtip32xx.c | 180 struct request *rq; in mtip_get_int_command() local 182 rq = blk_mq_alloc_request(dd->queue, 0, __GFP_WAIT, true); in mtip_get_int_command() 183 return blk_mq_rq_to_pdu(rq); in mtip_get_int_command() 205 struct request *rq = mtip_rq_from_tag(dd, tag); in mtip_cmd_from_tag() local 207 return blk_mq_rq_to_pdu(rq); in mtip_cmd_from_tag() 232 struct request *rq; in mtip_async_complete() local 245 rq = mtip_rq_from_tag(dd, tag); in mtip_async_complete() 250 blk_mq_end_request(rq, status ? -EIO : 0); in mtip_async_complete() 2378 static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, in mtip_hw_submit_io() argument 2384 int dma_dir = rq_data_dir(rq) == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE; in mtip_hw_submit_io() [all …]
|
/linux-4.1.27/drivers/net/ethernet/intel/fm10k/ |
D | fm10k_ptp.c | 322 struct ptp_clock_request *rq, in fm10k_ptp_enable() argument 325 struct ptp_clock_time *t = &rq->perout.period; in fm10k_ptp_enable() 332 if (rq->type != PTP_CLK_REQ_PEROUT) in fm10k_ptp_enable() 336 if (rq->perout.index >= ptp->n_per_out) in fm10k_ptp_enable() 367 fm10k_write_sw_reg(hw, FM10K_SW_SYSTIME_PULSE(rq->perout.index), in fm10k_ptp_enable()
|
/linux-4.1.27/net/bridge/ |
D | br_ioctl.c | 112 static int old_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) in old_dev_ioctl() argument 117 if (copy_from_user(args, rq->ifr_data, sizeof(args))) in old_dev_ioctl() 378 int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) in br_dev_ioctl() argument 384 return old_dev_ioctl(dev, rq, cmd); in br_dev_ioctl() 388 return add_del_if(br, rq->ifr_ifindex, cmd == SIOCBRADDIF); in br_dev_ioctl()
|
/linux-4.1.27/drivers/isdn/hardware/mISDN/ |
D | w6692.c | 1008 open_bchannel(struct w6692_hw *card, struct channel_req *rq) in open_bchannel() argument 1012 if (rq->adr.channel == 0 || rq->adr.channel > 2) in open_bchannel() 1014 if (rq->protocol == ISDN_P_NONE) in open_bchannel() 1016 bch = &card->bc[rq->adr.channel - 1].bch; in open_bchannel() 1019 bch->ch.protocol = rq->protocol; in open_bchannel() 1020 rq->ch = &bch->ch; in open_bchannel() 1179 open_dchannel(struct w6692_hw *card, struct channel_req *rq, void *caller) in open_dchannel() argument 1183 if (rq->protocol != ISDN_P_TE_S0) in open_dchannel() 1185 if (rq->adr.channel == 1) in open_dchannel() 1188 rq->ch = &card->dch.dev.D; in open_dchannel() [all …]
|
D | hfcsusb.c | 425 struct channel_req *rq) in open_dchannel() argument 431 hw->name, __func__, hw->dch.dev.id, rq->adr.channel, in open_dchannel() 433 if (rq->protocol == ISDN_P_NONE) in open_dchannel() 441 if (rq->adr.channel == 1) { in open_dchannel() 452 hw->protocol = rq->protocol; in open_dchannel() 453 if (rq->protocol == ISDN_P_TE_S0) { in open_dchannel() 459 ch->protocol = rq->protocol; in open_dchannel() 462 if (rq->protocol != ch->protocol) in open_dchannel() 470 rq->ch = ch; in open_dchannel() 478 open_bchannel(struct hfcsusb *hw, struct channel_req *rq) in open_bchannel() argument [all …]
|
D | mISDNipac.c | 757 open_dchannel_caller(struct isac_hw *isac, struct channel_req *rq, void *caller) in open_dchannel_caller() argument 761 if (rq->protocol != ISDN_P_TE_S0) in open_dchannel_caller() 763 if (rq->adr.channel == 1) in open_dchannel_caller() 766 rq->ch = &isac->dch.dev.D; in open_dchannel_caller() 767 rq->ch->protocol = rq->protocol; in open_dchannel_caller() 769 _queue_data(rq->ch, PH_ACTIVATE_IND, MISDN_ID_ANY, in open_dchannel_caller() 775 open_dchannel(struct isac_hw *isac, struct channel_req *rq) in open_dchannel() argument 777 return open_dchannel_caller(isac, rq, __builtin_return_address(0)); in open_dchannel() 1497 open_bchannel(struct ipac_hw *ipac, struct channel_req *rq) in open_bchannel() argument 1501 if (rq->adr.channel == 0 || rq->adr.channel > 2) in open_bchannel() [all …]
|
D | avmfritz.c | 908 open_bchannel(struct fritzcard *fc, struct channel_req *rq) in open_bchannel() argument 912 if (rq->adr.channel == 0 || rq->adr.channel > 2) in open_bchannel() 914 if (rq->protocol == ISDN_P_NONE) in open_bchannel() 916 bch = &fc->bch[rq->adr.channel - 1]; in open_bchannel() 919 bch->ch.protocol = rq->protocol; in open_bchannel() 920 rq->ch = &bch->ch; in open_bchannel() 933 struct channel_req *rq; in avm_dctrl() local 939 rq = arg; in avm_dctrl() 940 if (rq->protocol == ISDN_P_TE_S0) in avm_dctrl() 941 err = fc->isac.open(&fc->isac, rq); in avm_dctrl() [all …]
|
D | hfcpci.c | 1890 struct channel_req *rq) in open_dchannel() argument 1897 if (rq->protocol == ISDN_P_NONE) in open_dchannel() 1899 if (rq->adr.channel == 1) { in open_dchannel() 1904 if (rq->protocol == ISDN_P_TE_S0) { in open_dchannel() 1909 hc->hw.protocol = rq->protocol; in open_dchannel() 1910 ch->protocol = rq->protocol; in open_dchannel() 1915 if (rq->protocol != ch->protocol) { in open_dchannel() 1918 if (rq->protocol == ISDN_P_TE_S0) { in open_dchannel() 1923 hc->hw.protocol = rq->protocol; in open_dchannel() 1924 ch->protocol = rq->protocol; in open_dchannel() [all …]
|
D | speedfax.c | 254 struct channel_req *rq; in sfax_dctrl() local 260 rq = arg; in sfax_dctrl() 261 if (rq->protocol == ISDN_P_TE_S0) in sfax_dctrl() 262 err = sf->isac.open(&sf->isac, rq); in sfax_dctrl() 264 err = sf->isar.open(&sf->isar, rq); in sfax_dctrl()
|
D | netjet.c | 863 open_bchannel(struct tiger_hw *card, struct channel_req *rq) in open_bchannel() argument 867 if (rq->adr.channel == 0 || rq->adr.channel > 2) in open_bchannel() 869 if (rq->protocol == ISDN_P_NONE) in open_bchannel() 871 bch = &card->bc[rq->adr.channel - 1].bch; in open_bchannel() 875 bch->ch.protocol = rq->protocol; in open_bchannel() 876 rq->ch = &bch->ch; in open_bchannel() 889 struct channel_req *rq; in nj_dctrl() local 895 rq = arg; in nj_dctrl() 896 if (rq->protocol == ISDN_P_TE_S0) in nj_dctrl() 897 err = card->isac.open(&card->isac, rq); in nj_dctrl() [all …]
|
D | hfcmulti.c | 4061 struct channel_req *rq) in open_dchannel() argument 4069 if (rq->protocol == ISDN_P_NONE) in open_dchannel() 4072 (dch->dev.D.protocol != rq->protocol)) { in open_dchannel() 4075 __func__, dch->dev.D.protocol, rq->protocol); in open_dchannel() 4078 (rq->protocol != ISDN_P_TE_S0)) in open_dchannel() 4080 if (dch->dev.D.protocol != rq->protocol) { in open_dchannel() 4081 if (rq->protocol == ISDN_P_TE_S0) { in open_dchannel() 4086 dch->dev.D.protocol = rq->protocol; in open_dchannel() 4094 rq->ch = &dch->dev.D; in open_dchannel() 4102 struct channel_req *rq) in open_bchannel() argument [all …]
|
/linux-4.1.27/tools/perf/scripts/python/ |
D | sched-migration.py | 269 rq = ts.rqs[cpu] 271 raw += "Last event : %s\n" % rq.event.__repr__() 274 raw += "Load = %d\n" % rq.load() 275 for t in rq.tasks: 281 rq = slice.rqs[cpu] 284 load_rate = rq.load() / float(slice.total_load) 294 top_color = rq.event.color()
|
/linux-4.1.27/drivers/block/paride/ |
D | pd.c | 721 struct request *rq; in pd_special_command() local 724 rq = blk_get_request(disk->gd->queue, READ, __GFP_WAIT); in pd_special_command() 725 if (IS_ERR(rq)) in pd_special_command() 726 return PTR_ERR(rq); in pd_special_command() 728 rq->cmd_type = REQ_TYPE_SPECIAL; in pd_special_command() 729 rq->special = func; in pd_special_command() 731 err = blk_execute_rq(disk->gd->queue, disk->gd, rq, 0); in pd_special_command() 733 blk_put_request(rq); in pd_special_command()
|
/linux-4.1.27/net/key/ |
D | af_key.c | 1887 parse_ipsecrequest(struct xfrm_policy *xp, struct sadb_x_ipsecrequest *rq) in parse_ipsecrequest() argument 1896 if (rq->sadb_x_ipsecrequest_mode == 0) in parse_ipsecrequest() 1899 t->id.proto = rq->sadb_x_ipsecrequest_proto; /* XXX check proto */ in parse_ipsecrequest() 1900 if ((mode = pfkey_mode_to_xfrm(rq->sadb_x_ipsecrequest_mode)) < 0) in parse_ipsecrequest() 1903 if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_USE) in parse_ipsecrequest() 1905 else if (rq->sadb_x_ipsecrequest_level == IPSEC_LEVEL_UNIQUE) { in parse_ipsecrequest() 1906 t->reqid = rq->sadb_x_ipsecrequest_reqid; in parse_ipsecrequest() 1915 u8 *sa = (u8 *) (rq + 1); in parse_ipsecrequest() 1942 struct sadb_x_ipsecrequest *rq = (void*)(pol+1); in parse_ipsecrequests() local 1948 if ((err = parse_ipsecrequest(xp, rq)) < 0) in parse_ipsecrequests() [all …]
|
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/fb/ |
D | gddr5.c | 39 int rq = ram->freq < 1000000; /* XXX */ in nvkm_gddr5_calc() local 95 ram->mr[3] |= (rq & 0x01) << 5; in nvkm_gddr5_calc()
|
/linux-4.1.27/drivers/staging/octeon/ |
D | ethernet-mdio.h | 46 int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
|
D | ethernet-mdio.c | 106 int cvm_oct_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) in cvm_oct_ioctl() argument 116 return phy_mii_ioctl(priv->phydev, rq, cmd); in cvm_oct_ioctl()
|
/linux-4.1.27/drivers/staging/rtl8712/ |
D | osdep_intf.h | 42 int r871x_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
|
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/ |
D | sched.c | 114 BUG_ON(!list_empty(&ctx->rq)); in __spu_update_sched_info() 508 if (list_empty(&ctx->rq)) { in __spu_add_to_rq() 509 list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); in __spu_add_to_rq() 527 if (!list_empty(&ctx->rq)) { in __spu_del_from_rq() 530 list_del_init(&ctx->rq); in __spu_del_from_rq() 845 struct list_head *rq = &spu_prio->runq[best]; in grab_runnable_context() local 847 list_for_each_entry(ctx, rq, rq) { in grab_runnable_context()
|
D | context.c | 61 INIT_LIST_HEAD(&ctx->rq); in alloc_spu_context() 93 BUG_ON(!list_empty(&ctx->rq)); in destroy_spu_context()
|
/linux-4.1.27/drivers/atm/ |
D | firestream.c | 679 long rq; in process_return_queue() local 683 while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { in process_return_queue() 684 fs_dprintk (FS_DEBUG_QUEUE, "reaping return queue entry at %lx\n", rq); in process_return_queue() 685 qe = bus_to_virt (rq); in process_return_queue() 705 long rq; in process_txdone_queue() local 711 while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { in process_txdone_queue() 712 fs_dprintk (FS_DEBUG_QUEUE, "reaping txdone entry at %lx\n", rq); in process_txdone_queue() 713 qe = bus_to_virt (rq); in process_txdone_queue() 775 long rq; in process_incoming() local 782 while (!((rq = read_fs (dev, Q_RP(q->offset))) & Q_EMPTY)) { in process_incoming() [all …]
|
/linux-4.1.27/drivers/cdrom/ |
D | cdrom.c | 2163 struct request *rq; in cdrom_read_cdda_bpc() local 2182 rq = blk_get_request(q, READ, GFP_KERNEL); in cdrom_read_cdda_bpc() 2183 if (IS_ERR(rq)) { in cdrom_read_cdda_bpc() 2184 ret = PTR_ERR(rq); in cdrom_read_cdda_bpc() 2187 blk_rq_set_block_pc(rq); in cdrom_read_cdda_bpc() 2189 ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL); in cdrom_read_cdda_bpc() 2191 blk_put_request(rq); in cdrom_read_cdda_bpc() 2195 rq->cmd[0] = GPCMD_READ_CD; in cdrom_read_cdda_bpc() 2196 rq->cmd[1] = 1 << 2; in cdrom_read_cdda_bpc() 2197 rq->cmd[2] = (lba >> 24) & 0xff; in cdrom_read_cdda_bpc() [all …]
|
/linux-4.1.27/arch/alpha/include/asm/ |
D | agp_backend.h | 14 u32 rq : 8; member
|
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
D | service.c | 1171 struct ptlrpc_request *rq = NULL; in ptlrpc_at_add_timed() local 1190 list_for_each_entry_reverse(rq, in ptlrpc_at_add_timed() 1193 if (req->rq_deadline >= rq->rq_deadline) { in ptlrpc_at_add_timed() 1195 &rq->rq_timed_list); in ptlrpc_at_add_timed() 1382 struct ptlrpc_request *rq, *n; in ptlrpc_at_check_timed() local 1420 list_for_each_entry_safe(rq, n, in ptlrpc_at_check_timed() 1423 if (rq->rq_deadline > now + at_early_margin) { in ptlrpc_at_check_timed() 1426 rq->rq_deadline < deadline) in ptlrpc_at_check_timed() 1427 deadline = rq->rq_deadline; in ptlrpc_at_check_timed() 1431 ptlrpc_at_remove_timed(rq); in ptlrpc_at_check_timed() [all …]
|
/linux-4.1.27/drivers/staging/rtl8188eu/include/ |
D | osdep_intf.h | 38 int rtw_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
|
/linux-4.1.27/fs/nfsd/ |
D | nfsd.h | 108 static inline int nfsd_v4client(struct svc_rqst *rq) in nfsd_v4client() argument 110 return rq->rq_prog == NFS_PROGRAM && rq->rq_vers == 4; in nfsd_v4client()
|
/linux-4.1.27/drivers/scsi/bnx2fc/ |
D | bnx2fc_tgt.c | 709 tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size, in bnx2fc_alloc_session_resc() 711 if (!tgt->rq) { in bnx2fc_alloc_session_resc() 716 memset(tgt->rq, 0, tgt->rq_mem_size); in bnx2fc_alloc_session_resc() 889 if (tgt->rq) { in bnx2fc_free_session_resc() 891 tgt->rq, tgt->rq_dma); in bnx2fc_free_session_resc() 892 tgt->rq = NULL; in bnx2fc_free_session_resc()
|
/linux-4.1.27/net/9p/ |
D | trans_fd.c | 143 struct work_struct rq; member 303 m = container_of(work, struct p9_conn, rq); in p9_read_work() 395 schedule_work(&m->rq); in p9_read_work() 591 INIT_WORK(&m->rq, p9_read_work); in p9_conn_create() 634 schedule_work(&m->rq); in p9_poll_mux() 852 cancel_work_sync(&m->rq); in p9_conn_destroy()
|
/linux-4.1.27/include/linux/mtd/ |
D | blktrans.h | 49 struct request_queue *rq; member
|
/linux-4.1.27/drivers/net/ppp/ |
D | ppp_generic.c | 83 struct sk_buff_head rq; /* receive queue for pppd */ member 429 skb = skb_dequeue(&pf->rq); in ppp_read() 524 if (skb_peek(&pf->rq)) in ppp_poll() 1291 if (ppp->file.rq.qlen > PPP_MAX_RQLEN) in ppp_send_frame() 1293 skb_queue_tail(&ppp->file.rq, skb); in ppp_send_frame() 1672 skb_queue_tail(&pch->file.rq, skb); in ppp_input() 1674 while (pch->file.rq.qlen > PPP_MAX_RQLEN && in ppp_input() 1675 (skb = skb_dequeue(&pch->file.rq))) in ppp_input() 1822 skb_queue_tail(&ppp->file.rq, skb); in ppp_receive_nonmp_frame() 1824 while (ppp->file.rq.qlen > PPP_MAX_RQLEN && in ppp_receive_nonmp_frame() [all …]
|
/linux-4.1.27/drivers/scsi/osd/ |
D | osd_initiator.c | 442 static void _put_request(struct request *rq) in _put_request() argument 450 if (unlikely(rq->bio)) in _put_request() 451 blk_end_request(rq, -ENOMEM, blk_rq_bytes(rq)); in _put_request() 453 blk_put_request(rq); in _put_request() 458 struct request *rq = or->request; in osd_end_request() local 460 if (rq) { in osd_end_request() 461 if (rq->next_rq) { in osd_end_request() 462 _put_request(rq->next_rq); in osd_end_request() 463 rq->next_rq = NULL; in osd_end_request() 466 _put_request(rq); in osd_end_request()
|
/linux-4.1.27/drivers/video/fbdev/ |
D | mx3fb.c | 1529 struct dma_chan_request *rq = arg; in chan_filter() local 1536 if (!rq) in chan_filter() 1539 dev = rq->mx3fb->dev; in chan_filter() 1542 return rq->id == chan->chan_id && in chan_filter() 1564 struct dma_chan_request rq; in mx3fb_probe() local 1594 rq.mx3fb = mx3fb; in mx3fb_probe() 1599 rq.id = IDMAC_SDC_0; in mx3fb_probe() 1600 chan = dma_request_channel(mask, chan_filter, &rq); in mx3fb_probe()
|
/linux-4.1.27/drivers/net/ethernet/freescale/ |
D | gianfar_ptp.c | 363 struct ptp_clock_request *rq, int on) in ptp_gianfar_enable() argument 369 switch (rq->type) { in ptp_gianfar_enable() 371 switch (rq->extts.index) { in ptp_gianfar_enable()
|
/linux-4.1.27/drivers/media/platform/soc_camera/ |
D | mx3_camera.c | 613 struct dma_chan_request *rq = arg; in chan_filter() local 619 if (!rq) in chan_filter() 622 pdata = rq->mx3_cam->soc_host.v4l2_dev.dev->platform_data; in chan_filter() 624 return rq->id == chan->chan_id && in chan_filter() 764 struct dma_chan_request rq = {.mx3_cam = mx3_cam, in acquire_dma_channel() local 770 chan = dma_request_channel(mask, chan_filter, &rq); in acquire_dma_channel()
|
/linux-4.1.27/drivers/net/ethernet/chelsio/cxgb4/ |
D | sge.c | 2746 static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq, in free_rspq_fl() argument 2752 adap->sge.ingr_map[rq->cntxt_id - adap->sge.ingr_start] = NULL; in free_rspq_fl() 2754 rq->cntxt_id, fl_id, 0xffff); in free_rspq_fl() 2755 dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len, in free_rspq_fl() 2756 rq->desc, rq->phys_addr); in free_rspq_fl() 2757 napi_hash_del(&rq->napi); in free_rspq_fl() 2758 netif_napi_del(&rq->napi); in free_rspq_fl() 2759 rq->netdev = NULL; in free_rspq_fl() 2760 rq->cntxt_id = rq->abs_id = 0; in free_rspq_fl() 2761 rq->desc = NULL; in free_rspq_fl()
|
/linux-4.1.27/drivers/isdn/i4l/ |
D | isdn_ppp.c | 317 is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */ in isdn_ppp_open() 318 is->last = is->rq; in isdn_ppp_open() 375 kfree(is->rq[i].buf); in isdn_ppp_release() 376 is->rq[i].buf = NULL; in isdn_ppp_release() 378 is->first = is->rq + NUM_RCV_BUFFS - 1; /* receive queue */ in isdn_ppp_release() 379 is->last = is->rq; in isdn_ppp_release() 917 ippp_table[i]->first = ippp_table[i]->rq + NUM_RCV_BUFFS - 1; in isdn_ppp_init() 918 ippp_table[i]->last = ippp_table[i]->rq; in isdn_ppp_init() 921 ippp_table[i]->rq[j].buf = NULL; in isdn_ppp_init() 922 ippp_table[i]->rq[j].last = ippp_table[i]->rq + in isdn_ppp_init() [all …]
|
/linux-4.1.27/net/atm/ |
D | common.c | 231 struct sk_buff_head queue, *rq; in vcc_process_recv_queue() local 236 rq = &sk_atm(vcc)->sk_receive_queue; in vcc_process_recv_queue() 238 spin_lock_irqsave(&rq->lock, flags); in vcc_process_recv_queue() 239 skb_queue_splice_init(rq, &queue); in vcc_process_recv_queue() 240 spin_unlock_irqrestore(&rq->lock, flags); in vcc_process_recv_queue()
|
/linux-4.1.27/Documentation/block/ |
D | biodoc.txt | 338 in the command bytes. (i.e rq->cmd is now 16 bytes in size, and meant for 340 through rq->flags instead of via rq->cmd) 513 rq->queue is gone 518 unsigned long flags; /* also includes earlier rq->cmd settings */ 650 rq_for_each_segment(bio_vec, rq, iter) 663 nr_segments = blk_rq_map_sg(q, rq, scatterlist); 702 buffers) and expect only virtually mapped buffers, can access the rq->buffer 710 direct access requests which only specify rq->buffer without a valid rq->bio) 734 blk_queue_start_tag(struct request_queue *q, struct request *rq) 737 0 and 'depth' is assigned to the request (rq->tag holds this number), [all …]
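The biodoc.txt excerpts above reference rq_for_each_segment() and blk_rq_map_sg(); a brief sketch of both as a driver might use them (demo_count_bytes and demo_map_for_dma are invented, and the caller is assumed to provide an sg table sized for the queue's segment limit):

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

/* Walk every bio_vec segment of a request, e.g. for PIO-style copying. */
static unsigned int demo_count_bytes(struct request *rq)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int bytes = 0;

	rq_for_each_segment(bvec, rq, iter)
		bytes += bvec.bv_len;	/* bvec.bv_page/bv_offset locate the data */
	return bytes;
}

/* Build a scatterlist for DMA mapping. */
static int demo_map_for_dma(struct request_queue *q, struct request *rq,
			    struct scatterlist *sg)
{
	return blk_rq_map_sg(q, rq, sg);	/* returns the number of sg entries */
}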
|
/linux-4.1.27/drivers/infiniband/hw/cxgb3/ |
D | cxio_hal.c | 284 wq->rq = kzalloc(depth * sizeof(struct t3_swrq), GFP_KERNEL); in cxio_create_qp() 285 if (!wq->rq) in cxio_create_qp() 317 kfree(wq->rq); in cxio_create_qp() 345 kfree(wq->rq); in cxio_destroy_qp() 1308 *cookie = wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].wr_id; in cxio_poll_cq() 1309 if (wq->rq[Q_PTR2IDX(wq->rq_rptr, wq->rq_size_log2)].pbl_addr) in cxio_poll_cq() 1311 wq->rq[Q_PTR2IDX(wq->rq_rptr, in cxio_poll_cq()
|
/linux-4.1.27/drivers/net/phy/ |
D | dp83640.c | 473 struct ptp_clock_request *rq, int on) in ptp_dp83640_enable() argument 481 switch (rq->type) { in ptp_dp83640_enable() 483 index = rq->extts.index; in ptp_dp83640_enable() 494 if (rq->extts.flags & PTP_FALLING_EDGE) in ptp_dp83640_enable() 505 if (rq->perout.index >= N_PER_OUT) in ptp_dp83640_enable() 507 return periodic_output(clock, rq, on, rq->perout.index); in ptp_dp83640_enable()
|
/linux-4.1.27/drivers/net/usb/ |
D | ax88172a.c | 54 static int ax88172a_ioctl(struct net_device *net, struct ifreq *rq, int cmd) in ax88172a_ioctl() argument 62 return phy_mii_ioctl(net->phydev, rq, cmd); in ax88172a_ioctl()
|
/linux-4.1.27/drivers/net/ethernet/tile/ |
D | tilegx.c | 475 static int tile_hwtstamp_set(struct net_device *dev, struct ifreq *rq) in tile_hwtstamp_set() argument 480 if (copy_from_user(&config, rq->ifr_data, sizeof(config))) in tile_hwtstamp_set() 517 if (copy_to_user(rq->ifr_data, &config, sizeof(config))) in tile_hwtstamp_set() 524 static int tile_hwtstamp_get(struct net_device *dev, struct ifreq *rq) in tile_hwtstamp_get() argument 528 if (copy_to_user(rq->ifr_data, &priv->stamp_cfg, in tile_hwtstamp_get() 2093 static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) in tile_net_ioctl() argument 2096 return tile_hwtstamp_set(dev, rq); in tile_net_ioctl() 2098 return tile_hwtstamp_get(dev, rq); in tile_net_ioctl()
|
/linux-4.1.27/drivers/net/ethernet/stmicro/stmmac/ |
D | stmmac_ptp.c | 152 struct ptp_clock_request *rq, int on) in stmmac_enable() argument
|