Searched refs:dreq (Results 1 - 33 of 33) sorted by relevance

/linux-4.4.14/fs/nfs/
cache_lib.c:67 void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq) nfs_cache_defer_req_put() argument
69 if (atomic_dec_and_test(&dreq->count)) nfs_cache_defer_req_put()
70 kfree(dreq); nfs_cache_defer_req_put()
75 struct nfs_cache_defer_req *dreq; nfs_dns_cache_revisit() local
77 dreq = container_of(d, struct nfs_cache_defer_req, deferred_req); nfs_dns_cache_revisit()
79 complete_all(&dreq->completion); nfs_dns_cache_revisit()
80 nfs_cache_defer_req_put(dreq); nfs_dns_cache_revisit()
85 struct nfs_cache_defer_req *dreq; nfs_dns_cache_defer() local
87 dreq = container_of(req, struct nfs_cache_defer_req, req); nfs_dns_cache_defer()
88 dreq->deferred_req.revisit = nfs_dns_cache_revisit; nfs_dns_cache_defer()
89 atomic_inc(&dreq->count); nfs_dns_cache_defer()
91 return &dreq->deferred_req; nfs_dns_cache_defer()
96 struct nfs_cache_defer_req *dreq; nfs_cache_defer_req_alloc() local
98 dreq = kzalloc(sizeof(*dreq), GFP_KERNEL); nfs_cache_defer_req_alloc()
99 if (dreq) { nfs_cache_defer_req_alloc()
100 init_completion(&dreq->completion); nfs_cache_defer_req_alloc()
101 atomic_set(&dreq->count, 1); nfs_cache_defer_req_alloc()
102 dreq->req.defer = nfs_dns_cache_defer; nfs_cache_defer_req_alloc()
104 return dreq; nfs_cache_defer_req_alloc()
107 int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq) nfs_cache_wait_for_upcall() argument
109 if (wait_for_completion_timeout(&dreq->completion, nfs_cache_wait_for_upcall()
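The cache_lib.c hits above trace a small lifecycle: nfs_cache_defer_req_alloc() creates the object with one reference and a completion, each deferral takes another reference, and the revisit callback fires the completion before dropping its reference. A standalone userspace sketch of that pattern follows; the completion is rebuilt here from a mutex and condition variable, and all names are illustrative rather than the kernel API.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    /* Minimal stand-in for the kernel's struct completion. */
    struct completion {
        pthread_mutex_t lock;
        pthread_cond_t  cond;
        int             done;
    };

    static void complete_all(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        c->done = 1;
        pthread_cond_broadcast(&c->cond);
        pthread_mutex_unlock(&c->lock);
    }

    static void wait_for_completion(struct completion *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->done)
            pthread_cond_wait(&c->cond, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }

    /* The deferral object: one reference for the allocator, one per deferral. */
    struct defer_req {
        atomic_int        count;
        struct completion completion;
    };

    static struct defer_req *defer_req_alloc(void)
    {
        struct defer_req *dreq = calloc(1, sizeof(*dreq));

        if (dreq) {
            pthread_mutex_init(&dreq->completion.lock, NULL);
            pthread_cond_init(&dreq->completion.cond, NULL);
            atomic_init(&dreq->count, 1);    /* allocator's reference */
        }
        return dreq;
    }

    static void defer_req_put(struct defer_req *dreq)
    {
        if (atomic_fetch_sub(&dreq->count, 1) == 1)
            free(dreq);                      /* last reference gone */
    }

    static void *revisit(void *arg)          /* plays nfs_dns_cache_revisit() */
    {
        struct defer_req *dreq = arg;

        complete_all(&dreq->completion);
        defer_req_put(dreq);                 /* drop the deferral's reference */
        return NULL;
    }

    int main(void)
    {
        struct defer_req *dreq = defer_req_alloc();
        pthread_t t;

        if (!dreq)
            return 1;
        atomic_fetch_add(&dreq->count, 1);   /* defer: take a reference */
        pthread_create(&t, NULL, revisit, dreq);
        wait_for_completion(&dreq->completion);
        pthread_join(t, NULL);
        defer_req_put(dreq);                 /* allocator's reference */
        return 0;
    }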
direct.c:107 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
110 static inline void get_dreq(struct nfs_direct_req *dreq) get_dreq() argument
112 atomic_inc(&dreq->io_count); get_dreq()
115 static inline int put_dreq(struct nfs_direct_req *dreq) put_dreq() argument
117 return atomic_dec_and_test(&dreq->io_count); put_dreq()
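get_dreq()/put_dreq() just count outstanding sub-I/Os on the request; whichever path drops the count to zero is responsible for completing the whole request. A minimal userspace sketch of that gate, assuming C11 atomics and POSIX threads:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct direct_req {
        atomic_int io_count;                 /* outstanding sub-I/Os + issuer */
    };

    static void get_dreq(struct direct_req *dreq)
    {
        atomic_fetch_add(&dreq->io_count, 1);
    }

    static int put_dreq(struct direct_req *dreq)
    {
        /* true only for the caller that drops the last reference */
        return atomic_fetch_sub(&dreq->io_count, 1) == 1;
    }

    static void *sub_io(void *arg)
    {
        struct direct_req *dreq = arg;

        if (put_dreq(dreq))                  /* last finisher completes */
            printf("request complete\n");
        return NULL;
    }

    int main(void)
    {
        struct direct_req dreq;
        pthread_t t[4];

        atomic_init(&dreq.io_count, 1);      /* issuer holds one reference */
        for (int i = 0; i < 4; i++) {
            get_dreq(&dreq);                 /* one reference per sub-I/O */
            pthread_create(&t[i], NULL, sub_io, &dreq);
        }
        if (put_dreq(&dreq))                 /* drop the issuer's reference */
            printf("request complete\n");
        for (int i = 0; i < 4; i++)
            pthread_join(t[i], NULL);
        return 0;
    }

Exactly one path observes the final decrement, so the completion runs once no matter how the threads interleave.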
120 void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq) nfs_direct_set_resched_writes() argument
122 dreq->flags = NFS_ODIRECT_RESCHED_WRITES; nfs_direct_set_resched_writes()
127 nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr) nfs_direct_good_bytes() argument
132 if (dreq->mirror_count == 1) { nfs_direct_good_bytes()
133 dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes; nfs_direct_good_bytes()
134 dreq->count += hdr->good_bytes; nfs_direct_good_bytes()
137 count = dreq->mirrors[hdr->pgio_mirror_idx].count; nfs_direct_good_bytes()
138 if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) { nfs_direct_good_bytes()
139 count = hdr->io_start + hdr->good_bytes - dreq->io_start; nfs_direct_good_bytes()
140 dreq->mirrors[hdr->pgio_mirror_idx].count = count; nfs_direct_good_bytes()
142 /* update the dreq->count by finding the minimum agreed count from all nfs_direct_good_bytes()
144 count = dreq->mirrors[0].count; nfs_direct_good_bytes()
146 for (i = 1; i < dreq->mirror_count; i++) nfs_direct_good_bytes()
147 count = min(count, dreq->mirrors[i].count); nfs_direct_good_bytes()
149 dreq->count = count; nfs_direct_good_bytes()
155 * @dreq - direct request possibly spanning multiple servers
162 nfs_direct_select_verf(struct nfs_direct_req *dreq, nfs_direct_select_verf() argument
166 struct nfs_writeverf *verfp = &dreq->verf; nfs_direct_select_verf()
173 if (ds_clp && dreq->ds_cinfo.nbuckets > 0) { nfs_direct_select_verf()
174 if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets) nfs_direct_select_verf()
175 verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf; nfs_direct_select_verf()
186 * @dreq - direct request possibly spanning multiple servers
191 static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq, nfs_direct_set_hdr_verf() argument
196 verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx); nfs_direct_set_hdr_verf()
204 * @dreq - direct request possibly spanning multiple servers
211 static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq, nfs_direct_set_or_cmp_hdr_verf() argument
216 verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx); nfs_direct_set_or_cmp_hdr_verf()
218 nfs_direct_set_hdr_verf(dreq, hdr); nfs_direct_set_or_cmp_hdr_verf()
226 * @dreq - direct request possibly spanning multiple servers
232 static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq, nfs_direct_cmp_commit_data_verf() argument
237 verfp = nfs_direct_select_verf(dreq, data->ds_clp, nfs_direct_cmp_commit_data_verf()
282 struct nfs_direct_req *dreq) nfs_init_cinfo_from_dreq()
284 cinfo->lock = &dreq->inode->i_lock; nfs_init_cinfo_from_dreq()
285 cinfo->mds = &dreq->mds_cinfo; nfs_init_cinfo_from_dreq()
286 cinfo->ds = &dreq->ds_cinfo; nfs_init_cinfo_from_dreq()
287 cinfo->dreq = dreq; nfs_init_cinfo_from_dreq()
291 static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq, nfs_direct_setup_mirroring() argument
300 dreq->mirror_count = mirror_count; nfs_direct_setup_mirroring()
305 struct nfs_direct_req *dreq; nfs_direct_req_alloc() local
307 dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL); nfs_direct_req_alloc()
308 if (!dreq) nfs_direct_req_alloc()
311 kref_init(&dreq->kref); nfs_direct_req_alloc()
312 kref_get(&dreq->kref); nfs_direct_req_alloc()
313 init_completion(&dreq->completion); nfs_direct_req_alloc()
314 INIT_LIST_HEAD(&dreq->mds_cinfo.list); nfs_direct_req_alloc()
315 dreq->verf.committed = NFS_INVALID_STABLE_HOW; /* not set yet */ nfs_direct_req_alloc()
316 INIT_WORK(&dreq->work, nfs_direct_write_schedule_work); nfs_direct_req_alloc()
317 dreq->mirror_count = 1; nfs_direct_req_alloc()
318 spin_lock_init(&dreq->lock); nfs_direct_req_alloc()
320 return dreq; nfs_direct_req_alloc()
325 struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref); nfs_direct_req_free() local
327 nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo); nfs_direct_req_free()
328 if (dreq->l_ctx != NULL) nfs_direct_req_free()
329 nfs_put_lock_context(dreq->l_ctx); nfs_direct_req_free()
330 if (dreq->ctx != NULL) nfs_direct_req_free()
331 put_nfs_open_context(dreq->ctx); nfs_direct_req_free()
332 kmem_cache_free(nfs_direct_cachep, dreq); nfs_direct_req_free()
335 static void nfs_direct_req_release(struct nfs_direct_req *dreq) nfs_direct_req_release() argument
337 kref_put(&dreq->kref, nfs_direct_req_free); nfs_direct_req_release()
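Note the kref_init() immediately followed by kref_get() in nfs_direct_req_alloc(): the request starts with two references, one for the I/O machinery and one for the waiter, so it stays allocated until both sides call nfs_direct_req_release(). A sketch of that double-reference trick, with C11 atomics standing in for struct kref:

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct direct_req {
        atomic_int kref;
    };

    static struct direct_req *req_alloc(void)
    {
        struct direct_req *dreq = calloc(1, sizeof(*dreq));

        if (!dreq)
            return NULL;
        atomic_init(&dreq->kref, 1);      /* kref_init(): waiter's reference */
        atomic_fetch_add(&dreq->kref, 1); /* kref_get(): I/O path's reference */
        return dreq;
    }

    static void req_release(struct direct_req *dreq)
    {
        if (atomic_fetch_sub(&dreq->kref, 1) == 1) {
            printf("freed\n");            /* plays nfs_direct_req_free() */
            free(dreq);
        }
    }

    int main(void)
    {
        struct direct_req *dreq = req_alloc();

        if (!dreq)
            return 1;
        req_release(dreq);                /* I/O side done: object survives */
        req_release(dreq);                /* waiter done: object is freed */
        return 0;
    }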
340 ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq) nfs_dreq_bytes_left() argument
342 return dreq->bytes_left; nfs_dreq_bytes_left()
349 static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq) nfs_direct_wait() argument
354 if (dreq->iocb) nfs_direct_wait()
357 result = wait_for_completion_killable(&dreq->completion); nfs_direct_wait()
360 result = dreq->error; nfs_direct_wait()
362 result = dreq->count; nfs_direct_wait()
372 static void nfs_direct_complete(struct nfs_direct_req *dreq, bool write) nfs_direct_complete() argument
374 struct inode *inode = dreq->inode; nfs_direct_complete()
376 if (dreq->iocb && write) { nfs_direct_complete()
377 loff_t pos = dreq->iocb->ki_pos + dreq->count; nfs_direct_complete()
390 if (dreq->iocb) { nfs_direct_complete()
391 long res = (long) dreq->error; nfs_direct_complete()
393 res = (long) dreq->count; nfs_direct_complete()
394 dreq->iocb->ki_complete(dreq->iocb, res, 0); nfs_direct_complete()
397 complete_all(&dreq->completion); nfs_direct_complete()
399 nfs_direct_req_release(dreq); nfs_direct_complete()
415 struct nfs_direct_req *dreq = hdr->dreq; nfs_direct_read_completion() local
420 spin_lock(&dreq->lock); nfs_direct_read_completion()
422 dreq->error = hdr->error; nfs_direct_read_completion()
424 nfs_direct_good_bytes(dreq, hdr); nfs_direct_read_completion()
426 spin_unlock(&dreq->lock); nfs_direct_read_completion()
439 if (put_dreq(dreq)) nfs_direct_read_completion()
440 nfs_direct_complete(dreq, false); nfs_direct_read_completion()
457 get_dreq(hdr->dreq); nfs_direct_pgio_init()
474 static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq, nfs_direct_read_schedule_iovec() argument
479 struct inode *inode = dreq->inode; nfs_direct_read_schedule_iovec()
484 nfs_pageio_init_read(&desc, dreq->inode, false, nfs_direct_read_schedule_iovec()
486 get_dreq(dreq); nfs_direct_read_schedule_iovec()
487 desc.pg_dreq = dreq; nfs_direct_read_schedule_iovec()
508 req = nfs_create_request(dreq->ctx, pagevec[i], NULL, nfs_direct_read_schedule_iovec()
525 dreq->bytes_left -= req_len; nfs_direct_read_schedule_iovec()
541 nfs_direct_req_release(dreq); nfs_direct_read_schedule_iovec()
545 if (put_dreq(dreq)) nfs_direct_read_schedule_iovec()
546 nfs_direct_complete(dreq, false); nfs_direct_read_schedule_iovec()
576 struct nfs_direct_req *dreq; nfs_file_direct_read() local
597 dreq = nfs_direct_req_alloc(); nfs_file_direct_read()
598 if (dreq == NULL) nfs_file_direct_read()
601 dreq->inode = inode; nfs_file_direct_read()
602 dreq->bytes_left = count; nfs_file_direct_read()
603 dreq->io_start = pos; nfs_file_direct_read()
604 dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); nfs_file_direct_read()
605 l_ctx = nfs_get_lock_context(dreq->ctx); nfs_file_direct_read()
610 dreq->l_ctx = l_ctx; nfs_file_direct_read()
612 dreq->iocb = iocb; nfs_file_direct_read()
615 result = nfs_direct_read_schedule_iovec(dreq, iter, pos); nfs_file_direct_read()
620 result = nfs_direct_wait(dreq); nfs_file_direct_read()
625 nfs_direct_req_release(dreq); nfs_file_direct_read()
629 nfs_direct_req_release(dreq); nfs_file_direct_read()
650 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) nfs_direct_write_reschedule() argument
659 nfs_init_cinfo_from_dreq(&cinfo, dreq); nfs_direct_write_reschedule()
660 nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo); nfs_direct_write_reschedule()
662 dreq->count = 0; nfs_direct_write_reschedule()
663 for (i = 0; i < dreq->mirror_count; i++) nfs_direct_write_reschedule()
664 dreq->mirrors[i].count = 0; nfs_direct_write_reschedule()
665 get_dreq(dreq); nfs_direct_write_reschedule()
667 nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false, nfs_direct_write_reschedule()
669 desc.pg_dreq = dreq; nfs_direct_write_reschedule()
672 nfs_direct_setup_mirroring(dreq, &desc, req); nfs_direct_write_reschedule()
679 dreq->flags = 0; nfs_direct_write_reschedule()
680 dreq->error = -EIO; nfs_direct_write_reschedule()
693 if (put_dreq(dreq)) nfs_direct_write_reschedule()
694 nfs_direct_write_complete(dreq, dreq->inode); nfs_direct_write_reschedule()
699 struct nfs_direct_req *dreq = data->dreq; nfs_direct_commit_complete() local
704 nfs_init_cinfo_from_dreq(&cinfo, dreq); nfs_direct_commit_complete()
708 dreq->flags = NFS_ODIRECT_RESCHED_WRITES; nfs_direct_commit_complete()
709 } else if (nfs_direct_cmp_commit_data_verf(dreq, data)) { nfs_direct_commit_complete()
711 dreq->flags = NFS_ODIRECT_RESCHED_WRITES; nfs_direct_commit_complete()
718 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { nfs_direct_commit_complete()
727 nfs_direct_write_complete(dreq, data->inode); nfs_direct_commit_complete()
740 static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) nfs_direct_commit_schedule() argument
746 nfs_init_cinfo_from_dreq(&cinfo, dreq); nfs_direct_commit_schedule()
747 nfs_scan_commit(dreq->inode, &mds_list, &cinfo); nfs_direct_commit_schedule()
748 res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo); nfs_direct_commit_schedule()
750 nfs_direct_write_reschedule(dreq); nfs_direct_commit_schedule()
755 struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work); nfs_direct_write_schedule_work() local
756 int flags = dreq->flags; nfs_direct_write_schedule_work()
758 dreq->flags = 0; nfs_direct_write_schedule_work()
761 nfs_direct_commit_schedule(dreq); nfs_direct_write_schedule_work()
764 nfs_direct_write_reschedule(dreq); nfs_direct_write_schedule_work()
767 nfs_direct_complete(dreq, true); nfs_direct_write_schedule_work()
771 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode) nfs_direct_write_complete() argument
773 schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */ nfs_direct_write_complete()
778 struct nfs_direct_req *dreq = hdr->dreq; nfs_direct_write_completion() local
786 nfs_init_cinfo_from_dreq(&cinfo, dreq); nfs_direct_write_completion()
788 spin_lock(&dreq->lock); nfs_direct_write_completion()
791 dreq->flags = 0; nfs_direct_write_completion()
792 dreq->error = hdr->error; nfs_direct_write_completion()
794 if (dreq->error == 0) { nfs_direct_write_completion()
795 nfs_direct_good_bytes(dreq, hdr); nfs_direct_write_completion()
797 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) nfs_direct_write_completion()
799 else if (dreq->flags == 0) { nfs_direct_write_completion()
800 nfs_direct_set_hdr_verf(dreq, hdr); nfs_direct_write_completion()
802 dreq->flags = NFS_ODIRECT_DO_COMMIT; nfs_direct_write_completion()
803 } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { nfs_direct_write_completion()
805 if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr)) nfs_direct_write_completion()
806 dreq->flags = nfs_direct_write_completion()
811 spin_unlock(&dreq->lock); nfs_direct_write_completion()
826 if (put_dreq(dreq)) nfs_direct_write_completion()
827 nfs_direct_write_complete(dreq, hdr->inode); nfs_direct_write_completion()
860 static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, nfs_direct_write_schedule_iovec() argument
865 struct inode *inode = dreq->inode; nfs_direct_write_schedule_iovec()
872 desc.pg_dreq = dreq; nfs_direct_write_schedule_iovec()
873 get_dreq(dreq); nfs_direct_write_schedule_iovec()
895 req = nfs_create_request(dreq->ctx, pagevec[i], NULL, nfs_direct_write_schedule_iovec()
902 nfs_direct_setup_mirroring(dreq, &desc, req); nfs_direct_write_schedule_iovec()
916 dreq->bytes_left -= req_len; nfs_direct_write_schedule_iovec()
931 nfs_direct_req_release(dreq); nfs_direct_write_schedule_iovec()
935 if (put_dreq(dreq)) nfs_direct_write_schedule_iovec()
936 nfs_direct_write_complete(dreq, dreq->inode); nfs_direct_write_schedule_iovec()
967 struct nfs_direct_req *dreq; nfs_file_direct_write() local
996 dreq = nfs_direct_req_alloc(); nfs_file_direct_write()
997 if (!dreq) nfs_file_direct_write()
1000 dreq->inode = inode; nfs_file_direct_write()
1001 dreq->bytes_left = iov_iter_count(iter); nfs_file_direct_write()
1002 dreq->io_start = pos; nfs_file_direct_write()
1003 dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); nfs_file_direct_write()
1004 l_ctx = nfs_get_lock_context(dreq->ctx); nfs_file_direct_write()
1009 dreq->l_ctx = l_ctx; nfs_file_direct_write()
1011 dreq->iocb = iocb; nfs_file_direct_write()
1013 result = nfs_direct_write_schedule_iovec(dreq, iter, pos); nfs_file_direct_write()
1023 result = nfs_direct_wait(dreq); nfs_file_direct_write()
1035 nfs_direct_req_release(dreq); nfs_file_direct_write()
1039 nfs_direct_req_release(dreq); nfs_file_direct_write()
281 nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo, struct nfs_direct_req *dreq) nfs_init_cinfo_from_dreq() argument
cache_lib.h:23 extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq);
24 extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq);
dns_resolve.c:273 struct nfs_cache_defer_req *dreq) do_cache_lookup()
279 ret = cache_check(cd, &(*item)->h, &dreq->req); do_cache_lookup()
315 struct nfs_cache_defer_req *dreq; do_cache_lookup_wait() local
318 dreq = nfs_cache_defer_req_alloc(); do_cache_lookup_wait()
319 if (!dreq) do_cache_lookup_wait()
321 ret = do_cache_lookup(cd, key, item, dreq); do_cache_lookup_wait()
323 ret = nfs_cache_wait_for_upcall(dreq); do_cache_lookup_wait()
327 nfs_cache_defer_req_put(dreq); do_cache_lookup_wait()
270 do_cache_lookup(struct cache_detail *cd, struct nfs_dns_ent *key, struct nfs_dns_ent **item, struct nfs_cache_defer_req *dreq) do_cache_lookup() argument
internal.h:495 struct nfs_direct_req *dreq);
516 struct nfs_direct_req *dreq); nfs_inode_dio_wait()
521 extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
522 extern void nfs_direct_set_resched_writes(struct nfs_direct_req *dreq);
626 if (!cinfo->dreq) { nfs_mark_page_unstable()
write.c:844 cinfo->dreq = NULL; nfs_init_cinfo_from_inode()
850 struct nfs_direct_req *dreq) nfs_init_cinfo()
852 if (dreq) nfs_init_cinfo()
853 nfs_init_cinfo_from_dreq(cinfo, dreq); nfs_init_cinfo()
961 if ((ret == max) && !cinfo->dreq) list_for_each_entry_safe()
1638 data->dreq = cinfo->dreq; nfs_init_commit()
1662 if (!cinfo->dreq) nfs_retry_commit()
1751 nfs_init_cinfo(&cinfo, data->inode, data->dreq); nfs_commit_release_pages()
848 nfs_init_cinfo(struct nfs_commit_info *cinfo, struct inode *inode, struct nfs_direct_req *dreq) nfs_init_cinfo() argument
pagelist.c:66 hdr->dreq = desc->pg_dreq; nfs_pgheader_init()
1224 desc->pg_dreq = hdr->dreq; nfs_pageio_resend()
pnfs_nfs.c:107 if ((ret == max) && !cinfo->dreq) list_for_each_entry_safe()
nfs4proc.c:4437 if (hdr->ds_clp != NULL || hdr->dreq != NULL) nfs4_write_need_cache_consistency_data()
/linux-4.4.14/net/dccp/
minisocks.c:88 struct dccp_request_sock *dreq = dccp_rsk(req); dccp_create_openreq_child() local
95 newdp->dccps_service = dreq->dreq_service; dccp_create_openreq_child()
96 newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo; dccp_create_openreq_child()
97 newdp->dccps_timestamp_time = dreq->dreq_timestamp_time; dccp_create_openreq_child()
112 newdp->dccps_iss = dreq->dreq_iss; dccp_create_openreq_child()
113 newdp->dccps_gss = dreq->dreq_gss; dccp_create_openreq_child()
115 newdp->dccps_isr = dreq->dreq_isr; dccp_create_openreq_child()
116 newdp->dccps_gsr = dreq->dreq_gsr; dccp_create_openreq_child()
121 if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) { dccp_create_openreq_child()
145 struct dccp_request_sock *dreq = dccp_rsk(req); dccp_check_req() local
151 if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) { dccp_check_req()
153 dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq; dccp_check_req()
173 dreq->dreq_iss, dreq->dreq_gss)) { dccp_check_req()
178 (unsigned long long) dreq->dreq_iss, dccp_check_req()
179 (unsigned long long) dreq->dreq_gss); dccp_check_req()
183 if (dccp_parse_options(sk, dreq, skb)) dccp_check_req()
250 struct dccp_request_sock *dreq = dccp_rsk(req); dccp_reqsk_init() local
255 dreq->dreq_timestamp_echo = 0; dccp_reqsk_init()
258 return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg); dccp_reqsk_init()
options.c:48 * @sk: client|server|listening dccp socket (when @dreq != NULL)
49 * @dreq: request socket to use during connection setup, or NULL
51 int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq, dccp_parse_options() argument
102 if (dreq != NULL && (opt >= DCCPO_MIN_RX_CCID_SPECIFIC || dccp_parse_options()
128 rc = dccp_feat_parse_options(sk, dreq, mandatory, opt, dccp_parse_options()
147 if (dreq != NULL) { dccp_parse_options()
148 dreq->dreq_timestamp_echo = ntohl(opt_val); dccp_parse_options()
149 dreq->dreq_timestamp_time = dccp_timestamp(); dccp_parse_options()
356 struct dccp_request_sock *dreq, dccp_insert_option_timestamp_echo()
363 if (dreq != NULL) { dccp_insert_option_timestamp_echo()
364 elapsed_time = dccp_timestamp() - dreq->dreq_timestamp_time; dccp_insert_option_timestamp_echo()
365 tstamp_echo = htonl(dreq->dreq_timestamp_echo); dccp_insert_option_timestamp_echo()
366 dreq->dreq_timestamp_echo = 0; dccp_insert_option_timestamp_echo()
592 int dccp_insert_options_rsk(struct dccp_request_sock *dreq, struct sk_buff *skb) dccp_insert_options_rsk() argument
596 if (dccp_feat_insert_opts(NULL, dreq, skb)) dccp_insert_options_rsk()
603 if (dreq->dreq_timestamp_echo != 0 && dccp_insert_options_rsk()
604 dccp_insert_option_timestamp_echo(NULL, dreq, skb)) dccp_insert_options_rsk()
355 dccp_insert_option_timestamp_echo(struct dccp_sock *dp, struct dccp_request_sock *dreq, struct sk_buff *skb) dccp_insert_option_timestamp_echo() argument
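dccp_parse_options() walks type-length encoded options and treats some of them differently when a request socket (dreq) is supplied during connection setup, e.g. latching the timestamp echo into the request. A hedged sketch of that shape; option type 42 and the 6-byte length are made up for illustration, not DCCP's actual option numbers:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Hypothetical request-phase context (stand-in for dccp_request_sock). */
    struct req_ctx {
        uint32_t timestamp_echo;
    };

    /* Walk type-length-value options; some are only honored when a request
     * context is present. */
    static int parse_options(struct req_ctx *dreq, const uint8_t *opt,
                             size_t len)
    {
        size_t off = 0;

        while (off + 2 <= len) {
            uint8_t type = opt[off];
            uint8_t olen = opt[off + 1];

            if (olen < 2 || off + olen > len)
                return -1;               /* malformed option */
            if (type == 42 && olen == 6 && dreq != NULL) {
                uint32_t opt_val;

                memcpy(&opt_val, &opt[off + 2], sizeof(opt_val));
                dreq->timestamp_echo = opt_val;  /* kernel stores ntohl() */
            }
            off += olen;
        }
        return 0;
    }

    int main(void)
    {
        struct req_ctx dreq = { 0 };
        const uint8_t opts[] = { 42, 6, 0xde, 0xad, 0xbe, 0xef };

        parse_options(&dreq, opts, sizeof(opts));
        printf("echo=%08x\n", dreq.timestamp_echo);
        return 0;
    }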
output.c:397 struct dccp_request_sock *dreq; dccp_make_response() local
416 dreq = dccp_rsk(req); dccp_make_response()
418 dccp_inc_seqno(&dreq->dreq_gss); dccp_make_response()
420 DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss; dccp_make_response()
423 if (dccp_feat_server_ccid_dependencies(dreq)) dccp_make_response()
426 if (dccp_insert_options_rsk(dreq, skb)) dccp_make_response()
438 dccp_hdr_set_seq(dh, dreq->dreq_gss); dccp_make_response()
439 dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr); dccp_make_response()
440 dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service; dccp_make_response()
ipv4.c:568 struct dccp_request_sock *dreq; dccp_v4_conn_request() local
605 dreq = dccp_rsk(req); dccp_v4_conn_request()
606 if (dccp_parse_options(sk, dreq, skb)) dccp_v4_conn_request()
625 dreq->dreq_isr = dcb->dccpd_seq; dccp_v4_conn_request()
626 dreq->dreq_gsr = dreq->dreq_isr; dccp_v4_conn_request()
627 dreq->dreq_iss = dccp_v4_init_sequence(skb); dccp_v4_conn_request()
628 dreq->dreq_gss = dreq->dreq_iss; dccp_v4_conn_request()
629 dreq->dreq_service = service; dccp_v4_conn_request()
ipv6.c:301 struct dccp_request_sock *dreq; dccp_v6_conn_request() local
334 dreq = dccp_rsk(req); dccp_v6_conn_request()
335 if (dccp_parse_options(sk, dreq, skb)) dccp_v6_conn_request()
366 dreq->dreq_isr = dcb->dccpd_seq; dccp_v6_conn_request()
367 dreq->dreq_gsr = dreq->dreq_isr; dccp_v6_conn_request()
368 dreq->dreq_iss = dccp_v6_init_sequence(skb); dccp_v6_conn_request()
369 dreq->dreq_gss = dreq->dreq_iss; dccp_v6_conn_request()
370 dreq->dreq_service = service; dccp_v6_conn_request()
feat.c:632 * @dreq: used by the server only (all Changes/Confirms in LISTEN/RESPOND)
634 int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq, dccp_feat_insert_opts() argument
637 struct list_head *fn = dreq ? &dreq->dreq_featneg : &dp->dccps_featneg; dccp_feat_insert_opts()
998 int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq) dccp_feat_server_ccid_dependencies() argument
1000 struct list_head *fn = &dreq->dreq_featneg; dccp_feat_server_ccid_dependencies()
1385 * @dreq: used by the server during connection setup
1394 int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq, dccp_feat_parse_options() argument
1398 struct list_head *fn = dreq ? &dreq->dreq_featneg : &dp->dccps_featneg; dccp_feat_parse_options()
dccp.h:474 int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
/linux-4.4.14/drivers/crypto/marvell/
tdma.c:40 void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq) mv_cesa_dma_step() argument
42 struct mv_cesa_engine *engine = dreq->base.engine; mv_cesa_dma_step()
54 writel_relaxed(dreq->chain.first->cur_dma, mv_cesa_dma_step()
59 void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq) mv_cesa_dma_cleanup() argument
63 for (tdma = dreq->chain.first; tdma;) { mv_cesa_dma_cleanup()
75 dreq->chain.first = NULL; mv_cesa_dma_cleanup()
76 dreq->chain.last = NULL; mv_cesa_dma_cleanup()
79 void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq, mv_cesa_dma_prepare() argument
84 for (tdma = dreq->chain.first; tdma; tdma = tdma->next) { mv_cesa_dma_prepare()
cipher.c:170 struct mv_cesa_tdma_req *dreq = &creq->req.dma; mv_cesa_ablkcipher_dma_prepare() local
172 mv_cesa_dma_prepare(dreq, dreq->base.engine); mv_cesa_ablkcipher_dma_prepare()
298 struct mv_cesa_tdma_req *dreq = &creq->req.dma; mv_cesa_ablkcipher_dma_req_init() local
304 dreq->base.type = CESA_DMA_REQ; mv_cesa_ablkcipher_dma_req_init()
305 dreq->chain.first = NULL; mv_cesa_ablkcipher_dma_req_init()
306 dreq->chain.last = NULL; mv_cesa_ablkcipher_dma_req_init()
361 dreq->chain = chain; mv_cesa_ablkcipher_dma_req_init()
366 mv_cesa_dma_cleanup(dreq); mv_cesa_ablkcipher_dma_req_init()
hash.c:51 struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma; mv_cesa_ahash_dma_alloc_cache() local
54 &dreq->cache_dma); mv_cesa_ahash_dma_alloc_cache()
302 struct mv_cesa_tdma_req *dreq = &creq->req.dma.base; mv_cesa_ahash_dma_prepare() local
304 mv_cesa_dma_prepare(dreq, dreq->base.engine); mv_cesa_ahash_dma_prepare()
603 struct mv_cesa_tdma_req *dreq = &ahashdreq->base; mv_cesa_ahash_dma_req_init() local
609 dreq->chain.first = NULL; mv_cesa_ahash_dma_req_init()
610 dreq->chain.last = NULL; mv_cesa_ahash_dma_req_init()
621 mv_cesa_tdma_desc_iter_init(&dreq->chain); mv_cesa_ahash_dma_req_init()
628 ret = mv_cesa_ahash_dma_add_cache(&dreq->chain, &iter, creq, flags); mv_cesa_ahash_dma_req_init()
639 ret = mv_cesa_dma_add_op_transfers(&dreq->chain, mv_cesa_ahash_dma_req_init()
650 op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl, mv_cesa_ahash_dma_req_init()
668 op = mv_cesa_ahash_dma_last_req(&dreq->chain, &iter, creq, mv_cesa_ahash_dma_req_init()
671 op = mv_cesa_dma_add_frag(&dreq->chain, &creq->op_tmpl, mv_cesa_ahash_dma_req_init()
681 ret = mv_cesa_dma_add_dummy_end(&dreq->chain, flags); mv_cesa_ahash_dma_req_init()
695 mv_cesa_dma_cleanup(dreq); mv_cesa_ahash_dma_req_init()
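mv_cesa_ahash_dma_req_init() appends fragments to the chain step by step and, when any step fails, jumps to a single cleanup path that undoes everything added so far. A compressed sketch of that goto-unwind idiom, using hypothetical allocations rather than the CESA API:

    #include <stdlib.h>

    /* Hypothetical three-step build; partial work must be undone on failure. */
    struct chain { void *a, *b, *c; };

    static int chain_build(struct chain *ch)
    {
        ch->a = malloc(16);
        if (!ch->a)
            goto err;
        ch->b = malloc(16);
        if (!ch->b)
            goto err_free_a;
        ch->c = malloc(16);
        if (!ch->c)
            goto err_free_b;
        return 0;

    err_free_b:
        free(ch->b);
    err_free_a:
        free(ch->a);
    err:
        return -1;
    }

    int main(void)
    {
        struct chain ch = { 0 };

        if (chain_build(&ch) == 0) {
            free(ch.c);
            free(ch.b);
            free(ch.a);
        }
        return 0;
    }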
cesa.h:767 void mv_cesa_dma_step(struct mv_cesa_tdma_req *dreq);
769 static inline int mv_cesa_dma_process(struct mv_cesa_tdma_req *dreq, mv_cesa_dma_process() argument
781 void mv_cesa_dma_prepare(struct mv_cesa_tdma_req *dreq,
784 void mv_cesa_dma_cleanup(struct mv_cesa_tdma_req *dreq);
/linux-4.4.14/net/sunrpc/
cache.c:535 static void __unhash_deferred_req(struct cache_deferred_req *dreq) __unhash_deferred_req() argument
537 hlist_del_init(&dreq->hash); __unhash_deferred_req()
538 if (!list_empty(&dreq->recent)) { __unhash_deferred_req()
539 list_del_init(&dreq->recent); __unhash_deferred_req()
544 static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item) __hash_deferred_req() argument
548 INIT_LIST_HEAD(&dreq->recent); __hash_deferred_req()
549 hlist_add_head(&dreq->hash, &cache_defer_hash[hash]); __hash_deferred_req()
552 static void setup_deferral(struct cache_deferred_req *dreq, setup_deferral() argument
557 dreq->item = item; setup_deferral()
561 __hash_deferred_req(dreq, item); setup_deferral()
565 list_add(&dreq->recent, &cache_defer_list); setup_deferral()
577 static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many) cache_restart_thread() argument
580 container_of(dreq, struct thread_deferred_req, handle); cache_restart_thread()
587 struct cache_deferred_req *dreq = &sleeper.handle; cache_wait_req() local
590 dreq->revisit = cache_restart_thread; cache_wait_req()
592 setup_deferral(dreq, item, 0); cache_wait_req()
646 struct cache_deferred_req *dreq; cache_defer_req() local
653 dreq = req->defer(req); cache_defer_req()
654 if (dreq == NULL) cache_defer_req()
656 setup_deferral(dreq, item, 1); cache_defer_req()
669 struct cache_deferred_req *dreq; cache_revisit_request() local
677 hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash) cache_revisit_request()
678 if (dreq->item == item) { cache_revisit_request()
679 __unhash_deferred_req(dreq); cache_revisit_request()
680 list_add(&dreq->recent, &pending); cache_revisit_request()
686 dreq = list_entry(pending.next, struct cache_deferred_req, recent); cache_revisit_request()
687 list_del_init(&dreq->recent); cache_revisit_request()
688 dreq->revisit(dreq, 0); cache_revisit_request()
694 struct cache_deferred_req *dreq, *tmp; cache_clean_deferred() local
701 list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) { cache_clean_deferred()
702 if (dreq->owner == owner) { cache_clean_deferred()
703 __unhash_deferred_req(dreq); cache_clean_deferred()
704 list_add(&dreq->recent, &pending); cache_clean_deferred()
710 dreq = list_entry(pending.next, struct cache_deferred_req, recent); cache_clean_deferred()
711 list_del_init(&dreq->recent); cache_clean_deferred()
712 dreq->revisit(dreq, 1); cache_clean_deferred()
svc_xprt.c:1077 static void svc_revisit(struct cache_deferred_req *dreq, int too_many) svc_revisit() argument
1080 container_of(dreq, struct svc_deferred_req, handle); svc_revisit()
/linux-4.4.14/drivers/s390/block/
dasd_diag.c:173 struct dasd_diag_req *dreq; dasd_start_diag() local
184 dreq = (struct dasd_diag_req *) cqr->data; dasd_start_diag()
189 private->iob.block_count = dreq->block_count; dasd_start_diag()
191 private->iob.bio_list = dreq->bio; dasd_start_diag()
511 struct dasd_diag_req *dreq; dasd_diag_build_cp() local
550 dreq = (struct dasd_diag_req *) cqr->data;
551 dreq->block_count = count;
552 dbio = dreq->bio;
/linux-4.4.14/drivers/dma/
bcm2835-dma.c:77 unsigned int dreq; member in struct:bcm2835_chan
439 if (c->dreq != 0) bcm2835_dma_prep_dma_cyclic()
441 BCM2835_DMA_PER_MAP(c->dreq); bcm2835_dma_prep_dma_cyclic()
579 to_bcm2835_dma_chan(chan)->dreq = spec->args[0]; bcm2835_dma_xlate()
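In bcm2835-dma.c, dreq is the peripheral's DMA request line, taken from the device-tree cell in bcm2835_dma_xlate() and, when non-zero, folded into the control block via BCM2835_DMA_PER_MAP(). A sketch of that mapping; the bit positions are illustrative and should be checked against the datasheet:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative register layout: DREQ number in a PER_MAP field of the
     * transfer-information word, plus a "pace source reads on DREQ" flag. */
    #define DMA_TI_PER_MAP(x)  ((uint32_t)(x) << 16)
    #define DMA_TI_SRC_DREQ    (1u << 10)

    struct dma_chan_cfg {
        unsigned int dreq;               /* 0 means "no pacing signal" */
    };

    static uint32_t build_transfer_info(const struct dma_chan_cfg *c)
    {
        uint32_t info = 0;

        if (c->dreq != 0)                /* mirror the c->dreq != 0 test above */
            info |= DMA_TI_PER_MAP(c->dreq) | DMA_TI_SRC_DREQ;
        return info;
    }

    int main(void)
    {
        struct dma_chan_cfg c = { .dreq = 5 };   /* value from the DT cell */

        printf("TI = 0x%08x\n", build_transfer_info(&c));
        return 0;
    }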
/linux-4.4.14/fs/nilfs2/
btree.c:1726 union nilfs_bmap_ptr_req *dreq, nilfs_btree_prepare_convert_and_insert()
1740 dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key); nilfs_btree_prepare_convert_and_insert()
1744 ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat); nilfs_btree_prepare_convert_and_insert()
1751 nreq->bpr_ptr = dreq->bpr_ptr + 1; nilfs_btree_prepare_convert_and_insert()
1771 nilfs_bmap_abort_alloc_ptr(btree, dreq, dat); nilfs_btree_prepare_convert_and_insert()
1782 union nilfs_bmap_ptr_req *dreq, nilfs_btree_commit_convert_and_insert()
1802 nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); nilfs_btree_commit_convert_and_insert()
1809 nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk); nilfs_btree_commit_convert_and_insert()
1824 nilfs_bmap_commit_alloc_ptr(btree, dreq, dat); nilfs_btree_commit_convert_and_insert()
1831 nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, nilfs_btree_commit_convert_and_insert()
1838 nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr); nilfs_btree_commit_convert_and_insert()
1855 union nilfs_bmap_ptr_req dreq, nreq, *di, *ni; nilfs_btree_convert_and_insert() local
1860 di = &dreq; nilfs_btree_convert_and_insert()
1864 di = &dreq; nilfs_btree_convert_and_insert()
1725 nilfs_btree_prepare_convert_and_insert(struct nilfs_bmap *btree, __u64 key, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head **bhp, struct nilfs_bmap_stats *stats) nilfs_btree_prepare_convert_and_insert() argument
1778 nilfs_btree_commit_convert_and_insert(struct nilfs_bmap *btree, __u64 key, __u64 ptr, const __u64 *keys, const __u64 *ptrs, int n, union nilfs_bmap_ptr_req *dreq, union nilfs_bmap_ptr_req *nreq, struct buffer_head *bh) nilfs_btree_commit_convert_and_insert() argument
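The nilfs2 hits show a two-phase convert-and-insert: nilfs_btree_prepare_convert_and_insert() reserves the dreq (and optionally nreq) pointers, and the caller later either commits or aborts each reservation, so nothing becomes visible until every prepare has succeeded. A minimal sketch of that prepare/commit/abort discipline, with malloc standing in for block allocation:

    #include <stdlib.h>

    /* Hypothetical two-phase allocation: reserve, then commit or abort. */
    struct ptr_req { void *mem; };

    static int prepare_alloc(struct ptr_req *req)
    {
        req->mem = malloc(64);           /* reserve; nothing visible yet */
        return req->mem ? 0 : -1;
    }

    static void commit_alloc(struct ptr_req *req)
    {
        (void)req;                       /* make it permanent; no-op here */
    }

    static void abort_alloc(struct ptr_req *req)
    {
        free(req->mem);                  /* roll the reservation back */
        req->mem = NULL;
    }

    int main(void)
    {
        struct ptr_req dreq, nreq;

        if (prepare_alloc(&dreq) < 0)
            return 1;
        if (prepare_alloc(&nreq) < 0) {
            abort_alloc(&dreq);          /* undo the earlier reservation */
            return 1;
        }
        commit_alloc(&dreq);             /* past here, nothing can fail */
        commit_alloc(&nreq);
        free(dreq.mem);
        free(nreq.mem);
        return 0;
    }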
/linux-4.4.14/drivers/usb/isp1760/
isp1760-if.c:226 if (of_property_read_bool(dp, "dreq-polarity")) isp1760_plat_probe()
/linux-4.4.14/drivers/net/ethernet/hisilicon/
hns_mdio.c:380 /*3. reset dreq, and read reset st check*/ hns_mdio_reset()
/linux-4.4.14/include/linux/
dccp.h:178 extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
nfs_xdr.h:1392 struct nfs_direct_req *dreq; member in struct:nfs_pgio_header
1434 struct nfs_direct_req *dreq; /* O_DIRECT request */ member in struct:nfs_commit_info
1446 struct nfs_direct_req *dreq; /* O_DIRECT request */ member in struct:nfs_commit_data
/linux-4.4.14/fs/nfs/blocklayout/
blocklayout.c:235 const bool is_dio = (header->dreq != NULL); bl_read_pagelist()
/linux-4.4.14/fs/nfs/flexfilelayout/
flexfilelayout.c:915 if (!hdr->dreq) { ff_layout_reset_write()
922 nfs_direct_set_resched_writes(hdr->dreq); ff_layout_reset_write()
/linux-4.4.14/drivers/usb/gadget/udc/
net2272.c:780 /* deassert dreq */ net2272_kick_dma()
/linux-4.4.14/drivers/infiniband/core/
cm.c:137 static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);

Completed in 2106 milliseconds