dreq 816 drivers/crypto/marvell/cesa.h void mv_cesa_dma_step(struct mv_cesa_req *dreq);
dreq 818 drivers/crypto/marvell/cesa.h static inline int mv_cesa_dma_process(struct mv_cesa_req *dreq,
dreq 830 drivers/crypto/marvell/cesa.h void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
dreq 832 drivers/crypto/marvell/cesa.h void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq);
dreq 834 drivers/crypto/marvell/cesa.h struct mv_cesa_req *dreq);
dreq 37 drivers/crypto/marvell/tdma.c void mv_cesa_dma_step(struct mv_cesa_req *dreq)
dreq 39 drivers/crypto/marvell/tdma.c struct mv_cesa_engine *engine = dreq->engine;
dreq 51 drivers/crypto/marvell/tdma.c writel_relaxed(dreq->chain.first->cur_dma,
dreq 58 drivers/crypto/marvell/tdma.c void mv_cesa_dma_cleanup(struct mv_cesa_req *dreq)
dreq 62 drivers/crypto/marvell/tdma.c for (tdma = dreq->chain.first; tdma;) {
dreq 75 drivers/crypto/marvell/tdma.c dreq->chain.first = NULL;
dreq 76 drivers/crypto/marvell/tdma.c dreq->chain.last = NULL;
dreq 79 drivers/crypto/marvell/tdma.c void mv_cesa_dma_prepare(struct mv_cesa_req *dreq,
dreq 84 drivers/crypto/marvell/tdma.c for (tdma = dreq->chain.first; tdma; tdma = tdma->next) {
dreq 97 drivers/crypto/marvell/tdma.c struct mv_cesa_req *dreq)
dreq 100 drivers/crypto/marvell/tdma.c engine->chain.first = dreq->chain.first;
dreq 101 drivers/crypto/marvell/tdma.c engine->chain.last = dreq->chain.last;
dreq 106 drivers/crypto/marvell/tdma.c last->next = dreq->chain.first;
dreq 107 drivers/crypto/marvell/tdma.c engine->chain.last = dreq->chain.last;
dreq 116 drivers/crypto/marvell/tdma.c !(dreq->chain.first->flags & CESA_TDMA_SET_STATE))
dreq 117 drivers/crypto/marvell/tdma.c last->next_dma = dreq->chain.first->cur_dma;
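The drivers/crypto/marvell/tdma.c lines above all belong to one pattern: a request's TDMA descriptor chain is appended to the engine's chain both in the CPU-visible linked list (->next) and in the DMA-visible list (->next_dma). A minimal sketch of that append, using simplified stand-in types rather than the real CESA definitions, and omitting the CESA_TDMA_SET_STATE special case:

#include <linux/types.h>

struct tdma_desc {
	struct tdma_desc *next;	/* CPU-side singly linked list */
	u32 next_dma;		/* DMA address of the next descriptor */
	u32 cur_dma;		/* DMA address of this descriptor */
};

struct tdma_chain {
	struct tdma_desc *first;
	struct tdma_desc *last;
};

static void chain_append(struct tdma_chain *engine, struct tdma_chain *req)
{
	if (!engine->first) {
		/* Engine idle: the request's chain becomes the whole chain. */
		engine->first = req->first;
		engine->last = req->last;
	} else {
		/* Link after the current tail, for both CPU and DMA views. */
		struct tdma_desc *last = engine->last;

		last->next = req->first;
		last->next_dma = req->first->cur_dma;
		engine->last = req->last;
	}
}
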
dreq 74 drivers/dma/bcm2835-dma.c unsigned int dreq;
dreq 661 drivers/dma/bcm2835-dma.c if (c->dreq != 0)
dreq 662 drivers/dma/bcm2835-dma.c info |= BCM2835_DMA_PER_MAP(c->dreq);
dreq 735 drivers/dma/bcm2835-dma.c if (c->dreq != 0)
dreq 736 drivers/dma/bcm2835-dma.c info |= BCM2835_DMA_PER_MAP(c->dreq);
dreq 878 drivers/dma/bcm2835-dma.c to_bcm2835_dma_chan(chan)->dreq = spec->args[0];
dreq 189 drivers/infiniband/core/cm.c static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
dreq 168 drivers/s390/block/dasd_diag.c struct dasd_diag_req *dreq;
dreq 179 drivers/s390/block/dasd_diag.c dreq = cqr->data;
dreq 184 drivers/s390/block/dasd_diag.c private->iob.block_count = dreq->block_count;
dreq 186 drivers/s390/block/dasd_diag.c private->iob.bio_list = dreq->bio;
dreq 504 drivers/s390/block/dasd_diag.c struct dasd_diag_req *dreq;
dreq 544 drivers/s390/block/dasd_diag.c dreq = (struct dasd_diag_req *) cqr->data;
dreq 545 drivers/s390/block/dasd_diag.c dreq->block_count = count;
dreq 546 drivers/s390/block/dasd_diag.c dbio = dreq->bio;
dreq 266 fs/nfs/blocklayout/blocklayout.c const bool is_dio = (header->dreq != NULL);
dreq 68 fs/nfs/cache_lib.c void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq)
dreq 70 fs/nfs/cache_lib.c if (refcount_dec_and_test(&dreq->count))
dreq 71 fs/nfs/cache_lib.c kfree(dreq);
dreq 76 fs/nfs/cache_lib.c struct nfs_cache_defer_req *dreq;
dreq 78 fs/nfs/cache_lib.c dreq = container_of(d, struct nfs_cache_defer_req, deferred_req);
dreq 80 fs/nfs/cache_lib.c complete(&dreq->completion);
dreq 81 fs/nfs/cache_lib.c nfs_cache_defer_req_put(dreq);
dreq 86 fs/nfs/cache_lib.c struct nfs_cache_defer_req *dreq;
dreq 88 fs/nfs/cache_lib.c dreq = container_of(req, struct nfs_cache_defer_req, req);
dreq 89 fs/nfs/cache_lib.c dreq->deferred_req.revisit = nfs_dns_cache_revisit;
dreq 90 fs/nfs/cache_lib.c refcount_inc(&dreq->count);
dreq 92 fs/nfs/cache_lib.c return &dreq->deferred_req;
dreq 97 fs/nfs/cache_lib.c struct nfs_cache_defer_req *dreq;
dreq 99 fs/nfs/cache_lib.c dreq = kzalloc(sizeof(*dreq), GFP_KERNEL);
dreq 100 fs/nfs/cache_lib.c if (dreq) {
dreq 101 fs/nfs/cache_lib.c init_completion(&dreq->completion);
dreq 102 fs/nfs/cache_lib.c refcount_set(&dreq->count, 1);
dreq 103 fs/nfs/cache_lib.c dreq->req.defer = nfs_dns_cache_defer;
dreq 105 fs/nfs/cache_lib.c return dreq;
dreq 108 fs/nfs/cache_lib.c int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq)
dreq 110 fs/nfs/cache_lib.c if (wait_for_completion_timeout(&dreq->completion,
dreq 24 fs/nfs/cache_lib.h extern void nfs_cache_defer_req_put(struct nfs_cache_defer_req *dreq);
dreq 25 fs/nfs/cache_lib.h extern int nfs_cache_wait_for_upcall(struct nfs_cache_defer_req *dreq);
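The fs/nfs/cache_lib.c lines above form a refcounted deferred-request lifecycle: the request is allocated with a count of 1 (the waiter's reference), a second reference is taken when it is handed to the sunrpc cache for deferral, and whichever side drops the count to zero frees it. A minimal sketch of that lifecycle; the struct is a reduced stand-in and the surrounding cache machinery is omitted:

#include <linux/slab.h>
#include <linux/refcount.h>
#include <linux/completion.h>

struct defer_req_sketch {
	refcount_t count;
	struct completion completion;
};

static struct defer_req_sketch *defer_req_alloc(void)
{
	struct defer_req_sketch *dreq;

	dreq = kzalloc(sizeof(*dreq), GFP_KERNEL);
	if (dreq) {
		init_completion(&dreq->completion);
		refcount_set(&dreq->count, 1);	/* the waiter's reference */
	}
	return dreq;
}

static void defer_req_put(struct defer_req_sketch *dreq)
{
	if (refcount_dec_and_test(&dreq->count))
		kfree(dreq);
}

/* Called when the request is handed to the cache for deferral; the
 * cache's revisit callback later completes the waiter and puts this
 * reference. */
static void defer_req_hand_off(struct defer_req_sketch *dreq)
{
	refcount_inc(&dreq->count);
}
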
dreq 102 fs/nfs/direct.c static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
dreq 105 fs/nfs/direct.c static inline void get_dreq(struct nfs_direct_req *dreq)
dreq 107 fs/nfs/direct.c atomic_inc(&dreq->io_count);
dreq 110 fs/nfs/direct.c static inline int put_dreq(struct nfs_direct_req *dreq)
dreq 112 fs/nfs/direct.c return atomic_dec_and_test(&dreq->io_count);
dreq 116 fs/nfs/direct.c nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
dreq 123 fs/nfs/direct.c if (dreq->max_count >= dreq_len) {
dreq 124 fs/nfs/direct.c dreq->max_count = dreq_len;
dreq 125 fs/nfs/direct.c if (dreq->count > dreq_len)
dreq 126 fs/nfs/direct.c dreq->count = dreq_len;
dreq 129 fs/nfs/direct.c dreq->error = hdr->error;
dreq 131 fs/nfs/direct.c dreq->error = 0;
dreq 136 fs/nfs/direct.c nfs_direct_count_bytes(struct nfs_direct_req *dreq,
dreq 142 fs/nfs/direct.c if (hdr_end > dreq->io_start)
dreq 143 fs/nfs/direct.c dreq_len = hdr_end - dreq->io_start;
dreq 145 fs/nfs/direct.c nfs_direct_handle_truncated(dreq, hdr, dreq_len);
dreq 147 fs/nfs/direct.c if (dreq_len > dreq->max_count)
dreq 148 fs/nfs/direct.c dreq_len = dreq->max_count;
dreq 150 fs/nfs/direct.c if (dreq->count < dreq_len)
dreq 151 fs/nfs/direct.c dreq->count = dreq_len;
dreq 163 fs/nfs/direct.c nfs_direct_select_verf(struct nfs_direct_req *dreq,
dreq 167 fs/nfs/direct.c struct nfs_writeverf *verfp = &dreq->verf;
dreq 174 fs/nfs/direct.c if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
dreq 175 fs/nfs/direct.c if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
dreq 176 fs/nfs/direct.c verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
dreq 192 fs/nfs/direct.c static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
dreq 197 fs/nfs/direct.c verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
dreq 218 fs/nfs/direct.c static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
dreq 223 fs/nfs/direct.c verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
dreq 225 fs/nfs/direct.c nfs_direct_set_hdr_verf(dreq, hdr);
dreq 239 fs/nfs/direct.c static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
dreq 244 fs/nfs/direct.c verfp = nfs_direct_select_verf(dreq, data->ds_clp,
dreq 287 fs/nfs/direct.c struct nfs_direct_req *dreq)
dreq 289 fs/nfs/direct.c cinfo->inode = dreq->inode;
dreq 290 fs/nfs/direct.c cinfo->mds = &dreq->mds_cinfo;
dreq 291 fs/nfs/direct.c cinfo->ds = &dreq->ds_cinfo;
dreq 292 fs/nfs/direct.c cinfo->dreq = dreq;
dreq 298 fs/nfs/direct.c struct nfs_direct_req *dreq;
dreq 300 fs/nfs/direct.c dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
dreq 301 fs/nfs/direct.c if (!dreq)
dreq 304 fs/nfs/direct.c kref_init(&dreq->kref);
dreq 305 fs/nfs/direct.c kref_get(&dreq->kref);
dreq 306 fs/nfs/direct.c init_completion(&dreq->completion);
dreq 307 fs/nfs/direct.c INIT_LIST_HEAD(&dreq->mds_cinfo.list);
dreq 308 fs/nfs/direct.c dreq->verf.committed = NFS_INVALID_STABLE_HOW; /* not set yet */
dreq 309 fs/nfs/direct.c INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
dreq 310 fs/nfs/direct.c spin_lock_init(&dreq->lock);
dreq 312 fs/nfs/direct.c return dreq;
dreq 317 fs/nfs/direct.c struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
dreq 319 fs/nfs/direct.c nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
dreq 320 fs/nfs/direct.c if (dreq->l_ctx != NULL)
dreq 321 fs/nfs/direct.c nfs_put_lock_context(dreq->l_ctx);
dreq 322 fs/nfs/direct.c if (dreq->ctx != NULL)
dreq 323 fs/nfs/direct.c put_nfs_open_context(dreq->ctx);
dreq 324 fs/nfs/direct.c kmem_cache_free(nfs_direct_cachep, dreq);
dreq 327 fs/nfs/direct.c static void nfs_direct_req_release(struct nfs_direct_req *dreq)
dreq 329 fs/nfs/direct.c kref_put(&dreq->kref, nfs_direct_req_free);
dreq 332 fs/nfs/direct.c ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
dreq 334 fs/nfs/direct.c return dreq->bytes_left;
dreq 341 fs/nfs/direct.c static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
dreq 346 fs/nfs/direct.c if (dreq->iocb)
dreq 349 fs/nfs/direct.c result = wait_for_completion_killable(&dreq->completion);
dreq 352 fs/nfs/direct.c result = dreq->count;
dreq 353 fs/nfs/direct.c WARN_ON_ONCE(dreq->count < 0);
dreq 356 fs/nfs/direct.c result = dreq->error;
dreq 366 fs/nfs/direct.c static void nfs_direct_complete(struct nfs_direct_req *dreq)
dreq 368 fs/nfs/direct.c struct inode *inode = dreq->inode;
dreq 372 fs/nfs/direct.c if (dreq->iocb) {
dreq 373 fs/nfs/direct.c long res = (long) dreq->error;
dreq 374 fs/nfs/direct.c if (dreq->count != 0) {
dreq 375 fs/nfs/direct.c res = (long) dreq->count;
dreq 376 fs/nfs/direct.c WARN_ON_ONCE(dreq->count < 0);
dreq 378 fs/nfs/direct.c dreq->iocb->ki_complete(dreq->iocb, res, 0);
dreq 381 fs/nfs/direct.c complete(&dreq->completion);
dreq 383 fs/nfs/direct.c nfs_direct_req_release(dreq);
dreq 389 fs/nfs/direct.c struct nfs_direct_req *dreq = hdr->dreq;
dreq 391 fs/nfs/direct.c spin_lock(&dreq->lock);
dreq 393 fs/nfs/direct.c spin_unlock(&dreq->lock);
dreq 397 fs/nfs/direct.c nfs_direct_count_bytes(dreq, hdr);
dreq 398 fs/nfs/direct.c spin_unlock(&dreq->lock);
dreq 405 fs/nfs/direct.c (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
dreq 412 fs/nfs/direct.c if (put_dreq(dreq))
dreq 413 fs/nfs/direct.c nfs_direct_complete(dreq);
dreq 430 fs/nfs/direct.c get_dreq(hdr->dreq);
dreq 447 fs/nfs/direct.c static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
dreq 452 fs/nfs/direct.c struct inode *inode = dreq->inode;
dreq 457 fs/nfs/direct.c nfs_pageio_init_read(&desc, dreq->inode, false,
dreq 459 fs/nfs/direct.c get_dreq(dreq);
dreq 460 fs/nfs/direct.c desc.pg_dreq = dreq;
dreq 481 fs/nfs/direct.c req = nfs_create_request(dreq->ctx, pagevec[i],
dreq 498 fs/nfs/direct.c dreq->bytes_left -= req_len;
dreq 514 fs/nfs/direct.c nfs_direct_req_release(dreq);
dreq 518 fs/nfs/direct.c if (put_dreq(dreq))
dreq 519 fs/nfs/direct.c nfs_direct_complete(dreq);
dreq 547 fs/nfs/direct.c struct nfs_direct_req *dreq;
dreq 563 fs/nfs/direct.c dreq = nfs_direct_req_alloc();
dreq 564 fs/nfs/direct.c if (dreq == NULL)
dreq 567 fs/nfs/direct.c dreq->inode = inode;
dreq 568 fs/nfs/direct.c dreq->bytes_left = dreq->max_count = count;
dreq 569 fs/nfs/direct.c dreq->io_start = iocb->ki_pos;
dreq 570 fs/nfs/direct.c dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
dreq 571 fs/nfs/direct.c l_ctx = nfs_get_lock_context(dreq->ctx);
dreq 574 fs/nfs/direct.c nfs_direct_req_release(dreq);
dreq 577 fs/nfs/direct.c dreq->l_ctx = l_ctx;
dreq 579 fs/nfs/direct.c dreq->iocb = iocb;
dreq 582 fs/nfs/direct.c dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
dreq 587 fs/nfs/direct.c requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
dreq 592 fs/nfs/direct.c result = nfs_direct_wait(dreq);
dreq 603 fs/nfs/direct.c nfs_direct_req_release(dreq);
dreq 622 fs/nfs/direct.c static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
dreq 630 fs/nfs/direct.c nfs_init_cinfo_from_dreq(&cinfo, dreq);
dreq 631 fs/nfs/direct.c nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
dreq 633 fs/nfs/direct.c dreq->count = 0;
dreq 634 fs/nfs/direct.c dreq->max_count = 0;
dreq 636 fs/nfs/direct.c dreq->max_count += req->wb_bytes;
dreq 637 fs/nfs/direct.c dreq->verf.committed = NFS_INVALID_STABLE_HOW;
dreq 638 fs/nfs/direct.c nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
dreq 639 fs/nfs/direct.c get_dreq(dreq);
dreq 641 fs/nfs/direct.c nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
dreq 643 fs/nfs/direct.c desc.pg_dreq = dreq;
dreq 651 fs/nfs/direct.c dreq->flags = 0;
dreq 653 fs/nfs/direct.c dreq->error = desc.pg_error;
dreq 655 fs/nfs/direct.c dreq->error = -EIO;
dreq 668 fs/nfs/direct.c if (put_dreq(dreq))
dreq 669 fs/nfs/direct.c nfs_direct_write_complete(dreq);
dreq 674 fs/nfs/direct.c struct nfs_direct_req *dreq = data->dreq;
dreq 679 fs/nfs/direct.c nfs_init_cinfo_from_dreq(&cinfo, dreq);
dreq 680 fs/nfs/direct.c if (status < 0 || nfs_direct_cmp_commit_data_verf(dreq, data))
dreq 681 fs/nfs/direct.c dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
dreq 686 fs/nfs/direct.c if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
dreq 700 fs/nfs/direct.c nfs_direct_write_complete(dreq);
dreq 706 fs/nfs/direct.c struct nfs_direct_req *dreq = cinfo->dreq;
dreq 708 fs/nfs/direct.c spin_lock(&dreq->lock);
dreq 709 fs/nfs/direct.c dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
dreq 710 fs/nfs/direct.c spin_unlock(&dreq->lock);
dreq 719 fs/nfs/direct.c static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
dreq 725 fs/nfs/direct.c nfs_init_cinfo_from_dreq(&cinfo, dreq);
dreq 726 fs/nfs/direct.c nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
dreq 727 fs/nfs/direct.c res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
dreq 729 fs/nfs/direct.c nfs_direct_write_reschedule(dreq);
dreq 734 fs/nfs/direct.c struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
dreq 735 fs/nfs/direct.c int flags = dreq->flags;
dreq 737 fs/nfs/direct.c dreq->flags = 0;
dreq 740 fs/nfs/direct.c nfs_direct_commit_schedule(dreq);
dreq 743 fs/nfs/direct.c nfs_direct_write_reschedule(dreq);
dreq 746 fs/nfs/direct.c nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
dreq 747 fs/nfs/direct.c nfs_direct_complete(dreq);
dreq 751 fs/nfs/direct.c static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
dreq 753 fs/nfs/direct.c queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
dreq 758 fs/nfs/direct.c struct nfs_direct_req *dreq = hdr->dreq;
dreq 763 fs/nfs/direct.c nfs_init_cinfo_from_dreq(&cinfo, dreq);
dreq 765 fs/nfs/direct.c spin_lock(&dreq->lock);
dreq 767 fs/nfs/direct.c spin_unlock(&dreq->lock);
dreq 771 fs/nfs/direct.c nfs_direct_count_bytes(dreq, hdr);
dreq 774 fs/nfs/direct.c if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
dreq 776 fs/nfs/direct.c else if (dreq->flags == 0) {
dreq 777 fs/nfs/direct.c nfs_direct_set_hdr_verf(dreq, hdr);
dreq 779 fs/nfs/direct.c dreq->flags = NFS_ODIRECT_DO_COMMIT;
dreq 780 fs/nfs/direct.c } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
dreq 782 fs/nfs/direct.c if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
dreq 783 fs/nfs/direct.c dreq->flags =
dreq 788 fs/nfs/direct.c spin_unlock(&dreq->lock);
dreq 803 fs/nfs/direct.c if (put_dreq(dreq))
dreq 804 fs/nfs/direct.c nfs_direct_write_complete(dreq);
dreq 821 fs/nfs/direct.c struct nfs_direct_req *dreq = hdr->dreq;
dreq 823 fs/nfs/direct.c spin_lock(&dreq->lock);
dreq 824 fs/nfs/direct.c if (dreq->error == 0) {
dreq 825 fs/nfs/direct.c dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
dreq 830 fs/nfs/direct.c spin_unlock(&dreq->lock);
dreq 852 fs/nfs/direct.c static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
dreq 857 fs/nfs/direct.c struct inode *inode = dreq->inode;
dreq 864 fs/nfs/direct.c desc.pg_dreq = dreq;
dreq 865 fs/nfs/direct.c get_dreq(dreq);
dreq 887 fs/nfs/direct.c req = nfs_create_request(dreq->ctx, pagevec[i],
dreq 912 fs/nfs/direct.c dreq->bytes_left -= req_len;
dreq 927 fs/nfs/direct.c nfs_direct_req_release(dreq);
dreq 931 fs/nfs/direct.c if (put_dreq(dreq))
dreq 932 fs/nfs/direct.c nfs_direct_write_complete(dreq);
dreq 963 fs/nfs/direct.c struct nfs_direct_req *dreq;
dreq 982 fs/nfs/direct.c dreq = nfs_direct_req_alloc();
dreq 983 fs/nfs/direct.c if (!dreq)
dreq 986 fs/nfs/direct.c dreq->inode = inode;
dreq 987 fs/nfs/direct.c dreq->bytes_left = dreq->max_count = count;
dreq 988 fs/nfs/direct.c dreq->io_start = pos;
dreq 989 fs/nfs/direct.c dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
dreq 990 fs/nfs/direct.c l_ctx = nfs_get_lock_context(dreq->ctx);
dreq 993 fs/nfs/direct.c nfs_direct_req_release(dreq);
dreq 996 fs/nfs/direct.c dreq->l_ctx = l_ctx;
dreq 998 fs/nfs/direct.c dreq->iocb = iocb;
dreq 1002 fs/nfs/direct.c requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);
dreq 1012 fs/nfs/direct.c result = nfs_direct_wait(dreq);
dreq 1024 fs/nfs/direct.c nfs_direct_req_release(dreq);
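Across the fs/nfs/direct.c lines above, get_dreq()/put_dreq() implement completion gating on an atomic I/O count: the scheduler holds one reference while it splits the request, each sub-request takes one, and whichever path drops the count to zero finishes the whole direct request. A minimal sketch of that gating, with a reduced stand-in struct (the real nfs_direct_req carries far more state):

#include <linux/atomic.h>
#include <linux/completion.h>

struct direct_req_sketch {
	atomic_t io_count;		/* outstanding sub-requests */
	struct completion completion;	/* signalled when count hits zero */
};

static inline void get_dreq_sketch(struct direct_req_sketch *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq_sketch(struct direct_req_sketch *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}

static void schedule_io(struct direct_req_sketch *dreq, int nr_subreqs)
{
	int i;

	get_dreq_sketch(dreq);		/* reference held while scheduling */
	for (i = 0; i < nr_subreqs; i++) {
		get_dreq_sketch(dreq);	/* one per submitted sub-request */
		/* ...submit the sub-request; its completion handler calls
		 * put_dreq_sketch() and completes on zero... */
	}
	/* Drop the scheduling reference; if all sub-requests already
	 * finished, this thread performs the completion itself. */
	if (put_dreq_sketch(dreq))
		complete(&dreq->completion);
}
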
dreq 284 fs/nfs/dns_resolve.c struct nfs_cache_defer_req *dreq)
dreq 290 fs/nfs/dns_resolve.c ret = cache_check(cd, &(*item)->h, &dreq->req);
dreq 326 fs/nfs/dns_resolve.c struct nfs_cache_defer_req *dreq;
dreq 329 fs/nfs/dns_resolve.c dreq = nfs_cache_defer_req_alloc();
dreq 330 fs/nfs/dns_resolve.c if (!dreq)
dreq 332 fs/nfs/dns_resolve.c ret = do_cache_lookup(cd, key, item, dreq);
dreq 334 fs/nfs/dns_resolve.c ret = nfs_cache_wait_for_upcall(dreq);
dreq 338 fs/nfs/dns_resolve.c nfs_cache_defer_req_put(dreq);
dreq 509 fs/nfs/internal.h struct nfs_direct_req *dreq);
dreq 554 fs/nfs/internal.h struct nfs_direct_req *dreq);
dreq 555 fs/nfs/internal.h extern ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq);
dreq 660 fs/nfs/internal.h if (!cinfo->dreq) {
dreq 5284 fs/nfs/nfs4proc.c if (hdr->ds_clp != NULL || hdr->dreq != NULL)
dreq 55 fs/nfs/pagelist.c hdr->dreq = desc->pg_dreq;
dreq 1248 fs/nfs/pagelist.c desc->pg_dreq = hdr->dreq;
dreq 929 fs/nfs/write.c cinfo->dreq = NULL;
dreq 935 fs/nfs/write.c struct nfs_direct_req *dreq)
dreq 937 fs/nfs/write.c if (dreq)
dreq 938 fs/nfs/write.c nfs_init_cinfo_from_dreq(cinfo, dreq);
dreq 1071 fs/nfs/write.c if ((ret == max) && !cinfo->dreq)
dreq 1764 fs/nfs/write.c data->dreq = cinfo->dreq;
dreq 1788 fs/nfs/write.c if (!cinfo->dreq)
dreq 1890 fs/nfs/write.c nfs_init_cinfo(&cinfo, data->inode, data->dreq);
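The fs/nfs/dns_resolve.c lines above are the consumer of the cache_lib deferral machinery: allocate a deferrable request, attempt the cache lookup, park on the completion if the answer still needs a userspace upcall, then drop the waiter's reference. A condensed sketch of that flow; do_lookup() is a hypothetical stand-in for do_cache_lookup(), and the retry-after-upcall handling of the real code is trimmed:

static int lookup_with_deferral(struct cache_detail *cd, const char *key,
				struct nfs_dns_ent **item)
{
	struct nfs_cache_defer_req *dreq;
	int ret;

	dreq = nfs_cache_defer_req_alloc();
	if (!dreq)
		return -ENOMEM;

	ret = do_lookup(cd, key, item, dreq);	/* may hand dreq to the cache */
	if (ret == -ETIMEDOUT)			/* answer not there yet */
		ret = nfs_cache_wait_for_upcall(dreq);

	nfs_cache_defer_req_put(dreq);		/* drop the waiter's reference */
	return ret;
}
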
dreq 1727 fs/nilfs2/btree.c union nilfs_bmap_ptr_req *dreq,
dreq 1741 fs/nilfs2/btree.c dreq->bpr_ptr = nilfs_btree_find_target_v(btree, NULL, key);
dreq 1745 fs/nilfs2/btree.c ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
dreq 1752 fs/nilfs2/btree.c nreq->bpr_ptr = dreq->bpr_ptr + 1;
dreq 1772 fs/nilfs2/btree.c nilfs_bmap_abort_alloc_ptr(btree, dreq, dat);
dreq 1783 fs/nilfs2/btree.c union nilfs_bmap_ptr_req *dreq,
dreq 1803 fs/nilfs2/btree.c nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
dreq 1810 fs/nilfs2/btree.c nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr, ncblk);
dreq 1825 fs/nilfs2/btree.c nilfs_bmap_commit_alloc_ptr(btree, dreq, dat);
dreq 1832 fs/nilfs2/btree.c nilfs_btree_node_insert(node, n, key, dreq->bpr_ptr,
dreq 1839 fs/nilfs2/btree.c nilfs_bmap_set_target_v(btree, key, dreq->bpr_ptr);
dreq 1856 fs/nilfs2/btree.c union nilfs_bmap_ptr_req dreq, nreq, *di, *ni;
dreq 1861 fs/nilfs2/btree.c di = &dreq;
dreq 1865 fs/nilfs2/btree.c di = &dreq;
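In the fs/nilfs2/btree.c lines above, dreq is a data-block pointer request paired with an optional node-block request (nreq) during conversion of a direct mapping to a B-tree; the two are prepared together and either both committed or both aborted. A minimal sketch of the prepare side of that two-phase pattern, using the helper names from the listing but with the surrounding conversion logic omitted (types and exact signatures should be checked against the source):

static int convert_prepare(struct nilfs_bmap *btree,
			   union nilfs_bmap_ptr_req *dreq,
			   union nilfs_bmap_ptr_req *nreq,
			   struct inode *dat)
{
	int ret;

	ret = nilfs_bmap_prepare_alloc_ptr(btree, dreq, dat);
	if (ret < 0)
		return ret;

	if (nreq) {
		/* The node block is allocated right after the data block. */
		nreq->bpr_ptr = dreq->bpr_ptr + 1;
		ret = nilfs_bmap_prepare_alloc_ptr(btree, nreq, dat);
		if (ret < 0)
			/* Keep the pair consistent: undo the data request. */
			nilfs_bmap_abort_alloc_ptr(btree, dreq, dat);
	}
	return ret;
}
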
dreq 180 include/linux/dccp.h extern int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
dreq 1521 include/linux/nfs_xdr.h struct nfs_direct_req *dreq;
dreq 1562 include/linux/nfs_xdr.h struct nfs_direct_req *dreq; /* O_DIRECT request */
dreq 1574 include/linux/nfs_xdr.h struct nfs_direct_req *dreq; /* O_DIRECT request */
dreq 471 net/dccp/dccp.h int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq);
dreq 629 net/dccp/feat.c int dccp_feat_insert_opts(struct dccp_sock *dp, struct dccp_request_sock *dreq,
dreq 632 net/dccp/feat.c struct list_head *fn = dreq ? &dreq->dreq_featneg : &dp->dccps_featneg;
dreq 998 net/dccp/feat.c int dccp_feat_server_ccid_dependencies(struct dccp_request_sock *dreq)
dreq 1000 net/dccp/feat.c struct list_head *fn = &dreq->dreq_featneg;
dreq 1394 net/dccp/feat.c int dccp_feat_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
dreq 1398 net/dccp/feat.c struct list_head *fn = dreq ? &dreq->dreq_featneg : &dp->dccps_featneg;
dreq 577 net/dccp/ipv4.c struct dccp_request_sock *dreq;
dreq 608 net/dccp/ipv4.c dreq = dccp_rsk(req);
dreq 609 net/dccp/ipv4.c if (dccp_parse_options(sk, dreq, skb))
dreq 629 net/dccp/ipv4.c dreq->dreq_isr = dcb->dccpd_seq;
dreq 630 net/dccp/ipv4.c dreq->dreq_gsr = dreq->dreq_isr;
dreq 631 net/dccp/ipv4.c dreq->dreq_iss = dccp_v4_init_sequence(skb);
dreq 632 net/dccp/ipv4.c dreq->dreq_gss = dreq->dreq_iss;
dreq 633 net/dccp/ipv4.c dreq->dreq_service = service;
dreq 310 net/dccp/ipv6.c struct dccp_request_sock *dreq;
dreq 343 net/dccp/ipv6.c dreq = dccp_rsk(req);
dreq 344 net/dccp/ipv6.c if (dccp_parse_options(sk, dreq, skb))
dreq 376 net/dccp/ipv6.c dreq->dreq_isr = dcb->dccpd_seq;
dreq 377 net/dccp/ipv6.c dreq->dreq_gsr = dreq->dreq_isr;
dreq 378 net/dccp/ipv6.c dreq->dreq_iss = dccp_v6_init_sequence(skb);
dreq 379 net/dccp/ipv6.c dreq->dreq_gss = dreq->dreq_iss;
dreq 380 net/dccp/ipv6.c dreq->dreq_service = service;
dreq 90 net/dccp/minisocks.c struct dccp_request_sock *dreq = dccp_rsk(req);
dreq 97 net/dccp/minisocks.c newdp->dccps_service = dreq->dreq_service;
dreq 98 net/dccp/minisocks.c newdp->dccps_timestamp_echo = dreq->dreq_timestamp_echo;
dreq 99 net/dccp/minisocks.c newdp->dccps_timestamp_time = dreq->dreq_timestamp_time;
dreq 114 net/dccp/minisocks.c newdp->dccps_iss = dreq->dreq_iss;
dreq 115 net/dccp/minisocks.c newdp->dccps_gss = dreq->dreq_gss;
dreq 117 net/dccp/minisocks.c newdp->dccps_isr = dreq->dreq_isr;
dreq 118 net/dccp/minisocks.c newdp->dccps_gsr = dreq->dreq_gsr;
dreq 123 net/dccp/minisocks.c if (dccp_feat_activate_values(newsk, &dreq->dreq_featneg)) {
dreq 144 net/dccp/minisocks.c struct dccp_request_sock *dreq = dccp_rsk(req);
dreq 152 net/dccp/minisocks.c spin_lock_bh(&dreq->dreq_lock);
dreq 157 net/dccp/minisocks.c if (after48(DCCP_SKB_CB(skb)->dccpd_seq, dreq->dreq_gsr)) {
dreq 159 net/dccp/minisocks.c dreq->dreq_gsr = DCCP_SKB_CB(skb)->dccpd_seq;
dreq 179 net/dccp/minisocks.c dreq->dreq_iss, dreq->dreq_gss)) {
dreq 184 net/dccp/minisocks.c (unsigned long long) dreq->dreq_iss,
dreq 185 net/dccp/minisocks.c (unsigned long long) dreq->dreq_gss);
dreq 189 net/dccp/minisocks.c if (dccp_parse_options(sk, dreq, skb))
dreq 206 net/dccp/minisocks.c spin_unlock_bh(&dreq->dreq_lock);
dreq 256 net/dccp/minisocks.c struct dccp_request_sock *dreq = dccp_rsk(req);
dreq 258 net/dccp/minisocks.c spin_lock_init(&dreq->dreq_lock);
dreq 262 net/dccp/minisocks.c dreq->dreq_timestamp_echo = 0;
dreq 265 net/dccp/minisocks.c return dccp_feat_clone_list(&dp->dccps_featneg, &dreq->dreq_featneg);
dreq 47 net/dccp/options.c int dccp_parse_options(struct sock *sk, struct dccp_request_sock *dreq,
dreq 98 net/dccp/options.c if (dreq != NULL && (opt >= DCCPO_MIN_RX_CCID_SPECIFIC ||
dreq 124 net/dccp/options.c rc = dccp_feat_parse_options(sk, dreq, mandatory, opt,
dreq 143 net/dccp/options.c if (dreq != NULL) {
dreq 144 net/dccp/options.c dreq->dreq_timestamp_echo = ntohl(opt_val);
dreq 145 net/dccp/options.c dreq->dreq_timestamp_time = dccp_timestamp();
dreq 352 net/dccp/options.c struct dccp_request_sock *dreq,
dreq 359 net/dccp/options.c if (dreq != NULL) {
dreq 360 net/dccp/options.c elapsed_time = dccp_timestamp() - dreq->dreq_timestamp_time;
dreq 361 net/dccp/options.c tstamp_echo = htonl(dreq->dreq_timestamp_echo);
dreq 362 net/dccp/options.c dreq->dreq_timestamp_echo = 0;
dreq 588 net/dccp/options.c int dccp_insert_options_rsk(struct dccp_request_sock *dreq, struct sk_buff *skb)
dreq 592 net/dccp/options.c if (dccp_feat_insert_opts(NULL, dreq, skb))
dreq 599 net/dccp/options.c if (dreq->dreq_timestamp_echo != 0 &&
dreq 600 net/dccp/options.c dccp_insert_option_timestamp_echo(NULL, dreq, skb))
dreq 394 net/dccp/output.c struct dccp_request_sock *dreq;
dreq 413 net/dccp/output.c dreq = dccp_rsk(req);
dreq 415 net/dccp/output.c dccp_inc_seqno(&dreq->dreq_gss);
dreq 417 net/dccp/output.c DCCP_SKB_CB(skb)->dccpd_seq = dreq->dreq_gss;
dreq 420 net/dccp/output.c if (dccp_feat_server_ccid_dependencies(dreq))
dreq 423 net/dccp/output.c if (dccp_insert_options_rsk(dreq, skb))
dreq 435 net/dccp/output.c dccp_hdr_set_seq(dh, dreq->dreq_gss);
dreq 436 net/dccp/output.c dccp_hdr_set_ack(dccp_hdr_ack_bits(skb), dreq->dreq_gsr);
dreq 437 net/dccp/output.c dccp_hdr_response(skb)->dccph_resp_service = dreq->dreq_service;
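In net/dccp, dreq is the DCCP request socket, and the ipv4.c/ipv6.c lines above share one seeding step when a connection request arrives: the peer's initial sequence number seeds ISR/GSR (initial/greatest sequence received), and a freshly generated local initial sequence number seeds ISS/GSS (initial/greatest sequence sent). A minimal sketch of that step; init_seq() stands in for dccp_v4_init_sequence()/dccp_v6_init_sequence(), and the field names follow the listing:

#include <linux/dccp.h>

static void dccp_seed_request(struct dccp_request_sock *dreq,
			      u64 peer_seq, u64 local_isn, __be32 service)
{
	dreq->dreq_isr = peer_seq;		/* initial sequence received */
	dreq->dreq_gsr = dreq->dreq_isr;	/* greatest received so far */
	dreq->dreq_iss = local_isn;		/* e.g. from init_seq(skb) */
	dreq->dreq_gss = dreq->dreq_iss;	/* greatest sent so far */
	dreq->dreq_service = service;
}

The minisocks.c lines then show the inverse: when the handshake completes, these dreq_* values are copied into the full socket's dccps_* fields.
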
dreq 568 net/sunrpc/cache.c static void __unhash_deferred_req(struct cache_deferred_req *dreq)
dreq 570 net/sunrpc/cache.c hlist_del_init(&dreq->hash);
dreq 571 net/sunrpc/cache.c if (!list_empty(&dreq->recent)) {
dreq 572 net/sunrpc/cache.c list_del_init(&dreq->recent);
dreq 577 net/sunrpc/cache.c static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
dreq 581 net/sunrpc/cache.c INIT_LIST_HEAD(&dreq->recent);
dreq 582 net/sunrpc/cache.c hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
dreq 585 net/sunrpc/cache.c static void setup_deferral(struct cache_deferred_req *dreq,
dreq 590 net/sunrpc/cache.c dreq->item = item;
dreq 594 net/sunrpc/cache.c __hash_deferred_req(dreq, item);
dreq 598 net/sunrpc/cache.c list_add(&dreq->recent, &cache_defer_list);
dreq 610 net/sunrpc/cache.c static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
dreq 613 net/sunrpc/cache.c container_of(dreq, struct thread_deferred_req, handle);
dreq 620 net/sunrpc/cache.c struct cache_deferred_req *dreq = &sleeper.handle;
dreq 623 net/sunrpc/cache.c dreq->revisit = cache_restart_thread;
dreq 625 net/sunrpc/cache.c setup_deferral(dreq, item, 0);
dreq 679 net/sunrpc/cache.c struct cache_deferred_req *dreq;
dreq 686 net/sunrpc/cache.c dreq = req->defer(req);
dreq 687 net/sunrpc/cache.c if (dreq == NULL)
dreq 689 net/sunrpc/cache.c setup_deferral(dreq, item, 1);
dreq 702 net/sunrpc/cache.c struct cache_deferred_req *dreq;
dreq 710 net/sunrpc/cache.c hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
dreq 711 net/sunrpc/cache.c if (dreq->item == item) {
dreq 712 net/sunrpc/cache.c __unhash_deferred_req(dreq);
dreq 713 net/sunrpc/cache.c list_add(&dreq->recent, &pending);
dreq 719 net/sunrpc/cache.c dreq = list_entry(pending.next, struct cache_deferred_req, recent);
dreq 720 net/sunrpc/cache.c list_del_init(&dreq->recent);
dreq 721 net/sunrpc/cache.c dreq->revisit(dreq, 0);
dreq 727 net/sunrpc/cache.c struct cache_deferred_req *dreq, *tmp;
dreq 734 net/sunrpc/cache.c list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
dreq 735 net/sunrpc/cache.c if (dreq->owner == owner) {
dreq 736 net/sunrpc/cache.c __unhash_deferred_req(dreq);
dreq 737 net/sunrpc/cache.c list_add(&dreq->recent, &pending);
dreq 743 net/sunrpc/cache.c dreq = list_entry(pending.next, struct cache_deferred_req, recent);
dreq 744 net/sunrpc/cache.c list_del_init(&dreq->recent);
dreq 745 net/sunrpc/cache.c dreq->revisit(dreq, 1);
dreq 1139 net/sunrpc/svc_xprt.c static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
dreq 1142 net/sunrpc/svc_xprt.c container_of(dreq, struct svc_deferred_req, handle);
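The net/sunrpc/cache.c lines above implement the generic deferral hash: deferred requests are chained on a hash bucket keyed by the cache item they are waiting for, and when the item becomes valid they are moved to a private list under the lock and revisited outside it. A minimal sketch of that revisit walk, assuming the helpers named in the listing; defer_hash() is a hypothetical stand-in for the real bucket-index macro, and the "too many deferrals" path is omitted:

#include <linux/list.h>
#include <linux/spinlock.h>

static void revisit_waiters(struct cache_head *item)
{
	struct hlist_head *chain = &cache_defer_hash[defer_hash(item)];
	struct cache_deferred_req *dreq;
	struct hlist_node *tmp;
	LIST_HEAD(pending);

	/* Collect matching waiters under the lock... */
	spin_lock(&cache_defer_lock);
	hlist_for_each_entry_safe(dreq, tmp, chain, hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	spin_unlock(&cache_defer_lock);

	/* ...then revisit them outside it. */
	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next,
				  struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);	/* 0: not dropped as "too many" */
	}
}

The svc_xprt.c lines show one concrete revisit callback: svc_revisit() recovers its enclosing svc_deferred_req via container_of(), the same embedding trick cache.c's cache_restart_thread() uses.
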