
Searched refs: req (Results 1 – 200 of 1255) sorted by relevance


/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
client.c
50 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
51 static int ptlrpcd_check_work(struct ptlrpc_request *req);
139 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, in ptlrpc_prep_bulk_imp() argument
143 struct obd_import *imp = req->rq_import; in ptlrpc_prep_bulk_imp()
151 desc->bd_import_generation = req->rq_import_generation; in ptlrpc_prep_bulk_imp()
153 desc->bd_req = req; in ptlrpc_prep_bulk_imp()
159 req->rq_bulk = desc; in ptlrpc_prep_bulk_imp()
221 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req) in ptlrpc_at_set_req_timeout() argument
227 LASSERT(req->rq_import); in ptlrpc_at_set_req_timeout()
239 req->rq_timeout = req->rq_import->imp_server_timeout ? in ptlrpc_at_set_req_timeout()
[all …]
sec_null.c
80 int null_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) in null_ctx_sign() argument
82 req->rq_reqbuf->lm_secflvr = SPTLRPC_FLVR_NULL; in null_ctx_sign()
84 if (!req->rq_import->imp_dlm_fake) { in null_ctx_sign()
85 struct obd_device *obd = req->rq_import->imp_obd; in null_ctx_sign()
87 null_encode_sec_part(req->rq_reqbuf, in null_ctx_sign()
90 req->rq_reqdata_len = req->rq_reqlen; in null_ctx_sign()
95 int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) in null_ctx_verify() argument
99 LASSERT(req->rq_repdata); in null_ctx_verify()
101 req->rq_repmsg = req->rq_repdata; in null_ctx_verify()
102 req->rq_replen = req->rq_repdata_len; in null_ctx_verify()
[all …]
sec.c
352 int sptlrpc_req_get_ctx(struct ptlrpc_request *req) in sptlrpc_req_get_ctx() argument
354 struct obd_import *imp = req->rq_import; in sptlrpc_req_get_ctx()
358 LASSERT(!req->rq_cli_ctx); in sptlrpc_req_get_ctx()
365 req->rq_cli_ctx = get_my_ctx(sec); in sptlrpc_req_get_ctx()
369 if (!req->rq_cli_ctx) { in sptlrpc_req_get_ctx()
370 CERROR("req %p: fail to get context\n", req); in sptlrpc_req_get_ctx()
386 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync) in sptlrpc_req_put_ctx() argument
388 LASSERT(req); in sptlrpc_req_put_ctx()
389 LASSERT(req->rq_cli_ctx); in sptlrpc_req_put_ctx()
394 if (!list_empty(&req->rq_ctx_chain)) { in sptlrpc_req_put_ctx()
[all …]
sec_plain.c
191 int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) in plain_ctx_sign() argument
193 struct lustre_msg *msg = req->rq_reqbuf; in plain_ctx_sign()
196 msg->lm_secflvr = req->rq_flvr.sf_rpc; in plain_ctx_sign()
202 phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg; in plain_ctx_sign()
204 if (req->rq_pack_udesc) in plain_ctx_sign()
206 if (req->rq_pack_bulk) in plain_ctx_sign()
209 req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount, in plain_ctx_sign()
215 int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) in plain_ctx_verify() argument
217 struct lustre_msg *msg = req->rq_repdata; in plain_ctx_verify()
227 swabbed = ptlrpc_rep_need_swab(req); in plain_ctx_verify()
[all …]
events.c
57 struct ptlrpc_request *req = cbid->cbid_arg; in request_out_callback() local
63 DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status); in request_out_callback()
65 sptlrpc_request_out_callback(req); in request_out_callback()
66 spin_lock(&req->rq_lock); in request_out_callback()
67 req->rq_real_sent = ktime_get_real_seconds(); in request_out_callback()
69 req->rq_req_unlink = 0; in request_out_callback()
76 req->rq_net_err = 1; in request_out_callback()
77 ptlrpc_client_wake_req(req); in request_out_callback()
79 spin_unlock(&req->rq_lock); in request_out_callback()
81 ptlrpc_req_finished(req); in request_out_callback()
[all …]
niobuf.c
113 static int ptlrpc_register_bulk(struct ptlrpc_request *req) in ptlrpc_register_bulk() argument
115 struct ptlrpc_bulk_desc *desc = req->rq_bulk; in ptlrpc_register_bulk()
138 if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY) in ptlrpc_register_bulk()
157 xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1); in ptlrpc_register_bulk()
159 req->rq_send_state != LUSTRE_IMP_REPLAY) || in ptlrpc_register_bulk()
207 req->rq_status = -ENOMEM; in ptlrpc_register_bulk()
213 req->rq_xid = --xid; in ptlrpc_register_bulk()
214 LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK), in ptlrpc_register_bulk()
216 desc->bd_last_xid, req->rq_xid); in ptlrpc_register_bulk()
230 desc->bd_last_xid, req->rq_xid, desc->bd_portal); in ptlrpc_register_bulk()
[all …]
llog_client.c
86 struct ptlrpc_request *req = NULL; in llog_client_open() local
95 req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE); in llog_client_open()
96 if (req == NULL) { in llog_client_open()
102 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, in llog_client_open()
105 rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION, in llog_client_open()
108 ptlrpc_request_free(req); in llog_client_open()
109 req = NULL; in llog_client_open()
112 ptlrpc_request_set_replen(req); in llog_client_open()
114 body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY); in llog_client_open()
122 tmp = req_capsule_client_sized_get(&req->rq_pill, &RMF_NAME, in llog_client_open()
[all …]
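
Taken together, the ptlrpc excerpts above show the client-side request lifecycle: allocate against a request format, pack, set the expected reply length, queue and wait, then drop the reference. A minimal sketch of that flow, assembled only from the calls visible in these results; the LLOG_ORIGIN_HANDLE_CREATE opcode and the error-handling details are assumptions, not copied from the elided lines:

static int example_llog_open_rpc(struct obd_export *exp)
{
	struct ptlrpc_request *req;
	int rc;

	/* Allocate a request on this import for a known request format. */
	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_LLOG_ORIGIN_HANDLE_CREATE);
	if (req == NULL)
		return -ENOMEM;

	/* Pack the request buffers for the opcode (assumed here). */
	rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION,
				 LLOG_ORIGIN_HANDLE_CREATE);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	/* Size the reply buffers, send, and wait for the reply. */
	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);

	/* Drop the reference taken at allocation time. */
	ptlrpc_req_finished(req);
	return rc;
}
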
recover.c
73 struct ptlrpc_request *req = NULL; in ptlrpc_replay_next() local
110 req = list_entry(tmp, struct ptlrpc_request, in ptlrpc_replay_next()
114 if (req->rq_transno > last_transno) { in ptlrpc_replay_next()
122 req = list_entry(imp->imp_replay_cursor, in ptlrpc_replay_next()
125 if (req->rq_transno > last_transno) in ptlrpc_replay_next()
128 req = NULL; in ptlrpc_replay_next()
135 req = NULL; in ptlrpc_replay_next()
141 if (req == NULL) { in ptlrpc_replay_next()
143 req = list_entry(tmp, struct ptlrpc_request, in ptlrpc_replay_next()
146 if (req->rq_transno > last_transno) in ptlrpc_replay_next()
[all …]
service.c
63 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
64 static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
693 static void ptlrpc_server_free_request(struct ptlrpc_request *req) in ptlrpc_server_free_request() argument
695 LASSERT(atomic_read(&req->rq_refcount) == 0); in ptlrpc_server_free_request()
696 LASSERT(list_empty(&req->rq_timed_list)); in ptlrpc_server_free_request()
700 ptlrpc_req_drop_rs(req); in ptlrpc_server_free_request()
702 sptlrpc_svc_ctx_decref(req); in ptlrpc_server_free_request()
704 if (req != &req->rq_rqbd->rqbd_req) { in ptlrpc_server_free_request()
708 ptlrpc_request_cache_free(req); in ptlrpc_server_free_request()
716 static void ptlrpc_server_drop_request(struct ptlrpc_request *req) in ptlrpc_server_drop_request() argument
[all …]
/linux-4.4.14/drivers/macintosh/
via-pmu68k.c
106 static int pmu_send_request(struct adb_request *req, int sync);
114 static void pmu_done(struct adb_request *req);
192 volatile struct adb_request req; in pmu_init() local
197 pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB); in pmu_init()
199 while (!req.complete) { in pmu_init()
224 pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK, in pmu_init()
227 while (!req.complete) { in pmu_init()
272 pmu_send_request(struct adb_request *req, int sync) in pmu_send_request() argument
278 req->complete = 1; in pmu_send_request()
284 switch (req->data[0]) { in pmu_send_request()
[all …]
adb-iop.c
66 static void adb_iop_end_req(struct adb_request *req, int state) in adb_iop_end_req() argument
68 req->complete = 1; in adb_iop_end_req()
69 current_req = req->next; in adb_iop_end_req()
70 if (req->done) (*req->done)(req); in adb_iop_end_req()
82 struct adb_request *req; in adb_iop_complete() local
87 req = current_req; in adb_iop_complete()
88 if ((adb_iop_state == sending) && req && req->reply_expected) { in adb_iop_complete()
105 struct adb_request *req; in adb_iop_listen() local
113 req = current_req; in adb_iop_listen()
116 printk("adb_iop_listen %p: rcvd packet, %d bytes: %02X %02X", req, in adb_iop_listen()
[all …]
via-macii.c
86 static int macii_send_request(struct adb_request *req, int sync);
87 static int macii_write(struct adb_request *req);
123 static int request_is_queued(struct adb_request *req) { in request_is_queued() argument
129 if (cur == req) { in request_is_queued()
205 static struct adb_request req; in macii_queue_poll() local
215 BUG_ON(request_is_queued(&req)); in macii_queue_poll()
217 adb_request(&req, NULL, ADBREQ_NOSEND, 1, in macii_queue_poll()
220 req.sent = 0; in macii_queue_poll()
221 req.complete = 0; in macii_queue_poll()
222 req.reply_len = 0; in macii_queue_poll()
[all …]
via-pmu.c
184 static int pmu_send_request(struct adb_request *req, int sync);
216 int pmu_polled_request(struct adb_request *req);
531 struct adb_request req; in init_pmu() local
536 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); in init_pmu()
538 while (!req.complete) { in init_pmu()
563 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); in init_pmu()
564 while (!req.complete) in init_pmu()
569 pmu_request(&req, NULL, 1, PMU_GET_VERSION); in init_pmu()
570 pmu_wait_complete(&req); in init_pmu()
571 if (req.reply_len > 0) in init_pmu()
[all …]
via-maciisi.c
82 static int maciisi_send_request(struct adb_request* req, int sync);
83 static void maciisi_sync(struct adb_request *req);
84 static int maciisi_write(struct adb_request* req);
229 maciisi_send_request(struct adb_request* req, int sync) in maciisi_send_request() argument
238 req->complete = 1; in maciisi_send_request()
245 for (i = 0; i < req->nbytes; i++) { in maciisi_send_request()
246 printk(" %.2x", req->data[i]); in maciisi_send_request()
252 req->reply_expected = 1; in maciisi_send_request()
254 i = maciisi_write(req); in maciisi_send_request()
271 maciisi_sync(req); in maciisi_send_request()
[all …]
via-cuda.c
91 static int cuda_send_request(struct adb_request *req, int sync);
101 static int cuda_write(struct adb_request *req);
103 int cuda_request(struct adb_request *req,
120 struct adb_request req; in find_via_cuda() local
137 cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1); in find_via_cuda()
138 while (!req.complete) in find_via_cuda()
146 struct adb_request req; in find_via_cuda() local
190 cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1); in find_via_cuda()
191 while (!req.complete) in find_via_cuda()
307 cuda_send_request(struct adb_request *req, int sync) in cuda_send_request() argument
[all …]
adb.c
116 static void printADBreply(struct adb_request *req)
120 printk("adb reply (%d)", req->reply_len);
121 for(i = 0; i < req->reply_len; i++)
122 printk(" %x", req->reply[i]);
132 struct adb_request req; in adb_scan_bus() local
137 adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, in adb_scan_bus()
139 if (req.reply_len > 1) in adb_scan_bus()
156 adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, in adb_scan_bus()
163 adb_request(&req, NULL, ADBREQ_SYNC, 3, in adb_scan_bus()
171 adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, in adb_scan_bus()
[all …]
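
All of the Macintosh drivers above share the synchronous idiom visible in the adb.c and via-pmu.c excerpts: fill a struct adb_request, submit it, and wait until req.complete is set (or let adb_request() do the waiting via ADBREQ_SYNC). A small sketch of that pattern; ADB_READREG(addr, 3) is assumed to be the usual "read register 3" packet macro from <linux/adb.h>, and the command bytes elided in the excerpt are not reproduced here:

#include <linux/types.h>
#include <linux/adb.h>

/* Synchronously read register 3 of an ADB device and copy out the reply. */
static int example_adb_read_reg3(int addr, u8 *buf, int buflen)
{
	struct adb_request req;
	int i;

	/* ADBREQ_SYNC: adb_request() sleeps until req.complete is set;
	 * ADBREQ_REPLY: the controller stores the reply in req.reply[]. */
	adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1,
		    ADB_READREG(addr, 3));

	if (req.reply_len <= 1)
		return -ENODEV;

	for (i = 0; i < req.reply_len && i < buflen; i++)
		buf[i] = req.reply[i];

	return req.reply_len;
}
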
macio-adb.c
66 static int macio_send_request(struct adb_request *req, int sync);
166 static int macio_send_request(struct adb_request *req, int sync) in macio_send_request() argument
171 if (req->data[0] != ADB_PACKET) in macio_send_request()
174 for (i = 0; i < req->nbytes - 1; ++i) in macio_send_request()
175 req->data[i] = req->data[i+1]; in macio_send_request()
176 --req->nbytes; in macio_send_request()
178 req->next = NULL; in macio_send_request()
179 req->sent = 0; in macio_send_request()
180 req->complete = 0; in macio_send_request()
181 req->reply_len = 0; in macio_send_request()
[all …]
/linux-4.4.14/drivers/s390/scsi/
zfcp_fsf.c
66 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) in zfcp_fsf_class_not_supp() argument
68 dev_err(&req->adapter->ccw_device->dev, "FCP device not " in zfcp_fsf_class_not_supp()
70 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1"); in zfcp_fsf_class_not_supp()
71 req->status |= ZFCP_STATUS_FSFREQ_ERROR; in zfcp_fsf_class_not_supp()
78 void zfcp_fsf_req_free(struct zfcp_fsf_req *req) in zfcp_fsf_req_free() argument
80 if (likely(req->pool)) { in zfcp_fsf_req_free()
81 if (likely(req->qtcb)) in zfcp_fsf_req_free()
82 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool); in zfcp_fsf_req_free()
83 mempool_free(req, req->pool); in zfcp_fsf_req_free()
87 if (likely(req->qtcb)) in zfcp_fsf_req_free()
[all …]
zfcp_reqlist.h
85 struct zfcp_fsf_req *req; in _zfcp_reqlist_find() local
89 list_for_each_entry(req, &rl->buckets[i], list) in _zfcp_reqlist_find()
90 if (req->req_id == req_id) in _zfcp_reqlist_find()
91 return req; in _zfcp_reqlist_find()
107 struct zfcp_fsf_req *req; in zfcp_reqlist_find() local
110 req = _zfcp_reqlist_find(rl, req_id); in zfcp_reqlist_find()
113 return req; in zfcp_reqlist_find()
132 struct zfcp_fsf_req *req; in zfcp_reqlist_find_rm() local
135 req = _zfcp_reqlist_find(rl, req_id); in zfcp_reqlist_find_rm()
136 if (req) in zfcp_reqlist_find_rm()
[all …]
zfcp_dbf.h
279 void zfcp_dbf_hba_fsf_resp(char *tag, int level, struct zfcp_fsf_req *req) in zfcp_dbf_hba_fsf_resp() argument
281 if (debug_level_enabled(req->adapter->dbf->hba, level)) in zfcp_dbf_hba_fsf_resp()
282 zfcp_dbf_hba_fsf_res(tag, req); in zfcp_dbf_hba_fsf_resp()
290 void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req) in zfcp_dbf_hba_fsf_response() argument
292 struct fsf_qtcb *qtcb = req->qtcb; in zfcp_dbf_hba_fsf_response()
296 zfcp_dbf_hba_fsf_resp("fs_perr", 1, req); in zfcp_dbf_hba_fsf_response()
299 zfcp_dbf_hba_fsf_resp("fs_ferr", 1, req); in zfcp_dbf_hba_fsf_response()
301 } else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) || in zfcp_dbf_hba_fsf_response()
302 (req->fsf_command == FSF_QTCB_OPEN_LUN)) { in zfcp_dbf_hba_fsf_response()
303 zfcp_dbf_hba_fsf_resp("fs_open", 4, req); in zfcp_dbf_hba_fsf_response()
[all …]
/linux-4.4.14/drivers/block/drbd/
drbd_req.c
37 static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req) in _drbd_start_io_acct() argument
39 generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9, in _drbd_start_io_acct()
44 static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req) in _drbd_end_io_acct() argument
46 generic_end_io_acct(bio_data_dir(req->master_bio), in _drbd_end_io_acct()
47 &device->vdisk->part0, req->start_jif); in _drbd_end_io_acct()
53 struct drbd_request *req; in drbd_req_new() local
55 req = mempool_alloc(drbd_request_mempool, GFP_NOIO); in drbd_req_new()
56 if (!req) in drbd_req_new()
58 memset(req, 0, sizeof(*req)); in drbd_req_new()
60 drbd_req_make_private_bio(req, bio_src); in drbd_req_new()
[all …]
/linux-4.4.14/net/tipc/
discover.c
184 static void disc_update(struct tipc_link_req *req) in disc_update() argument
186 if (!req->num_nodes) { in disc_update()
187 if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) || in disc_update()
188 (req->timer_intv > TIPC_LINK_REQ_FAST)) { in disc_update()
189 req->timer_intv = TIPC_LINK_REQ_INIT; in disc_update()
190 mod_timer(&req->timer, jiffies + req->timer_intv); in disc_update()
199 void tipc_disc_add_dest(struct tipc_link_req *req) in tipc_disc_add_dest() argument
201 spin_lock_bh(&req->lock); in tipc_disc_add_dest()
202 req->num_nodes++; in tipc_disc_add_dest()
203 spin_unlock_bh(&req->lock); in tipc_disc_add_dest()
[all …]
/linux-4.4.14/drivers/staging/rdma/hfi1/
user_sdma.c
263 struct user_sdma_request *req; member
274 #define SDMA_DBG(req, fmt, ...) \ argument
275 hfi1_cdbg(SDMA, "[%u:%u:%u:%u] " fmt, (req)->pq->dd->unit, \
276 (req)->pq->ctxt, (req)->pq->subctxt, (req)->info.comp_idx, \
461 struct user_sdma_request *req = &pq->reqs[j]; in hfi1_user_sdma_free_queues() local
463 if (test_bit(SDMA_REQ_IN_USE, &req->flags)) { in hfi1_user_sdma_free_queues()
464 set_comp_state(req, ERROR, -ECOMM); in hfi1_user_sdma_free_queues()
465 user_sdma_free_request(req); in hfi1_user_sdma_free_queues()
495 struct user_sdma_request *req; in hfi1_user_sdma_process_request() local
498 if (iovec[idx].iov_len < sizeof(info) + sizeof(req->hdr)) { in hfi1_user_sdma_process_request()
[all …]
/linux-4.4.14/net/sunrpc/
backchannel_rqst.c
59 static void xprt_free_allocation(struct rpc_rqst *req) in xprt_free_allocation() argument
63 dprintk("RPC: free allocations for req= %p\n", req); in xprt_free_allocation()
64 WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); in xprt_free_allocation()
65 xbufp = &req->rq_rcv_buf; in xprt_free_allocation()
67 xbufp = &req->rq_snd_buf; in xprt_free_allocation()
69 kfree(req); in xprt_free_allocation()
92 struct rpc_rqst *req; in xprt_alloc_bc_req() local
95 req = kzalloc(sizeof(*req), gfp_flags); in xprt_alloc_bc_req()
96 if (req == NULL) in xprt_alloc_bc_req()
99 req->rq_xprt = xprt; in xprt_alloc_bc_req()
[all …]
xprt.c
184 struct rpc_rqst *req = task->tk_rqstp; in xprt_reserve_xprt() local
193 if (req != NULL) in xprt_reserve_xprt()
194 req->rq_ntrans++; in xprt_reserve_xprt()
203 if (req == NULL) in xprt_reserve_xprt()
205 else if (!req->rq_ntrans) in xprt_reserve_xprt()
235 struct rpc_rqst *req = task->tk_rqstp; in xprt_reserve_xprt_cong() local
243 if (req == NULL) { in xprt_reserve_xprt_cong()
249 req->rq_ntrans++; in xprt_reserve_xprt_cong()
254 if (req) in xprt_reserve_xprt_cong()
255 __xprt_put_cong(xprt, req); in xprt_reserve_xprt_cong()
[all …]
/linux-4.4.14/crypto/
chacha20poly1305.c
50 struct ahash_request req; /* must be last member */ member
56 struct ablkcipher_request req; /* must be last member */ member
76 static inline void async_done_continue(struct aead_request *req, int err, in async_done_continue() argument
80 err = cont(req); in async_done_continue()
83 aead_request_complete(req, err); in async_done_continue()
86 static void chacha_iv(u8 *iv, struct aead_request *req, u32 icb) in chacha_iv() argument
88 struct chachapoly_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); in chacha_iv()
93 memcpy(iv + sizeof(leicb) + ctx->saltlen, req->iv, in chacha_iv()
97 static int poly_verify_tag(struct aead_request *req) in poly_verify_tag() argument
99 struct chachapoly_req_ctx *rctx = aead_request_ctx(req); in poly_verify_tag()
[all …]
seqiv.c
35 static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) in seqiv_complete2() argument
37 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); in seqiv_complete2()
46 geniv = skcipher_givcrypt_reqtfm(req); in seqiv_complete2()
47 memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv)); in seqiv_complete2()
55 struct skcipher_givcrypt_request *req = base->data; in seqiv_complete() local
57 seqiv_complete2(req, err); in seqiv_complete()
58 skcipher_givcrypt_complete(req, err); in seqiv_complete()
61 static void seqiv_aead_encrypt_complete2(struct aead_request *req, int err) in seqiv_aead_encrypt_complete2() argument
63 struct aead_request *subreq = aead_request_ctx(req); in seqiv_aead_encrypt_complete2()
72 geniv = crypto_aead_reqtfm(req); in seqiv_aead_encrypt_complete2()
[all …]
echainiv.c
60 static void echainiv_encrypt_complete2(struct aead_request *req, int err) in echainiv_encrypt_complete2() argument
62 struct aead_request *subreq = aead_request_ctx(req); in echainiv_encrypt_complete2()
72 geniv = crypto_aead_reqtfm(req); in echainiv_encrypt_complete2()
77 if (req->iv != subreq->iv) in echainiv_encrypt_complete2()
78 memcpy(req->iv, subreq->iv, ivsize); in echainiv_encrypt_complete2()
81 if (req->iv != subreq->iv) in echainiv_encrypt_complete2()
88 struct aead_request *req = base->data; in echainiv_encrypt_complete() local
90 echainiv_encrypt_complete2(req, err); in echainiv_encrypt_complete()
91 aead_request_complete(req, err); in echainiv_encrypt_complete()
94 static int echainiv_encrypt(struct aead_request *req) in echainiv_encrypt() argument
[all …]
gcm.c
64 int (*complete)(struct aead_request *req, u32 flags);
91 static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc);
94 struct aead_request *req) in crypto_gcm_reqctx() argument
96 unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); in crypto_gcm_reqctx()
98 return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); in crypto_gcm_reqctx()
101 static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) in crypto_gcm_setkey_done() argument
103 struct crypto_gcm_setkey_result *result = req->data; in crypto_gcm_setkey_done()
125 struct ablkcipher_request req; in crypto_gcm_setkey() member
145 ablkcipher_request_set_tfm(&data->req, ctr); in crypto_gcm_setkey()
146 ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | in crypto_gcm_setkey()
[all …]
ahash.c
131 int crypto_hash_walk_first(struct ahash_request *req, in crypto_hash_walk_first() argument
134 walk->total = req->nbytes; in crypto_hash_walk_first()
141 walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); in crypto_hash_walk_first()
142 walk->sg = req->src; in crypto_hash_walk_first()
143 walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK; in crypto_hash_walk_first()
149 int crypto_ahash_walk_first(struct ahash_request *req, in crypto_ahash_walk_first() argument
152 walk->total = req->nbytes; in crypto_ahash_walk_first()
159 walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); in crypto_ahash_walk_first()
160 walk->sg = req->src; in crypto_ahash_walk_first()
161 walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK; in crypto_ahash_walk_first()
[all …]
zlib.c
144 struct comp_request *req) in zlib_compress_update() argument
150 pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); in zlib_compress_update()
151 stream->next_in = req->next_in; in zlib_compress_update()
152 stream->avail_in = req->avail_in; in zlib_compress_update()
153 stream->next_out = req->next_out; in zlib_compress_update()
154 stream->avail_out = req->avail_out; in zlib_compress_update()
170 ret = req->avail_out - stream->avail_out; in zlib_compress_update()
173 req->avail_in - stream->avail_in, ret); in zlib_compress_update()
174 req->next_in = stream->next_in; in zlib_compress_update()
175 req->avail_in = stream->avail_in; in zlib_compress_update()
[all …]
authenc.c
45 static void authenc_request_complete(struct aead_request *req, int err) in authenc_request_complete() argument
48 aead_request_complete(req, err); in authenc_request_complete()
120 struct aead_request *req = areq->data; in authenc_geniv_ahash_done() local
121 struct crypto_aead *authenc = crypto_aead_reqtfm(req); in authenc_geniv_ahash_done()
124 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); in authenc_geniv_ahash_done()
130 scatterwalk_map_and_copy(ahreq->result, req->dst, in authenc_geniv_ahash_done()
131 req->assoclen + req->cryptlen, in authenc_geniv_ahash_done()
135 aead_request_complete(req, err); in authenc_geniv_ahash_done()
138 static int crypto_authenc_genicv(struct aead_request *req, unsigned int flags) in crypto_authenc_genicv() argument
140 struct crypto_aead *authenc = crypto_aead_reqtfm(req); in crypto_authenc_genicv()
[all …]
authencesn.c
48 static void authenc_esn_request_complete(struct aead_request *req, int err) in authenc_esn_request_complete() argument
51 aead_request_complete(req, err); in authenc_esn_request_complete()
100 static int crypto_authenc_esn_genicv_tail(struct aead_request *req, in crypto_authenc_esn_genicv_tail() argument
103 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); in crypto_authenc_esn_genicv_tail()
105 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); in crypto_authenc_esn_genicv_tail()
110 unsigned int assoclen = req->assoclen; in crypto_authenc_esn_genicv_tail()
111 unsigned int cryptlen = req->cryptlen; in crypto_authenc_esn_genicv_tail()
112 struct scatterlist *dst = req->dst; in crypto_authenc_esn_genicv_tail()
127 struct aead_request *req = areq->data; in authenc_esn_geniv_ahash_done() local
129 err = err ?: crypto_authenc_esn_genicv_tail(req, 0); in authenc_esn_geniv_ahash_done()
[all …]
ccm.c
57 struct aead_request *req) in crypto_ccm_reqctx() argument
59 unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); in crypto_ccm_reqctx()
61 return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); in crypto_ccm_reqctx()
129 static int format_input(u8 *info, struct aead_request *req, in format_input() argument
132 struct crypto_aead *aead = crypto_aead_reqtfm(req); in format_input()
133 unsigned int lp = req->iv[0]; in format_input()
139 memcpy(info, req->iv, 16); in format_input()
145 if (req->assoclen) in format_input()
251 static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain, in crypto_ccm_auth() argument
254 struct crypto_aead *aead = crypto_aead_reqtfm(req); in crypto_ccm_auth()
[all …]
chainiv.c
48 static int chainiv_givencrypt(struct skcipher_givcrypt_request *req) in chainiv_givencrypt() argument
50 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); in chainiv_givencrypt()
52 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); in chainiv_givencrypt()
57 ablkcipher_request_set_callback(subreq, req->creq.base.flags & in chainiv_givencrypt()
59 req->creq.base.complete, in chainiv_givencrypt()
60 req->creq.base.data); in chainiv_givencrypt()
61 ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, in chainiv_givencrypt()
62 req->creq.nbytes, req->creq.info); in chainiv_givencrypt()
68 memcpy(req->giv, ctx->iv, ivsize); in chainiv_givencrypt()
137 static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req) in async_chainiv_postpone_request() argument
[all …]
cryptd.c
137 struct crypto_async_request *req, *backlog; in cryptd_queue_worker() local
149 req = crypto_dequeue_request(&cpu_queue->queue); in cryptd_queue_worker()
153 if (!req) in cryptd_queue_worker()
158 req->complete(req, 0); in cryptd_queue_worker()
200 static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, in cryptd_blkcipher_crypt() argument
211 rctx = ablkcipher_request_ctx(req); in cryptd_blkcipher_crypt()
217 desc.info = req->info; in cryptd_blkcipher_crypt()
220 err = crypt(&desc, req->dst, req->src, req->nbytes); in cryptd_blkcipher_crypt()
222 req->base.complete = rctx->complete; in cryptd_blkcipher_crypt()
226 rctx->complete(&req->base, err); in cryptd_blkcipher_crypt()
[all …]
skcipher.c
51 static int skcipher_crypt_blkcipher(struct skcipher_request *req, in skcipher_crypt_blkcipher() argument
57 struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req); in skcipher_crypt_blkcipher()
61 .info = req->iv, in skcipher_crypt_blkcipher()
62 .flags = req->base.flags, in skcipher_crypt_blkcipher()
66 return crypt(&desc, req->dst, req->src, req->cryptlen); in skcipher_crypt_blkcipher()
69 static int skcipher_encrypt_blkcipher(struct skcipher_request *req) in skcipher_encrypt_blkcipher() argument
71 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); in skcipher_encrypt_blkcipher()
75 return skcipher_crypt_blkcipher(req, alg->encrypt); in skcipher_encrypt_blkcipher()
78 static int skcipher_decrypt_blkcipher(struct skcipher_request *req) in skcipher_decrypt_blkcipher() argument
80 struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); in skcipher_decrypt_blkcipher()
[all …]
ablk_helper.c
54 int __ablk_encrypt(struct ablkcipher_request *req) in __ablk_encrypt() argument
56 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); in __ablk_encrypt()
61 desc.info = req->info; in __ablk_encrypt()
65 &desc, req->dst, req->src, req->nbytes); in __ablk_encrypt()
69 int ablk_encrypt(struct ablkcipher_request *req) in ablk_encrypt() argument
71 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); in ablk_encrypt()
76 ablkcipher_request_ctx(req); in ablk_encrypt()
78 *cryptd_req = *req; in ablk_encrypt()
83 return __ablk_encrypt(req); in ablk_encrypt()
88 int ablk_decrypt(struct ablkcipher_request *req) in ablk_decrypt() argument
[all …]
rsa.c
78 static int rsa_enc(struct akcipher_request *req) in rsa_enc() argument
80 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); in rsa_enc()
94 if (req->dst_len < mpi_get_size(pkey->n)) { in rsa_enc()
95 req->dst_len = mpi_get_size(pkey->n); in rsa_enc()
101 m = mpi_read_raw_from_sgl(req->src, req->src_len); in rsa_enc()
109 ret = mpi_write_to_sgl(c, req->dst, &req->dst_len, &sign); in rsa_enc()
123 static int rsa_dec(struct akcipher_request *req) in rsa_dec() argument
125 struct crypto_akcipher *tfm = crypto_akcipher_reqtfm(req); in rsa_dec()
139 if (req->dst_len < mpi_get_size(pkey->n)) { in rsa_dec()
140 req->dst_len = mpi_get_size(pkey->n); in rsa_dec()
[all …]
eseqiv.c
42 static void eseqiv_complete2(struct skcipher_givcrypt_request *req) in eseqiv_complete2() argument
44 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); in eseqiv_complete2()
45 struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); in eseqiv_complete2()
47 memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail, in eseqiv_complete2()
54 struct skcipher_givcrypt_request *req = base->data; in eseqiv_complete() local
59 eseqiv_complete2(req); in eseqiv_complete()
62 skcipher_givcrypt_complete(req, err); in eseqiv_complete()
65 static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) in eseqiv_givencrypt() argument
67 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); in eseqiv_givencrypt()
69 struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); in eseqiv_givencrypt()
[all …]
mcryptd.c
156 struct crypto_async_request *req, *backlog; in mcryptd_queue_worker() local
174 req = crypto_dequeue_request(&cpu_queue->queue); in mcryptd_queue_worker()
178 if (!req) { in mcryptd_queue_worker()
185 req->complete(req, 0); in mcryptd_queue_worker()
317 static int mcryptd_hash_enqueue(struct ahash_request *req, in mcryptd_hash_enqueue() argument
322 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); in mcryptd_hash_enqueue()
323 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); in mcryptd_hash_enqueue()
327 rctx->complete = req->base.complete; in mcryptd_hash_enqueue()
328 req->base.complete = complete; in mcryptd_hash_enqueue()
330 ret = mcryptd_enqueue_request(queue, &req->base, rctx); in mcryptd_hash_enqueue()
[all …]
/linux-4.4.14/include/crypto/
skcipher.h
58 int (*encrypt)(struct skcipher_request *req);
59 int (*decrypt)(struct skcipher_request *req);
75 struct skcipher_givcrypt_request *req) in skcipher_givcrypt_reqtfm() argument
77 return crypto_ablkcipher_reqtfm(&req->creq); in skcipher_givcrypt_reqtfm()
81 struct skcipher_givcrypt_request *req) in crypto_skcipher_givencrypt() argument
84 crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); in crypto_skcipher_givencrypt()
85 return crt->givencrypt(req); in crypto_skcipher_givencrypt()
89 struct skcipher_givcrypt_request *req) in crypto_skcipher_givdecrypt() argument
92 crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); in crypto_skcipher_givdecrypt()
93 return crt->givdecrypt(req); in crypto_skcipher_givdecrypt()
[all …]
akcipher.h
93 int (*sign)(struct akcipher_request *req);
94 int (*verify)(struct akcipher_request *req);
95 int (*encrypt)(struct akcipher_request *req);
96 int (*decrypt)(struct akcipher_request *req);
161 static inline void akcipher_request_set_tfm(struct akcipher_request *req, in akcipher_request_set_tfm() argument
164 req->base.tfm = crypto_akcipher_tfm(tfm); in akcipher_request_set_tfm()
168 struct akcipher_request *req) in crypto_akcipher_reqtfm() argument
170 return __crypto_akcipher_tfm(req->base.tfm); in crypto_akcipher_reqtfm()
194 struct akcipher_request *req; in akcipher_request_alloc() local
196 req = kmalloc(sizeof(*req) + crypto_akcipher_reqsize(tfm), gfp); in akcipher_request_alloc()
[all …]
hash.h
129 int (*init)(struct ahash_request *req);
130 int (*update)(struct ahash_request *req);
131 int (*final)(struct ahash_request *req);
132 int (*finup)(struct ahash_request *req);
133 int (*digest)(struct ahash_request *req);
134 int (*export)(struct ahash_request *req, void *out);
135 int (*import)(struct ahash_request *req, const void *in);
196 int (*init)(struct ahash_request *req);
197 int (*update)(struct ahash_request *req);
198 int (*final)(struct ahash_request *req);
[all …]
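
The init/update/final/digest hooks listed above are driven through the ahash request API. A minimal one-shot digest sketch, assuming a "sha256" provider is registered; for brevity it also assumes the driver completes inline, whereas a real asynchronous provider would return -EINPROGRESS and signal the request's callback instead:

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Compute SHA-256 of a linear buffer through the ahash interface. */
static int example_ahash_sha256(const void *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ahash("sha256", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, &sg, out, len);

	/* Assumes an inline (synchronous) implementation; see lead-in. */
	ret = crypto_ahash_digest(req);

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return ret;
}
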
aead.h
138 int (*encrypt)(struct aead_request *req);
139 int (*decrypt)(struct aead_request *req);
301 static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req) in crypto_aead_reqtfm() argument
303 return __crypto_aead_cast(req->base.tfm); in crypto_aead_reqtfm()
325 static inline int crypto_aead_encrypt(struct aead_request *req) in crypto_aead_encrypt() argument
327 return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req); in crypto_aead_encrypt()
352 static inline int crypto_aead_decrypt(struct aead_request *req) in crypto_aead_decrypt() argument
354 struct crypto_aead *aead = crypto_aead_reqtfm(req); in crypto_aead_decrypt()
356 if (req->cryptlen < crypto_aead_authsize(aead)) in crypto_aead_decrypt()
359 return crypto_aead_alg(aead)->decrypt(req); in crypto_aead_decrypt()
[all …]
/linux-4.4.14/drivers/s390/cio/
ccwreq.c
42 struct ccw_request *req = &cdev->private->req; in ccwreq_next_path() local
44 if (!req->singlepath) { in ccwreq_next_path()
45 req->mask = 0; in ccwreq_next_path()
48 req->retries = req->maxretries; in ccwreq_next_path()
49 req->mask = lpm_adjust(req->mask >> 1, req->lpm); in ccwreq_next_path()
51 return req->mask; in ccwreq_next_path()
59 struct ccw_request *req = &cdev->private->req; in ccwreq_stop() local
61 if (req->done) in ccwreq_stop()
63 req->done = 1; in ccwreq_stop()
66 if (rc && rc != -ENODEV && req->drc) in ccwreq_stop()
[all …]
device_pgid.c
58 struct ccw_request *req = &cdev->private->req; in nop_build_cp() local
65 req->cp = cp; in nop_build_cp()
74 struct ccw_request *req = &cdev->private->req; in nop_do() local
76 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm & in nop_do()
78 if (!req->lpm) in nop_do()
106 struct ccw_request *req = &cdev->private->req; in nop_callback() local
110 sch->vpm |= req->lpm; in nop_callback()
113 cdev->private->path_noirq_mask |= req->lpm; in nop_callback()
116 cdev->private->path_notoper_mask |= req->lpm; in nop_callback()
122 req->lpm >>= 1; in nop_callback()
[all …]
/linux-4.4.14/arch/powerpc/platforms/52xx/
mpc52xx_lpbfifo.c
53 struct mpc52xx_lpbfifo_request *req; member
63 static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) in mpc52xx_lpbfifo_kick() argument
65 size_t transfer_size = req->size - req->pos; in mpc52xx_lpbfifo_kick()
71 int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); in mpc52xx_lpbfifo_kick()
72 int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; in mpc52xx_lpbfifo_kick()
73 int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; in mpc52xx_lpbfifo_kick()
96 data = req->data + req->pos; in mpc52xx_lpbfifo_kick()
150 bd->data[0] = req->data_phys + req->pos; in mpc52xx_lpbfifo_kick()
164 req->offset + req->pos); in mpc52xx_lpbfifo_kick()
167 bit_fields = req->cs << 24 | 0x000008; in mpc52xx_lpbfifo_kick()
[all …]
/linux-4.4.14/net/ceph/
osd_client.c
32 struct ceph_osd_request *req);
34 struct ceph_osd_request *req);
36 struct ceph_osd_request *req);
37 static void __enqueue_request(struct ceph_osd_request *req);
39 struct ceph_osd_request *req);
315 struct ceph_osd_request *req = container_of(kref, in ceph_osdc_release_request() local
319 dout("%s %p (r_request %p r_reply %p)\n", __func__, req, in ceph_osdc_release_request()
320 req->r_request, req->r_reply); in ceph_osdc_release_request()
321 WARN_ON(!RB_EMPTY_NODE(&req->r_node)); in ceph_osdc_release_request()
322 WARN_ON(!list_empty(&req->r_req_lru_item)); in ceph_osdc_release_request()
[all …]
mon_client.c
434 struct ceph_mon_generic_request *req; in __lookup_generic_req() local
438 req = rb_entry(n, struct ceph_mon_generic_request, node); in __lookup_generic_req()
439 if (tid < req->tid) in __lookup_generic_req()
441 else if (tid > req->tid) in __lookup_generic_req()
444 return req; in __lookup_generic_req()
454 struct ceph_mon_generic_request *req = NULL; in __insert_generic_request() local
458 req = rb_entry(parent, struct ceph_mon_generic_request, node); in __insert_generic_request()
459 if (new->tid < req->tid) in __insert_generic_request()
461 else if (new->tid > req->tid) in __insert_generic_request()
473 struct ceph_mon_generic_request *req = in release_generic_request() local
[all …]
/linux-4.4.14/fs/ncpfs/
sock.c
59 struct list_head req; member
76 struct ncp_request_reply *req; in ncp_alloc_req() local
78 req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL); in ncp_alloc_req()
79 if (!req) in ncp_alloc_req()
82 init_waitqueue_head(&req->wq); in ncp_alloc_req()
83 atomic_set(&req->refs, (1)); in ncp_alloc_req()
84 req->status = RQ_IDLE; in ncp_alloc_req()
86 return req; in ncp_alloc_req()
89 static void ncp_req_get(struct ncp_request_reply *req) in ncp_req_get() argument
91 atomic_inc(&req->refs); in ncp_req_get()
[all …]
/linux-4.4.14/drivers/crypto/marvell/
cipher.c
43 struct ablkcipher_request *req) in mv_cesa_ablkcipher_req_iter_init() argument
45 mv_cesa_req_dma_iter_init(&iter->base, req->nbytes); in mv_cesa_ablkcipher_req_iter_init()
46 mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); in mv_cesa_ablkcipher_req_iter_init()
47 mv_cesa_sg_dma_iter_init(&iter->dst, req->dst, DMA_FROM_DEVICE); in mv_cesa_ablkcipher_req_iter_init()
60 mv_cesa_ablkcipher_dma_cleanup(struct ablkcipher_request *req) in mv_cesa_ablkcipher_dma_cleanup() argument
62 struct mv_cesa_ablkcipher_req *creq = ablkcipher_request_ctx(req); in mv_cesa_ablkcipher_dma_cleanup()
64 if (req->dst != req->src) { in mv_cesa_ablkcipher_dma_cleanup()
65 dma_unmap_sg(cesa_dev->dev, req->dst, creq->dst_nents, in mv_cesa_ablkcipher_dma_cleanup()
67 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, in mv_cesa_ablkcipher_dma_cleanup()
70 dma_unmap_sg(cesa_dev->dev, req->src, creq->src_nents, in mv_cesa_ablkcipher_dma_cleanup()
[all …]
hash.c
27 struct ahash_request *req) in mv_cesa_ahash_req_iter_init() argument
29 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); in mv_cesa_ahash_req_iter_init()
30 unsigned int len = req->nbytes + creq->cache_ptr; in mv_cesa_ahash_req_iter_init()
36 mv_cesa_sg_dma_iter_init(&iter->src, req->src, DMA_TO_DEVICE); in mv_cesa_ahash_req_iter_init()
51 struct mv_cesa_ahash_dma_req *dreq = &creq->req.dma; in mv_cesa_ahash_dma_alloc_cache()
71 static int mv_cesa_ahash_alloc_cache(struct ahash_request *req) in mv_cesa_ahash_alloc_cache() argument
73 struct mv_cesa_ahash_req *creq = ahash_request_ctx(req); in mv_cesa_ahash_alloc_cache()
74 gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? in mv_cesa_ahash_alloc_cache()
81 if (creq->req.base.type == CESA_DMA_REQ) in mv_cesa_ahash_alloc_cache()
92 creq->req.dma.cache_dma); in mv_cesa_ahash_dma_free_cache()
[all …]
/linux-4.4.14/include/net/
request_sock.h
36 struct request_sock *req);
38 struct request_sock *req);
41 void (*destructor)(struct request_sock *req);
42 void (*syn_ack_timeout)(const struct request_sock *req);
45 int inet_rtx_syn_ack(const struct sock *parent, struct request_sock *req);
76 static inline struct sock *req_to_sk(struct request_sock *req) in req_to_sk() argument
78 return (struct sock *)req; in req_to_sk()
85 struct request_sock *req; in reqsk_alloc() local
87 req = kmem_cache_alloc(ops->slab, GFP_ATOMIC | __GFP_NOWARN); in reqsk_alloc()
89 if (req) { in reqsk_alloc()
[all …]
/linux-4.4.14/arch/um/drivers/
mconsole_user.c
40 static int mconsole_reply_v0(struct mc_request *req, char *reply) in mconsole_reply_v0() argument
48 msg.msg_name = &(req->origin); in mconsole_reply_v0()
49 msg.msg_namelen = req->originlen; in mconsole_reply_v0()
56 return sendmsg(req->originating_fd, &msg, 0); in mconsole_reply_v0()
59 static struct mconsole_command *mconsole_parse(struct mc_request *req) in mconsole_parse() argument
66 if (!strncmp(req->request.data, cmd->command, in mconsole_parse()
79 int mconsole_get_request(int fd, struct mc_request *req) in mconsole_get_request() argument
83 req->originlen = sizeof(req->origin); in mconsole_get_request()
84 req->len = recvfrom(fd, &req->request, sizeof(req->request), 0, in mconsole_get_request()
85 (struct sockaddr *) req->origin, &req->originlen); in mconsole_get_request()
[all …]
mconsole_kern.c
59 struct mconsole_entry *req; in mc_work_proc() local
64 req = list_entry(mc_requests.next, struct mconsole_entry, list); in mc_work_proc()
65 list_del(&req->list); in mc_work_proc()
67 req->request.cmd->handler(&req->request); in mc_work_proc()
68 kfree(req); in mc_work_proc()
79 static struct mc_request req; /* that's OK */ in mconsole_interrupt() local
82 while (mconsole_get_request(fd, &req)) { in mconsole_interrupt()
83 if (req.cmd->context == MCONSOLE_INTR) in mconsole_interrupt()
84 (*req.cmd->handler)(&req); in mconsole_interrupt()
88 mconsole_reply(&req, "Out of memory", 1, 0); in mconsole_interrupt()
[all …]
mconsole.h
51 void (*handler)(struct mc_request *req);
72 extern int mconsole_reply_len(struct mc_request *req, const char *reply,
74 extern int mconsole_reply(struct mc_request *req, const char *str, int err,
77 extern void mconsole_version(struct mc_request *req);
78 extern void mconsole_help(struct mc_request *req);
79 extern void mconsole_halt(struct mc_request *req);
80 extern void mconsole_reboot(struct mc_request *req);
81 extern void mconsole_config(struct mc_request *req);
82 extern void mconsole_remove(struct mc_request *req);
83 extern void mconsole_sysrq(struct mc_request *req);
[all …]
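
The mconsole entries above show the UML management-console request/reply cycle: mconsole_get_request() fills a struct mc_request, the matching command handler runs, and the handler answers with mconsole_reply(). A trivial handler sketch matching the prototypes listed above; the last two arguments of mconsole_reply() are assumed to be the error code and a "more output follows" flag:

#include "mconsole.h"

/* Minimal handler: acknowledge a hypothetical "ping" console command. */
static void example_ping_handler(struct mc_request *req)
{
	mconsole_reply(req, "pong", 0, 0);
}
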
ubd_kern.c
47 struct request *req; member
449 struct io_thread_req *req; in ubd_handler() local
456 n = os_read_file(thread_fd, &req, in ubd_handler()
458 if(n != sizeof(req)){ in ubd_handler()
466 blk_end_request(req->req, 0, req->length); in ubd_handler()
467 kfree(req); in ubd_handler()
1191 static void cowify_req(struct io_thread_req *req, unsigned long *bitmap, in cowify_req() argument
1194 __u64 sector = req->offset >> 9; in cowify_req()
1197 if(req->length > (sizeof(req->sector_mask) * 8) << 9) in cowify_req()
1200 if(req->op == UBD_READ) { in cowify_req()
[all …]
/linux-4.4.14/drivers/staging/emxx_udc/
emxx_udc.c
179 udc->ep0_req.req.buf = p_buf; in _nbu2ss_create_ep0_packet()
180 udc->ep0_req.req.length = length; in _nbu2ss_create_ep0_packet()
181 udc->ep0_req.req.dma = 0; in _nbu2ss_create_ep0_packet()
182 udc->ep0_req.req.zero = TRUE; in _nbu2ss_create_ep0_packet()
183 udc->ep0_req.req.complete = _nbu2ss_ep0_complete; in _nbu2ss_create_ep0_packet()
184 udc->ep0_req.req.status = -EINPROGRESS; in _nbu2ss_create_ep0_packet()
185 udc->ep0_req.req.context = udc; in _nbu2ss_create_ep0_packet()
186 udc->ep0_req.req.actual = 0; in _nbu2ss_create_ep0_packet()
475 struct nbu2ss_req *req, in _nbu2ss_dma_map_single() argument
479 if (req->req.dma == DMA_ADDR_INVALID) { in _nbu2ss_dma_map_single()
[all …]
/linux-4.4.14/net/9p/
client.c
239 struct p9_req_t *req; in p9_tag_alloc() local
270 req = &c->reqs[row][col]; in p9_tag_alloc()
271 if (!req->wq) { in p9_tag_alloc()
272 req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_NOFS); in p9_tag_alloc()
273 if (!req->wq) in p9_tag_alloc()
275 init_waitqueue_head(req->wq); in p9_tag_alloc()
278 if (!req->tc) in p9_tag_alloc()
279 req->tc = p9_fcall_alloc(alloc_msize); in p9_tag_alloc()
280 if (!req->rc) in p9_tag_alloc()
281 req->rc = p9_fcall_alloc(alloc_msize); in p9_tag_alloc()
[all …]
/linux-4.4.14/fs/nfs/
pagelist.c
61 hdr->req = nfs_list_entry(mirror->pg_list.next); in nfs_pgheader_init()
63 hdr->cred = hdr->req->wb_context->cred; in nfs_pgheader_init()
64 hdr->io_start = req_offset(hdr->req); in nfs_pgheader_init()
168 nfs_page_group_lock(struct nfs_page *req, bool nonblock) in nfs_page_group_lock() argument
170 struct nfs_page *head = req->wb_head; in nfs_page_group_lock()
191 nfs_page_group_lock_wait(struct nfs_page *req) in nfs_page_group_lock_wait() argument
193 struct nfs_page *head = req->wb_head; in nfs_page_group_lock_wait()
206 nfs_page_group_unlock(struct nfs_page *req) in nfs_page_group_unlock() argument
208 struct nfs_page *head = req->wb_head; in nfs_page_group_unlock()
224 nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit) in nfs_page_group_sync_on_bit_locked() argument
[all …]
write.c
44 static void nfs_redirty_request(struct nfs_page *req);
49 static void nfs_clear_request_commit(struct nfs_page *req);
110 struct nfs_page *req = NULL; in nfs_page_find_head_request_locked() local
113 req = (struct nfs_page *)page_private(page); in nfs_page_find_head_request_locked()
115 req = nfs_page_search_commits_for_head_request_locked(nfsi, in nfs_page_find_head_request_locked()
118 if (req) { in nfs_page_find_head_request_locked()
119 WARN_ON_ONCE(req->wb_head != req); in nfs_page_find_head_request_locked()
120 kref_get(&req->wb_kref); in nfs_page_find_head_request_locked()
123 return req; in nfs_page_find_head_request_locked()
134 struct nfs_page *req = NULL; in nfs_page_find_head_request() local
[all …]
direct.c
293 struct nfs_page *req) in nfs_direct_setup_mirroring() argument
298 mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req); in nfs_direct_setup_mirroring()
402 static void nfs_direct_readpage_release(struct nfs_page *req) in nfs_direct_readpage_release() argument
405 d_inode(req->wb_context->dentry)->i_sb->s_id, in nfs_direct_readpage_release()
406 (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)), in nfs_direct_readpage_release()
407 req->wb_bytes, in nfs_direct_readpage_release()
408 (long long)req_offset(req)); in nfs_direct_readpage_release()
409 nfs_release_request(req); in nfs_direct_readpage_release()
429 struct nfs_page *req = nfs_list_entry(hdr->pages.next); in nfs_direct_read_completion() local
430 struct page *page = req->wb_page; in nfs_direct_read_completion()
[all …]
/linux-4.4.14/drivers/staging/lustre/lustre/mdc/
mdc_reint.c
108 struct ptlrpc_request *req; in mdc_setattr() local
124 req = ptlrpc_request_alloc(class_exp2cliimp(exp), in mdc_setattr()
126 if (req == NULL) { in mdc_setattr()
131 req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, in mdc_setattr()
133 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen); in mdc_setattr()
134 req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, in mdc_setattr()
137 rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); in mdc_setattr()
139 ptlrpc_request_free(req); in mdc_setattr()
149 mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len); in mdc_setattr()
151 ptlrpc_request_set_replen(req); in mdc_setattr()
[all …]
mdc_request.c
58 static inline int mdc_queue_wait(struct ptlrpc_request *req) in mdc_queue_wait() argument
60 struct client_obd *cli = &req->rq_import->imp_obd->u.cli; in mdc_queue_wait()
70 rc = ptlrpc_queue_wait(req); in mdc_queue_wait()
78 struct ptlrpc_request *req; in mdc_getstatus() local
82 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), in mdc_getstatus()
85 if (req == NULL) in mdc_getstatus()
88 mdc_pack_body(req, NULL, 0, 0, -1, 0); in mdc_getstatus()
89 req->rq_send_state = LUSTRE_IMP_FULL; in mdc_getstatus()
91 ptlrpc_request_set_replen(req); in mdc_getstatus()
93 rc = ptlrpc_queue_wait(req); in mdc_getstatus()
[all …]
mdc_locks.c
231 static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc) in mdc_clear_replay_flag() argument
234 if (req->rq_replay) { in mdc_clear_replay_flag()
235 spin_lock(&req->rq_lock); in mdc_clear_replay_flag()
236 req->rq_replay = 0; in mdc_clear_replay_flag()
237 spin_unlock(&req->rq_lock); in mdc_clear_replay_flag()
239 if (rc && req->rq_transno != 0) { in mdc_clear_replay_flag()
240 DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc); in mdc_clear_replay_flag()
256 static void mdc_realloc_openmsg(struct ptlrpc_request *req, in mdc_realloc_openmsg() argument
262 rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4, in mdc_realloc_openmsg()
278 struct ptlrpc_request *req; in mdc_intent_open_pack() local
[all …]
mdc_internal.h
45 void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid,
47 int mdc_pack_req(struct ptlrpc_request *req, int version, int opc);
48 void mdc_is_subdir_pack(struct ptlrpc_request *req, const struct lu_fid *pfid,
50 void mdc_swap_layouts_pack(struct ptlrpc_request *req,
52 void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, __u32 size,
54 void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
56 void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
58 void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
61 void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
64 void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
[all …]
/linux-4.4.14/drivers/media/usb/dvb-usb-v2/
ec168.c
28 static int ec168_ctrl_msg(struct dvb_usb_device *d, struct ec168_req *req) in ec168_ctrl_msg() argument
35 switch (req->cmd) { in ec168_ctrl_msg()
41 request = req->cmd; in ec168_ctrl_msg()
45 request = req->cmd; in ec168_ctrl_msg()
65 KBUILD_MODNAME, req->cmd); in ec168_ctrl_msg()
70 buf = kmalloc(req->size, GFP_KERNEL); in ec168_ctrl_msg()
78 memcpy(buf, req->data, req->size); in ec168_ctrl_msg()
87 ret = usb_control_msg(d->udev, pipe, request, requesttype, req->value, in ec168_ctrl_msg()
88 req->index, buf, req->size, EC168_USB_TIMEOUT); in ec168_ctrl_msg()
90 dvb_usb_dbg_usb_control_msg(d->udev, request, requesttype, req->value, in ec168_ctrl_msg()
[all …]
ce6230.c
26 static int ce6230_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req) in ce6230_ctrl_msg() argument
36 request = req->cmd; in ce6230_ctrl_msg()
37 value = req->value; in ce6230_ctrl_msg()
38 index = req->index; in ce6230_ctrl_msg()
40 switch (req->cmd) { in ce6230_ctrl_msg()
53 KBUILD_MODNAME, req->cmd); in ce6230_ctrl_msg()
58 buf = kmalloc(req->data_len, GFP_KERNEL); in ce6230_ctrl_msg()
66 memcpy(buf, req->data, req->data_len); in ce6230_ctrl_msg()
76 buf, req->data_len, CE6230_USB_TIMEOUT); in ce6230_ctrl_msg()
79 buf, req->data_len); in ce6230_ctrl_msg()
[all …]
af9015.c
31 static int af9015_ctrl_msg(struct dvb_usb_device *d, struct req_t *req) in af9015_ctrl_msg() argument
41 state->buf[0] = req->cmd; in af9015_ctrl_msg()
43 state->buf[2] = req->i2c_addr; in af9015_ctrl_msg()
44 state->buf[3] = req->addr >> 8; in af9015_ctrl_msg()
45 state->buf[4] = req->addr & 0xff; in af9015_ctrl_msg()
46 state->buf[5] = req->mbox; in af9015_ctrl_msg()
47 state->buf[6] = req->addr_len; in af9015_ctrl_msg()
48 state->buf[7] = req->data_len; in af9015_ctrl_msg()
50 switch (req->cmd) { in af9015_ctrl_msg()
63 if (((req->addr & 0xff00) == 0xff00) || in af9015_ctrl_msg()
[all …]
/linux-4.4.14/drivers/base/power/
qos.c
141 static int apply_constraint(struct dev_pm_qos_request *req, in apply_constraint() argument
144 struct dev_pm_qos *qos = req->dev->power.qos; in apply_constraint()
147 switch(req->type) { in apply_constraint()
150 &req->data.pnode, action, value); in apply_constraint()
155 req); in apply_constraint()
160 &req->data.pnode, action, value); in apply_constraint()
163 req->dev->power.set_latency_tolerance(req->dev, value); in apply_constraint()
167 ret = pm_qos_update_flags(&qos->flags, &req->data.flr, in apply_constraint()
237 struct dev_pm_qos_request *req, *tmp; in dev_pm_qos_constraints_destroy() local
261 plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { in dev_pm_qos_constraints_destroy()
[all …]
/linux-4.4.14/net/bluetooth/
hci_request.c
30 void hci_req_init(struct hci_request *req, struct hci_dev *hdev) in hci_req_init() argument
32 skb_queue_head_init(&req->cmd_q); in hci_req_init()
33 req->hdev = hdev; in hci_req_init()
34 req->err = 0; in hci_req_init()
37 static int req_run(struct hci_request *req, hci_req_complete_t complete, in req_run() argument
40 struct hci_dev *hdev = req->hdev; in req_run()
44 BT_DBG("length %u", skb_queue_len(&req->cmd_q)); in req_run()
49 if (req->err) { in req_run()
50 skb_queue_purge(&req->cmd_q); in req_run()
51 return req->err; in req_run()
[all …]
hci_request.h
33 void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
34 int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
35 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
36 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
38 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
47 void hci_req_add_le_scan_disable(struct hci_request *req);
48 void hci_req_add_le_passive_scan(struct hci_request *req);
51 void __hci_update_page_scan(struct hci_request *req);
53 int hci_update_random_address(struct hci_request *req, bool require_privacy,
57 void __hci_update_background_scan(struct hci_request *req);
[all …]
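
The hci_request helpers declared above work as a small batch API: initialise a request against an hdev, queue one or more commands with hci_req_add(), then submit the batch with hci_req_run(). A sketch of that sequence; HCI_OP_WRITE_SCAN_ENABLE is assumed to be the standard opcode constant from <net/bluetooth/hci.h>, and the completion callback is left NULL to keep the example minimal:

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include "hci_request.h"

/* Queue a single Write Scan Enable command through the request machinery. */
static int example_write_scan_enable(struct hci_dev *hdev, u8 scan)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	/* NULL completion callback: fire and forget. */
	return hci_req_run(&req, NULL);
}
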
/linux-4.4.14/block/
Dbsg-lib.c55 struct request *req = job->req; in bsg_job_done() local
56 struct request *rsp = req->next_rq; in bsg_job_done()
59 err = job->req->errors = result; in bsg_job_done()
62 job->req->sense_len = sizeof(u32); in bsg_job_done()
64 job->req->sense_len = job->reply_len; in bsg_job_done()
66 req->resid_len = 0; in bsg_job_done()
74 blk_complete_request(req); in bsg_job_done()
90 static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) in bsg_map_buffer() argument
92 size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); in bsg_map_buffer()
94 BUG_ON(!req->nr_phys_segments); in bsg_map_buffer()
[all …]
blk-merge.c
470 struct request *req, in ll_new_hw_segment() argument
475 if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) in ll_new_hw_segment()
478 if (blk_integrity_merge_bio(q, req, bio) == false) in ll_new_hw_segment()
485 req->nr_phys_segments += nr_phys_segs; in ll_new_hw_segment()
489 req->cmd_flags |= REQ_NOMERGE; in ll_new_hw_segment()
490 if (req == q->last_merge) in ll_new_hw_segment()
495 int ll_back_merge_fn(struct request_queue *q, struct request *req, in ll_back_merge_fn() argument
498 if (req_gap_back_merge(req, bio)) in ll_back_merge_fn()
500 if (blk_integrity_rq(req) && in ll_back_merge_fn()
501 integrity_req_gap_back_merge(req, bio)) in ll_back_merge_fn()
[all …]
blk-timeout.c
78 void blk_delete_timer(struct request *req) in blk_delete_timer() argument
80 list_del_init(&req->timeout_list); in blk_delete_timer()
83 static void blk_rq_timed_out(struct request *req) in blk_rq_timed_out() argument
85 struct request_queue *q = req->q; in blk_rq_timed_out()
89 ret = q->rq_timed_out_fn(req); in blk_rq_timed_out()
93 __blk_complete_request(req); in blk_rq_timed_out()
96 blk_add_timer(req); in blk_rq_timed_out()
97 blk_clear_rq_complete(req); in blk_rq_timed_out()
157 void blk_abort_request(struct request *req) in blk_abort_request() argument
159 if (blk_mark_rq_complete(req)) in blk_abort_request()
[all …]
blk-core.c
1461 void __blk_put_request(struct request_queue *q, struct request *req) in __blk_put_request() argument
1467 blk_mq_free_request(req); in __blk_put_request()
1471 blk_pm_put_request(req); in __blk_put_request()
1473 elv_completed_request(q, req); in __blk_put_request()
1476 WARN_ON(req->bio != NULL); in __blk_put_request()
1482 if (req->cmd_flags & REQ_ALLOCED) { in __blk_put_request()
1483 unsigned int flags = req->cmd_flags; in __blk_put_request()
1484 struct request_list *rl = blk_rq_rl(req); in __blk_put_request()
1486 BUG_ON(!list_empty(&req->queuelist)); in __blk_put_request()
1487 BUG_ON(ELV_ON_HASH(req)); in __blk_put_request()
[all …]
/linux-4.4.14/drivers/scsi/device_handler/
Dscsi_dh_hp_sw.c108 struct request *req; in hp_sw_tur() local
112 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); in hp_sw_tur()
113 if (IS_ERR(req)) in hp_sw_tur()
116 blk_rq_set_block_pc(req); in hp_sw_tur()
117 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | in hp_sw_tur()
119 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); in hp_sw_tur()
120 req->cmd[0] = TEST_UNIT_READY; in hp_sw_tur()
121 req->timeout = HP_SW_TIMEOUT; in hp_sw_tur()
122 req->sense = h->sense; in hp_sw_tur()
123 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); in hp_sw_tur()
[all …]
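
The hp_sw_tur() excerpt above stops right after the request has been set up; below is a sketch of the complete pattern for issuing a SCSI TEST UNIT READY as a block-PC request. The blk_execute_rq()/blk_put_request() tail and the 60-second timeout are assumptions about how the elided part typically continues, not copied from it:

#include <linux/blkdev.h>
#include <linux/string.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Send TEST UNIT READY on sdev's queue and return the block-layer error. */
static int example_send_tur(struct scsi_device *sdev, unsigned char *sense)
{
	struct request *req;
	int err;

	req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO);
	if (IS_ERR(req))
		return PTR_ERR(req);

	blk_rq_set_block_pc(req);
	req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
			  REQ_FAILFAST_DRIVER;
	req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY);
	req->cmd[0] = TEST_UNIT_READY;
	req->timeout = 60 * HZ;
	req->sense = sense;
	memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE);
	req->sense_len = 0;

	/* Execute synchronously at the tail of the queue, then release. */
	blk_execute_rq(req->q, NULL, req, 0);
	err = req->errors;

	blk_put_request(req);
	return err;
}
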
/linux-4.4.14/kernel/power/
Dqos.c192 struct pm_qos_request *req; in pm_qos_dbg_show_requests() local
229 plist_for_each_entry(req, &c->list, node) { in pm_qos_dbg_show_requests()
232 if ((req->node).prio != c->default_value) { in pm_qos_dbg_show_requests()
238 (req->node).prio, state); in pm_qos_dbg_show_requests()
331 struct pm_qos_flags_request *req) in pm_qos_flags_remove_req() argument
335 list_del(&req->node); in pm_qos_flags_remove_req()
336 list_for_each_entry(req, &pqf->list, node) in pm_qos_flags_remove_req()
337 val |= req->flags; in pm_qos_flags_remove_req()
354 struct pm_qos_flags_request *req, in pm_qos_update_flags() argument
366 pm_qos_flags_remove_req(pqf, req); in pm_qos_update_flags()
[all …]
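
The kernel/power/qos.c excerpt shows the constraint lists behind PM QoS; from a driver's point of view the usual entry points are pm_qos_add_request(), pm_qos_update_request() and pm_qos_remove_request(). A sketch of that caller-side usage; these helpers and the PM_QOS_CPU_DMA_LATENCY class are long-standing kernel APIs, but they are not themselves shown in the excerpt:

#include <linux/pm_qos.h>

static struct pm_qos_request example_cpu_lat_req;

/* Cap CPU wakeup latency at 20 us around a latency-critical section. */
static void example_pm_qos_usage(void)
{
	pm_qos_add_request(&example_cpu_lat_req, PM_QOS_CPU_DMA_LATENCY, 20);

	/* ... latency-sensitive work ... */

	pm_qos_remove_request(&example_cpu_lat_req);
}
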
/linux-4.4.14/fs/fuse/
Ddev.c37 static void fuse_request_init(struct fuse_req *req, struct page **pages, in fuse_request_init() argument
41 memset(req, 0, sizeof(*req)); in fuse_request_init()
44 INIT_LIST_HEAD(&req->list); in fuse_request_init()
45 INIT_LIST_HEAD(&req->intr_entry); in fuse_request_init()
46 init_waitqueue_head(&req->waitq); in fuse_request_init()
47 atomic_set(&req->count, 1); in fuse_request_init()
48 req->pages = pages; in fuse_request_init()
49 req->page_descs = page_descs; in fuse_request_init()
50 req->max_pages = npages; in fuse_request_init()
51 __set_bit(FR_PENDING, &req->flags); in fuse_request_init()
[all …]
Dfile.c84 static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) in fuse_release_end() argument
86 iput(req->misc.release.inode); in fuse_release_end()
92 struct fuse_req *req = ff->reserved_req; in fuse_file_put() local
99 __clear_bit(FR_BACKGROUND, &req->flags); in fuse_file_put()
100 iput(req->misc.release.inode); in fuse_file_put()
101 fuse_put_request(ff->fc, req); in fuse_file_put()
103 __clear_bit(FR_BACKGROUND, &req->flags); in fuse_file_put()
104 fuse_request_send(ff->fc, req); in fuse_file_put()
105 iput(req->misc.release.inode); in fuse_file_put()
106 fuse_put_request(ff->fc, req); in fuse_file_put()
[all …]
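Editor's note: the fuse dev.c lines above index a request-object initializer (zero the structure, seed the refcount at one, put list links in an empty state, mark the request pending). A small userspace model of that initialization order follows, assuming a trivial list type; nothing here is the fuse API.

#include <stdatomic.h>
#include <string.h>
#include <stdio.h>

struct list_node { struct list_node *next, *prev; };

static void list_init(struct list_node *n) { n->next = n->prev = n; }

struct my_req {
    struct list_node  list;       /* queueing link                 */
    atomic_int        refcount;   /* dropped by a later put()      */
    unsigned          flags;      /* bit 0 == pending              */
    void             *pages;      /* payload, owned elsewhere      */
};

static void my_req_init(struct my_req *req, void *pages)
{
    memset(req, 0, sizeof(*req));
    list_init(&req->list);                /* safe to unlink later   */
    atomic_store(&req->refcount, 1);      /* caller holds one ref   */
    req->flags |= 1u << 0;                /* mark as pending        */
    req->pages = pages;
}

int main(void)
{
    struct my_req r;
    my_req_init(&r, NULL);
    printf("refs=%d pending=%u\n", atomic_load(&r.refcount), r.flags & 1u);
    return 0;
}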
/linux-4.4.14/drivers/i2c/busses/
Di2c-opal.c51 static int i2c_opal_send_request(u32 bus_id, struct opal_i2c_request *req) in i2c_opal_send_request() argument
64 rc = opal_i2c_request(token, bus_id, req); in i2c_opal_send_request()
89 struct opal_i2c_request req; in i2c_opal_master_xfer() local
95 memset(&req, 0, sizeof(req)); in i2c_opal_master_xfer()
100 req.type = (msgs[0].flags & I2C_M_RD) ? in i2c_opal_master_xfer()
102 req.addr = cpu_to_be16(msgs[0].addr); in i2c_opal_master_xfer()
103 req.size = cpu_to_be32(msgs[0].len); in i2c_opal_master_xfer()
104 req.buffer_ra = cpu_to_be64(__pa(msgs[0].buf)); in i2c_opal_master_xfer()
107 req.type = (msgs[1].flags & I2C_M_RD) ? in i2c_opal_master_xfer()
109 req.addr = cpu_to_be16(msgs[0].addr); in i2c_opal_master_xfer()
[all …]
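Editor's note: the i2c-opal lines above index a request being marshalled into a fixed big-endian layout before it is handed to firmware. The sketch below shows the same cpu-to-big-endian step in userspace using glibc's htobe16/htobe32/htobe64; the wire_req layout is invented for illustration.

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct wire_req {                /* all multi-byte fields big-endian */
    uint8_t  type;
    uint8_t  pad;
    uint16_t addr;
    uint32_t size;
    uint64_t buffer_pa;
} __attribute__((packed));

static void wire_req_fill(struct wire_req *req, uint8_t type,
                          uint16_t addr, uint32_t size, uint64_t pa)
{
    memset(req, 0, sizeof(*req));
    req->type      = type;
    req->addr      = htobe16(addr);
    req->size      = htobe32(size);
    req->buffer_pa = htobe64(pa);
}

int main(void)
{
    struct wire_req r;

    wire_req_fill(&r, 1, 0x50, 16, 0x1000);
    printf("first bytes: %02x %02x %02x %02x\n",
           ((uint8_t *)&r)[0], ((uint8_t *)&r)[1],
           ((uint8_t *)&r)[2], ((uint8_t *)&r)[3]);
    return 0;
}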
/linux-4.4.14/fs/ubifs/
Dbudget.c375 const struct ubifs_budget_req *req) in calc_idx_growth() argument
379 znodes = req->new_ino + (req->new_page << UBIFS_BLOCKS_PER_PAGE_SHIFT) + in calc_idx_growth()
380 req->new_dent; in calc_idx_growth()
391 const struct ubifs_budget_req *req) in calc_data_growth() argument
395 data_growth = req->new_ino ? c->bi.inode_budget : 0; in calc_data_growth()
396 if (req->new_page) in calc_data_growth()
398 if (req->new_dent) in calc_data_growth()
400 data_growth += req->new_ino_d; in calc_data_growth()
411 const struct ubifs_budget_req *req) in calc_dd_growth() argument
415 dd_growth = req->dirtied_page ? c->bi.page_budget : 0; in calc_dd_growth()
[all …]
/linux-4.4.14/drivers/usb/isp1760/
Disp1760-udc.c30 struct usb_request req; member
46 static inline struct isp1760_request *req_to_udc_req(struct usb_request *req) in req_to_udc_req() argument
48 return container_of(req, struct isp1760_request, req); in req_to_udc_req()
127 struct isp1760_request *req, in isp1760_udc_request_complete() argument
134 req, status); in isp1760_udc_request_complete()
136 req->ep = NULL; in isp1760_udc_request_complete()
137 req->req.status = status; in isp1760_udc_request_complete()
138 req->req.complete(&ep->ep, &req->req); in isp1760_udc_request_complete()
180 struct isp1760_request *req) in isp1760_udc_receive() argument
191 __func__, len, req->req.actual, req->req.length); in isp1760_udc_receive()
[all …]
/linux-4.4.14/drivers/staging/lustre/lustre/include/
Dlustre_net.h269 #define ptlrpc_req_async_args(req) ((void *)&req->rq_async_args) argument
484 struct ptlrpc_request *req,
1519 struct ptlrpc_request *req, int rc) in ptlrpc_req_interpret() argument
1521 if (req->rq_interpret_reply != NULL) { in ptlrpc_req_interpret()
1522 req->rq_status = req->rq_interpret_reply(env, req, in ptlrpc_req_interpret()
1523 &req->rq_async_args, in ptlrpc_req_interpret()
1525 return req->rq_status; in ptlrpc_req_interpret()
1536 static inline bool ptlrpc_nrs_req_can_move(struct ptlrpc_request *req) in ptlrpc_nrs_req_can_move() argument
1538 struct ptlrpc_nrs_request *nrq = &req->rq_nrq; in ptlrpc_nrs_req_can_move()
1546 return nrq->nr_enqueued && !nrq->nr_started && !req->rq_hp; in ptlrpc_nrs_req_can_move()
[all …]
Dlustre_sec.h386 struct ptlrpc_request *req);
399 struct ptlrpc_request *req);
412 struct ptlrpc_request *req);
425 struct ptlrpc_request *req);
448 struct ptlrpc_request *req,
465 struct ptlrpc_request *req,
612 struct ptlrpc_request *req,
623 struct ptlrpc_request *req);
636 struct ptlrpc_request *req,
649 struct ptlrpc_request *req);
[all …]
/linux-4.4.14/arch/m68k/mac/
Dmisc.c39 struct adb_request req; in cuda_read_time() local
42 if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0) in cuda_read_time()
44 while (!req.complete) in cuda_read_time()
47 time = (req.reply[3] << 24) | (req.reply[4] << 16) in cuda_read_time()
48 | (req.reply[5] << 8) | req.reply[6]; in cuda_read_time()
54 struct adb_request req; in cuda_write_time() local
56 if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME, in cuda_write_time()
60 while (!req.complete) in cuda_write_time()
66 struct adb_request req; in cuda_read_pram() local
67 if (cuda_request(&req, NULL, 4, CUDA_PACKET, CUDA_GET_PRAM, in cuda_read_pram()
[all …]
/linux-4.4.14/drivers/staging/lustre/lustre/lov/
Dlov_request.c64 struct lov_request *req = list_entry(pos, in lov_finish_set() local
67 list_del_init(&req->rq_link); in lov_finish_set()
69 if (req->rq_oi.oi_oa) in lov_finish_set()
70 kmem_cache_free(obdo_cachep, req->rq_oi.oi_oa); in lov_finish_set()
71 kfree(req->rq_oi.oi_osfs); in lov_finish_set()
72 kfree(req); in lov_finish_set()
93 struct lov_request *req, int rc) in lov_update_set() argument
95 req->rq_complete = 1; in lov_update_set()
96 req->rq_rc = rc; in lov_update_set()
106 struct lov_request *req, int rc) in lov_update_common_set() argument
[all …]
/linux-4.4.14/fs/nilfs2/
Ddat.c54 struct nilfs_palloc_req *req, int create) in nilfs_dat_prepare_entry() argument
56 return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr, in nilfs_dat_prepare_entry()
57 create, &req->pr_entry_bh); in nilfs_dat_prepare_entry()
61 struct nilfs_palloc_req *req) in nilfs_dat_commit_entry() argument
63 mark_buffer_dirty(req->pr_entry_bh); in nilfs_dat_commit_entry()
65 brelse(req->pr_entry_bh); in nilfs_dat_commit_entry()
69 struct nilfs_palloc_req *req) in nilfs_dat_abort_entry() argument
71 brelse(req->pr_entry_bh); in nilfs_dat_abort_entry()
74 int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req) in nilfs_dat_prepare_alloc() argument
78 ret = nilfs_palloc_prepare_alloc_entry(dat, req); in nilfs_dat_prepare_alloc()
[all …]
Difile.c68 struct nilfs_palloc_req req; in nilfs_ifile_create_inode() local
71 req.pr_entry_nr = 0; /* 0 says find free inode from beginning of in nilfs_ifile_create_inode()
73 req.pr_entry_bh = NULL; in nilfs_ifile_create_inode()
75 ret = nilfs_palloc_prepare_alloc_entry(ifile, &req); in nilfs_ifile_create_inode()
77 ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1, in nilfs_ifile_create_inode()
78 &req.pr_entry_bh); in nilfs_ifile_create_inode()
80 nilfs_palloc_abort_alloc_entry(ifile, &req); in nilfs_ifile_create_inode()
83 brelse(req.pr_entry_bh); in nilfs_ifile_create_inode()
86 nilfs_palloc_commit_alloc_entry(ifile, &req); in nilfs_ifile_create_inode()
87 mark_buffer_dirty(req.pr_entry_bh); in nilfs_ifile_create_inode()
[all …]
Dalloc.c509 struct nilfs_palloc_req *req) in nilfs_palloc_prepare_alloc_entry() argument
524 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); in nilfs_palloc_prepare_alloc_entry()
531 maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr, in nilfs_palloc_prepare_alloc_entry()
558 req->pr_entry_nr = in nilfs_palloc_prepare_alloc_entry()
563 req->pr_desc_bh = desc_bh; in nilfs_palloc_prepare_alloc_entry()
564 req->pr_bitmap_bh = bitmap_bh; in nilfs_palloc_prepare_alloc_entry()
593 struct nilfs_palloc_req *req) in nilfs_palloc_commit_alloc_entry() argument
595 mark_buffer_dirty(req->pr_bitmap_bh); in nilfs_palloc_commit_alloc_entry()
596 mark_buffer_dirty(req->pr_desc_bh); in nilfs_palloc_commit_alloc_entry()
599 brelse(req->pr_bitmap_bh); in nilfs_palloc_commit_alloc_entry()
[all …]
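Editor's note: the nilfs2 dat.c/ifile.c/alloc.c lines above index a prepare/commit/abort allocation protocol: "prepare" reserves a slot, the caller does further work, then either commits the reservation or aborts and returns it. A minimal standalone model of that two-phase flow follows; the bitmap allocator and every name are hypothetical.

#include <stdio.h>
#include <stdbool.h>

#define NSLOTS 8

struct alloc_req { int slot; };

static bool reserved[NSLOTS];   /* handed out by prepare()           */
static bool committed[NSLOTS];  /* made permanent by commit()        */

static int prepare_alloc(struct alloc_req *req)
{
    for (int i = 0; i < NSLOTS; i++) {
        if (!reserved[i]) {
            reserved[i] = true;
            req->slot = i;
            return 0;
        }
    }
    return -1;                  /* no space */
}

static void commit_alloc(const struct alloc_req *req)
{
    committed[req->slot] = true;
}

static void abort_alloc(const struct alloc_req *req)
{
    reserved[req->slot] = false;   /* give the reservation back */
}

static int create_object(bool second_step_ok)
{
    struct alloc_req req;

    if (prepare_alloc(&req) < 0)
        return -1;
    if (!second_step_ok) {      /* e.g. fetching the entry block failed */
        abort_alloc(&req);
        return -1;
    }
    commit_alloc(&req);
    return req.slot;
}

int main(void)
{
    printf("ok: slot %d\n", create_object(true));
    printf("fail: %d (slot returned to pool)\n", create_object(false));
    printf("ok again: slot %d\n", create_object(true));
    return 0;
}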
/linux-4.4.14/drivers/net/ethernet/emulex/benet/
Dbe_cmds.c935 struct be_cmd_req_eq_create *req; in be_cmd_eq_create() local
943 req = embedded_payload(wrb); in be_cmd_eq_create()
945 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, in be_cmd_eq_create()
946 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, in be_cmd_eq_create()
953 req->hdr.version = ver; in be_cmd_eq_create()
954 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); in be_cmd_eq_create()
956 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); in be_cmd_eq_create()
958 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); in be_cmd_eq_create()
959 AMAP_SET_BITS(struct amap_eq_context, count, req->context, in be_cmd_eq_create()
961 be_dws_cpu_to_le(req->context, sizeof(req->context)); in be_cmd_eq_create()
[all …]
/linux-4.4.14/drivers/target/sbp/
Dsbp_target.c67 static int read_peer_guid(u64 *guid, const struct sbp_management_request *req) in read_peer_guid() argument
72 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, in read_peer_guid()
73 req->node_addr, req->generation, req->speed, in read_peer_guid()
79 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, in read_peer_guid()
80 req->node_addr, req->generation, req->speed, in read_peer_guid()
290 struct sbp_management_agent *agent, struct sbp_management_request *req, in sbp_management_request_login() argument
303 LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret); in sbp_management_request_login()
306 LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); in sbp_management_request_login()
308 req->status.status = cpu_to_be32( in sbp_management_request_login()
314 ret = read_peer_guid(&guid, req); in sbp_management_request_login()
[all …]
/linux-4.4.14/drivers/scsi/csiostor/
Dcsio_scsi.c163 csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr) in csio_scsi_fcp_cmnd() argument
166 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); in csio_scsi_fcp_cmnd()
178 if (req->nsge) in csio_scsi_fcp_cmnd()
179 if (req->datadir == DMA_TO_DEVICE) in csio_scsi_fcp_cmnd()
201 csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size) in csio_scsi_init_cmd_wr() argument
203 struct csio_hw *hw = req->lnode->hwp; in csio_scsi_init_cmd_wr()
204 struct csio_rnode *rn = req->rnode; in csio_scsi_init_cmd_wr()
215 wr->cookie = (uintptr_t) req; in csio_scsi_init_cmd_wr()
216 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); in csio_scsi_init_cmd_wr()
217 wr->tmo_val = (uint8_t) req->tmo; in csio_scsi_init_cmd_wr()
[all …]
/linux-4.4.14/drivers/base/
Ddevtmpfs.c39 static struct req { struct
40 struct req *next; argument
85 struct req req; in devtmpfs_create_node() local
90 req.mode = 0; in devtmpfs_create_node()
91 req.uid = GLOBAL_ROOT_UID; in devtmpfs_create_node()
92 req.gid = GLOBAL_ROOT_GID; in devtmpfs_create_node()
93 req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp); in devtmpfs_create_node()
94 if (!req.name) in devtmpfs_create_node()
97 if (req.mode == 0) in devtmpfs_create_node()
98 req.mode = 0600; in devtmpfs_create_node()
[all …]
/linux-4.4.14/drivers/usb/gadget/udc/
Dgr_udc.c96 struct gr_request *req) in gr_dbgprint_request() argument
98 int buflen = ep->is_in ? req->req.length : req->req.actual; in gr_dbgprint_request()
102 dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen, in gr_dbgprint_request()
105 rowlen, 4, req->req.buf, plen, false); in gr_dbgprint_request()
117 struct gr_request *req) {} in gr_dbgprint_request() argument
134 struct gr_request *req; in gr_seq_ep_show() local
167 list_for_each_entry(req, &ep->queue, queue) { in gr_seq_ep_show()
171 seq_printf(seq, " 0x%p: 0x%p %d %d\n", req, in gr_seq_ep_show()
172 &req->req.buf, req->req.actual, req->req.length); in gr_seq_ep_show()
174 next = req->first_desc; in gr_seq_ep_show()
[all …]
Dpxa25x_udc.c303 struct pxa25x_request *req; in pxa25x_ep_alloc_request() local
305 req = kzalloc(sizeof(*req), gfp_flags); in pxa25x_ep_alloc_request()
306 if (!req) in pxa25x_ep_alloc_request()
309 INIT_LIST_HEAD (&req->queue); in pxa25x_ep_alloc_request()
310 return &req->req; in pxa25x_ep_alloc_request()
320 struct pxa25x_request *req; in pxa25x_ep_free_request() local
322 req = container_of (_req, struct pxa25x_request, req); in pxa25x_ep_free_request()
323 WARN_ON(!list_empty (&req->queue)); in pxa25x_ep_free_request()
324 kfree(req); in pxa25x_ep_free_request()
332 static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status) in done() argument
[all …]
Dgoku_udc.c268 struct goku_request *req; in goku_alloc_request() local
272 req = kzalloc(sizeof *req, gfp_flags); in goku_alloc_request()
273 if (!req) in goku_alloc_request()
276 INIT_LIST_HEAD(&req->queue); in goku_alloc_request()
277 return &req->req; in goku_alloc_request()
283 struct goku_request *req; in goku_free_request() local
288 req = container_of(_req, struct goku_request, req); in goku_free_request()
289 WARN_ON(!list_empty(&req->queue)); in goku_free_request()
290 kfree(req); in goku_free_request()
296 done(struct goku_ep *ep, struct goku_request *req, int status) in done() argument
[all …]
Dpxa27x_udc.c142 struct pxa27x_request *req; in queues_dbg_show() local
160 list_for_each_entry(req, &ep->queue, queue) { in queues_dbg_show()
162 &req->req, req->req.actual, in queues_dbg_show()
163 req->req.length, req->req.buf); in queues_dbg_show()
647 struct pxa27x_request *req; in pxa_ep_alloc_request() local
649 req = kzalloc(sizeof *req, gfp_flags); in pxa_ep_alloc_request()
650 if (!req) in pxa_ep_alloc_request()
653 INIT_LIST_HEAD(&req->queue); in pxa_ep_alloc_request()
654 req->in_use = 0; in pxa_ep_alloc_request()
655 req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); in pxa_ep_alloc_request()
[all …]
Datmel_usba_udc.c39 struct usba_request *req, *req_copy; in queue_dbg_open() local
48 list_for_each_entry(req, &ep->queue, queue) { in queue_dbg_open()
49 req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC); in queue_dbg_open()
61 list_for_each_entry_safe(req, req_copy, queue_data, queue) { in queue_dbg_open()
62 list_del(&req->queue); in queue_dbg_open()
63 kfree(req); in queue_dbg_open()
87 struct usba_request *req, *tmp_req; in queue_dbg_read() local
95 list_for_each_entry_safe(req, tmp_req, queue, queue) { in queue_dbg_read()
98 req->req.buf, req->req.length, in queue_dbg_read()
99 req->req.no_interrupt ? 'i' : 'I', in queue_dbg_read()
[all …]
Domap_udc.c270 struct omap_req *req; in omap_alloc_request() local
272 req = kzalloc(sizeof(*req), gfp_flags); in omap_alloc_request()
273 if (!req) in omap_alloc_request()
276 INIT_LIST_HEAD(&req->queue); in omap_alloc_request()
278 return &req->req; in omap_alloc_request()
284 struct omap_req *req = container_of(_req, struct omap_req, req); in omap_free_request() local
286 kfree(req); in omap_free_request()
292 done(struct omap_ep *ep, struct omap_req *req, int status) in done() argument
297 list_del_init(&req->queue); in done()
299 if (req->req.status == -EINPROGRESS) in done()
[all …]
Damd5536udc.c555 udc_free_request(&ep->ep, &ep->bna_dummy_req->req); in udc_ep_disable()
567 struct udc_request *req; in udc_alloc_request() local
577 req = kzalloc(sizeof(struct udc_request), gfp); in udc_alloc_request()
578 if (!req) in udc_alloc_request()
581 req->req.dma = DMA_DONT_USE; in udc_alloc_request()
582 INIT_LIST_HEAD(&req->queue); in udc_alloc_request()
587 &req->td_phys); in udc_alloc_request()
589 kfree(req); in udc_alloc_request()
595 req, dma_desc, in udc_alloc_request()
596 (unsigned long)req->td_phys); in udc_alloc_request()
[all …]
Dfotg210-udc.c61 static void fotg210_done(struct fotg210_ep *ep, struct fotg210_request *req, in fotg210_done() argument
64 list_del_init(&req->queue); in fotg210_done()
68 req->req.status = -ESHUTDOWN; in fotg210_done()
70 req->req.status = status; in fotg210_done()
73 usb_gadget_giveback_request(&ep->ep, &req->req); in fotg210_done()
215 struct fotg210_request *req; in fotg210_ep_disable() local
223 req = list_entry(ep->queue.next, in fotg210_ep_disable()
226 fotg210_done(ep, req, -ECONNRESET); in fotg210_ep_disable()
236 struct fotg210_request *req; in fotg210_ep_alloc_request() local
238 req = kzalloc(sizeof(struct fotg210_request), gfp_flags); in fotg210_ep_alloc_request()
[all …]
Dr8a66597-udc.c38 static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
40 struct r8a66597_request *req);
45 struct r8a66597_request *req, int status);
572 struct r8a66597_request *req) in start_ep0_write() argument
579 if (req->req.length == 0) { in start_ep0_write()
582 transfer_complete(ep, req, 0); in start_ep0_write()
585 irq_ep0_write(ep, req); in start_ep0_write()
634 struct r8a66597_request *req) in sudmac_alloc_channel() argument
666 return usb_gadget_map_request(&r8a66597->gadget, &req->req, dma->dir); in sudmac_alloc_channel()
671 struct r8a66597_request *req) in sudmac_free_channel() argument
[all …]
Dmv_u3d_core.c134 actual = curr_req->req.length; in mv_u3d_process_ep_req()
177 curr_req->req.actual = actual; in mv_u3d_process_ep_req()
187 void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status) in mv_u3d_done() argument
195 list_del_init(&req->queue); in mv_u3d_done()
198 if (req->req.status == -EINPROGRESS) in mv_u3d_done()
199 req->req.status = status; in mv_u3d_done()
201 status = req->req.status; in mv_u3d_done()
204 if (!req->chain) in mv_u3d_done()
206 req->trb_head->trb_hw, req->trb_head->trb_dma); in mv_u3d_done()
209 (dma_addr_t)req->trb_head->trb_dma, in mv_u3d_done()
[all …]
Ds3c2410_udc.c253 struct s3c2410_request *req, int status) in s3c2410_udc_done() argument
257 list_del_init(&req->queue); in s3c2410_udc_done()
259 if (likely(req->req.status == -EINPROGRESS)) in s3c2410_udc_done()
260 req->req.status = status; in s3c2410_udc_done()
262 status = req->req.status; in s3c2410_udc_done()
265 usb_gadget_giveback_request(&ep->ep, &req->req); in s3c2410_udc_done()
277 struct s3c2410_request *req; in s3c2410_udc_nuke() local
278 req = list_entry(ep->queue.next, struct s3c2410_request, in s3c2410_udc_nuke()
280 s3c2410_udc_done(ep, req, status); in s3c2410_udc_nuke()
297 struct s3c2410_request *req, in s3c2410_udc_write_packet() argument
[all …]
Dfusb300_udc.c36 static void done(struct fusb300_ep *ep, struct fusb300_request *req,
254 struct fusb300_request *req; in fusb300_disable() local
262 req = list_entry(ep->queue.next, struct fusb300_request, queue); in fusb300_disable()
264 done(ep, req, -ECONNRESET); in fusb300_disable()
274 struct fusb300_request *req; in fusb300_alloc_request() local
276 req = kzalloc(sizeof(struct fusb300_request), gfp_flags); in fusb300_alloc_request()
277 if (!req) in fusb300_alloc_request()
279 INIT_LIST_HEAD(&req->queue); in fusb300_alloc_request()
281 return &req->req; in fusb300_alloc_request()
286 struct fusb300_request *req; in fusb300_free_request() local
[all …]
Dudc-xilinx.c112 #define to_xusb_req(req) container_of((req), struct xusb_req, usb_req) argument
187 struct xusb_req *req; member
380 static int xudc_dma_send(struct xusb_ep *ep, struct xusb_req *req, in xudc_dma_send() argument
388 src = req->usb_req.dma + req->usb_req.actual; in xudc_dma_send()
389 if (req->usb_req.length) in xudc_dma_send()
434 static int xudc_dma_receive(struct xusb_ep *ep, struct xusb_req *req, in xudc_dma_receive() argument
442 dst = req->usb_req.dma + req->usb_req.actual; in xudc_dma_receive()
483 static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req, in xudc_eptxrx() argument
494 rc = xudc_dma_send(ep, req, bufferptr, bufferlen); in xudc_eptxrx()
496 rc = xudc_dma_receive(ep, req, bufferptr, bufferlen); in xudc_eptxrx()
[all …]
Dm66592-udc.c38 static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req);
39 static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req);
44 struct m66592_request *req, int status);
497 static void start_ep0_write(struct m66592_ep *ep, struct m66592_request *req) in start_ep0_write() argument
506 if (req->req.length == 0) { in start_ep0_write()
509 transfer_complete(ep, req, 0); in start_ep0_write()
512 irq_ep0_write(ep, req); in start_ep0_write()
516 static void start_packet_write(struct m66592_ep *ep, struct m66592_request *req) in start_packet_write() argument
529 irq_packet_write(ep, req); in start_packet_write()
532 static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req) in start_packet_read() argument
[all …]
Dfsl_udc_core.c161 static void done(struct fsl_ep *ep, struct fsl_req *req, int status) in done() argument
172 list_del_init(&req->queue); in done()
175 if (req->req.status == -EINPROGRESS) in done()
176 req->req.status = status; in done()
178 status = req->req.status; in done()
181 next_td = req->head; in done()
182 for (j = 0; j < req->dtd_count; j++) { in done()
184 if (j != req->dtd_count - 1) { in done()
190 usb_gadget_unmap_request(&ep->udc->gadget, &req->req, ep_is_in(ep)); in done()
194 ep->ep.name, &req->req, status, in done()
[all …]
Dpch_udc.c410 struct usb_request req; member
1446 static void complete_req(struct pch_udc_ep *ep, struct pch_udc_request *req, in complete_req() argument
1454 list_del_init(&req->queue); in complete_req()
1457 if (req->req.status == -EINPROGRESS) in complete_req()
1458 req->req.status = status; in complete_req()
1460 status = req->req.status; in complete_req()
1463 if (req->dma_mapped) { in complete_req()
1464 if (req->dma == DMA_ADDR_INVALID) { in complete_req()
1466 dma_unmap_single(&dev->pdev->dev, req->req.dma, in complete_req()
1467 req->req.length, in complete_req()
[all …]
Dmv_udc_core.c143 actual = curr_req->req.length; in process_ep_req()
203 curr_req->req.actual = actual; in process_ep_req()
213 static void done(struct mv_ep *ep, struct mv_req *req, int status) in done() argument
224 list_del_init(&req->queue); in done()
227 if (req->req.status == -EINPROGRESS) in done()
228 req->req.status = status; in done()
230 status = req->req.status; in done()
233 next_td = req->head; in done()
234 for (j = 0; j < req->dtd_count; j++) { in done()
236 if (j != req->dtd_count - 1) in done()
[all …]
Dnet2280.c554 struct net2280_request *req; in net2280_alloc_request() local
562 req = kzalloc(sizeof(*req), gfp_flags); in net2280_alloc_request()
563 if (!req) in net2280_alloc_request()
566 INIT_LIST_HEAD(&req->queue); in net2280_alloc_request()
573 &req->td_dma); in net2280_alloc_request()
575 kfree(req); in net2280_alloc_request()
580 req->td = td; in net2280_alloc_request()
582 return &req->req; in net2280_alloc_request()
588 struct net2280_request *req; in net2280_free_request() local
597 req = container_of(_req, struct net2280_request, req); in net2280_free_request()
[all …]
Dfsl_qe_udc.c80 static void done(struct qe_ep *ep, struct qe_req *req, int status) in done() argument
89 list_del_init(&req->queue); in done()
92 if (req->req.status == -EINPROGRESS) in done()
93 req->req.status = status; in done()
95 status = req->req.status; in done()
97 if (req->mapped) { in done()
99 req->req.dma, req->req.length, in done()
103 req->req.dma = DMA_ADDR_INVALID; in done()
104 req->mapped = 0; in done()
107 req->req.dma, req->req.length, in done()
[all …]
Dnet2272.c333 struct net2272_request *req; in net2272_alloc_request() local
339 req = kzalloc(sizeof(*req), gfp_flags); in net2272_alloc_request()
340 if (!req) in net2272_alloc_request()
343 INIT_LIST_HEAD(&req->queue); in net2272_alloc_request()
345 return &req->req; in net2272_alloc_request()
352 struct net2272_request *req; in net2272_free_request() local
358 req = container_of(_req, struct net2272_request, req); in net2272_free_request()
359 WARN_ON(!list_empty(&req->queue)); in net2272_free_request()
360 kfree(req); in net2272_free_request()
364 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status) in net2272_done() argument
[all …]
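Editor's note: nearly every UDC driver indexed above carries the same done()/giveback idiom: unlink the request from its endpoint queue, settle the status only if it is still "in progress", then invoke the completion callback. The self-contained model below captures that shape; the types and names are sketched from the pattern, not taken from any one driver.

#include <stdio.h>
#include <errno.h>

struct sk_req {
    struct sk_req  *next;                     /* queue link                 */
    int             status;                   /* -EINPROGRESS while queued  */
    void          (*complete)(struct sk_req *req);
};

struct sk_ep { struct sk_req *queue; };

static void ep_queue(struct sk_ep *ep, struct sk_req *req)
{
    req->status = -EINPROGRESS;
    req->next   = ep->queue;
    ep->queue   = req;
}

static void ep_done(struct sk_ep *ep, struct sk_req *req, int status)
{
    struct sk_req **pp = &ep->queue;

    while (*pp && *pp != req)                 /* list_del equivalent        */
        pp = &(*pp)->next;
    if (*pp)
        *pp = req->next;

    if (req->status == -EINPROGRESS)          /* don't clobber an earlier   */
        req->status = status;                 /* error set by the driver    */

    if (req->complete)
        req->complete(req);                   /* give the request back      */
}

static void on_complete(struct sk_req *req)
{
    printf("request %p finished, status %d\n", (void *)req, req->status);
}

int main(void)
{
    struct sk_ep  ep  = { 0 };
    struct sk_req req = { .complete = on_complete };

    ep_queue(&ep, &req);
    ep_done(&ep, &req, 0);            /* normal completion           */
    ep_queue(&ep, &req);
    ep_done(&ep, &req, -ESHUTDOWN);   /* e.g. endpoint being disabled */
    return 0;
}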
/linux-4.4.14/drivers/usb/gadget/legacy/
Ddbgp.c27 struct usb_request *req; member
91 static void dbgp_complete(struct usb_ep *ep, struct usb_request *req) in dbgp_complete() argument
95 int status = req->status; in dbgp_complete()
107 dbgp_consume(req->buf, req->actual); in dbgp_complete()
109 req->length = DBGP_REQ_LEN; in dbgp_complete()
110 err = usb_ep_queue(ep, req, GFP_ATOMIC); in dbgp_complete()
119 kfree(req->buf); in dbgp_complete()
120 usb_ep_free_request(dbgp.o_ep, req); in dbgp_complete()
130 struct usb_request *req; in dbgp_enable_ep_req() local
132 req = usb_ep_alloc_request(ep, GFP_KERNEL); in dbgp_enable_ep_req()
[all …]
/linux-4.4.14/arch/s390/pci/
Dpci_insn.c16 static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset) in zpci_err_insn() argument
19 u64 req; in zpci_err_insn() member
23 } __packed data = {req, offset, cc, status}; in zpci_err_insn()
29 static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) in __mpcifc() argument
37 : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib) in __mpcifc()
39 *status = req >> 24 & 0xff; in __mpcifc()
43 int zpci_mod_fc(u64 req, struct zpci_fib *fib) in zpci_mod_fc() argument
48 cc = __mpcifc(req, fib, &status); in zpci_mod_fc()
54 zpci_err_insn(cc, status, req, 0); in zpci_mod_fc()
102 static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status) in __pcilg() argument
[all …]
/linux-4.4.14/drivers/net/ethernet/broadcom/bnxt/
Dbnxt_sriov.c41 struct hwrm_func_cfg_input req = {0}; in bnxt_set_vf_spoofchk() local
66 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); in bnxt_set_vf_spoofchk()
67 req.vf_id = cpu_to_le16(vf->fw_fid); in bnxt_set_vf_spoofchk()
68 req.flags = cpu_to_le32(func_flags); in bnxt_set_vf_spoofchk()
69 rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); in bnxt_set_vf_spoofchk()
112 struct hwrm_func_cfg_input req = {0}; in bnxt_set_vf_mac() local
130 bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_CFG, -1, -1); in bnxt_set_vf_mac()
131 req.vf_id = cpu_to_le16(vf->fw_fid); in bnxt_set_vf_mac()
132 req.flags = cpu_to_le32(vf->func_flags); in bnxt_set_vf_mac()
133 req.enables = cpu_to_le32(FUNC_CFG_REQ_ENABLES_DFLT_MAC_ADDR); in bnxt_set_vf_mac()
[all …]
/linux-4.4.14/drivers/crypto/qce/
Dsha.c40 struct ahash_request *req = ahash_request_cast(async_req); in qce_ahash_done() local
41 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); in qce_ahash_done()
42 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); in qce_ahash_done()
54 dma_unmap_sg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE); in qce_ahash_done()
58 if (req->result) in qce_ahash_done()
59 memcpy(req->result, result->auth_iv, digestsize); in qce_ahash_done()
68 req->src = rctx->src_orig; in qce_ahash_done()
69 req->nbytes = rctx->nbytes_orig; in qce_ahash_done()
78 struct ahash_request *req = ahash_request_cast(async_req); in qce_ahash_async_req_handle() local
79 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); in qce_ahash_async_req_handle()
[all …]
Dablkcipher.c28 struct ablkcipher_request *req = ablkcipher_request_cast(async_req); in qce_ablkcipher_done() local
29 struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); in qce_ablkcipher_done()
37 diff_dst = (req->src != req->dst) ? true : false; in qce_ablkcipher_done()
62 struct ablkcipher_request *req = ablkcipher_request_cast(async_req); in qce_ablkcipher_async_req_handle() local
63 struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); in qce_ablkcipher_async_req_handle()
64 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); in qce_ablkcipher_async_req_handle()
73 rctx->iv = req->info; in qce_ablkcipher_async_req_handle()
75 rctx->cryptlen = req->nbytes; in qce_ablkcipher_async_req_handle()
77 diff_dst = (req->src != req->dst) ? true : false; in qce_ablkcipher_async_req_handle()
81 rctx->src_nents = sg_nents_for_len(req->src, req->nbytes); in qce_ablkcipher_async_req_handle()
[all …]
/linux-4.4.14/drivers/net/wireless/ti/wl18xx/
Dscan.c42 struct cfg80211_scan_request *req) in wl18xx_scan_send() argument
82 WARN_ON(req->n_ssids > 1); in wl18xx_scan_send()
90 wlcore_set_scan_chan_params(wl, cmd_channels, req->channels, in wl18xx_scan_send()
91 req->n_channels, req->n_ssids, in wl18xx_scan_send()
101 if (req->no_cck) in wl18xx_scan_send()
106 if (req->n_ssids) { in wl18xx_scan_send()
107 cmd->ssid_len = req->ssids[0].ssid_len; in wl18xx_scan_send()
108 memcpy(cmd->ssid, req->ssids[0].ssid, cmd->ssid_len); in wl18xx_scan_send()
116 req->ssids ? req->ssids[0].ssid : NULL, in wl18xx_scan_send()
117 req->ssids ? req->ssids[0].ssid_len : 0, in wl18xx_scan_send()
[all …]
/linux-4.4.14/fs/ceph/
Ddir.c314 struct ceph_mds_request *req; in ceph_readdir() local
326 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); in ceph_readdir()
327 if (IS_ERR(req)) in ceph_readdir()
328 return PTR_ERR(req); in ceph_readdir()
329 err = ceph_alloc_readdir_reply_buffer(req, inode); in ceph_readdir()
331 ceph_mdsc_put_request(req); in ceph_readdir()
335 req->r_direct_mode = USE_AUTH_MDS; in ceph_readdir()
336 req->r_direct_hash = ceph_frag_value(frag); in ceph_readdir()
337 req->r_direct_is_hash = true; in ceph_readdir()
339 req->r_path2 = kstrdup(fi->last_name, GFP_KERNEL); in ceph_readdir()
[all …]
Dmds_client.c515 static void put_request_session(struct ceph_mds_request *req) in put_request_session() argument
517 if (req->r_session) { in put_request_session()
518 ceph_put_mds_session(req->r_session); in put_request_session()
519 req->r_session = NULL; in put_request_session()
525 struct ceph_mds_request *req = container_of(kref, in ceph_mdsc_release_request() local
528 destroy_reply_info(&req->r_reply_info); in ceph_mdsc_release_request()
529 if (req->r_request) in ceph_mdsc_release_request()
530 ceph_msg_put(req->r_request); in ceph_mdsc_release_request()
531 if (req->r_reply) in ceph_mdsc_release_request()
532 ceph_msg_put(req->r_reply); in ceph_mdsc_release_request()
[all …]
Dexport.c73 struct ceph_mds_request *req; in __fh_to_dentry() local
75 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO, in __fh_to_dentry()
77 if (IS_ERR(req)) in __fh_to_dentry()
78 return ERR_CAST(req); in __fh_to_dentry()
80 req->r_ino1 = vino; in __fh_to_dentry()
81 req->r_num_caps = 1; in __fh_to_dentry()
82 err = ceph_mdsc_do_request(mdsc, NULL, req); in __fh_to_dentry()
83 inode = req->r_target_inode; in __fh_to_dentry()
86 ceph_mdsc_put_request(req); in __fh_to_dentry()
128 struct ceph_mds_request *req; in __get_parent() local
[all …]
Ddebugfs.c53 struct ceph_mds_request *req; in mdsc_show() local
61 req = rb_entry(rp, struct ceph_mds_request, r_node); in mdsc_show()
63 if (req->r_request && req->r_session) in mdsc_show()
64 seq_printf(s, "%lld\tmds%d\t", req->r_tid, in mdsc_show()
65 req->r_session->s_mds); in mdsc_show()
66 else if (!req->r_request) in mdsc_show()
67 seq_printf(s, "%lld\t(no request)\t", req->r_tid); in mdsc_show()
69 seq_printf(s, "%lld\t(no session)\t", req->r_tid); in mdsc_show()
71 seq_printf(s, "%s", ceph_mds_op_name(req->r_op)); in mdsc_show()
73 if (req->r_got_unsafe) in mdsc_show()
[all …]
Dlocks.c13 struct ceph_mds_request *req);
40 struct ceph_mds_request *req; in ceph_lock_message() local
48 req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS); in ceph_lock_message()
49 if (IS_ERR(req)) in ceph_lock_message()
50 return PTR_ERR(req); in ceph_lock_message()
51 req->r_inode = inode; in ceph_lock_message()
53 req->r_num_caps = 1; in ceph_lock_message()
68 req->r_args.filelock_change.rule = lock_type; in ceph_lock_message()
69 req->r_args.filelock_change.type = cmd; in ceph_lock_message()
70 req->r_args.filelock_change.owner = cpu_to_le64(owner); in ceph_lock_message()
[all …]
Dioctl.c67 struct ceph_mds_request *req; in ceph_ioctl_set_layout() local
106 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT, in ceph_ioctl_set_layout()
108 if (IS_ERR(req)) in ceph_ioctl_set_layout()
109 return PTR_ERR(req); in ceph_ioctl_set_layout()
110 req->r_inode = inode; in ceph_ioctl_set_layout()
112 req->r_num_caps = 1; in ceph_ioctl_set_layout()
114 req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL; in ceph_ioctl_set_layout()
116 req->r_args.setlayout.layout.fl_stripe_unit = in ceph_ioctl_set_layout()
118 req->r_args.setlayout.layout.fl_stripe_count = in ceph_ioctl_set_layout()
120 req->r_args.setlayout.layout.fl_object_size = in ceph_ioctl_set_layout()
[all …]
/linux-4.4.14/drivers/misc/genwqe/
Dcard_ddcb.c204 struct ddcb_requ *req; in ddcb_requ_alloc() local
206 req = kzalloc(sizeof(*req), GFP_KERNEL); in ddcb_requ_alloc()
207 if (!req) in ddcb_requ_alloc()
210 return &req->cmd; in ddcb_requ_alloc()
215 struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); in ddcb_requ_free() local
217 kfree(req); in ddcb_requ_free()
220 static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req) in ddcb_requ_get_state() argument
222 return req->req_state; in ddcb_requ_get_state()
225 static inline void ddcb_requ_set_state(struct ddcb_requ *req, in ddcb_requ_set_state() argument
228 req->req_state = new_state; in ddcb_requ_set_state()
[all …]
/linux-4.4.14/net/ipv4/
Dinet_connection_sock.c307 struct request_sock *req; in inet_csk_accept() local
333 req = reqsk_queue_remove(queue, sk); in inet_csk_accept()
334 newsk = req->sk; in inet_csk_accept()
337 tcp_rsk(req)->tfo_listener) { in inet_csk_accept()
339 if (tcp_rsk(req)->tfo_listener) { in inet_csk_accept()
346 req->sk = NULL; in inet_csk_accept()
347 req = NULL; in inet_csk_accept()
353 if (req) in inet_csk_accept()
354 reqsk_put(req); in inet_csk_accept()
358 req = NULL; in inet_csk_accept()
[all …]
Dtcp_minisocks.c367 void tcp_openreq_init_rwin(struct request_sock *req, in tcp_openreq_init_rwin() argument
371 struct inet_request_sock *ireq = inet_rsk(req); in tcp_openreq_init_rwin()
384 req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW); in tcp_openreq_init_rwin()
388 (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0)) in tcp_openreq_init_rwin()
389 req->rsk_window_clamp = full_space; in tcp_openreq_init_rwin()
394 &req->rsk_rcv_wnd, in tcp_openreq_init_rwin()
395 &req->rsk_window_clamp, in tcp_openreq_init_rwin()
404 const struct request_sock *req) in tcp_ecn_openreq_child() argument
406 tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0; in tcp_ecn_openreq_child()
445 struct request_sock *req, in tcp_create_openreq_child() argument
[all …]
Dudp_diag.c22 const struct inet_diag_req_v2 *req, in sk_diag_dump() argument
28 return inet_sk_diag_fill(sk, NULL, skb, req, in sk_diag_dump()
36 const struct inet_diag_req_v2 *req) in udp_dump_one() argument
43 if (req->sdiag_family == AF_INET) in udp_dump_one()
45 req->id.idiag_src[0], req->id.idiag_sport, in udp_dump_one()
46 req->id.idiag_dst[0], req->id.idiag_dport, in udp_dump_one()
47 req->id.idiag_if, tbl); in udp_dump_one()
49 else if (req->sdiag_family == AF_INET6) in udp_dump_one()
51 (struct in6_addr *)req->id.idiag_src, in udp_dump_one()
52 req->id.idiag_sport, in udp_dump_one()
[all …]
/linux-4.4.14/arch/arm64/crypto/
Daes-ce-ccm-glue.c68 static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen) in ccm_init_mac() argument
70 struct crypto_aead *aead = crypto_aead_reqtfm(req); in ccm_init_mac()
72 u32 l = req->iv[0] + 1; in ccm_init_mac()
89 memcpy(maciv, req->iv, AES_BLOCK_SIZE - l); in ccm_init_mac()
99 if (req->assoclen) in ccm_init_mac()
102 memset(&req->iv[AES_BLOCK_SIZE - l], 0, l); in ccm_init_mac()
106 static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) in ccm_calculate_auth_mac() argument
108 struct crypto_aead *aead = crypto_aead_reqtfm(req); in ccm_calculate_auth_mac()
112 u32 len = req->assoclen; in ccm_calculate_auth_mac()
127 scatterwalk_start(&walk, req->src); in ccm_calculate_auth_mac()
[all …]
/linux-4.4.14/fs/lockd/
Dclntproc.c124 static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) in nlmclnt_setlockargs() argument
126 struct nlm_args *argp = &req->a_args; in nlmclnt_setlockargs()
128 char *nodename = req->a_host->h_rpcclnt->cl_nodename; in nlmclnt_setlockargs()
133 lock->oh.data = req->a_owner; in nlmclnt_setlockargs()
134 lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", in nlmclnt_setlockargs()
143 static void nlmclnt_release_lockargs(struct nlm_rqst *req) in nlmclnt_release_lockargs() argument
145 WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL); in nlmclnt_release_lockargs()
249 nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc) in nlmclnt_call() argument
251 struct nlm_host *host = req->a_host; in nlmclnt_call()
253 struct nlm_args *argp = &req->a_args; in nlmclnt_call()
[all …]
/linux-4.4.14/arch/powerpc/platforms/powermac/
Dtime.c107 struct adb_request req; in cuda_get_time() local
110 if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0) in cuda_get_time()
112 while (!req.complete) in cuda_get_time()
114 if (req.reply_len != 7) in cuda_get_time()
116 req.reply_len); in cuda_get_time()
117 now = (req.reply[3] << 24) + (req.reply[4] << 16) in cuda_get_time()
118 + (req.reply[5] << 8) + req.reply[6]; in cuda_get_time()
127 struct adb_request req; in cuda_set_rtc_time() local
130 if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME, in cuda_set_rtc_time()
134 while (!req.complete) in cuda_set_rtc_time()
[all …]
/linux-4.4.14/drivers/crypto/
Dpicoxcell_crypto.c95 struct crypto_async_request *req; member
101 void (*complete)(struct spacc_req *req);
187 static int spacc_ablk_submit(struct spacc_req *req);
327 struct spacc_req *req = aead_request_ctx(areq); in spacc_aead_make_ddts() local
328 struct spacc_engine *engine = req->engine; in spacc_aead_make_ddts()
336 if (req->is_encrypt) in spacc_aead_make_ddts()
350 src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr); in spacc_aead_make_ddts()
354 dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr); in spacc_aead_make_ddts()
358 req->src_ddt = src_ddt; in spacc_aead_make_ddts()
359 req->dst_ddt = dst_ddt; in spacc_aead_make_ddts()
[all …]
Dmv_cesa.c260 struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); in mv_process_current_q() local
261 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); in mv_process_current_q()
262 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); in mv_process_current_q()
275 memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16); in mv_process_current_q()
315 struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); in mv_crypto_algo_completion() local
316 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); in mv_crypto_algo_completion()
324 memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); in mv_crypto_algo_completion()
329 struct ahash_request *req = ahash_request_cast(cpg->cur_req); in mv_process_hash_current() local
330 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); in mv_process_hash_current()
331 struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); in mv_process_hash_current()
[all …]
Dmxs-dcp.c197 struct ablkcipher_request *req, int init) in mxs_dcp_run_aes() argument
201 struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); in mxs_dcp_run_aes()
253 struct ablkcipher_request *req = ablkcipher_request_cast(arq); in mxs_dcp_aes_block_crypt() local
255 struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); in mxs_dcp_aes_block_crypt()
257 struct scatterlist *dst = req->dst; in mxs_dcp_aes_block_crypt()
258 struct scatterlist *src = req->src; in mxs_dcp_aes_block_crypt()
259 const int nents = sg_nents(req->src); in mxs_dcp_aes_block_crypt()
282 memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128); in mxs_dcp_aes_block_crypt()
289 for_each_sg(req->src, src, nents, i) { in mxs_dcp_aes_block_crypt()
309 ret = mxs_dcp_run_aes(actx, req, init); in mxs_dcp_aes_block_crypt()
[all …]
Dimg-hash.c132 struct ahash_request *req; member
165 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); in img_hash_start()
211 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); in img_hash_dma_callback()
224 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); in img_hash_xmit_dma()
254 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); in img_hash_write_via_cpu()
256 ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg), in img_hash_write_via_cpu()
257 ctx->buffer, hdev->req->nbytes); in img_hash_write_via_cpu()
259 ctx->total = hdev->req->nbytes; in img_hash_write_via_cpu()
269 static int img_hash_finish(struct ahash_request *req) in img_hash_finish() argument
271 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); in img_hash_finish()
[all …]
Dsahara.c551 static int sahara_aes_process(struct ablkcipher_request *req) in sahara_aes_process() argument
562 req->nbytes, req->src, req->dst); in sahara_aes_process()
565 dev->total = req->nbytes; in sahara_aes_process()
566 dev->in_sg = req->src; in sahara_aes_process()
567 dev->out_sg = req->dst; in sahara_aes_process()
569 rctx = ablkcipher_request_ctx(req); in sahara_aes_process()
570 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); in sahara_aes_process()
574 if ((dev->flags & FLAGS_CBC) && req->info) in sahara_aes_process()
575 memcpy(dev->iv_base, req->info, AES_KEYSIZE_128); in sahara_aes_process()
638 static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode) in sahara_aes_crypt() argument
[all …]
/linux-4.4.14/fs/cifs/
Dsmb2pdu.c338 assemble_neg_contexts(struct smb2_negotiate_req *req) in assemble_neg_contexts() argument
342 char *pneg_ctxt = (char *)req + OFFSET_OF_NEG_CONTEXT + 4; in assemble_neg_contexts()
348 req->NegotiateContextOffset = cpu_to_le32(OFFSET_OF_NEG_CONTEXT); in assemble_neg_contexts()
349 req->NegotiateContextCount = cpu_to_le16(2); in assemble_neg_contexts()
350 inc_rfc1001_len(req, 4 + sizeof(struct smb2_preauth_neg_context) + 2 in assemble_neg_contexts()
354 static void assemble_neg_contexts(struct smb2_negotiate_req *req) in assemble_neg_contexts() argument
379 struct smb2_negotiate_req *req; in SMB2_negotiate() local
396 rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req); in SMB2_negotiate()
400 req->hdr.SessionId = 0; in SMB2_negotiate()
402 req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); in SMB2_negotiate()
[all …]
/linux-4.4.14/drivers/mtd/ubi/
Dcdev.c440 struct ubi_leb_change_req req; in vol_cdev_ioctl() local
442 err = copy_from_user(&req, argp, in vol_cdev_ioctl()
457 if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || in vol_cdev_ioctl()
458 req.bytes < 0 || req.bytes > vol->usable_leb_size) in vol_cdev_ioctl()
465 err = ubi_start_leb_change(ubi, vol, &req); in vol_cdev_ioctl()
466 if (req.bytes == 0) in vol_cdev_ioctl()
505 struct ubi_map_req req; in vol_cdev_ioctl() local
507 err = copy_from_user(&req, argp, sizeof(struct ubi_map_req)); in vol_cdev_ioctl()
512 err = ubi_leb_map(desc, req.lnum); in vol_cdev_ioctl()
547 struct ubi_set_vol_prop_req req; in vol_cdev_ioctl() local
[all …]
/linux-4.4.14/drivers/scsi/be2iscsi/
Dbe_mgmt.c163 struct be_cmd_req_modify_eq_delay *req; in be_cmd_modify_eq_delay() local
175 req = embedded_payload(wrb); in be_cmd_modify_eq_delay()
178 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); in be_cmd_modify_eq_delay()
179 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, in be_cmd_modify_eq_delay()
180 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); in be_cmd_modify_eq_delay()
182 req->num_eq = cpu_to_le32(num); in be_cmd_modify_eq_delay()
184 req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); in be_cmd_modify_eq_delay()
185 req->delay[i].phase = 0; in be_cmd_modify_eq_delay()
186 req->delay[i].delay_multiplier = in be_cmd_modify_eq_delay()
211 struct be_cmd_reopen_session_req *req; in mgmt_reopen_session() local
[all …]
Dbe_cmds.c831 struct be_cmd_req_eq_create *req = embedded_payload(wrb); in beiscsi_cmd_eq_create() local
839 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); in beiscsi_cmd_eq_create()
841 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, in beiscsi_cmd_eq_create()
842 OPCODE_COMMON_EQ_CREATE, sizeof(*req)); in beiscsi_cmd_eq_create()
844 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); in beiscsi_cmd_eq_create()
846 AMAP_SET_BITS(struct amap_eq_context, func, req->context, in beiscsi_cmd_eq_create()
848 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); in beiscsi_cmd_eq_create()
849 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); in beiscsi_cmd_eq_create()
850 AMAP_SET_BITS(struct amap_eq_context, count, req->context, in beiscsi_cmd_eq_create()
852 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, in beiscsi_cmd_eq_create()
[all …]
/linux-4.4.14/drivers/scsi/qla2xxx/
Dqla_mid.c485 vha->req = base_vha->req; in qla24xx_create_vhost()
486 host->can_queue = base_vha->req->length + 128; in qla24xx_create_vhost()
516 qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req) in qla25xx_free_req_que() argument
519 uint16_t que_id = req->id; in qla25xx_free_req_que()
521 dma_free_coherent(&ha->pdev->dev, (req->length + 1) * in qla25xx_free_req_que()
522 sizeof(request_t), req->ring, req->dma); in qla25xx_free_req_que()
523 req->ring = NULL; in qla25xx_free_req_que()
524 req->dma = 0; in qla25xx_free_req_que()
531 kfree(req->outstanding_cmds); in qla25xx_free_req_que()
532 kfree(req); in qla25xx_free_req_que()
[all …]
Dqla_iocb.c98 struct req_que *req = vha->req; in qla2x00_prep_cont_type0_iocb() local
100 req->ring_index++; in qla2x00_prep_cont_type0_iocb()
101 if (req->ring_index == req->length) { in qla2x00_prep_cont_type0_iocb()
102 req->ring_index = 0; in qla2x00_prep_cont_type0_iocb()
103 req->ring_ptr = req->ring; in qla2x00_prep_cont_type0_iocb()
105 req->ring_ptr++; in qla2x00_prep_cont_type0_iocb()
108 cont_pkt = (cont_entry_t *)req->ring_ptr; in qla2x00_prep_cont_type0_iocb()
123 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) in qla2x00_prep_cont_type1_iocb() argument
128 req->ring_index++; in qla2x00_prep_cont_type1_iocb()
129 if (req->ring_index == req->length) { in qla2x00_prep_cont_type1_iocb()
[all …]
/linux-4.4.14/drivers/isdn/hardware/eicon/
Ddivasfunc.c67 IDI_SYNC_REQ req; in diva_xdi_didd_register_adapter() local
81 req.didd_remove_adapter.e.Req = 0; in diva_xdi_didd_register_adapter()
82 req.didd_add_adapter.e.Rc = IDI_SYNC_REQ_DIDD_ADD_ADAPTER; in diva_xdi_didd_register_adapter()
83 req.didd_add_adapter.info.descriptor = (void *) &d; in diva_xdi_didd_register_adapter()
84 DAdapter.request((ENTITY *)&req); in diva_xdi_didd_register_adapter()
85 if (req.didd_add_adapter.e.Rc != 0xff) { in diva_xdi_didd_register_adapter()
97 IDI_SYNC_REQ req; in diva_xdi_didd_remove_adapter() local
102 req.didd_remove_adapter.e.Req = 0; in diva_xdi_didd_remove_adapter()
103 req.didd_remove_adapter.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER; in diva_xdi_didd_remove_adapter()
104 req.didd_remove_adapter.info.p_request = in diva_xdi_didd_remove_adapter()
[all …]
Ddiddfunc.c54 IDI_SYNC_REQ req; in connect_didd() local
63 req.didd_notify.e.Req = 0; in connect_didd()
64 req.didd_notify.e.Rc = in connect_didd()
66 req.didd_notify.info.callback = (void *)didd_callback; in connect_didd()
67 req.didd_notify.info.context = NULL; in connect_didd()
68 _DAdapter.request((ENTITY *)&req); in connect_didd()
69 if (req.didd_notify.e.Rc != 0xff) in connect_didd()
71 notify_handle = req.didd_notify.info.handle; in connect_didd()
84 IDI_SYNC_REQ req; in disconnect_didd() local
86 req.didd_notify.e.Req = 0; in disconnect_didd()
[all …]
/linux-4.4.14/drivers/net/ethernet/broadcom/bnx2x/
Dbnx2x_vfpf.c55 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); in bnx2x_vfpf_prep()
226 struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; in bnx2x_vfpf_acquire() local
234 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); in bnx2x_vfpf_acquire()
241 req->vfdev_info.vf_id = vf_id; in bnx2x_vfpf_acquire()
242 req->vfdev_info.vf_os = 0; in bnx2x_vfpf_acquire()
243 req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION; in bnx2x_vfpf_acquire()
245 req->resc_request.num_rxqs = rx_count; in bnx2x_vfpf_acquire()
246 req->resc_request.num_txqs = tx_count; in bnx2x_vfpf_acquire()
247 req->resc_request.num_sbs = bp->igu_sb_cnt; in bnx2x_vfpf_acquire()
248 req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS; in bnx2x_vfpf_acquire()
[all …]
/linux-4.4.14/drivers/staging/lustre/lustre/osc/
Dosc_request.c106 struct ptlrpc_request *req, void *data, int rc);
201 static inline void osc_pack_req_body(struct ptlrpc_request *req, in osc_pack_req_body() argument
206 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); in osc_pack_req_body()
209 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, in osc_pack_req_body()
214 struct ptlrpc_request *req, in osc_getattr_interpret() argument
222 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); in osc_getattr_interpret()
225 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, in osc_getattr_interpret()
244 struct ptlrpc_request *req; in osc_getattr_async() local
248 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); in osc_getattr_async()
249 if (req == NULL) in osc_getattr_async()
[all …]
/linux-4.4.14/fs/coda/
Dpsdev.c101 struct upc_req *req = NULL; in coda_psdev_write() local
153 req = tmp; in coda_psdev_write()
154 list_del(&req->uc_chain); in coda_psdev_write()
160 if (!req) { in coda_psdev_write()
168 if (req->uc_outSize < nbytes) { in coda_psdev_write()
170 __func__, req->uc_outSize, (long)nbytes, in coda_psdev_write()
172 nbytes = req->uc_outSize; /* don't have more space! */ in coda_psdev_write()
174 if (copy_from_user(req->uc_data, buf, nbytes)) { in coda_psdev_write()
175 req->uc_flags |= CODA_REQ_ABORT; in coda_psdev_write()
176 wake_up(&req->uc_sleep); in coda_psdev_write()
[all …]
/linux-4.4.14/drivers/crypto/nx/
Dnx-aes-gcm.c111 struct aead_request *req, in nx_gca() argument
124 scatterwalk_start(&walk, req->src); in nx_gca()
149 req->src, processed, &to_process); in nx_gca()
160 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); in nx_gca()
180 static int gmac(struct aead_request *req, struct blkcipher_desc *desc, in gmac() argument
185 crypto_aead_ctx(crypto_aead_reqtfm(req)); in gmac()
217 req->src, processed, &to_process); in gmac()
231 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); in gmac()
254 static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc, in gcm_empty() argument
259 crypto_aead_ctx(crypto_aead_reqtfm(req)); in gcm_empty()
[all …]
Dnx-aes-ccm.c169 struct aead_request *req, in generate_pat() argument
235 scatterwalk_map_and_copy(b1 + 2, req->src, 0, in generate_pat()
240 scatterwalk_map_and_copy(b1 + 6, req->src, 0, in generate_pat()
275 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); in generate_pat()
299 req->src, processed, in generate_pat()
317 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); in generate_pat()
341 static int ccm_nx_decrypt(struct aead_request *req, in ccm_nx_decrypt() argument
345 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); in ccm_nx_decrypt()
347 unsigned int nbytes = req->cryptlen; in ccm_nx_decrypt()
348 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); in ccm_nx_decrypt()
[all …]
/linux-4.4.14/drivers/scsi/snic/
Dsnic_res.h28 snic_icmnd_init(struct snic_host_req *req, u32 cmnd_id, u32 host_id, u64 ctx, in snic_icmnd_init() argument
33 snic_io_hdr_enc(&req->hdr, SNIC_REQ_ICMND, 0, cmnd_id, host_id, sg_cnt, in snic_icmnd_init()
36 req->u.icmnd.flags = cpu_to_le16(flags); in snic_icmnd_init()
37 req->u.icmnd.tgt_id = cpu_to_le64(tgt_id); in snic_icmnd_init()
38 memcpy(&req->u.icmnd.lun_id, lun, LUN_ADDR_LEN); in snic_icmnd_init()
39 req->u.icmnd.cdb_len = cdb_len; in snic_icmnd_init()
40 memset(req->u.icmnd.cdb, 0, SNIC_CDB_LEN); in snic_icmnd_init()
41 memcpy(req->u.icmnd.cdb, scsi_cdb, cdb_len); in snic_icmnd_init()
42 req->u.icmnd.data_len = cpu_to_le32(data_len); in snic_icmnd_init()
43 req->u.icmnd.sg_addr = cpu_to_le64(sgl_addr); in snic_icmnd_init()
[all …]
Dsnic_io.c100 struct snic_host_req *req = buf->os_buf; in snic_free_wq_buf() local
107 rqi = req_to_rqi(req); in snic_free_wq_buf()
223 rqi->req = (struct snic_host_req *) (rqi + 1); in snic_req_init()
227 rqi->req = (struct snic_host_req *)(rqi + 1); in snic_req_init()
241 memset(rqi->req, 0, rqi->req_len); in snic_req_init()
244 rqi->req->hdr.init_ctx = (ulong) rqi; in snic_req_init()
257 struct snic_host_req *req = NULL; in snic_abort_req_init() local
266 req = mempool_alloc(snic->req_pool[SNIC_REQ_TM_CACHE], GFP_ATOMIC); in snic_abort_req_init()
267 if (!req) { in snic_abort_req_init()
274 rqi->abort_req = req; in snic_abort_req_init()
[all …]
/linux-4.4.14/arch/arm/crypto/
Dghash-ce-core.S14 SHASH .req q0
15 SHASH2 .req q1
16 T1 .req q2
17 T2 .req q3
18 MASK .req q4
19 XL .req q5
20 XM .req q6
21 XH .req q7
22 IN1 .req q7
24 SHASH_L .req d0
[all …]
Dsha1-ce-core.S18 k0 .req q0
19 k1 .req q1
20 k2 .req q2
21 k3 .req q3
23 ta0 .req q4
24 ta1 .req q5
25 tb0 .req q5
26 tb1 .req q4
28 dga .req q6
29 dgb .req q7
[all …]
Dghash-ce-glue.c151 static int ghash_async_init(struct ahash_request *req) in ghash_async_init() argument
153 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); in ghash_async_init()
155 struct ahash_request *cryptd_req = ahash_request_ctx(req); in ghash_async_init()
159 memcpy(cryptd_req, req, sizeof(*req)); in ghash_async_init()
167 desc->flags = req->base.flags; in ghash_async_init()
172 static int ghash_async_update(struct ahash_request *req) in ghash_async_update() argument
174 struct ahash_request *cryptd_req = ahash_request_ctx(req); in ghash_async_update()
177 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); in ghash_async_update()
181 memcpy(cryptd_req, req, sizeof(*req)); in ghash_async_update()
186 return shash_ahash_update(req, desc); in ghash_async_update()
[all …]
/linux-4.4.14/arch/powerpc/platforms/512x/
Dmpc512x_lpbfifo.c43 struct mpc512x_lpbfifo_request *req; member
67 struct mpc512x_lpbfifo_request *req = NULL; in mpc512x_lpbfifo_irq() local
76 req = lpbfifo.req; in mpc512x_lpbfifo_irq()
77 if (!req || req->dir == MPC512X_LPBFIFO_REQ_DIR_READ) { in mpc512x_lpbfifo_irq()
98 lpbfifo.req = NULL; in mpc512x_lpbfifo_irq()
102 if (req->callback) in mpc512x_lpbfifo_irq()
103 req->callback(req); in mpc512x_lpbfifo_irq()
119 struct mpc512x_lpbfifo_request *req = NULL; in mpc512x_lpbfifo_callback() local
129 req = lpbfifo.req; in mpc512x_lpbfifo_callback()
130 if (!req) { in mpc512x_lpbfifo_callback()
[all …]
/linux-4.4.14/drivers/infiniband/core/
Daddr.c184 static void queue_req(struct addr_req *req) in queue_req() argument
190 if (time_after_eq(req->timeout, temp_req->timeout)) in queue_req()
194 list_add(&req->list, &temp_req->list); in queue_req()
196 if (req_list.next == &req->list) in queue_req()
197 set_timeout(req->timeout); in queue_req()
335 struct addr_req *req, *temp_req; in process_req() local
342 list_for_each_entry_safe(req, temp_req, &req_list, list) { in process_req()
343 if (req->status == -ENODATA) { in process_req()
344 src_in = (struct sockaddr *) &req->src_addr; in process_req()
345 dst_in = (struct sockaddr *) &req->dst_addr; in process_req()
[all …]
/linux-4.4.14/drivers/mfd/
Dpcf50633-adc.c85 adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req) in adc_enqueue_request() argument
101 adc->queue[tail] = req; in adc_enqueue_request()
114 struct pcf50633_adc_sync_request *req = param; in pcf50633_adc_sync_read_callback() local
116 req->result = result; in pcf50633_adc_sync_read_callback()
117 complete(&req->completion); in pcf50633_adc_sync_read_callback()
122 struct pcf50633_adc_sync_request req; in pcf50633_adc_sync_read() local
125 init_completion(&req.completion); in pcf50633_adc_sync_read()
128 pcf50633_adc_sync_read_callback, &req); in pcf50633_adc_sync_read()
132 wait_for_completion(&req.completion); in pcf50633_adc_sync_read()
134 return req.result; in pcf50633_adc_sync_read()
[all …]
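Editor's note: the pcf50633-adc lines above index a synchronous read built on top of an asynchronous, callback-driven request API: submit the request with a callback that records the result and signals a completion, then block until signalled. The sketch below reproduces that shape in userspace with a pthread-based completion; the "async engine" is just a thread, and every name is illustrative.

#include <pthread.h>
#include <stdio.h>

struct completion {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             done;
};

static void completion_init(struct completion *c)
{
    pthread_mutex_init(&c->lock, NULL);
    pthread_cond_init(&c->cond, NULL);
    c->done = 0;
}

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = 1;
    pthread_cond_signal(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->cond, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

/* the async side: the result arrives via a callback */
struct sync_request {
    struct completion completion;
    int               result;
};

static void sync_read_callback(void *param, int result)
{
    struct sync_request *req = param;

    req->result = result;
    complete(&req->completion);
}

static void *async_engine(void *param)   /* stands in for hardware/IRQ work */
{
    sync_read_callback(param, 42);
    return NULL;
}

/* the sync wrapper */
static int adc_sync_read(void)
{
    struct sync_request req;
    pthread_t worker;

    completion_init(&req.completion);
    pthread_create(&worker, NULL, async_engine, &req); /* "queue" the request */
    wait_for_completion(&req.completion);
    pthread_join(worker, NULL);
    return req.result;
}

int main(void)
{
    printf("result = %d\n", adc_sync_read());
    return 0;
}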
/linux-4.4.14/drivers/input/touchscreen/
Dad7877.c215 struct ser_req *req; in ad7877_read() local
218 req = kzalloc(sizeof *req, GFP_KERNEL); in ad7877_read()
219 if (!req) in ad7877_read()
222 spi_message_init(&req->msg); in ad7877_read()
224 req->command = (u16) (AD7877_WRITEADD(AD7877_REG_CTRL1) | in ad7877_read()
226 req->xfer[0].tx_buf = &req->command; in ad7877_read()
227 req->xfer[0].len = 2; in ad7877_read()
228 req->xfer[0].cs_change = 1; in ad7877_read()
230 req->xfer[1].rx_buf = &req->sample; in ad7877_read()
231 req->xfer[1].len = 2; in ad7877_read()
[all …]
/linux-4.4.14/fs/ecryptfs/
Dkthread.c61 struct ecryptfs_open_req *req; in ecryptfs_threadfn() local
73 req = list_first_entry(&ecryptfs_kthread_ctl.req_list, in ecryptfs_threadfn()
76 list_del(&req->kthread_ctl_list); in ecryptfs_threadfn()
77 *req->lower_file = dentry_open(&req->path, in ecryptfs_threadfn()
79 complete(&req->done); in ecryptfs_threadfn()
106 struct ecryptfs_open_req *req, *tmp; in ecryptfs_destroy_kthread() local
110 list_for_each_entry_safe(req, tmp, &ecryptfs_kthread_ctl.req_list, in ecryptfs_destroy_kthread()
112 list_del(&req->kthread_ctl_list); in ecryptfs_destroy_kthread()
113 *req->lower_file = ERR_PTR(-EIO); in ecryptfs_destroy_kthread()
114 complete(&req->done); in ecryptfs_destroy_kthread()
[all …]
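ecryptfs_threadfn() above is the consumer side of a simple request queue: submitters append an ecryptfs_open_req to req_list and block on req->done, while the kthread pops requests, performs the privileged open, and signals completion (ecryptfs_destroy_kthread() fails any stragglers with -EIO). A sketch of that shape built only from the stock kthread, wait-queue, list, spinlock and completion APIs; every my_* name is hypothetical:

#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct my_open_req {
        struct list_head list;
        struct completion done;
        int result;                     /* filled in by the worker */
};

static LIST_HEAD(my_req_list);
static DEFINE_SPINLOCK(my_req_lock);
static DECLARE_WAIT_QUEUE_HEAD(my_req_wait);

static int my_worker(void *unused)
{
        while (!kthread_should_stop()) {
                struct my_open_req *req = NULL;

                wait_event_interruptible(my_req_wait,
                                         !list_empty(&my_req_list) ||
                                         kthread_should_stop());

                spin_lock(&my_req_lock);
                if (!list_empty(&my_req_list)) {
                        req = list_first_entry(&my_req_list,
                                               struct my_open_req, list);
                        list_del(&req->list);
                }
                spin_unlock(&my_req_lock);

                if (req) {
                        req->result = 0;        /* the privileged work goes here */
                        complete(&req->done);
                }
        }
        return 0;
}

A submitter would take my_req_lock, list_add_tail() its request, wake_up(&my_req_wait), then wait_for_completion(&req->done) for the answer.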
/linux-4.4.14/drivers/usb/gadget/function/
Df_printer.c237 struct usb_request *req; in printer_req_alloc() local
239 req = usb_ep_alloc_request(ep, gfp_flags); in printer_req_alloc()
241 if (req != NULL) { in printer_req_alloc()
242 req->length = len; in printer_req_alloc()
243 req->buf = kmalloc(len, gfp_flags); in printer_req_alloc()
244 if (req->buf == NULL) { in printer_req_alloc()
245 usb_ep_free_request(ep, req); in printer_req_alloc()
250 return req; in printer_req_alloc()
254 printer_req_free(struct usb_ep *ep, struct usb_request *req) in printer_req_free() argument
256 if (ep != NULL && req != NULL) { in printer_req_free()
[all …]
Duvc_video.c62 uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video, in uvc_video_encode_bulk() argument
65 void *mem = req->buf; in uvc_video_encode_bulk()
84 req->length = video->req_size - len; in uvc_video_encode_bulk()
85 req->zero = video->payload_size == video->max_payload_size; in uvc_video_encode_bulk()
102 uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video, in uvc_video_encode_isoc() argument
105 void *mem = req->buf; in uvc_video_encode_isoc()
118 req->length = video->req_size - len; in uvc_video_encode_isoc()
163 uvc_video_complete(struct usb_ep *ep, struct usb_request *req) in uvc_video_complete() argument
165 struct uvc_video *video = req->context; in uvc_video_complete()
171 switch (req->status) { in uvc_video_complete()
[all …]
/linux-4.4.14/drivers/staging/lustre/lustre/ldlm/
Dldlm_lockd.c157 static void ldlm_handle_cp_callback(struct ptlrpc_request *req, in ldlm_handle_cp_callback() argument
180 lvb_len = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB, RCL_CLIENT); in ldlm_handle_cp_callback()
234 ldlm_convert_policy_to_local(req->rq_export, in ldlm_handle_cp_callback()
266 rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_CLIENT, in ldlm_handle_cp_callback()
306 static void ldlm_handle_gl_callback(struct ptlrpc_request *req, in ldlm_handle_gl_callback() argument
316 rc = lock->l_glimpse_ast(lock, req); in ldlm_handle_gl_callback()
318 if (req->rq_repmsg != NULL) { in ldlm_handle_gl_callback()
319 ptlrpc_reply(req); in ldlm_handle_gl_callback()
321 req->rq_status = rc; in ldlm_handle_gl_callback()
322 ptlrpc_error(req); in ldlm_handle_gl_callback()
[all …]
/linux-4.4.14/drivers/usb/gadget/
Du_f.c19 struct usb_request *req; in alloc_ep_req() local
21 req = usb_ep_alloc_request(ep, GFP_ATOMIC); in alloc_ep_req()
22 if (req) { in alloc_ep_req()
23 req->length = len ?: default_len; in alloc_ep_req()
24 req->buf = kmalloc(req->length, GFP_ATOMIC); in alloc_ep_req()
25 if (!req->buf) { in alloc_ep_req()
26 usb_ep_free_request(ep, req); in alloc_ep_req()
27 req = NULL; in alloc_ep_req()
30 return req; in alloc_ep_req()
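alloc_ep_req() above pairs usb_ep_alloc_request() with a kmalloc'd transfer buffer and unwinds the request when the buffer allocation fails. A hedged sketch of how a gadget function might drive such a helper, assuming the (ep, len, default_len) signature visible in the fragment; my_complete() and my_start_transfer() are hypothetical, and the request is simply freed on completion rather than recycled:

#include <linux/slab.h>
#include <linux/usb/gadget.h>

/* alloc_ep_req() is the helper shown above (declared in the gadget's u_f.h). */
struct usb_request *alloc_ep_req(struct usb_ep *ep, int len, int default_len);

static void my_complete(struct usb_ep *ep, struct usb_request *req)
{
        /* Inspect req->status / req->actual here, then release the request. */
        kfree(req->buf);
        usb_ep_free_request(ep, req);
}

static int my_start_transfer(struct usb_ep *ep, unsigned int len)
{
        struct usb_request *req;
        int ret;

        req = alloc_ep_req(ep, len, ep->maxpacket);
        if (!req)
                return -ENOMEM;

        req->complete = my_complete;

        ret = usb_ep_queue(ep, req, GFP_ATOMIC);
        if (ret) {
                kfree(req->buf);
                usb_ep_free_request(ep, req);
        }
        return ret;
}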
/linux-4.4.14/include/crypto/internal/
Dskcipher.h58 int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req);
59 int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req);
88 struct skcipher_givcrypt_request *req) in skcipher_givcrypt_reqctx() argument
90 return ablkcipher_request_ctx(&req->creq); in skcipher_givcrypt_reqctx()
93 static inline void ablkcipher_request_complete(struct ablkcipher_request *req, in ablkcipher_request_complete() argument
96 req->base.complete(&req->base, err); in ablkcipher_request_complete()
100 struct skcipher_givcrypt_request *req, int err) in skcipher_givcrypt_complete() argument
102 ablkcipher_request_complete(&req->creq, err); in skcipher_givcrypt_complete()
105 static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) in ablkcipher_request_flags() argument
107 return req->base.flags; in ablkcipher_request_flags()
[all …]
Daead.h68 static inline void *aead_request_ctx(struct aead_request *req) in aead_request_ctx() argument
70 return req->__ctx; in aead_request_ctx()
73 static inline void aead_request_complete(struct aead_request *req, int err) in aead_request_complete() argument
75 req->base.complete(&req->base, err); in aead_request_complete()
78 static inline u32 aead_request_flags(struct aead_request *req) in aead_request_flags() argument
80 return req->base.flags; in aead_request_flags()
140 struct crypto_async_request *req; in aead_dequeue_request() local
142 req = crypto_dequeue_request(&queue->base); in aead_dequeue_request()
144 return req ? container_of(req, struct aead_request, base) : NULL; in aead_dequeue_request()
149 struct crypto_async_request *req; in aead_get_backlog() local
[all …]
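aead_dequeue_request() and aead_get_backlog() above are thin wrappers over the crypto_queue embedded in the AEAD queue type (the fragment's &queue->base access suggests a struct aead_queue wrapping a struct crypto_queue). The usual driver pattern is to pull the next request under the queue lock, tell any backlogged request it is now -EINPROGRESS, and later hand the finished request back through aead_request_complete(). A sketch under those assumptions; struct my_aead_dev, its lock and my_aead_dequeue() are hypothetical:

#include <crypto/internal/aead.h>
#include <linux/spinlock.h>

struct my_aead_dev {
        spinlock_t lock;
        struct aead_queue queue;
};

static void my_aead_dequeue(struct my_aead_dev *dev)
{
        struct aead_request *backlog, *req;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        backlog = aead_get_backlog(&dev->queue);
        req = aead_dequeue_request(&dev->queue);
        spin_unlock_irqrestore(&dev->lock, flags);

        if (backlog)
                aead_request_complete(backlog, -EINPROGRESS);
        if (!req)
                return;

        /* ... program the hardware from req; once the result is back: */
        aead_request_complete(req, 0);
}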
/linux-4.4.14/drivers/s390/char/
Dsclp.c60 static void sclp_suspend_req_cb(struct sclp_req *req, void *data) in sclp_suspend_req_cb() argument
228 struct sclp_req *req; in __sclp_req_queue_find_next_timeout() local
230 list_for_each_entry(req, &sclp_req_queue, list) { in __sclp_req_queue_find_next_timeout()
231 if (!req->queue_expires) in __sclp_req_queue_find_next_timeout()
234 (time_before(req->queue_expires, expires_next))) in __sclp_req_queue_find_next_timeout()
235 expires_next = req->queue_expires; in __sclp_req_queue_find_next_timeout()
246 struct sclp_req *req; in __sclp_req_queue_remove_expired_req() local
251 list_for_each_entry(req, &sclp_req_queue, list) { in __sclp_req_queue_remove_expired_req()
252 if (!req->queue_expires) in __sclp_req_queue_remove_expired_req()
254 if (time_before_eq(req->queue_expires, now)) { in __sclp_req_queue_remove_expired_req()
[all …]
Dcon3215.c111 struct raw3215_req *req; in raw3215_alloc_req() local
115 req = raw3215_freelist; in raw3215_alloc_req()
116 raw3215_freelist = req->next; in raw3215_alloc_req()
118 return req; in raw3215_alloc_req()
124 static inline void raw3215_free_req(struct raw3215_req *req) in raw3215_free_req() argument
128 if (req->type == RAW3215_FREE) in raw3215_free_req()
130 req->type = RAW3215_FREE; in raw3215_free_req()
132 req->next = raw3215_freelist; in raw3215_free_req()
133 raw3215_freelist = req; in raw3215_free_req()
145 struct raw3215_req *req; in raw3215_mk_read_req() local
[all …]
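raw3215_alloc_req()/raw3215_free_req() above keep spare request structures on a singly linked free list so the console path never has to call the allocator; the real driver brackets both operations with an irq-safe lock. The bare shape of that pattern, with the driver specifics stripped and my_* names standing in:

struct my_req {
        struct my_req *next;
        /* ... request payload ... */
};

static struct my_req *my_freelist;

/* Pop one request off the free list; NULL means the pool is exhausted. */
static struct my_req *my_alloc_req(void)
{
        struct my_req *req = my_freelist;

        if (req)
                my_freelist = req->next;
        return req;
}

/* Push a finished request back so the next caller can reuse it. */
static void my_free_req(struct my_req *req)
{
        req->next = my_freelist;
        my_freelist = req;
}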
/linux-4.4.14/drivers/net/usb/
Dcdc-phonet.c55 static void tx_complete(struct urb *req);
56 static void rx_complete(struct urb *req);
64 struct urb *req = NULL; in usbpn_xmit() local
71 req = usb_alloc_urb(0, GFP_ATOMIC); in usbpn_xmit()
72 if (!req) in usbpn_xmit()
74 usb_fill_bulk_urb(req, pnd->usb, pnd->tx_pipe, skb->data, skb->len, in usbpn_xmit()
76 req->transfer_flags = URB_ZERO_PACKET; in usbpn_xmit()
77 err = usb_submit_urb(req, GFP_ATOMIC); in usbpn_xmit()
79 usb_free_urb(req); in usbpn_xmit()
96 static void tx_complete(struct urb *req) in tx_complete() argument
[all …]
/linux-4.4.14/net/sunrpc/xprtrdma/
Dbackchannel.c24 struct rpcrdma_req *req = rpcr_to_rdmar(rqst); in rpcrdma_bc_free_rqst() local
27 list_del(&req->rl_all); in rpcrdma_bc_free_rqst()
30 rpcrdma_destroy_req(&r_xprt->rx_ia, req); in rpcrdma_bc_free_rqst()
40 struct rpcrdma_req *req; in rpcrdma_bc_setup_rqst() local
44 req = rpcrdma_create_req(r_xprt); in rpcrdma_bc_setup_rqst()
45 if (!req) in rpcrdma_bc_setup_rqst()
47 req->rl_backchannel = true; in rpcrdma_bc_setup_rqst()
53 req->rl_rdmabuf = rb; in rpcrdma_bc_setup_rqst()
59 rb->rg_owner = req; in rpcrdma_bc_setup_rqst()
60 req->rl_sendbuf = rb; in rpcrdma_bc_setup_rqst()
[all …]
/linux-4.4.14/drivers/crypto/amcc/
Dcrypto4xx_alg.c73 int crypto4xx_encrypt(struct ablkcipher_request *req) in crypto4xx_encrypt() argument
75 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); in crypto4xx_encrypt()
82 return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, in crypto4xx_encrypt()
83 req->nbytes, req->info, in crypto4xx_encrypt()
87 int crypto4xx_decrypt(struct ablkcipher_request *req) in crypto4xx_decrypt() argument
89 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); in crypto4xx_decrypt()
96 return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, in crypto4xx_decrypt()
97 req->nbytes, req->info, in crypto4xx_decrypt()
237 int crypto4xx_hash_init(struct ahash_request *req) in crypto4xx_hash_init() argument
239 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); in crypto4xx_hash_init()
[all …]
/linux-4.4.14/drivers/crypto/ccp/
Dccp-crypto-aes.c27 struct ablkcipher_request *req = ablkcipher_request_cast(async_req); in ccp_aes_complete() local
28 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); in ccp_aes_complete()
29 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); in ccp_aes_complete()
35 memcpy(req->info, rctx->iv, AES_BLOCK_SIZE); in ccp_aes_complete()
70 static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt) in ccp_aes_crypt() argument
72 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); in ccp_aes_crypt()
73 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); in ccp_aes_crypt()
84 (req->nbytes & (AES_BLOCK_SIZE - 1))) in ccp_aes_crypt()
88 if (!req->info) in ccp_aes_crypt()
91 memcpy(rctx->iv, req->info, AES_BLOCK_SIZE); in ccp_aes_crypt()
[all …]
Dccp-crypto-main.c66 struct crypto_async_request *req; member
140 struct crypto_async_request *req = crypto_cmd->req; in ccp_crypto_complete() local
141 struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm); in ccp_crypto_complete()
148 req->complete(req, -EINPROGRESS); in ccp_crypto_complete()
161 backlog->req->complete(backlog->req, -EINPROGRESS); in ccp_crypto_complete()
166 req->complete(req, -EINPROGRESS); in ccp_crypto_complete()
171 ret = ctx->complete(req, ret); in ccp_crypto_complete()
172 req->complete(req, ret); in ccp_crypto_complete()
185 ctx = crypto_tfm_ctx(held->req->tfm); in ccp_crypto_complete()
187 ret = ctx->complete(held->req, ret); in ccp_crypto_complete()
[all …]
Dccp-crypto-aes-xts.c86 struct ablkcipher_request *req = ablkcipher_request_cast(async_req); in ccp_aes_xts_complete() local
87 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); in ccp_aes_xts_complete()
92 memcpy(req->info, rctx->iv, AES_BLOCK_SIZE); in ccp_aes_xts_complete()
117 static int ccp_aes_xts_crypt(struct ablkcipher_request *req, in ccp_aes_xts_crypt() argument
121 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); in ccp_aes_xts_crypt()
122 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); in ccp_aes_xts_crypt()
123 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); in ccp_aes_xts_crypt()
131 if (req->nbytes & (AES_BLOCK_SIZE - 1)) in ccp_aes_xts_crypt()
134 if (!req->info) in ccp_aes_xts_crypt()
138 if (req->nbytes <= unit_size_map[0].size) { in ccp_aes_xts_crypt()
[all …]
/linux-4.4.14/drivers/crypto/caam/
Dcaamhash.c131 int (*update)(struct ahash_request *req);
132 int (*final)(struct ahash_request *req);
133 int (*finup)(struct ahash_request *req);
141 int (*update)(struct ahash_request *req);
142 int (*final)(struct ahash_request *req);
143 int (*finup)(struct ahash_request *req);
612 struct ahash_request *req, int dst_len) in ahash_unmap() argument
615 dma_unmap_sg(dev, req->src, edesc->src_nents, DMA_TO_DEVICE); in ahash_unmap()
626 struct ahash_request *req, int dst_len, u32 flag) in ahash_unmap_ctx() argument
628 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); in ahash_unmap_ctx()
[all …]
/linux-4.4.14/drivers/block/
Dnbd.c113 static void nbd_end_request(struct nbd_device *nbd, struct request *req) in nbd_end_request() argument
115 int error = req->errors ? -EIO : 0; in nbd_end_request()
116 struct request_queue *q = req->q; in nbd_end_request()
119 dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req, in nbd_end_request()
123 __blk_end_request_all(req, error); in nbd_end_request()
236 static int nbd_send_req(struct nbd_device *nbd, struct request *req) in nbd_send_req() argument
240 unsigned long size = blk_rq_bytes(req); in nbd_send_req()
243 if (req->cmd_type == REQ_TYPE_DRV_PRIV) in nbd_send_req()
245 else if (req->cmd_flags & REQ_DISCARD) in nbd_send_req()
247 else if (req->cmd_flags & REQ_FLUSH) in nbd_send_req()
[all …]
Dmg_disk.c112 struct request *req; member
151 if (__blk_end_request(host->req, err, nr_bytes)) in mg_end_request()
154 host->req = NULL; in mg_end_request()
160 return mg_end_request(host, err, blk_rq_cur_bytes(host->req)); in mg_end_request_cur()
168 if (host->req) in mg_dump_status()
169 name = host->req->rq_disk->disk_name; in mg_dump_status()
205 if (host->req) in mg_dump_status()
207 (unsigned int)blk_rq_pos(host->req)); in mg_dump_status()
445 if (host->req) in mg_bad_rw_intr()
446 if (++host->req->errors >= MG_MAX_ERRORS || in mg_bad_rw_intr()
[all …]
/linux-4.4.14/arch/um/os-Linux/
Daio.c132 static int do_not_aio(struct aio_thread_req *req) in do_not_aio() argument
138 actual = lseek64(req->io_fd, req->offset, SEEK_SET); in do_not_aio()
139 if (actual != req->offset) in do_not_aio()
142 switch (req->type) { in do_not_aio()
144 n = read(req->io_fd, req->buf, req->len); in do_not_aio()
147 n = write(req->io_fd, req->buf, req->len); in do_not_aio()
150 n = read(req->io_fd, &c, sizeof(c)); in do_not_aio()
154 req->type); in do_not_aio()
171 struct aio_thread_req req; in not_aio_thread() local
177 err = read(aio_req_fd_r, &req, sizeof(req)); in not_aio_thread()
[all …]
/linux-4.4.14/drivers/regulator/
Dqcom_smd-regulator.c49 struct rpm_regulator_req *req, in rpm_reg_write_active() argument
56 req, size); in rpm_reg_write_active()
62 struct rpm_regulator_req req; in rpm_reg_enable() local
65 req.key = cpu_to_le32(RPM_KEY_SWEN); in rpm_reg_enable()
66 req.nbytes = cpu_to_le32(sizeof(u32)); in rpm_reg_enable()
67 req.value = cpu_to_le32(1); in rpm_reg_enable()
69 ret = rpm_reg_write_active(vreg, &req, sizeof(req)); in rpm_reg_enable()
86 struct rpm_regulator_req req; in rpm_reg_disable() local
89 req.key = cpu_to_le32(RPM_KEY_SWEN); in rpm_reg_disable()
90 req.nbytes = cpu_to_le32(sizeof(u32)); in rpm_reg_disable()
[all …]
/linux-4.4.14/drivers/staging/rdma/amso1100/
Dc2_intr.c121 struct c2_vq_req *req; in handle_vq() local
166 req = (struct c2_vq_req *) (unsigned long) host_msg->context; in handle_vq()
167 if (req == NULL) { in handle_vq()
183 if (!err) switch (req->event) { in handle_vq()
185 c2_set_qp_state(req->qp, in handle_vq()
198 cm_event.event = req->event; in handle_vq()
200 cm_event.local_addr = req->cm_id->local_addr; in handle_vq()
201 cm_event.remote_addr = req->cm_id->remote_addr; in handle_vq()
204 req->cm_id->event_handler(req->cm_id, &cm_event); in handle_vq()
210 req->reply_msg = (u64) (unsigned long) (reply_msg); in handle_vq()
[all …]
/linux-4.4.14/drivers/infiniband/hw/mlx4/
Dmcg.c543 struct mcast_req *req = NULL; in mlx4_ib_mcg_timeout_handler() local
550 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); in mlx4_ib_mcg_timeout_handler()
551 list_del(&req->group_list); in mlx4_ib_mcg_timeout_handler()
552 list_del(&req->func_list); in mlx4_ib_mcg_timeout_handler()
553 --group->func[req->func].num_pend_reqs; in mlx4_ib_mcg_timeout_handler()
555 kfree(req); in mlx4_ib_mcg_timeout_handler()
585 struct mcast_req *req) in handle_leave_req() argument
589 if (req->clean) in handle_leave_req()
590 leave_mask = group->func[req->func].join_state; in handle_leave_req()
592 status = check_leave(group, req->func, leave_mask); in handle_leave_req()
[all …]
/linux-4.4.14/arch/arm64/lib/
Dstrlen.S38 srcin .req x0
39 len .req x0
42 src .req x1
43 data1 .req x2
44 data2 .req x3
45 data2a .req x4
46 has_nul1 .req x5
47 has_nul2 .req x6
48 tmp1 .req x7
49 tmp2 .req x8
[all …]
Dstrnlen.S39 srcin .req x0
40 len .req x0
41 limit .req x1
44 src .req x2
45 data1 .req x3
46 data2 .req x4
47 data2a .req x5
48 has_nul1 .req x6
49 has_nul2 .req x7
50 tmp1 .req x8
[all …]
/linux-4.4.14/drivers/scsi/
Dscsi_lib.c221 struct request *req; in scsi_execute() local
225 req = blk_get_request(sdev->request_queue, write, __GFP_RECLAIM); in scsi_execute()
226 if (IS_ERR(req)) in scsi_execute()
228 blk_rq_set_block_pc(req); in scsi_execute()
230 if (bufflen && blk_rq_map_kern(sdev->request_queue, req, in scsi_execute()
234 req->cmd_len = COMMAND_SIZE(cmd[0]); in scsi_execute()
235 memcpy(req->cmd, cmd, req->cmd_len); in scsi_execute()
236 req->sense = sense; in scsi_execute()
237 req->sense_len = 0; in scsi_execute()
238 req->retries = retries; in scsi_execute()
[all …]
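scsi_execute() above wraps a caller-supplied CDB in a BLOCK_PC request: it maps the data buffer with blk_rq_map_kern(), copies the CDB into req->cmd, and fills in the sense buffer, retries and timeout before issuing the request synchronously. A hedged usage sketch; the parameter order is inferred from this kernel series and the fragment above, so treat the exact signature as an assumption:

#include <linux/dma-direction.h>
#include <linux/jiffies.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* Issue TEST UNIT READY with no data phase and report the outcome. */
static int my_test_unit_ready(struct scsi_device *sdev)
{
        unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
        unsigned char sense[SCSI_SENSE_BUFFERSIZE];

        /* 30 second timeout, up to 3 retries, no extra request flags. */
        return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, sense,
                            30 * HZ, 3, 0, NULL);
}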
/linux-4.4.14/drivers/memstick/host/
Dtifm_ms.c74 struct memstick_request *req; member
192 if (host->req->long_data) { in tifm_ms_transfer_data()
193 length = host->req->sg.length - host->block_pos; in tifm_ms_transfer_data()
194 off = host->req->sg.offset + host->block_pos; in tifm_ms_transfer_data()
196 length = host->req->data_len - host->block_pos; in tifm_ms_transfer_data()
205 if (host->req->long_data) { in tifm_ms_transfer_data()
206 pg = nth_page(sg_page(&host->req->sg), in tifm_ms_transfer_data()
215 buf = host->req->data + host->block_pos; in tifm_ms_transfer_data()
216 p_cnt = host->req->data_len - host->block_pos; in tifm_ms_transfer_data()
219 t_size = host->req->data_dir == WRITE in tifm_ms_transfer_data()
[all …]
/linux-4.4.14/drivers/net/fddi/skfp/
Dess.c555 struct smt_sba_alc_req *req ; in ess_send_alc_req() local
586 req = smtod(mb,struct smt_sba_alc_req *) ; in ess_send_alc_req()
587 req->smt.smt_tid = smc->ess.alloc_trans_id = smt_get_tid(smc) ; in ess_send_alc_req()
588 req->smt.smt_dest = smt_sba_da ; in ess_send_alc_req()
591 req->s_type.para.p_type = SMT_P0015 ; in ess_send_alc_req()
592 req->s_type.para.p_len = sizeof(struct smt_p_0015) - PARA_LEN ; in ess_send_alc_req()
593 req->s_type.res_type = SYNC_BW ; in ess_send_alc_req()
596 req->cmd.para.p_type = SMT_P0016 ; in ess_send_alc_req()
597 req->cmd.para.p_len = sizeof(struct smt_p_0016) - PARA_LEN ; in ess_send_alc_req()
598 req->cmd.sba_cmd = REQUEST_ALLOCATION ; in ess_send_alc_req()
[all …]
/linux-4.4.14/include/trace/events/
Dhswadsp.h245 struct sst_hsw_audio_data_format_ipc *req),
247 TP_ARGS(stream, req),
262 __entry->frequency = req->frequency;
263 __entry->bitdepth = req->bitdepth;
264 __entry->map = req->map;
265 __entry->config = req->config;
266 __entry->style = req->style;
267 __entry->ch_num = req->ch_num;
268 __entry->valid_bit = req->valid_bit;
281 struct sst_hsw_ipc_stream_alloc_req *req),
[all …]
/linux-4.4.14/drivers/misc/mic/scif/
Dscif_rma_list.c85 int scif_query_tcw(struct scif_endpt *ep, struct scif_rma_req *req) in scif_query_tcw() argument
87 struct list_head *item, *temp, *head = req->head; in scif_query_tcw()
89 u64 start_va_window, start_va_req = req->va_for_temp; in scif_query_tcw()
90 u64 end_va_window, end_va_req = start_va_req + req->nr_bytes; in scif_query_tcw()
92 if (!req->nr_bytes) in scif_query_tcw()
115 if ((window->prot & req->prot) == req->prot) { in scif_query_tcw()
118 *req->out_window = window; in scif_query_tcw()
123 req->nr_bytes += in scif_query_tcw()
125 req->va_for_temp = start_va_window; in scif_query_tcw()
128 req->nr_bytes += end_va_window - end_va_req; in scif_query_tcw()
[all …]
/linux-4.4.14/drivers/infiniband/hw/cxgb3/
Diwch_cm.c173 struct cpl_tid_release *req; in release_tid() local
175 skb = get_skb(skb, sizeof *req, GFP_KERNEL); in release_tid()
178 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); in release_tid()
179 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in release_tid()
180 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); in release_tid()
188 struct cpl_set_tcb_field *req; in iwch_quiesce_tid() local
189 struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); in iwch_quiesce_tid()
193 req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req)); in iwch_quiesce_tid()
194 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); in iwch_quiesce_tid()
195 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); in iwch_quiesce_tid()
[all …]
/linux-4.4.14/drivers/crypto/qat/qat_common/
Dqat_asym_algs.c112 struct icp_qat_fw_pke_request req; member
120 struct qat_rsa_request *req = PTR_ALIGN(akcipher_request_ctx(areq), 64); in qat_rsa_cb() local
121 struct device *dev = &GET_DEV(req->ctx->inst->accel_dev); in qat_rsa_cb()
127 if (req->src_align) in qat_rsa_cb()
128 dma_free_coherent(dev, req->ctx->key_sz, req->src_align, in qat_rsa_cb()
129 req->in.enc.m); in qat_rsa_cb()
131 dma_unmap_single(dev, req->in.enc.m, req->ctx->key_sz, in qat_rsa_cb()
134 areq->dst_len = req->ctx->key_sz; in qat_rsa_cb()
135 if (req->dst_align) { in qat_rsa_cb()
136 char *ptr = req->dst_align; in qat_rsa_cb()
[all …]
/linux-4.4.14/drivers/media/usb/as102/
Das10x_cmd_stream.c38 sizeof(pcmd->body.add_pid_filter.req)); in as10x_cmd_add_PID_filter()
41 pcmd->body.add_pid_filter.req.proc_id = in as10x_cmd_add_PID_filter()
43 pcmd->body.add_pid_filter.req.pid = cpu_to_le16(filter->pid); in as10x_cmd_add_PID_filter()
44 pcmd->body.add_pid_filter.req.stream_type = filter->type; in as10x_cmd_add_PID_filter()
47 pcmd->body.add_pid_filter.req.idx = filter->idx; in as10x_cmd_add_PID_filter()
49 pcmd->body.add_pid_filter.req.idx = 0xFF; in as10x_cmd_add_PID_filter()
54 sizeof(pcmd->body.add_pid_filter.req) in as10x_cmd_add_PID_filter()
95 sizeof(pcmd->body.del_pid_filter.req)); in as10x_cmd_del_PID_filter()
98 pcmd->body.del_pid_filter.req.proc_id = in as10x_cmd_del_PID_filter()
100 pcmd->body.del_pid_filter.req.pid = cpu_to_le16(pid_value); in as10x_cmd_del_PID_filter()
[all …]
/linux-4.4.14/net/core/
Drequest_sock.c97 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, in reqsk_fastopen_remove() argument
100 struct sock *lsk = req->rsk_listener; in reqsk_fastopen_remove()
108 tcp_rsk(req)->tfo_listener = false; in reqsk_fastopen_remove()
109 if (req->sk) /* the child socket hasn't been accepted yet */ in reqsk_fastopen_remove()
117 reqsk_put(req); in reqsk_fastopen_remove()
127 req->rsk_timer.expires = jiffies + 60*HZ; in reqsk_fastopen_remove()
129 fastopenq->rskq_rst_head = req; in reqsk_fastopen_remove()
131 fastopenq->rskq_rst_tail->dl_next = req; in reqsk_fastopen_remove()
133 req->dl_next = NULL; in reqsk_fastopen_remove()
134 fastopenq->rskq_rst_tail = req; in reqsk_fastopen_remove()
/linux-4.4.14/arch/x86/crypto/
Daesni-intel_glue.c595 struct lrw_crypt_req req = { in lrw_encrypt() local
608 ret = lrw_crypt(desc, dst, src, nbytes, &req); in lrw_encrypt()
619 struct lrw_crypt_req req = { in lrw_decrypt() local
632 ret = lrw_crypt(desc, dst, src, nbytes, &req); in lrw_decrypt()
746 struct xts_crypt_req req = { in xts_encrypt() local
760 ret = xts_crypt(desc, dst, src, nbytes, &req); in xts_encrypt()
771 struct xts_crypt_req req = { in xts_decrypt() local
785 ret = xts_crypt(desc, dst, src, nbytes, &req); in xts_decrypt()
818 rfc4106_set_hash_subkey_done(struct crypto_async_request *req, int err) in rfc4106_set_hash_subkey_done() argument
820 struct aesni_gcm_set_hash_subkey_result *result = req->data; in rfc4106_set_hash_subkey_done()
[all …]
Dghash-clmulni-intel_glue.c165 static int ghash_async_init(struct ahash_request *req) in ghash_async_init() argument
167 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); in ghash_async_init()
169 struct ahash_request *cryptd_req = ahash_request_ctx(req); in ghash_async_init()
173 memcpy(cryptd_req, req, sizeof(*req)); in ghash_async_init()
181 desc->flags = req->base.flags; in ghash_async_init()
186 static int ghash_async_update(struct ahash_request *req) in ghash_async_update() argument
188 struct ahash_request *cryptd_req = ahash_request_ctx(req); in ghash_async_update()
191 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); in ghash_async_update()
195 memcpy(cryptd_req, req, sizeof(*req)); in ghash_async_update()
200 return shash_ahash_update(req, desc); in ghash_async_update()
[all …]
/linux-4.4.14/drivers/usb/chipidea/
Dudc.c364 u32 mul = hwreq->req.length / hwep->ep.maxpacket; in add_td_to_list()
366 if (hwreq->req.length == 0 in add_td_to_list()
367 || hwreq->req.length % hwep->ep.maxpacket) in add_td_to_list()
372 temp = (u32) (hwreq->req.dma + hwreq->req.actual); in add_td_to_list()
382 hwreq->req.actual += length; in add_td_to_list()
417 unsigned rest = hwreq->req.length; in _hardware_enqueue()
422 if (hwreq->req.status == -EALREADY) in _hardware_enqueue()
425 hwreq->req.status = -EALREADY; in _hardware_enqueue()
427 ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir); in _hardware_enqueue()
435 if (hwreq->req.dma % PAGE_SIZE) in _hardware_enqueue()
[all …]
/linux-4.4.14/include/linux/
Dnfs_page.h111 #define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) argument
136 struct nfs_page *req);
138 extern void nfs_unlock_request(struct nfs_page *req);
149 nfs_lock_request(struct nfs_page *req) in nfs_lock_request() argument
151 return !test_and_set_bit(PG_BUSY, &req->wb_flags); in nfs_lock_request()
160 nfs_list_add_request(struct nfs_page *req, struct list_head *head) in nfs_list_add_request() argument
162 list_add_tail(&req->wb_list, head); in nfs_list_add_request()
171 nfs_list_remove_request(struct nfs_page *req) in nfs_list_remove_request() argument
173 if (list_empty(&req->wb_list)) in nfs_list_remove_request()
175 list_del_init(&req->wb_list); in nfs_list_remove_request()
[all …]
/linux-4.4.14/drivers/media/usb/dvb-usb/
Daz6027.c301 static int az6027_usb_in_op(struct dvb_usb_device *d, u8 req, in az6027_usb_in_op() argument
310 req, in az6027_usb_in_op()
324 deb_xfer("in: req. %02x, val: %04x, ind: %04x, buffer: ", req, value, index); in az6027_usb_in_op()
332 u8 req, in az6027_usb_out_op() argument
340 deb_xfer("out: req. %02x, val: %04x, ind: %04x, buffer: ", req, value, index); in az6027_usb_out_op()
348 req, in az6027_usb_out_op()
369 u8 req; in az6027_streaming_ctrl() local
376 req = 0xBC; in az6027_streaming_ctrl()
381 ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); in az6027_streaming_ctrl()
416 u8 req; in az6027_ci_read_attribute_mem() local
[all …]
/linux-4.4.14/drivers/iio/adc/
Dtwl4030-madc.c80 struct twl4030_madc_request req; in twl4030_madc_read() local
83 req.method = madc->use_second_irq ? TWL4030_MADC_SW2 : TWL4030_MADC_SW1; in twl4030_madc_read()
85 req.channels = BIT(chan->channel); in twl4030_madc_read()
86 req.active = false; in twl4030_madc_read()
87 req.func_cb = NULL; in twl4030_madc_read()
88 req.type = TWL4030_MADC_WAIT; in twl4030_madc_read()
89 req.raw = !(mask == IIO_CHAN_INFO_PROCESSED); in twl4030_madc_read()
90 req.do_avg = (mask == IIO_CHAN_INFO_AVERAGE_RAW); in twl4030_madc_read()
92 ret = twl4030_madc_conversion(&req); in twl4030_madc_read()
96 *val = req.rbuf[chan->channel]; in twl4030_madc_read()
[all …]
