Searched refs:req (Results 1 - 200 of 1377) sorted by relevance

/linux-4.1.27/drivers/usb/gadget/
u_f.c
19 struct usb_request *req; alloc_ep_req() local
21 req = usb_ep_alloc_request(ep, GFP_ATOMIC); alloc_ep_req()
22 if (req) { alloc_ep_req()
23 req->length = len ?: default_len; alloc_ep_req()
24 req->buf = kmalloc(req->length, GFP_ATOMIC); alloc_ep_req()
25 if (!req->buf) { alloc_ep_req()
26 usb_ep_free_request(ep, req); alloc_ep_req()
27 req = NULL; alloc_ep_req()
30 return req; alloc_ep_req()
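A minimal caller-side sketch of how the alloc_ep_req() helper listed above is typically paired with its free path; example_submit(), usb_ep_queue(), and the completion handling are not in the listing and are assumptions for illustration, while kfree()/usb_ep_free_request() mirror the allocation shown in u_f.c:

	#include <linux/slab.h>
	#include <linux/usb/gadget.h>

	/* Sketch: allocate a request for ep, queue it, and on failure undo the
	 * allocation with the same calls alloc_ep_req() uses internally. */
	static int example_submit(struct usb_ep *ep, unsigned int len)
	{
		struct usb_request *req;
		int rc;

		req = alloc_ep_req(ep, len, len);	/* the helper above: len ?: default_len */
		if (!req)
			return -ENOMEM;

		/* ... fill req->buf and set req->complete before queuing ... */
		rc = usb_ep_queue(ep, req, GFP_ATOMIC);
		if (rc) {
			kfree(req->buf);		/* frees the kmalloc() done by alloc_ep_req() */
			usb_ep_free_request(ep, req);
		}
		return rc;
	}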
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/
sec_null.c
79 int null_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) null_ctx_sign() argument
81 req->rq_reqbuf->lm_secflvr = SPTLRPC_FLVR_NULL; null_ctx_sign()
83 if (!req->rq_import->imp_dlm_fake) { null_ctx_sign()
84 struct obd_device *obd = req->rq_import->imp_obd; null_ctx_sign()
85 null_encode_sec_part(req->rq_reqbuf, null_ctx_sign()
88 req->rq_reqdata_len = req->rq_reqlen; null_ctx_sign()
93 int null_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) null_ctx_verify() argument
97 LASSERT(req->rq_repdata); null_ctx_verify()
99 req->rq_repmsg = req->rq_repdata; null_ctx_verify()
100 req->rq_replen = req->rq_repdata_len; null_ctx_verify()
102 if (req->rq_early) { null_ctx_verify()
103 cksums = lustre_msg_get_cksum(req->rq_repdata); null_ctx_verify()
104 cksumc = lustre_msg_calc_cksum(req->rq_repmsg); null_ctx_verify()
155 struct ptlrpc_request *req, null_alloc_reqbuf()
158 if (!req->rq_reqbuf) { null_alloc_reqbuf()
161 LASSERT(!req->rq_pool); null_alloc_reqbuf()
162 OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_size); null_alloc_reqbuf()
163 if (!req->rq_reqbuf) null_alloc_reqbuf()
166 req->rq_reqbuf_len = alloc_size; null_alloc_reqbuf()
168 LASSERT(req->rq_pool); null_alloc_reqbuf()
169 LASSERT(req->rq_reqbuf_len >= msgsize); null_alloc_reqbuf()
170 memset(req->rq_reqbuf, 0, msgsize); null_alloc_reqbuf()
173 req->rq_reqmsg = req->rq_reqbuf; null_alloc_reqbuf()
179 struct ptlrpc_request *req) null_free_reqbuf()
181 if (!req->rq_pool) { null_free_reqbuf()
182 LASSERTF(req->rq_reqmsg == req->rq_reqbuf, null_free_reqbuf()
183 "req %p: reqmsg %p is not reqbuf %p in null sec\n", null_free_reqbuf()
184 req, req->rq_reqmsg, req->rq_reqbuf); null_free_reqbuf()
185 LASSERTF(req->rq_reqbuf_len >= req->rq_reqlen, null_free_reqbuf()
186 "req %p: reqlen %d should smaller than buflen %d\n", null_free_reqbuf()
187 req, req->rq_reqlen, req->rq_reqbuf_len); null_free_reqbuf()
189 OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len); null_free_reqbuf()
190 req->rq_reqbuf = NULL; null_free_reqbuf()
191 req->rq_reqbuf_len = 0; null_free_reqbuf()
197 struct ptlrpc_request *req, null_alloc_repbuf()
205 OBD_ALLOC_LARGE(req->rq_repbuf, msgsize); null_alloc_repbuf()
206 if (!req->rq_repbuf) null_alloc_repbuf()
209 req->rq_repbuf_len = msgsize; null_alloc_repbuf()
215 struct ptlrpc_request *req) null_free_repbuf()
217 LASSERT(req->rq_repbuf); null_free_repbuf()
219 OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len); null_free_repbuf()
220 req->rq_repbuf = NULL; null_free_repbuf()
221 req->rq_repbuf_len = 0; null_free_repbuf()
226 struct ptlrpc_request *req, null_enlarge_reqbuf()
230 struct lustre_msg *oldbuf = req->rq_reqmsg; null_enlarge_reqbuf()
233 LASSERT(req->rq_reqbuf); null_enlarge_reqbuf()
234 LASSERT(req->rq_reqbuf == req->rq_reqmsg); null_enlarge_reqbuf()
235 LASSERT(req->rq_reqbuf_len >= req->rq_reqlen); null_enlarge_reqbuf()
236 LASSERT(req->rq_reqlen == lustre_packed_msg_size(oldbuf)); null_enlarge_reqbuf()
239 oldsize = req->rq_reqbuf->lm_buflens[segment]; null_enlarge_reqbuf()
240 req->rq_reqbuf->lm_buflens[segment] = newsize; null_enlarge_reqbuf()
242 req->rq_reqbuf->lm_buflens[segment] = oldsize; null_enlarge_reqbuf()
245 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newmsg_size); null_enlarge_reqbuf()
247 if (req->rq_reqbuf_len < newmsg_size) { null_enlarge_reqbuf()
260 if (req->rq_import) null_enlarge_reqbuf()
261 spin_lock(&req->rq_import->imp_lock); null_enlarge_reqbuf()
262 memcpy(newbuf, req->rq_reqbuf, req->rq_reqlen); null_enlarge_reqbuf()
264 OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len); null_enlarge_reqbuf()
265 req->rq_reqbuf = req->rq_reqmsg = newbuf; null_enlarge_reqbuf()
266 req->rq_reqbuf_len = alloc_size; null_enlarge_reqbuf()
268 if (req->rq_import) null_enlarge_reqbuf()
269 spin_unlock(&req->rq_import->imp_lock); null_enlarge_reqbuf()
272 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize); null_enlarge_reqbuf()
273 req->rq_reqlen = newmsg_size; null_enlarge_reqbuf()
284 int null_accept(struct ptlrpc_request *req) null_accept() argument
286 LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == null_accept()
289 if (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL) { null_accept()
290 CERROR("Invalid rpc flavor 0x%x\n", req->rq_flvr.sf_rpc); null_accept()
294 req->rq_sp_from = null_decode_sec_part(req->rq_reqbuf); null_accept()
296 req->rq_reqmsg = req->rq_reqbuf; null_accept()
297 req->rq_reqlen = req->rq_reqdata_len; null_accept()
299 req->rq_svc_ctx = &null_svc_ctx; null_accept()
300 atomic_inc(&req->rq_svc_ctx->sc_refcount); null_accept()
306 int null_alloc_rs(struct ptlrpc_request *req, int msgsize) null_alloc_rs() argument
313 rs = req->rq_reply_state; null_alloc_rs()
326 rs->rs_svc_ctx = req->rq_svc_ctx; null_alloc_rs()
327 atomic_inc(&req->rq_svc_ctx->sc_refcount); null_alloc_rs()
333 req->rq_reply_state = rs; null_alloc_rs()
348 int null_authorize(struct ptlrpc_request *req) null_authorize() argument
350 struct ptlrpc_reply_state *rs = req->rq_reply_state; null_authorize()
355 rs->rs_repdata_len = req->rq_replen; null_authorize()
357 if (likely(req->rq_packed_final)) { null_authorize()
358 if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) null_authorize()
359 req->rq_reply_off = lustre_msg_early_size(); null_authorize()
361 req->rq_reply_off = 0; null_authorize()
367 req->rq_reply_off = 0; null_authorize()
154 null_alloc_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req, int msgsize) null_alloc_reqbuf() argument
178 null_free_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req) null_free_reqbuf() argument
196 null_alloc_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req, int msgsize) null_alloc_repbuf() argument
214 null_free_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req) null_free_repbuf() argument
225 null_enlarge_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req, int segment, int newsize) null_enlarge_reqbuf() argument
client.c
50 static int ptlrpc_send_new_req(struct ptlrpc_request *req);
51 static int ptlrpcd_check_work(struct ptlrpc_request *req);
128 * Prepare bulk descriptor for specified outgoing request \a req that
134 struct ptlrpc_bulk_desc *ptlrpc_prep_bulk_imp(struct ptlrpc_request *req, ptlrpc_prep_bulk_imp() argument
138 struct obd_import *imp = req->rq_import; ptlrpc_prep_bulk_imp()
146 desc->bd_import_generation = req->rq_import_generation; ptlrpc_prep_bulk_imp()
148 desc->bd_req = req; ptlrpc_prep_bulk_imp()
153 /* This makes req own desc, and free it when she frees herself */ ptlrpc_prep_bulk_imp()
154 req->rq_bulk = desc; ptlrpc_prep_bulk_imp()
214 * Set server timelimit for this req, i.e. how long are we willing to wait
217 void ptlrpc_at_set_req_timeout(struct ptlrpc_request *req) ptlrpc_at_set_req_timeout() argument
223 LASSERT(req->rq_import); ptlrpc_at_set_req_timeout()
234 req->rq_timeout = req->rq_import->imp_server_timeout ? ptlrpc_at_set_req_timeout()
237 at = &req->rq_import->imp_at; ptlrpc_at_set_req_timeout()
238 idx = import_at_get_index(req->rq_import, ptlrpc_at_set_req_timeout()
239 req->rq_request_portal); ptlrpc_at_set_req_timeout()
241 req->rq_timeout = at_est2timeout(serv_est); ptlrpc_at_set_req_timeout()
248 lustre_msg_set_timeout(req->rq_reqmsg, req->rq_timeout); ptlrpc_at_set_req_timeout()
253 static void ptlrpc_at_adj_service(struct ptlrpc_request *req, ptlrpc_at_adj_service() argument
260 LASSERT(req->rq_import); ptlrpc_at_adj_service()
261 at = &req->rq_import->imp_at; ptlrpc_at_adj_service()
263 idx = import_at_get_index(req->rq_import, req->rq_request_portal); ptlrpc_at_adj_service()
269 req->rq_import->imp_obd->obd_name, req->rq_request_portal, ptlrpc_at_adj_service()
274 int ptlrpc_at_get_net_latency(struct ptlrpc_request *req) ptlrpc_at_get_net_latency() argument
276 return AT_OFF ? 0 : at_get(&req->rq_import->imp_at.iat_net_latency); ptlrpc_at_get_net_latency()
280 static void ptlrpc_at_adj_net_latency(struct ptlrpc_request *req, ptlrpc_at_adj_net_latency() argument
287 LASSERT(req->rq_import); ptlrpc_at_adj_net_latency()
289 if (service_time > now - req->rq_sent + 3) { ptlrpc_at_adj_net_latency()
297 CDEBUG((lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ? ptlrpc_at_adj_net_latency()
301 cfs_time_sub(now, req->rq_sent)); ptlrpc_at_adj_net_latency()
306 nl = max_t(int, now - req->rq_sent - ptlrpc_at_adj_net_latency()
308 at = &req->rq_import->imp_at; ptlrpc_at_adj_net_latency()
313 req->rq_import->imp_obd->obd_name, ptlrpc_at_adj_net_latency()
315 &req->rq_import->imp_connection->c_remote_uuid), ptlrpc_at_adj_net_latency()
319 static int unpack_reply(struct ptlrpc_request *req) unpack_reply() argument
323 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) { unpack_reply()
324 rc = ptlrpc_unpack_rep_msg(req, req->rq_replen); unpack_reply()
326 DEBUG_REQ(D_ERROR, req, "unpack_rep failed: %d", rc); unpack_reply()
331 rc = lustre_unpack_rep_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF); unpack_reply()
333 DEBUG_REQ(D_ERROR, req, "unpack ptlrpc body failed: %d", rc); unpack_reply()
343 static int ptlrpc_at_recv_early_reply(struct ptlrpc_request *req) ptlrpc_at_recv_early_reply() argument
349 req->rq_early = 0; ptlrpc_at_recv_early_reply()
350 spin_unlock(&req->rq_lock); ptlrpc_at_recv_early_reply()
352 rc = sptlrpc_cli_unwrap_early_reply(req, &early_req); ptlrpc_at_recv_early_reply()
354 spin_lock(&req->rq_lock); ptlrpc_at_recv_early_reply()
361 ptlrpc_at_adj_service(req, ptlrpc_at_recv_early_reply()
363 ptlrpc_at_adj_net_latency(req, ptlrpc_at_recv_early_reply()
370 spin_lock(&req->rq_lock); ptlrpc_at_recv_early_reply()
374 /* Adjust the local timeout for this req */ ptlrpc_at_recv_early_reply()
375 ptlrpc_at_set_req_timeout(req); ptlrpc_at_recv_early_reply()
377 spin_lock(&req->rq_lock); ptlrpc_at_recv_early_reply()
378 olddl = req->rq_deadline; ptlrpc_at_recv_early_reply()
381 req->rq_deadline = get_seconds() + req->rq_timeout + ptlrpc_at_recv_early_reply()
382 ptlrpc_at_get_net_latency(req); ptlrpc_at_recv_early_reply()
384 DEBUG_REQ(D_ADAPTTO, req, ptlrpc_at_recv_early_reply()
386 req->rq_early_count, ptlrpc_at_recv_early_reply()
387 cfs_time_sub(req->rq_deadline, get_seconds()), ptlrpc_at_recv_early_reply()
388 cfs_time_sub(req->rq_deadline, olddl)); ptlrpc_at_recv_early_reply()
410 struct ptlrpc_request *req; ptlrpc_request_cache_alloc() local
412 OBD_SLAB_ALLOC_PTR_GFP(req, request_cache, flags); ptlrpc_request_cache_alloc()
413 return req; ptlrpc_request_cache_alloc()
416 void ptlrpc_request_cache_free(struct ptlrpc_request *req) ptlrpc_request_cache_free() argument
418 OBD_SLAB_FREE_PTR(req, request_cache); ptlrpc_request_cache_free()
428 struct ptlrpc_request *req; ptlrpc_free_rq_pool() local
434 req = list_entry(l, struct ptlrpc_request, rq_list); ptlrpc_free_rq_pool()
435 list_del(&req->rq_list); ptlrpc_free_rq_pool()
436 LASSERT(req->rq_reqbuf); ptlrpc_free_rq_pool()
437 LASSERT(req->rq_reqbuf_len == pool->prp_rq_size); ptlrpc_free_rq_pool()
438 OBD_FREE_LARGE(req->rq_reqbuf, pool->prp_rq_size); ptlrpc_free_rq_pool()
439 ptlrpc_request_cache_free(req); ptlrpc_free_rq_pool()
465 struct ptlrpc_request *req; ptlrpc_add_rqs_to_pool() local
469 req = ptlrpc_request_cache_alloc(GFP_NOFS); ptlrpc_add_rqs_to_pool()
470 if (!req) ptlrpc_add_rqs_to_pool()
474 ptlrpc_request_cache_free(req); ptlrpc_add_rqs_to_pool()
477 req->rq_reqbuf = msg; ptlrpc_add_rqs_to_pool()
478 req->rq_reqbuf_len = size; ptlrpc_add_rqs_to_pool()
479 req->rq_pool = pool; ptlrpc_add_rqs_to_pool()
481 list_add_tail(&req->rq_list, &pool->prp_req_list); ptlrpc_add_rqs_to_pool()
795 struct ptlrpc_request *req = ptlrpc_request_alloc(imp, format); ptlrpc_request_alloc_pack() local
798 if (req) { ptlrpc_request_alloc_pack()
799 rc = ptlrpc_request_pack(req, version, opcode); ptlrpc_request_alloc_pack()
801 ptlrpc_request_free(req); ptlrpc_request_alloc_pack()
802 req = NULL; ptlrpc_request_alloc_pack()
805 return req; ptlrpc_request_alloc_pack()
924 struct ptlrpc_request *req = ptlrpc_set_destroy() local
928 LASSERT(req->rq_phase == expected_phase); ptlrpc_set_destroy()
937 struct ptlrpc_request *req = ptlrpc_set_destroy() local
940 list_del_init(&req->rq_set_chain); ptlrpc_set_destroy()
942 LASSERT(req->rq_phase == expected_phase); ptlrpc_set_destroy()
944 if (req->rq_phase == RQ_PHASE_NEW) { ptlrpc_set_destroy()
945 ptlrpc_req_interpret(NULL, req, -EBADR); ptlrpc_set_destroy()
949 spin_lock(&req->rq_lock); ptlrpc_set_destroy()
950 req->rq_set = NULL; ptlrpc_set_destroy()
951 req->rq_invalid_rqset = 0; ptlrpc_set_destroy()
952 spin_unlock(&req->rq_lock); ptlrpc_set_destroy()
954 ptlrpc_req_finished(req); ptlrpc_set_destroy()
990 struct ptlrpc_request *req) ptlrpc_set_add_req()
992 LASSERT(list_empty(&req->rq_set_chain)); ptlrpc_set_add_req()
995 list_add_tail(&req->rq_set_chain, &set->set_requests); ptlrpc_set_add_req()
996 req->rq_set = set; ptlrpc_set_add_req()
998 req->rq_queued_time = cfs_time_current(); ptlrpc_set_add_req()
1000 if (req->rq_reqmsg != NULL) ptlrpc_set_add_req()
1001 lustre_msg_set_jobid(req->rq_reqmsg, NULL); ptlrpc_set_add_req()
1006 ptlrpc_send_new_req(req); ptlrpc_set_add_req()
1016 struct ptlrpc_request *req) ptlrpc_set_add_new_req()
1021 LASSERT(req->rq_set == NULL); ptlrpc_set_add_new_req()
1028 req->rq_set = set; ptlrpc_set_add_new_req()
1029 req->rq_queued_time = cfs_time_current(); ptlrpc_set_add_new_req()
1030 list_add_tail(&req->rq_set_chain, &set->set_new_requests); ptlrpc_set_add_new_req()
1058 struct ptlrpc_request *req, int *status) ptlrpc_import_delay_req()
1065 if (req->rq_ctx_init || req->rq_ctx_fini) { ptlrpc_import_delay_req()
1068 DEBUG_REQ(D_ERROR, req, "Uninitialized import."); ptlrpc_import_delay_req()
1072 DEBUG_REQ(lustre_msg_get_opc(req->rq_reqmsg) == OBD_PING ? ptlrpc_import_delay_req()
1073 D_HA : D_ERROR, req, "IMP_CLOSED "); ptlrpc_import_delay_req()
1075 } else if (ptlrpc_send_limit_expired(req)) { ptlrpc_import_delay_req()
1077 DEBUG_REQ(D_ERROR, req, "send limit expired "); ptlrpc_import_delay_req()
1079 } else if (req->rq_send_state == LUSTRE_IMP_CONNECTING && ptlrpc_import_delay_req()
1083 DEBUG_REQ(D_ERROR, req, "invalidate in flight"); ptlrpc_import_delay_req()
1088 DEBUG_REQ(D_NET, req, "IMP_INVALID"); ptlrpc_import_delay_req()
1090 } else if (req->rq_import_generation != imp->imp_generation) { ptlrpc_import_delay_req()
1091 DEBUG_REQ(D_ERROR, req, "req wrong generation:"); ptlrpc_import_delay_req()
1093 } else if (req->rq_send_state != imp->imp_state) { ptlrpc_import_delay_req()
1096 DEBUG_REQ(D_ERROR, req, "invalidate in flight"); ptlrpc_import_delay_req()
1098 } else if (imp->imp_dlm_fake || req->rq_no_delay) { ptlrpc_import_delay_req()
1100 } else if (req->rq_allow_replay && ptlrpc_import_delay_req()
1105 DEBUG_REQ(D_HA, req, "allow during recovery.\n"); ptlrpc_import_delay_req()
1115 * Decide if the error message regarding provided request \a req
1120 static int ptlrpc_console_allow(struct ptlrpc_request *req) ptlrpc_console_allow() argument
1125 LASSERT(req->rq_reqmsg != NULL); ptlrpc_console_allow()
1126 opc = lustre_msg_get_opc(req->rq_reqmsg); ptlrpc_console_allow()
1130 if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) && ptlrpc_console_allow()
1134 if (req->rq_timedout) ptlrpc_console_allow()
1138 err = lustre_msg_get_status(req->rq_repmsg); ptlrpc_console_allow()
1150 static int ptlrpc_check_status(struct ptlrpc_request *req) ptlrpc_check_status() argument
1154 err = lustre_msg_get_status(req->rq_repmsg); ptlrpc_check_status()
1155 if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) { ptlrpc_check_status()
1156 struct obd_import *imp = req->rq_import; ptlrpc_check_status()
1157 __u32 opc = lustre_msg_get_opc(req->rq_reqmsg); ptlrpc_check_status()
1158 if (ptlrpc_console_allow(req)) ptlrpc_check_status()
1168 DEBUG_REQ(D_INFO, req, "status is %d", err); ptlrpc_check_status()
1171 DEBUG_REQ(D_INFO, req, "status is %d", err); ptlrpc_check_status()
1182 static void ptlrpc_save_versions(struct ptlrpc_request *req) ptlrpc_save_versions() argument
1184 struct lustre_msg *repmsg = req->rq_repmsg; ptlrpc_save_versions()
1185 struct lustre_msg *reqmsg = req->rq_reqmsg; ptlrpc_save_versions()
1188 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) ptlrpc_save_versions()
1198 * Callback function called when client receives RPC reply for \a req.
1200 * The return value would be assigned to req->rq_status by the caller
1204 static int after_reply(struct ptlrpc_request *req) after_reply() argument
1206 struct obd_import *imp = req->rq_import; after_reply()
1207 struct obd_device *obd = req->rq_import->imp_obd; after_reply()
1214 LASSERT(!req->rq_receiving_reply && !req->rq_reply_unlink); after_reply()
1216 if (req->rq_reply_truncate) { after_reply()
1217 if (ptlrpc_no_resend(req)) { after_reply()
1218 DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d", after_reply()
1219 req->rq_nob_received, req->rq_repbuf_len); after_reply()
1223 sptlrpc_cli_free_repbuf(req); after_reply()
1228 req->rq_replen = req->rq_nob_received; after_reply()
1229 req->rq_nob_received = 0; after_reply()
1230 spin_lock(&req->rq_lock); after_reply()
1231 req->rq_resend = 1; after_reply()
1232 spin_unlock(&req->rq_lock); after_reply()
1240 rc = sptlrpc_cli_unwrap_reply(req); after_reply()
1242 DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc); after_reply()
1249 if (req->rq_resend) after_reply()
1252 rc = unpack_reply(req); after_reply()
1257 if (lustre_msg_get_status(req->rq_repmsg) == -EINPROGRESS && after_reply()
1258 ptlrpc_no_resend(req) == 0 && !req->rq_no_retry_einprogress) { after_reply()
1261 DEBUG_REQ(D_RPCTRACE, req, "Resending request on EINPROGRESS"); after_reply()
1262 spin_lock(&req->rq_lock); after_reply()
1263 req->rq_resend = 1; after_reply()
1264 spin_unlock(&req->rq_lock); after_reply()
1265 req->rq_nr_resend++; after_reply()
1268 if (!req->rq_bulk) { after_reply()
1271 req->rq_xid = ptlrpc_next_xid(); after_reply()
1272 DEBUG_REQ(D_RPCTRACE, req, "Allocating new xid for resend on EINPROGRESS"); after_reply()
1276 ptlrpc_at_set_req_timeout(req); after_reply()
1281 if (req->rq_nr_resend > req->rq_timeout) after_reply()
1282 req->rq_sent = now + req->rq_timeout; after_reply()
1284 req->rq_sent = now + req->rq_nr_resend; after_reply()
1290 timediff = cfs_timeval_sub(&work_start, &req->rq_arrival_time, NULL); after_reply()
1294 ptlrpc_lprocfs_rpc_sent(req, timediff); after_reply()
1297 if (lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_REPLY && after_reply()
1298 lustre_msg_get_type(req->rq_repmsg) != PTL_RPC_MSG_ERR) { after_reply()
1299 DEBUG_REQ(D_ERROR, req, "invalid packet received (type=%u)", after_reply()
1300 lustre_msg_get_type(req->rq_repmsg)); after_reply()
1304 if (lustre_msg_get_opc(req->rq_reqmsg) != OBD_PING) after_reply()
1306 ptlrpc_at_adj_service(req, lustre_msg_get_timeout(req->rq_repmsg)); after_reply()
1307 ptlrpc_at_adj_net_latency(req, after_reply()
1308 lustre_msg_get_service_time(req->rq_repmsg)); after_reply()
1310 rc = ptlrpc_check_status(req); after_reply()
1320 if (req->rq_send_state != LUSTRE_IMP_FULL || after_reply()
1324 ptlrpc_request_handle_notconn(req); after_reply()
1332 ldlm_cli_update_pool(req); after_reply()
1338 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY)) { after_reply()
1339 req->rq_transno = lustre_msg_get_transno(req->rq_repmsg); after_reply()
1340 lustre_msg_set_transno(req->rq_reqmsg, req->rq_transno); after_reply()
1349 if (req->rq_transno != 0 && after_reply()
1350 (req->rq_transno > after_reply()
1351 lustre_msg_get_last_committed(req->rq_repmsg) || after_reply()
1352 req->rq_replay)) { after_reply()
1354 ptlrpc_save_versions(req); after_reply()
1355 ptlrpc_retain_replayable_request(req, imp); after_reply()
1356 } else if (req->rq_commit_cb != NULL && after_reply()
1357 list_empty(&req->rq_replay_list)) { after_reply()
1362 req->rq_commit_cb(req); after_reply()
1369 if (lustre_msg_get_last_committed(req->rq_repmsg)) { after_reply()
1371 lustre_msg_get_last_committed(req->rq_repmsg); after_reply()
1397 * Helper function to send request \a req over the network for the first time
1401 static int ptlrpc_send_new_req(struct ptlrpc_request *req) ptlrpc_send_new_req() argument
1403 struct obd_import *imp = req->rq_import; ptlrpc_send_new_req()
1406 LASSERT(req->rq_phase == RQ_PHASE_NEW); ptlrpc_send_new_req()
1407 if (req->rq_sent && (req->rq_sent > get_seconds()) && ptlrpc_send_new_req()
1408 (!req->rq_generation_set || ptlrpc_send_new_req()
1409 req->rq_import_generation == imp->imp_generation)) ptlrpc_send_new_req()
1412 ptlrpc_rqphase_move(req, RQ_PHASE_RPC); ptlrpc_send_new_req()
1416 if (!req->rq_generation_set) ptlrpc_send_new_req()
1417 req->rq_import_generation = imp->imp_generation; ptlrpc_send_new_req()
1419 if (ptlrpc_import_delay_req(imp, req, &rc)) { ptlrpc_send_new_req()
1420 spin_lock(&req->rq_lock); ptlrpc_send_new_req()
1421 req->rq_waiting = 1; ptlrpc_send_new_req()
1422 spin_unlock(&req->rq_lock); ptlrpc_send_new_req()
1424 DEBUG_REQ(D_HA, req, "req from PID %d waiting for recovery: (%s != %s)", ptlrpc_send_new_req()
1425 lustre_msg_get_status(req->rq_reqmsg), ptlrpc_send_new_req()
1426 ptlrpc_import_state_name(req->rq_send_state), ptlrpc_send_new_req()
1428 LASSERT(list_empty(&req->rq_list)); ptlrpc_send_new_req()
1429 list_add_tail(&req->rq_list, &imp->imp_delayed_list); ptlrpc_send_new_req()
1430 atomic_inc(&req->rq_import->imp_inflight); ptlrpc_send_new_req()
1437 req->rq_status = rc; ptlrpc_send_new_req()
1438 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); ptlrpc_send_new_req()
1442 LASSERT(list_empty(&req->rq_list)); ptlrpc_send_new_req()
1443 list_add_tail(&req->rq_list, &imp->imp_sending_list); ptlrpc_send_new_req()
1444 atomic_inc(&req->rq_import->imp_inflight); ptlrpc_send_new_req()
1447 lustre_msg_set_status(req->rq_reqmsg, current_pid()); ptlrpc_send_new_req()
1449 rc = sptlrpc_req_refresh_ctx(req, -1); ptlrpc_send_new_req()
1451 if (req->rq_err) { ptlrpc_send_new_req()
1452 req->rq_status = rc; ptlrpc_send_new_req()
1455 spin_lock(&req->rq_lock); ptlrpc_send_new_req()
1456 req->rq_wait_ctx = 1; ptlrpc_send_new_req()
1457 spin_unlock(&req->rq_lock); ptlrpc_send_new_req()
1464 lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, ptlrpc_send_new_req()
1466 lustre_msg_get_opc(req->rq_reqmsg)); ptlrpc_send_new_req()
1468 rc = ptl_send_rpc(req, 0); ptlrpc_send_new_req()
1470 DEBUG_REQ(D_HA, req, "send failed (%d); expect timeout", rc); ptlrpc_send_new_req()
1471 spin_lock(&req->rq_lock); ptlrpc_send_new_req()
1472 req->rq_net_err = 1; ptlrpc_send_new_req()
1473 spin_unlock(&req->rq_lock); ptlrpc_send_new_req()
1521 struct ptlrpc_request *req = ptlrpc_check_set() local
1524 struct obd_import *imp = req->rq_import; ptlrpc_check_set()
1536 if (req->rq_phase == RQ_PHASE_NEW && ptlrpc_check_set()
1537 ptlrpc_send_new_req(req)) { ptlrpc_check_set()
1542 if (req->rq_phase == RQ_PHASE_NEW && req->rq_sent) ptlrpc_check_set()
1546 if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend && ptlrpc_check_set()
1547 req->rq_sent > get_seconds()) ptlrpc_check_set()
1550 if (!(req->rq_phase == RQ_PHASE_RPC || ptlrpc_check_set()
1551 req->rq_phase == RQ_PHASE_BULK || ptlrpc_check_set()
1552 req->rq_phase == RQ_PHASE_INTERPRET || ptlrpc_check_set()
1553 req->rq_phase == RQ_PHASE_UNREGISTERING || ptlrpc_check_set()
1554 req->rq_phase == RQ_PHASE_COMPLETE)) { ptlrpc_check_set()
1555 DEBUG_REQ(D_ERROR, req, "bad phase %x", req->rq_phase); ptlrpc_check_set()
1559 if (req->rq_phase == RQ_PHASE_UNREGISTERING) { ptlrpc_check_set()
1560 LASSERT(req->rq_next_phase != req->rq_phase); ptlrpc_check_set()
1561 LASSERT(req->rq_next_phase != RQ_PHASE_UNDEFINED); ptlrpc_check_set()
1570 if (ptlrpc_client_recv_or_unlink(req) || ptlrpc_check_set()
1571 ptlrpc_client_bulk_active(req)) ptlrpc_check_set()
1591 ptlrpc_rqphase_move(req, req->rq_next_phase); ptlrpc_check_set()
1594 if (req->rq_phase == RQ_PHASE_COMPLETE) { ptlrpc_check_set()
1595 list_move_tail(&req->rq_set_chain, &comp_reqs); ptlrpc_check_set()
1599 if (req->rq_phase == RQ_PHASE_INTERPRET) ptlrpc_check_set()
1605 if (req->rq_net_err && !req->rq_timedout) { ptlrpc_check_set()
1606 ptlrpc_expire_one_request(req, 1); ptlrpc_check_set()
1611 if (ptlrpc_client_recv_or_unlink(req) || ptlrpc_check_set()
1612 ptlrpc_client_bulk_active(req)) ptlrpc_check_set()
1615 if (req->rq_no_resend) { ptlrpc_check_set()
1616 if (req->rq_status == 0) ptlrpc_check_set()
1617 req->rq_status = -EIO; ptlrpc_check_set()
1618 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); ptlrpc_check_set()
1625 if (req->rq_err) { ptlrpc_check_set()
1626 spin_lock(&req->rq_lock); ptlrpc_check_set()
1627 req->rq_replied = 0; ptlrpc_check_set()
1628 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1629 if (req->rq_status == 0) ptlrpc_check_set()
1630 req->rq_status = -EIO; ptlrpc_check_set()
1631 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); ptlrpc_check_set()
1644 if (req->rq_intr && (req->rq_timedout || req->rq_waiting || ptlrpc_check_set()
1645 req->rq_wait_ctx)) { ptlrpc_check_set()
1646 req->rq_status = -EINTR; ptlrpc_check_set()
1647 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); ptlrpc_check_set()
1651 if (req->rq_phase == RQ_PHASE_RPC) { ptlrpc_check_set()
1652 if (req->rq_timedout || req->rq_resend || ptlrpc_check_set()
1653 req->rq_waiting || req->rq_wait_ctx) { ptlrpc_check_set()
1656 if (!ptlrpc_unregister_reply(req, 1)) ptlrpc_check_set()
1660 if (ptlrpc_import_delay_req(imp, req, ptlrpc_check_set()
1664 list_del_init(&req->rq_list); ptlrpc_check_set()
1665 list_add_tail(&req->rq_list, ptlrpc_check_set()
1673 req->rq_status = status; ptlrpc_check_set()
1674 ptlrpc_rqphase_move(req, ptlrpc_check_set()
1679 if (ptlrpc_no_resend(req) && ptlrpc_check_set()
1680 !req->rq_wait_ctx) { ptlrpc_check_set()
1681 req->rq_status = -ENOTCONN; ptlrpc_check_set()
1682 ptlrpc_rqphase_move(req, ptlrpc_check_set()
1688 list_del_init(&req->rq_list); ptlrpc_check_set()
1689 list_add_tail(&req->rq_list, ptlrpc_check_set()
1694 spin_lock(&req->rq_lock); ptlrpc_check_set()
1695 req->rq_waiting = 0; ptlrpc_check_set()
1696 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1698 if (req->rq_timedout || req->rq_resend) { ptlrpc_check_set()
1700 * let's mark req as resend. */ ptlrpc_check_set()
1701 spin_lock(&req->rq_lock); ptlrpc_check_set()
1702 req->rq_resend = 1; ptlrpc_check_set()
1703 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1704 if (req->rq_bulk) { ptlrpc_check_set()
1707 if (!ptlrpc_unregister_bulk(req, 1)) ptlrpc_check_set()
1711 old_xid = req->rq_xid; ptlrpc_check_set()
1712 req->rq_xid = ptlrpc_next_xid(); ptlrpc_check_set()
1714 old_xid, req->rq_xid); ptlrpc_check_set()
1721 status = sptlrpc_req_refresh_ctx(req, -1); ptlrpc_check_set()
1723 if (req->rq_err) { ptlrpc_check_set()
1724 req->rq_status = status; ptlrpc_check_set()
1725 spin_lock(&req->rq_lock); ptlrpc_check_set()
1726 req->rq_wait_ctx = 0; ptlrpc_check_set()
1727 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1730 spin_lock(&req->rq_lock); ptlrpc_check_set()
1731 req->rq_wait_ctx = 1; ptlrpc_check_set()
1732 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1737 spin_lock(&req->rq_lock); ptlrpc_check_set()
1738 req->rq_wait_ctx = 0; ptlrpc_check_set()
1739 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1742 rc = ptl_send_rpc(req, 0); ptlrpc_check_set()
1744 DEBUG_REQ(D_HA, req, ptlrpc_check_set()
1747 spin_lock(&req->rq_lock); ptlrpc_check_set()
1748 req->rq_net_err = 1; ptlrpc_check_set()
1749 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1756 spin_lock(&req->rq_lock); ptlrpc_check_set()
1758 if (ptlrpc_client_early(req)) { ptlrpc_check_set()
1759 ptlrpc_at_recv_early_reply(req); ptlrpc_check_set()
1760 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1765 if (ptlrpc_client_recv(req)) { ptlrpc_check_set()
1766 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1771 if (!ptlrpc_client_replied(req)) { ptlrpc_check_set()
1772 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1776 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1780 unregistered = ptlrpc_unregister_reply(req, 1); ptlrpc_check_set()
1784 req->rq_status = after_reply(req); ptlrpc_check_set()
1785 if (req->rq_resend) ptlrpc_check_set()
1793 if (req->rq_bulk == NULL || req->rq_status < 0) { ptlrpc_check_set()
1794 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); ptlrpc_check_set()
1798 ptlrpc_rqphase_move(req, RQ_PHASE_BULK); ptlrpc_check_set()
1801 LASSERT(req->rq_phase == RQ_PHASE_BULK); ptlrpc_check_set()
1802 if (ptlrpc_client_bulk_active(req)) ptlrpc_check_set()
1805 if (req->rq_bulk->bd_failure) { ptlrpc_check_set()
1810 DEBUG_REQ(D_ERROR, req, "bulk transfer failed"); ptlrpc_check_set()
1811 req->rq_status = -EIO; ptlrpc_check_set()
1814 ptlrpc_rqphase_move(req, RQ_PHASE_INTERPRET); ptlrpc_check_set()
1817 LASSERT(req->rq_phase == RQ_PHASE_INTERPRET); ptlrpc_check_set()
1821 if (!unregistered && !ptlrpc_unregister_reply(req, 1)) { ptlrpc_check_set()
1823 ptlrpc_unregister_bulk(req, 1); ptlrpc_check_set()
1827 if (!ptlrpc_unregister_bulk(req, 1)) ptlrpc_check_set()
1832 LASSERT(!req->rq_receiving_reply); ptlrpc_check_set()
1834 ptlrpc_req_interpret(env, req, req->rq_status); ptlrpc_check_set()
1836 if (ptlrpcd_check_work(req)) { ptlrpc_check_set()
1840 ptlrpc_rqphase_move(req, RQ_PHASE_COMPLETE); ptlrpc_check_set()
1842 CDEBUG(req->rq_reqmsg != NULL ? D_RPCTRACE : 0, ptlrpc_check_set()
1845 lustre_msg_get_status(req->rq_reqmsg), req->rq_xid, ptlrpc_check_set()
1847 lustre_msg_get_opc(req->rq_reqmsg)); ptlrpc_check_set()
1852 * ptlrpc_import_delay_req(req, status) find it impossible to ptlrpc_check_set()
1854 if (!list_empty(&req->rq_list)) { ptlrpc_check_set()
1855 list_del_init(&req->rq_list); ptlrpc_check_set()
1870 list_del_init(&req->rq_set_chain); ptlrpc_check_set()
1871 spin_lock(&req->rq_lock); ptlrpc_check_set()
1872 req->rq_set = NULL; ptlrpc_check_set()
1873 req->rq_invalid_rqset = 0; ptlrpc_check_set()
1874 spin_unlock(&req->rq_lock); ptlrpc_check_set()
1877 if (req->rq_status != 0) ptlrpc_check_set()
1878 set->set_rc = req->rq_status; ptlrpc_check_set()
1879 ptlrpc_req_finished(req); ptlrpc_check_set()
1881 list_move_tail(&req->rq_set_chain, &comp_reqs); ptlrpc_check_set()
1895 * Time out request \a req. is \a async_unlink is set, that means do not wait
1899 int ptlrpc_expire_one_request(struct ptlrpc_request *req, int async_unlink) ptlrpc_expire_one_request() argument
1901 struct obd_import *imp = req->rq_import; ptlrpc_expire_one_request()
1904 spin_lock(&req->rq_lock); ptlrpc_expire_one_request()
1905 req->rq_timedout = 1; ptlrpc_expire_one_request()
1906 spin_unlock(&req->rq_lock); ptlrpc_expire_one_request()
1908 DEBUG_REQ(D_WARNING, req, "Request sent has %s: [sent "CFS_DURATION_T ptlrpc_expire_one_request()
1910 req->rq_net_err ? "failed due to network error" : ptlrpc_expire_one_request()
1911 ((req->rq_real_sent == 0 || ptlrpc_expire_one_request()
1912 time_before((unsigned long)req->rq_real_sent, (unsigned long)req->rq_sent) || ptlrpc_expire_one_request()
1913 cfs_time_aftereq(req->rq_real_sent, req->rq_deadline)) ? ptlrpc_expire_one_request()
1915 req->rq_sent, req->rq_real_sent); ptlrpc_expire_one_request()
1920 ptlrpc_unregister_reply(req, async_unlink); ptlrpc_expire_one_request()
1921 ptlrpc_unregister_bulk(req, async_unlink); ptlrpc_expire_one_request()
1927 DEBUG_REQ(D_HA, req, "NULL import: already cleaned up?"); ptlrpc_expire_one_request()
1939 if (req->rq_ctx_init || req->rq_ctx_fini || ptlrpc_expire_one_request()
1940 req->rq_send_state != LUSTRE_IMP_FULL || ptlrpc_expire_one_request()
1942 DEBUG_REQ(D_RPCTRACE, req, "err -110, sent_state=%s (now=%s)", ptlrpc_expire_one_request()
1943 ptlrpc_import_state_name(req->rq_send_state), ptlrpc_expire_one_request()
1945 spin_lock(&req->rq_lock); ptlrpc_expire_one_request()
1946 req->rq_status = -ETIMEDOUT; ptlrpc_expire_one_request()
1947 req->rq_err = 1; ptlrpc_expire_one_request()
1948 spin_unlock(&req->rq_lock); ptlrpc_expire_one_request()
1954 if (ptlrpc_no_resend(req)) { ptlrpc_expire_one_request()
1955 DEBUG_REQ(D_RPCTRACE, req, "TIMEOUT-NORESEND:"); ptlrpc_expire_one_request()
1959 ptlrpc_fail_import(imp, lustre_msg_get_conn_cnt(req->rq_reqmsg)); ptlrpc_expire_one_request()
1981 struct ptlrpc_request *req = ptlrpc_expired_set() local
1986 if (req->rq_wait_ctx) ptlrpc_expired_set()
1990 if (!((req->rq_phase == RQ_PHASE_RPC && ptlrpc_expired_set()
1991 !req->rq_waiting && !req->rq_resend) || ptlrpc_expired_set()
1992 (req->rq_phase == RQ_PHASE_BULK))) ptlrpc_expired_set()
1995 if (req->rq_timedout || /* already dealt with */ ptlrpc_expired_set()
1996 req->rq_deadline > now) /* not expired */ ptlrpc_expired_set()
2001 ptlrpc_expire_one_request(req, 1); ptlrpc_expired_set()
2014 * Sets rq_intr flag in \a req under spinlock.
2016 void ptlrpc_mark_interrupted(struct ptlrpc_request *req) ptlrpc_mark_interrupted() argument
2018 spin_lock(&req->rq_lock); ptlrpc_mark_interrupted()
2019 req->rq_intr = 1; ptlrpc_mark_interrupted()
2020 spin_unlock(&req->rq_lock); ptlrpc_mark_interrupted()
2037 struct ptlrpc_request *req = ptlrpc_interrupted_set() local
2041 if (req->rq_phase != RQ_PHASE_RPC && ptlrpc_interrupted_set()
2042 req->rq_phase != RQ_PHASE_UNREGISTERING) ptlrpc_interrupted_set()
2045 ptlrpc_mark_interrupted(req); ptlrpc_interrupted_set()
2058 struct ptlrpc_request *req; ptlrpc_set_next_timeout() local
2062 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); ptlrpc_set_next_timeout()
2067 if (!(((req->rq_phase == RQ_PHASE_RPC) && !req->rq_waiting) || ptlrpc_set_next_timeout()
2068 (req->rq_phase == RQ_PHASE_BULK) || ptlrpc_set_next_timeout()
2069 (req->rq_phase == RQ_PHASE_NEW))) ptlrpc_set_next_timeout()
2075 if (req->rq_timedout) ptlrpc_set_next_timeout()
2081 if (req->rq_wait_ctx) ptlrpc_set_next_timeout()
2084 if (req->rq_phase == RQ_PHASE_NEW) ptlrpc_set_next_timeout()
2085 deadline = req->rq_sent; ptlrpc_set_next_timeout()
2086 else if (req->rq_phase == RQ_PHASE_RPC && req->rq_resend) ptlrpc_set_next_timeout()
2087 deadline = req->rq_sent; ptlrpc_set_next_timeout()
2089 deadline = req->rq_sent + req->rq_timeout; ptlrpc_set_next_timeout()
2109 struct ptlrpc_request *req; ptlrpc_set_wait() local
2117 req = list_entry(tmp, struct ptlrpc_request, ptlrpc_set_wait()
2119 if (req->rq_phase == RQ_PHASE_NEW) ptlrpc_set_wait()
2120 (void)ptlrpc_send_new_req(req); ptlrpc_set_wait()
2130 * req times out */ ptlrpc_set_wait()
2148 * complete, or an in-flight req times out. ptlrpc_set_wait()
2183 req = list_entry(tmp, struct ptlrpc_request, ptlrpc_set_wait()
2185 spin_lock(&req->rq_lock); ptlrpc_set_wait()
2186 req->rq_invalid_rqset = 1; ptlrpc_set_wait()
2187 spin_unlock(&req->rq_lock); ptlrpc_set_wait()
2196 req = list_entry(tmp, struct ptlrpc_request, rq_set_chain); ptlrpc_set_wait()
2198 LASSERT(req->rq_phase == RQ_PHASE_COMPLETE); ptlrpc_set_wait()
2199 if (req->rq_status != 0) ptlrpc_set_wait()
2200 rc = req->rq_status; ptlrpc_set_wait()
2237 LASSERTF(!request->rq_receiving_reply, "req %p\n", request); __ptlrpc_free_req()
2238 LASSERTF(request->rq_rqbd == NULL, "req %p\n", request);/* client-side */ __ptlrpc_free_req()
2239 LASSERTF(list_empty(&request->rq_list), "req %p\n", request); __ptlrpc_free_req()
2240 LASSERTF(list_empty(&request->rq_set_chain), "req %p\n", request); __ptlrpc_free_req()
2241 LASSERTF(list_empty(&request->rq_exp_list), "req %p\n", request); __ptlrpc_free_req()
2242 LASSERTF(!request->rq_replay, "req %p\n", request); __ptlrpc_free_req()
2255 LASSERTF(list_empty(&request->rq_replay_list), "req %p\n", request); __ptlrpc_free_req()
2400 * unlinked before returning a req to the pool. ptlrpc_unregister_reply()
2429 static void ptlrpc_free_request(struct ptlrpc_request *req) ptlrpc_free_request() argument
2431 spin_lock(&req->rq_lock); ptlrpc_free_request()
2432 req->rq_replay = 0; ptlrpc_free_request()
2433 spin_unlock(&req->rq_lock); ptlrpc_free_request()
2435 if (req->rq_commit_cb != NULL) ptlrpc_free_request()
2436 req->rq_commit_cb(req); ptlrpc_free_request()
2437 list_del_init(&req->rq_replay_list); ptlrpc_free_request()
2439 __ptlrpc_req_finished(req, 1); ptlrpc_free_request()
2445 void ptlrpc_request_committed(struct ptlrpc_request *req, int force) ptlrpc_request_committed() argument
2447 struct obd_import *imp = req->rq_import; ptlrpc_request_committed()
2450 if (list_empty(&req->rq_replay_list)) { ptlrpc_request_committed()
2455 if (force || req->rq_transno <= imp->imp_peer_committed_transno) ptlrpc_request_committed()
2456 ptlrpc_free_request(req); ptlrpc_request_committed()
2472 struct ptlrpc_request *req, *saved; ptlrpc_free_committed() local
2495 list_for_each_entry_safe(req, saved, &imp->imp_replay_list, ptlrpc_free_committed()
2498 LASSERT(req != last_req); ptlrpc_free_committed()
2499 last_req = req; ptlrpc_free_committed()
2501 if (req->rq_transno == 0) { ptlrpc_free_committed()
2502 DEBUG_REQ(D_EMERG, req, "zero transno during replay"); ptlrpc_free_committed()
2505 if (req->rq_import_generation < imp->imp_generation) { ptlrpc_free_committed()
2506 DEBUG_REQ(D_RPCTRACE, req, "free request with old gen"); ptlrpc_free_committed()
2511 if (req->rq_transno > imp->imp_peer_committed_transno) { ptlrpc_free_committed()
2512 DEBUG_REQ(D_RPCTRACE, req, "stopping search"); ptlrpc_free_committed()
2516 if (req->rq_replay) { ptlrpc_free_committed()
2517 DEBUG_REQ(D_RPCTRACE, req, "keeping (FL_REPLAY)"); ptlrpc_free_committed()
2518 list_move_tail(&req->rq_replay_list, ptlrpc_free_committed()
2523 DEBUG_REQ(D_INFO, req, "commit (last_committed %llu)", ptlrpc_free_committed()
2526 ptlrpc_free_request(req); ptlrpc_free_committed()
2531 list_for_each_entry_safe(req, saved, &imp->imp_committed_list, ptlrpc_free_committed()
2533 LASSERT(req->rq_transno != 0); ptlrpc_free_committed()
2534 if (req->rq_import_generation < imp->imp_generation) { ptlrpc_free_committed()
2535 DEBUG_REQ(D_RPCTRACE, req, "free stale open request"); ptlrpc_free_committed()
2536 ptlrpc_free_request(req); ptlrpc_free_committed()
2552 void ptlrpc_resend_req(struct ptlrpc_request *req) ptlrpc_resend_req() argument
2554 DEBUG_REQ(D_HA, req, "going to resend"); ptlrpc_resend_req()
2555 spin_lock(&req->rq_lock); ptlrpc_resend_req()
2559 if (ptlrpc_client_replied(req)) { ptlrpc_resend_req()
2560 spin_unlock(&req->rq_lock); ptlrpc_resend_req()
2561 DEBUG_REQ(D_HA, req, "it has reply, so skip it"); ptlrpc_resend_req()
2565 lustre_msg_set_handle(req->rq_reqmsg, &(struct lustre_handle){ 0 }); ptlrpc_resend_req()
2566 req->rq_status = -EAGAIN; ptlrpc_resend_req()
2568 req->rq_resend = 1; ptlrpc_resend_req()
2569 req->rq_net_err = 0; ptlrpc_resend_req()
2570 req->rq_timedout = 0; ptlrpc_resend_req()
2571 if (req->rq_bulk) { ptlrpc_resend_req()
2572 __u64 old_xid = req->rq_xid; ptlrpc_resend_req()
2575 req->rq_xid = ptlrpc_next_xid(); ptlrpc_resend_req()
2577 old_xid, req->rq_xid); ptlrpc_resend_req()
2579 ptlrpc_client_wake_req(req); ptlrpc_resend_req()
2580 spin_unlock(&req->rq_lock); ptlrpc_resend_req()
2585 void ptlrpc_restart_req(struct ptlrpc_request *req) ptlrpc_restart_req() argument
2587 DEBUG_REQ(D_HA, req, "restarting (possibly-)completed request"); ptlrpc_restart_req()
2588 req->rq_status = -ERESTARTSYS; ptlrpc_restart_req()
2590 spin_lock(&req->rq_lock); ptlrpc_restart_req()
2591 req->rq_restart = 1; ptlrpc_restart_req()
2592 req->rq_timedout = 0; ptlrpc_restart_req()
2593 ptlrpc_client_wake_req(req); ptlrpc_restart_req()
2594 spin_unlock(&req->rq_lock); ptlrpc_restart_req()
2599 * Grab additional reference on a request \a req
2601 struct ptlrpc_request *ptlrpc_request_addref(struct ptlrpc_request *req) ptlrpc_request_addref() argument
2603 atomic_inc(&req->rq_refcount); ptlrpc_request_addref()
2604 return req; ptlrpc_request_addref()
2612 void ptlrpc_retain_replayable_request(struct ptlrpc_request *req, ptlrpc_retain_replayable_request() argument
2619 if (req->rq_transno == 0) { ptlrpc_retain_replayable_request()
2620 DEBUG_REQ(D_EMERG, req, "saving request with zero transno"); ptlrpc_retain_replayable_request()
2626 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT); ptlrpc_retain_replayable_request()
2629 if (!list_empty(&req->rq_replay_list)) ptlrpc_retain_replayable_request()
2632 lustre_msg_add_flags(req->rq_reqmsg, MSG_REPLAY); ptlrpc_retain_replayable_request()
2636 ptlrpc_request_addref(req); ptlrpc_retain_replayable_request()
2644 * opens, so use req->rq_xid as a secondary key. ptlrpc_retain_replayable_request()
2648 if (iter->rq_transno > req->rq_transno) ptlrpc_retain_replayable_request()
2651 if (iter->rq_transno == req->rq_transno) { ptlrpc_retain_replayable_request()
2652 LASSERT(iter->rq_xid != req->rq_xid); ptlrpc_retain_replayable_request()
2653 if (iter->rq_xid > req->rq_xid) ptlrpc_retain_replayable_request()
2657 list_add(&req->rq_replay_list, &iter->rq_replay_list); ptlrpc_retain_replayable_request()
2661 list_add(&req->rq_replay_list, &imp->imp_replay_list); ptlrpc_retain_replayable_request()
2669 int ptlrpc_queue_wait(struct ptlrpc_request *req) ptlrpc_queue_wait() argument
2674 LASSERT(req->rq_set == NULL); ptlrpc_queue_wait()
2675 LASSERT(!req->rq_receiving_reply); ptlrpc_queue_wait()
2684 lustre_msg_set_status(req->rq_reqmsg, current_pid()); ptlrpc_queue_wait()
2687 ptlrpc_request_addref(req); ptlrpc_queue_wait()
2688 ptlrpc_set_add_req(set, req); ptlrpc_queue_wait()
2707 struct ptlrpc_request *req, ptlrpc_replay_interpret()
2711 struct obd_import *imp = req->rq_import; ptlrpc_replay_interpret()
2715 if (!ptlrpc_client_replied(req)) { ptlrpc_replay_interpret()
2721 if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR && ptlrpc_replay_interpret()
2722 (lustre_msg_get_status(req->rq_repmsg) == -ENOTCONN || ptlrpc_replay_interpret()
2723 lustre_msg_get_status(req->rq_repmsg) == -ENODEV)) { ptlrpc_replay_interpret()
2724 rc = lustre_msg_get_status(req->rq_repmsg); ptlrpc_replay_interpret()
2729 if (lustre_msg_get_status(req->rq_repmsg) == -EOVERFLOW) { ptlrpc_replay_interpret()
2731 DEBUG_REQ(D_WARNING, req, "Version mismatch during replay\n"); ptlrpc_replay_interpret()
2736 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status); ptlrpc_replay_interpret()
2739 LASSERTF(lustre_msg_get_transno(req->rq_reqmsg) == ptlrpc_replay_interpret()
2740 lustre_msg_get_transno(req->rq_repmsg) || ptlrpc_replay_interpret()
2741 lustre_msg_get_transno(req->rq_repmsg) == 0, ptlrpc_replay_interpret()
2743 lustre_msg_get_transno(req->rq_reqmsg), ptlrpc_replay_interpret()
2744 lustre_msg_get_transno(req->rq_repmsg)); ptlrpc_replay_interpret()
2749 if (lustre_msg_get_flags(req->rq_repmsg) & MSG_VERSION_REPLAY) ptlrpc_replay_interpret()
2751 imp->imp_last_replay_transno = lustre_msg_get_transno(req->rq_reqmsg); ptlrpc_replay_interpret()
2756 if (req->rq_transno > lustre_msg_get_transno(req->rq_reqmsg)) { ptlrpc_replay_interpret()
2757 DEBUG_REQ(D_ERROR, req, ptlrpc_replay_interpret()
2759 req->rq_transno, ptlrpc_replay_interpret()
2760 lustre_msg_get_transno(req->rq_reqmsg)); ptlrpc_replay_interpret()
2765 DEBUG_REQ(D_HA, req, "got rep"); ptlrpc_replay_interpret()
2768 if (req->rq_replay_cb) ptlrpc_replay_interpret()
2769 req->rq_replay_cb(req); ptlrpc_replay_interpret()
2771 if (ptlrpc_client_replied(req) && ptlrpc_replay_interpret()
2772 lustre_msg_get_status(req->rq_repmsg) != aa->praa_old_status) { ptlrpc_replay_interpret()
2773 DEBUG_REQ(D_ERROR, req, "status %d, old was %d", ptlrpc_replay_interpret()
2774 lustre_msg_get_status(req->rq_repmsg), ptlrpc_replay_interpret()
2778 lustre_msg_set_status(req->rq_repmsg, aa->praa_old_status); ptlrpc_replay_interpret()
2785 if (req->rq_transno == 0) ptlrpc_replay_interpret()
2791 req->rq_send_state = aa->praa_old_state; ptlrpc_replay_interpret()
2805 int ptlrpc_replay_req(struct ptlrpc_request *req) ptlrpc_replay_req() argument
2809 LASSERT(req->rq_import->imp_state == LUSTRE_IMP_REPLAY); ptlrpc_replay_req()
2811 LASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); ptlrpc_replay_req()
2812 aa = ptlrpc_req_async_args(req); ptlrpc_replay_req()
2816 aa->praa_old_state = req->rq_send_state; ptlrpc_replay_req()
2817 req->rq_send_state = LUSTRE_IMP_REPLAY; ptlrpc_replay_req()
2818 req->rq_phase = RQ_PHASE_NEW; ptlrpc_replay_req()
2819 req->rq_next_phase = RQ_PHASE_UNDEFINED; ptlrpc_replay_req()
2820 if (req->rq_repmsg) ptlrpc_replay_req()
2821 aa->praa_old_status = lustre_msg_get_status(req->rq_repmsg); ptlrpc_replay_req()
2822 req->rq_status = 0; ptlrpc_replay_req()
2823 req->rq_interpret_reply = ptlrpc_replay_interpret; ptlrpc_replay_req()
2825 ptlrpc_at_set_req_timeout(req); ptlrpc_replay_req()
2829 lustre_msg_set_service_time(req->rq_reqmsg, ptlrpc_replay_req()
2830 ptlrpc_at_get_net_latency(req)); ptlrpc_replay_req()
2831 DEBUG_REQ(D_HA, req, "REPLAY"); ptlrpc_replay_req()
2833 atomic_inc(&req->rq_import->imp_replay_inflight); ptlrpc_replay_req()
2834 ptlrpc_request_addref(req); /* ptlrpcd needs a ref */ ptlrpc_replay_req()
2836 ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1); ptlrpc_replay_req()
2859 struct ptlrpc_request *req = ptlrpc_abort_inflight() local
2862 DEBUG_REQ(D_RPCTRACE, req, "inflight"); ptlrpc_abort_inflight()
2864 spin_lock(&req->rq_lock); ptlrpc_abort_inflight()
2865 if (req->rq_import_generation < imp->imp_generation) { ptlrpc_abort_inflight()
2866 req->rq_err = 1; ptlrpc_abort_inflight()
2867 req->rq_status = -EIO; ptlrpc_abort_inflight()
2868 ptlrpc_client_wake_req(req); ptlrpc_abort_inflight()
2870 spin_unlock(&req->rq_lock); ptlrpc_abort_inflight()
2874 struct ptlrpc_request *req = ptlrpc_abort_inflight() local
2877 DEBUG_REQ(D_RPCTRACE, req, "aborting waiting req"); ptlrpc_abort_inflight()
2879 spin_lock(&req->rq_lock); ptlrpc_abort_inflight()
2880 if (req->rq_import_generation < imp->imp_generation) { ptlrpc_abort_inflight()
2881 req->rq_err = 1; ptlrpc_abort_inflight()
2882 req->rq_status = -EIO; ptlrpc_abort_inflight()
2883 ptlrpc_client_wake_req(req); ptlrpc_abort_inflight()
2885 spin_unlock(&req->rq_lock); ptlrpc_abort_inflight()
2907 struct ptlrpc_request *req = ptlrpc_abort_set() local
2911 spin_lock(&req->rq_lock); ptlrpc_abort_set()
2912 if (req->rq_phase != RQ_PHASE_RPC) { ptlrpc_abort_set()
2913 spin_unlock(&req->rq_lock); ptlrpc_abort_set()
2917 req->rq_err = 1; ptlrpc_abort_set()
2918 req->rq_status = -EINTR; ptlrpc_abort_set()
2919 ptlrpc_client_wake_req(req); ptlrpc_abort_set()
2920 spin_unlock(&req->rq_lock); ptlrpc_abort_set()
3030 static void ptlrpcd_add_work_req(struct ptlrpc_request *req) ptlrpcd_add_work_req() argument
3032 /* re-initialize the req */ ptlrpcd_add_work_req()
3033 req->rq_timeout = obd_timeout; ptlrpcd_add_work_req()
3034 req->rq_sent = get_seconds(); ptlrpcd_add_work_req()
3035 req->rq_deadline = req->rq_sent + req->rq_timeout; ptlrpcd_add_work_req()
3036 req->rq_reply_deadline = req->rq_deadline; ptlrpcd_add_work_req()
3037 req->rq_phase = RQ_PHASE_INTERPRET; ptlrpcd_add_work_req()
3038 req->rq_next_phase = RQ_PHASE_COMPLETE; ptlrpcd_add_work_req()
3039 req->rq_xid = ptlrpc_next_xid(); ptlrpcd_add_work_req()
3040 req->rq_import_generation = req->rq_import->imp_generation; ptlrpcd_add_work_req()
3042 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); ptlrpcd_add_work_req()
3046 struct ptlrpc_request *req, void *data, int rc) work_interpreter()
3050 LASSERT(ptlrpcd_check_work(req)); work_interpreter()
3055 list_del_init(&req->rq_set_chain); work_interpreter()
3056 req->rq_set = NULL; work_interpreter()
3058 if (atomic_dec_return(&req->rq_refcount) > 1) { work_interpreter()
3059 atomic_set(&req->rq_refcount, 2); work_interpreter()
3060 ptlrpcd_add_work_req(req); work_interpreter()
3067 static int ptlrpcd_check_work(struct ptlrpc_request *req) ptlrpcd_check_work() argument
3069 return req->rq_pill.rc_fmt == (void *)&worker_format; ptlrpcd_check_work()
3078 struct ptlrpc_request *req = NULL; ptlrpcd_alloc_work() local
3087 req = ptlrpc_request_cache_alloc(GFP_NOFS); ptlrpcd_alloc_work()
3088 if (req == NULL) { ptlrpcd_alloc_work()
3093 req->rq_send_state = LUSTRE_IMP_FULL; ptlrpcd_alloc_work()
3094 req->rq_type = PTL_RPC_MSG_REQUEST; ptlrpcd_alloc_work()
3095 req->rq_import = class_import_get(imp); ptlrpcd_alloc_work()
3096 req->rq_export = NULL; ptlrpcd_alloc_work()
3097 req->rq_interpret_reply = work_interpreter; ptlrpcd_alloc_work()
3099 req->rq_receiving_reply = 0; ptlrpcd_alloc_work()
3100 req->rq_req_unlink = req->rq_reply_unlink = 0; ptlrpcd_alloc_work()
3101 req->rq_no_delay = req->rq_no_resend = 1; ptlrpcd_alloc_work()
3102 req->rq_pill.rc_fmt = (void *)&worker_format; ptlrpcd_alloc_work()
3104 spin_lock_init(&req->rq_lock); ptlrpcd_alloc_work()
3105 INIT_LIST_HEAD(&req->rq_list); ptlrpcd_alloc_work()
3106 INIT_LIST_HEAD(&req->rq_replay_list); ptlrpcd_alloc_work()
3107 INIT_LIST_HEAD(&req->rq_set_chain); ptlrpcd_alloc_work()
3108 INIT_LIST_HEAD(&req->rq_history_list); ptlrpcd_alloc_work()
3109 INIT_LIST_HEAD(&req->rq_exp_list); ptlrpcd_alloc_work()
3110 init_waitqueue_head(&req->rq_reply_waitq); ptlrpcd_alloc_work()
3111 init_waitqueue_head(&req->rq_set_waitq); ptlrpcd_alloc_work()
3112 atomic_set(&req->rq_refcount, 1); ptlrpcd_alloc_work()
3114 CLASSERT(sizeof(*args) <= sizeof(req->rq_async_args)); ptlrpcd_alloc_work()
3115 args = ptlrpc_req_async_args(req); ptlrpcd_alloc_work()
3119 return req; ptlrpcd_alloc_work()
3125 struct ptlrpc_request *req = handler; ptlrpcd_destroy_work() local
3127 if (req) ptlrpcd_destroy_work()
3128 ptlrpc_req_finished(req); ptlrpcd_destroy_work()
3134 struct ptlrpc_request *req = handler; ptlrpcd_queue_work() local
3137 * Check if the req is already being queued. ptlrpcd_queue_work()
3139 * Here comes a trick: it lacks a way of checking if a req is being ptlrpcd_queue_work()
3140 * processed reliably in ptlrpc. Here I have to use refcount of req ptlrpcd_queue_work()
3142 * req as opaque data. - Jinshan ptlrpcd_queue_work()
3144 LASSERT(atomic_read(&req->rq_refcount) > 0); ptlrpcd_queue_work()
3145 if (atomic_inc_return(&req->rq_refcount) == 2) ptlrpcd_queue_work()
3146 ptlrpcd_add_work_req(req); ptlrpcd_queue_work()
989 ptlrpc_set_add_req(struct ptlrpc_request_set *set, struct ptlrpc_request *req) ptlrpc_set_add_req() argument
1015 ptlrpc_set_add_new_req(struct ptlrpcd_ctl *pc, struct ptlrpc_request *req) ptlrpc_set_add_new_req() argument
1057 ptlrpc_import_delay_req(struct obd_import *imp, struct ptlrpc_request *req, int *status) ptlrpc_import_delay_req() argument
2706 ptlrpc_replay_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *data, int rc) ptlrpc_replay_interpret() argument
3045 work_interpreter(const struct lu_env *env, struct ptlrpc_request *req, void *data, int rc) work_interpreter() argument
sec.c
318 struct ptlrpc_request *req, *next; sptlrpc_cli_ctx_wakeup() local
321 list_for_each_entry_safe(req, next, &ctx->cc_req_list, sptlrpc_cli_ctx_wakeup()
323 list_del_init(&req->rq_ctx_chain); sptlrpc_cli_ctx_wakeup()
324 ptlrpc_client_wake_req(req); sptlrpc_cli_ctx_wakeup()
387 * Given a \a req, find or allocate a appropriate context for it.
388 * \pre req->rq_cli_ctx == NULL.
390 * \retval 0 succeed, and req->rq_cli_ctx is set.
391 * \retval -ev error number, and req->rq_cli_ctx == NULL.
393 int sptlrpc_req_get_ctx(struct ptlrpc_request *req) sptlrpc_req_get_ctx() argument
395 struct obd_import *imp = req->rq_import; sptlrpc_req_get_ctx()
399 LASSERT(!req->rq_cli_ctx); sptlrpc_req_get_ctx()
406 req->rq_cli_ctx = get_my_ctx(sec); sptlrpc_req_get_ctx()
410 if (!req->rq_cli_ctx) { sptlrpc_req_get_ctx()
411 CERROR("req %p: fail to get context\n", req); sptlrpc_req_get_ctx()
419 * Drop the context for \a req.
420 * \pre req->rq_cli_ctx != NULL.
421 * \post req->rq_cli_ctx == NULL.
427 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync) sptlrpc_req_put_ctx() argument
429 LASSERT(req); sptlrpc_req_put_ctx()
430 LASSERT(req->rq_cli_ctx); sptlrpc_req_put_ctx()
435 if (!list_empty(&req->rq_ctx_chain)) { sptlrpc_req_put_ctx()
436 spin_lock(&req->rq_cli_ctx->cc_lock); sptlrpc_req_put_ctx()
437 list_del_init(&req->rq_ctx_chain); sptlrpc_req_put_ctx()
438 spin_unlock(&req->rq_cli_ctx->cc_lock); sptlrpc_req_put_ctx()
441 sptlrpc_cli_ctx_put(req->rq_cli_ctx, sync); sptlrpc_req_put_ctx()
442 req->rq_cli_ctx = NULL; sptlrpc_req_put_ctx()
446 int sptlrpc_req_ctx_switch(struct ptlrpc_request *req, sptlrpc_req_ctx_switch() argument
455 LASSERT(req->rq_reqmsg); sptlrpc_req_ctx_switch()
456 LASSERT(req->rq_reqlen); sptlrpc_req_ctx_switch()
457 LASSERT(req->rq_replen); sptlrpc_req_ctx_switch()
459 CDEBUG(D_SEC, "req %p: switch ctx %p(%u->%s) -> %p(%u->%s), switch sec %p(%s) -> %p(%s)\n", sptlrpc_req_ctx_switch()
460 req, sptlrpc_req_ctx_switch()
467 old_flvr = req->rq_flvr; sptlrpc_req_ctx_switch()
470 reqmsg_size = req->rq_reqlen; sptlrpc_req_ctx_switch()
475 memcpy(reqmsg, req->rq_reqmsg, reqmsg_size); sptlrpc_req_ctx_switch()
478 /* release old req/rep buf */ sptlrpc_req_ctx_switch()
479 req->rq_cli_ctx = oldctx; sptlrpc_req_ctx_switch()
480 sptlrpc_cli_free_reqbuf(req); sptlrpc_req_ctx_switch()
481 sptlrpc_cli_free_repbuf(req); sptlrpc_req_ctx_switch()
482 req->rq_cli_ctx = newctx; sptlrpc_req_ctx_switch()
485 sptlrpc_req_set_flavor(req, 0); sptlrpc_req_ctx_switch()
491 rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size); sptlrpc_req_ctx_switch()
493 LASSERT(req->rq_reqmsg); sptlrpc_req_ctx_switch()
494 memcpy(req->rq_reqmsg, reqmsg, reqmsg_size); sptlrpc_req_ctx_switch()
497 req->rq_flvr = old_flvr; sptlrpc_req_ctx_switch()
506 * If current context of \a req is dead somehow, e.g. we just switched flavor
508 * no switch is needed, \a req will end up with the same context.
513 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req) sptlrpc_req_replace_dead_ctx() argument
515 struct ptlrpc_cli_ctx *oldctx = req->rq_cli_ctx; sptlrpc_req_replace_dead_ctx()
522 sptlrpc_req_put_ctx(req, 0); sptlrpc_req_replace_dead_ctx()
524 rc = sptlrpc_req_get_ctx(req); sptlrpc_req_replace_dead_ctx()
526 LASSERT(!req->rq_cli_ctx); sptlrpc_req_replace_dead_ctx()
529 req->rq_cli_ctx = oldctx; sptlrpc_req_replace_dead_ctx()
533 newctx = req->rq_cli_ctx; sptlrpc_req_replace_dead_ctx()
552 rc = sptlrpc_req_ctx_switch(req, oldctx, newctx); sptlrpc_req_replace_dead_ctx()
555 sptlrpc_req_put_ctx(req, 0); sptlrpc_req_replace_dead_ctx()
556 req->rq_cli_ctx = oldctx; sptlrpc_req_replace_dead_ctx()
560 LASSERT(req->rq_cli_ctx == newctx); sptlrpc_req_replace_dead_ctx()
579 struct ptlrpc_request *req = data; ctx_refresh_timeout() local
583 lustre_msg_set_conn_cnt(req->rq_reqmsg, req->rq_import->imp_conn_cnt); ctx_refresh_timeout()
585 rc = ptlrpc_expire_one_request(req, 1); ctx_refresh_timeout()
593 req->rq_cli_ctx->cc_ops->force_die(req->rq_cli_ctx, 0); ctx_refresh_timeout()
600 struct ptlrpc_request *req = data; ctx_refresh_interrupt() local
602 spin_lock(&req->rq_lock); ctx_refresh_interrupt()
603 req->rq_intr = 1; ctx_refresh_interrupt()
604 spin_unlock(&req->rq_lock); ctx_refresh_interrupt()
608 void req_off_ctx_list(struct ptlrpc_request *req, struct ptlrpc_cli_ctx *ctx) req_off_ctx_list() argument
611 if (!list_empty(&req->rq_ctx_chain)) req_off_ctx_list()
612 list_del_init(&req->rq_ctx_chain); req_off_ctx_list()
617 * To refresh the context of \req, if it's not up-to-date.
630 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout) sptlrpc_req_refresh_ctx() argument
632 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; sptlrpc_req_refresh_ctx()
639 if (req->rq_ctx_init || req->rq_ctx_fini) sptlrpc_req_refresh_ctx()
648 rc = import_sec_validate_get(req->rq_import, &sec); sptlrpc_req_refresh_ctx()
652 if (sec->ps_flvr.sf_rpc != req->rq_flvr.sf_rpc) { sptlrpc_req_refresh_ctx()
653 CDEBUG(D_SEC, "req %p: flavor has changed %x -> %x\n", sptlrpc_req_refresh_ctx()
654 req, req->rq_flvr.sf_rpc, sec->ps_flvr.sf_rpc); sptlrpc_req_refresh_ctx()
655 req_off_ctx_list(req, ctx); sptlrpc_req_refresh_ctx()
656 sptlrpc_req_replace_dead_ctx(req); sptlrpc_req_refresh_ctx()
657 ctx = req->rq_cli_ctx; sptlrpc_req_refresh_ctx()
672 req_off_ctx_list(req, ctx); sptlrpc_req_refresh_ctx()
677 spin_lock(&req->rq_lock); sptlrpc_req_refresh_ctx()
678 req->rq_err = 1; sptlrpc_req_refresh_ctx()
679 spin_unlock(&req->rq_lock); sptlrpc_req_refresh_ctx()
680 req_off_ctx_list(req, ctx); sptlrpc_req_refresh_ctx()
712 unlikely(req->rq_reqmsg) && sptlrpc_req_refresh_ctx()
713 lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) { sptlrpc_req_refresh_ctx()
714 req_off_ctx_list(req, ctx); sptlrpc_req_refresh_ctx()
719 req_off_ctx_list(req, ctx); sptlrpc_req_refresh_ctx()
723 if (req->rq_import->imp_deactive) { sptlrpc_req_refresh_ctx()
724 spin_lock(&req->rq_lock); sptlrpc_req_refresh_ctx()
725 req->rq_err = 1; sptlrpc_req_refresh_ctx()
726 spin_unlock(&req->rq_lock); sptlrpc_req_refresh_ctx()
730 rc = sptlrpc_req_replace_dead_ctx(req); sptlrpc_req_refresh_ctx()
732 LASSERT(ctx == req->rq_cli_ctx); sptlrpc_req_refresh_ctx()
733 CERROR("req %p: failed to replace dead ctx %p: %d\n", sptlrpc_req_refresh_ctx()
734 req, ctx, rc); sptlrpc_req_refresh_ctx()
735 spin_lock(&req->rq_lock); sptlrpc_req_refresh_ctx()
736 req->rq_err = 1; sptlrpc_req_refresh_ctx()
737 spin_unlock(&req->rq_lock); sptlrpc_req_refresh_ctx()
741 ctx = req->rq_cli_ctx; sptlrpc_req_refresh_ctx()
750 if (list_empty(&req->rq_ctx_chain)) sptlrpc_req_refresh_ctx()
751 list_add(&req->rq_ctx_chain, &ctx->cc_req_list); sptlrpc_req_refresh_ctx()
758 LASSERT(req->rq_receiving_reply == 0); sptlrpc_req_refresh_ctx()
759 spin_lock(&req->rq_lock); sptlrpc_req_refresh_ctx()
760 req->rq_err = 0; sptlrpc_req_refresh_ctx()
761 req->rq_timedout = 0; sptlrpc_req_refresh_ctx()
762 req->rq_resend = 0; sptlrpc_req_refresh_ctx()
763 req->rq_restart = 0; sptlrpc_req_refresh_ctx()
764 spin_unlock(&req->rq_lock); sptlrpc_req_refresh_ctx()
767 ctx_refresh_interrupt, req); sptlrpc_req_refresh_ctx()
768 rc = l_wait_event(req->rq_reply_waitq, ctx_check_refresh(ctx), &lwi); sptlrpc_req_refresh_ctx()
777 * - someone invalidate the req and call ptlrpc_client_wake_req(), sptlrpc_req_refresh_ctx()
782 req_off_ctx_list(req, ctx); sptlrpc_req_refresh_ctx()
792 * Initialize flavor settings for \a req, according to \a opcode.
798 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode) sptlrpc_req_set_flavor() argument
802 LASSERT(req->rq_import); sptlrpc_req_set_flavor()
803 LASSERT(req->rq_cli_ctx); sptlrpc_req_set_flavor()
804 LASSERT(req->rq_cli_ctx->cc_sec); sptlrpc_req_set_flavor()
805 LASSERT(req->rq_bulk_read == 0 || req->rq_bulk_write == 0); sptlrpc_req_set_flavor()
813 req->rq_bulk_read = 1; sptlrpc_req_set_flavor()
817 req->rq_bulk_write = 1; sptlrpc_req_set_flavor()
820 req->rq_ctx_init = 1; sptlrpc_req_set_flavor()
823 req->rq_ctx_fini = 1; sptlrpc_req_set_flavor()
827 LASSERT(req->rq_ctx_init == 0); sptlrpc_req_set_flavor()
828 LASSERT(req->rq_ctx_fini == 0); sptlrpc_req_set_flavor()
831 req->rq_pack_udesc = 0; sptlrpc_req_set_flavor()
832 req->rq_pack_bulk = 0; sptlrpc_req_set_flavor()
836 sec = req->rq_cli_ctx->cc_sec; sptlrpc_req_set_flavor()
839 req->rq_flvr = sec->ps_flvr; sptlrpc_req_set_flavor()
844 if (unlikely(req->rq_ctx_init)) sptlrpc_req_set_flavor()
845 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_NULL); sptlrpc_req_set_flavor()
846 else if (unlikely(req->rq_ctx_fini)) sptlrpc_req_set_flavor()
847 flvr_set_svc(&req->rq_flvr.sf_rpc, SPTLRPC_SVC_INTG); sptlrpc_req_set_flavor()
851 (req->rq_flvr.sf_rpc != SPTLRPC_FLVR_NULL)) sptlrpc_req_set_flavor()
852 req->rq_pack_udesc = 1; sptlrpc_req_set_flavor()
855 if ((req->rq_bulk_read || req->rq_bulk_write) && sptlrpc_req_set_flavor()
856 sptlrpc_flavor_has_bulk(&req->rq_flvr)) sptlrpc_req_set_flavor()
857 req->rq_pack_bulk = 1; sptlrpc_req_set_flavor()
860 void sptlrpc_request_out_callback(struct ptlrpc_request *req) sptlrpc_request_out_callback() argument
862 if (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc) != SPTLRPC_SVC_PRIV) sptlrpc_request_out_callback()
865 LASSERT(req->rq_clrbuf); sptlrpc_request_out_callback()
866 if (req->rq_pool || !req->rq_reqbuf) sptlrpc_request_out_callback()
869 OBD_FREE(req->rq_reqbuf, req->rq_reqbuf_len); sptlrpc_request_out_callback()
870 req->rq_reqbuf = NULL; sptlrpc_request_out_callback()
871 req->rq_reqbuf_len = 0; sptlrpc_request_out_callback()
883 struct ptlrpc_request *req = NULL; sptlrpc_import_check_ctx() local
906 req = ptlrpc_request_cache_alloc(GFP_NOFS); sptlrpc_import_check_ctx()
907 if (!req) sptlrpc_import_check_ctx()
910 spin_lock_init(&req->rq_lock); sptlrpc_import_check_ctx()
911 atomic_set(&req->rq_refcount, 10000); sptlrpc_import_check_ctx()
912 INIT_LIST_HEAD(&req->rq_ctx_chain); sptlrpc_import_check_ctx()
913 init_waitqueue_head(&req->rq_reply_waitq); sptlrpc_import_check_ctx()
914 init_waitqueue_head(&req->rq_set_waitq); sptlrpc_import_check_ctx()
915 req->rq_import = imp; sptlrpc_import_check_ctx()
916 req->rq_flvr = sec->ps_flvr; sptlrpc_import_check_ctx()
917 req->rq_cli_ctx = ctx; sptlrpc_import_check_ctx()
919 rc = sptlrpc_req_refresh_ctx(req, 0); sptlrpc_import_check_ctx()
920 LASSERT(list_empty(&req->rq_ctx_chain)); sptlrpc_import_check_ctx()
921 sptlrpc_cli_ctx_put(req->rq_cli_ctx, 1); sptlrpc_import_check_ctx()
922 ptlrpc_request_cache_free(req); sptlrpc_import_check_ctx()
929 * upon the request message of \a req. After this function called,
930 * req->rq_reqmsg is still accessible as clear text.
932 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req) sptlrpc_cli_wrap_request() argument
934 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; sptlrpc_cli_wrap_request()
939 LASSERT(req->rq_reqbuf || req->rq_clrbuf); sptlrpc_cli_wrap_request()
944 if (req->rq_bulk) { sptlrpc_cli_wrap_request()
945 rc = sptlrpc_cli_wrap_bulk(req, req->rq_bulk); sptlrpc_cli_wrap_request()
950 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) { sptlrpc_cli_wrap_request()
955 rc = ctx->cc_ops->sign(ctx, req); sptlrpc_cli_wrap_request()
959 rc = ctx->cc_ops->seal(ctx, req); sptlrpc_cli_wrap_request()
966 LASSERT(req->rq_reqdata_len); sptlrpc_cli_wrap_request()
967 LASSERT(req->rq_reqdata_len % 8 == 0); sptlrpc_cli_wrap_request()
968 LASSERT(req->rq_reqdata_len <= req->rq_reqbuf_len); sptlrpc_cli_wrap_request()
974 static int do_cli_unwrap_reply(struct ptlrpc_request *req) do_cli_unwrap_reply() argument
976 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; do_cli_unwrap_reply()
981 LASSERT(req->rq_repbuf); do_cli_unwrap_reply()
982 LASSERT(req->rq_repdata); do_cli_unwrap_reply()
983 LASSERT(req->rq_repmsg == NULL); do_cli_unwrap_reply()
985 req->rq_rep_swab_mask = 0; do_cli_unwrap_reply()
987 rc = __lustre_unpack_msg(req->rq_repdata, req->rq_repdata_len); do_cli_unwrap_reply()
990 lustre_set_rep_swabbed(req, MSG_PTLRPC_HEADER_OFF); do_cli_unwrap_reply()
994 CERROR("failed unpack reply: x%llu\n", req->rq_xid); do_cli_unwrap_reply()
998 if (req->rq_repdata_len < sizeof(struct lustre_msg)) { do_cli_unwrap_reply()
1000 req->rq_repdata_len); do_cli_unwrap_reply()
1004 if (SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr) != do_cli_unwrap_reply()
1005 SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)) { do_cli_unwrap_reply()
1007 SPTLRPC_FLVR_POLICY(req->rq_repdata->lm_secflvr), do_cli_unwrap_reply()
1008 SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc)); do_cli_unwrap_reply()
1012 switch (SPTLRPC_FLVR_SVC(req->rq_flvr.sf_rpc)) { do_cli_unwrap_reply()
1017 rc = ctx->cc_ops->verify(ctx, req); do_cli_unwrap_reply()
1021 rc = ctx->cc_ops->unseal(ctx, req); do_cli_unwrap_reply()
1026 LASSERT(rc || req->rq_repmsg || req->rq_resend); do_cli_unwrap_reply()
1028 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL && do_cli_unwrap_reply()
1029 !req->rq_ctx_init) do_cli_unwrap_reply()
1030 req->rq_rep_swab_mask = 0; do_cli_unwrap_reply()
1036 * message of \a req. After return successfully, req->rq_repmsg points to
1042 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req) sptlrpc_cli_unwrap_reply() argument
1044 LASSERT(req->rq_repbuf); sptlrpc_cli_unwrap_reply()
1045 LASSERT(req->rq_repdata == NULL); sptlrpc_cli_unwrap_reply()
1046 LASSERT(req->rq_repmsg == NULL); sptlrpc_cli_unwrap_reply()
1047 LASSERT(req->rq_reply_off + req->rq_nob_received <= req->rq_repbuf_len); sptlrpc_cli_unwrap_reply()
1049 if (req->rq_reply_off == 0 && sptlrpc_cli_unwrap_reply()
1050 (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) { sptlrpc_cli_unwrap_reply()
1055 if (req->rq_reply_off % 8 != 0) { sptlrpc_cli_unwrap_reply()
1056 CERROR("reply at odd offset %u\n", req->rq_reply_off); sptlrpc_cli_unwrap_reply()
1060 req->rq_repdata = (struct lustre_msg *) sptlrpc_cli_unwrap_reply()
1061 (req->rq_repbuf + req->rq_reply_off); sptlrpc_cli_unwrap_reply()
1062 req->rq_repdata_len = req->rq_nob_received; sptlrpc_cli_unwrap_reply()
1064 return do_cli_unwrap_reply(req); sptlrpc_cli_unwrap_reply()
1069 * reply message of \a req. We expect the rq_reply_off is 0, and
1082 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req, sptlrpc_cli_unwrap_early_reply() argument
1094 early_size = req->rq_nob_received; sptlrpc_cli_unwrap_early_reply()
1103 spin_lock(&req->rq_lock); sptlrpc_cli_unwrap_early_reply()
1105 if (req->rq_replied) { sptlrpc_cli_unwrap_early_reply()
1106 spin_unlock(&req->rq_lock); sptlrpc_cli_unwrap_early_reply()
1111 LASSERT(req->rq_repbuf); sptlrpc_cli_unwrap_early_reply()
1112 LASSERT(req->rq_repdata == NULL); sptlrpc_cli_unwrap_early_reply()
1113 LASSERT(req->rq_repmsg == NULL); sptlrpc_cli_unwrap_early_reply()
1115 if (req->rq_reply_off != 0) { sptlrpc_cli_unwrap_early_reply()
1116 CERROR("early reply with offset %u\n", req->rq_reply_off); sptlrpc_cli_unwrap_early_reply()
1117 spin_unlock(&req->rq_lock); sptlrpc_cli_unwrap_early_reply()
1122 if (req->rq_nob_received != early_size) { sptlrpc_cli_unwrap_early_reply()
1125 early_size, req->rq_nob_received); sptlrpc_cli_unwrap_early_reply()
1126 spin_unlock(&req->rq_lock); sptlrpc_cli_unwrap_early_reply()
1131 if (req->rq_nob_received < sizeof(struct lustre_msg)) { sptlrpc_cli_unwrap_early_reply()
1133 req->rq_nob_received); sptlrpc_cli_unwrap_early_reply()
1134 spin_unlock(&req->rq_lock); sptlrpc_cli_unwrap_early_reply()
1139 memcpy(early_buf, req->rq_repbuf, early_size); sptlrpc_cli_unwrap_early_reply()
1140 spin_unlock(&req->rq_lock); sptlrpc_cli_unwrap_early_reply()
1143 early_req->rq_cli_ctx = sptlrpc_cli_ctx_get(req->rq_cli_ctx); sptlrpc_cli_unwrap_early_reply()
1144 early_req->rq_flvr = req->rq_flvr; sptlrpc_cli_unwrap_early_reply()
1150 early_req->rq_reqmsg = req->rq_reqmsg; sptlrpc_cli_unwrap_early_reply()
1531 * Used by ptlrpc client to allocate request buffer of \a req. Upon return
1532 * successfully, req->rq_reqmsg points to a buffer with size \a msgsize.
1534 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize) sptlrpc_cli_alloc_reqbuf() argument
1536 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; sptlrpc_cli_alloc_reqbuf()
1543 LASSERT(req->rq_reqmsg == NULL); sptlrpc_cli_alloc_reqbuf()
1547 rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize); sptlrpc_cli_alloc_reqbuf()
1549 LASSERT(req->rq_reqmsg); sptlrpc_cli_alloc_reqbuf()
1550 LASSERT(req->rq_reqbuf || req->rq_clrbuf); sptlrpc_cli_alloc_reqbuf()
1553 if (req->rq_pool) sptlrpc_cli_alloc_reqbuf()
1554 memset(req->rq_reqmsg, 0, msgsize); sptlrpc_cli_alloc_reqbuf()
1561 * Used by ptlrpc client to free request buffer of \a req. After this
1562 * req->rq_reqmsg is set to NULL and should not be accessed anymore.
1564 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req) sptlrpc_cli_free_reqbuf() argument
1566 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; sptlrpc_cli_free_reqbuf()
1574 if (req->rq_reqbuf == NULL && req->rq_clrbuf == NULL) sptlrpc_cli_free_reqbuf()
1578 policy->sp_cops->free_reqbuf(ctx->cc_sec, req); sptlrpc_cli_free_reqbuf()
1579 req->rq_reqmsg = NULL; sptlrpc_cli_free_reqbuf()
1628 * by req->rq_reqmsg to size \a newsize, all previously filled-in data will be
1635 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req, sptlrpc_cli_enlarge_reqbuf() argument
1638 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; sptlrpc_cli_enlarge_reqbuf()
1640 struct lustre_msg *msg = req->rq_reqmsg; sptlrpc_cli_enlarge_reqbuf()
1652 return cops->enlarge_reqbuf(ctx->cc_sec, req, segment, newsize); sptlrpc_cli_enlarge_reqbuf()
1657 * Used by ptlrpc client to allocate reply buffer of \a req.
1659 * \note After this, req->rq_repmsg is still not accessible.
1661 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize) sptlrpc_cli_alloc_repbuf() argument
1663 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; sptlrpc_cli_alloc_repbuf()
1670 if (req->rq_repbuf) sptlrpc_cli_alloc_repbuf()
1674 return policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize); sptlrpc_cli_alloc_repbuf()
1678 * Used by ptlrpc client to free reply buffer of \a req. After this
1679 * req->rq_repmsg is set to NULL and should not be accessed anymore.
1681 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req) sptlrpc_cli_free_repbuf() argument
1683 struct ptlrpc_cli_ctx *ctx = req->rq_cli_ctx; sptlrpc_cli_free_repbuf()
1691 if (req->rq_repbuf == NULL) sptlrpc_cli_free_repbuf()
1693 LASSERT(req->rq_repbuf_len); sptlrpc_cli_free_repbuf()
1696 policy->sp_cops->free_repbuf(ctx->cc_sec, req); sptlrpc_cli_free_repbuf()
1697 req->rq_repmsg = NULL; sptlrpc_cli_free_repbuf()
1725 struct ptlrpc_request *req) flavor_allowed()
1727 struct sptlrpc_flavor *flvr = &req->rq_flvr; flavor_allowed()
1732 if ((req->rq_ctx_init || req->rq_ctx_fini) && flavor_allowed()
1744 * Given an export \a exp, check whether the flavor of incoming \a req
1749 struct ptlrpc_request *req) sptlrpc_target_export_check()
1762 if (req->rq_ctx_fini) sptlrpc_target_export_check()
1768 * the first req with the new flavor, then treat it as current flavor, sptlrpc_target_export_check()
1773 flavor_allowed(&exp->exp_flvr_old[1], req)) { sptlrpc_target_export_check()
1791 if (req->rq_auth_gss && sptlrpc_target_export_check()
1792 !(req->rq_ctx_init && sptlrpc_target_export_check()
1793 (req->rq_auth_usr_root || req->rq_auth_usr_mdt || sptlrpc_target_export_check()
1794 req->rq_auth_usr_ost))) { sptlrpc_target_export_check()
1797 req->rq_auth_gss, req->rq_ctx_init, sptlrpc_target_export_check()
1798 req->rq_auth_usr_root, req->rq_auth_usr_mdt, sptlrpc_target_export_check()
1799 req->rq_auth_usr_ost); sptlrpc_target_export_check()
1807 req->rq_svc_ctx, &flavor); sptlrpc_target_export_check()
1812 if (likely(flavor_allowed(&exp->exp_flvr, req))) { sptlrpc_target_export_check()
1815 if (!req->rq_auth_gss || !req->rq_ctx_init || sptlrpc_target_export_check()
1816 (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt && sptlrpc_target_export_check()
1817 !req->rq_auth_usr_ost)) { sptlrpc_target_export_check()
1841 req->rq_svc_ctx, sptlrpc_target_export_check()
1851 req->rq_svc_ctx); sptlrpc_target_export_check()
1857 if (flavor_allowed(&exp->exp_flvr_old[0], req)) { sptlrpc_target_export_check()
1874 req->rq_flvr.sf_rpc); sptlrpc_target_export_check()
1881 if (flavor_allowed(&exp->exp_flvr_old[1], req)) { sptlrpc_target_export_check()
1899 req->rq_flvr.sf_rpc); sptlrpc_target_export_check()
1908 CWARN("exp %p(%s): req %p (%u|%u|%u|%u|%u|%u) with unauthorized flavor %x, expect %x|%x(%+ld)|%x(%+ld)\n", sptlrpc_target_export_check()
1910 req, req->rq_auth_gss, req->rq_ctx_init, req->rq_ctx_fini, sptlrpc_target_export_check()
1911 req->rq_auth_usr_root, req->rq_auth_usr_mdt, req->rq_auth_usr_ost, sptlrpc_target_export_check()
1912 req->rq_flvr.sf_rpc, sptlrpc_target_export_check()
1966 static int sptlrpc_svc_check_from(struct ptlrpc_request *req, int svc_rc) sptlrpc_svc_check_from() argument
1969 if (!req->rq_auth_gss || svc_rc == SECSVC_DROP) sptlrpc_svc_check_from()
1972 switch (req->rq_sp_from) { sptlrpc_svc_check_from()
1974 if (req->rq_auth_usr_mdt || req->rq_auth_usr_ost) { sptlrpc_svc_check_from()
1975 DEBUG_REQ(D_ERROR, req, "faked source CLI"); sptlrpc_svc_check_from()
1980 if (!req->rq_auth_usr_mdt) { sptlrpc_svc_check_from()
1981 DEBUG_REQ(D_ERROR, req, "faked source MDT"); sptlrpc_svc_check_from()
1986 if (!req->rq_auth_usr_ost) { sptlrpc_svc_check_from()
1987 DEBUG_REQ(D_ERROR, req, "faked source OST"); sptlrpc_svc_check_from()
1993 if (!req->rq_auth_usr_root && !req->rq_auth_usr_mdt && sptlrpc_svc_check_from()
1994 !req->rq_auth_usr_ost) { sptlrpc_svc_check_from()
1995 DEBUG_REQ(D_ERROR, req, "faked source MGC/MGS"); sptlrpc_svc_check_from()
2001 DEBUG_REQ(D_ERROR, req, "invalid source %u", req->rq_sp_from); sptlrpc_svc_check_from()
2010 * incoming \a req. This must be the first thing to do with an incoming sptlrpc_svc_unwrap_request()
2013 * \retval SECSVC_OK success, and req->rq_reqmsg point to request message in
2014 * clear text, size is req->rq_reqlen; also req->rq_svc_ctx is set.
2019 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req) sptlrpc_svc_unwrap_request() argument
2022 struct lustre_msg *msg = req->rq_reqbuf; sptlrpc_svc_unwrap_request()
2026 LASSERT(req->rq_reqmsg == NULL); sptlrpc_svc_unwrap_request()
2027 LASSERT(req->rq_repmsg == NULL); sptlrpc_svc_unwrap_request()
2028 LASSERT(req->rq_svc_ctx == NULL); sptlrpc_svc_unwrap_request()
2030 req->rq_req_swab_mask = 0; sptlrpc_svc_unwrap_request()
2032 rc = __lustre_unpack_msg(msg, req->rq_reqdata_len); sptlrpc_svc_unwrap_request()
2035 lustre_set_req_swabbed(req, MSG_PTLRPC_HEADER_OFF); sptlrpc_svc_unwrap_request()
2040 libcfs_id2str(req->rq_peer), req->rq_xid); sptlrpc_svc_unwrap_request()
2044 req->rq_flvr.sf_rpc = WIRE_FLVR(msg->lm_secflvr); sptlrpc_svc_unwrap_request()
2045 req->rq_sp_from = LUSTRE_SP_ANY; sptlrpc_svc_unwrap_request()
2046 req->rq_auth_uid = -1; sptlrpc_svc_unwrap_request()
2047 req->rq_auth_mapped_uid = -1; sptlrpc_svc_unwrap_request()
2049 policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc); sptlrpc_svc_unwrap_request()
2051 CERROR("unsupported rpc flavor %x\n", req->rq_flvr.sf_rpc); sptlrpc_svc_unwrap_request()
2056 rc = policy->sp_sops->accept(req); sptlrpc_svc_unwrap_request()
2058 LASSERT(req->rq_reqmsg || rc != SECSVC_OK); sptlrpc_svc_unwrap_request()
2059 LASSERT(req->rq_svc_ctx || rc == SECSVC_DROP); sptlrpc_svc_unwrap_request()
2065 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) sptlrpc_svc_unwrap_request()
2066 req->rq_req_swab_mask = 0; sptlrpc_svc_unwrap_request()
2069 rc = sptlrpc_svc_check_from(req, rc); sptlrpc_svc_unwrap_request()
2074 * Used by ptlrpc server, to allocate reply buffer for \a req. If succeed,
2075 * req->rq_reply_state is set, and req->rq_reply_state->rs_msg point to
2078 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen) sptlrpc_svc_alloc_rs() argument
2084 LASSERT(req->rq_svc_ctx); sptlrpc_svc_alloc_rs()
2085 LASSERT(req->rq_svc_ctx->sc_policy); sptlrpc_svc_alloc_rs()
2087 policy = req->rq_svc_ctx->sc_policy; sptlrpc_svc_alloc_rs()
2090 rc = policy->sp_sops->alloc_rs(req, msglen); sptlrpc_svc_alloc_rs()
2092 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt; sptlrpc_svc_alloc_rs()
2107 req->rq_reply_state = rs; sptlrpc_svc_alloc_rs()
2108 rc = policy->sp_sops->alloc_rs(req, msglen); sptlrpc_svc_alloc_rs()
2111 req->rq_reply_state = NULL; sptlrpc_svc_alloc_rs()
2116 (req->rq_reply_state && req->rq_reply_state->rs_msg)); sptlrpc_svc_alloc_rs()
2124 * \post req->rq_reply_off is set to appropriate server-controlled reply offset.
2125 * \post req->rq_repmsg and req->rq_reply_state->rs_msg becomes inaccessible.
2127 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req) sptlrpc_svc_wrap_reply() argument
2132 LASSERT(req->rq_svc_ctx); sptlrpc_svc_wrap_reply()
2133 LASSERT(req->rq_svc_ctx->sc_policy); sptlrpc_svc_wrap_reply()
2135 policy = req->rq_svc_ctx->sc_policy; sptlrpc_svc_wrap_reply()
2138 rc = policy->sp_sops->authorize(req); sptlrpc_svc_wrap_reply()
2139 LASSERT(rc || req->rq_reply_state->rs_repdata_len); sptlrpc_svc_wrap_reply()
2165 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req) sptlrpc_svc_ctx_addref() argument
2167 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; sptlrpc_svc_ctx_addref()
2173 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req) sptlrpc_svc_ctx_decref() argument
2175 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; sptlrpc_svc_ctx_decref()
2185 req->rq_svc_ctx = NULL; sptlrpc_svc_ctx_decref()
2188 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req) sptlrpc_svc_ctx_invalidate() argument
2190 struct ptlrpc_svc_ctx *ctx = req->rq_svc_ctx; sptlrpc_svc_ctx_invalidate()
2209 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req, sptlrpc_cli_wrap_bulk() argument
2214 LASSERT(req->rq_bulk_read || req->rq_bulk_write); sptlrpc_cli_wrap_bulk()
2216 if (!req->rq_pack_bulk) sptlrpc_cli_wrap_bulk()
2219 ctx = req->rq_cli_ctx; sptlrpc_cli_wrap_bulk()
2221 return ctx->cc_ops->wrap_bulk(ctx, req, desc); sptlrpc_cli_wrap_bulk()
2230 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req, sptlrpc_cli_unwrap_bulk_read() argument
2237 LASSERT(req->rq_bulk_read && !req->rq_bulk_write); sptlrpc_cli_unwrap_bulk_read()
2239 if (!req->rq_pack_bulk) sptlrpc_cli_unwrap_bulk_read()
2242 ctx = req->rq_cli_ctx; sptlrpc_cli_unwrap_bulk_read()
2244 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc); sptlrpc_cli_unwrap_bulk_read()
2256 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req, sptlrpc_cli_unwrap_bulk_write() argument
2262 LASSERT(!req->rq_bulk_read && req->rq_bulk_write); sptlrpc_cli_unwrap_bulk_write()
2264 if (!req->rq_pack_bulk) sptlrpc_cli_unwrap_bulk_write()
2267 ctx = req->rq_cli_ctx; sptlrpc_cli_unwrap_bulk_write()
2269 rc = ctx->cc_ops->unwrap_bulk(ctx, req, desc); sptlrpc_cli_unwrap_bulk_write()
1724 flavor_allowed(struct sptlrpc_flavor *exp, struct ptlrpc_request *req) flavor_allowed() argument
1748 sptlrpc_target_export_check(struct obd_export *exp, struct ptlrpc_request *req) sptlrpc_target_export_check() argument
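The sec.c results above cover the client half of the sptlrpc layer: per-flavor buffer allocation, context refresh, request wrapping and reply unwrapping. The fragment below is a minimal sketch of that call order only, under the assumption that the request already has its import and client context attached; the wrapper function name and msgsize arguments are invented for illustration, and in the tree these steps are driven from the generic ptlrpc client code rather than called directly like this.

/* Illustrative only: call order of the client-side sptlrpc hooks listed
 * above.  Function name and arguments are assumptions, not tree code. */
static int demo_cli_sec_roundtrip(struct ptlrpc_request *req,
                                  int reqmsg_size, int repmsg_size)
{
        int rc;

        /* per-flavor request buffer; req->rq_reqmsg is valid afterwards */
        rc = sptlrpc_cli_alloc_reqbuf(req, reqmsg_size);
        if (rc)
                return rc;

        /* make sure the client context is usable before sending */
        rc = sptlrpc_req_refresh_ctx(req, 0);
        if (rc)
                goto out_reqbuf;

        /* sign or seal req->rq_reqmsg according to the flavor */
        rc = sptlrpc_cli_wrap_request(req);
        if (rc)
                goto out_reqbuf;

        /* reply buffer must exist before the reply can arrive */
        rc = sptlrpc_cli_alloc_repbuf(req, repmsg_size);
        if (rc)
                goto out_reqbuf;

        /* ... request is sent and the reply received here ... */

        /* verify or unseal; on success req->rq_repmsg is usable */
        rc = sptlrpc_cli_unwrap_reply(req);

        sptlrpc_cli_free_repbuf(req);
out_reqbuf:
        sptlrpc_cli_free_reqbuf(req);
        return rc;
}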
H A Dsec_plain.c191 int plain_ctx_sign(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) plain_ctx_sign() argument
193 struct lustre_msg *msg = req->rq_reqbuf; plain_ctx_sign()
196 msg->lm_secflvr = req->rq_flvr.sf_rpc; plain_ctx_sign()
202 phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg; plain_ctx_sign()
204 if (req->rq_pack_udesc) plain_ctx_sign()
206 if (req->rq_pack_bulk) plain_ctx_sign()
209 req->rq_reqdata_len = lustre_msg_size_v2(msg->lm_bufcount, plain_ctx_sign()
215 int plain_ctx_verify(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req) plain_ctx_verify() argument
217 struct lustre_msg *msg = req->rq_repdata; plain_ctx_verify()
227 swabbed = ptlrpc_rep_need_swab(req); plain_ctx_verify()
246 if (phdr->ph_bulk_hash_alg != req->rq_flvr.u_bulk.hash.hash_alg) { plain_ctx_verify()
248 req->rq_flvr.u_bulk.hash.hash_alg); plain_ctx_verify()
252 if (unlikely(req->rq_early)) { plain_ctx_verify()
268 if (!req->rq_early && plain_ctx_verify()
269 !equi(req->rq_pack_bulk == 1, plain_ctx_verify()
272 req->rq_pack_bulk ? "Missing" : "Unexpected"); plain_ctx_verify()
282 req->rq_repmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0); plain_ctx_verify()
283 req->rq_replen = lustre_msg_buflen(msg, PLAIN_PACK_MSG_OFF); plain_ctx_verify()
289 struct ptlrpc_request *req, plain_cli_wrap_bulk()
296 LASSERT(req->rq_pack_bulk); plain_cli_wrap_bulk()
297 LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS); plain_cli_wrap_bulk()
299 bsd = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0); plain_cli_wrap_bulk()
305 bsd->bsd_svc = SPTLRPC_FLVR_BULK_SVC(req->rq_flvr.sf_rpc); plain_cli_wrap_bulk()
310 if (req->rq_bulk_read) plain_cli_wrap_bulk()
313 rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg, plain_cli_wrap_bulk()
323 req->rq_flvr.u_bulk.hash.hash_alg != BULK_HASH_ALG_NULL) plain_cli_wrap_bulk()
332 struct ptlrpc_request *req, plain_cli_unwrap_bulk()
340 LASSERT(req->rq_pack_bulk); plain_cli_unwrap_bulk()
341 LASSERT(req->rq_reqbuf->lm_bufcount == PLAIN_PACK_SEGMENTS); plain_cli_unwrap_bulk()
342 LASSERT(req->rq_repdata->lm_bufcount == PLAIN_PACK_SEGMENTS); plain_cli_unwrap_bulk()
344 bsdv = lustre_msg_buf(req->rq_repdata, PLAIN_PACK_BULK_OFF, 0); plain_cli_unwrap_bulk()
347 if (req->rq_bulk_write) { plain_cli_unwrap_bulk()
362 rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg, plain_cli_unwrap_bulk()
540 struct ptlrpc_request *req, plain_alloc_reqbuf()
549 if (req->rq_pack_udesc) plain_alloc_reqbuf()
552 if (req->rq_pack_bulk) { plain_alloc_reqbuf()
553 LASSERT(req->rq_bulk_read || req->rq_bulk_write); plain_alloc_reqbuf()
559 if (!req->rq_reqbuf) { plain_alloc_reqbuf()
560 LASSERT(!req->rq_pool); plain_alloc_reqbuf()
563 OBD_ALLOC_LARGE(req->rq_reqbuf, alloc_len); plain_alloc_reqbuf()
564 if (!req->rq_reqbuf) plain_alloc_reqbuf()
567 req->rq_reqbuf_len = alloc_len; plain_alloc_reqbuf()
569 LASSERT(req->rq_pool); plain_alloc_reqbuf()
570 LASSERT(req->rq_reqbuf_len >= alloc_len); plain_alloc_reqbuf()
571 memset(req->rq_reqbuf, 0, alloc_len); plain_alloc_reqbuf()
574 lustre_init_msg_v2(req->rq_reqbuf, PLAIN_PACK_SEGMENTS, buflens, NULL); plain_alloc_reqbuf()
575 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0); plain_alloc_reqbuf()
577 if (req->rq_pack_udesc) plain_alloc_reqbuf()
578 sptlrpc_pack_user_desc(req->rq_reqbuf, PLAIN_PACK_USER_OFF); plain_alloc_reqbuf()
585 struct ptlrpc_request *req) plain_free_reqbuf()
587 if (!req->rq_pool) { plain_free_reqbuf()
588 OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len); plain_free_reqbuf()
589 req->rq_reqbuf = NULL; plain_free_reqbuf()
590 req->rq_reqbuf_len = 0; plain_free_reqbuf()
596 struct ptlrpc_request *req, plain_alloc_repbuf()
605 if (req->rq_pack_bulk) { plain_alloc_repbuf()
606 LASSERT(req->rq_bulk_read || req->rq_bulk_write); plain_alloc_repbuf()
617 OBD_ALLOC_LARGE(req->rq_repbuf, alloc_len); plain_alloc_repbuf()
618 if (!req->rq_repbuf) plain_alloc_repbuf()
621 req->rq_repbuf_len = alloc_len; plain_alloc_repbuf()
627 struct ptlrpc_request *req) plain_free_repbuf()
629 OBD_FREE_LARGE(req->rq_repbuf, req->rq_repbuf_len); plain_free_repbuf()
630 req->rq_repbuf = NULL; plain_free_repbuf()
631 req->rq_repbuf_len = 0; plain_free_repbuf()
636 struct ptlrpc_request *req, plain_enlarge_reqbuf()
643 LASSERT(req->rq_reqbuf); plain_enlarge_reqbuf()
644 LASSERT(req->rq_reqbuf_len >= req->rq_reqlen); plain_enlarge_reqbuf()
645 LASSERT(lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, 0) == plain_enlarge_reqbuf()
646 req->rq_reqmsg); plain_enlarge_reqbuf()
649 oldsize = req->rq_reqmsg->lm_buflens[segment]; plain_enlarge_reqbuf()
650 req->rq_reqmsg->lm_buflens[segment] = newsize; plain_enlarge_reqbuf()
651 newmsg_size = lustre_msg_size_v2(req->rq_reqmsg->lm_bufcount, plain_enlarge_reqbuf()
652 req->rq_reqmsg->lm_buflens); plain_enlarge_reqbuf()
653 req->rq_reqmsg->lm_buflens[segment] = oldsize; plain_enlarge_reqbuf()
656 oldsize = req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF]; plain_enlarge_reqbuf()
657 req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = newmsg_size; plain_enlarge_reqbuf()
658 newbuf_size = lustre_msg_size_v2(req->rq_reqbuf->lm_bufcount, plain_enlarge_reqbuf()
659 req->rq_reqbuf->lm_buflens); plain_enlarge_reqbuf()
660 req->rq_reqbuf->lm_buflens[PLAIN_PACK_MSG_OFF] = oldsize; plain_enlarge_reqbuf()
663 LASSERT(!req->rq_pool || req->rq_reqbuf_len >= newbuf_size); plain_enlarge_reqbuf()
665 if (req->rq_reqbuf_len < newbuf_size) { plain_enlarge_reqbuf()
678 if (req->rq_import) plain_enlarge_reqbuf()
679 spin_lock(&req->rq_import->imp_lock); plain_enlarge_reqbuf()
681 memcpy(newbuf, req->rq_reqbuf, req->rq_reqbuf_len); plain_enlarge_reqbuf()
683 OBD_FREE_LARGE(req->rq_reqbuf, req->rq_reqbuf_len); plain_enlarge_reqbuf()
684 req->rq_reqbuf = newbuf; plain_enlarge_reqbuf()
685 req->rq_reqbuf_len = newbuf_size; plain_enlarge_reqbuf()
686 req->rq_reqmsg = lustre_msg_buf(req->rq_reqbuf, plain_enlarge_reqbuf()
689 if (req->rq_import) plain_enlarge_reqbuf()
690 spin_unlock(&req->rq_import->imp_lock); plain_enlarge_reqbuf()
693 _sptlrpc_enlarge_msg_inplace(req->rq_reqbuf, PLAIN_PACK_MSG_OFF, plain_enlarge_reqbuf()
695 _sptlrpc_enlarge_msg_inplace(req->rq_reqmsg, segment, newsize); plain_enlarge_reqbuf()
697 req->rq_reqlen = newmsg_size; plain_enlarge_reqbuf()
711 int plain_accept(struct ptlrpc_request *req) plain_accept() argument
713 struct lustre_msg *msg = req->rq_reqbuf; plain_accept()
717 LASSERT(SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) == plain_accept()
720 if (SPTLRPC_FLVR_BASE(req->rq_flvr.sf_rpc) != plain_accept()
722 SPTLRPC_FLVR_BULK_TYPE(req->rq_flvr.sf_rpc) != plain_accept()
724 CERROR("Invalid rpc flavor %x\n", req->rq_flvr.sf_rpc); plain_accept()
733 swabbed = ptlrpc_req_need_swab(req); plain_accept()
751 req->rq_sp_from = phdr->ph_sp; plain_accept()
752 req->rq_flvr.u_bulk.hash.hash_alg = phdr->ph_bulk_hash_alg; plain_accept()
761 req->rq_pack_udesc = 1; plain_accept()
762 req->rq_user_desc = lustre_msg_buf(msg, PLAIN_PACK_USER_OFF, 0); plain_accept()
769 req->rq_pack_bulk = 1; plain_accept()
772 req->rq_reqmsg = lustre_msg_buf(msg, PLAIN_PACK_MSG_OFF, 0); plain_accept()
773 req->rq_reqlen = msg->lm_buflens[PLAIN_PACK_MSG_OFF]; plain_accept()
775 req->rq_svc_ctx = &plain_svc_ctx; plain_accept()
776 atomic_inc(&req->rq_svc_ctx->sc_refcount); plain_accept()
782 int plain_alloc_rs(struct ptlrpc_request *req, int msgsize) plain_alloc_rs() argument
793 if (req->rq_pack_bulk && (req->rq_bulk_read || req->rq_bulk_write)) plain_alloc_rs()
798 rs = req->rq_reply_state; plain_alloc_rs()
811 rs->rs_svc_ctx = req->rq_svc_ctx; plain_alloc_rs()
812 atomic_inc(&req->rq_svc_ctx->sc_refcount); plain_alloc_rs()
819 req->rq_reply_state = rs; plain_alloc_rs()
834 int plain_authorize(struct ptlrpc_request *req) plain_authorize() argument
836 struct ptlrpc_reply_state *rs = req->rq_reply_state; plain_authorize()
844 if (req->rq_replen != msg->lm_buflens[PLAIN_PACK_MSG_OFF]) plain_authorize()
846 req->rq_replen, 1); plain_authorize()
850 msg->lm_secflvr = req->rq_flvr.sf_rpc; plain_authorize()
855 phdr->ph_bulk_hash_alg = req->rq_flvr.u_bulk.hash.hash_alg; plain_authorize()
857 if (req->rq_pack_bulk) plain_authorize()
862 if (likely(req->rq_packed_final)) { plain_authorize()
863 if (lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) plain_authorize()
864 req->rq_reply_off = plain_at_offset; plain_authorize()
866 req->rq_reply_off = 0; plain_authorize()
874 req->rq_reply_off = 0; plain_authorize()
881 int plain_svc_unwrap_bulk(struct ptlrpc_request *req, plain_svc_unwrap_bulk() argument
884 struct ptlrpc_reply_state *rs = req->rq_reply_state; plain_svc_unwrap_bulk()
889 LASSERT(req->rq_bulk_write); plain_svc_unwrap_bulk()
890 LASSERT(req->rq_pack_bulk); plain_svc_unwrap_bulk()
892 bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0); plain_svc_unwrap_bulk()
904 rc = plain_verify_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg, plain_svc_unwrap_bulk()
915 int plain_svc_wrap_bulk(struct ptlrpc_request *req, plain_svc_wrap_bulk() argument
918 struct ptlrpc_reply_state *rs = req->rq_reply_state; plain_svc_wrap_bulk()
923 LASSERT(req->rq_bulk_read); plain_svc_wrap_bulk()
924 LASSERT(req->rq_pack_bulk); plain_svc_wrap_bulk()
926 bsdr = lustre_msg_buf(req->rq_reqbuf, PLAIN_PACK_BULK_OFF, 0); plain_svc_wrap_bulk()
938 rc = plain_generate_bulk_csum(desc, req->rq_flvr.u_bulk.hash.hash_alg, plain_svc_wrap_bulk()
288 plain_cli_wrap_bulk(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) plain_cli_wrap_bulk() argument
331 plain_cli_unwrap_bulk(struct ptlrpc_cli_ctx *ctx, struct ptlrpc_request *req, struct ptlrpc_bulk_desc *desc) plain_cli_unwrap_bulk() argument
539 plain_alloc_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req, int msgsize) plain_alloc_reqbuf() argument
584 plain_free_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req) plain_free_reqbuf() argument
595 plain_alloc_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req, int msgsize) plain_alloc_repbuf() argument
626 plain_free_repbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req) plain_free_repbuf() argument
635 plain_enlarge_reqbuf(struct ptlrpc_sec *sec, struct ptlrpc_request *req, int segment, int newsize) plain_enlarge_reqbuf() argument
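plain_enlarge_reqbuf() above uses a small trick worth calling out: to learn how large the packed message becomes after growing one segment, it patches lm_buflens[segment] to the new size, asks lustre_msg_size_v2() for the total, and restores the old length before doing any real work. Below is a standalone sketch of just that computation; the helper name is an assumption, the calls mirror the result lines above.

/* Sketch of the size computation used by plain_enlarge_reqbuf() above. */
static int demo_msg_size_with_segment(struct lustre_msg *msg,
                                      int segment, int newsize)
{
        __u32 oldsize = msg->lm_buflens[segment];
        int total;

        /* temporarily pretend the segment already has the new length */
        msg->lm_buflens[segment] = newsize;
        total = lustre_msg_size_v2(msg->lm_bufcount, msg->lm_buflens);
        msg->lm_buflens[segment] = oldsize;

        return total;
}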
H A Dllog_client.c86 struct ptlrpc_request *req = NULL; llog_client_open() local
95 req = ptlrpc_request_alloc(imp, &RQF_LLOG_ORIGIN_HANDLE_CREATE); llog_client_open()
96 if (req == NULL) { llog_client_open()
102 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, llog_client_open()
105 rc = ptlrpc_request_pack(req, LUSTRE_LOG_VERSION, llog_client_open()
108 ptlrpc_request_free(req); llog_client_open()
109 req = NULL; llog_client_open()
112 ptlrpc_request_set_replen(req); llog_client_open()
114 body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY); llog_client_open()
121 tmp = req_capsule_client_sized_get(&req->rq_pill, &RMF_NAME, llog_client_open()
127 rc = ptlrpc_queue_wait(req); llog_client_open()
131 body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); llog_client_open()
141 ptlrpc_req_finished(req); llog_client_open()
149 struct ptlrpc_request *req = NULL; llog_client_destroy() local
154 req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_DESTROY, llog_client_destroy()
157 if (req == NULL) { llog_client_destroy()
162 body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY); llog_client_destroy()
170 ptlrpc_request_set_replen(req); llog_client_destroy()
171 rc = ptlrpc_queue_wait(req); llog_client_destroy()
173 ptlrpc_req_finished(req); llog_client_destroy()
186 struct ptlrpc_request *req = NULL; llog_client_next_block() local
192 req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_NEXT_BLOCK, llog_client_next_block()
195 if (req == NULL) { llog_client_next_block()
200 body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY); llog_client_next_block()
209 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len); llog_client_next_block()
210 ptlrpc_request_set_replen(req); llog_client_next_block()
211 rc = ptlrpc_queue_wait(req); llog_client_next_block()
215 body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); llog_client_next_block()
222 ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA); llog_client_next_block()
233 ptlrpc_req_finished(req); llog_client_next_block()
244 struct ptlrpc_request *req = NULL; llog_client_prev_block() local
250 req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_PREV_BLOCK, llog_client_prev_block()
253 if (req == NULL) { llog_client_prev_block()
258 body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY); llog_client_prev_block()
265 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_SERVER, len); llog_client_prev_block()
266 ptlrpc_request_set_replen(req); llog_client_prev_block()
268 rc = ptlrpc_queue_wait(req); llog_client_prev_block()
272 body = req_capsule_server_get(&req->rq_pill, &RMF_LLOGD_BODY); llog_client_prev_block()
278 ptr = req_capsule_server_get(&req->rq_pill, &RMF_EADATA); llog_client_prev_block()
286 ptlrpc_req_finished(req); llog_client_prev_block()
296 struct ptlrpc_request *req = NULL; llog_client_read_header() local
303 req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_READ_HEADER, llog_client_read_header()
306 if (req == NULL) { llog_client_read_header()
311 body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY); llog_client_read_header()
316 ptlrpc_request_set_replen(req); llog_client_read_header()
317 rc = ptlrpc_queue_wait(req); llog_client_read_header()
321 hdr = req_capsule_server_get(&req->rq_pill, &RMF_LLOG_LOG_HDR); llog_client_read_header()
343 ptlrpc_req_finished(req); llog_client_read_header()
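Every llog_client.c hit above follows the same synchronous shape: allocate and pack a request from its capsule format, fill the client-side body, set the reply length, queue and wait, then drop the reference. The sketch below strips llog_client_destroy() down to that skeleton; the opcode constant and the body fields to fill are assumptions here, since the result lines cut them off, and error handling beyond the allocation check is omitted.

/* Illustrative sketch of the synchronous RPC pattern in the hits above.
 * Opcode constant and body fields are assumptions. */
static int demo_llog_rpc(struct obd_import *imp)
{
        struct ptlrpc_request *req;
        struct llogd_body *body;
        int rc;

        req = ptlrpc_request_alloc_pack(imp, &RQF_LLOG_ORIGIN_HANDLE_DESTROY,
                                        LUSTRE_LOG_VERSION,
                                        LLOG_ORIGIN_HANDLE_DESTROY);
        if (req == NULL)
                return -ENOMEM;

        /* request-side body lives in the capsule; fill it before sending */
        body = req_capsule_client_get(&req->rq_pill, &RMF_LLOGD_BODY);
        /* ... set body->lgd_* fields for the target log here ... */
        (void)body;

        ptlrpc_request_set_replen(req);   /* size the reply buffer */
        rc = ptlrpc_queue_wait(req);      /* send and wait synchronously */

        ptlrpc_req_finished(req);         /* drop our reference */
        return rc;
}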
H A Devents.c57 struct ptlrpc_request *req = cbid->cbid_arg; request_out_callback() local
63 DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status); request_out_callback()
65 sptlrpc_request_out_callback(req); request_out_callback()
66 spin_lock(&req->rq_lock); request_out_callback()
67 req->rq_real_sent = get_seconds(); request_out_callback()
69 req->rq_req_unlink = 0; request_out_callback()
76 req->rq_net_err = 1; request_out_callback()
77 ptlrpc_client_wake_req(req); request_out_callback()
79 spin_unlock(&req->rq_lock); request_out_callback()
81 ptlrpc_req_finished(req); request_out_callback()
90 struct ptlrpc_request *req = cbid->cbid_arg; reply_in_callback() local
92 DEBUG_REQ(D_NET, req, "type %d, status %d", ev->type, ev->status); reply_in_callback()
95 LASSERT(ev->md.start == req->rq_repbuf); reply_in_callback()
96 LASSERT(ev->offset + ev->mlength <= req->rq_repbuf_len); reply_in_callback()
101 spin_lock(&req->rq_lock); reply_in_callback()
103 req->rq_receiving_reply = 0; reply_in_callback()
104 req->rq_early = 0; reply_in_callback()
106 req->rq_reply_unlink = 0; reply_in_callback()
113 DEBUG_REQ(D_NET, req, "unlink"); reply_in_callback()
118 CDEBUG(D_RPCTRACE, "truncate req %p rpc %d - %d+%d\n", req, reply_in_callback()
119 req->rq_replen, ev->rlength, ev->offset); reply_in_callback()
120 req->rq_reply_truncate = 1; reply_in_callback()
121 req->rq_replied = 1; reply_in_callback()
122 req->rq_status = -EOVERFLOW; reply_in_callback()
123 req->rq_nob_received = ev->rlength + ev->offset; reply_in_callback()
128 ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT))) { reply_in_callback()
130 DEBUG_REQ(D_ADAPTTO, req, reply_in_callback()
133 req->rq_replen, req->rq_replied, ev->unlinked); reply_in_callback()
135 req->rq_early_count++; /* number received, client side */ reply_in_callback()
137 if (req->rq_replied) /* already got the real reply */ reply_in_callback()
140 req->rq_early = 1; reply_in_callback()
141 req->rq_reply_off = ev->offset; reply_in_callback()
142 req->rq_nob_received = ev->mlength; reply_in_callback()
144 req->rq_receiving_reply = 1; reply_in_callback()
147 req->rq_rep_swab_mask = 0; reply_in_callback()
148 req->rq_replied = 1; reply_in_callback()
150 req->rq_resend = 0; reply_in_callback()
151 req->rq_reply_off = ev->offset; reply_in_callback()
152 req->rq_nob_received = ev->mlength; reply_in_callback()
155 DEBUG_REQ(D_INFO, req, reply_in_callback()
157 lustre_msg_get_flags(req->rq_reqmsg), reply_in_callback()
158 ev->mlength, ev->offset, req->rq_replen); reply_in_callback()
161 req->rq_import->imp_last_reply_time = get_seconds(); reply_in_callback()
164 /* NB don't unlock till after wakeup; req can disappear under us reply_in_callback()
166 ptlrpc_client_wake_req(req); reply_in_callback()
167 spin_unlock(&req->rq_lock); reply_in_callback()
177 struct ptlrpc_request *req; client_bulk_callback() local
198 req = desc->bd_req; client_bulk_callback()
207 spin_lock(&req->rq_lock); client_bulk_callback()
208 req->rq_net_err = 1; client_bulk_callback()
209 spin_unlock(&req->rq_lock); client_bulk_callback()
246 struct ptlrpc_request *req) ptlrpc_req_add_history()
248 __u64 sec = req->rq_arrival_time.tv_sec; ptlrpc_req_add_history()
249 __u32 usec = req->rq_arrival_time.tv_usec >> 4; /* usec / 16 */ ptlrpc_req_add_history()
274 req->rq_history_seq = new_seq; ptlrpc_req_add_history()
276 list_add_tail(&req->rq_history_list, &svcpt->scp_hist_reqs); ptlrpc_req_add_history()
288 struct ptlrpc_request *req; request_in_callback() local
306 req = &rqbd->rqbd_req; request_in_callback()
307 memset(req, 0, sizeof(*req)); request_in_callback()
314 req = ptlrpc_request_cache_alloc(GFP_ATOMIC); request_in_callback()
315 if (req == NULL) { request_in_callback()
323 /* NB we ABSOLUTELY RELY on req being zeroed, so pointers are NULL, request_in_callback()
326 req->rq_xid = ev->match_bits; request_in_callback()
327 req->rq_reqbuf = ev->md.start + ev->offset; request_in_callback()
329 req->rq_reqdata_len = ev->mlength; request_in_callback()
330 do_gettimeofday(&req->rq_arrival_time); request_in_callback()
331 req->rq_peer = ev->initiator; request_in_callback()
332 req->rq_self = ev->target.nid; request_in_callback()
333 req->rq_rqbd = rqbd; request_in_callback()
334 req->rq_phase = RQ_PHASE_NEW; request_in_callback()
335 spin_lock_init(&req->rq_lock); request_in_callback()
336 INIT_LIST_HEAD(&req->rq_timed_list); request_in_callback()
337 INIT_LIST_HEAD(&req->rq_exp_list); request_in_callback()
338 atomic_set(&req->rq_refcount, 1); request_in_callback()
340 CDEBUG(D_INFO, "incoming req@%p x%llu msgsize %u\n", request_in_callback()
341 req, req->rq_xid, ev->mlength); request_in_callback()
343 CDEBUG(D_RPCTRACE, "peer: %s\n", libcfs_id2str(req->rq_peer)); request_in_callback()
347 ptlrpc_req_add_history(svcpt, req); request_in_callback()
362 /* req takes over the network's ref on rqbd */ request_in_callback()
364 /* req takes a ref on rqbd */ request_in_callback()
368 list_add_tail(&req->rq_list, &svcpt->scp_req_incoming); request_in_callback()
245 ptlrpc_req_add_history(struct ptlrpc_service_part *svcpt, struct ptlrpc_request *req) ptlrpc_req_add_history() argument
H A Dniobuf.c114 int ptlrpc_register_bulk(struct ptlrpc_request *req) ptlrpc_register_bulk() argument
116 struct ptlrpc_bulk_desc *desc = req->rq_bulk; ptlrpc_register_bulk()
139 if (req->rq_resend || req->rq_send_state == LUSTRE_IMP_REPLAY) ptlrpc_register_bulk()
158 xid = req->rq_xid & ~((__u64)desc->bd_md_max_brw - 1); ptlrpc_register_bulk()
160 req->rq_send_state != LUSTRE_IMP_REPLAY) || ptlrpc_register_bulk()
208 req->rq_status = -ENOMEM; ptlrpc_register_bulk()
214 req->rq_xid = --xid; ptlrpc_register_bulk()
215 LASSERTF(desc->bd_last_xid == (req->rq_xid & PTLRPC_BULK_OPS_MASK), ptlrpc_register_bulk()
217 desc->bd_last_xid, req->rq_xid); ptlrpc_register_bulk()
231 desc->bd_last_xid, req->rq_xid, desc->bd_portal); ptlrpc_register_bulk()
243 int ptlrpc_unregister_bulk(struct ptlrpc_request *req, int async) ptlrpc_unregister_bulk() argument
245 struct ptlrpc_bulk_desc *desc = req->rq_bulk; ptlrpc_unregister_bulk()
254 async && req->rq_bulk_deadline == 0) ptlrpc_unregister_bulk()
255 req->rq_bulk_deadline = get_seconds() + LONG_UNLINK; ptlrpc_unregister_bulk()
257 if (ptlrpc_client_bulk_active(req) == 0) /* completed or */ ptlrpc_unregister_bulk()
260 LASSERT(desc->bd_req == req); /* bd_req NULL until registered */ ptlrpc_unregister_bulk()
268 if (ptlrpc_client_bulk_active(req) == 0) /* completed or */ ptlrpc_unregister_bulk()
272 ptlrpc_rqphase_move(req, RQ_PHASE_UNREGISTERING); ptlrpc_unregister_bulk()
278 if (req->rq_set != NULL) ptlrpc_unregister_bulk()
279 wq = &req->rq_set->set_waitq; ptlrpc_unregister_bulk()
281 wq = &req->rq_reply_waitq; ptlrpc_unregister_bulk()
288 rc = l_wait_event(*wq, !ptlrpc_client_bulk_active(req), &lwi); ptlrpc_unregister_bulk()
290 ptlrpc_rqphase_move(req, req->rq_next_phase); ptlrpc_unregister_bulk()
295 DEBUG_REQ(D_WARNING, req, "Unexpectedly long timeout: desc %p", ptlrpc_unregister_bulk()
302 static void ptlrpc_at_set_reply(struct ptlrpc_request *req, int flags) ptlrpc_at_set_reply() argument
304 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt; ptlrpc_at_set_reply()
307 req->rq_arrival_time.tv_sec, 1); ptlrpc_at_set_reply()
310 (req->rq_type != PTL_RPC_MSG_ERR) && ptlrpc_at_set_reply()
311 (req->rq_reqmsg != NULL) && ptlrpc_at_set_reply()
312 !(lustre_msg_get_flags(req->rq_reqmsg) & ptlrpc_at_set_reply()
320 DEBUG_REQ(D_ADAPTTO, req, ptlrpc_at_set_reply()
327 lustre_msg_set_service_time(req->rq_repmsg, service_time); ptlrpc_at_set_reply()
331 if (req->rq_type == PTL_RPC_MSG_ERR && ptlrpc_at_set_reply()
332 (req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering)) ptlrpc_at_set_reply()
333 lustre_msg_set_timeout(req->rq_repmsg, 0); ptlrpc_at_set_reply()
335 lustre_msg_set_timeout(req->rq_repmsg, ptlrpc_at_set_reply()
338 if (req->rq_reqmsg && ptlrpc_at_set_reply()
339 !(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) { ptlrpc_at_set_reply()
341 flags, lustre_msg_get_flags(req->rq_reqmsg), ptlrpc_at_set_reply()
342 lustre_msg_is_v1(req->rq_reqmsg), ptlrpc_at_set_reply()
343 lustre_msg_get_magic(req->rq_reqmsg), ptlrpc_at_set_reply()
344 lustre_msg_get_magic(req->rq_repmsg), req->rq_replen); ptlrpc_at_set_reply()
349 * Send request reply from request \a req reply buffer.
353 int ptlrpc_send_reply(struct ptlrpc_request *req, int flags) ptlrpc_send_reply() argument
355 struct ptlrpc_reply_state *rs = req->rq_reply_state; ptlrpc_send_reply()
363 * request, or a saved copy if this is a req saved in ptlrpc_send_reply()
366 LASSERT(req->rq_no_reply == 0); ptlrpc_send_reply()
367 LASSERT(req->rq_reqbuf != NULL); ptlrpc_send_reply()
370 LASSERT(req->rq_repmsg != NULL); ptlrpc_send_reply()
371 LASSERT(req->rq_repmsg == rs->rs_msg); ptlrpc_send_reply()
377 if (unlikely(req->rq_export && req->rq_export->exp_obd && ptlrpc_send_reply()
378 req->rq_export->exp_obd->obd_fail)) { ptlrpc_send_reply()
380 req->rq_type = PTL_RPC_MSG_ERR; ptlrpc_send_reply()
381 req->rq_status = -ENODEV; ptlrpc_send_reply()
383 req->rq_export->exp_obd->obd_minor); ptlrpc_send_reply()
394 req->rq_replen = lustre_shrink_msg(req->rq_repmsg, 0, ptlrpc_send_reply()
397 if (req->rq_type != PTL_RPC_MSG_ERR) ptlrpc_send_reply()
398 req->rq_type = PTL_RPC_MSG_REPLY; ptlrpc_send_reply()
400 lustre_msg_set_type(req->rq_repmsg, req->rq_type); ptlrpc_send_reply()
401 lustre_msg_set_status(req->rq_repmsg, ptlrpc_send_reply()
402 ptlrpc_status_hton(req->rq_status)); ptlrpc_send_reply()
403 lustre_msg_set_opc(req->rq_repmsg, ptlrpc_send_reply()
404 req->rq_reqmsg ? lustre_msg_get_opc(req->rq_reqmsg) : 0); ptlrpc_send_reply()
406 target_pack_pool_reply(req); ptlrpc_send_reply()
408 ptlrpc_at_set_reply(req, flags); ptlrpc_send_reply()
410 if (req->rq_export == NULL || req->rq_export->exp_connection == NULL) ptlrpc_send_reply()
411 conn = ptlrpc_connection_get(req->rq_peer, req->rq_self, NULL); ptlrpc_send_reply()
413 conn = ptlrpc_connection_addref(req->rq_export->exp_connection); ptlrpc_send_reply()
421 rc = sptlrpc_svc_wrap_reply(req); ptlrpc_send_reply()
425 req->rq_sent = get_seconds(); ptlrpc_send_reply()
431 ptlrpc_req2svc(req)->srv_rep_portal, ptlrpc_send_reply()
432 req->rq_xid, req->rq_reply_off); ptlrpc_send_reply()
435 ptlrpc_req_drop_rs(req); ptlrpc_send_reply()
441 int ptlrpc_reply(struct ptlrpc_request *req) ptlrpc_reply() argument
443 if (req->rq_no_reply) ptlrpc_reply()
445 return ptlrpc_send_reply(req, 0); ptlrpc_reply()
450 * For request \a req send an error reply back. Create empty
453 int ptlrpc_send_error(struct ptlrpc_request *req, int may_be_difficult) ptlrpc_send_error() argument
457 if (req->rq_no_reply) ptlrpc_send_error()
460 if (!req->rq_repmsg) { ptlrpc_send_error()
461 rc = lustre_pack_reply(req, 1, NULL, NULL); ptlrpc_send_error()
466 if (req->rq_status != -ENOSPC && req->rq_status != -EACCES && ptlrpc_send_error()
467 req->rq_status != -EPERM && req->rq_status != -ENOENT && ptlrpc_send_error()
468 req->rq_status != -EINPROGRESS && req->rq_status != -EDQUOT) ptlrpc_send_error()
469 req->rq_type = PTL_RPC_MSG_ERR; ptlrpc_send_error()
471 rc = ptlrpc_send_reply(req, may_be_difficult); ptlrpc_send_error()
476 int ptlrpc_error(struct ptlrpc_request *req) ptlrpc_error() argument
478 return ptlrpc_send_error(req, 0); ptlrpc_error()
638 /* We give the server rq_timeout secs to process the req, and ptl_send_rpc()
H A Drecover.c73 struct ptlrpc_request *req = NULL; ptlrpc_replay_next() local
110 req = list_entry(tmp, struct ptlrpc_request, ptlrpc_replay_next()
114 if (req->rq_transno > last_transno) { ptlrpc_replay_next()
122 req = list_entry(imp->imp_replay_cursor, ptlrpc_replay_next()
125 if (req->rq_transno > last_transno) ptlrpc_replay_next()
128 req = NULL; ptlrpc_replay_next()
135 req = NULL; ptlrpc_replay_next()
141 if (req == NULL) { ptlrpc_replay_next()
143 req = list_entry(tmp, struct ptlrpc_request, ptlrpc_replay_next()
146 if (req->rq_transno > last_transno) ptlrpc_replay_next()
148 req = NULL; ptlrpc_replay_next()
153 * has occurred), then stop on the matching req and send it again. ptlrpc_replay_next()
156 if (req != NULL && imp->imp_resend_replay) ptlrpc_replay_next()
157 lustre_msg_add_flags(req->rq_reqmsg, MSG_RESENT); ptlrpc_replay_next()
163 if (req != NULL) { ptlrpc_replay_next()
164 rc = ptlrpc_replay_req(req); ptlrpc_replay_next()
166 CERROR("recovery replay error %d for req %llu\n", ptlrpc_replay_next()
167 rc, req->rq_xid); ptlrpc_replay_next()
181 struct ptlrpc_request *req, *next; ptlrpc_resend() local
195 list_for_each_entry_safe(req, next, &imp->imp_sending_list, ptlrpc_resend()
197 LASSERTF((long)req > PAGE_CACHE_SIZE && req != LP_POISON, ptlrpc_resend()
198 "req %p bad\n", req); ptlrpc_resend()
199 LASSERTF(req->rq_type != LI_POISON, "req %p freed\n", req); ptlrpc_resend()
200 if (!ptlrpc_no_resend(req)) ptlrpc_resend()
201 ptlrpc_resend_req(req); ptlrpc_resend()
216 struct ptlrpc_request *req; ptlrpc_wake_delayed() local
220 req = list_entry(tmp, struct ptlrpc_request, rq_list); ptlrpc_wake_delayed()
222 DEBUG_REQ(D_HA, req, "waking (set %p):", req->rq_set); ptlrpc_wake_delayed()
223 ptlrpc_client_wake_req(req); ptlrpc_wake_delayed()
H A Dservice.c64 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
65 static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
183 ptlrpc_save_lock(struct ptlrpc_request *req, ptlrpc_save_lock() argument
186 struct ptlrpc_reply_state *rs = req->rq_reply_state; ptlrpc_save_lock()
192 if (req->rq_export->exp_disconnected) { ptlrpc_save_lock()
828 * note it's caller's responsibility to unlink req->rq_list.
830 static void ptlrpc_server_free_request(struct ptlrpc_request *req) ptlrpc_server_free_request() argument
832 LASSERT(atomic_read(&req->rq_refcount) == 0); ptlrpc_server_free_request()
833 LASSERT(list_empty(&req->rq_timed_list)); ptlrpc_server_free_request()
837 ptlrpc_req_drop_rs(req); ptlrpc_server_free_request()
839 sptlrpc_svc_ctx_decref(req); ptlrpc_server_free_request()
841 if (req != &req->rq_rqbd->rqbd_req) { ptlrpc_server_free_request()
843 * req if the incoming req unlinked the ptlrpc_server_free_request()
845 ptlrpc_request_cache_free(req); ptlrpc_server_free_request()
853 void ptlrpc_server_drop_request(struct ptlrpc_request *req) ptlrpc_server_drop_request() argument
855 struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd; ptlrpc_server_drop_request()
862 if (!atomic_dec_and_test(&req->rq_refcount)) ptlrpc_server_drop_request()
865 if (req->rq_at_linked) { ptlrpc_server_drop_request()
869 if (likely(req->rq_at_linked)) ptlrpc_server_drop_request()
870 ptlrpc_at_remove_timed(req); ptlrpc_server_drop_request()
874 LASSERT(list_empty(&req->rq_timed_list)); ptlrpc_server_drop_request()
877 if (req->rq_export) { ptlrpc_server_drop_request()
878 class_export_put(req->rq_export); ptlrpc_server_drop_request()
879 req->rq_export = NULL; ptlrpc_server_drop_request()
884 list_add(&req->rq_list, &rqbd->rqbd_reqs); ptlrpc_server_drop_request()
904 /* remove rqbd's reqs from svc's req history while ptlrpc_server_drop_request()
907 req = list_entry(tmp, struct ptlrpc_request, ptlrpc_server_drop_request()
909 /* Track the highest culled req seq */ ptlrpc_server_drop_request()
910 if (req->rq_history_seq > ptlrpc_server_drop_request()
913 req->rq_history_seq; ptlrpc_server_drop_request()
915 list_del(&req->rq_history_list); ptlrpc_server_drop_request()
921 req = list_entry(rqbd->rqbd_reqs.next, ptlrpc_server_drop_request()
924 list_del(&req->rq_list); ptlrpc_server_drop_request()
925 ptlrpc_server_free_request(req); ptlrpc_server_drop_request()
930 * now all reqs including the embedded req have been ptlrpc_server_drop_request()
940 } else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) { ptlrpc_server_drop_request()
942 list_del(&req->rq_list); ptlrpc_server_drop_request()
943 list_del_init(&req->rq_history_list); ptlrpc_server_drop_request()
945 /* Track the highest culled req seq */ ptlrpc_server_drop_request()
946 if (req->rq_history_seq > svcpt->scp_hist_seq_culled) ptlrpc_server_drop_request()
947 svcpt->scp_hist_seq_culled = req->rq_history_seq; ptlrpc_server_drop_request()
951 ptlrpc_server_free_request(req); ptlrpc_server_drop_request()
958 void ptlrpc_request_change_export(struct ptlrpc_request *req, ptlrpc_request_change_export() argument
961 if (req->rq_export != NULL) { ptlrpc_request_change_export()
962 if (!list_empty(&req->rq_exp_list)) { ptlrpc_request_change_export()
964 spin_lock_bh(&req->rq_export->exp_rpc_lock); ptlrpc_request_change_export()
965 list_del_init(&req->rq_exp_list); ptlrpc_request_change_export()
966 spin_unlock_bh(&req->rq_export->exp_rpc_lock); ptlrpc_request_change_export()
969 * add req to export queue here and get another ptlrpc_request_change_export()
972 list_add(&req->rq_exp_list, &export->exp_hp_rpcs); ptlrpc_request_change_export()
975 class_export_rpc_dec(req->rq_export); ptlrpc_request_change_export()
976 class_export_put(req->rq_export); ptlrpc_request_change_export()
980 req->rq_export = class_export_get(export); ptlrpc_request_change_export()
991 struct ptlrpc_request *req) ptlrpc_server_finish_request()
993 ptlrpc_server_hpreq_fini(req); ptlrpc_server_finish_request()
995 ptlrpc_server_drop_request(req); ptlrpc_server_finish_request()
1004 struct ptlrpc_request *req) ptlrpc_server_finish_active_request()
1007 ptlrpc_nrs_req_stop_nolock(req); ptlrpc_server_finish_active_request()
1009 if (req->rq_hp) ptlrpc_server_finish_active_request()
1013 ptlrpc_nrs_req_finalize(req); ptlrpc_server_finish_active_request()
1015 if (req->rq_export != NULL) ptlrpc_server_finish_active_request()
1016 class_export_rpc_dec(req->rq_export); ptlrpc_server_finish_active_request()
1018 ptlrpc_server_finish_request(svcpt, req); ptlrpc_server_finish_active_request()
1098 * Sanity check request \a req.
1101 static int ptlrpc_check_req(struct ptlrpc_request *req) ptlrpc_check_req() argument
1103 struct obd_device *obd = req->rq_export->exp_obd; ptlrpc_check_req()
1106 if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) < ptlrpc_check_req()
1107 req->rq_export->exp_conn_cnt)) { ptlrpc_check_req()
1108 DEBUG_REQ(D_RPCTRACE, req, ptlrpc_check_req()
1109 "DROPPING req from old connection %d < %d", ptlrpc_check_req()
1110 lustre_msg_get_conn_cnt(req->rq_reqmsg), ptlrpc_check_req()
1111 req->rq_export->exp_conn_cnt); ptlrpc_check_req()
1119 CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n", ptlrpc_check_req()
1120 req, (obd != NULL) ? obd->obd_name : "unknown"); ptlrpc_check_req()
1122 } else if (lustre_msg_get_flags(req->rq_reqmsg) & ptlrpc_check_req()
1125 DEBUG_REQ(D_ERROR, req, ptlrpc_check_req()
1127 class_fail_export(req->rq_export); ptlrpc_check_req()
1129 } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 && ptlrpc_check_req()
1131 DEBUG_REQ(D_ERROR, req, "Invalid req with transno %llu without recovery", ptlrpc_check_req()
1132 lustre_msg_get_transno(req->rq_reqmsg)); ptlrpc_check_req()
1133 class_fail_export(req->rq_export); ptlrpc_check_req()
1138 req->rq_status = rc; ptlrpc_check_req()
1139 ptlrpc_error(req); ptlrpc_check_req()
1167 static int ptlrpc_at_add_timed(struct ptlrpc_request *req) ptlrpc_at_add_timed() argument
1169 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt; ptlrpc_at_add_timed()
1177 if (req->rq_no_reply) ptlrpc_at_add_timed()
1180 if ((lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) == 0) ptlrpc_at_add_timed()
1184 LASSERT(list_empty(&req->rq_timed_list)); ptlrpc_at_add_timed()
1186 index = (unsigned long)req->rq_deadline % array->paa_size; ptlrpc_at_add_timed()
1193 if (req->rq_deadline >= rq->rq_deadline) { ptlrpc_at_add_timed()
1194 list_add(&req->rq_timed_list, ptlrpc_at_add_timed()
1202 if (list_empty(&req->rq_timed_list)) ptlrpc_at_add_timed()
1203 list_add(&req->rq_timed_list, ptlrpc_at_add_timed()
1206 spin_lock(&req->rq_lock); ptlrpc_at_add_timed()
1207 req->rq_at_linked = 1; ptlrpc_at_add_timed()
1208 spin_unlock(&req->rq_lock); ptlrpc_at_add_timed()
1209 req->rq_at_index = index; ptlrpc_at_add_timed()
1212 if (array->paa_count == 1 || array->paa_deadline > req->rq_deadline) { ptlrpc_at_add_timed()
1213 array->paa_deadline = req->rq_deadline; ptlrpc_at_add_timed()
1222 ptlrpc_at_remove_timed(struct ptlrpc_request *req) ptlrpc_at_remove_timed() argument
1226 array = &req->rq_rqbd->rqbd_svcpt->scp_at_array; ptlrpc_at_remove_timed()
1229 LASSERT(!list_empty(&req->rq_timed_list)); ptlrpc_at_remove_timed()
1230 list_del_init(&req->rq_timed_list); ptlrpc_at_remove_timed()
1232 spin_lock(&req->rq_lock); ptlrpc_at_remove_timed()
1233 req->rq_at_linked = 0; ptlrpc_at_remove_timed()
1234 spin_unlock(&req->rq_lock); ptlrpc_at_remove_timed()
1236 array->paa_reqs_count[req->rq_at_index]--; ptlrpc_at_remove_timed()
1240 static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req) ptlrpc_at_send_early_reply() argument
1242 struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt; ptlrpc_at_send_early_reply()
1245 long olddl = req->rq_deadline - get_seconds(); ptlrpc_at_send_early_reply()
1251 DEBUG_REQ(D_ADAPTTO, req, ptlrpc_at_send_early_reply()
1261 DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), not sending early reply. Consider increasing at_early_margin (%d)?", ptlrpc_at_send_early_reply()
1268 if (!(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT)) { ptlrpc_at_send_early_reply()
1269 DEBUG_REQ(D_INFO, req, "Wanted to ask client for more time, but no AT support"); ptlrpc_at_send_early_reply()
1273 if (req->rq_export && ptlrpc_at_send_early_reply()
1274 lustre_msg_get_flags(req->rq_reqmsg) & ptlrpc_at_send_early_reply()
1283 req->rq_export->exp_obd->obd_recovery_timeout / 4)); ptlrpc_at_send_early_reply()
1289 req->rq_arrival_time.tv_sec); ptlrpc_at_send_early_reply()
1293 if (req->rq_deadline >= req->rq_arrival_time.tv_sec + ptlrpc_at_send_early_reply()
1295 DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%ld), not sending early reply\n", ptlrpc_at_send_early_reply()
1296 olddl, req->rq_arrival_time.tv_sec + ptlrpc_at_send_early_reply()
1307 OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen); ptlrpc_at_send_early_reply()
1313 *reqcopy = *req; ptlrpc_at_send_early_reply()
1322 memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen); ptlrpc_at_send_early_reply()
1324 LASSERT(atomic_read(&req->rq_refcount)); ptlrpc_at_send_early_reply()
1326 if (atomic_read(&req->rq_refcount) == 1) { ptlrpc_at_send_early_reply()
1356 req->rq_deadline = newdl; ptlrpc_at_send_early_reply()
1357 req->rq_early_count++; /* number sent, server side */ ptlrpc_at_send_early_reply()
1359 DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc); ptlrpc_at_send_early_reply()
1371 OBD_FREE_LARGE(reqmsg, req->rq_reqlen); ptlrpc_at_send_early_reply()
1485 struct ptlrpc_request *req) ptlrpc_server_hpreq_init()
1490 rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req); ptlrpc_server_hpreq_init()
1495 if (req->rq_export && req->rq_ops) { ptlrpc_server_hpreq_init()
1499 if (req->rq_ops->hpreq_check) { ptlrpc_server_hpreq_init()
1500 rc = req->rq_ops->hpreq_check(req); ptlrpc_server_hpreq_init()
1517 spin_lock_bh(&req->rq_export->exp_rpc_lock); ptlrpc_server_hpreq_init()
1518 list_add(&req->rq_exp_list, ptlrpc_server_hpreq_init()
1519 &req->rq_export->exp_hp_rpcs); ptlrpc_server_hpreq_init()
1520 spin_unlock_bh(&req->rq_export->exp_rpc_lock); ptlrpc_server_hpreq_init()
1523 ptlrpc_nrs_req_initialize(svcpt, req, rc); ptlrpc_server_hpreq_init()
1529 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req) ptlrpc_server_hpreq_fini() argument
1531 if (req->rq_export && req->rq_ops) { ptlrpc_server_hpreq_fini()
1534 if (req->rq_ops->hpreq_fini) ptlrpc_server_hpreq_fini()
1535 req->rq_ops->hpreq_fini(req); ptlrpc_server_hpreq_fini()
1537 spin_lock_bh(&req->rq_export->exp_rpc_lock); ptlrpc_server_hpreq_fini()
1538 list_del_init(&req->rq_exp_list); ptlrpc_server_hpreq_fini()
1539 spin_unlock_bh(&req->rq_export->exp_rpc_lock); ptlrpc_server_hpreq_fini()
1543 static int ptlrpc_hpreq_check(struct ptlrpc_request *req) ptlrpc_hpreq_check() argument
1553 int ptlrpc_hpreq_handler(struct ptlrpc_request *req) ptlrpc_hpreq_handler() argument
1555 int opc = lustre_msg_get_opc(req->rq_reqmsg); ptlrpc_hpreq_handler()
1559 if ((req->rq_export != NULL) && ptlrpc_hpreq_handler()
1561 req->rq_ops = &ptlrpc_hpreq_common; ptlrpc_hpreq_handler()
1568 struct ptlrpc_request *req) ptlrpc_server_request_add()
1572 rc = ptlrpc_server_hpreq_init(svcpt, req); ptlrpc_server_request_add()
1576 ptlrpc_nrs_req_add(svcpt, req, !!rc); ptlrpc_server_request_add()
1683 struct ptlrpc_request *req = NULL; ptlrpc_server_request_get() local
1688 req = ptlrpc_nrs_req_get_nolock(svcpt, true, force); ptlrpc_server_request_get()
1689 if (req != NULL) { ptlrpc_server_request_get()
1696 req = ptlrpc_nrs_req_get_nolock(svcpt, false, force); ptlrpc_server_request_get()
1697 if (req != NULL) { ptlrpc_server_request_get()
1708 if (req->rq_hp) ptlrpc_server_request_get()
1713 if (likely(req->rq_export)) ptlrpc_server_request_get()
1714 class_export_rpc_inc(req->rq_export); ptlrpc_server_request_get()
1716 return req; ptlrpc_server_request_get()
1730 struct ptlrpc_request *req; ptlrpc_server_handle_req_in() local
1740 req = list_entry(svcpt->scp_req_incoming.next, ptlrpc_server_handle_req_in()
1742 list_del_init(&req->rq_list); ptlrpc_server_handle_req_in()
1749 rc = sptlrpc_svc_unwrap_request(req); ptlrpc_server_handle_req_in()
1754 target_send_reply(req, 0, OBD_FAIL_MDS_ALL_REPLY_NET); ptlrpc_server_handle_req_in()
1766 if (SPTLRPC_FLVR_POLICY(req->rq_flvr.sf_rpc) != SPTLRPC_POLICY_NULL) { ptlrpc_server_handle_req_in()
1767 rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen); ptlrpc_server_handle_req_in()
1770 svc->srv_req_portal, libcfs_id2str(req->rq_peer), ptlrpc_server_handle_req_in()
1771 req->rq_xid); ptlrpc_server_handle_req_in()
1776 rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF); ptlrpc_server_handle_req_in()
1779 svc->srv_req_portal, libcfs_id2str(req->rq_peer), ptlrpc_server_handle_req_in()
1780 req->rq_xid); ptlrpc_server_handle_req_in()
1785 lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) { ptlrpc_server_handle_req_in()
1787 cfs_fail_val, req->rq_xid); ptlrpc_server_handle_req_in()
1792 if (lustre_msg_get_type(req->rq_reqmsg) != PTL_RPC_MSG_REQUEST) { ptlrpc_server_handle_req_in()
1794 lustre_msg_get_type(req->rq_reqmsg), ptlrpc_server_handle_req_in()
1795 libcfs_id2str(req->rq_peer)); ptlrpc_server_handle_req_in()
1799 switch (lustre_msg_get_opc(req->rq_reqmsg)) { ptlrpc_server_handle_req_in()
1802 req->rq_bulk_write = 1; ptlrpc_server_handle_req_in()
1807 req->rq_bulk_read = 1; ptlrpc_server_handle_req_in()
1811 CDEBUG(D_RPCTRACE, "got req x%llu\n", req->rq_xid); ptlrpc_server_handle_req_in()
1813 req->rq_export = class_conn2export( ptlrpc_server_handle_req_in()
1814 lustre_msg_get_handle(req->rq_reqmsg)); ptlrpc_server_handle_req_in()
1815 if (req->rq_export) { ptlrpc_server_handle_req_in()
1816 rc = ptlrpc_check_req(req); ptlrpc_server_handle_req_in()
1818 rc = sptlrpc_target_export_check(req->rq_export, req); ptlrpc_server_handle_req_in()
1820 DEBUG_REQ(D_ERROR, req, "DROPPING req with illegal security flavor,"); ptlrpc_server_handle_req_in()
1825 ptlrpc_update_export_timer(req->rq_export, 0); ptlrpc_server_handle_req_in()
1829 if (get_seconds() - req->rq_arrival_time.tv_sec > 5) ptlrpc_server_handle_req_in()
1830 DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s", ptlrpc_server_handle_req_in()
1832 req->rq_arrival_time.tv_sec)); ptlrpc_server_handle_req_in()
1835 deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) & ptlrpc_server_handle_req_in()
1838 lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout; ptlrpc_server_handle_req_in()
1839 req->rq_deadline = req->rq_arrival_time.tv_sec + deadline; ptlrpc_server_handle_req_in()
1841 DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout"); ptlrpc_server_handle_req_in()
1845 req->rq_svc_thread = thread; ptlrpc_server_handle_req_in()
1847 ptlrpc_at_add_timed(req); ptlrpc_server_handle_req_in()
1850 rc = ptlrpc_server_request_add(svcpt, req); ptlrpc_server_handle_req_in()
1858 ptlrpc_server_finish_request(svcpt, req); ptlrpc_server_handle_req_in()
1921 CDEBUG(D_NET, "got req %llu\n", request->rq_xid); ptlrpc_server_handle_request()
2923 struct ptlrpc_request *req; ptlrpc_service_purge_all() local
2945 req = list_entry(svcpt->scp_req_incoming.next, ptlrpc_service_for_each_part()
2948 list_del(&req->rq_list); ptlrpc_service_for_each_part()
2950 ptlrpc_server_finish_request(svcpt, req); ptlrpc_service_for_each_part()
2954 req = ptlrpc_server_request_get(svcpt, true); ptlrpc_service_for_each_part()
2955 ptlrpc_server_finish_active_request(svcpt, req); ptlrpc_service_for_each_part()
990 ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt, struct ptlrpc_request *req) ptlrpc_server_finish_request() argument
1002 ptlrpc_server_finish_active_request( struct ptlrpc_service_part *svcpt, struct ptlrpc_request *req) ptlrpc_server_finish_active_request() argument
1484 ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt, struct ptlrpc_request *req) ptlrpc_server_hpreq_init() argument
1567 ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt, struct ptlrpc_request *req) ptlrpc_server_request_add() argument
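In ptlrpc_at_add_timed() above, each incoming request is filed into the adaptive-timeout (AT) array by taking its absolute deadline modulo the number of buckets, while array->paa_deadline tracks the earliest deadline seen. A tiny illustration of that indexing follows; the example_* name, the bucket count and the deadline value are all made up for the sketch.

static unsigned long example_at_bucket(unsigned long rq_deadline,
                                       unsigned int paa_size)
{
        /* same computation as
         * "index = (unsigned long)req->rq_deadline % array->paa_size";
         * e.g. with paa_size = 600 buckets, a deadline of 1467731123 s
         * lands in bucket 1467731123 % 600 = 323 */
        return rq_deadline % paa_size;
}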
/linux-4.1.27/include/crypto/
H A Daead.h34 struct aead_givcrypt_request *req) aead_givcrypt_reqtfm()
36 return crypto_aead_reqtfm(&req->areq); aead_givcrypt_reqtfm()
39 static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req) crypto_aead_givencrypt() argument
41 struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req)); crypto_aead_givencrypt()
42 return crt->givencrypt(req); crypto_aead_givencrypt()
45 static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req) crypto_aead_givdecrypt() argument
47 struct aead_tfm *crt = crypto_aead_crt(aead_givcrypt_reqtfm(req)); crypto_aead_givdecrypt()
48 return crt->givdecrypt(req); crypto_aead_givdecrypt()
51 static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req, aead_givcrypt_set_tfm() argument
54 req->areq.base.tfm = crypto_aead_tfm(tfm); aead_givcrypt_set_tfm()
60 struct aead_givcrypt_request *req; aead_givcrypt_alloc() local
62 req = kmalloc(sizeof(struct aead_givcrypt_request) + aead_givcrypt_alloc()
65 if (likely(req)) aead_givcrypt_alloc()
66 aead_givcrypt_set_tfm(req, tfm); aead_givcrypt_alloc()
68 return req; aead_givcrypt_alloc()
71 static inline void aead_givcrypt_free(struct aead_givcrypt_request *req) aead_givcrypt_free() argument
73 kfree(req); aead_givcrypt_free()
77 struct aead_givcrypt_request *req, u32 flags, aead_givcrypt_set_callback()
80 aead_request_set_callback(&req->areq, flags, compl, data); aead_givcrypt_set_callback()
83 static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req, aead_givcrypt_set_crypt() argument
88 aead_request_set_crypt(&req->areq, src, dst, nbytes, iv); aead_givcrypt_set_crypt()
91 static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req, aead_givcrypt_set_assoc() argument
95 aead_request_set_assoc(&req->areq, assoc, assoclen); aead_givcrypt_set_assoc()
98 static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req, aead_givcrypt_set_giv() argument
101 req->giv = giv; aead_givcrypt_set_giv()
102 req->seq = seq; aead_givcrypt_set_giv()
33 aead_givcrypt_reqtfm( struct aead_givcrypt_request *req) aead_givcrypt_reqtfm() argument
76 aead_givcrypt_set_callback( struct aead_givcrypt_request *req, u32 flags, crypto_completion_t compl, void *data) aead_givcrypt_set_callback() argument
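The inline helpers excerpted from aead.h above form a complete request API. Below is a minimal usage sketch, assuming a struct crypto_aead transform was already allocated elsewhere (for example with crypto_alloc_aead(), not shown); only the helpers visible in the header are used, and the example_* name is made up. The sketch also assumes a synchronous implementation: an asynchronous transform returns -EINPROGRESS and the request must not be freed until its completion callback has run.

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <crypto/aead.h>

static int example_aead_givencrypt(struct crypto_aead *tfm,
                                   struct scatterlist *src,
                                   struct scatterlist *dst,
                                   struct scatterlist *assoc,
                                   unsigned int nbytes, unsigned int assoclen,
                                   u8 *iv, u8 *giv, u64 seq)
{
        struct aead_givcrypt_request *req;
        int err;

        req = aead_givcrypt_alloc(tfm, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        aead_givcrypt_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
        aead_givcrypt_set_crypt(req, src, dst, nbytes, iv);
        aead_givcrypt_set_assoc(req, assoc, assoclen);
        aead_givcrypt_set_giv(req, giv, seq);

        /* assumes a synchronous return; async tfms give -EINPROGRESS and
         * require a real completion callback instead of NULL above */
        err = crypto_aead_givencrypt(req);

        aead_givcrypt_free(req);
        return err;
}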
H A Dskcipher.h34 struct skcipher_givcrypt_request *req) skcipher_givcrypt_reqtfm()
36 return crypto_ablkcipher_reqtfm(&req->creq); skcipher_givcrypt_reqtfm()
40 struct skcipher_givcrypt_request *req) crypto_skcipher_givencrypt()
43 crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); crypto_skcipher_givencrypt()
44 return crt->givencrypt(req); crypto_skcipher_givencrypt()
48 struct skcipher_givcrypt_request *req) crypto_skcipher_givdecrypt()
51 crypto_ablkcipher_crt(skcipher_givcrypt_reqtfm(req)); crypto_skcipher_givdecrypt()
52 return crt->givdecrypt(req); crypto_skcipher_givdecrypt()
56 struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm) skcipher_givcrypt_set_tfm()
58 req->creq.base.tfm = crypto_ablkcipher_tfm(tfm); skcipher_givcrypt_set_tfm()
62 struct crypto_async_request *req) skcipher_givcrypt_cast()
64 return container_of(ablkcipher_request_cast(req), skcipher_givcrypt_cast()
71 struct skcipher_givcrypt_request *req; skcipher_givcrypt_alloc() local
73 req = kmalloc(sizeof(struct skcipher_givcrypt_request) + skcipher_givcrypt_alloc()
76 if (likely(req)) skcipher_givcrypt_alloc()
77 skcipher_givcrypt_set_tfm(req, tfm); skcipher_givcrypt_alloc()
79 return req; skcipher_givcrypt_alloc()
82 static inline void skcipher_givcrypt_free(struct skcipher_givcrypt_request *req) skcipher_givcrypt_free() argument
84 kfree(req); skcipher_givcrypt_free()
88 struct skcipher_givcrypt_request *req, u32 flags, skcipher_givcrypt_set_callback()
91 ablkcipher_request_set_callback(&req->creq, flags, compl, data); skcipher_givcrypt_set_callback()
95 struct skcipher_givcrypt_request *req, skcipher_givcrypt_set_crypt()
99 ablkcipher_request_set_crypt(&req->creq, src, dst, nbytes, iv); skcipher_givcrypt_set_crypt()
103 struct skcipher_givcrypt_request *req, u8 *giv, u64 seq) skcipher_givcrypt_set_giv()
105 req->giv = giv; skcipher_givcrypt_set_giv()
106 req->seq = seq; skcipher_givcrypt_set_giv()
33 skcipher_givcrypt_reqtfm( struct skcipher_givcrypt_request *req) skcipher_givcrypt_reqtfm() argument
39 crypto_skcipher_givencrypt( struct skcipher_givcrypt_request *req) crypto_skcipher_givencrypt() argument
47 crypto_skcipher_givdecrypt( struct skcipher_givcrypt_request *req) crypto_skcipher_givdecrypt() argument
55 skcipher_givcrypt_set_tfm( struct skcipher_givcrypt_request *req, struct crypto_ablkcipher *tfm) skcipher_givcrypt_set_tfm() argument
61 skcipher_givcrypt_cast( struct crypto_async_request *req) skcipher_givcrypt_cast() argument
87 skcipher_givcrypt_set_callback( struct skcipher_givcrypt_request *req, u32 flags, crypto_completion_t compl, void *data) skcipher_givcrypt_set_callback() argument
94 skcipher_givcrypt_set_crypt( struct skcipher_givcrypt_request *req, struct scatterlist *src, struct scatterlist *dst, unsigned int nbytes, void *iv) skcipher_givcrypt_set_crypt() argument
102 skcipher_givcrypt_set_giv( struct skcipher_givcrypt_request *req, u8 *giv, u64 seq) skcipher_givcrypt_set_giv() argument
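skcipher.h mirrors the same request helpers for givcipher transforms; the skcipher_givcrypt_cast() inline above is what an asynchronous completion callback uses to get back from the generic crypto_async_request to the givcrypt request. A sketch of such a callback follows; the example_* names and the caller context structure are made up.

#include <linux/completion.h>
#include <crypto/skcipher.h>

struct example_giv_ctx {                /* hypothetical caller context */
        struct completion done;
        int err;
};

static void example_givencrypt_done(struct crypto_async_request *areq, int err)
{
        struct skcipher_givcrypt_request *req = skcipher_givcrypt_cast(areq);
        struct example_giv_ctx *ctx = areq->data;

        (void)req;                      /* e.g. the generated IV is in req->giv */
        ctx->err = err;
        complete(&ctx->done);
}

Such a callback would be installed with skcipher_givcrypt_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, example_givencrypt_done, ctx) before calling crypto_skcipher_givencrypt().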
H A Dablk_helper.h19 extern int __ablk_encrypt(struct ablkcipher_request *req);
21 extern int ablk_encrypt(struct ablkcipher_request *req);
23 extern int ablk_decrypt(struct ablkcipher_request *req);
H A Dcompress.h62 struct comp_request *req);
64 struct comp_request *req);
69 struct comp_request *req);
71 struct comp_request *req);
111 struct comp_request *req) crypto_compress_update()
113 return crypto_pcomp_alg(tfm)->compress_update(tfm, req); crypto_compress_update()
117 struct comp_request *req) crypto_compress_final()
119 return crypto_pcomp_alg(tfm)->compress_final(tfm, req); crypto_compress_final()
134 struct comp_request *req) crypto_decompress_update()
136 return crypto_pcomp_alg(tfm)->decompress_update(tfm, req); crypto_decompress_update()
140 struct comp_request *req) crypto_decompress_final()
142 return crypto_pcomp_alg(tfm)->decompress_final(tfm, req); crypto_decompress_final()
110 crypto_compress_update(struct crypto_pcomp *tfm, struct comp_request *req) crypto_compress_update() argument
116 crypto_compress_final(struct crypto_pcomp *tfm, struct comp_request *req) crypto_compress_final() argument
133 crypto_decompress_update(struct crypto_pcomp *tfm, struct comp_request *req) crypto_decompress_update() argument
139 crypto_decompress_final(struct crypto_pcomp *tfm, struct comp_request *req) crypto_decompress_final() argument
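compress.h above exposes the partial (streaming) compression interface built around struct comp_request. A minimal single-shot sketch follows, assuming the transform has already been set up; the example_* name is made up and the comp_request field names (next_in/avail_in/next_out/avail_out) are assumed, since they are not visible in the excerpt.

#include <crypto/compress.h>

static int example_pcomp_compress(struct crypto_pcomp *tfm,
                                  const void *src, unsigned int slen,
                                  void *dst, unsigned int dlen)
{
        struct comp_request req = {
                .next_in   = src,       /* assumed field names */
                .avail_in  = slen,
                .next_out  = dst,
                .avail_out = dlen,
        };
        int err;

        err = crypto_compress_update(tfm, &req);  /* may return bytes produced */
        if (err < 0)
                return err;

        return crypto_compress_final(tfm, &req);
}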
/linux-4.1.27/drivers/s390/scsi/
H A Dzfcp_fsf.c66 static void zfcp_fsf_class_not_supp(struct zfcp_fsf_req *req) zfcp_fsf_class_not_supp() argument
68 dev_err(&req->adapter->ccw_device->dev, "FCP device not " zfcp_fsf_class_not_supp()
70 zfcp_erp_adapter_shutdown(req->adapter, 0, "fscns_1"); zfcp_fsf_class_not_supp()
71 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_class_not_supp()
78 void zfcp_fsf_req_free(struct zfcp_fsf_req *req) zfcp_fsf_req_free() argument
80 if (likely(req->pool)) { zfcp_fsf_req_free()
81 if (likely(req->qtcb)) zfcp_fsf_req_free()
82 mempool_free(req->qtcb, req->adapter->pool.qtcb_pool); zfcp_fsf_req_free()
83 mempool_free(req, req->pool); zfcp_fsf_req_free()
87 if (likely(req->qtcb)) zfcp_fsf_req_free()
88 kmem_cache_free(zfcp_fsf_qtcb_cache, req->qtcb); zfcp_fsf_req_free()
89 kfree(req); zfcp_fsf_req_free()
92 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req) zfcp_fsf_status_read_port_closed() argument
95 struct fsf_status_read_buffer *sr_buf = req->data; zfcp_fsf_status_read_port_closed()
96 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_status_read_port_closed()
109 static void zfcp_fsf_link_down_info_eval(struct zfcp_fsf_req *req, zfcp_fsf_link_down_info_eval() argument
112 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_link_down_info_eval()
126 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
131 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
136 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
141 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
146 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
151 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
155 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
159 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
164 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
169 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
174 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
179 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
184 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_link_down_info_eval()
192 static void zfcp_fsf_status_read_link_down(struct zfcp_fsf_req *req) zfcp_fsf_status_read_link_down() argument
194 struct fsf_status_read_buffer *sr_buf = req->data; zfcp_fsf_status_read_link_down()
200 zfcp_fsf_link_down_info_eval(req, ldi); zfcp_fsf_status_read_link_down()
203 zfcp_fsf_link_down_info_eval(req, ldi); zfcp_fsf_status_read_link_down()
206 zfcp_fsf_link_down_info_eval(req, NULL); zfcp_fsf_status_read_link_down()
210 static void zfcp_fsf_status_read_handler(struct zfcp_fsf_req *req) zfcp_fsf_status_read_handler() argument
212 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_status_read_handler()
213 struct fsf_status_read_buffer *sr_buf = req->data; zfcp_fsf_status_read_handler()
215 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { zfcp_fsf_status_read_handler()
216 zfcp_dbf_hba_fsf_uss("fssrh_1", req); zfcp_fsf_status_read_handler()
218 zfcp_fsf_req_free(req); zfcp_fsf_status_read_handler()
222 zfcp_dbf_hba_fsf_uss("fssrh_4", req); zfcp_fsf_status_read_handler()
226 zfcp_fsf_status_read_port_closed(req); zfcp_fsf_status_read_handler()
229 zfcp_fc_incoming_els(req); zfcp_fsf_status_read_handler()
237 zfcp_dbf_hba_bit_err("fssrh_3", req); zfcp_fsf_status_read_handler()
240 zfcp_fsf_status_read_link_down(req); zfcp_fsf_status_read_handler()
266 zfcp_fsf_req_free(req); zfcp_fsf_status_read_handler()
272 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req) zfcp_fsf_fsfstatus_qual_eval() argument
274 switch (req->qtcb->header.fsf_status_qual.word[0]) { zfcp_fsf_fsfstatus_qual_eval()
283 dev_err(&req->adapter->ccw_device->dev, zfcp_fsf_fsfstatus_qual_eval()
286 zfcp_qdio_siosl(req->adapter); zfcp_fsf_fsfstatus_qual_eval()
287 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfsqe1"); zfcp_fsf_fsfstatus_qual_eval()
291 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_fsfstatus_qual_eval()
294 static void zfcp_fsf_fsfstatus_eval(struct zfcp_fsf_req *req) zfcp_fsf_fsfstatus_eval() argument
296 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) zfcp_fsf_fsfstatus_eval()
299 switch (req->qtcb->header.fsf_status) { zfcp_fsf_fsfstatus_eval()
301 dev_err(&req->adapter->ccw_device->dev, zfcp_fsf_fsfstatus_eval()
303 req->qtcb->header.fsf_command); zfcp_fsf_fsfstatus_eval()
304 zfcp_erp_adapter_shutdown(req->adapter, 0, "fsfse_1"); zfcp_fsf_fsfstatus_eval()
305 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_fsfstatus_eval()
308 zfcp_fsf_fsfstatus_qual_eval(req); zfcp_fsf_fsfstatus_eval()
313 static void zfcp_fsf_protstatus_eval(struct zfcp_fsf_req *req) zfcp_fsf_protstatus_eval() argument
315 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_protstatus_eval()
316 struct fsf_qtcb *qtcb = req->qtcb; zfcp_fsf_protstatus_eval()
319 zfcp_dbf_hba_fsf_response(req); zfcp_fsf_protstatus_eval()
321 if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) { zfcp_fsf_protstatus_eval()
322 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_protstatus_eval()
340 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_protstatus_eval()
358 zfcp_fsf_link_down_info_eval(req, &psq->link_down_info); zfcp_fsf_protstatus_eval()
378 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_protstatus_eval()
390 static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req) zfcp_fsf_req_complete() argument
392 if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) { zfcp_fsf_req_complete()
393 zfcp_fsf_status_read_handler(req); zfcp_fsf_req_complete()
397 del_timer(&req->timer); zfcp_fsf_req_complete()
398 zfcp_fsf_protstatus_eval(req); zfcp_fsf_req_complete()
399 zfcp_fsf_fsfstatus_eval(req); zfcp_fsf_req_complete()
400 req->handler(req); zfcp_fsf_req_complete()
402 if (req->erp_action) zfcp_fsf_req_complete()
403 zfcp_erp_notify(req->erp_action, 0); zfcp_fsf_req_complete()
405 if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP)) zfcp_fsf_req_complete()
406 zfcp_fsf_req_free(req); zfcp_fsf_req_complete()
408 complete(&req->completion); zfcp_fsf_req_complete()
422 struct zfcp_fsf_req *req, *tmp; zfcp_fsf_req_dismiss_all() local
428 list_for_each_entry_safe(req, tmp, &remove_queue, list) { zfcp_fsf_req_dismiss_all()
429 list_del(&req->list); zfcp_fsf_req_dismiss_all()
430 req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; zfcp_fsf_req_dismiss_all()
431 zfcp_fsf_req_complete(req); zfcp_fsf_req_dismiss_all()
463 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) zfcp_fsf_exchange_config_evaluate() argument
465 struct fsf_qtcb_bottom_config *bottom = &req->qtcb->bottom.config; zfcp_fsf_exchange_config_evaluate()
466 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_exchange_config_evaluate()
476 if (req->data) zfcp_fsf_exchange_config_evaluate()
477 memcpy(req->data, bottom, sizeof(*bottom)); zfcp_fsf_exchange_config_evaluate()
494 if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE) zfcp_fsf_exchange_config_evaluate()
527 static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) zfcp_fsf_exchange_config_data_handler() argument
529 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_exchange_config_data_handler()
530 struct fsf_qtcb *qtcb = req->qtcb; zfcp_fsf_exchange_config_data_handler()
534 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_exchange_config_data_handler()
546 if (zfcp_fsf_exchange_config_evaluate(req)) zfcp_fsf_exchange_config_data_handler()
572 zfcp_fsf_link_down_info_eval(req, zfcp_fsf_exchange_config_data_handler()
574 if (zfcp_fsf_exchange_config_evaluate(req)) zfcp_fsf_exchange_config_data_handler()
605 static void zfcp_fsf_exchange_port_evaluate(struct zfcp_fsf_req *req) zfcp_fsf_exchange_port_evaluate() argument
607 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_exchange_port_evaluate()
608 struct fsf_qtcb_bottom_port *bottom = &req->qtcb->bottom.port; zfcp_fsf_exchange_port_evaluate()
611 if (req->data) zfcp_fsf_exchange_port_evaluate()
612 memcpy(req->data, bottom, sizeof(*bottom)); zfcp_fsf_exchange_port_evaluate()
628 static void zfcp_fsf_exchange_port_data_handler(struct zfcp_fsf_req *req) zfcp_fsf_exchange_port_data_handler() argument
630 struct fsf_qtcb *qtcb = req->qtcb; zfcp_fsf_exchange_port_data_handler()
632 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_exchange_port_data_handler()
637 zfcp_fsf_exchange_port_evaluate(req); zfcp_fsf_exchange_port_data_handler()
640 zfcp_fsf_exchange_port_evaluate(req); zfcp_fsf_exchange_port_data_handler()
641 zfcp_fsf_link_down_info_eval(req, zfcp_fsf_exchange_port_data_handler()
649 struct zfcp_fsf_req *req; zfcp_fsf_alloc() local
652 req = mempool_alloc(pool, GFP_ATOMIC); zfcp_fsf_alloc()
654 req = kmalloc(sizeof(*req), GFP_ATOMIC); zfcp_fsf_alloc()
656 if (unlikely(!req)) zfcp_fsf_alloc()
659 memset(req, 0, sizeof(*req)); zfcp_fsf_alloc()
660 req->pool = pool; zfcp_fsf_alloc()
661 return req; zfcp_fsf_alloc()
685 struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool); zfcp_fsf_req_create() local
687 if (unlikely(!req)) zfcp_fsf_req_create()
693 INIT_LIST_HEAD(&req->list); zfcp_fsf_req_create()
694 init_timer(&req->timer); zfcp_fsf_req_create()
695 init_completion(&req->completion); zfcp_fsf_req_create()
697 req->adapter = adapter; zfcp_fsf_req_create()
698 req->fsf_command = fsf_cmd; zfcp_fsf_req_create()
699 req->req_id = adapter->req_no; zfcp_fsf_req_create()
703 req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool); zfcp_fsf_req_create()
705 req->qtcb = zfcp_qtcb_alloc(NULL); zfcp_fsf_req_create()
707 if (unlikely(!req->qtcb)) { zfcp_fsf_req_create()
708 zfcp_fsf_req_free(req); zfcp_fsf_req_create()
712 req->seq_no = adapter->fsf_req_seq_no; zfcp_fsf_req_create()
713 req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no; zfcp_fsf_req_create()
714 req->qtcb->prefix.req_id = req->req_id; zfcp_fsf_req_create()
715 req->qtcb->prefix.ulp_info = 26; zfcp_fsf_req_create()
716 req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command]; zfcp_fsf_req_create()
717 req->qtcb->prefix.qtcb_version = FSF_QTCB_CURRENT_VERSION; zfcp_fsf_req_create()
718 req->qtcb->header.req_handle = req->req_id; zfcp_fsf_req_create()
719 req->qtcb->header.fsf_command = req->fsf_command; zfcp_fsf_req_create()
722 zfcp_qdio_req_init(adapter->qdio, &req->qdio_req, req->req_id, sbtype, zfcp_fsf_req_create()
723 req->qtcb, sizeof(struct fsf_qtcb)); zfcp_fsf_req_create()
725 return req; zfcp_fsf_req_create()
728 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req) zfcp_fsf_req_send() argument
730 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_req_send()
732 int with_qtcb = (req->qtcb != NULL); zfcp_fsf_req_send()
733 int req_id = req->req_id; zfcp_fsf_req_send()
735 zfcp_reqlist_add(adapter->req_list, req); zfcp_fsf_req_send()
737 req->qdio_req.qdio_outb_usage = atomic_read(&qdio->req_q_free); zfcp_fsf_req_send()
738 req->issued = get_tod_clock(); zfcp_fsf_req_send()
739 if (zfcp_qdio_send(qdio, &req->qdio_req)) { zfcp_fsf_req_send()
740 del_timer(&req->timer); zfcp_fsf_req_send()
764 struct zfcp_fsf_req *req; zfcp_fsf_status_read() local
773 req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS, zfcp_fsf_status_read()
776 if (IS_ERR(req)) { zfcp_fsf_status_read()
777 retval = PTR_ERR(req); zfcp_fsf_status_read()
788 req->data = sr_buf; zfcp_fsf_status_read()
790 zfcp_qdio_fill_next(qdio, &req->qdio_req, sr_buf, sizeof(*sr_buf)); zfcp_fsf_status_read()
791 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_status_read()
793 retval = zfcp_fsf_req_send(req); zfcp_fsf_status_read()
800 req->data = NULL; zfcp_fsf_status_read()
803 zfcp_dbf_hba_fsf_uss("fssr__1", req); zfcp_fsf_status_read()
804 zfcp_fsf_req_free(req); zfcp_fsf_status_read()
810 static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req) zfcp_fsf_abort_fcp_command_handler() argument
812 struct scsi_device *sdev = req->data; zfcp_fsf_abort_fcp_command_handler()
814 union fsf_status_qual *fsq = &req->qtcb->header.fsf_status_qual; zfcp_fsf_abort_fcp_command_handler()
816 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_abort_fcp_command_handler()
821 switch (req->qtcb->header.fsf_status) { zfcp_fsf_abort_fcp_command_handler()
826 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_abort_fcp_command_handler()
832 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_abort_fcp_command_handler()
836 req->status |= ZFCP_STATUS_FSFREQ_ABORTNOTNEEDED; zfcp_fsf_abort_fcp_command_handler()
843 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_abort_fcp_command_handler()
849 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_abort_fcp_command_handler()
857 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_abort_fcp_command_handler()
862 req->status |= ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED; zfcp_fsf_abort_fcp_command_handler()
875 struct zfcp_fsf_req *req = NULL; zfcp_fsf_abort_fcp_cmnd() local
884 req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND, zfcp_fsf_abort_fcp_cmnd()
887 if (IS_ERR(req)) { zfcp_fsf_abort_fcp_cmnd()
888 req = NULL; zfcp_fsf_abort_fcp_cmnd()
896 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_abort_fcp_cmnd()
898 req->data = sdev; zfcp_fsf_abort_fcp_cmnd()
899 req->handler = zfcp_fsf_abort_fcp_command_handler; zfcp_fsf_abort_fcp_cmnd()
900 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; zfcp_fsf_abort_fcp_cmnd()
901 req->qtcb->header.port_handle = zfcp_sdev->port->handle; zfcp_fsf_abort_fcp_cmnd()
902 req->qtcb->bottom.support.req_handle = (u64) old_req_id; zfcp_fsf_abort_fcp_cmnd()
904 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); zfcp_fsf_abort_fcp_cmnd()
905 if (!zfcp_fsf_req_send(req)) zfcp_fsf_abort_fcp_cmnd()
909 zfcp_fsf_req_free(req); zfcp_fsf_abort_fcp_cmnd()
910 req = NULL; zfcp_fsf_abort_fcp_cmnd()
913 return req; zfcp_fsf_abort_fcp_cmnd()
916 static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req) zfcp_fsf_send_ct_handler() argument
918 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_send_ct_handler()
919 struct zfcp_fsf_ct_els *ct = req->data; zfcp_fsf_send_ct_handler()
920 struct fsf_qtcb_header *header = &req->qtcb->header; zfcp_fsf_send_ct_handler()
924 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_send_ct_handler()
929 zfcp_dbf_san_res("fsscth2", req); zfcp_fsf_send_ct_handler()
933 zfcp_fsf_class_not_supp(req); zfcp_fsf_send_ct_handler()
939 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_send_ct_handler()
944 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_send_ct_handler()
954 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_send_ct_handler()
973 static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req, zfcp_fsf_setup_ct_els_sbals() argument
977 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_setup_ct_els_sbals()
979 struct fsf_qtcb *qtcb = req->qtcb; zfcp_fsf_setup_ct_els_sbals()
983 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) zfcp_fsf_setup_ct_els_sbals()
985 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) zfcp_fsf_setup_ct_els_sbals()
988 zfcp_qdio_set_data_div(qdio, &req->qdio_req, zfcp_fsf_setup_ct_els_sbals()
990 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_setup_ct_els_sbals()
991 zfcp_qdio_set_scount(qdio, &req->qdio_req); zfcp_fsf_setup_ct_els_sbals()
997 zfcp_fsf_setup_ct_els_unchained(qdio, &req->qdio_req, zfcp_fsf_setup_ct_els_sbals()
1005 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_req)) zfcp_fsf_setup_ct_els_sbals()
1010 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_setup_ct_els_sbals()
1011 zfcp_qdio_skip_to_last_sbale(qdio, &req->qdio_req); zfcp_fsf_setup_ct_els_sbals()
1013 if (zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, sg_resp)) zfcp_fsf_setup_ct_els_sbals()
1018 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_setup_ct_els_sbals()
1023 static int zfcp_fsf_setup_ct_els(struct zfcp_fsf_req *req, zfcp_fsf_setup_ct_els() argument
1030 ret = zfcp_fsf_setup_ct_els_sbals(req, sg_req, sg_resp); zfcp_fsf_setup_ct_els()
1037 req->qtcb->bottom.support.service_class = FSF_CLASS_3; zfcp_fsf_setup_ct_els()
1038 req->qtcb->bottom.support.timeout = timeout; zfcp_fsf_setup_ct_els()
1039 zfcp_fsf_start_timer(req, (timeout + 10) * HZ); zfcp_fsf_setup_ct_els()
1054 struct zfcp_fsf_req *req; zfcp_fsf_send_ct() local
1061 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, zfcp_fsf_send_ct()
1064 if (IS_ERR(req)) { zfcp_fsf_send_ct()
1065 ret = PTR_ERR(req); zfcp_fsf_send_ct()
1069 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_send_ct()
1070 ret = zfcp_fsf_setup_ct_els(req, ct->req, ct->resp, timeout); zfcp_fsf_send_ct()
1074 req->handler = zfcp_fsf_send_ct_handler; zfcp_fsf_send_ct()
1075 req->qtcb->header.port_handle = wka_port->handle; zfcp_fsf_send_ct()
1076 req->data = ct; zfcp_fsf_send_ct()
1078 zfcp_dbf_san_req("fssct_1", req, wka_port->d_id); zfcp_fsf_send_ct()
1080 ret = zfcp_fsf_req_send(req); zfcp_fsf_send_ct()
1087 zfcp_fsf_req_free(req); zfcp_fsf_send_ct()
1093 static void zfcp_fsf_send_els_handler(struct zfcp_fsf_req *req) zfcp_fsf_send_els_handler() argument
1095 struct zfcp_fsf_ct_els *send_els = req->data; zfcp_fsf_send_els_handler()
1096 struct fsf_qtcb_header *header = &req->qtcb->header; zfcp_fsf_send_els_handler()
1100 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_send_els_handler()
1105 zfcp_dbf_san_res("fsselh1", req); zfcp_fsf_send_els_handler()
1109 zfcp_fsf_class_not_supp(req); zfcp_fsf_send_els_handler()
1116 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_send_els_handler()
1129 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_send_els_handler()
1144 struct zfcp_fsf_req *req; zfcp_fsf_send_els() local
1152 req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, zfcp_fsf_send_els()
1155 if (IS_ERR(req)) { zfcp_fsf_send_els()
1156 ret = PTR_ERR(req); zfcp_fsf_send_els()
1160 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_send_els()
1163 zfcp_qdio_sbal_limit(qdio, &req->qdio_req, 2); zfcp_fsf_send_els()
1165 ret = zfcp_fsf_setup_ct_els(req, els->req, els->resp, timeout); zfcp_fsf_send_els()
1170 hton24(req->qtcb->bottom.support.d_id, d_id); zfcp_fsf_send_els()
1171 req->handler = zfcp_fsf_send_els_handler; zfcp_fsf_send_els()
1172 req->data = els; zfcp_fsf_send_els()
1174 zfcp_dbf_san_req("fssels1", req, d_id); zfcp_fsf_send_els()
1176 ret = zfcp_fsf_req_send(req); zfcp_fsf_send_els()
1183 zfcp_fsf_req_free(req); zfcp_fsf_send_els()
1191 struct zfcp_fsf_req *req; zfcp_fsf_exchange_config_data() local
1199 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, zfcp_fsf_exchange_config_data()
1203 if (IS_ERR(req)) { zfcp_fsf_exchange_config_data()
1204 retval = PTR_ERR(req); zfcp_fsf_exchange_config_data()
1208 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_exchange_config_data()
1209 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_exchange_config_data()
1211 req->qtcb->bottom.config.feature_selection = zfcp_fsf_exchange_config_data()
1214 req->erp_action = erp_action; zfcp_fsf_exchange_config_data()
1215 req->handler = zfcp_fsf_exchange_config_data_handler; zfcp_fsf_exchange_config_data()
1216 erp_action->fsf_req_id = req->req_id; zfcp_fsf_exchange_config_data()
1218 zfcp_fsf_start_erp_timer(req); zfcp_fsf_exchange_config_data()
1219 retval = zfcp_fsf_req_send(req); zfcp_fsf_exchange_config_data()
1221 zfcp_fsf_req_free(req); zfcp_fsf_exchange_config_data()
1232 struct zfcp_fsf_req *req = NULL; zfcp_fsf_exchange_config_data_sync() local
1239 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, zfcp_fsf_exchange_config_data_sync()
1242 if (IS_ERR(req)) { zfcp_fsf_exchange_config_data_sync()
1243 retval = PTR_ERR(req); zfcp_fsf_exchange_config_data_sync()
1247 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_exchange_config_data_sync()
1248 req->handler = zfcp_fsf_exchange_config_data_handler; zfcp_fsf_exchange_config_data_sync()
1250 req->qtcb->bottom.config.feature_selection = zfcp_fsf_exchange_config_data_sync()
1255 req->data = data; zfcp_fsf_exchange_config_data_sync()
1257 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); zfcp_fsf_exchange_config_data_sync()
1258 retval = zfcp_fsf_req_send(req); zfcp_fsf_exchange_config_data_sync()
1261 wait_for_completion(&req->completion); zfcp_fsf_exchange_config_data_sync()
1263 zfcp_fsf_req_free(req); zfcp_fsf_exchange_config_data_sync()
1279 struct zfcp_fsf_req *req; zfcp_fsf_exchange_port_data() local
1289 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, zfcp_fsf_exchange_port_data()
1293 if (IS_ERR(req)) { zfcp_fsf_exchange_port_data()
1294 retval = PTR_ERR(req); zfcp_fsf_exchange_port_data()
1298 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_exchange_port_data()
1299 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_exchange_port_data()
1301 req->handler = zfcp_fsf_exchange_port_data_handler; zfcp_fsf_exchange_port_data()
1302 req->erp_action = erp_action; zfcp_fsf_exchange_port_data()
1303 erp_action->fsf_req_id = req->req_id; zfcp_fsf_exchange_port_data()
1305 zfcp_fsf_start_erp_timer(req); zfcp_fsf_exchange_port_data()
1306 retval = zfcp_fsf_req_send(req); zfcp_fsf_exchange_port_data()
1308 zfcp_fsf_req_free(req); zfcp_fsf_exchange_port_data()
1325 struct zfcp_fsf_req *req = NULL; zfcp_fsf_exchange_port_data_sync() local
1335 req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, zfcp_fsf_exchange_port_data_sync()
1338 if (IS_ERR(req)) { zfcp_fsf_exchange_port_data_sync()
1339 retval = PTR_ERR(req); zfcp_fsf_exchange_port_data_sync()
1344 req->data = data; zfcp_fsf_exchange_port_data_sync()
1346 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_exchange_port_data_sync()
1348 req->handler = zfcp_fsf_exchange_port_data_handler; zfcp_fsf_exchange_port_data_sync()
1349 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); zfcp_fsf_exchange_port_data_sync()
1350 retval = zfcp_fsf_req_send(req); zfcp_fsf_exchange_port_data_sync()
1354 wait_for_completion(&req->completion); zfcp_fsf_exchange_port_data_sync()
1356 zfcp_fsf_req_free(req); zfcp_fsf_exchange_port_data_sync()
1365 static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req) zfcp_fsf_open_port_handler() argument
1367 struct zfcp_port *port = req->data; zfcp_fsf_open_port_handler()
1368 struct fsf_qtcb_header *header = &req->qtcb->header; zfcp_fsf_open_port_handler()
1371 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_open_port_handler()
1378 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_open_port_handler()
1384 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_open_port_handler()
1391 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_open_port_handler()
1416 plogi = (struct fc_els_flogi *) req->qtcb->bottom.support.els; zfcp_fsf_open_port_handler()
1417 if (req->qtcb->bottom.support.els1_length >= zfcp_fsf_open_port_handler()
1422 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_open_port_handler()
1439 struct zfcp_fsf_req *req; zfcp_fsf_open_port() local
1446 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, zfcp_fsf_open_port()
1450 if (IS_ERR(req)) { zfcp_fsf_open_port()
1451 retval = PTR_ERR(req); zfcp_fsf_open_port()
1455 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_open_port()
1456 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_open_port()
1458 req->handler = zfcp_fsf_open_port_handler; zfcp_fsf_open_port()
1459 hton24(req->qtcb->bottom.support.d_id, port->d_id); zfcp_fsf_open_port()
1460 req->data = port; zfcp_fsf_open_port()
1461 req->erp_action = erp_action; zfcp_fsf_open_port()
1462 erp_action->fsf_req_id = req->req_id; zfcp_fsf_open_port()
1465 zfcp_fsf_start_erp_timer(req); zfcp_fsf_open_port()
1466 retval = zfcp_fsf_req_send(req); zfcp_fsf_open_port()
1468 zfcp_fsf_req_free(req); zfcp_fsf_open_port()
1477 static void zfcp_fsf_close_port_handler(struct zfcp_fsf_req *req) zfcp_fsf_close_port_handler() argument
1479 struct zfcp_port *port = req->data; zfcp_fsf_close_port_handler()
1481 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_close_port_handler()
1484 switch (req->qtcb->header.fsf_status) { zfcp_fsf_close_port_handler()
1487 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_close_port_handler()
1505 struct zfcp_fsf_req *req; zfcp_fsf_close_port() local
1512 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, zfcp_fsf_close_port()
1516 if (IS_ERR(req)) { zfcp_fsf_close_port()
1517 retval = PTR_ERR(req); zfcp_fsf_close_port()
1521 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_close_port()
1522 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_close_port()
1524 req->handler = zfcp_fsf_close_port_handler; zfcp_fsf_close_port()
1525 req->data = erp_action->port; zfcp_fsf_close_port()
1526 req->erp_action = erp_action; zfcp_fsf_close_port()
1527 req->qtcb->header.port_handle = erp_action->port->handle; zfcp_fsf_close_port()
1528 erp_action->fsf_req_id = req->req_id; zfcp_fsf_close_port()
1530 zfcp_fsf_start_erp_timer(req); zfcp_fsf_close_port()
1531 retval = zfcp_fsf_req_send(req); zfcp_fsf_close_port()
1533 zfcp_fsf_req_free(req); zfcp_fsf_close_port()
1541 static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req) zfcp_fsf_open_wka_port_handler() argument
1543 struct zfcp_fc_wka_port *wka_port = req->data; zfcp_fsf_open_wka_port_handler()
1544 struct fsf_qtcb_header *header = &req->qtcb->header; zfcp_fsf_open_wka_port_handler()
1546 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) { zfcp_fsf_open_wka_port_handler()
1553 dev_warn(&req->adapter->ccw_device->dev, zfcp_fsf_open_wka_port_handler()
1557 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_open_wka_port_handler()
1578 struct zfcp_fsf_req *req; zfcp_fsf_open_wka_port() local
1585 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID, zfcp_fsf_open_wka_port()
1589 if (IS_ERR(req)) { zfcp_fsf_open_wka_port()
1590 retval = PTR_ERR(req); zfcp_fsf_open_wka_port()
1594 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_open_wka_port()
1595 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_open_wka_port()
1597 req->handler = zfcp_fsf_open_wka_port_handler; zfcp_fsf_open_wka_port()
1598 hton24(req->qtcb->bottom.support.d_id, wka_port->d_id); zfcp_fsf_open_wka_port()
1599 req->data = wka_port; zfcp_fsf_open_wka_port()
1601 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); zfcp_fsf_open_wka_port()
1602 retval = zfcp_fsf_req_send(req); zfcp_fsf_open_wka_port()
1604 zfcp_fsf_req_free(req); zfcp_fsf_open_wka_port()
1610 static void zfcp_fsf_close_wka_port_handler(struct zfcp_fsf_req *req) zfcp_fsf_close_wka_port_handler() argument
1612 struct zfcp_fc_wka_port *wka_port = req->data; zfcp_fsf_close_wka_port_handler()
1614 if (req->qtcb->header.fsf_status == FSF_PORT_HANDLE_NOT_VALID) { zfcp_fsf_close_wka_port_handler()
1615 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_close_wka_port_handler()
1631 struct zfcp_fsf_req *req; zfcp_fsf_close_wka_port() local
1638 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT, zfcp_fsf_close_wka_port()
1642 if (IS_ERR(req)) { zfcp_fsf_close_wka_port()
1643 retval = PTR_ERR(req); zfcp_fsf_close_wka_port()
1647 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_close_wka_port()
1648 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_close_wka_port()
1650 req->handler = zfcp_fsf_close_wka_port_handler; zfcp_fsf_close_wka_port()
1651 req->data = wka_port; zfcp_fsf_close_wka_port()
1652 req->qtcb->header.port_handle = wka_port->handle; zfcp_fsf_close_wka_port()
1654 zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT); zfcp_fsf_close_wka_port()
1655 retval = zfcp_fsf_req_send(req); zfcp_fsf_close_wka_port()
1657 zfcp_fsf_req_free(req); zfcp_fsf_close_wka_port()
1663 static void zfcp_fsf_close_physical_port_handler(struct zfcp_fsf_req *req) zfcp_fsf_close_physical_port_handler() argument
1665 struct zfcp_port *port = req->data; zfcp_fsf_close_physical_port_handler()
1666 struct fsf_qtcb_header *header = &req->qtcb->header; zfcp_fsf_close_physical_port_handler()
1669 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_close_physical_port_handler()
1675 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_close_physical_port_handler()
1688 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_close_physical_port_handler()
1695 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_close_physical_port_handler()
1720 struct zfcp_fsf_req *req; zfcp_fsf_close_physical_port() local
1727 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT, zfcp_fsf_close_physical_port()
1731 if (IS_ERR(req)) { zfcp_fsf_close_physical_port()
1732 retval = PTR_ERR(req); zfcp_fsf_close_physical_port()
1736 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_close_physical_port()
1737 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_close_physical_port()
1739 req->data = erp_action->port; zfcp_fsf_close_physical_port()
1740 req->qtcb->header.port_handle = erp_action->port->handle; zfcp_fsf_close_physical_port()
1741 req->erp_action = erp_action; zfcp_fsf_close_physical_port()
1742 req->handler = zfcp_fsf_close_physical_port_handler; zfcp_fsf_close_physical_port()
1743 erp_action->fsf_req_id = req->req_id; zfcp_fsf_close_physical_port()
1745 zfcp_fsf_start_erp_timer(req); zfcp_fsf_close_physical_port()
1746 retval = zfcp_fsf_req_send(req); zfcp_fsf_close_physical_port()
1748 zfcp_fsf_req_free(req); zfcp_fsf_close_physical_port()
1756 static void zfcp_fsf_open_lun_handler(struct zfcp_fsf_req *req) zfcp_fsf_open_lun_handler() argument
1758 struct zfcp_adapter *adapter = req->adapter; zfcp_fsf_open_lun_handler()
1759 struct scsi_device *sdev = req->data; zfcp_fsf_open_lun_handler()
1761 struct fsf_qtcb_header *header = &req->qtcb->header; zfcp_fsf_open_lun_handler()
1764 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_open_lun_handler()
1785 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_open_lun_handler()
1799 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_open_lun_handler()
1810 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_open_lun_handler()
1818 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_open_lun_handler()
1839 struct zfcp_fsf_req *req; zfcp_fsf_open_lun() local
1846 req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN, zfcp_fsf_open_lun()
1850 if (IS_ERR(req)) { zfcp_fsf_open_lun()
1851 retval = PTR_ERR(req); zfcp_fsf_open_lun()
1855 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_open_lun()
1856 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_open_lun()
1858 req->qtcb->header.port_handle = erp_action->port->handle; zfcp_fsf_open_lun()
1859 req->qtcb->bottom.support.fcp_lun = zfcp_scsi_dev_lun(erp_action->sdev); zfcp_fsf_open_lun()
1860 req->handler = zfcp_fsf_open_lun_handler; zfcp_fsf_open_lun()
1861 req->data = erp_action->sdev; zfcp_fsf_open_lun()
1862 req->erp_action = erp_action; zfcp_fsf_open_lun()
1863 erp_action->fsf_req_id = req->req_id; zfcp_fsf_open_lun()
1866 req->qtcb->bottom.support.option = FSF_OPEN_LUN_SUPPRESS_BOXING; zfcp_fsf_open_lun()
1868 zfcp_fsf_start_erp_timer(req); zfcp_fsf_open_lun()
1869 retval = zfcp_fsf_req_send(req); zfcp_fsf_open_lun()
1871 zfcp_fsf_req_free(req); zfcp_fsf_open_lun()
1879 static void zfcp_fsf_close_lun_handler(struct zfcp_fsf_req *req) zfcp_fsf_close_lun_handler() argument
1881 struct scsi_device *sdev = req->data; zfcp_fsf_close_lun_handler()
1884 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_close_lun_handler()
1889 switch (req->qtcb->header.fsf_status) { zfcp_fsf_close_lun_handler()
1892 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_close_lun_handler()
1896 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_close_lun_handler()
1903 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_close_lun_handler()
1906 switch (req->qtcb->header.fsf_status_qual.word[0]) { zfcp_fsf_close_lun_handler()
1911 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_close_lun_handler()
1930 struct zfcp_fsf_req *req; zfcp_fsf_close_lun() local
1937 req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN, zfcp_fsf_close_lun()
1941 if (IS_ERR(req)) { zfcp_fsf_close_lun()
1942 retval = PTR_ERR(req); zfcp_fsf_close_lun()
1946 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_close_lun()
1947 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_close_lun()
1949 req->qtcb->header.port_handle = erp_action->port->handle; zfcp_fsf_close_lun()
1950 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; zfcp_fsf_close_lun()
1951 req->handler = zfcp_fsf_close_lun_handler; zfcp_fsf_close_lun()
1952 req->data = erp_action->sdev; zfcp_fsf_close_lun()
1953 req->erp_action = erp_action; zfcp_fsf_close_lun()
1954 erp_action->fsf_req_id = req->req_id; zfcp_fsf_close_lun()
1956 zfcp_fsf_start_erp_timer(req); zfcp_fsf_close_lun()
1957 retval = zfcp_fsf_req_send(req); zfcp_fsf_close_lun()
1959 zfcp_fsf_req_free(req); zfcp_fsf_close_lun()
1974 static void zfcp_fsf_req_trace(struct zfcp_fsf_req *req, struct scsi_cmnd *scsi) zfcp_fsf_req_trace() argument
1980 int ticks = req->adapter->timer_ticks; zfcp_fsf_req_trace()
1982 lat_in = &req->qtcb->prefix.prot_status_qual.latency_info; zfcp_fsf_req_trace()
1986 if (req->status & ZFCP_STATUS_FSFREQ_ERROR) zfcp_fsf_req_trace()
1989 blktrc.outb_usage = req->qdio_req.qdio_outb_usage; zfcp_fsf_req_trace()
1991 if (req->adapter->adapter_features & FSF_FEATURE_MEASUREMENT_DATA && zfcp_fsf_req_trace()
1992 !(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { zfcp_fsf_req_trace()
1998 switch (req->qtcb->bottom.io.data_direction) { zfcp_fsf_req_trace()
2027 static void zfcp_fsf_fcp_handler_common(struct zfcp_fsf_req *req) zfcp_fsf_fcp_handler_common() argument
2029 struct scsi_cmnd *scmnd = req->data; zfcp_fsf_fcp_handler_common()
2032 struct fsf_qtcb_header *header = &req->qtcb->header; zfcp_fsf_fcp_handler_common()
2034 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) zfcp_fsf_fcp_handler_common()
2043 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_fcp_handler_common()
2048 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_fcp_handler_common()
2051 zfcp_fsf_class_not_supp(req); zfcp_fsf_fcp_handler_common()
2054 dev_err(&req->adapter->ccw_device->dev, zfcp_fsf_fcp_handler_common()
2057 req->qtcb->bottom.io.data_direction, zfcp_fsf_fcp_handler_common()
2062 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_fcp_handler_common()
2065 dev_err(&req->adapter->ccw_device->dev, zfcp_fsf_fcp_handler_common()
2068 req->qtcb->bottom.io.fcp_cmnd_length, zfcp_fsf_fcp_handler_common()
2073 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_fcp_handler_common()
2080 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_fcp_handler_common()
2086 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_fcp_handler_common()
2092 req->status |= ZFCP_STATUS_FSFREQ_ERROR; zfcp_fsf_fcp_handler_common()
2097 static void zfcp_fsf_fcp_cmnd_handler(struct zfcp_fsf_req *req) zfcp_fsf_fcp_cmnd_handler() argument
2103 read_lock_irqsave(&req->adapter->abort_lock, flags); zfcp_fsf_fcp_cmnd_handler()
2105 scpnt = req->data; zfcp_fsf_fcp_cmnd_handler()
2107 read_unlock_irqrestore(&req->adapter->abort_lock, flags); zfcp_fsf_fcp_cmnd_handler()
2111 zfcp_fsf_fcp_handler_common(req); zfcp_fsf_fcp_cmnd_handler()
2113 if (unlikely(req->status & ZFCP_STATUS_FSFREQ_ERROR)) { zfcp_fsf_fcp_cmnd_handler()
2118 switch (req->qtcb->header.fsf_status) { zfcp_fsf_fcp_cmnd_handler()
2133 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; zfcp_fsf_fcp_cmnd_handler()
2137 zfcp_fsf_req_trace(req, scpnt); zfcp_fsf_fcp_cmnd_handler()
2138 zfcp_dbf_scsi_result(scpnt, req); zfcp_fsf_fcp_cmnd_handler()
2148 read_unlock_irqrestore(&req->adapter->abort_lock, flags); zfcp_fsf_fcp_cmnd_handler()
2195 struct zfcp_fsf_req *req; zfcp_fsf_fcp_cmnd() local
2219 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, zfcp_fsf_fcp_cmnd()
2222 if (IS_ERR(req)) { zfcp_fsf_fcp_cmnd()
2223 retval = PTR_ERR(req); zfcp_fsf_fcp_cmnd()
2227 scsi_cmnd->host_scribble = (unsigned char *) req->req_id; zfcp_fsf_fcp_cmnd()
2229 io = &req->qtcb->bottom.io; zfcp_fsf_fcp_cmnd()
2230 req->status |= ZFCP_STATUS_FSFREQ_CLEANUP; zfcp_fsf_fcp_cmnd()
2231 req->data = scsi_cmnd; zfcp_fsf_fcp_cmnd()
2232 req->handler = zfcp_fsf_fcp_cmnd_handler; zfcp_fsf_fcp_cmnd()
2233 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; zfcp_fsf_fcp_cmnd()
2234 req->qtcb->header.port_handle = zfcp_sdev->port->handle; zfcp_fsf_fcp_cmnd()
2246 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; zfcp_fsf_fcp_cmnd()
2250 zfcp_qdio_set_data_div(qdio, &req->qdio_req, zfcp_fsf_fcp_cmnd()
2252 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, zfcp_fsf_fcp_cmnd()
2260 retval = zfcp_qdio_sbals_from_sg(qdio, &req->qdio_req, zfcp_fsf_fcp_cmnd()
2265 zfcp_qdio_set_sbale_last(adapter->qdio, &req->qdio_req); zfcp_fsf_fcp_cmnd()
2267 zfcp_qdio_set_scount(qdio, &req->qdio_req); zfcp_fsf_fcp_cmnd()
2269 retval = zfcp_fsf_req_send(req); zfcp_fsf_fcp_cmnd()
2276 zfcp_fsf_req_free(req); zfcp_fsf_fcp_cmnd()
2283 static void zfcp_fsf_fcp_task_mgmt_handler(struct zfcp_fsf_req *req) zfcp_fsf_fcp_task_mgmt_handler() argument
2288 zfcp_fsf_fcp_handler_common(req); zfcp_fsf_fcp_task_mgmt_handler()
2290 fcp_rsp = (struct fcp_resp_with_ext *) &req->qtcb->bottom.io.fcp_rsp; zfcp_fsf_fcp_task_mgmt_handler()
2294 (req->status & ZFCP_STATUS_FSFREQ_ERROR)) zfcp_fsf_fcp_task_mgmt_handler()
2295 req->status |= ZFCP_STATUS_FSFREQ_TMFUNCFAILED; zfcp_fsf_fcp_task_mgmt_handler()
2307 struct zfcp_fsf_req *req = NULL; zfcp_fsf_fcp_task_mgmt() local
2320 req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND, zfcp_fsf_fcp_task_mgmt()
2324 if (IS_ERR(req)) { zfcp_fsf_fcp_task_mgmt()
2325 req = NULL; zfcp_fsf_fcp_task_mgmt()
2329 req->data = scmnd; zfcp_fsf_fcp_task_mgmt()
2330 req->handler = zfcp_fsf_fcp_task_mgmt_handler; zfcp_fsf_fcp_task_mgmt()
2331 req->qtcb->header.lun_handle = zfcp_sdev->lun_handle; zfcp_fsf_fcp_task_mgmt()
2332 req->qtcb->header.port_handle = zfcp_sdev->port->handle; zfcp_fsf_fcp_task_mgmt()
2333 req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND; zfcp_fsf_fcp_task_mgmt()
2334 req->qtcb->bottom.io.service_class = FSF_CLASS_3; zfcp_fsf_fcp_task_mgmt()
2335 req->qtcb->bottom.io.fcp_cmnd_length = FCP_CMND_LEN; zfcp_fsf_fcp_task_mgmt()
2337 zfcp_qdio_set_sbale_last(qdio, &req->qdio_req); zfcp_fsf_fcp_task_mgmt()
2339 fcp_cmnd = (struct fcp_cmnd *) &req->qtcb->bottom.io.fcp_cmnd; zfcp_fsf_fcp_task_mgmt()
2342 zfcp_fsf_start_timer(req, ZFCP_SCSI_ER_TIMEOUT); zfcp_fsf_fcp_task_mgmt()
2343 if (!zfcp_fsf_req_send(req)) zfcp_fsf_fcp_task_mgmt()
2346 zfcp_fsf_req_free(req); zfcp_fsf_fcp_task_mgmt()
2347 req = NULL; zfcp_fsf_fcp_task_mgmt()
2350 return req; zfcp_fsf_fcp_task_mgmt()
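Most of the zfcp_fsf.c hits above repeat one request lifecycle: build a struct zfcp_fsf_req, point req->handler and req->data at the response handler and its context, arm a timer, send, and either free on a failed send or wait for zfcp_fsf_req_complete() to signal req->completion. The condensed sketch below paraphrases the synchronous variant seen in zfcp_fsf_exchange_config_data_sync(); it is not new driver code, the example_* name is made up, it assumes the request was already obtained from zfcp_fsf_req_create(), and it leaves out the qdio request-queue locking and SBAL setup.

static int example_sync_fsf_request(struct zfcp_fsf_req *req, void *data)
{
        int retval;

        req->handler = zfcp_fsf_exchange_config_data_handler;  /* response handler */
        req->data = data;                       /* where the handler copies results */

        zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
        retval = zfcp_fsf_req_send(req);
        if (retval) {
                zfcp_fsf_req_free(req);         /* never sent, nothing will complete it */
                return retval;
        }

        wait_for_completion(&req->completion);  /* signalled by zfcp_fsf_req_complete() */
        zfcp_fsf_req_free(req);
        return 0;
}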
H A Dzfcp_reqlist.h85 struct zfcp_fsf_req *req; _zfcp_reqlist_find() local
89 list_for_each_entry(req, &rl->buckets[i], list) _zfcp_reqlist_find()
90 if (req->req_id == req_id) _zfcp_reqlist_find()
91 return req; _zfcp_reqlist_find()
107 struct zfcp_fsf_req *req; zfcp_reqlist_find() local
110 req = _zfcp_reqlist_find(rl, req_id); zfcp_reqlist_find()
113 return req; zfcp_reqlist_find()
132 struct zfcp_fsf_req *req; zfcp_reqlist_find_rm() local
135 req = _zfcp_reqlist_find(rl, req_id); zfcp_reqlist_find_rm()
136 if (req) zfcp_reqlist_find_rm()
137 list_del(&req->list); zfcp_reqlist_find_rm()
140 return req; zfcp_reqlist_find_rm()
146 * @req: The entry to add
154 struct zfcp_fsf_req *req) zfcp_reqlist_add()
159 i = zfcp_reqlist_hash(req->req_id); zfcp_reqlist_add()
162 list_add_tail(&req->list, &rl->buckets[i]); zfcp_reqlist_add()
153 zfcp_reqlist_add(struct zfcp_reqlist *rl, struct zfcp_fsf_req *req) zfcp_reqlist_add() argument
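zfcp_reqlist.h above is a small hashed lookup table keyed by request id. A sketch of the usual pairing follows; the example_* names are made up and the request-id type (unsigned long) is assumed from struct zfcp_fsf_req.

static void example_track_request(struct zfcp_reqlist *rl,
                                  struct zfcp_fsf_req *req)
{
        /* register before the request is handed to the hardware */
        zfcp_reqlist_add(rl, req);
}

static struct zfcp_fsf_req *example_retire_request(struct zfcp_reqlist *rl,
                                                   unsigned long req_id)
{
        /* on completion: look up and unlink under the list lock in one step */
        return zfcp_reqlist_find_rm(rl, req_id);
}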
/linux-4.1.27/arch/um/drivers/
H A Dmconsole_user.c40 static int mconsole_reply_v0(struct mc_request *req, char *reply) mconsole_reply_v0() argument
48 msg.msg_name = &(req->origin); mconsole_reply_v0()
49 msg.msg_namelen = req->originlen; mconsole_reply_v0()
56 return sendmsg(req->originating_fd, &msg, 0); mconsole_reply_v0()
59 static struct mconsole_command *mconsole_parse(struct mc_request *req) mconsole_parse() argument
66 if (!strncmp(req->request.data, cmd->command, mconsole_parse()
79 int mconsole_get_request(int fd, struct mc_request *req) mconsole_get_request() argument
83 req->originlen = sizeof(req->origin); mconsole_get_request()
84 req->len = recvfrom(fd, &req->request, sizeof(req->request), 0, mconsole_get_request()
85 (struct sockaddr *) req->origin, &req->originlen); mconsole_get_request()
86 if (req->len < 0) mconsole_get_request()
89 req->originating_fd = fd; mconsole_get_request()
91 if (req->request.magic != MCONSOLE_MAGIC) { mconsole_get_request()
93 len = MIN(sizeof(req->request.data) - 1, mconsole_get_request()
94 strlen((char *) &req->request)); mconsole_get_request()
95 memmove(req->request.data, &req->request, len); mconsole_get_request()
96 req->request.data[len] = '\0'; mconsole_get_request()
98 req->request.magic = MCONSOLE_MAGIC; mconsole_get_request()
99 req->request.version = 0; mconsole_get_request()
100 req->request.len = len; mconsole_get_request()
102 mconsole_reply_v0(req, "ERR Version 0 mconsole clients are " mconsole_get_request()
107 if (req->request.len >= MCONSOLE_MAX_DATA) { mconsole_get_request()
108 mconsole_reply(req, "Request too large", 1, 0); mconsole_get_request()
111 if (req->request.version != MCONSOLE_VERSION) { mconsole_get_request()
112 mconsole_reply(req, "This driver only supports version " mconsole_get_request()
116 req->request.data[req->request.len] = '\0'; mconsole_get_request()
117 req->cmd = mconsole_parse(req); mconsole_get_request()
118 if (req->cmd == NULL) { mconsole_get_request()
119 mconsole_reply(req, "Unknown command", 1, 0); mconsole_get_request()
126 int mconsole_reply_len(struct mc_request *req, const char *str, int total, mconsole_reply_len() argument
156 n = sendto(req->originating_fd, &reply, len, 0, mconsole_reply_len()
157 (struct sockaddr *) req->origin, req->originlen); mconsole_reply_len()
165 int mconsole_reply(struct mc_request *req, const char *str, int err, int more) mconsole_reply() argument
167 return mconsole_reply_len(req, str, strlen(str), err, more); mconsole_reply()
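mconsole_user.c above provides the receive and reply halves of the mconsole protocol. A minimal dispatch loop built only on the two entry points shown might look like the sketch below; the example_* name is made up and fd is assumed to be the already-bound mconsole socket.

static void example_mconsole_loop(int fd)
{
        struct mc_request req;

        while (mconsole_get_request(fd, &req)) {
                if (req.cmd == NULL)    /* defensive; errors are replied inside get_request */
                        continue;
                (*req.cmd->handler)(&req);
        }
}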
H A Dmconsole.h51 void (*handler)(struct mc_request *req);
72 extern int mconsole_reply_len(struct mc_request *req, const char *reply,
74 extern int mconsole_reply(struct mc_request *req, const char *str, int err,
77 extern void mconsole_version(struct mc_request *req);
78 extern void mconsole_help(struct mc_request *req);
79 extern void mconsole_halt(struct mc_request *req);
80 extern void mconsole_reboot(struct mc_request *req);
81 extern void mconsole_config(struct mc_request *req);
82 extern void mconsole_remove(struct mc_request *req);
83 extern void mconsole_sysrq(struct mc_request *req);
84 extern void mconsole_cad(struct mc_request *req);
85 extern void mconsole_stop(struct mc_request *req);
86 extern void mconsole_go(struct mc_request *req);
87 extern void mconsole_log(struct mc_request *req);
88 extern void mconsole_proc(struct mc_request *req);
89 extern void mconsole_stack(struct mc_request *req);
91 extern int mconsole_get_request(int fd, struct mc_request *req);
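mconsole.h above declares the handler hook (void (*handler)(struct mc_request *req)) alongside the stock mconsole_* handlers. A hypothetical extra handler needs nothing beyond the request and mconsole_reply(); the example_* name below is made up.

static void example_mconsole_ping(struct mc_request *req)
{
        /* err = 0, more = 0: a single successful reply */
        mconsole_reply(req, "pong", 0, 0);
}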
H A Dmconsole_kern.c59 struct mconsole_entry *req; mc_work_proc() local
64 req = list_entry(mc_requests.next, struct mconsole_entry, list); mc_work_proc()
65 list_del(&req->list); mc_work_proc()
67 req->request.cmd->handler(&req->request); mc_work_proc()
68 kfree(req); mc_work_proc()
79 static struct mc_request req; /* that's OK */ mconsole_interrupt() local
82 while (mconsole_get_request(fd, &req)) { mconsole_interrupt()
83 if (req.cmd->context == MCONSOLE_INTR) mconsole_interrupt()
84 (*req.cmd->handler)(&req); mconsole_interrupt()
88 mconsole_reply(&req, "Out of memory", 1, 0); mconsole_interrupt()
90 new->request = req; mconsole_interrupt()
102 void mconsole_version(struct mc_request *req) mconsole_version() argument
109 mconsole_reply(req, version, 0, 0); mconsole_version()
112 void mconsole_log(struct mc_request *req) mconsole_log() argument
115 char *ptr = req->request.data; mconsole_log()
119 len = req->len - (ptr - req->request.data); mconsole_log()
121 mconsole_reply(req, "", 0, 0); mconsole_log()
124 void mconsole_proc(struct mc_request *req) mconsole_proc() argument
131 char *ptr = req->request.data; mconsole_proc()
138 mconsole_reply(req, "Failed to open file", 1, 0); mconsole_proc()
145 mconsole_reply(req, "Failed to allocate buffer", 1, 0); mconsole_proc()
157 mconsole_reply(req, "Read of file failed", 1, 0); mconsole_proc()
162 mconsole_reply(req, "\n", 0, 1); mconsole_proc()
166 mconsole_reply(req, buf, 0, (len > 0)); mconsole_proc()
194 void mconsole_help(struct mc_request *req) mconsole_help() argument
196 mconsole_reply(req, UML_MCONSOLE_HELPTEXT, 0, 0); mconsole_help()
199 void mconsole_halt(struct mc_request *req) mconsole_halt() argument
201 mconsole_reply(req, "", 0, 0); mconsole_halt()
205 void mconsole_reboot(struct mc_request *req) mconsole_reboot() argument
207 mconsole_reply(req, "", 0, 0); mconsole_reboot()
211 void mconsole_cad(struct mc_request *req) mconsole_cad() argument
213 mconsole_reply(req, "", 0, 0); mconsole_cad()
217 void mconsole_go(struct mc_request *req) mconsole_go() argument
219 mconsole_reply(req, "Not stopped", 1, 0); mconsole_go()
222 void mconsole_stop(struct mc_request *req) mconsole_stop() argument
224 deactivate_fd(req->originating_fd, MCONSOLE_IRQ); mconsole_stop()
225 os_set_fd_block(req->originating_fd, 1); mconsole_stop()
226 mconsole_reply(req, "stopped", 0, 0); mconsole_stop()
228 if (!mconsole_get_request(req->originating_fd, req)) mconsole_stop()
230 if (req->cmd->handler == mconsole_go) mconsole_stop()
232 if (req->cmd->handler == mconsole_stop) { mconsole_stop()
233 mconsole_reply(req, "Already stopped", 1, 0); mconsole_stop()
236 if (req->cmd->handler == mconsole_sysrq) { mconsole_stop()
238 old_regs = set_irq_regs((struct pt_regs *)&req->regs); mconsole_stop()
239 mconsole_sysrq(req); mconsole_stop()
243 (*req->cmd->handler)(req); mconsole_stop()
245 os_set_fd_block(req->originating_fd, 0); mconsole_stop()
246 reactivate_fd(req->originating_fd, MCONSOLE_IRQ); mconsole_stop()
247 mconsole_reply(req, "", 0, 0); mconsole_stop()
429 struct mc_request *req, char *name) mconsole_get_config()
435 mconsole_reply(req, "No get_config routine defined", 1, 0); mconsole_get_config()
446 mconsole_reply(req, error, 1, 0); mconsole_get_config()
451 mconsole_reply(req, buf, 0, 0); mconsole_get_config()
461 mconsole_reply(req, "Failed to allocate buffer", 1, 0); mconsole_get_config()
470 void mconsole_config(struct mc_request *req) mconsole_config() argument
473 char *ptr = req->request.data, *name, *error_string = ""; mconsole_config()
480 mconsole_reply(req, "Bad configuration option", 1, 0); mconsole_config()
491 mconsole_reply(req, error_string, err, 0); mconsole_config()
493 else mconsole_get_config(dev->get_config, req, name); mconsole_config()
496 void mconsole_remove(struct mc_request *req) mconsole_remove() argument
499 char *ptr = req->request.data, *err_msg = ""; mconsole_remove()
507 mconsole_reply(req, "Bad remove option", 1, 0); mconsole_remove()
544 mconsole_reply(req, err_msg, err, 0); mconsole_remove()
549 struct mc_request *req; member in struct:mconsole_output
575 mconsole_reply_len(entry->req, console_buf, n, 0, 1); console_write()
593 static void with_console(struct mc_request *req, void (*proc)(void *), with_console() argument
599 entry.req = req; with_console()
606 mconsole_reply_len(req, "", 0, 0, 0); with_console()
623 void mconsole_sysrq(struct mc_request *req) mconsole_sysrq() argument
625 char *ptr = req->request.data; mconsole_sysrq()
635 mconsole_reply(req, "", 0, 0); mconsole_sysrq()
637 with_console(req, sysrq_proc, ptr); mconsole_sysrq()
640 void mconsole_sysrq(struct mc_request *req) mconsole_sysrq() argument
642 mconsole_reply(req, "Sysrq not compiled in", 1, 0); mconsole_sysrq()
659 void mconsole_stack(struct mc_request *req) mconsole_stack() argument
661 char *ptr = req->request.data; mconsole_stack()
679 mconsole_reply(req, "Please specify a pid", 1, 0); mconsole_stack()
685 mconsole_reply(req, "Couldn't find that pid", 1, 0); mconsole_stack()
688 with_console(req, stack_proc, to); mconsole_stack()
427 mconsole_get_config(int (*get_config)(char *, char *, int, char **), struct mc_request *req, char *name) mconsole_get_config() argument
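
The mconsole_kern.c hits split dispatch by context: mconsole_interrupt() runs MCONSOLE_INTR commands immediately and queues the rest as mconsole_entry items for mc_work_proc(), which later invokes req->request.cmd->handler(). Every handler ends with mconsole_reply(req, str, err, more). A sketch of a handler written against that contract; the "uptime" command and buffer size are made up for illustration and not part of the driver:

/* Sketch of a hypothetical mconsole command handler (kernel context,
 * headers omitted): format an answer, then reply exactly once. */
void mconsole_uptime(struct mc_request *req)
{
	char buf[64];
	int n;

	n = snprintf(buf, sizeof(buf), "%lu", (unsigned long)(jiffies / HZ));
	if (n < 0)
		mconsole_reply(req, "Formatting failed", 1, 0);	/* err = 1 */
	else
		mconsole_reply(req, buf, 0, 0);			/* success, no more data */
}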
H A Dubd_kern.c47 struct request *req; member in struct:io_thread_req
449 struct io_thread_req *req; ubd_handler() local
456 n = os_read_file(thread_fd, &req, ubd_handler()
458 if(n != sizeof(req)){ ubd_handler()
466 blk_end_request(req->req, 0, req->length); ubd_handler()
467 kfree(req); ubd_handler()
1191 static void cowify_req(struct io_thread_req *req, unsigned long *bitmap, cowify_req() argument
1194 __u64 sector = req->offset >> 9; cowify_req()
1197 if(req->length > (sizeof(req->sector_mask) * 8) << 9) cowify_req()
1200 if(req->op == UBD_READ) { cowify_req()
1201 for(i = 0; i < req->length >> 9; i++){ cowify_req()
1204 &req->sector_mask); cowify_req()
1207 else cowify_bitmap(req->offset, req->length, &req->sector_mask, cowify_req()
1208 &req->cow_offset, bitmap, bitmap_offset, cowify_req()
1209 req->bitmap_words, bitmap_len); cowify_req()
1213 static void prepare_request(struct request *req, struct io_thread_req *io_req, prepare_request() argument
1217 struct gendisk *disk = req->rq_disk; prepare_request()
1220 io_req->req = req; prepare_request()
1230 io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE; prepare_request()
1243 static void prepare_flush_request(struct request *req, prepare_flush_request() argument
1246 struct gendisk *disk = req->rq_disk; prepare_flush_request()
1249 io_req->req = req; prepare_flush_request()
1276 struct request *req; do_ubd_request() local
1281 struct request *req = blk_fetch_request(q); do_ubd_request() local
1282 if(req == NULL) do_ubd_request()
1285 dev->request = req; do_ubd_request()
1286 dev->rq_pos = blk_rq_pos(req); do_ubd_request()
1288 dev->end_sg = blk_rq_map_sg(q, req, dev->sg); do_ubd_request()
1291 req = dev->request; do_ubd_request()
1293 if (req->cmd_flags & REQ_FLUSH) { do_ubd_request()
1301 prepare_flush_request(req, io_req); do_ubd_request()
1316 prepare_request(req, io_req, do_ubd_request()
1373 static int update_bitmap(struct io_thread_req *req) update_bitmap() argument
1377 if(req->cow_offset == -1) update_bitmap()
1380 n = os_seek_file(req->fds[1], req->cow_offset); update_bitmap()
1386 n = os_write_file(req->fds[1], &req->bitmap_words, update_bitmap()
1387 sizeof(req->bitmap_words)); update_bitmap()
1388 if(n != sizeof(req->bitmap_words)){ update_bitmap()
1390 req->fds[1]); update_bitmap()
1397 static void do_io(struct io_thread_req *req) do_io() argument
1405 if (req->op == UBD_FLUSH) { do_io()
1407 n = os_sync_file(req->fds[0]); do_io()
1410 "fd = %d\n", -n, req->fds[0]); do_io()
1411 req->error = 1; do_io()
1416 nsectors = req->length / req->sectorsize; do_io()
1419 bit = ubd_test_bit(start, (unsigned char *) &req->sector_mask); do_io()
1423 &req->sector_mask) == bit)) do_io()
1426 off = req->offset + req->offsets[bit] + do_io()
1427 start * req->sectorsize; do_io()
1428 len = (end - start) * req->sectorsize; do_io()
1429 buf = &req->buffer[start * req->sectorsize]; do_io()
1431 err = os_seek_file(req->fds[bit], off); do_io()
1434 req->error = 1; do_io()
1437 if(req->op == UBD_READ){ do_io()
1442 n = os_read_file(req->fds[bit], buf, len); do_io()
1445 "fd = %d\n", -n, req->fds[bit]); do_io()
1446 req->error = 1; do_io()
1452 n = os_write_file(req->fds[bit], buf, len); do_io()
1455 "fd = %d\n", -n, req->fds[bit]); do_io()
1456 req->error = 1; do_io()
1464 req->error = update_bitmap(req); do_io()
1477 struct io_thread_req *req; io_thread() local
1483 n = os_read_file(kernel_fd, &req, io_thread()
1496 do_io(req); io_thread()
1497 n = os_write_file(kernel_fd, &req, io_thread()
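
The ubd hits show struct io_thread_req pointers being handed to a helper thread over a pipe: the block-layer side prepares the request and writes the pointer, io_thread() reads it, runs do_io() (seek plus read/write per run of sectors, then update_bitmap() for COW), and writes the pointer back so ubd_handler() can finish with blk_end_request() and kfree(). A sketch of the helper-thread side of that hand-off, assuming kernel_fd is the pipe end the driver gives the thread:

/* Sketch: consumer loop for the pointer hand-off shown above. */
static void io_thread_loop(int kernel_fd)
{
	struct io_thread_req *req;
	int n;

	for (;;) {
		n = os_read_file(kernel_fd, &req, sizeof(req));
		if (n != sizeof(req))
			continue;		/* short read: nothing usable yet */

		do_io(req);			/* sets req->error on failure */

		n = os_write_file(kernel_fd, &req, sizeof(req));
		/* ubd_handler() on the other end completes and frees the request */
	}
}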
/linux-4.1.27/drivers/macintosh/
H A Dvia-pmu68k.c106 static int pmu_send_request(struct adb_request *req, int sync);
114 static void pmu_done(struct adb_request *req);
192 volatile struct adb_request req; pmu_init() local
197 pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB); pmu_init()
199 while (!req.complete) { pmu_init()
224 pmu_request((struct adb_request *) &req, NULL, 2, PMU_SET_INTR_MASK, pmu_init()
227 while (!req.complete) { pmu_init()
272 pmu_send_request(struct adb_request *req, int sync) pmu_send_request() argument
278 req->complete = 1; pmu_send_request()
284 switch (req->data[0]) { pmu_send_request()
286 for (i = 0; i < req->nbytes - 1; ++i) pmu_send_request()
287 req->data[i] = req->data[i+1]; pmu_send_request()
288 --req->nbytes; pmu_send_request()
289 if (pmu_data_len[req->data[0]][1] != 0) { pmu_send_request()
290 req->reply[0] = ADB_RET_OK; pmu_send_request()
291 req->reply_len = 1; pmu_send_request()
293 req->reply_len = 0; pmu_send_request()
294 ret = pmu_queue_request(req); pmu_send_request()
297 switch (req->data[1]) { pmu_send_request()
299 if (req->nbytes != 2) pmu_send_request()
301 req->data[0] = PMU_READ_RTC; pmu_send_request()
302 req->nbytes = 1; pmu_send_request()
303 req->reply_len = 3; pmu_send_request()
304 req->reply[0] = CUDA_PACKET; pmu_send_request()
305 req->reply[1] = 0; pmu_send_request()
306 req->reply[2] = CUDA_GET_TIME; pmu_send_request()
307 ret = pmu_queue_request(req); pmu_send_request()
310 if (req->nbytes != 6) pmu_send_request()
312 req->data[0] = PMU_SET_RTC; pmu_send_request()
313 req->nbytes = 5; pmu_send_request()
315 req->data[i] = req->data[i+1]; pmu_send_request()
316 req->reply_len = 3; pmu_send_request()
317 req->reply[0] = CUDA_PACKET; pmu_send_request()
318 req->reply[1] = 0; pmu_send_request()
319 req->reply[2] = CUDA_SET_TIME; pmu_send_request()
320 ret = pmu_queue_request(req); pmu_send_request()
323 if (req->nbytes != 4) pmu_send_request()
325 req->data[0] = PMU_READ_NVRAM; pmu_send_request()
326 req->data[1] = req->data[2]; pmu_send_request()
327 req->data[2] = req->data[3]; pmu_send_request()
328 req->nbytes = 3; pmu_send_request()
329 req->reply_len = 3; pmu_send_request()
330 req->reply[0] = CUDA_PACKET; pmu_send_request()
331 req->reply[1] = 0; pmu_send_request()
332 req->reply[2] = CUDA_GET_PRAM; pmu_send_request()
333 ret = pmu_queue_request(req); pmu_send_request()
336 if (req->nbytes != 5) pmu_send_request()
338 req->data[0] = PMU_WRITE_NVRAM; pmu_send_request()
339 req->data[1] = req->data[2]; pmu_send_request()
340 req->data[2] = req->data[3]; pmu_send_request()
341 req->data[3] = req->data[4]; pmu_send_request()
342 req->nbytes = 4; pmu_send_request()
343 req->reply_len = 3; pmu_send_request()
344 req->reply[0] = CUDA_PACKET; pmu_send_request()
345 req->reply[1] = 0; pmu_send_request()
346 req->reply[2] = CUDA_SET_PRAM; pmu_send_request()
347 ret = pmu_queue_request(req); pmu_send_request()
352 for (i = req->nbytes - 1; i > 1; --i) pmu_send_request()
353 req->data[i+2] = req->data[i]; pmu_send_request()
354 req->data[3] = req->nbytes - 2; pmu_send_request()
355 req->data[2] = pmu_adb_flags; pmu_send_request()
356 /*req->data[1] = req->data[1];*/ pmu_send_request()
357 req->data[0] = PMU_ADB_CMD; pmu_send_request()
358 req->nbytes += 2; pmu_send_request()
359 req->reply_expected = 1; pmu_send_request()
360 req->reply_len = 0; pmu_send_request()
361 ret = pmu_queue_request(req); pmu_send_request()
366 req->complete = 1; pmu_send_request()
371 while (!req->complete) pmu_send_request()
382 struct adb_request req; pmu_autopoll() local
388 pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86, pmu_autopoll()
392 pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF); pmu_autopoll()
395 while (!req.complete) pmu_autopoll()
404 struct adb_request req; pmu_reset_bus() local
413 req.nbytes = 5; pmu_reset_bus()
414 req.done = NULL; pmu_reset_bus()
415 req.data[0] = PMU_ADB_CMD; pmu_reset_bus()
416 req.data[1] = 0; pmu_reset_bus()
417 req.data[2] = 3; /* ADB_BUSRESET ??? */ pmu_reset_bus()
418 req.data[3] = 0; pmu_reset_bus()
419 req.data[4] = 0; pmu_reset_bus()
420 req.reply_len = 0; pmu_reset_bus()
421 req.reply_expected = 1; pmu_reset_bus()
422 if (pmu_queue_request(&req) != 0) pmu_reset_bus()
427 while (!req.complete) pmu_reset_bus()
430 while (!req.complete) { pmu_reset_bus()
447 pmu_request(struct adb_request *req, void (*done)(struct adb_request *), pmu_request() argument
455 req->complete = 1; pmu_request()
458 req->nbytes = nbytes; pmu_request()
459 req->done = done; pmu_request()
462 req->data[i] = va_arg(list, int); pmu_request()
464 if (pmu_data_len[req->data[0]][1] != 0) { pmu_request()
465 req->reply[0] = ADB_RET_OK; pmu_request()
466 req->reply_len = 1; pmu_request()
468 req->reply_len = 0; pmu_request()
469 req->reply_expected = 0; pmu_request()
470 return pmu_queue_request(req); pmu_request()
474 pmu_queue_request(struct adb_request *req) pmu_queue_request() argument
479 if (req->nbytes <= 0) { pmu_queue_request()
480 req->complete = 1; pmu_queue_request()
483 nsend = pmu_data_len[req->data[0]][0]; pmu_queue_request()
484 if (nsend >= 0 && req->nbytes != nsend + 1) { pmu_queue_request()
485 req->complete = 1; pmu_queue_request()
489 req->next = NULL; pmu_queue_request()
490 req->sent = 0; pmu_queue_request()
491 req->complete = 0; pmu_queue_request()
495 last_req->next = req; pmu_queue_request()
496 last_req = req; pmu_queue_request()
498 current_req = req; pmu_queue_request()
499 last_req = req; pmu_queue_request()
530 struct adb_request *req; pmu_start() local
535 req = current_req; pmu_start()
536 if (req == 0 || pmu_state != idle pmu_start()
537 || (req->reply_expected && req_awaiting_reply)) pmu_start()
542 data_len = pmu_data_len[req->data[0]][0]; pmu_start()
545 send_byte(req->data[0]); pmu_start()
571 struct adb_request *req; pmu_interrupt() local
602 req = current_req; pmu_interrupt()
604 data_len = req->nbytes - 1; pmu_interrupt()
609 send_byte(req->data[data_index++]); pmu_interrupt()
612 req->sent = 1; pmu_interrupt()
613 data_len = pmu_data_len[req->data[0]][1]; pmu_interrupt()
616 current_req = req->next; pmu_interrupt()
617 if (req->reply_expected) pmu_interrupt()
618 req_awaiting_reply = req; pmu_interrupt()
620 pmu_done(req); pmu_interrupt()
624 reply_ptr = req->reply + req->reply_len; pmu_interrupt()
655 req = current_req; pmu_interrupt()
656 current_req = req->next; pmu_interrupt()
657 req->reply_len += data_index; pmu_interrupt()
658 pmu_done(req); pmu_interrupt()
688 pmu_done(struct adb_request *req) pmu_done() argument
690 req->complete = 1; pmu_done()
691 if (req->done) pmu_done()
692 (*req->done)(req); pmu_done()
708 struct adb_request *req = req_awaiting_reply; pmu_handle_data() local
709 if (req == 0) { pmu_handle_data()
715 req->reply_len = 0; pmu_handle_data()
717 memcpy(req->reply, data + 1, len - 1); pmu_handle_data()
718 req->reply_len = len - 1; pmu_handle_data()
720 pmu_done(req); pmu_handle_data()
748 struct adb_request req; pmu_enable_backlight() local
756 pmu_request(&req, NULL, 3, PMU_READ_NVRAM, 0x14, 0xe); pmu_enable_backlight()
757 while (!req.complete) pmu_enable_backlight()
759 printk(KERN_DEBUG "pmu: nvram returned bright: %d\n", (int)req.reply[1]); pmu_enable_backlight()
760 backlight_level = req.reply[1]; pmu_enable_backlight()
767 pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, pmu_enable_backlight()
769 while (!req.complete) pmu_enable_backlight()
772 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, pmu_enable_backlight()
774 while (!req.complete) pmu_enable_backlight()
799 struct adb_request req; pmu_enable_irled() local
801 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED | pmu_enable_irled()
803 while (!req.complete) pmu_enable_irled()
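
The 68k PMU hits use one idiom throughout: build an adb_request (field by field or through the pmu_request() varargs helper), queue it with pmu_queue_request(), then busy-wait on req.complete. A minimal sketch of that pattern; the loop body that actually services the PMU while waiting is elided here, as it is in most of the hits:

/* Sketch: synchronous PMU command, mirroring the pmu_init() hits. */
static void pmu_enable_adb_interrupts(void)
{
	struct adb_request req;

	if (pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB) < 0)
		return;
	while (!req.complete)
		;	/* the driver polls/handles PMU interrupts here */
}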
H A Dadb-iop.c66 static void adb_iop_end_req(struct adb_request *req, int state) adb_iop_end_req() argument
68 req->complete = 1; adb_iop_end_req()
69 current_req = req->next; adb_iop_end_req()
70 if (req->done) (*req->done)(req); adb_iop_end_req()
82 struct adb_request *req; adb_iop_complete() local
87 req = current_req; adb_iop_complete()
88 if ((adb_iop_state == sending) && req && req->reply_expected) { adb_iop_complete()
105 struct adb_request *req; adb_iop_listen() local
113 req = current_req; adb_iop_listen()
116 printk("adb_iop_listen %p: rcvd packet, %d bytes: %02X %02X", req, adb_iop_listen()
133 if (req && (adb_iop_state != idle)) { adb_iop_listen()
134 adb_iop_end_req(req, idle); adb_iop_listen()
142 req->reply_len = amsg->count + 1; adb_iop_listen()
143 memcpy(req->reply, &amsg->cmd, req->reply_len); adb_iop_listen()
164 struct adb_request *req; adb_iop_start() local
171 req = current_req; adb_iop_start()
172 if (!req) return; adb_iop_start()
177 printk("adb_iop_start %p: sending packet, %d bytes:", req, req->nbytes); adb_iop_start()
178 for (i = 0 ; i < req->nbytes ; i++) adb_iop_start()
179 printk(" %02X", (uint) req->data[i]); adb_iop_start()
187 amsg.count = req->nbytes - 2; adb_iop_start()
191 memcpy(&amsg.cmd, req->data + 1, req->nbytes - 1); adb_iop_start()
193 req->sent = 1; adb_iop_start()
200 iop_send_message(ADB_IOP, ADB_CHAN, req, adb_iop_start()
217 int adb_iop_send_request(struct adb_request *req, int sync) adb_iop_send_request() argument
221 err = adb_iop_write(req); adb_iop_send_request()
225 while (!req->complete) adb_iop_poll(); adb_iop_send_request()
230 static int adb_iop_write(struct adb_request *req) adb_iop_write() argument
234 if ((req->nbytes < 2) || (req->data[0] != ADB_PACKET)) { adb_iop_write()
235 req->complete = 1; adb_iop_write()
241 req->next = NULL; adb_iop_write()
242 req->sent = 0; adb_iop_write()
243 req->complete = 0; adb_iop_write()
244 req->reply_len = 0; adb_iop_write()
247 last_req->next = req; adb_iop_write()
248 last_req = req; adb_iop_write()
250 current_req = req; adb_iop_write()
251 last_req = req; adb_iop_write()
273 struct adb_request req = { adb_iop_reset_bus() local
279 adb_iop_write(&req); adb_iop_reset_bus()
280 while (!req.complete) { adb_iop_reset_bus()
H A Dvia-cuda.c91 static int cuda_send_request(struct adb_request *req, int sync);
101 static int cuda_write(struct adb_request *req);
103 int cuda_request(struct adb_request *req,
120 struct adb_request req; find_via_cuda() local
137 cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1); find_via_cuda()
138 while (!req.complete) find_via_cuda()
146 struct adb_request req; find_via_cuda() local
190 cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1); find_via_cuda()
191 while (!req.complete) find_via_cuda()
307 cuda_send_request(struct adb_request *req, int sync) cuda_send_request() argument
312 req->complete = 1; cuda_send_request()
316 req->reply_expected = 1; cuda_send_request()
318 i = cuda_write(req); cuda_send_request()
323 while (!req->complete) cuda_send_request()
334 struct adb_request req; cuda_adb_autopoll() local
339 cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, (devs? 1: 0)); cuda_adb_autopoll()
340 while (!req.complete) cuda_adb_autopoll()
349 struct adb_request req; cuda_reset_adb_bus() local
354 cuda_request(&req, NULL, 2, ADB_PACKET, 0); /* maybe? */ cuda_reset_adb_bus()
355 while (!req.complete) cuda_reset_adb_bus()
362 cuda_request(struct adb_request *req, void (*done)(struct adb_request *), cuda_request() argument
369 req->complete = 1; cuda_request()
373 req->nbytes = nbytes; cuda_request()
374 req->done = done; cuda_request()
377 req->data[i] = va_arg(list, int); cuda_request()
379 req->reply_expected = 1; cuda_request()
380 return cuda_write(req); cuda_request()
385 cuda_write(struct adb_request *req) cuda_write() argument
389 if (req->nbytes < 2 || req->data[0] > CUDA_PACKET) { cuda_write()
390 req->complete = 1; cuda_write()
393 req->next = NULL; cuda_write()
394 req->sent = 0; cuda_write()
395 req->complete = 0; cuda_write()
396 req->reply_len = 0; cuda_write()
400 last_req->next = req; cuda_write()
401 last_req = req; cuda_write()
403 current_req = req; cuda_write()
404 last_req = req; cuda_write()
416 struct adb_request *req; cuda_start() local
420 req = current_req; cuda_start()
421 if (req == 0) cuda_start()
428 out_8(&via[SR], req->data[0]); cuda_start()
451 struct adb_request *req = NULL; cuda_interrupt() local
520 req = current_req; cuda_interrupt()
521 if (data_index >= req->nbytes) { cuda_interrupt()
525 req->sent = 1; cuda_interrupt()
526 if (req->reply_expected) { cuda_interrupt()
529 current_req = req->next; cuda_interrupt()
536 out_8(&via[SR], req->data[data_index++]); cuda_interrupt()
558 req = current_req; cuda_interrupt()
559 req->reply_len = reply_ptr - req->reply; cuda_interrupt()
560 if (req->data[0] == ADB_PACKET) { cuda_interrupt()
562 if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) { cuda_interrupt()
564 req->reply_len = 0; cuda_interrupt()
567 req->reply_len -= 2; cuda_interrupt()
568 memmove(req->reply, req->reply + 2, req->reply_len); cuda_interrupt()
571 current_req = req->next; cuda_interrupt()
599 if (complete && req) { cuda_interrupt()
600 void (*done)(struct adb_request *) = req->done; cuda_interrupt()
602 req->complete = 1; cuda_interrupt()
604 * struct request will survive to setting req->complete to 1 cuda_interrupt()
607 (*done)(req); cuda_interrupt()
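
The Cuda hits follow the same shape: cuda_request() packs a packet type plus data bytes, cuda_write() validates and queues the request on current_req/last_req, and callers spin on req.complete. A sketch of enabling ADB autopolling in the style of the find_via_cuda() hits; as above, the wait-loop body (normally a poll of the VIA) is left out:

/* Sketch: ask Cuda to autopoll ADB devices, as in the hits above. */
static void cuda_enable_autopoll(void)
{
	struct adb_request req;

	if (cuda_request(&req, NULL, 3, CUDA_PACKET, CUDA_AUTOPOLL, 1) < 0)
		return;
	while (!req.complete)
		;	/* interrupt/poll handling completes the request */
}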
H A Dvia-macii.c86 static int macii_send_request(struct adb_request *req, int sync);
87 static int macii_write(struct adb_request *req);
112 static unsigned char *reply_ptr; /* next byte in reply_buf or req->reply */
113 static int reading_reply; /* store reply in reply_buf else req->reply */
114 static int data_index; /* index of the next byte to send from req->data */
115 static int reply_len; /* number of bytes received in reply_buf or req->reply */
123 static int request_is_queued(struct adb_request *req) { request_is_queued() argument
129 if (cur == req) { request_is_queued()
205 static struct adb_request req; macii_queue_poll() local
215 BUG_ON(request_is_queued(&req)); macii_queue_poll()
217 adb_request(&req, NULL, ADBREQ_NOSEND, 1, macii_queue_poll()
220 req.sent = 0; macii_queue_poll()
221 req.complete = 0; macii_queue_poll()
222 req.reply_len = 0; macii_queue_poll()
223 req.next = current_req; macii_queue_poll()
226 current_req = &req; macii_queue_poll()
228 current_req = &req; macii_queue_poll()
229 last_req = &req; macii_queue_poll()
234 static int macii_send_request(struct adb_request *req, int sync) macii_send_request() argument
239 BUG_ON(request_is_queued(req)); macii_send_request()
242 err = macii_write(req); macii_send_request()
246 while (!req->complete) { macii_send_request()
249 BUG_ON(request_is_queued(req)); macii_send_request()
256 static int macii_write(struct adb_request *req) macii_write() argument
258 if (req->nbytes < 2 || req->data[0] != ADB_PACKET || req->nbytes > 15) { macii_write()
259 req->complete = 1; macii_write()
263 req->next = NULL; macii_write()
264 req->sent = 0; macii_write()
265 req->complete = 0; macii_write()
266 req->reply_len = 0; macii_write()
269 last_req->next = req; macii_write()
270 last_req = req; macii_write()
272 current_req = req; macii_write()
273 last_req = req; macii_write()
282 static struct adb_request req; macii_autopoll() local
297 adb_request(&req, NULL, ADBREQ_NOSEND, 1, macii_autopoll()
299 err = macii_write(&req); macii_autopoll()
327 static struct adb_request req; macii_reset_bus() local
329 if (request_is_queued(&req)) macii_reset_bus()
333 adb_request(&req, NULL, 0, 1, ADB_BUSRESET); macii_reset_bus()
344 struct adb_request *req; macii_start() local
346 req = current_req; macii_start()
348 BUG_ON(req == NULL); macii_start()
354 * And req->nbytes is the number of bytes of real data plus one. macii_start()
358 command_byte = req->data[1]; macii_start()
362 via[SR] = req->data[1]; macii_start()
390 struct adb_request *req; macii_interrupt() local
434 req = current_req; macii_interrupt()
435 if (data_index >= req->nbytes) { macii_interrupt()
436 req->sent = 1; macii_interrupt()
439 if (req->reply_expected) { macii_interrupt()
442 req->complete = 1; macii_interrupt()
443 current_req = req->next; macii_interrupt()
444 if (req->done) (*req->done)(req); macii_interrupt()
461 via[SR] = req->data[data_index++]; macii_interrupt()
519 req = current_req; macii_interrupt()
520 req->reply_len = reply_len; macii_interrupt()
521 req->complete = 1; macii_interrupt()
522 current_req = req->next; macii_interrupt()
523 if (req->done) (*req->done)(req); macii_interrupt()
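
Several of these drivers share the same singly linked request queue: clear req->next, reset sent/complete/reply_len, then append behind last_req, or make the request current_req when the queue is empty (compare the macii_write(), cuda_write() and pmu_queue_request() hits). A generic sketch of that append step; the queue-head names are stand-ins for each driver's globals, and the real drivers do this with interrupts disabled:

/* Sketch: the queue-append idiom repeated across the ADB/PMU drivers. */
static void adb_queue_append(struct adb_request *req,
			     struct adb_request **my_current_req,
			     struct adb_request **my_last_req)
{
	req->next = NULL;
	req->sent = 0;
	req->complete = 0;
	req->reply_len = 0;

	if (*my_current_req != NULL) {
		(*my_last_req)->next = req;	/* append to a non-empty queue */
		*my_last_req = req;
	} else {
		*my_current_req = req;		/* queue was empty: becomes head */
		*my_last_req = req;
	}
}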
H A Dvia-pmu.c184 static int pmu_send_request(struct adb_request *req, int sync);
216 int pmu_polled_request(struct adb_request *req);
531 struct adb_request req; init_pmu() local
536 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); init_pmu()
538 while (!req.complete) { init_pmu()
563 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); init_pmu()
564 while (!req.complete) init_pmu()
569 pmu_request(&req, NULL, 1, PMU_GET_VERSION); init_pmu()
570 pmu_wait_complete(&req); init_pmu()
571 if (req.reply_len > 0) init_pmu()
572 pmu_version = req.reply[0]; init_pmu()
576 pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, init_pmu()
578 pmu_wait_complete(&req); init_pmu()
579 if (req.reply_len == 2) { init_pmu()
580 if (req.reply[1] & PMU_PWR_WAKEUP_AC_INSERT) init_pmu()
597 struct adb_request req; pmu_set_server_mode() local
603 pmu_request(&req, NULL, 2, PMU_POWER_EVENTS, PMU_PWR_GET_POWERUP_EVENTS); pmu_set_server_mode()
604 pmu_wait_complete(&req); pmu_set_server_mode()
605 if (req.reply_len < 2) pmu_set_server_mode()
608 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, pmu_set_server_mode()
610 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); pmu_set_server_mode()
612 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, pmu_set_server_mode()
614 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); pmu_set_server_mode()
615 pmu_wait_complete(&req); pmu_set_server_mode()
622 done_battery_state_ohare(struct adb_request* req) done_battery_state_ohare() argument
649 if (req->reply[0] & 0x01) done_battery_state_ohare()
666 if (req->reply[0] & 0x04) { done_battery_state_ohare()
668 if (req->reply[0] & 0x02) done_battery_state_ohare()
670 vb = (req->reply[1] << 8) | req->reply[2]; done_battery_state_ohare()
672 amperage = req->reply[5]; done_battery_state_ohare()
673 if ((req->reply[0] & 0x01) == 0) { done_battery_state_ohare()
676 } else if (req->reply[0] & 0x02) { done_battery_state_ohare()
681 if (req->reply[0] & 0x40) { done_battery_state_ohare()
682 pcharge = (req->reply[6] << 8) + req->reply[7]; done_battery_state_ohare()
710 done_battery_state_smart(struct adb_request* req) done_battery_state_smart() argument
735 if (req->reply[1] & 0x01) done_battery_state_smart()
743 if (req->reply[1] & 0x04) { done_battery_state_smart()
745 switch(req->reply[0]) { done_battery_state_smart()
747 case 4: capa = req->reply[2]; done_battery_state_smart()
748 max = req->reply[3]; done_battery_state_smart()
749 amperage = *((signed char *)&req->reply[4]); done_battery_state_smart()
750 voltage = req->reply[5]; done_battery_state_smart()
752 case 5: capa = (req->reply[2] << 8) | req->reply[3]; done_battery_state_smart()
753 max = (req->reply[4] << 8) | req->reply[5]; done_battery_state_smart()
754 amperage = *((signed short *)&req->reply[6]); done_battery_state_smart()
755 voltage = (req->reply[8] << 8) | req->reply[9]; done_battery_state_smart()
759 "len: %d, %4ph\n", req->reply_len, done_battery_state_smart()
760 req->reply); done_battery_state_smart()
765 if ((req->reply[1] & 0x01) && (amperage > 0)) done_battery_state_smart()
774 if ((req->reply[1] & 0x01) && (amperage > 0)) done_battery_state_smart()
962 static int pmu_send_request(struct adb_request *req, int sync) pmu_send_request() argument
967 req->complete = 1; pmu_send_request()
973 switch (req->data[0]) { pmu_send_request()
975 for (i = 0; i < req->nbytes - 1; ++i) pmu_send_request()
976 req->data[i] = req->data[i+1]; pmu_send_request()
977 --req->nbytes; pmu_send_request()
978 if (pmu_data_len[req->data[0]][1] != 0) { pmu_send_request()
979 req->reply[0] = ADB_RET_OK; pmu_send_request()
980 req->reply_len = 1; pmu_send_request()
982 req->reply_len = 0; pmu_send_request()
983 ret = pmu_queue_request(req); pmu_send_request()
986 switch (req->data[1]) { pmu_send_request()
988 if (req->nbytes != 2) pmu_send_request()
990 req->data[0] = PMU_READ_RTC; pmu_send_request()
991 req->nbytes = 1; pmu_send_request()
992 req->reply_len = 3; pmu_send_request()
993 req->reply[0] = CUDA_PACKET; pmu_send_request()
994 req->reply[1] = 0; pmu_send_request()
995 req->reply[2] = CUDA_GET_TIME; pmu_send_request()
996 ret = pmu_queue_request(req); pmu_send_request()
999 if (req->nbytes != 6) pmu_send_request()
1001 req->data[0] = PMU_SET_RTC; pmu_send_request()
1002 req->nbytes = 5; pmu_send_request()
1004 req->data[i] = req->data[i+1]; pmu_send_request()
1005 req->reply_len = 3; pmu_send_request()
1006 req->reply[0] = CUDA_PACKET; pmu_send_request()
1007 req->reply[1] = 0; pmu_send_request()
1008 req->reply[2] = CUDA_SET_TIME; pmu_send_request()
1009 ret = pmu_queue_request(req); pmu_send_request()
1016 for (i = req->nbytes - 1; i > 1; --i) pmu_send_request()
1017 req->data[i+2] = req->data[i]; pmu_send_request()
1018 req->data[3] = req->nbytes - 2; pmu_send_request()
1019 req->data[2] = pmu_adb_flags; pmu_send_request()
1020 /*req->data[1] = req->data[1];*/ pmu_send_request()
1021 req->data[0] = PMU_ADB_CMD; pmu_send_request()
1022 req->nbytes += 2; pmu_send_request()
1023 req->reply_expected = 1; pmu_send_request()
1024 req->reply_len = 0; pmu_send_request()
1025 ret = pmu_queue_request(req); pmu_send_request()
1029 req->complete = 1; pmu_send_request()
1034 while (!req->complete) pmu_send_request()
1043 struct adb_request req; __pmu_adb_autopoll() local
1046 pmu_request(&req, NULL, 5, PMU_ADB_CMD, 0, 0x86, __pmu_adb_autopoll()
1050 pmu_request(&req, NULL, 1, PMU_ADB_POLL_OFF); __pmu_adb_autopoll()
1053 while (!req.complete) __pmu_adb_autopoll()
1070 struct adb_request req; pmu_adb_reset_bus() local
1079 req.nbytes = 4; pmu_adb_reset_bus()
1080 req.done = NULL; pmu_adb_reset_bus()
1081 req.data[0] = PMU_ADB_CMD; pmu_adb_reset_bus()
1082 req.data[1] = ADB_BUSRESET; pmu_adb_reset_bus()
1083 req.data[2] = 0; pmu_adb_reset_bus()
1084 req.data[3] = 0; pmu_adb_reset_bus()
1085 req.data[4] = 0; pmu_adb_reset_bus()
1086 req.reply_len = 0; pmu_adb_reset_bus()
1087 req.reply_expected = 1; pmu_adb_reset_bus()
1088 if (pmu_queue_request(&req) != 0) { pmu_adb_reset_bus()
1092 pmu_wait_complete(&req); pmu_adb_reset_bus()
1103 pmu_request(struct adb_request *req, void (*done)(struct adb_request *), pmu_request() argument
1114 req->complete = 1; pmu_request()
1117 req->nbytes = nbytes; pmu_request()
1118 req->done = done; pmu_request()
1121 req->data[i] = va_arg(list, int); pmu_request()
1123 req->reply_len = 0; pmu_request()
1124 req->reply_expected = 0; pmu_request()
1125 return pmu_queue_request(req); pmu_request()
1129 pmu_queue_request(struct adb_request *req) pmu_queue_request() argument
1135 req->complete = 1; pmu_queue_request()
1138 if (req->nbytes <= 0) { pmu_queue_request()
1139 req->complete = 1; pmu_queue_request()
1142 nsend = pmu_data_len[req->data[0]][0]; pmu_queue_request()
1143 if (nsend >= 0 && req->nbytes != nsend + 1) { pmu_queue_request()
1144 req->complete = 1; pmu_queue_request()
1148 req->next = NULL; pmu_queue_request()
1149 req->sent = 0; pmu_queue_request()
1150 req->complete = 0; pmu_queue_request()
1154 last_req->next = req; pmu_queue_request()
1155 last_req = req; pmu_queue_request()
1157 current_req = req; pmu_queue_request()
1158 last_req = req; pmu_queue_request()
1208 pmu_done(struct adb_request *req) pmu_done() argument
1210 void (*done)(struct adb_request *) = req->done; pmu_done()
1212 req->complete = 1; pmu_done()
1214 * struct request will survive to setting req->complete to 1 pmu_done()
1217 (*done)(req); pmu_done()
1223 struct adb_request *req; pmu_start() local
1227 req = current_req; pmu_start()
1228 if (req == 0 || pmu_state != idle pmu_start()
1229 || (/*req->reply_expected && */req_awaiting_reply)) pmu_start()
1234 data_len = pmu_data_len[req->data[0]][0]; pmu_start()
1241 send_byte(req->data[0]); pmu_start()
1270 pmu_wait_complete(struct adb_request *req) pmu_wait_complete() argument
1274 while((pmu_state != idle && pmu_state != locked) || !req->complete) pmu_wait_complete()
1382 struct adb_request *req = req_awaiting_reply; pmu_handle_data() local
1383 if (req == 0) { pmu_handle_data()
1389 req->reply_len = 0; pmu_handle_data()
1391 memcpy(req->reply, data + 1, len - 1); pmu_handle_data()
1392 req->reply_len = len - 1; pmu_handle_data()
1394 pmu_done(req); pmu_handle_data()
1453 struct adb_request *req; pmu_sr_intr() local
1475 req = current_req; pmu_sr_intr()
1477 data_len = req->nbytes - 1; pmu_sr_intr()
1482 send_byte(req->data[data_index++]); pmu_sr_intr()
1485 req->sent = 1; pmu_sr_intr()
1486 data_len = pmu_data_len[req->data[0]][1]; pmu_sr_intr()
1489 current_req = req->next; pmu_sr_intr()
1490 if (req->reply_expected) pmu_sr_intr()
1491 req_awaiting_reply = req; pmu_sr_intr()
1493 return req; pmu_sr_intr()
1497 reply_ptr = req->reply + req->reply_len; pmu_sr_intr()
1533 req = current_req; pmu_sr_intr()
1539 current_req = req->next; pmu_sr_intr()
1540 req->reply_len += data_index; pmu_sr_intr()
1541 if (req->data[0] == PMU_SLEEP || req->data[0] == PMU_CPU_SPEED) pmu_sr_intr()
1545 return req; pmu_sr_intr()
1563 struct adb_request *req = NULL; via_pmu_interrupt() local
1587 req = pmu_sr_intr(); via_pmu_interrupt()
1588 if (req) via_pmu_interrupt()
1626 if (req) { via_pmu_interrupt()
1627 pmu_done(req); via_pmu_interrupt()
1628 req = NULL; via_pmu_interrupt()
1680 struct adb_request req; pmu_enable_irled() local
1687 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, PMU_POW_IRLED | pmu_enable_irled()
1689 pmu_wait_complete(&req); pmu_enable_irled()
1695 struct adb_request req; pmu_restart() local
1705 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB | pmu_restart()
1707 while(!req.complete) pmu_restart()
1711 pmu_request(&req, NULL, 1, PMU_RESET); pmu_restart()
1712 pmu_wait_complete(&req); pmu_restart()
1720 struct adb_request req; pmu_shutdown() local
1730 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, PMU_INT_ADB | pmu_shutdown()
1732 pmu_wait_complete(&req); pmu_shutdown()
1740 pmu_request(&req, NULL, 5, PMU_SHUTDOWN, pmu_shutdown()
1742 pmu_wait_complete(&req); pmu_shutdown()
1797 struct adb_request req; powerbook_sleep_grackle() local
1805 pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, PMU_POW0_OFF|PMU_POW0_HARD_DRIVE); powerbook_sleep_grackle()
1806 pmu_wait_complete(&req); powerbook_sleep_grackle()
1807 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, powerbook_sleep_grackle()
1809 pmu_wait_complete(&req); powerbook_sleep_grackle()
1816 pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T'); powerbook_sleep_grackle()
1817 pmu_wait_complete(&req); powerbook_sleep_grackle()
1857 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); powerbook_sleep_grackle()
1858 pmu_wait_complete(&req); powerbook_sleep_grackle()
1859 pmu_request(&req, NULL, 2, PMU_POWER_CTRL0, powerbook_sleep_grackle()
1861 pmu_wait_complete(&req); powerbook_sleep_grackle()
1862 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, powerbook_sleep_grackle()
1864 pmu_wait_complete(&req); powerbook_sleep_grackle()
1874 struct adb_request req; powerbook_sleep_Core99() local
1885 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, 0); powerbook_sleep_Core99()
1886 pmu_wait_complete(&req); powerbook_sleep_Core99()
1889 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_CLR_WAKEUP_EVENTS, powerbook_sleep_Core99()
1891 pmu_wait_complete(&req); powerbook_sleep_Core99()
1892 pmu_request(&req, NULL, 4, PMU_POWER_EVENTS, PMU_PWR_SET_WAKEUP_EVENTS, powerbook_sleep_Core99()
1895 pmu_wait_complete(&req); powerbook_sleep_Core99()
1903 pmu_request(&req, NULL, 5, PMU_SLEEP, 'M', 'A', 'T', 'T'); powerbook_sleep_Core99()
1904 pmu_wait_complete(&req); powerbook_sleep_Core99()
1946 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); powerbook_sleep_Core99()
1947 pmu_wait_complete(&req); powerbook_sleep_Core99()
1948 pmu_request(&req, NULL, 2, PMU_SET_INTR_MASK, pmu_intr_mask); powerbook_sleep_Core99()
1949 pmu_wait_complete(&req); powerbook_sleep_Core99()
2458 pmu_polled_request(struct adb_request *req) pmu_polled_request() argument
2464 req->complete = 1; pmu_polled_request()
2465 c = req->data[0]; pmu_polled_request()
2467 if (l >= 0 && req->nbytes != l + 1) pmu_polled_request()
2478 l = req->nbytes - 1; pmu_polled_request()
2482 polled_send_byte(v, req->data[i]); pmu_polled_request()
2488 req->reply[i + req->reply_len] = polled_recv_byte(v); pmu_polled_request()
2490 if (req->done) pmu_polled_request()
2491 (*req->done)(req); pmu_polled_request()
2500 struct adb_request req; pmu_blink() local
2502 memset(&req, 0, sizeof(req)); pmu_blink()
2505 req.nbytes = 4; pmu_blink()
2506 req.done = NULL; pmu_blink()
2507 req.data[0] = 0xee; pmu_blink()
2508 req.data[1] = 4; pmu_blink()
2509 req.data[2] = 0; pmu_blink()
2510 req.data[3] = 1; pmu_blink()
2511 req.reply[0] = ADB_RET_OK; pmu_blink()
2512 req.reply_len = 1; pmu_blink()
2513 req.reply_expected = 0; pmu_blink()
2514 pmu_polled_request(&req); pmu_blink()
2516 req.nbytes = 4; pmu_blink()
2517 req.done = NULL; pmu_blink()
2518 req.data[0] = 0xee; pmu_blink()
2519 req.data[1] = 4; pmu_blink()
2520 req.data[2] = 0; pmu_blink()
2521 req.data[3] = 0; pmu_blink()
2522 req.reply[0] = ADB_RET_OK; pmu_blink()
2523 req.reply_len = 1; pmu_blink()
2524 req.reply_expected = 0; pmu_blink()
2525 pmu_polled_request(&req); pmu_blink()
2551 struct adb_request req; pmu_syscore_resume() local
2557 pmu_request(&req, NULL, 2, PMU_SYSTEM_READY, 2); pmu_syscore_resume()
2558 pmu_wait_complete(&req); pmu_syscore_resume()
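
The PowerMac via-pmu hits add pmu_wait_complete(), which spins until the PMU state machine is idle and req->complete is set, so synchronous commands read as request-then-wait pairs throughout init_pmu(), pmu_set_server_mode() and the sleep paths. A sketch of one such pair, assuming the usual <linux/adb.h> and <linux/pmu.h> declarations:

#include <linux/adb.h>	/* struct adb_request */
#include <linux/pmu.h>	/* pmu_request(), pmu_wait_complete(), PMU_* opcodes */

/* Sketch: read the PMU firmware version synchronously, as init_pmu() does. */
static int pmu_read_version(void)
{
	struct adb_request req;

	if (pmu_request(&req, NULL, 1, PMU_GET_VERSION) < 0)
		return -1;
	pmu_wait_complete(&req);
	return req.reply_len > 0 ? req.reply[0] : -1;
}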
H A Dvia-maciisi.c82 static int maciisi_send_request(struct adb_request* req, int sync);
83 static void maciisi_sync(struct adb_request *req);
84 static int maciisi_write(struct adb_request* req);
229 maciisi_send_request(struct adb_request* req, int sync) maciisi_send_request() argument
238 req->complete = 1; maciisi_send_request()
245 for (i = 0; i < req->nbytes; i++) { maciisi_send_request()
246 printk(" %.2x", req->data[i]); maciisi_send_request()
252 req->reply_expected = 1; maciisi_send_request()
254 i = maciisi_write(req); maciisi_send_request()
271 maciisi_sync(req); maciisi_send_request()
277 static void maciisi_sync(struct adb_request *req) maciisi_sync() argument
286 while (!req->complete && count++ < 50) { maciisi_sync()
296 maciisi_request(struct adb_request *req, void (*done)(struct adb_request *), maciisi_request() argument
302 req->nbytes = nbytes; maciisi_request()
303 req->done = done; maciisi_request()
304 req->reply_expected = 0; maciisi_request()
307 req->data[i++] = va_arg(list, int); maciisi_request()
310 return maciisi_send_request(req, 1); maciisi_request()
315 maciisi_write(struct adb_request* req) maciisi_write() argument
322 if (req->nbytes < 2 || req->data[0] > CUDA_PACKET) { maciisi_write()
324 req->complete = 1; maciisi_write()
327 req->next = NULL; maciisi_write()
328 req->sent = 0; maciisi_write()
329 req->complete = 0; maciisi_write()
330 req->reply_len = 0; maciisi_write()
335 last_req->next = req; maciisi_write()
336 last_req = req; maciisi_write()
338 current_req = req; maciisi_write()
339 last_req = req; maciisi_write()
367 struct adb_request* req; maciisi_start() local
382 req = current_req; maciisi_start()
383 if (req == NULL) maciisi_start()
406 via[SR] = req->data[0]; maciisi_start()
437 struct adb_request *req; maciisi_interrupt() local
492 req = current_req; maciisi_interrupt()
512 if (data_index >= req->nbytes) { maciisi_interrupt()
519 req->sent = 1; maciisi_interrupt()
521 if (req->reply_expected) { maciisi_interrupt()
526 current_req = req->next; maciisi_interrupt()
527 if (req->done) maciisi_interrupt()
528 (*req->done)(req); maciisi_interrupt()
543 via[SR] = req->data[data_index++]; maciisi_interrupt()
584 req = current_req; maciisi_interrupt()
585 req->reply_len = reply_ptr - req->reply; maciisi_interrupt()
586 if (req->data[0] == ADB_PACKET) { maciisi_interrupt()
588 if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) { maciisi_interrupt()
590 req->reply_len = 0; maciisi_interrupt()
593 req->reply_len -= 2; maciisi_interrupt()
594 memmove(req->reply, req->reply + 2, req->reply_len); maciisi_interrupt()
601 for (i = 0; i < req->reply_len; ++i) maciisi_interrupt()
602 printk(" %.2x", req->reply[i]); maciisi_interrupt()
606 req->complete = 1; maciisi_interrupt()
607 current_req = req->next; maciisi_interrupt()
608 if (req->done) maciisi_interrupt()
609 (*req->done)(req); maciisi_interrupt()
H A Dmacio-adb.c66 static int macio_send_request(struct adb_request *req, int sync);
166 static int macio_send_request(struct adb_request *req, int sync) macio_send_request() argument
171 if (req->data[0] != ADB_PACKET) macio_send_request()
174 for (i = 0; i < req->nbytes - 1; ++i) macio_send_request()
175 req->data[i] = req->data[i+1]; macio_send_request()
176 --req->nbytes; macio_send_request()
178 req->next = NULL; macio_send_request()
179 req->sent = 0; macio_send_request()
180 req->complete = 0; macio_send_request()
181 req->reply_len = 0; macio_send_request()
185 last_req->next = req; macio_send_request()
186 last_req = req; macio_send_request()
188 current_req = last_req = req; macio_send_request()
194 while (!req->complete) macio_send_request()
204 struct adb_request *req = NULL; macio_adb_interrupt() local
214 if ((req = current_req) != 0) { macio_adb_interrupt()
216 for (i = 0; i < req->nbytes; ++i) macio_adb_interrupt()
217 out_8(&adb->data[i].r, req->data[i]); macio_adb_interrupt()
218 out_8(&adb->dcount.r, req->nbytes & HMB); macio_adb_interrupt()
219 req->sent = 1; macio_adb_interrupt()
220 if (req->reply_expected) { macio_adb_interrupt()
224 current_req = req->next; macio_adb_interrupt()
238 req = current_req; macio_adb_interrupt()
240 req->reply_len = in_8(&adb->dcount.r) & HMB; macio_adb_interrupt()
241 for (i = 0; i < req->reply_len; ++i) macio_adb_interrupt()
242 req->reply[i] = in_8(&adb->data[i].r); macio_adb_interrupt()
244 current_req = req->next; macio_adb_interrupt()
260 if (complete && req) { macio_adb_interrupt()
261 void (*done)(struct adb_request *) = req->done; macio_adb_interrupt()
263 req->complete = 1; macio_adb_interrupt()
265 * struct request will survive to setting req->complete to 1 macio_adb_interrupt()
268 (*done)(req); macio_adb_interrupt()
H A Dvia-pmu-backlight.c73 struct adb_request req; __pmu_backlight_update_status() local
84 pmu_request(&req, NULL, 2, PMU_BACKLIGHT_BRIGHT, pmulevel); __pmu_backlight_update_status()
85 pmu_wait_complete(&req); __pmu_backlight_update_status()
87 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, __pmu_backlight_update_status()
89 pmu_wait_complete(&req); __pmu_backlight_update_status()
91 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, __pmu_backlight_update_status()
93 pmu_wait_complete(&req); __pmu_backlight_update_status()
127 struct adb_request req; pmu_backlight_set_sleep() local
129 pmu_request(&req, NULL, 2, PMU_POWER_CTRL, pmu_backlight_set_sleep()
131 pmu_wait_complete(&req); pmu_backlight_set_sleep()
175 struct adb_request req; pmu_backlight_init() local
176 pmu_request(&req, NULL, 2, 0xd9, 0); pmu_backlight_init()
177 pmu_wait_complete(&req); pmu_backlight_init()
180 (req.reply[0] >> 4) * pmu_backlight_init()
H A Dadb.c116 static void printADBreply(struct adb_request *req)
120 printk("adb reply (%d)", req->reply_len);
121 for(i = 0; i < req->reply_len; i++)
122 printk(" %x", req->reply[i]);
132 struct adb_request req; adb_scan_bus() local
137 adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, adb_scan_bus()
139 if (req.reply_len > 1) adb_scan_bus()
156 adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, adb_scan_bus()
163 adb_request(&req, NULL, ADBREQ_SYNC, 3, adb_scan_bus()
171 adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, adb_scan_bus()
173 if (req.reply_len <= 1) continue; adb_scan_bus()
178 adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, adb_scan_bus()
180 if (req.reply_len > 1) { adb_scan_bus()
201 adb_request(&req, NULL, ADBREQ_SYNC, 3, adb_scan_bus()
213 adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, adb_scan_bus()
215 adb_handler[i].handler_id = req.reply[2]; adb_scan_bus()
416 static void adb_sync_req_done(struct adb_request *req) adb_sync_req_done() argument
418 struct completion *comp = req->arg; adb_sync_req_done()
424 adb_request(struct adb_request *req, void (*done)(struct adb_request *), adb_request() argument
437 req->nbytes = nbytes+1; adb_request()
438 req->done = done; adb_request()
439 req->reply_expected = flags & ADBREQ_REPLY; adb_request()
440 req->data[0] = ADB_PACKET; adb_request()
443 req->data[i+1] = va_arg(list, int); adb_request()
452 req->done = adb_sync_req_done; adb_request()
453 req->arg = &comp; adb_request()
457 rc = adb_controller->send_request(req, 0); adb_request()
459 if ((flags & ADBREQ_SYNC) && !rc && !req->complete) adb_request()
562 struct adb_request req; try_handler_change() local
566 adb_request(&req, NULL, ADBREQ_SYNC, 3, try_handler_change()
568 adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, try_handler_change()
570 if (req.reply_len < 2) try_handler_change()
572 if (req.reply[2] != new_id) try_handler_change()
574 adb_handler[address].handler_id = req.reply[2]; try_handler_change()
617 static void adb_write_done(struct adb_request *req) adb_write_done() argument
619 struct adbdev_state *state = (struct adbdev_state *) req->arg; adb_write_done()
622 if (!req->complete) { adb_write_done()
623 req->reply_len = 0; adb_write_done()
624 req->complete = 1; adb_write_done()
629 kfree(req); adb_write_done()
639 req->next = NULL; adb_write_done()
640 *ap = req; adb_write_done()
647 do_adb_query(struct adb_request *req) do_adb_query() argument
651 switch(req->data[1]) { do_adb_query()
653 if (req->nbytes < 3) do_adb_query()
656 req->reply[0] = adb_handler[req->data[2]].original_address; do_adb_query()
657 req->reply[1] = adb_handler[req->data[2]].handler_id; do_adb_query()
659 req->complete = 1; do_adb_query()
660 req->reply_len = 2; do_adb_query()
661 adb_write_done(req); do_adb_query()
722 struct adb_request *req; adb_read() local
728 if (count > sizeof(req->reply)) adb_read()
729 count = sizeof(req->reply); adb_read()
733 req = NULL; adb_read()
739 req = state->completed; adb_read()
740 if (req != NULL) adb_read()
741 state->completed = req->next; adb_read()
744 if (req != NULL || ret != 0) adb_read()
767 ret = req->reply_len; adb_read()
770 if (ret > 0 && copy_to_user(buf, req->reply, ret)) adb_read()
773 kfree(req); adb_read()
782 struct adb_request *req; adb_write() local
784 if (count < 2 || count > sizeof(req->data)) adb_write()
791 req = kmalloc(sizeof(struct adb_request), adb_write()
793 if (req == NULL) adb_write()
796 req->nbytes = count; adb_write()
797 req->done = adb_write_done; adb_write()
798 req->arg = (void *) state; adb_write()
799 req->complete = 0; adb_write()
802 if (copy_from_user(req->data, buf, count)) adb_write()
811 if (req->data[0] == ADB_QUERY) { adb_write()
813 ret = do_adb_query(req); adb_write()
820 else if ((req->data[0] == ADB_PACKET) && (count > 1) adb_write()
821 && (req->data[1] == ADB_BUSRESET)) { adb_write()
829 req->reply_expected = ((req->data[1] & 0xc) == 0xc); adb_write()
831 ret = adb_controller->send_request(req, 0); adb_write()
844 kfree(req); adb_write()
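
adb.c layers a generic interface over the controllers above: adb_request() builds the packet (ADB_PACKET plus command bytes), ADBREQ_SYNC makes the call block via adb_sync_req_done() and a completion, and ADBREQ_REPLY asks the controller to capture a reply into req.reply. A sketch of a synchronous probe in the style of the adb_scan_bus() hits; the concrete command byte is illustrative only:

#include <linux/adb.h>	/* adb_request(), ADBREQ_SYNC, ADBREQ_REPLY */

/* Sketch: synchronous ADB transaction with a captured reply.
 * "cmd" stands in for a real ADB command byte (Talk register, etc.). */
static int adb_probe_address(unsigned char cmd)
{
	struct adb_request req;

	adb_request(&req, NULL, ADBREQ_SYNC | ADBREQ_REPLY, 1, cmd);
	return req.reply_len > 1;	/* nonzero if some device answered */
}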
/linux-4.1.27/drivers/s390/cio/
H A Dccwreq.c42 struct ccw_request *req = &cdev->private->req; ccwreq_next_path() local
44 if (!req->singlepath) { ccwreq_next_path()
45 req->mask = 0; ccwreq_next_path()
48 req->retries = req->maxretries; ccwreq_next_path()
49 req->mask = lpm_adjust(req->mask >> 1, req->lpm); ccwreq_next_path()
51 return req->mask; ccwreq_next_path()
59 struct ccw_request *req = &cdev->private->req; ccwreq_stop() local
61 if (req->done) ccwreq_stop()
63 req->done = 1; ccwreq_stop()
66 if (rc && rc != -ENODEV && req->drc) ccwreq_stop()
67 rc = req->drc; ccwreq_stop()
68 req->callback(cdev, req->data, rc); ccwreq_stop()
76 struct ccw_request *req = &cdev->private->req; ccwreq_do() local
78 struct ccw1 *cp = req->cp; ccwreq_do()
81 while (req->mask) { ccwreq_do()
82 if (req->retries-- == 0) { ccwreq_do()
89 rc = cio_start(sch, cp, (u8) req->mask); ccwreq_do()
92 ccw_device_set_timeout(cdev, req->timeout); ccwreq_do()
117 * Perform the I/O request specified by cdev->req.
121 struct ccw_request *req = &cdev->private->req; ccw_request_start() local
123 if (req->singlepath) { ccw_request_start()
125 req->mask = 0x8080; ccw_request_start()
127 req->mask = req->lpm; ccw_request_start()
129 req->retries = req->maxretries; ccw_request_start()
130 req->mask = lpm_adjust(req->mask, req->lpm); ccw_request_start()
131 req->drc = 0; ccw_request_start()
132 req->done = 0; ccw_request_start()
133 req->cancel = 0; ccw_request_start()
134 if (!req->mask) ccw_request_start()
147 * Cancel the I/O request specified by cdev->req. Return non-zero if request
153 struct ccw_request *req = &cdev->private->req; ccw_request_cancel() local
156 if (req->done) ccw_request_cancel()
158 req->cancel = 1; ccw_request_cancel()
232 struct ccw_request *req = &cdev->private->req; ccwreq_log_status() local
240 data.retries = req->retries; ccwreq_log_status()
241 data.lpm = (u8) req->mask; ccwreq_log_status()
256 struct ccw_request *req = &cdev->private->req; ccw_request_handler() local
262 if (req->filter) ccw_request_handler()
263 status = req->filter(cdev, req->data, irb, status); ccw_request_handler()
281 if (req->cancel) { ccw_request_handler()
288 if (!req->check) ccw_request_handler()
290 switch (req->check(cdev, req->data)) { ccw_request_handler()
328 struct ccw_request *req = &cdev->private->req; ccw_request_timeout() local
338 dev_name(&cdev->dev), req->timeout / HZ, ccw_request_timeout()
347 req->drc = -ETIME; ccw_request_timeout()
H A Ddevice_pgid.c57 struct ccw_request *req = &cdev->private->req; nop_build_cp() local
64 req->cp = cp; nop_build_cp()
73 struct ccw_request *req = &cdev->private->req; nop_do() local
75 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & sch->opm & nop_do()
77 if (!req->lpm) nop_do()
105 struct ccw_request *req = &cdev->private->req; nop_callback() local
109 sch->vpm |= req->lpm; nop_callback()
112 cdev->private->path_noirq_mask |= req->lpm; nop_callback()
115 cdev->private->path_notoper_mask |= req->lpm; nop_callback()
121 req->lpm >>= 1; nop_callback()
134 struct ccw_request *req = &cdev->private->req; spid_build_cp() local
136 int i = 8 - ffs(req->lpm); spid_build_cp()
144 req->cp = cp; spid_build_cp()
169 struct ccw_request *req = &cdev->private->req; pgid_wipeout_start() local
177 memset(req, 0, sizeof(*req)); pgid_wipeout_start()
178 req->timeout = PGID_TIMEOUT; pgid_wipeout_start()
179 req->maxretries = PGID_RETRIES; pgid_wipeout_start()
180 req->lpm = sch->schib.pmcw.pam; pgid_wipeout_start()
181 req->callback = pgid_wipeout_callback; pgid_wipeout_start()
195 struct ccw_request *req = &cdev->private->req; spid_do() local
199 req->lpm = lpm_adjust(req->lpm, cdev->private->pgid_todo_mask); spid_do()
200 if (!req->lpm) spid_do()
203 if (req->lpm & sch->opm) spid_do()
228 struct ccw_request *req = &cdev->private->req; spid_callback() local
232 sch->vpm |= req->lpm & sch->opm; spid_callback()
236 cdev->private->path_noirq_mask |= req->lpm; spid_callback()
239 cdev->private->path_notoper_mask |= req->lpm; spid_callback()
253 req->lpm >>= 1; spid_callback()
266 struct ccw_request *req = &cdev->private->req; spid_start() local
269 memset(req, 0, sizeof(*req)); spid_start()
270 req->timeout = PGID_TIMEOUT; spid_start()
271 req->maxretries = PGID_RETRIES; spid_start()
272 req->lpm = 0x80; spid_start()
273 req->singlepath = 1; spid_start()
274 req->callback = spid_callback; spid_start()
435 struct ccw_request *req = &cdev->private->req; snid_build_cp() local
437 int i = 8 - ffs(req->lpm); snid_build_cp()
444 req->cp = cp; snid_build_cp()
453 struct ccw_request *req = &cdev->private->req; snid_do() local
456 req->lpm = lpm_adjust(req->lpm, sch->schib.pmcw.pam & snid_do()
458 if (!req->lpm) snid_do()
479 struct ccw_request *req = &cdev->private->req; snid_callback() local
483 cdev->private->pgid_valid_mask |= req->lpm; snid_callback()
487 cdev->private->path_noirq_mask |= req->lpm; snid_callback()
490 cdev->private->path_notoper_mask |= req->lpm; snid_callback()
496 req->lpm >>= 1; snid_callback()
510 struct ccw_request *req = &cdev->private->req; verify_start() local
523 memset(req, 0, sizeof(*req)); verify_start()
524 req->timeout = PGID_TIMEOUT; verify_start()
525 req->maxretries = PGID_RETRIES; verify_start()
526 req->lpm = 0x80; verify_start()
527 req->singlepath = 1; verify_start()
531 req->callback = snid_callback; verify_start()
536 req->filter = nop_filter; verify_start()
537 req->callback = nop_callback; verify_start()
600 struct ccw_request *req = &cdev->private->req; ccw_device_disband_start() local
606 memset(req, 0, sizeof(*req)); ccw_device_disband_start()
607 req->timeout = PGID_TIMEOUT; ccw_device_disband_start()
608 req->maxretries = PGID_RETRIES; ccw_device_disband_start()
609 req->lpm = sch->schib.pmcw.pam & sch->opm; ccw_device_disband_start()
610 req->singlepath = 1; ccw_device_disband_start()
611 req->callback = disband_callback; ccw_device_disband_start()
621 struct ccw_request *req = &cdev->private->req; stlck_build_cp() local
632 req->cp = cp; stlck_build_cp()
655 struct ccw_request *req = &cdev->private->req; ccw_device_stlck_start() local
660 memset(req, 0, sizeof(*req)); ccw_device_stlck_start()
661 req->timeout = PGID_TIMEOUT; ccw_device_stlck_start()
662 req->maxretries = PGID_RETRIES; ccw_device_stlck_start()
663 req->lpm = sch->schib.pmcw.pam & sch->opm; ccw_device_stlck_start()
664 req->data = data; ccw_device_stlck_start()
665 req->callback = stlck_callback; ccw_device_stlck_start()
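
The s390 CIO hits all prime cdev->private->req the same way before starting channel I/O: clear the struct ccw_request, set timeout and maxretries, pick the logical path mask (lpm), optionally force single-path mode, and install a callback of the form callback(cdev, data, rc); the channel program (req->cp) is built separately and ccw_request_start() then drives it. A sketch of that setup step with a hypothetical operation name; the subchannel is passed in to keep the sketch self-contained:

/* Sketch: priming a struct ccw_request as the PGID/NOP/disband paths do.
 * "my_op_start" is illustrative; building req->cp and calling
 * ccw_request_start(cdev) happen outside this sketch. */
static void my_op_start(struct ccw_device *cdev, struct subchannel *sch,
			void (*callback)(struct ccw_device *, void *, int))
{
	struct ccw_request *req = &cdev->private->req;

	memset(req, 0, sizeof(*req));
	req->timeout    = PGID_TIMEOUT;
	req->maxretries = PGID_RETRIES;
	req->lpm        = sch->schib.pmcw.pam & sch->opm;  /* available & operational paths */
	req->singlepath = 1;
	req->callback   = callback;
}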
/linux-4.1.27/drivers/video/fbdev/msm/
H A Dmdp_ppp.c92 static void rotate_dst_addr_x(struct mdp_blit_req *req, struct mdp_regs *regs) rotate_dst_addr_x() argument
94 regs->dst0 += (req->dst_rect.w - rotate_dst_addr_x()
95 min((uint32_t)16, req->dst_rect.w)) * regs->dst_bpp; rotate_dst_addr_x()
96 regs->dst1 += (req->dst_rect.w - rotate_dst_addr_x()
97 min((uint32_t)16, req->dst_rect.w)) * regs->dst_bpp; rotate_dst_addr_x()
100 static void rotate_dst_addr_y(struct mdp_blit_req *req, struct mdp_regs *regs) rotate_dst_addr_y() argument
102 regs->dst0 += (req->dst_rect.h - rotate_dst_addr_y()
103 min((uint32_t)16, req->dst_rect.h)) * rotate_dst_addr_y()
105 regs->dst1 += (req->dst_rect.h - rotate_dst_addr_y()
106 min((uint32_t)16, req->dst_rect.h)) * rotate_dst_addr_y()
110 static void blit_rotate(struct mdp_blit_req *req, blit_rotate() argument
113 if (req->flags == MDP_ROT_NOP) blit_rotate()
117 if ((req->flags & MDP_ROT_90 || req->flags & MDP_FLIP_LR) && blit_rotate()
118 !(req->flags & MDP_ROT_90 && req->flags & MDP_FLIP_LR)) blit_rotate()
119 rotate_dst_addr_x(req, regs); blit_rotate()
120 if (req->flags & MDP_ROT_90) blit_rotate()
122 if (req->flags & MDP_FLIP_UD) { blit_rotate()
124 rotate_dst_addr_y(req, regs); blit_rotate()
126 if (req->flags & MDP_FLIP_LR) blit_rotate()
130 static void blit_convert(struct mdp_blit_req *req, struct mdp_regs *regs) blit_convert() argument
132 if (req->src.format == req->dst.format) blit_convert()
134 if (IS_RGB(req->src.format) && IS_YCRCB(req->dst.format)) { blit_convert()
136 } else if (IS_YCRCB(req->src.format) && IS_RGB(req->dst.format)) { blit_convert()
138 if (req->dst.format == MDP_RGB_565) blit_convert()
145 static uint32_t transp_convert(struct mdp_blit_req *req) transp_convert() argument
148 if (req->src.format == MDP_RGB_565) { transp_convert()
151 transp |= ((GET_BIT_RANGE(req->transp_mask, 15, 11) << 3) | transp_convert()
152 (GET_BIT_RANGE(req->transp_mask, 15, 13))) << 16; transp_convert()
154 transp |= ((GET_BIT_RANGE(req->transp_mask, 4, 0) << 3) | transp_convert()
155 (GET_BIT_RANGE(req->transp_mask, 4, 2))) << 8; transp_convert()
157 transp |= (GET_BIT_RANGE(req->transp_mask, 10, 5) << 2) | transp_convert()
158 (GET_BIT_RANGE(req->transp_mask, 10, 9)); transp_convert()
161 transp |= (GET_BIT_RANGE(req->transp_mask, 15, 8)) | transp_convert()
162 (GET_BIT_RANGE(req->transp_mask, 23, 16) << 16) | transp_convert()
163 (GET_BIT_RANGE(req->transp_mask, 7, 0) << 8); transp_convert()
169 static void blit_blend(struct mdp_blit_req *req, struct mdp_regs *regs) blit_blend() argument
172 if (req->transp_mask != MDP_TRANSP_NOP) { blit_blend()
173 req->transp_mask = transp_convert(req); blit_blend()
174 if (req->alpha != MDP_ALPHA_NOP) { blit_blend()
191 req->alpha &= 0xff; blit_blend()
193 if (HAS_ALPHA(req->src.format)) { blit_blend()
196 } else if (req->alpha < MDP_ALPHA_NOP) { blit_blend()
203 regs->op |= bg_op_chroma[req->dst.format]; blit_blend()
336 static int get_edge_cond(struct mdp_blit_req *req, struct mdp_regs *regs) get_edge_cond() argument
352 if (req->flags & MDP_ROT_90) { get_edge_cond()
353 dst_w = req->dst_rect.h; get_edge_cond()
354 dst_h = req->dst_rect.w; get_edge_cond()
356 dst_w = req->dst_rect.w; get_edge_cond()
357 dst_h = req->dst_rect.h; get_edge_cond()
361 get_edge_info(req->src_rect.h, req->src_rect.y, dst_h, get_edge_cond()
364 get_edge_info(req->src_rect.w, req->src_rect.x, dst_w, get_edge_cond()
368 luma_interp[IMG_LEFT] = req->src_rect.x; get_edge_cond()
369 luma_interp[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1; get_edge_cond()
370 luma_interp[IMG_TOP] = req->src_rect.y; get_edge_cond()
371 luma_interp[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1; get_edge_cond()
383 chroma_bound[IMG_LEFT] = req->src_rect.x; get_edge_cond()
384 chroma_bound[IMG_RIGHT] = req->src_rect.x + req->src_rect.w - 1; get_edge_cond()
385 chroma_bound[IMG_TOP] = req->src_rect.y; get_edge_cond()
386 chroma_bound[IMG_BOTTOM] = req->src_rect.y + req->src_rect.h - 1; get_edge_cond()
388 if (IS_YCRCB(req->src.format)) { get_edge_cond()
396 if (req->src.format == MDP_Y_CBCR_H2V2 || get_edge_cond()
397 req->src.format == MDP_Y_CRCB_H2V2) { get_edge_cond()
435 static int blit_scale(const struct mdp_info *mdp, struct mdp_blit_req *req, blit_scale() argument
443 if (req->flags & MDP_ROT_90) { blit_scale()
444 dst_w = req->dst_rect.h; blit_scale()
445 dst_h = req->dst_rect.w; blit_scale()
447 dst_w = req->dst_rect.w; blit_scale()
448 dst_h = req->dst_rect.h; blit_scale()
450 if ((req->src_rect.w == dst_w) && (req->src_rect.h == dst_h) && blit_scale()
451 !(req->flags & MDP_BLUR)) { blit_scale()
459 if (scale_params(req->src_rect.w, dst_w, 1, &phase_init_x, blit_scale()
461 scale_params(req->src_rect.h, dst_h, 1, &phase_init_y, blit_scale()
465 scale_factor_x = (dst_w * 10) / req->src_rect.w; blit_scale()
466 scale_factor_y = (dst_h * 10) / req->src_rect.h; blit_scale()
503 static void blit_blur(const struct mdp_info *mdp, struct mdp_blit_req *req, blit_blur() argument
506 if (!(req->flags & MDP_BLUR)) blit_blur()
538 struct mdp_blit_req *req, struct mdp_regs *regs) valid_src_dst()
545 get_len(&req->src, &req->src_rect, regs->src_bpp, &src0_len, valid_src_dst()
547 get_len(&req->dst, &req->dst_rect, regs->dst_bpp, &dst0_len, valid_src_dst()
579 static void flush_imgs(struct mdp_blit_req *req, struct mdp_regs *regs, flush_imgs() argument
604 static int send_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, send_blit() argument
622 mdp_writel(mdp, (req->alpha << 24) | (req->transp_mask & 0xffffff), send_blit()
637 mdp_writel(mdp, src_img_cfg[req->dst.format], PPP_ADDR_BG_CFG); send_blit()
638 mdp_writel(mdp, pack_pattern[req->dst.format], send_blit()
641 flush_imgs(req, regs, src_file, dst_file); send_blit()
646 int mdp_ppp_blit(const struct mdp_info *mdp, struct mdp_blit_req *req, mdp_ppp_blit() argument
652 if (unlikely(req->src.format >= MDP_IMGTYPE_LIMIT || mdp_ppp_blit()
653 req->dst.format >= MDP_IMGTYPE_LIMIT)) { mdp_ppp_blit()
658 if (unlikely(req->src_rect.x > req->src.width || mdp_ppp_blit()
659 req->src_rect.y > req->src.height || mdp_ppp_blit()
660 req->dst_rect.x > req->dst.width || mdp_ppp_blit()
661 req->dst_rect.y > req->dst.height)) { mdp_ppp_blit()
667 regs.src_cfg = src_img_cfg[req->src.format]; mdp_ppp_blit()
668 regs.src_cfg |= (req->src_rect.x & 0x1) ? PPP_SRC_BPP_ROI_ODD_X : 0; mdp_ppp_blit()
669 regs.src_cfg |= (req->src_rect.y & 0x1) ? PPP_SRC_BPP_ROI_ODD_Y : 0; mdp_ppp_blit()
670 regs.src_rect = (req->src_rect.h << 16) | req->src_rect.w; mdp_ppp_blit()
671 regs.src_pack = pack_pattern[req->src.format]; mdp_ppp_blit()
674 regs.dst_cfg = dst_img_cfg[req->dst.format] | PPP_DST_OUT_SEL_AXI; mdp_ppp_blit()
675 regs.dst_rect = (req->dst_rect.h << 16) | req->dst_rect.w; mdp_ppp_blit()
676 regs.dst_pack = pack_pattern[req->dst.format]; mdp_ppp_blit()
679 regs.src_bpp = bytes_per_pixel[req->src.format]; mdp_ppp_blit()
680 regs.src0 = src_start + req->src.offset; mdp_ppp_blit()
681 regs.src_ystride = req->src.width * regs.src_bpp; mdp_ppp_blit()
682 get_chroma_addr(&req->src, &req->src_rect, regs.src0, regs.src_bpp, mdp_ppp_blit()
684 regs.src0 += (req->src_rect.x + (req->src_rect.y * req->src.width)) * mdp_ppp_blit()
688 regs.dst_bpp = bytes_per_pixel[req->dst.format]; mdp_ppp_blit()
689 regs.dst0 = dst_start + req->dst.offset; mdp_ppp_blit()
690 regs.dst_ystride = req->dst.width * regs.dst_bpp; mdp_ppp_blit()
691 get_chroma_addr(&req->dst, &req->dst_rect, regs.dst0, regs.dst_bpp, mdp_ppp_blit()
693 regs.dst0 += (req->dst_rect.x + (req->dst_rect.y * req->dst.width)) * mdp_ppp_blit()
696 if (!valid_src_dst(src_start, src_len, dst_start, dst_len, req, mdp_ppp_blit()
706 blit_rotate(req, &regs); mdp_ppp_blit()
707 blit_convert(req, &regs); mdp_ppp_blit()
708 if (req->flags & MDP_DITHER) mdp_ppp_blit()
710 blit_blend(req, &regs); mdp_ppp_blit()
711 if (blit_scale(mdp, req, &regs)) { mdp_ppp_blit()
715 blit_blur(mdp, req, &regs); mdp_ppp_blit()
716 regs.op |= dst_op_chroma[req->dst.format] | mdp_ppp_blit()
717 src_op_chroma[req->src.format]; mdp_ppp_blit()
720 if (unlikely(req->src.format == MDP_YCRYCB_H2V1)) { mdp_ppp_blit()
721 req->src_rect.x = req->src_rect.x & (~0x1); mdp_ppp_blit()
722 req->src_rect.w = req->src_rect.w & (~0x1); mdp_ppp_blit()
723 req->dst_rect.x = req->dst_rect.x & (~0x1); mdp_ppp_blit()
724 req->dst_rect.w = req->dst_rect.w & (~0x1); mdp_ppp_blit()
726 if (get_edge_cond(req, &regs)) mdp_ppp_blit()
729 send_blit(mdp, req, &regs, src_file, dst_file); mdp_ppp_blit()
536 valid_src_dst(unsigned long src_start, unsigned long src_len, unsigned long dst_start, unsigned long dst_len, struct mdp_blit_req *req, struct mdp_regs *regs) valid_src_dst() argument
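
[Note] The transp_convert() hits above widen a 16-bit RGB565 transparency color to per-channel 8-bit values by shifting each 5- or 6-bit field up and replicating its top bits into the vacated low bits. The helper below is a hedged user-space illustration of that bit replication only; the channel ordering the MDP hardware register actually expects is different and is not reproduced here.

#include <stdint.h>
#include <stdio.h>

static uint32_t rgb565_to_888(uint16_t c)
{
	uint32_t r5 = (c >> 11) & 0x1f;
	uint32_t g6 = (c >> 5)  & 0x3f;
	uint32_t b5 =  c        & 0x1f;
	uint32_t r8 = (r5 << 3) | (r5 >> 2);   /* replicate top 3 bits */
	uint32_t g8 = (g6 << 2) | (g6 >> 4);   /* replicate top 2 bits */
	uint32_t b8 = (b5 << 3) | (b5 >> 2);
	return (r8 << 16) | (g8 << 8) | b8;
}

int main(void)
{
	printf("0x%06x\n", rgb565_to_888(0xf81f));  /* magenta -> 0xff00ff */
	return 0;
}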
/linux-4.1.27/fs/ncpfs/
H A Dsock.c59 struct list_head req; member in struct:ncp_request_reply
76 struct ncp_request_reply *req; ncp_alloc_req() local
78 req = kmalloc(sizeof(struct ncp_request_reply), GFP_KERNEL); ncp_alloc_req()
79 if (!req) ncp_alloc_req()
82 init_waitqueue_head(&req->wq); ncp_alloc_req()
83 atomic_set(&req->refs, (1)); ncp_alloc_req()
84 req->status = RQ_IDLE; ncp_alloc_req()
86 return req; ncp_alloc_req()
89 static void ncp_req_get(struct ncp_request_reply *req) ncp_req_get() argument
91 atomic_inc(&req->refs); ncp_req_get()
94 static void ncp_req_put(struct ncp_request_reply *req) ncp_req_put() argument
96 if (atomic_dec_and_test(&req->refs)) ncp_req_put()
97 kfree(req); ncp_req_put()
134 static inline void ncp_finish_request(struct ncp_server *server, struct ncp_request_reply *req, int result) ncp_finish_request() argument
136 req->result = result; ncp_finish_request()
137 if (req->status != RQ_ABANDONED) ncp_finish_request()
138 memcpy(req->reply_buf, server->rxbuf, req->datalen); ncp_finish_request()
139 req->status = RQ_DONE; ncp_finish_request()
140 wake_up_all(&req->wq); ncp_finish_request()
141 ncp_req_put(req); ncp_finish_request()
146 struct ncp_request_reply *req; __abort_ncp_connection() local
151 req = list_entry(server->tx.requests.next, struct ncp_request_reply, req); __abort_ncp_connection()
153 list_del_init(&req->req); __abort_ncp_connection()
154 ncp_finish_request(server, req, -EIO); __abort_ncp_connection()
156 req = server->rcv.creq; __abort_ncp_connection()
157 if (req) { __abort_ncp_connection()
159 ncp_finish_request(server, req, -EIO); __abort_ncp_connection()
163 req = server->tx.creq; __abort_ncp_connection()
164 if (req) { __abort_ncp_connection()
166 ncp_finish_request(server, req, -EIO); __abort_ncp_connection()
175 static inline void __ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) __ncp_abort_request() argument
177 /* If req is done, we got signal, but we also received answer... */ __ncp_abort_request()
178 switch (req->status) { __ncp_abort_request()
183 list_del_init(&req->req); __ncp_abort_request()
184 ncp_finish_request(server, req, err); __ncp_abort_request()
187 req->status = RQ_ABANDONED; __ncp_abort_request()
194 static inline void ncp_abort_request(struct ncp_server *server, struct ncp_request_reply *req, int err) ncp_abort_request() argument
197 __ncp_abort_request(server, req, err); ncp_abort_request()
206 static int ncpdgram_send(struct socket *sock, struct ncp_request_reply *req) ncpdgram_send() argument
210 memcpy(vec, req->tx_ciov, req->tx_iovlen * sizeof(vec[0])); ncpdgram_send()
211 return do_send(sock, vec, req->tx_iovlen, ncpdgram_send()
212 req->tx_totallen, MSG_DONTWAIT); ncpdgram_send()
256 static inline void ncp_init_header(struct ncp_server *server, struct ncp_request_reply *req, struct ncp_request_header *h) ncp_init_header() argument
258 req->status = RQ_INPROGRESS; ncp_init_header()
264 static void ncpdgram_start_request(struct ncp_server *server, struct ncp_request_reply *req) ncpdgram_start_request() argument
269 req->tx_ciov = req->tx_iov + 1; ncpdgram_start_request()
271 h = req->tx_iov[1].iov_base; ncpdgram_start_request()
272 ncp_init_header(server, req, h); ncpdgram_start_request()
273 signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, ncpdgram_start_request()
274 req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1, ncpdgram_start_request()
275 cpu_to_le32(req->tx_totallen), req->sign); ncpdgram_start_request()
277 req->tx_ciov[1].iov_base = req->sign; ncpdgram_start_request()
278 req->tx_ciov[1].iov_len = signlen; ncpdgram_start_request()
279 req->tx_iovlen += 1; ncpdgram_start_request()
280 req->tx_totallen += signlen; ncpdgram_start_request()
282 server->rcv.creq = req; ncpdgram_start_request()
285 ncpdgram_send(server->ncp_sock, req); ncpdgram_start_request()
293 static void ncptcp_start_request(struct ncp_server *server, struct ncp_request_reply *req) ncptcp_start_request() argument
298 req->tx_ciov = req->tx_iov; ncptcp_start_request()
299 h = req->tx_iov[1].iov_base; ncptcp_start_request()
300 ncp_init_header(server, req, h); ncptcp_start_request()
301 signlen = sign_packet(server, req->tx_iov[1].iov_base + sizeof(struct ncp_request_header) - 1, ncptcp_start_request()
302 req->tx_iov[1].iov_len - sizeof(struct ncp_request_header) + 1, ncptcp_start_request()
303 cpu_to_be32(req->tx_totallen + 24), req->sign + 4) + 16; ncptcp_start_request()
305 req->sign[0] = htonl(NCP_TCP_XMIT_MAGIC); ncptcp_start_request()
306 req->sign[1] = htonl(req->tx_totallen + signlen); ncptcp_start_request()
307 req->sign[2] = htonl(NCP_TCP_XMIT_VERSION); ncptcp_start_request()
308 req->sign[3] = htonl(req->datalen + 8); ncptcp_start_request()
309 req->tx_iov[0].iov_base = req->sign; ncptcp_start_request()
310 req->tx_iov[0].iov_len = signlen; ncptcp_start_request()
311 req->tx_iovlen += 1; ncptcp_start_request()
312 req->tx_totallen += signlen; ncptcp_start_request()
314 server->tx.creq = req; ncptcp_start_request()
318 static inline void __ncp_start_request(struct ncp_server *server, struct ncp_request_reply *req) __ncp_start_request() argument
322 memcpy(server->txbuf, req->tx_iov[1].iov_base, req->tx_iov[1].iov_len); __ncp_start_request()
323 req->tx_iov[1].iov_base = server->txbuf; __ncp_start_request()
326 ncptcp_start_request(server, req); __ncp_start_request()
328 ncpdgram_start_request(server, req); __ncp_start_request()
331 static int ncp_add_request(struct ncp_server *server, struct ncp_request_reply *req) ncp_add_request() argument
339 ncp_req_get(req); ncp_add_request()
341 req->status = RQ_QUEUED; ncp_add_request()
342 list_add_tail(&req->req, &server->tx.requests); ncp_add_request()
346 __ncp_start_request(server, req); ncp_add_request()
353 struct ncp_request_reply *req; __ncp_next_request() local
359 req = list_entry(server->tx.requests.next, struct ncp_request_reply, req); __ncp_next_request()
360 list_del_init(&req->req); __ncp_next_request()
361 __ncp_start_request(server, req); __ncp_next_request()
399 struct ncp_request_reply *req; ncpdgram_rcv_proc() local
433 req = server->rcv.creq; ncpdgram_rcv_proc()
434 if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && ncpdgram_rcv_proc()
441 result = _recv(sock, server->rxbuf, req->datalen, MSG_DONTWAIT); ncpdgram_rcv_proc()
443 if (result >= 0 && server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) { ncpdgram_rcv_proc()
460 ncp_finish_request(server, req, result); ncpdgram_rcv_proc()
477 struct ncp_request_reply* req; __ncpdgram_timeout_proc() local
479 req = server->rcv.creq; __ncpdgram_timeout_proc()
480 if (req) { __ncpdgram_timeout_proc()
485 __ncp_abort_request(server, req, -ETIMEDOUT); __ncpdgram_timeout_proc()
490 ncpdgram_send(server->ncp_sock, req); __ncpdgram_timeout_proc()
539 struct ncp_request_reply *req; __ncptcp_rcv_proc() local
549 req = server->rcv.creq; __ncptcp_rcv_proc()
550 if (req) { __ncptcp_rcv_proc()
551 __ncp_abort_request(server, req, -EIO); __ncptcp_rcv_proc()
616 req = server->rcv.creq; __ncptcp_rcv_proc()
617 if (!req) { __ncptcp_rcv_proc()
621 if (datalen > req->datalen + 8) { __ncptcp_rcv_proc()
622 pr_err("tcp: Unexpected reply len %d (expected at most %Zd)\n", datalen, req->datalen + 8); __ncptcp_rcv_proc()
626 req->datalen = datalen - 8; __ncptcp_rcv_proc()
639 req = server->rcv.creq; __ncptcp_rcv_proc()
640 if (req->tx_type != NCP_ALLOC_SLOT_REQUEST) { __ncptcp_rcv_proc()
643 __ncp_abort_request(server, req, -EIO); __ncptcp_rcv_proc()
648 __ncp_abort_request(server, req, -EIO); __ncptcp_rcv_proc()
653 if (server->sign_active && req->tx_type != NCP_DEALLOC_SLOT_REQUEST) { __ncptcp_rcv_proc()
654 if (sign_verify_reply(server, server->rxbuf + 6, req->datalen - 6, cpu_to_be32(req->datalen + 16), &server->rcv.buf.type)) { __ncptcp_rcv_proc()
656 __ncp_abort_request(server, req, -EIO); __ncptcp_rcv_proc()
661 ncp_finish_request(server, req, req->datalen); __ncptcp_rcv_proc()
704 struct ncp_request_reply *req; do_ncp_rpc_call() local
706 req = ncp_alloc_req(); do_ncp_rpc_call()
707 if (!req) do_ncp_rpc_call()
710 req->reply_buf = reply_buf; do_ncp_rpc_call()
711 req->datalen = max_reply_size; do_ncp_rpc_call()
712 req->tx_iov[1].iov_base = server->packet; do_ncp_rpc_call()
713 req->tx_iov[1].iov_len = size; do_ncp_rpc_call()
714 req->tx_iovlen = 1; do_ncp_rpc_call()
715 req->tx_totallen = size; do_ncp_rpc_call()
716 req->tx_type = *(u_int16_t*)server->packet; do_ncp_rpc_call()
718 result = ncp_add_request(server, req); do_ncp_rpc_call()
722 if (wait_event_interruptible(req->wq, req->status == RQ_DONE)) { do_ncp_rpc_call()
723 ncp_abort_request(server, req, -EINTR); do_ncp_rpc_call()
728 result = req->result; do_ncp_rpc_call()
731 ncp_req_put(req); do_ncp_rpc_call()
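
[Note] The ncp_alloc_req()/ncp_req_get()/ncp_req_put() hits above show a reference-counted request lifecycle: allocation takes one reference, queueing for transmission takes another, and the object is freed only when the last put drops the count to zero. A minimal user-space sketch of that lifecycle, with invented names and C11 atomics instead of the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct request {
	atomic_int refs;
	int status;
	int result;
};

static struct request *req_alloc(void)
{
	struct request *req = calloc(1, sizeof(*req));
	if (req)
		atomic_store(&req->refs, 1);    /* caller's reference */
	return req;
}

static void req_get(struct request *req)
{
	atomic_fetch_add(&req->refs, 1);
}

static void req_put(struct request *req)
{
	if (atomic_fetch_sub(&req->refs, 1) == 1)   /* last reference gone */
		free(req);
}

int main(void)
{
	struct request *req = req_alloc();

	req_get(req);   /* e.g. the transmit queue holds a second reference */
	req_put(req);   /* queue side finished with the request */
	req_put(req);   /* caller drops its reference; object is freed here */
	printf("done\n");
	return 0;
}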
/linux-4.1.27/drivers/media/usb/dvb-usb-v2/
H A Dec168.c28 static int ec168_ctrl_msg(struct dvb_usb_device *d, struct ec168_req *req) ec168_ctrl_msg() argument
35 switch (req->cmd) { ec168_ctrl_msg()
41 request = req->cmd; ec168_ctrl_msg()
45 request = req->cmd; ec168_ctrl_msg()
65 KBUILD_MODNAME, req->cmd); ec168_ctrl_msg()
70 buf = kmalloc(req->size, GFP_KERNEL); ec168_ctrl_msg()
78 memcpy(buf, req->data, req->size); ec168_ctrl_msg()
87 ret = usb_control_msg(d->udev, pipe, request, requesttype, req->value, ec168_ctrl_msg()
88 req->index, buf, req->size, EC168_USB_TIMEOUT); ec168_ctrl_msg()
90 dvb_usb_dbg_usb_control_msg(d->udev, request, requesttype, req->value, ec168_ctrl_msg()
91 req->index, buf, req->size); ec168_ctrl_msg()
100 memcpy(req->data, buf, req->size); ec168_ctrl_msg()
119 struct ec168_req req; ec168_i2c_xfer() local
132 req.cmd = READ_DEMOD; ec168_i2c_xfer()
133 req.value = 0; ec168_i2c_xfer()
134 req.index = 0xff00 + msg[i].buf[0]; /* reg */ ec168_i2c_xfer()
135 req.size = msg[i+1].len; /* bytes to read */ ec168_i2c_xfer()
136 req.data = &msg[i+1].buf[0]; ec168_i2c_xfer()
137 ret = ec168_ctrl_msg(d, &req); ec168_i2c_xfer()
148 req.cmd = WRITE_DEMOD; ec168_i2c_xfer()
149 req.value = msg[i].buf[1]; /* val */ ec168_i2c_xfer()
150 req.index = 0xff00 + msg[i].buf[0]; /* reg */ ec168_i2c_xfer()
151 req.size = 0; ec168_i2c_xfer()
152 req.data = NULL; ec168_i2c_xfer()
153 ret = ec168_ctrl_msg(d, &req); ec168_i2c_xfer()
156 req.cmd = WRITE_I2C; ec168_i2c_xfer()
157 req.value = msg[i].buf[0]; /* val */ ec168_i2c_xfer()
158 req.index = 0x0100 + msg[i].addr; /* I2C addr */ ec168_i2c_xfer()
159 req.size = msg[i].len-1; ec168_i2c_xfer()
160 req.data = &msg[i].buf[1]; ec168_i2c_xfer()
161 ret = ec168_ctrl_msg(d, &req); ec168_i2c_xfer()
191 struct ec168_req req = {GET_CONFIG, 0, 1, sizeof(reply), &reply}; ec168_identify_state() local
194 ret = ec168_ctrl_msg(d, &req); ec168_identify_state()
215 struct ec168_req req = {DOWNLOAD_FIRMWARE, 0, 0, 0, NULL}; ec168_download_firmware() local
224 req.size = len; ec168_download_firmware()
225 req.data = (u8 *) &fw->data[fw->size - remaining]; ec168_download_firmware()
226 req.index = fw->size - remaining; ec168_download_firmware()
228 ret = ec168_ctrl_msg(d, &req); ec168_download_firmware()
237 req.size = 0; ec168_download_firmware()
240 req.cmd = SET_CONFIG; ec168_download_firmware()
241 req.value = 0; ec168_download_firmware()
242 req.index = 0x0001; ec168_download_firmware()
243 ret = ec168_ctrl_msg(d, &req); ec168_download_firmware()
248 req.cmd = GPIO; ec168_download_firmware()
249 req.value = 0; ec168_download_firmware()
250 req.index = 0x0206; ec168_download_firmware()
251 ret = ec168_ctrl_msg(d, &req); ec168_download_firmware()
256 req.cmd = WRITE_I2C; ec168_download_firmware()
257 req.value = 0; ec168_download_firmware()
258 req.index = 0x00c6; ec168_download_firmware()
259 ret = ec168_ctrl_msg(d, &req); ec168_download_firmware()
315 struct ec168_req req = {STREAMING_CTRL, 0x7f01, 0x0202, 0, NULL}; ec168_streaming_ctrl() local
319 req.index = 0x0102; ec168_streaming_ctrl()
320 return ec168_ctrl_msg(d, &req); ec168_streaming_ctrl()
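
[Note] Throughout ec168.c the callers fill a small request descriptor (cmd, value, index, payload) and ec168_ctrl_msg() maps it onto one vendor control transfer, copying the payload through a separately allocated buffer so the answer can be copied back for reads. The sketch below mirrors that flow in user space; send_ctrl() is a stand-in for usb_control_msg() and all names are illustrative, not the dvb-usb API.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct ctrl_req {
	uint8_t  cmd;
	uint16_t value;
	uint16_t index;
	uint16_t size;
	uint8_t *data;
	int      read;          /* nonzero: device-to-host transfer */
};

static int send_ctrl(const struct ctrl_req *req, uint8_t *buf)
{
	printf("%s cmd=0x%02x value=0x%04x index=0x%04x len=%u\n",
	       req->read ? "IN " : "OUT", req->cmd, req->value,
	       req->index, req->size);
	if (req->read)
		memset(buf, 0xab, req->size);   /* pretend the device answered */
	return 0;
}

static int ctrl_msg(struct ctrl_req *req)
{
	uint8_t *buf = malloc(req->size ? req->size : 1);
	int ret;

	if (!buf)
		return -1;
	if (!req->read && req->size)
		memcpy(buf, req->data, req->size);   /* host-to-device payload */
	ret = send_ctrl(req, buf);
	if (!ret && req->read && req->size)
		memcpy(req->data, buf, req->size);   /* copy the answer back */
	free(buf);
	return ret;
}

int main(void)
{
	uint8_t reg;
	struct ctrl_req req = { .cmd = 1, .index = 0xff00, .size = 1,
				.data = &reg, .read = 1 };

	return ctrl_msg(&req);
}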
H A Dce6230.c26 static int ce6230_ctrl_msg(struct dvb_usb_device *d, struct usb_req *req) ce6230_ctrl_msg() argument
36 request = req->cmd; ce6230_ctrl_msg()
37 value = req->value; ce6230_ctrl_msg()
38 index = req->index; ce6230_ctrl_msg()
40 switch (req->cmd) { ce6230_ctrl_msg()
53 KBUILD_MODNAME, req->cmd); ce6230_ctrl_msg()
58 buf = kmalloc(req->data_len, GFP_KERNEL); ce6230_ctrl_msg()
66 memcpy(buf, req->data, req->data_len); ce6230_ctrl_msg()
76 buf, req->data_len, CE6230_USB_TIMEOUT); ce6230_ctrl_msg()
79 buf, req->data_len); ce6230_ctrl_msg()
89 memcpy(req->data, buf, req->data_len); ce6230_ctrl_msg()
104 struct usb_req req; ce6230_i2c_master_xfer() local
109 memset(&req, 0, sizeof(req)); ce6230_i2c_master_xfer()
118 req.cmd = DEMOD_READ; ce6230_i2c_master_xfer()
119 req.value = msg[i].addr >> 1; ce6230_i2c_master_xfer()
120 req.index = msg[i].buf[0]; ce6230_i2c_master_xfer()
121 req.data_len = msg[i+1].len; ce6230_i2c_master_xfer()
122 req.data = &msg[i+1].buf[0]; ce6230_i2c_master_xfer()
123 ret = ce6230_ctrl_msg(d, &req); ce6230_i2c_master_xfer()
134 req.cmd = DEMOD_WRITE; ce6230_i2c_master_xfer()
135 req.value = msg[i].addr >> 1; ce6230_i2c_master_xfer()
136 req.index = msg[i].buf[0]; ce6230_i2c_master_xfer()
137 req.data_len = msg[i].len-1; ce6230_i2c_master_xfer()
138 req.data = &msg[i].buf[1]; ce6230_i2c_master_xfer()
139 ret = ce6230_ctrl_msg(d, &req); ce6230_i2c_master_xfer()
141 req.cmd = I2C_WRITE; ce6230_i2c_master_xfer()
142 req.value = 0x2000 + (msg[i].addr >> 1); ce6230_i2c_master_xfer()
143 req.index = 0x0000; ce6230_i2c_master_xfer()
144 req.data_len = msg[i].len; ce6230_i2c_master_xfer()
145 req.data = &msg[i].buf[0]; ce6230_i2c_master_xfer()
146 ret = ce6230_ctrl_msg(d, &req); ce6230_i2c_master_xfer()
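
[Note] ce6230_i2c_master_xfer() above pairs an I2C write message carrying a register number with the read message that follows it, collapsing the pair into one register-read request (value = chip address, index = register, data = the caller's read buffer). A hedged sketch of that pairing; the structs and demod_read() are illustrative stand-ins, not the driver's types.

#include <stdint.h>
#include <stdio.h>

struct i2c_msg {
	uint8_t addr;
	int     read;
	uint8_t *buf;
	int     len;
};

static int demod_read(uint8_t chip, uint8_t reg, uint8_t *dst, int len)
{
	for (int i = 0; i < len; i++)
		dst[i] = 0x5a;              /* pretend the demodulator answered */
	printf("read chip 0x%02x reg 0x%02x len %d\n", chip, reg, len);
	return 0;
}

/* handle msgs[0] (write: register number) + msgs[1] (read: result) */
static int xfer_pair(const struct i2c_msg *w, const struct i2c_msg *r)
{
	return demod_read(w->addr >> 1, w->buf[0], r->buf, r->len);
}

int main(void)
{
	uint8_t reg = 0x10, val;
	struct i2c_msg w = { .addr = 0xc8, .read = 0, .buf = &reg, .len = 1 };
	struct i2c_msg r = { .addr = 0xc8, .read = 1, .buf = &val, .len = 1 };

	xfer_pair(&w, &r);
	printf("value 0x%02x\n", val);
	return 0;
}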
H A Daf9015.c31 static int af9015_ctrl_msg(struct dvb_usb_device *d, struct req_t *req) af9015_ctrl_msg() argument
41 state->buf[0] = req->cmd; af9015_ctrl_msg()
43 state->buf[2] = req->i2c_addr; af9015_ctrl_msg()
44 state->buf[3] = req->addr >> 8; af9015_ctrl_msg()
45 state->buf[4] = req->addr & 0xff; af9015_ctrl_msg()
46 state->buf[5] = req->mbox; af9015_ctrl_msg()
47 state->buf[6] = req->addr_len; af9015_ctrl_msg()
48 state->buf[7] = req->data_len; af9015_ctrl_msg()
50 switch (req->cmd) { af9015_ctrl_msg()
63 if (((req->addr & 0xff00) == 0xff00) || af9015_ctrl_msg()
64 ((req->addr & 0xff00) == 0xae00)) af9015_ctrl_msg()
73 KBUILD_MODNAME, req->cmd); af9015_ctrl_msg()
79 if ((write && (req->data_len > BUF_LEN - REQ_HDR_LEN)) || af9015_ctrl_msg()
80 (!write && (req->data_len > BUF_LEN - ACK_HDR_LEN))) { af9015_ctrl_msg()
82 KBUILD_MODNAME, req->cmd, req->data_len); af9015_ctrl_msg()
92 wlen += req->data_len; af9015_ctrl_msg()
93 memcpy(&state->buf[REQ_HDR_LEN], req->data, req->data_len); af9015_ctrl_msg()
95 rlen += req->data_len; af9015_ctrl_msg()
99 if (req->cmd == DOWNLOAD_FIRMWARE || req->cmd == RECONNECT_USB) af9015_ctrl_msg()
117 memcpy(req->data, &state->buf[ACK_HDR_LEN], req->data_len); af9015_ctrl_msg()
127 struct req_t req = {WRITE_MEMORY, AF9015_I2C_DEMOD, addr, 0, 0, len, af9015_write_regs() local
129 return af9015_ctrl_msg(d, &req); af9015_write_regs()
134 struct req_t req = {READ_MEMORY, AF9015_I2C_DEMOD, addr, 0, 0, len, af9015_read_regs() local
136 return af9015_ctrl_msg(d, &req); af9015_read_regs()
153 struct req_t req = {WRITE_I2C, addr, reg, 1, 1, 1, &val}; af9015_write_reg_i2c() local
157 req.addr_len = 3; af9015_write_reg_i2c()
159 return af9015_ctrl_msg(d, &req); af9015_write_reg_i2c()
166 struct req_t req = {READ_I2C, addr, reg, 0, 1, 1, val}; af9015_read_reg_i2c() local
170 req.addr_len = 3; af9015_read_reg_i2c()
172 return af9015_ctrl_msg(d, &req); af9015_read_reg_i2c()
215 struct req_t req; af9015_i2c_xfer() local
261 req.cmd = READ_MEMORY; af9015_i2c_xfer()
263 req.cmd = READ_I2C; af9015_i2c_xfer()
264 req.i2c_addr = msg[i].addr; af9015_i2c_xfer()
265 req.addr = addr; af9015_i2c_xfer()
266 req.mbox = mbox; af9015_i2c_xfer()
267 req.addr_len = addr_len; af9015_i2c_xfer()
268 req.data_len = msg[i+1].len; af9015_i2c_xfer()
269 req.data = &msg[i+1].buf[0]; af9015_i2c_xfer()
270 ret = af9015_ctrl_msg(d, &req); af9015_i2c_xfer()
281 req.cmd = READ_I2C; af9015_i2c_xfer()
282 req.i2c_addr = msg[i].addr; af9015_i2c_xfer()
283 req.addr = addr; af9015_i2c_xfer()
284 req.mbox = mbox; af9015_i2c_xfer()
285 req.addr_len = addr_len; af9015_i2c_xfer()
286 req.data_len = msg[i].len; af9015_i2c_xfer()
287 req.data = &msg[i].buf[0]; af9015_i2c_xfer()
288 ret = af9015_ctrl_msg(d, &req); af9015_i2c_xfer()
296 req.cmd = WRITE_MEMORY; af9015_i2c_xfer()
298 req.cmd = WRITE_I2C; af9015_i2c_xfer()
299 req.i2c_addr = msg[i].addr; af9015_i2c_xfer()
300 req.addr = addr; af9015_i2c_xfer()
301 req.mbox = mbox; af9015_i2c_xfer()
302 req.addr_len = addr_len; af9015_i2c_xfer()
303 req.data_len = msg[i].len-addr_len; af9015_i2c_xfer()
304 req.data = &msg[i].buf[addr_len]; af9015_i2c_xfer()
305 ret = af9015_ctrl_msg(d, &req); af9015_i2c_xfer()
334 struct req_t req = {GET_CONFIG, 0, 0, 0, 0, 1, &reply}; af9015_identify_state() local
336 ret = af9015_ctrl_msg(d, &req); af9015_identify_state()
355 struct req_t req = {DOWNLOAD_FIRMWARE, 0, 0, 0, 0, 0, NULL}; af9015_download_firmware() local
373 req.data_len = len; af9015_download_firmware()
374 req.data = (u8 *) &fw->data[fw->size - remaining]; af9015_download_firmware()
375 req.addr = FW_ADDR + fw->size - remaining; af9015_download_firmware()
377 ret = af9015_ctrl_msg(d, &req); af9015_download_firmware()
387 req.cmd = BOOT; af9015_download_firmware()
388 req.data_len = 0; af9015_download_firmware()
389 ret = af9015_ctrl_msg(d, &req); af9015_download_firmware()
408 struct req_t req = {READ_I2C, AF9015_I2C_EEPROM, 0, 0, 1, 1, NULL}; af9015_eeprom_hash() local
412 req.addr = i; af9015_eeprom_hash()
413 req.data = &buf[i]; af9015_eeprom_hash()
414 ret = af9015_ctrl_msg(d, &req); af9015_eeprom_hash()
441 struct req_t req = {READ_I2C, AF9015_I2C_EEPROM, 0, 0, 1, 1, &val}; af9015_read_config() local
446 req.addr = AF9015_EEPROM_IR_MODE; af9015_read_config()
449 ret = af9015_ctrl_msg(d, &req); af9015_read_config()
464 req.addr = AF9015_EEPROM_TS_MODE; af9015_read_config()
465 ret = af9015_ctrl_msg(d, &req); af9015_read_config()
478 req.addr = AF9015_EEPROM_DEMOD2_I2C; af9015_read_config()
479 ret = af9015_ctrl_msg(d, &req); af9015_read_config()
490 req.addr = AF9015_EEPROM_XTAL_TYPE1 + offset; af9015_read_config()
491 ret = af9015_ctrl_msg(d, &req); af9015_read_config()
513 req.addr = AF9015_EEPROM_IF1H + offset; af9015_read_config()
514 ret = af9015_ctrl_msg(d, &req); af9015_read_config()
520 req.addr = AF9015_EEPROM_IF1L + offset; af9015_read_config()
521 ret = af9015_ctrl_msg(d, &req); af9015_read_config()
531 req.addr = AF9015_EEPROM_MT2060_IF1H + offset; af9015_read_config()
532 ret = af9015_ctrl_msg(d, &req); af9015_read_config()
536 req.addr = AF9015_EEPROM_MT2060_IF1L + offset; af9015_read_config()
537 ret = af9015_ctrl_msg(d, &req); af9015_read_config()
545 req.addr = AF9015_EEPROM_TUNER_ID1 + offset; af9015_read_config()
546 ret = af9015_ctrl_msg(d, &req); af9015_read_config()
729 struct req_t req = {COPY_FIRMWARE, 0, 0x5100, 0, 0, sizeof(fw_params), af9015_copy_firmware() local
760 ret = af9015_ctrl_msg(d, &req); af9015_copy_firmware()
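
[Note] The top of af9015_ctrl_msg() above packs a fixed header into the transfer buffer (command, I2C address, a 16-bit register address split into high/low bytes, address length, payload length), appends the payload, and rejects requests whose payload would not fit. The sketch below shows that packing and bounds check in user space; the buffer size, offsets and the omitted sequence-number/mailbox bytes are illustrative, not the driver's exact wire format.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BUF_LEN     63
#define REQ_HDR_LEN 8

static int pack_req(uint8_t *buf, uint8_t cmd, uint8_t i2c_addr,
		    uint16_t addr, uint8_t addr_len,
		    const uint8_t *data, uint8_t data_len)
{
	if (data_len > BUF_LEN - REQ_HDR_LEN)
		return -1;                      /* payload would overflow */
	buf[0] = cmd;
	buf[2] = i2c_addr;
	buf[3] = addr >> 8;                     /* register address, high byte */
	buf[4] = addr & 0xff;                   /* register address, low byte */
	buf[6] = addr_len;
	buf[7] = data_len;
	memcpy(&buf[REQ_HDR_LEN], data, data_len);
	return REQ_HDR_LEN + data_len;
}

int main(void)
{
	uint8_t buf[BUF_LEN] = { 0 }, val = 0x01;
	int wlen = pack_req(buf, 0x03, 0x38, 0xd417, 2, &val, 1);

	printf("wire length %d, first bytes %02x %02x %02x\n",
	       wlen, buf[0], buf[3], buf[4]);
	return 0;
}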
/linux-4.1.27/crypto/
H A Dgcm.c63 void (*complete)(struct aead_request *req, int err);
86 struct aead_request *req) crypto_gcm_reqctx()
88 unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); crypto_gcm_reqctx()
90 return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); crypto_gcm_reqctx()
93 static void crypto_gcm_setkey_done(struct crypto_async_request *req, int err) crypto_gcm_setkey_done() argument
95 struct crypto_gcm_setkey_result *result = req->data; crypto_gcm_setkey_done()
117 struct ablkcipher_request req; crypto_gcm_setkey() member in struct:__anon3257
139 ablkcipher_request_set_tfm(&data->req, ctr); crypto_gcm_setkey()
140 ablkcipher_request_set_callback(&data->req, CRYPTO_TFM_REQ_MAY_SLEEP | crypto_gcm_setkey()
144 ablkcipher_request_set_crypt(&data->req, data->sg, data->sg, crypto_gcm_setkey()
147 err = crypto_ablkcipher_encrypt(&data->req); crypto_gcm_setkey()
190 struct aead_request *req, crypto_gcm_init_crypt()
193 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_gcm_init_crypt()
195 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); crypto_gcm_init_crypt()
200 memcpy(req->iv + 12, &counter, 4); crypto_gcm_init_crypt()
204 scatterwalk_sg_chain(pctx->src, 2, req->src); crypto_gcm_init_crypt()
207 if (req->src != req->dst) { crypto_gcm_init_crypt()
210 scatterwalk_sg_chain(pctx->dst, 2, req->dst); crypto_gcm_init_crypt()
217 req->iv); crypto_gcm_init_crypt()
229 static int gcm_hash_update(struct aead_request *req, gcm_hash_update() argument
237 ahash_request_set_callback(ahreq, aead_request_flags(req), gcm_hash_update()
238 compl, req); gcm_hash_update()
244 static int gcm_hash_remain(struct aead_request *req, gcm_hash_remain() argument
251 ahash_request_set_callback(ahreq, aead_request_flags(req), gcm_hash_remain()
252 compl, req); gcm_hash_remain()
259 static int gcm_hash_len(struct aead_request *req, gcm_hash_len() argument
266 lengths.a = cpu_to_be64(req->assoclen * 8); gcm_hash_len()
270 ahash_request_set_callback(ahreq, aead_request_flags(req), gcm_hash_len()
271 gcm_hash_len_done, req); gcm_hash_len()
278 static int gcm_hash_final(struct aead_request *req, gcm_hash_final() argument
283 ahash_request_set_callback(ahreq, aead_request_flags(req), gcm_hash_final()
284 gcm_hash_final_done, req); gcm_hash_final()
290 static void __gcm_hash_final_done(struct aead_request *req, int err) __gcm_hash_final_done() argument
292 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); __gcm_hash_final_done()
298 gctx->complete(req, err); __gcm_hash_final_done()
303 struct aead_request *req = areq->data; gcm_hash_final_done() local
305 __gcm_hash_final_done(req, err); gcm_hash_final_done()
308 static void __gcm_hash_len_done(struct aead_request *req, int err) __gcm_hash_len_done() argument
310 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); __gcm_hash_len_done()
313 err = gcm_hash_final(req, pctx); __gcm_hash_len_done()
318 __gcm_hash_final_done(req, err); __gcm_hash_len_done()
323 struct aead_request *req = areq->data; gcm_hash_len_done() local
325 __gcm_hash_len_done(req, err); gcm_hash_len_done()
328 static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err) __gcm_hash_crypt_remain_done() argument
330 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); __gcm_hash_crypt_remain_done()
333 err = gcm_hash_len(req, pctx); __gcm_hash_crypt_remain_done()
338 __gcm_hash_len_done(req, err); __gcm_hash_crypt_remain_done()
344 struct aead_request *req = areq->data; gcm_hash_crypt_remain_done() local
346 __gcm_hash_crypt_remain_done(req, err); gcm_hash_crypt_remain_done()
349 static void __gcm_hash_crypt_done(struct aead_request *req, int err) __gcm_hash_crypt_done() argument
351 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); __gcm_hash_crypt_done()
358 err = gcm_hash_remain(req, pctx, remain, __gcm_hash_crypt_done()
364 __gcm_hash_crypt_remain_done(req, err); __gcm_hash_crypt_done()
369 struct aead_request *req = areq->data; gcm_hash_crypt_done() local
371 __gcm_hash_crypt_done(req, err); gcm_hash_crypt_done()
374 static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err) __gcm_hash_assoc_remain_done() argument
376 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); __gcm_hash_assoc_remain_done()
385 err = gcm_hash_update(req, pctx, compl, __gcm_hash_assoc_remain_done()
392 __gcm_hash_crypt_done(req, err); __gcm_hash_assoc_remain_done()
394 __gcm_hash_crypt_remain_done(req, err); __gcm_hash_assoc_remain_done()
400 struct aead_request *req = areq->data; gcm_hash_assoc_remain_done() local
402 __gcm_hash_assoc_remain_done(req, err); gcm_hash_assoc_remain_done()
405 static void __gcm_hash_assoc_done(struct aead_request *req, int err) __gcm_hash_assoc_done() argument
407 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); __gcm_hash_assoc_done()
411 remain = gcm_remain(req->assoclen); __gcm_hash_assoc_done()
413 err = gcm_hash_remain(req, pctx, remain, __gcm_hash_assoc_done()
419 __gcm_hash_assoc_remain_done(req, err); __gcm_hash_assoc_done()
424 struct aead_request *req = areq->data; gcm_hash_assoc_done() local
426 __gcm_hash_assoc_done(req, err); gcm_hash_assoc_done()
429 static void __gcm_hash_init_done(struct aead_request *req, int err) __gcm_hash_init_done() argument
431 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); __gcm_hash_init_done()
435 if (!err && req->assoclen) { __gcm_hash_init_done()
436 remain = gcm_remain(req->assoclen); __gcm_hash_init_done()
439 err = gcm_hash_update(req, pctx, compl, __gcm_hash_init_done()
440 req->assoc, req->assoclen); __gcm_hash_init_done()
446 __gcm_hash_assoc_done(req, err); __gcm_hash_init_done()
448 __gcm_hash_assoc_remain_done(req, err); __gcm_hash_init_done()
453 struct aead_request *req = areq->data; gcm_hash_init_done() local
455 __gcm_hash_init_done(req, err); gcm_hash_init_done()
458 static int gcm_hash(struct aead_request *req, gcm_hash() argument
463 struct crypto_gcm_ctx *ctx = crypto_tfm_ctx(req->base.tfm); gcm_hash()
470 ahash_request_set_callback(ahreq, aead_request_flags(req), gcm_hash()
471 gcm_hash_init_done, req); gcm_hash()
475 remain = gcm_remain(req->assoclen); gcm_hash()
477 err = gcm_hash_update(req, pctx, compl, req->assoc, req->assoclen); gcm_hash()
481 err = gcm_hash_remain(req, pctx, remain, gcm_hash()
488 err = gcm_hash_update(req, pctx, compl, gctx->src, gctx->cryptlen); gcm_hash()
492 err = gcm_hash_remain(req, pctx, remain, gcm_hash()
497 err = gcm_hash_len(req, pctx); gcm_hash()
500 err = gcm_hash_final(req, pctx); gcm_hash()
507 static void gcm_enc_copy_hash(struct aead_request *req, gcm_enc_copy_hash() argument
510 struct crypto_aead *aead = crypto_aead_reqtfm(req); gcm_enc_copy_hash()
513 scatterwalk_map_and_copy(auth_tag, req->dst, req->cryptlen, gcm_enc_copy_hash()
517 static void gcm_enc_hash_done(struct aead_request *req, int err) gcm_enc_hash_done() argument
519 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); gcm_enc_hash_done()
522 gcm_enc_copy_hash(req, pctx); gcm_enc_hash_done()
524 aead_request_complete(req, err); gcm_enc_hash_done()
529 struct aead_request *req = areq->data; gcm_encrypt_done() local
530 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); gcm_encrypt_done()
533 err = gcm_hash(req, pctx); gcm_encrypt_done()
538 gcm_enc_copy_hash(req, pctx); gcm_encrypt_done()
542 aead_request_complete(req, err); gcm_encrypt_done()
545 static int crypto_gcm_encrypt(struct aead_request *req) crypto_gcm_encrypt() argument
547 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); crypto_gcm_encrypt()
552 crypto_gcm_init_crypt(abreq, req, req->cryptlen); crypto_gcm_encrypt()
553 ablkcipher_request_set_callback(abreq, aead_request_flags(req), crypto_gcm_encrypt()
554 gcm_encrypt_done, req); crypto_gcm_encrypt()
556 gctx->src = req->dst; crypto_gcm_encrypt()
557 gctx->cryptlen = req->cryptlen; crypto_gcm_encrypt()
564 err = gcm_hash(req, pctx); crypto_gcm_encrypt()
569 gcm_enc_copy_hash(req, pctx); crypto_gcm_encrypt()
574 static int crypto_gcm_verify(struct aead_request *req, crypto_gcm_verify() argument
577 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_gcm_verify()
581 unsigned int cryptlen = req->cryptlen - authsize; crypto_gcm_verify()
584 scatterwalk_map_and_copy(iauth_tag, req->src, cryptlen, authsize, 0); crypto_gcm_verify()
590 struct aead_request *req = areq->data; gcm_decrypt_done() local
591 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); gcm_decrypt_done()
594 err = crypto_gcm_verify(req, pctx); gcm_decrypt_done()
596 aead_request_complete(req, err); gcm_decrypt_done()
599 static void gcm_dec_hash_done(struct aead_request *req, int err) gcm_dec_hash_done() argument
601 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); gcm_dec_hash_done()
606 ablkcipher_request_set_callback(abreq, aead_request_flags(req), gcm_dec_hash_done()
607 gcm_decrypt_done, req); gcm_dec_hash_done()
608 crypto_gcm_init_crypt(abreq, req, gctx->cryptlen); gcm_dec_hash_done()
613 err = crypto_gcm_verify(req, pctx); gcm_dec_hash_done()
616 aead_request_complete(req, err); gcm_dec_hash_done()
619 static int crypto_gcm_decrypt(struct aead_request *req) crypto_gcm_decrypt() argument
621 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_gcm_decrypt()
622 struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req); crypto_gcm_decrypt()
626 unsigned int cryptlen = req->cryptlen; crypto_gcm_decrypt()
633 gctx->src = req->src; crypto_gcm_decrypt()
637 err = gcm_hash(req, pctx); crypto_gcm_decrypt()
641 ablkcipher_request_set_callback(abreq, aead_request_flags(req), crypto_gcm_decrypt()
642 gcm_decrypt_done, req); crypto_gcm_decrypt()
643 crypto_gcm_init_crypt(abreq, req, cryptlen); crypto_gcm_decrypt()
648 return crypto_gcm_verify(req, pctx); crypto_gcm_decrypt()
898 static struct aead_request *crypto_rfc4106_crypt(struct aead_request *req) crypto_rfc4106_crypt() argument
900 struct aead_request *subreq = aead_request_ctx(req); crypto_rfc4106_crypt()
901 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_rfc4106_crypt()
908 memcpy(iv + 4, req->iv, 8); crypto_rfc4106_crypt()
911 aead_request_set_callback(subreq, req->base.flags, req->base.complete, crypto_rfc4106_crypt()
912 req->base.data); crypto_rfc4106_crypt()
913 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv); crypto_rfc4106_crypt()
914 aead_request_set_assoc(subreq, req->assoc, req->assoclen); crypto_rfc4106_crypt()
919 static int crypto_rfc4106_encrypt(struct aead_request *req) crypto_rfc4106_encrypt() argument
921 req = crypto_rfc4106_crypt(req); crypto_rfc4106_encrypt()
923 return crypto_aead_encrypt(req); crypto_rfc4106_encrypt()
926 static int crypto_rfc4106_decrypt(struct aead_request *req) crypto_rfc4106_decrypt() argument
928 req = crypto_rfc4106_crypt(req); crypto_rfc4106_decrypt()
930 return crypto_aead_decrypt(req); crypto_rfc4106_decrypt()
1062 struct aead_request *req) crypto_rfc4543_reqctx()
1064 unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); crypto_rfc4543_reqctx()
1066 return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); crypto_rfc4543_reqctx()
1105 struct aead_request *req = areq->data; crypto_rfc4543_done() local
1106 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_rfc4543_done()
1107 struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); crypto_rfc4543_done()
1110 scatterwalk_map_and_copy(rctx->auth_tag, req->dst, crypto_rfc4543_done()
1111 req->cryptlen, crypto_rfc4543_done()
1115 aead_request_complete(req, err); crypto_rfc4543_done()
1118 static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, crypto_rfc4543_crypt() argument
1121 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_rfc4543_crypt()
1123 struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); crypto_rfc4543_crypt()
1125 struct scatterlist *src = req->src; crypto_rfc4543_crypt()
1130 unsigned int assoclen = req->assoclen; crypto_rfc4543_crypt()
1137 memcpy(iv + 4, req->iv, 8); crypto_rfc4543_crypt()
1144 req->cryptlen - authsize, crypto_rfc4543_crypt()
1154 sg_set_buf(payload, req->iv, 8); crypto_rfc4543_crypt()
1155 scatterwalk_crypto_chain(payload, src, vsrc == req->iv + 8, 2); crypto_rfc4543_crypt()
1156 assoclen += 8 + req->cryptlen - (enc ? 0 : authsize); crypto_rfc4543_crypt()
1158 if (req->assoc->length == req->assoclen) { crypto_rfc4543_crypt()
1160 sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, crypto_rfc4543_crypt()
1161 req->assoc->offset); crypto_rfc4543_crypt()
1163 BUG_ON(req->assoclen > sizeof(rctx->assocbuf)); crypto_rfc4543_crypt()
1165 scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0, crypto_rfc4543_crypt()
1166 req->assoclen, 0); crypto_rfc4543_crypt()
1169 sg_set_buf(assoc, rctx->assocbuf, req->assoclen); crypto_rfc4543_crypt()
1174 aead_request_set_callback(subreq, req->base.flags, crypto_rfc4543_done, crypto_rfc4543_crypt()
1175 req); crypto_rfc4543_crypt()
1182 static int crypto_rfc4543_copy_src_to_dst(struct aead_request *req, bool enc) crypto_rfc4543_copy_src_to_dst() argument
1184 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_rfc4543_copy_src_to_dst()
1187 unsigned int nbytes = req->cryptlen - (enc ? 0 : authsize); crypto_rfc4543_copy_src_to_dst()
1192 return crypto_blkcipher_encrypt(&desc, req->dst, req->src, nbytes); crypto_rfc4543_copy_src_to_dst()
1195 static int crypto_rfc4543_encrypt(struct aead_request *req) crypto_rfc4543_encrypt() argument
1197 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_rfc4543_encrypt()
1198 struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req); crypto_rfc4543_encrypt()
1202 if (req->src != req->dst) { crypto_rfc4543_encrypt()
1203 err = crypto_rfc4543_copy_src_to_dst(req, true); crypto_rfc4543_encrypt()
1208 subreq = crypto_rfc4543_crypt(req, true); crypto_rfc4543_encrypt()
1213 scatterwalk_map_and_copy(rctx->auth_tag, req->dst, req->cryptlen, crypto_rfc4543_encrypt()
1219 static int crypto_rfc4543_decrypt(struct aead_request *req) crypto_rfc4543_decrypt() argument
1223 if (req->src != req->dst) { crypto_rfc4543_decrypt()
1224 err = crypto_rfc4543_copy_src_to_dst(req, false); crypto_rfc4543_decrypt()
1229 req = crypto_rfc4543_crypt(req, false); crypto_rfc4543_decrypt()
1231 return crypto_aead_decrypt(req); crypto_rfc4543_decrypt()
85 crypto_gcm_reqctx( struct aead_request *req) crypto_gcm_reqctx() argument
189 crypto_gcm_init_crypt(struct ablkcipher_request *ablk_req, struct aead_request *req, unsigned int cryptlen) crypto_gcm_init_crypt() argument
1061 crypto_rfc4543_reqctx( struct aead_request *req) crypto_rfc4543_reqctx() argument
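
[Note] The "memcpy(req->iv + 12, &counter, 4)" line in crypto_gcm_init_crypt() above reflects how GCM with a 96-bit nonce forms its first counter block: the 12 IV bytes followed by a 32-bit big-endian counter starting at 1, with the CTR engine incrementing the last 32 bits per block. The helper below is plain C showing only that block construction, not the kernel crypto API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void gcm_counter_block(uint8_t out[16], const uint8_t iv[12],
			      uint32_t ctr)
{
	memcpy(out, iv, 12);
	out[12] = ctr >> 24;        /* big-endian 32-bit block counter */
	out[13] = ctr >> 16;
	out[14] = ctr >> 8;
	out[15] = ctr;
}

int main(void)
{
	uint8_t iv[12] = { 0 }, j0[16];

	gcm_counter_block(j0, iv, 1);    /* counter starts at 1 */
	for (int i = 0; i < 16; i++)
		printf("%02x", j0[i]);
	printf("\n");
	return 0;
}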
H A Dzlib.c144 struct comp_request *req) zlib_compress_update()
150 pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); zlib_compress_update()
151 stream->next_in = req->next_in; zlib_compress_update()
152 stream->avail_in = req->avail_in; zlib_compress_update()
153 stream->next_out = req->next_out; zlib_compress_update()
154 stream->avail_out = req->avail_out; zlib_compress_update()
170 ret = req->avail_out - stream->avail_out; zlib_compress_update()
173 req->avail_in - stream->avail_in, ret); zlib_compress_update()
174 req->next_in = stream->next_in; zlib_compress_update()
175 req->avail_in = stream->avail_in; zlib_compress_update()
176 req->next_out = stream->next_out; zlib_compress_update()
177 req->avail_out = stream->avail_out; zlib_compress_update()
182 struct comp_request *req) zlib_compress_final()
188 pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); zlib_compress_final()
189 stream->next_in = req->next_in; zlib_compress_final()
190 stream->avail_in = req->avail_in; zlib_compress_final()
191 stream->next_out = req->next_out; zlib_compress_final()
192 stream->avail_out = req->avail_out; zlib_compress_final()
200 ret = req->avail_out - stream->avail_out; zlib_compress_final()
203 req->avail_in - stream->avail_in, ret); zlib_compress_final()
204 req->next_in = stream->next_in; zlib_compress_final()
205 req->avail_in = stream->avail_in; zlib_compress_final()
206 req->next_out = stream->next_out; zlib_compress_final()
207 req->avail_out = stream->avail_out; zlib_compress_final()
258 struct comp_request *req) zlib_decompress_update()
264 pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); zlib_decompress_update()
265 stream->next_in = req->next_in; zlib_decompress_update()
266 stream->avail_in = req->avail_in; zlib_decompress_update()
267 stream->next_out = req->next_out; zlib_decompress_update()
268 stream->avail_out = req->avail_out; zlib_decompress_update()
285 ret = req->avail_out - stream->avail_out; zlib_decompress_update()
288 req->avail_in - stream->avail_in, ret); zlib_decompress_update()
289 req->next_in = stream->next_in; zlib_decompress_update()
290 req->avail_in = stream->avail_in; zlib_decompress_update()
291 req->next_out = stream->next_out; zlib_decompress_update()
292 req->avail_out = stream->avail_out; zlib_decompress_update()
297 struct comp_request *req) zlib_decompress_final()
303 pr_debug("avail_in %u, avail_out %u\n", req->avail_in, req->avail_out); zlib_decompress_final()
304 stream->next_in = req->next_in; zlib_decompress_final()
305 stream->avail_in = req->avail_in; zlib_decompress_final()
306 stream->next_out = req->next_out; zlib_decompress_final()
307 stream->avail_out = req->avail_out; zlib_decompress_final()
333 ret = req->avail_out - stream->avail_out; zlib_decompress_final()
336 req->avail_in - stream->avail_in, ret); zlib_decompress_final()
337 req->next_in = stream->next_in; zlib_decompress_final()
338 req->avail_in = stream->avail_in; zlib_decompress_final()
339 req->next_out = stream->next_out; zlib_decompress_final()
340 req->avail_out = stream->avail_out; zlib_decompress_final()
143 zlib_compress_update(struct crypto_pcomp *tfm, struct comp_request *req) zlib_compress_update() argument
181 zlib_compress_final(struct crypto_pcomp *tfm, struct comp_request *req) zlib_compress_final() argument
257 zlib_decompress_update(struct crypto_pcomp *tfm, struct comp_request *req) zlib_decompress_update() argument
296 zlib_decompress_final(struct crypto_pcomp *tfm, struct comp_request *req) zlib_decompress_final() argument
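
[Note] The zlib_compress_update()/final() and decompress hits above shuttle the request's next_in/avail_in/next_out/avail_out fields into a z_stream, run the engine, copy the fields back, and report "avail_out before minus avail_out after" as the bytes produced. The same bookkeeping with stock userland zlib (build with -lz) looks like the following; this is only an illustration, not the pcomp interface.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int main(void)
{
	const char *msg = "hello hello hello hello";
	unsigned char out[128];
	z_stream s;
	unsigned int avail_before = sizeof(out);

	memset(&s, 0, sizeof(s));
	if (deflateInit(&s, Z_DEFAULT_COMPRESSION) != Z_OK)
		return 1;
	s.next_in = (unsigned char *)msg;
	s.avail_in = strlen(msg);
	s.next_out = out;
	s.avail_out = avail_before;
	if (deflate(&s, Z_FINISH) != Z_STREAM_END) {
		deflateEnd(&s);
		return 1;
	}
	printf("consumed %lu bytes, produced %u bytes\n",
	       (unsigned long)s.total_in, avail_before - s.avail_out);
	deflateEnd(&s);
	return 0;
}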
H A Dahash.c131 int crypto_hash_walk_first(struct ahash_request *req, crypto_hash_walk_first() argument
134 walk->total = req->nbytes; crypto_hash_walk_first()
141 walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); crypto_hash_walk_first()
142 walk->sg = req->src; crypto_hash_walk_first()
143 walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK; crypto_hash_walk_first()
149 int crypto_ahash_walk_first(struct ahash_request *req, crypto_ahash_walk_first() argument
152 walk->total = req->nbytes; crypto_ahash_walk_first()
159 walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req)); crypto_ahash_walk_first()
160 walk->sg = req->src; crypto_ahash_walk_first()
161 walk->flags = req->base.flags & CRYPTO_TFM_REQ_MASK; crypto_ahash_walk_first()
232 static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt) ahash_save_req() argument
234 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ahash_save_req()
240 (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? ahash_save_req()
259 * req { ahash_save_req()
262 * .base.data = ADJUSTED[*req (pointer to self)] ahash_save_req()
270 priv->result = req->result; ahash_save_req()
271 priv->complete = req->base.complete; ahash_save_req()
272 priv->data = req->base.data; ahash_save_req()
274 * WARNING: We do not backup req->priv here! The req->priv ahash_save_req()
279 req->result = PTR_ALIGN((u8 *)priv->ubuf, alignmask + 1); ahash_save_req()
280 req->base.complete = cplt; ahash_save_req()
281 req->base.data = req; ahash_save_req()
282 req->priv = priv; ahash_save_req()
287 static void ahash_restore_req(struct ahash_request *req) ahash_restore_req() argument
289 struct ahash_request_priv *priv = req->priv; ahash_restore_req()
292 req->result = priv->result; ahash_restore_req()
293 req->base.complete = priv->complete; ahash_restore_req()
294 req->base.data = priv->data; ahash_restore_req()
295 req->priv = NULL; ahash_restore_req()
297 /* Free the req->priv.priv from the ADJUSTED request. */ ahash_restore_req()
301 static void ahash_op_unaligned_finish(struct ahash_request *req, int err) ahash_op_unaligned_finish() argument
303 struct ahash_request_priv *priv = req->priv; ahash_op_unaligned_finish()
309 memcpy(priv->result, req->result, ahash_op_unaligned_finish()
310 crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); ahash_op_unaligned_finish()
312 ahash_restore_req(req); ahash_op_unaligned_finish()
315 static void ahash_op_unaligned_done(struct crypto_async_request *req, int err) ahash_op_unaligned_done() argument
317 struct ahash_request *areq = req->data; ahash_op_unaligned_done()
323 * The "struct ahash_request *req" here is in fact the "req.base" ahash_op_unaligned_done()
325 * is a pointer to self, it is also the ADJUSTED "req" . ahash_op_unaligned_done()
328 /* First copy req->result into req->priv.result */ ahash_op_unaligned_done()
335 static int ahash_op_unaligned(struct ahash_request *req, ahash_op_unaligned() argument
340 err = ahash_save_req(req, ahash_op_unaligned_done); ahash_op_unaligned()
344 err = op(req); ahash_op_unaligned()
345 ahash_op_unaligned_finish(req, err); ahash_op_unaligned()
350 static int crypto_ahash_op(struct ahash_request *req, crypto_ahash_op() argument
353 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); crypto_ahash_op()
356 if ((unsigned long)req->result & alignmask) crypto_ahash_op()
357 return ahash_op_unaligned(req, op); crypto_ahash_op()
359 return op(req); crypto_ahash_op()
362 int crypto_ahash_final(struct ahash_request *req) crypto_ahash_final() argument
364 return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->final); crypto_ahash_final()
368 int crypto_ahash_finup(struct ahash_request *req) crypto_ahash_finup() argument
370 return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->finup); crypto_ahash_finup()
374 int crypto_ahash_digest(struct ahash_request *req) crypto_ahash_digest() argument
376 return crypto_ahash_op(req, crypto_ahash_reqtfm(req)->digest); crypto_ahash_digest()
380 static void ahash_def_finup_finish2(struct ahash_request *req, int err) ahash_def_finup_finish2() argument
382 struct ahash_request_priv *priv = req->priv; ahash_def_finup_finish2()
388 memcpy(priv->result, req->result, ahash_def_finup_finish2()
389 crypto_ahash_digestsize(crypto_ahash_reqtfm(req))); ahash_def_finup_finish2()
391 ahash_restore_req(req); ahash_def_finup_finish2()
394 static void ahash_def_finup_done2(struct crypto_async_request *req, int err) ahash_def_finup_done2() argument
396 struct ahash_request *areq = req->data; ahash_def_finup_done2()
403 static int ahash_def_finup_finish1(struct ahash_request *req, int err) ahash_def_finup_finish1() argument
408 req->base.complete = ahash_def_finup_done2; ahash_def_finup_finish1()
409 req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; ahash_def_finup_finish1()
410 err = crypto_ahash_reqtfm(req)->final(req); ahash_def_finup_finish1()
413 ahash_def_finup_finish2(req, err); ahash_def_finup_finish1()
417 static void ahash_def_finup_done1(struct crypto_async_request *req, int err) ahash_def_finup_done1() argument
419 struct ahash_request *areq = req->data; ahash_def_finup_done1()
426 static int ahash_def_finup(struct ahash_request *req) ahash_def_finup() argument
428 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ahash_def_finup()
431 err = ahash_save_req(req, ahash_def_finup_done1); ahash_def_finup()
435 err = tfm->update(req); ahash_def_finup()
436 return ahash_def_finup_finish1(req, err); ahash_def_finup()
439 static int ahash_no_export(struct ahash_request *req, void *out) ahash_no_export() argument
444 static int ahash_no_import(struct ahash_request *req, const void *in) ahash_no_import() argument
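
[Note] ahash_save_req()/ahash_op_unaligned() above handle result buffers that do not satisfy the algorithm's alignment mask: a scratch area is over-allocated, the first suitably aligned address inside it is used for the operation, and the digest is copied back to the caller's pointer when the request completes. The sketch below shows just that fix-up; align_up() mirrors what PTR_ALIGN does in the kernel, everything else is an invented user-space stand-in.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void *align_up(void *p, uintptr_t mask)
{
	return (void *)(((uintptr_t)p + mask) & ~mask);
}

int main(void)
{
	const uintptr_t alignmask = 15;          /* require 16-byte alignment */
	unsigned char digest[20];                /* caller's (maybe unaligned) buffer */
	unsigned char *scratch = malloc(sizeof(digest) + alignmask);
	unsigned char *aligned;

	if (!scratch)
		return 1;
	aligned = align_up(scratch, alignmask);
	/* ... the hash engine would write its result into 'aligned' here ... */
	memset(aligned, 0x42, sizeof(digest));
	memcpy(digest, aligned, sizeof(digest)); /* copy back, as the done handler does */

	printf("aligned %p (mod 16 = %lu)\n", (void *)aligned,
	       (unsigned long)((uintptr_t)aligned & alignmask));
	free(scratch);
	return 0;
}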
H A Dauthencesn.c53 static void authenc_esn_request_complete(struct aead_request *req, int err) authenc_esn_request_complete() argument
56 aead_request_complete(req, err); authenc_esn_request_complete()
99 struct aead_request *req = areq->data; authenc_esn_geniv_ahash_update_done() local
100 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); authenc_esn_geniv_ahash_update_done()
102 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); authenc_esn_geniv_ahash_update_done()
110 ahash_request_set_callback(ahreq, aead_request_flags(req) & authenc_esn_geniv_ahash_update_done()
112 areq_ctx->update_complete2, req); authenc_esn_geniv_ahash_update_done()
120 ahash_request_set_callback(ahreq, aead_request_flags(req) & authenc_esn_geniv_ahash_update_done()
122 areq_ctx->complete, req); authenc_esn_geniv_ahash_update_done()
133 authenc_esn_request_complete(req, err); authenc_esn_geniv_ahash_update_done()
139 struct aead_request *req = areq->data; authenc_esn_geniv_ahash_update_done2() local
140 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); authenc_esn_geniv_ahash_update_done2()
142 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); authenc_esn_geniv_ahash_update_done2()
150 ahash_request_set_callback(ahreq, aead_request_flags(req) & authenc_esn_geniv_ahash_update_done2()
152 areq_ctx->complete, req); authenc_esn_geniv_ahash_update_done2()
163 authenc_esn_request_complete(req, err); authenc_esn_geniv_ahash_update_done2()
170 struct aead_request *req = areq->data; authenc_esn_geniv_ahash_done() local
171 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); authenc_esn_geniv_ahash_done()
173 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); authenc_esn_geniv_ahash_done()
184 aead_request_complete(req, err); authenc_esn_geniv_ahash_done()
194 struct aead_request *req = areq->data; authenc_esn_verify_ahash_update_done() local
195 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); authenc_esn_verify_ahash_update_done()
197 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); authenc_esn_verify_ahash_update_done()
199 unsigned int cryptlen = req->cryptlen; authenc_esn_verify_ahash_update_done()
208 aead_request_flags(req) & authenc_esn_verify_ahash_update_done()
210 areq_ctx->update_complete2, req); authenc_esn_verify_ahash_update_done()
218 ahash_request_set_callback(ahreq, aead_request_flags(req) & authenc_esn_verify_ahash_update_done()
220 areq_ctx->complete, req); authenc_esn_verify_ahash_update_done()
236 abreq = aead_request_ctx(req); authenc_esn_verify_ahash_update_done()
238 ablkcipher_request_set_callback(abreq, aead_request_flags(req), authenc_esn_verify_ahash_update_done()
239 req->base.complete, req->base.data); authenc_esn_verify_ahash_update_done()
240 ablkcipher_request_set_crypt(abreq, req->src, req->dst, authenc_esn_verify_ahash_update_done()
241 cryptlen, req->iv); authenc_esn_verify_ahash_update_done()
246 authenc_esn_request_complete(req, err); authenc_esn_verify_ahash_update_done()
255 struct aead_request *req = areq->data; authenc_esn_verify_ahash_update_done2() local
256 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); authenc_esn_verify_ahash_update_done2()
258 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); authenc_esn_verify_ahash_update_done2()
260 unsigned int cryptlen = req->cryptlen; authenc_esn_verify_ahash_update_done2()
267 ahash_request_set_callback(ahreq, aead_request_flags(req) & authenc_esn_verify_ahash_update_done2()
269 areq_ctx->complete, req); authenc_esn_verify_ahash_update_done2()
285 abreq = aead_request_ctx(req); authenc_esn_verify_ahash_update_done2()
287 ablkcipher_request_set_callback(abreq, aead_request_flags(req), authenc_esn_verify_ahash_update_done2()
288 req->base.complete, req->base.data); authenc_esn_verify_ahash_update_done2()
289 ablkcipher_request_set_crypt(abreq, req->src, req->dst, authenc_esn_verify_ahash_update_done2()
290 cryptlen, req->iv); authenc_esn_verify_ahash_update_done2()
295 authenc_esn_request_complete(req, err); authenc_esn_verify_ahash_update_done2()
305 struct aead_request *req = areq->data; authenc_esn_verify_ahash_done() local
306 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); authenc_esn_verify_ahash_done()
308 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); authenc_esn_verify_ahash_done()
310 unsigned int cryptlen = req->cryptlen; authenc_esn_verify_ahash_done()
325 abreq = aead_request_ctx(req); authenc_esn_verify_ahash_done()
327 ablkcipher_request_set_callback(abreq, aead_request_flags(req), authenc_esn_verify_ahash_done()
328 req->base.complete, req->base.data); authenc_esn_verify_ahash_done()
329 ablkcipher_request_set_crypt(abreq, req->src, req->dst, authenc_esn_verify_ahash_done()
330 cryptlen, req->iv); authenc_esn_verify_ahash_done()
335 authenc_esn_request_complete(req, err); authenc_esn_verify_ahash_done()
338 static u8 *crypto_authenc_esn_ahash(struct aead_request *req, crypto_authenc_esn_ahash() argument
341 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); crypto_authenc_esn_ahash()
344 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_esn_ahash()
359 ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, crypto_authenc_esn_ahash()
360 areq_ctx->update_complete, req); crypto_authenc_esn_ahash()
367 ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, crypto_authenc_esn_ahash()
368 areq_ctx->update_complete2, req); crypto_authenc_esn_ahash()
376 ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, crypto_authenc_esn_ahash()
377 areq_ctx->complete, req); crypto_authenc_esn_ahash()
386 static int crypto_authenc_esn_genicv(struct aead_request *req, u8 *iv, crypto_authenc_esn_genicv() argument
389 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); crypto_authenc_esn_genicv()
390 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_esn_genicv()
391 struct scatterlist *dst = req->dst; crypto_authenc_esn_genicv()
392 struct scatterlist *assoc = req->assoc; crypto_authenc_esn_genicv()
399 unsigned int cryptlen = req->cryptlen; crypto_authenc_esn_genicv()
442 hash = crypto_authenc_esn_ahash(req, flags); crypto_authenc_esn_genicv()
452 static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req, crypto_authenc_esn_encrypt_done() argument
455 struct aead_request *areq = req->data; crypto_authenc_esn_encrypt_done()
470 static int crypto_authenc_esn_encrypt(struct aead_request *req) crypto_authenc_esn_encrypt() argument
472 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); crypto_authenc_esn_encrypt()
474 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_esn_encrypt()
476 struct scatterlist *dst = req->dst; crypto_authenc_esn_encrypt()
477 unsigned int cryptlen = req->cryptlen; crypto_authenc_esn_encrypt()
484 ablkcipher_request_set_callback(abreq, aead_request_flags(req), crypto_authenc_esn_encrypt()
485 crypto_authenc_esn_encrypt_done, req); crypto_authenc_esn_encrypt()
486 ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv); crypto_authenc_esn_encrypt()
488 memcpy(iv, req->iv, crypto_aead_ivsize(authenc_esn)); crypto_authenc_esn_encrypt()
494 return crypto_authenc_esn_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP); crypto_authenc_esn_encrypt()
497 static void crypto_authenc_esn_givencrypt_done(struct crypto_async_request *req, crypto_authenc_esn_givencrypt_done() argument
500 struct aead_request *areq = req->data; crypto_authenc_esn_givencrypt_done()
511 static int crypto_authenc_esn_givencrypt(struct aead_givcrypt_request *req) crypto_authenc_esn_givencrypt() argument
513 struct crypto_aead *authenc_esn = aead_givcrypt_reqtfm(req); crypto_authenc_esn_givencrypt()
515 struct aead_request *areq = &req->areq; crypto_authenc_esn_givencrypt()
517 u8 *iv = req->giv; crypto_authenc_esn_givencrypt()
525 skcipher_givcrypt_set_giv(greq, iv, req->seq); crypto_authenc_esn_givencrypt()
534 static int crypto_authenc_esn_verify(struct aead_request *req) crypto_authenc_esn_verify() argument
536 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); crypto_authenc_esn_verify()
537 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_esn_verify()
545 ohash = crypto_authenc_esn_ahash(req, CRYPTO_TFM_REQ_MAY_SLEEP); crypto_authenc_esn_verify()
556 static int crypto_authenc_esn_iverify(struct aead_request *req, u8 *iv, crypto_authenc_esn_iverify() argument
559 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); crypto_authenc_esn_iverify()
560 struct authenc_esn_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_esn_iverify()
561 struct scatterlist *src = req->src; crypto_authenc_esn_iverify()
562 struct scatterlist *assoc = req->assoc; crypto_authenc_esn_iverify()
610 return crypto_authenc_esn_verify(req); crypto_authenc_esn_iverify()
613 static int crypto_authenc_esn_decrypt(struct aead_request *req) crypto_authenc_esn_decrypt() argument
615 struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req); crypto_authenc_esn_decrypt()
617 struct ablkcipher_request *abreq = aead_request_ctx(req); crypto_authenc_esn_decrypt()
618 unsigned int cryptlen = req->cryptlen; crypto_authenc_esn_decrypt()
620 u8 *iv = req->iv; crypto_authenc_esn_decrypt()
627 err = crypto_authenc_esn_iverify(req, iv, cryptlen); crypto_authenc_esn_decrypt()
632 ablkcipher_request_set_callback(abreq, aead_request_flags(req), crypto_authenc_esn_decrypt()
633 req->base.complete, req->base.data); crypto_authenc_esn_decrypt()
634 ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv); crypto_authenc_esn_decrypt()
H A Dseqiv.c32 static void seqiv_complete2(struct skcipher_givcrypt_request *req, int err) seqiv_complete2() argument
34 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); seqiv_complete2()
43 geniv = skcipher_givcrypt_reqtfm(req); seqiv_complete2()
44 memcpy(req->creq.info, subreq->info, crypto_ablkcipher_ivsize(geniv)); seqiv_complete2()
52 struct skcipher_givcrypt_request *req = base->data; seqiv_complete() local
54 seqiv_complete2(req, err); seqiv_complete()
55 skcipher_givcrypt_complete(req, err); seqiv_complete()
58 static void seqiv_aead_complete2(struct aead_givcrypt_request *req, int err) seqiv_aead_complete2() argument
60 struct aead_request *subreq = aead_givcrypt_reqctx(req); seqiv_aead_complete2()
69 geniv = aead_givcrypt_reqtfm(req); seqiv_aead_complete2()
70 memcpy(req->areq.iv, subreq->iv, crypto_aead_ivsize(geniv)); seqiv_aead_complete2()
78 struct aead_givcrypt_request *req = base->data; seqiv_aead_complete() local
80 seqiv_aead_complete2(req, err); seqiv_aead_complete()
81 aead_givcrypt_complete(req, err); seqiv_aead_complete()
98 static int seqiv_givencrypt(struct skcipher_givcrypt_request *req) seqiv_givencrypt() argument
100 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); seqiv_givencrypt()
102 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); seqiv_givencrypt()
111 compl = req->creq.base.complete; seqiv_givencrypt()
112 data = req->creq.base.data; seqiv_givencrypt()
113 info = req->creq.info; seqiv_givencrypt()
119 info = kmalloc(ivsize, req->creq.base.flags & seqiv_givencrypt()
126 data = req; seqiv_givencrypt()
129 ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl, seqiv_givencrypt()
131 ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, seqiv_givencrypt()
132 req->creq.nbytes, info); seqiv_givencrypt()
134 seqiv_geniv(ctx, info, req->seq, ivsize); seqiv_givencrypt()
135 memcpy(req->giv, info, ivsize); seqiv_givencrypt()
138 if (unlikely(info != req->creq.info)) seqiv_givencrypt()
139 seqiv_complete2(req, err); seqiv_givencrypt()
143 static int seqiv_aead_givencrypt(struct aead_givcrypt_request *req) seqiv_aead_givencrypt() argument
145 struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); seqiv_aead_givencrypt()
147 struct aead_request *areq = &req->areq; seqiv_aead_givencrypt()
148 struct aead_request *subreq = aead_givcrypt_reqctx(req); seqiv_aead_givencrypt()
172 data = req; seqiv_aead_givencrypt()
180 seqiv_geniv(ctx, info, req->seq, ivsize); seqiv_aead_givencrypt()
181 memcpy(req->giv, info, ivsize); seqiv_aead_givencrypt()
185 seqiv_aead_complete2(req, err); seqiv_aead_givencrypt()
189 static int seqiv_givencrypt_first(struct skcipher_givcrypt_request *req) seqiv_givencrypt_first() argument
191 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); seqiv_givencrypt_first()
209 return seqiv_givencrypt(req); seqiv_givencrypt_first()
212 static int seqiv_aead_givencrypt_first(struct aead_givcrypt_request *req) seqiv_aead_givencrypt_first() argument
214 struct crypto_aead *geniv = aead_givcrypt_reqtfm(req); seqiv_aead_givencrypt_first()
232 return seqiv_aead_givencrypt(req); seqiv_aead_givencrypt_first()
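
The seqiv hits above all funnel into one step: the generator derives each request's IV from a per-key salt and the request's 64-bit sequence number, runs the inner cipher, and copies the generated IV back on completion. A standalone sketch of that derivation, assuming the 4.1-era layout of a zero-padded big-endian sequence number XORed with the salt (plain C for illustration, not the kernel API):

    #include <stdint.h>
    #include <string.h>

    /* Sketch: IV = salt XOR (zero-pad || big-endian 64-bit sequence number). */
    static void sketch_seqiv(uint8_t *iv, const uint8_t *salt,
                             uint64_t seq, size_t ivsize)
    {
        size_t i, len = ivsize < 8 ? ivsize : 8;

        memset(iv, 0, ivsize);
        for (i = 0; i < len; i++)           /* sequence number fills the tail */
            iv[ivsize - 1 - i] = (uint8_t)(seq >> (8 * i));
        for (i = 0; i < ivsize; i++)        /* mask the whole IV with the salt */
            iv[i] ^= salt[i];
    }
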
H A Dablk_helper.c54 int __ablk_encrypt(struct ablkcipher_request *req) __ablk_encrypt() argument
56 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); __ablk_encrypt()
61 desc.info = req->info; __ablk_encrypt()
65 &desc, req->dst, req->src, req->nbytes); __ablk_encrypt()
69 int ablk_encrypt(struct ablkcipher_request *req) ablk_encrypt() argument
71 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); ablk_encrypt()
76 ablkcipher_request_ctx(req); ablk_encrypt()
78 *cryptd_req = *req; ablk_encrypt()
83 return __ablk_encrypt(req); ablk_encrypt()
88 int ablk_decrypt(struct ablkcipher_request *req) ablk_decrypt() argument
90 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); ablk_decrypt()
95 ablkcipher_request_ctx(req); ablk_decrypt()
97 *cryptd_req = *req; ablk_decrypt()
105 desc.info = req->info; ablk_decrypt()
109 &desc, req->dst, req->src, req->nbytes); ablk_decrypt()
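
The ablk_helper fragments show a context dispatch: when the SIMD-accelerated path cannot run in the current context, the request is duplicated into per-request storage and re-targeted at an asynchronous (cryptd-backed) transform; otherwise it is processed synchronously via __ablk_encrypt. A minimal model of that branch, with illustrative names only:

    #include <stdbool.h>

    struct sketch_req {
        int payload;
        struct sketch_req *async_slot;  /* per-request storage for the async copy */
    };

    static int run_sync(struct sketch_req *r)  { (void)r; return 0; }  /* placeholder */
    static int run_async(struct sketch_req *r) { (void)r; return 0; }  /* placeholder */

    /* Sketch: take the fast synchronous path only when the context allows it;
     * otherwise copy the request and hand the copy to the async back end. */
    static int sketch_encrypt(struct sketch_req *r, bool simd_usable)
    {
        if (!simd_usable) {
            *r->async_slot = *r;
            return run_async(r->async_slot);
        }
        return run_sync(r);
    }
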
H A Dchainiv.c48 static int chainiv_givencrypt(struct skcipher_givcrypt_request *req) chainiv_givencrypt() argument
50 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); chainiv_givencrypt()
52 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); chainiv_givencrypt()
57 ablkcipher_request_set_callback(subreq, req->creq.base.flags & chainiv_givencrypt()
59 req->creq.base.complete, chainiv_givencrypt()
60 req->creq.base.data); chainiv_givencrypt()
61 ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, chainiv_givencrypt()
62 req->creq.nbytes, req->creq.info); chainiv_givencrypt()
68 memcpy(req->giv, ctx->iv, ivsize); chainiv_givencrypt()
83 static int chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) chainiv_givencrypt_first() argument
85 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); chainiv_givencrypt_first()
104 return chainiv_givencrypt(req); chainiv_givencrypt_first()
144 static int async_chainiv_postpone_request(struct skcipher_givcrypt_request *req) async_chainiv_postpone_request() argument
146 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); async_chainiv_postpone_request()
151 err = skcipher_enqueue_givcrypt(&ctx->queue, req); async_chainiv_postpone_request()
161 static int async_chainiv_givencrypt_tail(struct skcipher_givcrypt_request *req) async_chainiv_givencrypt_tail() argument
163 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); async_chainiv_givencrypt_tail()
165 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); async_chainiv_givencrypt_tail()
168 memcpy(req->giv, ctx->iv, ivsize); async_chainiv_givencrypt_tail()
181 static int async_chainiv_givencrypt(struct skcipher_givcrypt_request *req) async_chainiv_givencrypt() argument
183 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); async_chainiv_givencrypt()
185 struct ablkcipher_request *subreq = skcipher_givcrypt_reqctx(req); async_chainiv_givencrypt()
188 ablkcipher_request_set_callback(subreq, req->creq.base.flags, async_chainiv_givencrypt()
189 req->creq.base.complete, async_chainiv_givencrypt()
190 req->creq.base.data); async_chainiv_givencrypt()
191 ablkcipher_request_set_crypt(subreq, req->creq.src, req->creq.dst, async_chainiv_givencrypt()
192 req->creq.nbytes, req->creq.info); async_chainiv_givencrypt()
202 return async_chainiv_givencrypt_tail(req); async_chainiv_givencrypt()
205 return async_chainiv_postpone_request(req); async_chainiv_givencrypt()
208 static int async_chainiv_givencrypt_first(struct skcipher_givcrypt_request *req) async_chainiv_givencrypt_first() argument
210 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); async_chainiv_givencrypt_first()
232 return async_chainiv_givencrypt(req); async_chainiv_givencrypt_first()
240 struct skcipher_givcrypt_request *req; async_chainiv_do_postponed() local
246 req = skcipher_dequeue_givcrypt(&ctx->queue); async_chainiv_do_postponed()
249 if (!req) { async_chainiv_do_postponed()
254 subreq = skcipher_givcrypt_reqctx(req); async_chainiv_do_postponed()
257 err = async_chainiv_givencrypt_tail(req); async_chainiv_do_postponed()
260 skcipher_givcrypt_complete(req, err); async_chainiv_do_postponed()
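
The chainiv entries describe IV chaining: each request is issued the IV currently stored in the transform context, and whatever IV state the cipher leaves behind becomes the starting IV for the next request; the async variant queues requests that arrive while the context is busy and replays them from the postponed-work path. A serialized model of the chaining step (illustrative names; the cipher stub stands in for the real machinery, and callers must serialize access themselves):

    #include <stdint.h>
    #include <string.h>

    #define IVSIZE 16

    struct chain_ctx { uint8_t iv[IVSIZE]; };

    /* Placeholder cipher: consumes iv_inout and leaves the follow-on IV in it. */
    static int cipher_encrypt(uint8_t *iv_inout, const void *src, void *dst, size_t len)
    {
        (void)src; (void)dst; (void)len;
        iv_inout[0] ^= 0x5a;
        return 0;
    }

    /* Sketch: publish the stored IV, encrypt, then chain the cipher's final
     * IV state into the context for the next caller. */
    static int sketch_chain_encrypt(struct chain_ctx *ctx, uint8_t *giv,
                                    const void *src, void *dst, size_t len)
    {
        uint8_t iv[IVSIZE];
        int err;

        memcpy(giv, ctx->iv, IVSIZE);
        memcpy(iv, ctx->iv, IVSIZE);
        err = cipher_encrypt(iv, src, dst, len);
        if (err == 0)
            memcpy(ctx->iv, iv, IVSIZE);
        return err;
    }
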
H A Dauthenc.c26 typedef u8 *(*authenc_ahash_t)(struct aead_request *req, unsigned int flags);
49 static void authenc_request_complete(struct aead_request *req, int err) authenc_request_complete() argument
52 aead_request_complete(req, err); authenc_request_complete()
125 struct aead_request *req = areq->data; authenc_geniv_ahash_update_done() local
126 struct crypto_aead *authenc = crypto_aead_reqtfm(req); authenc_geniv_ahash_update_done()
128 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); authenc_geniv_ahash_update_done()
136 ahash_request_set_callback(ahreq, aead_request_flags(req) & authenc_geniv_ahash_update_done()
138 areq_ctx->complete, req); authenc_geniv_ahash_update_done()
149 authenc_request_complete(req, err); authenc_geniv_ahash_update_done()
154 struct aead_request *req = areq->data; authenc_geniv_ahash_done() local
155 struct crypto_aead *authenc = crypto_aead_reqtfm(req); authenc_geniv_ahash_done()
157 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); authenc_geniv_ahash_done()
168 aead_request_complete(req, err); authenc_geniv_ahash_done()
177 struct aead_request *req = areq->data; authenc_verify_ahash_update_done() local
178 struct crypto_aead *authenc = crypto_aead_reqtfm(req); authenc_verify_ahash_update_done()
180 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); authenc_verify_ahash_update_done()
182 unsigned int cryptlen = req->cryptlen; authenc_verify_ahash_update_done()
189 ahash_request_set_callback(ahreq, aead_request_flags(req) & authenc_verify_ahash_update_done()
191 areq_ctx->complete, req); authenc_verify_ahash_update_done()
207 abreq = aead_request_ctx(req); authenc_verify_ahash_update_done()
209 ablkcipher_request_set_callback(abreq, aead_request_flags(req), authenc_verify_ahash_update_done()
210 req->base.complete, req->base.data); authenc_verify_ahash_update_done()
211 ablkcipher_request_set_crypt(abreq, req->src, req->dst, authenc_verify_ahash_update_done()
212 cryptlen, req->iv); authenc_verify_ahash_update_done()
217 authenc_request_complete(req, err); authenc_verify_ahash_update_done()
226 struct aead_request *req = areq->data; authenc_verify_ahash_done() local
227 struct crypto_aead *authenc = crypto_aead_reqtfm(req); authenc_verify_ahash_done()
229 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); authenc_verify_ahash_done()
231 unsigned int cryptlen = req->cryptlen; authenc_verify_ahash_done()
246 abreq = aead_request_ctx(req); authenc_verify_ahash_done()
248 ablkcipher_request_set_callback(abreq, aead_request_flags(req), authenc_verify_ahash_done()
249 req->base.complete, req->base.data); authenc_verify_ahash_done()
250 ablkcipher_request_set_crypt(abreq, req->src, req->dst, authenc_verify_ahash_done()
251 cryptlen, req->iv); authenc_verify_ahash_done()
256 authenc_request_complete(req, err); authenc_verify_ahash_done()
259 static u8 *crypto_authenc_ahash_fb(struct aead_request *req, unsigned int flags) crypto_authenc_ahash_fb() argument
261 struct crypto_aead *authenc = crypto_aead_reqtfm(req); crypto_authenc_ahash_fb()
264 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_ahash_fb()
278 ahash_request_set_crypt(ahreq, req->assoc, hash, req->assoclen); crypto_authenc_ahash_fb()
279 ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, crypto_authenc_ahash_fb()
280 areq_ctx->update_complete, req); crypto_authenc_ahash_fb()
288 ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, crypto_authenc_ahash_fb()
289 areq_ctx->complete, req); crypto_authenc_ahash_fb()
298 static u8 *crypto_authenc_ahash(struct aead_request *req, unsigned int flags) crypto_authenc_ahash() argument
300 struct crypto_aead *authenc = crypto_aead_reqtfm(req); crypto_authenc_ahash()
303 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_ahash()
314 ahash_request_set_callback(ahreq, aead_request_flags(req) & flags, crypto_authenc_ahash()
315 areq_ctx->complete, req); crypto_authenc_ahash()
324 static int crypto_authenc_genicv(struct aead_request *req, u8 *iv, crypto_authenc_genicv() argument
327 struct crypto_aead *authenc = crypto_aead_reqtfm(req); crypto_authenc_genicv()
328 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_genicv()
329 struct scatterlist *dst = req->dst; crypto_authenc_genicv()
330 struct scatterlist *assoc = req->assoc; crypto_authenc_genicv()
334 unsigned int cryptlen = req->cryptlen; crypto_authenc_genicv()
351 if (req->assoclen && sg_is_last(assoc)) { crypto_authenc_genicv()
357 cryptlen += req->assoclen; crypto_authenc_genicv()
366 hash = authenc_ahash_fn(req, flags); crypto_authenc_genicv()
375 static void crypto_authenc_encrypt_done(struct crypto_async_request *req, crypto_authenc_encrypt_done() argument
378 struct aead_request *areq = req->data; crypto_authenc_encrypt_done()
394 static int crypto_authenc_encrypt(struct aead_request *req) crypto_authenc_encrypt() argument
396 struct crypto_aead *authenc = crypto_aead_reqtfm(req); crypto_authenc_encrypt()
398 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_encrypt()
400 struct scatterlist *dst = req->dst; crypto_authenc_encrypt()
401 unsigned int cryptlen = req->cryptlen; crypto_authenc_encrypt()
408 ablkcipher_request_set_callback(abreq, aead_request_flags(req), crypto_authenc_encrypt()
409 crypto_authenc_encrypt_done, req); crypto_authenc_encrypt()
410 ablkcipher_request_set_crypt(abreq, req->src, dst, cryptlen, req->iv); crypto_authenc_encrypt()
412 memcpy(iv, req->iv, crypto_aead_ivsize(authenc)); crypto_authenc_encrypt()
418 return crypto_authenc_genicv(req, iv, CRYPTO_TFM_REQ_MAY_SLEEP); crypto_authenc_encrypt()
421 static void crypto_authenc_givencrypt_done(struct crypto_async_request *req, crypto_authenc_givencrypt_done() argument
424 struct aead_request *areq = req->data; crypto_authenc_givencrypt_done()
435 static int crypto_authenc_givencrypt(struct aead_givcrypt_request *req) crypto_authenc_givencrypt() argument
437 struct crypto_aead *authenc = aead_givcrypt_reqtfm(req); crypto_authenc_givencrypt()
439 struct aead_request *areq = &req->areq; crypto_authenc_givencrypt()
441 u8 *iv = req->giv; crypto_authenc_givencrypt()
449 skcipher_givcrypt_set_giv(greq, iv, req->seq); crypto_authenc_givencrypt()
458 static int crypto_authenc_verify(struct aead_request *req, crypto_authenc_verify() argument
461 struct crypto_aead *authenc = crypto_aead_reqtfm(req); crypto_authenc_verify()
462 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_verify()
470 ohash = authenc_ahash_fn(req, CRYPTO_TFM_REQ_MAY_SLEEP); crypto_authenc_verify()
481 static int crypto_authenc_iverify(struct aead_request *req, u8 *iv, crypto_authenc_iverify() argument
484 struct crypto_aead *authenc = crypto_aead_reqtfm(req); crypto_authenc_iverify()
485 struct authenc_request_ctx *areq_ctx = aead_request_ctx(req); crypto_authenc_iverify()
486 struct scatterlist *src = req->src; crypto_authenc_iverify()
487 struct scatterlist *assoc = req->assoc; crypto_authenc_iverify()
506 if (req->assoclen && sg_is_last(assoc)) { crypto_authenc_iverify()
512 cryptlen += req->assoclen; crypto_authenc_iverify()
518 return crypto_authenc_verify(req, authenc_ahash_fn); crypto_authenc_iverify()
521 static int crypto_authenc_decrypt(struct aead_request *req) crypto_authenc_decrypt() argument
523 struct crypto_aead *authenc = crypto_aead_reqtfm(req); crypto_authenc_decrypt()
525 struct ablkcipher_request *abreq = aead_request_ctx(req); crypto_authenc_decrypt()
526 unsigned int cryptlen = req->cryptlen; crypto_authenc_decrypt()
528 u8 *iv = req->iv; crypto_authenc_decrypt()
535 err = crypto_authenc_iverify(req, iv, cryptlen); crypto_authenc_decrypt()
540 ablkcipher_request_set_callback(abreq, aead_request_flags(req), crypto_authenc_decrypt()
541 req->base.complete, req->base.data); crypto_authenc_decrypt()
542 ablkcipher_request_set_crypt(abreq, req->src, req->dst, cryptlen, iv); crypto_authenc_decrypt()
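
Taken in sequence, the authenc matches outline an encrypt-then-MAC construction: crypto_authenc_encrypt runs the block cipher over the payload, genicv then hashes the associated data together with the ciphertext and appends the ICV, while the decrypt path recomputes and compares that ICV (iverify/verify) before the ciphertext is ever decrypted. A compressed outline of that ordering, with stand-in primitives rather than the kernel calls:

    #include <stddef.h>
    #include <string.h>

    #define ICV_LEN 12

    /* Placeholders for the underlying cipher and keyed hash. */
    static void stub_cipher(void *dst, const void *src, size_t len) { memcpy(dst, src, len); }
    static void stub_hmac(void *icv, const void *msg, size_t len)   { memset(icv, 0, ICV_LEN); (void)msg; (void)len; }

    /* Sketch: encrypt first, then authenticate assoc || ciphertext.
     * Assumes alen + len <= sizeof(buf); real code walks scatterlists instead. */
    static void sketch_authenc_encrypt(void *out, void *icv,
                                       const void *assoc, size_t alen,
                                       const void *in, size_t len)
    {
        unsigned char buf[4096];

        stub_cipher(out, in, len);           /* 1. produce the ciphertext   */
        memcpy(buf, assoc, alen);            /* 2. hash assoc || ciphertext */
        memcpy(buf + alen, out, len);
        stub_hmac(icv, buf, alen + len);     /* 3. this ICV is appended / verified */
    }
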
H A Dcryptd.c132 * req->complete) and reschedule itself if there is more work to
137 struct crypto_async_request *req, *backlog; cryptd_queue_worker() local
149 req = crypto_dequeue_request(&cpu_queue->queue); cryptd_queue_worker()
153 if (!req) cryptd_queue_worker()
158 req->complete(req, 0); cryptd_queue_worker()
201 static void cryptd_blkcipher_crypt(struct ablkcipher_request *req, cryptd_blkcipher_crypt() argument
212 rctx = ablkcipher_request_ctx(req); cryptd_blkcipher_crypt()
218 desc.info = req->info; cryptd_blkcipher_crypt()
221 err = crypt(&desc, req->dst, req->src, req->nbytes); cryptd_blkcipher_crypt()
223 req->base.complete = rctx->complete; cryptd_blkcipher_crypt()
227 rctx->complete(&req->base, err); cryptd_blkcipher_crypt()
231 static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err) cryptd_blkcipher_encrypt() argument
233 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); cryptd_blkcipher_encrypt()
236 cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, cryptd_blkcipher_encrypt()
240 static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err) cryptd_blkcipher_decrypt() argument
242 struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm); cryptd_blkcipher_decrypt()
245 cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err, cryptd_blkcipher_decrypt()
249 static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req, cryptd_blkcipher_enqueue() argument
252 struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req); cryptd_blkcipher_enqueue()
253 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); cryptd_blkcipher_enqueue()
257 rctx->complete = req->base.complete; cryptd_blkcipher_enqueue()
258 req->base.complete = compl; cryptd_blkcipher_enqueue()
260 return cryptd_enqueue_request(queue, &req->base); cryptd_blkcipher_enqueue()
263 static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req) cryptd_blkcipher_encrypt_enqueue() argument
265 return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt); cryptd_blkcipher_encrypt_enqueue()
268 static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req) cryptd_blkcipher_decrypt_enqueue() argument
270 return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt); cryptd_blkcipher_decrypt_enqueue()
436 static int cryptd_hash_enqueue(struct ahash_request *req, cryptd_hash_enqueue() argument
439 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); cryptd_hash_enqueue()
440 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); cryptd_hash_enqueue()
444 rctx->complete = req->base.complete; cryptd_hash_enqueue()
445 req->base.complete = compl; cryptd_hash_enqueue()
447 return cryptd_enqueue_request(queue, &req->base); cryptd_hash_enqueue()
454 struct ahash_request *req = ahash_request_cast(req_async); cryptd_hash_init() local
455 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); cryptd_hash_init()
466 req->base.complete = rctx->complete; cryptd_hash_init()
470 rctx->complete(&req->base, err); cryptd_hash_init()
474 static int cryptd_hash_init_enqueue(struct ahash_request *req) cryptd_hash_init_enqueue() argument
476 return cryptd_hash_enqueue(req, cryptd_hash_init); cryptd_hash_init_enqueue()
481 struct ahash_request *req = ahash_request_cast(req_async); cryptd_hash_update() local
484 rctx = ahash_request_ctx(req); cryptd_hash_update()
489 err = shash_ahash_update(req, &rctx->desc); cryptd_hash_update()
491 req->base.complete = rctx->complete; cryptd_hash_update()
495 rctx->complete(&req->base, err); cryptd_hash_update()
499 static int cryptd_hash_update_enqueue(struct ahash_request *req) cryptd_hash_update_enqueue() argument
501 return cryptd_hash_enqueue(req, cryptd_hash_update); cryptd_hash_update_enqueue()
506 struct ahash_request *req = ahash_request_cast(req_async); cryptd_hash_final() local
507 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); cryptd_hash_final()
512 err = crypto_shash_final(&rctx->desc, req->result); cryptd_hash_final()
514 req->base.complete = rctx->complete; cryptd_hash_final()
518 rctx->complete(&req->base, err); cryptd_hash_final()
522 static int cryptd_hash_final_enqueue(struct ahash_request *req) cryptd_hash_final_enqueue() argument
524 return cryptd_hash_enqueue(req, cryptd_hash_final); cryptd_hash_final_enqueue()
529 struct ahash_request *req = ahash_request_cast(req_async); cryptd_hash_finup() local
530 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); cryptd_hash_finup()
535 err = shash_ahash_finup(req, &rctx->desc); cryptd_hash_finup()
537 req->base.complete = rctx->complete; cryptd_hash_finup()
541 rctx->complete(&req->base, err); cryptd_hash_finup()
545 static int cryptd_hash_finup_enqueue(struct ahash_request *req) cryptd_hash_finup_enqueue() argument
547 return cryptd_hash_enqueue(req, cryptd_hash_finup); cryptd_hash_finup_enqueue()
554 struct ahash_request *req = ahash_request_cast(req_async); cryptd_hash_digest() local
555 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); cryptd_hash_digest()
564 err = shash_ahash_digest(req, desc); cryptd_hash_digest()
566 req->base.complete = rctx->complete; cryptd_hash_digest()
570 rctx->complete(&req->base, err); cryptd_hash_digest()
574 static int cryptd_hash_digest_enqueue(struct ahash_request *req) cryptd_hash_digest_enqueue() argument
576 return cryptd_hash_enqueue(req, cryptd_hash_digest); cryptd_hash_digest_enqueue()
579 static int cryptd_hash_export(struct ahash_request *req, void *out) cryptd_hash_export() argument
581 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); cryptd_hash_export()
586 static int cryptd_hash_import(struct ahash_request *req, const void *in) cryptd_hash_import() argument
588 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); cryptd_hash_import()
657 static void cryptd_aead_crypt(struct aead_request *req, cryptd_aead_crypt() argument
660 int (*crypt)(struct aead_request *req)) cryptd_aead_crypt()
663 rctx = aead_request_ctx(req); cryptd_aead_crypt()
667 aead_request_set_tfm(req, child); cryptd_aead_crypt()
668 err = crypt( req ); cryptd_aead_crypt()
669 req->base.complete = rctx->complete; cryptd_aead_crypt()
672 rctx->complete(&req->base, err); cryptd_aead_crypt()
680 struct aead_request *req; cryptd_aead_encrypt() local
682 req = container_of(areq, struct aead_request, base); cryptd_aead_encrypt()
683 cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->encrypt); cryptd_aead_encrypt()
690 struct aead_request *req; cryptd_aead_decrypt() local
692 req = container_of(areq, struct aead_request, base); cryptd_aead_decrypt()
693 cryptd_aead_crypt(req, child, err, crypto_aead_crt(child)->decrypt); cryptd_aead_decrypt()
696 static int cryptd_aead_enqueue(struct aead_request *req, cryptd_aead_enqueue() argument
699 struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req); cryptd_aead_enqueue()
700 struct crypto_aead *tfm = crypto_aead_reqtfm(req); cryptd_aead_enqueue()
703 rctx->complete = req->base.complete; cryptd_aead_enqueue()
704 req->base.complete = compl; cryptd_aead_enqueue()
705 return cryptd_enqueue_request(queue, &req->base); cryptd_aead_enqueue()
708 static int cryptd_aead_encrypt_enqueue(struct aead_request *req) cryptd_aead_encrypt_enqueue() argument
710 return cryptd_aead_enqueue(req, cryptd_aead_encrypt ); cryptd_aead_encrypt_enqueue()
713 static int cryptd_aead_decrypt_enqueue(struct aead_request *req) cryptd_aead_decrypt_enqueue() argument
715 return cryptd_aead_enqueue(req, cryptd_aead_decrypt ); cryptd_aead_decrypt_enqueue()
918 struct shash_desc *cryptd_shash_desc(struct ahash_request *req) cryptd_shash_desc() argument
920 struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req); cryptd_shash_desc()
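
The cryptd fragments repeat one enqueue idiom: stash the caller's completion callback in the request context, substitute the worker's callback, and push the request onto a per-CPU queue; the worker later dequeues it, does the work with the child transform, restores the original callback and invokes it. A small model of that callback swap (illustrative types and names):

    struct sketch_async_req;
    typedef void (*complete_fn)(struct sketch_async_req *r, int err);

    struct sketch_async_req {
        complete_fn complete;        /* callback currently armed on the request */
        complete_fn saved_complete;  /* the caller's original callback          */
    };

    /* Sketch: enqueue path swaps in the worker's completion. */
    static void sketch_enqueue(struct sketch_async_req *r, complete_fn worker_done)
    {
        r->saved_complete = r->complete;
        r->complete = worker_done;
        /* ... push r onto the per-CPU queue and kick the worker ... */
    }

    /* Sketch: worker path restores the original callback before completing. */
    static void sketch_worker_done(struct sketch_async_req *r, int err)
    {
        r->complete = r->saved_complete;
        r->complete(r, err);
    }
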
H A Dmcryptd.c150 * req->complete) and reschedule itself if there is more work to
156 struct crypto_async_request *req, *backlog; mcryptd_queue_worker() local
174 req = crypto_dequeue_request(&cpu_queue->queue); mcryptd_queue_worker()
178 if (!req) { mcryptd_queue_worker()
185 req->complete(req, 0); mcryptd_queue_worker()
317 static int mcryptd_hash_enqueue(struct ahash_request *req, mcryptd_hash_enqueue() argument
322 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); mcryptd_hash_enqueue()
323 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); mcryptd_hash_enqueue()
327 rctx->complete = req->base.complete; mcryptd_hash_enqueue()
328 req->base.complete = complete; mcryptd_hash_enqueue()
330 ret = mcryptd_enqueue_request(queue, &req->base, rctx); mcryptd_hash_enqueue()
339 struct ahash_request *req = ahash_request_cast(req_async); mcryptd_hash_init() local
340 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); mcryptd_hash_init()
351 req->base.complete = rctx->complete; mcryptd_hash_init()
355 rctx->complete(&req->base, err); mcryptd_hash_init()
359 static int mcryptd_hash_init_enqueue(struct ahash_request *req) mcryptd_hash_init_enqueue() argument
361 return mcryptd_hash_enqueue(req, mcryptd_hash_init); mcryptd_hash_init_enqueue()
366 struct ahash_request *req = ahash_request_cast(req_async); mcryptd_hash_update() local
367 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); mcryptd_hash_update()
372 err = shash_ahash_mcryptd_update(req, &rctx->desc); mcryptd_hash_update()
374 req->base.complete = rctx->complete; mcryptd_hash_update()
381 rctx->complete(&req->base, err); mcryptd_hash_update()
385 static int mcryptd_hash_update_enqueue(struct ahash_request *req) mcryptd_hash_update_enqueue() argument
387 return mcryptd_hash_enqueue(req, mcryptd_hash_update); mcryptd_hash_update_enqueue()
392 struct ahash_request *req = ahash_request_cast(req_async); mcryptd_hash_final() local
393 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); mcryptd_hash_final()
398 err = shash_ahash_mcryptd_final(req, &rctx->desc); mcryptd_hash_final()
400 req->base.complete = rctx->complete; mcryptd_hash_final()
407 rctx->complete(&req->base, err); mcryptd_hash_final()
411 static int mcryptd_hash_final_enqueue(struct ahash_request *req) mcryptd_hash_final_enqueue() argument
413 return mcryptd_hash_enqueue(req, mcryptd_hash_final); mcryptd_hash_final_enqueue()
418 struct ahash_request *req = ahash_request_cast(req_async); mcryptd_hash_finup() local
419 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); mcryptd_hash_finup()
424 err = shash_ahash_mcryptd_finup(req, &rctx->desc); mcryptd_hash_finup()
427 req->base.complete = rctx->complete; mcryptd_hash_finup()
434 rctx->complete(&req->base, err); mcryptd_hash_finup()
438 static int mcryptd_hash_finup_enqueue(struct ahash_request *req) mcryptd_hash_finup_enqueue() argument
440 return mcryptd_hash_enqueue(req, mcryptd_hash_finup); mcryptd_hash_finup_enqueue()
447 struct ahash_request *req = ahash_request_cast(req_async); mcryptd_hash_digest() local
448 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); mcryptd_hash_digest()
457 err = shash_ahash_mcryptd_digest(req, desc); mcryptd_hash_digest()
460 req->base.complete = rctx->complete; mcryptd_hash_digest()
467 rctx->complete(&req->base, err); mcryptd_hash_digest()
471 static int mcryptd_hash_digest_enqueue(struct ahash_request *req) mcryptd_hash_digest_enqueue() argument
473 return mcryptd_hash_enqueue(req, mcryptd_hash_digest); mcryptd_hash_digest_enqueue()
476 static int mcryptd_hash_export(struct ahash_request *req, void *out) mcryptd_hash_export() argument
478 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); mcryptd_hash_export()
483 static int mcryptd_hash_import(struct ahash_request *req, const void *in) mcryptd_hash_import() argument
485 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); mcryptd_hash_import()
618 int shash_ahash_mcryptd_digest(struct ahash_request *req, shash_ahash_mcryptd_digest() argument
624 shash_ahash_mcryptd_finup(req, desc); shash_ahash_mcryptd_digest()
630 int shash_ahash_mcryptd_update(struct ahash_request *req, shash_ahash_mcryptd_update() argument
642 int shash_ahash_mcryptd_finup(struct ahash_request *req, shash_ahash_mcryptd_finup() argument
650 return shash->finup(desc, NULL, 0, req->result); shash_ahash_mcryptd_finup()
654 int shash_ahash_mcryptd_final(struct ahash_request *req, shash_ahash_mcryptd_final() argument
662 return shash->final(desc, req->result); shash_ahash_mcryptd_final()
674 struct shash_desc *mcryptd_shash_desc(struct ahash_request *req) mcryptd_shash_desc() argument
676 struct mcryptd_hash_request_ctx *rctx = ahash_request_ctx(req); mcryptd_shash_desc()
H A Dccm.c51 struct aead_request *req) crypto_ccm_reqctx()
53 unsigned long align = crypto_aead_alignmask(crypto_aead_reqtfm(req)); crypto_ccm_reqctx()
55 return (void *)PTR_ALIGN((u8 *)aead_request_ctx(req), align + 1); crypto_ccm_reqctx()
123 static int format_input(u8 *info, struct aead_request *req, format_input() argument
126 struct crypto_aead *aead = crypto_aead_reqtfm(req); format_input()
127 unsigned int lp = req->iv[0]; format_input()
133 memcpy(info, req->iv, 16); format_input()
139 if (req->assoclen) format_input()
245 static int crypto_ccm_auth(struct aead_request *req, struct scatterlist *plain, crypto_ccm_auth() argument
248 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_ccm_auth()
250 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); crypto_ccm_auth()
252 unsigned int assoclen = req->assoclen; crypto_ccm_auth()
258 err = format_input(odata, req, cryptlen); crypto_ccm_auth()
268 get_data_to_compute(cipher, pctx, req->assoc, req->assoclen); crypto_ccm_auth()
283 struct aead_request *req = areq->data; crypto_ccm_encrypt_done() local
284 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_ccm_encrypt_done()
285 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); crypto_ccm_encrypt_done()
289 scatterwalk_map_and_copy(odata, req->dst, req->cryptlen, crypto_ccm_encrypt_done()
291 aead_request_complete(req, err); crypto_ccm_encrypt_done()
303 static int crypto_ccm_encrypt(struct aead_request *req) crypto_ccm_encrypt() argument
305 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_ccm_encrypt()
307 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); crypto_ccm_encrypt()
310 unsigned int cryptlen = req->cryptlen; crypto_ccm_encrypt()
312 u8 *iv = req->iv; crypto_ccm_encrypt()
319 pctx->flags = aead_request_flags(req); crypto_ccm_encrypt()
321 err = crypto_ccm_auth(req, req->src, cryptlen); crypto_ccm_encrypt()
332 scatterwalk_sg_chain(pctx->src, 2, req->src); crypto_ccm_encrypt()
335 if (req->src != req->dst) { crypto_ccm_encrypt()
338 scatterwalk_sg_chain(pctx->dst, 2, req->dst); crypto_ccm_encrypt()
344 crypto_ccm_encrypt_done, req); crypto_ccm_encrypt()
351 scatterwalk_map_and_copy(odata, req->dst, cryptlen, crypto_ccm_encrypt()
359 struct aead_request *req = areq->data; crypto_ccm_decrypt_done() local
360 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); crypto_ccm_decrypt_done()
361 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_ccm_decrypt_done()
363 unsigned int cryptlen = req->cryptlen - authsize; crypto_ccm_decrypt_done()
366 err = crypto_ccm_auth(req, req->dst, cryptlen); crypto_ccm_decrypt_done()
370 aead_request_complete(req, err); crypto_ccm_decrypt_done()
373 static int crypto_ccm_decrypt(struct aead_request *req) crypto_ccm_decrypt() argument
375 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_ccm_decrypt()
377 struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req); crypto_ccm_decrypt()
381 unsigned int cryptlen = req->cryptlen; crypto_ccm_decrypt()
384 u8 *iv = req->iv; crypto_ccm_decrypt()
395 pctx->flags = aead_request_flags(req); crypto_ccm_decrypt()
397 scatterwalk_map_and_copy(authtag, req->src, cryptlen, authsize, 0); crypto_ccm_decrypt()
403 scatterwalk_sg_chain(pctx->src, 2, req->src); crypto_ccm_decrypt()
406 if (req->src != req->dst) { crypto_ccm_decrypt()
409 scatterwalk_sg_chain(pctx->dst, 2, req->dst); crypto_ccm_decrypt()
415 crypto_ccm_decrypt_done, req); crypto_ccm_decrypt()
421 err = crypto_ccm_auth(req, req->dst, cryptlen); crypto_ccm_decrypt()
678 static struct aead_request *crypto_rfc4309_crypt(struct aead_request *req) crypto_rfc4309_crypt() argument
680 struct aead_request *subreq = aead_request_ctx(req); crypto_rfc4309_crypt()
681 struct crypto_aead *aead = crypto_aead_reqtfm(req); crypto_rfc4309_crypt()
691 memcpy(iv + 4, req->iv, 8); crypto_rfc4309_crypt()
694 aead_request_set_callback(subreq, req->base.flags, req->base.complete, crypto_rfc4309_crypt()
695 req->base.data); crypto_rfc4309_crypt()
696 aead_request_set_crypt(subreq, req->src, req->dst, req->cryptlen, iv); crypto_rfc4309_crypt()
697 aead_request_set_assoc(subreq, req->assoc, req->assoclen); crypto_rfc4309_crypt()
702 static int crypto_rfc4309_encrypt(struct aead_request *req) crypto_rfc4309_encrypt() argument
704 req = crypto_rfc4309_crypt(req); crypto_rfc4309_encrypt()
706 return crypto_aead_encrypt(req); crypto_rfc4309_encrypt()
709 static int crypto_rfc4309_decrypt(struct aead_request *req) crypto_rfc4309_decrypt() argument
711 req = crypto_rfc4309_crypt(req); crypto_rfc4309_decrypt()
713 return crypto_aead_decrypt(req); crypto_rfc4309_decrypt()
50 crypto_ccm_reqctx( struct aead_request *req) crypto_ccm_reqctx() argument
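
The ccm matches trace the two halves of CCM: format_input builds the B0 block from the IV and message length, crypto_ccm_auth runs the CBC-MAC over the associated data and payload, and the encrypt/decrypt paths then CTR-process the data while the tag travels at the end of the buffer (scatterwalk_map_and_copy). The rfc4309 wrapper only rebuilds the nonce from a keyed salt and the per-packet IV. A sketch of that counter-block layout, inferred from the "memcpy(iv + 4, req->iv, 8)" hit above (offsets are an assumption for illustration):

    #include <stdint.h>
    #include <string.h>

    /* Sketch: flags/L' byte, 3-byte keyed salt, 8-byte per-packet IV,
     * trailing 4-byte running block counter. */
    static void sketch_rfc4309_iv(uint8_t out[16], const uint8_t salt[3],
                                  const uint8_t per_packet_iv[8])
    {
        memset(out, 0, 16);
        out[0] = 3;                          /* L' (counter length - 1)  */
        memcpy(out + 1, salt, 3);            /* keyed salt               */
        memcpy(out + 4, per_packet_iv, 8);   /* IV carried in the packet */
        /* out[12..15] left as the running block counter */
    }
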
H A Deseqiv.c42 static void eseqiv_complete2(struct skcipher_givcrypt_request *req) eseqiv_complete2() argument
44 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); eseqiv_complete2()
45 struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); eseqiv_complete2()
47 memcpy(req->giv, PTR_ALIGN((u8 *)reqctx->tail, eseqiv_complete2()
54 struct skcipher_givcrypt_request *req = base->data; eseqiv_complete() local
59 eseqiv_complete2(req); eseqiv_complete()
62 skcipher_givcrypt_complete(req, err); eseqiv_complete()
65 static int eseqiv_givencrypt(struct skcipher_givcrypt_request *req) eseqiv_givencrypt() argument
67 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); eseqiv_givencrypt()
69 struct eseqiv_request_ctx *reqctx = skcipher_givcrypt_reqctx(req); eseqiv_givencrypt()
88 giv = req->giv; eseqiv_givencrypt()
89 compl = req->creq.base.complete; eseqiv_givencrypt()
90 data = req->creq.base.data; eseqiv_givencrypt()
92 osrc = req->creq.src; eseqiv_givencrypt()
93 odst = req->creq.dst; eseqiv_givencrypt()
105 data = req; eseqiv_givencrypt()
108 ablkcipher_request_set_callback(subreq, req->creq.base.flags, compl, eseqiv_givencrypt()
125 req->creq.nbytes + ivsize, eseqiv_givencrypt()
126 req->creq.info); eseqiv_givencrypt()
128 memcpy(req->creq.info, ctx->salt, ivsize); eseqiv_givencrypt()
132 memset(req->giv, 0, ivsize - sizeof(u64)); eseqiv_givencrypt()
135 seq = cpu_to_be64(req->seq); eseqiv_givencrypt()
136 memcpy(req->giv + ivsize - len, &seq, len); eseqiv_givencrypt()
142 if (giv != req->giv) eseqiv_givencrypt()
143 eseqiv_complete2(req); eseqiv_givencrypt()
149 static int eseqiv_givencrypt_first(struct skcipher_givcrypt_request *req) eseqiv_givencrypt_first() argument
151 struct crypto_ablkcipher *geniv = skcipher_givcrypt_reqtfm(req); eseqiv_givencrypt_first()
169 return eseqiv_givencrypt(req); eseqiv_givencrypt_first()
H A Dpcrypt.c120 struct aead_request *req = pcrypt_request_ctx(preq); pcrypt_aead_serial() local
122 aead_request_complete(req->base.data, padata->info); pcrypt_aead_serial()
128 struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); pcrypt_aead_giv_serial() local
130 aead_request_complete(req->areq.base.data, padata->info); pcrypt_aead_giv_serial()
135 struct aead_request *req = areq->data; pcrypt_aead_done() local
136 struct pcrypt_request *preq = aead_request_ctx(req); pcrypt_aead_done()
140 req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; pcrypt_aead_done()
148 struct aead_request *req = pcrypt_request_ctx(preq); pcrypt_aead_enc() local
150 padata->info = crypto_aead_encrypt(req); pcrypt_aead_enc()
158 static int pcrypt_aead_encrypt(struct aead_request *req) pcrypt_aead_encrypt() argument
161 struct pcrypt_request *preq = aead_request_ctx(req); pcrypt_aead_encrypt()
164 struct crypto_aead *aead = crypto_aead_reqtfm(req); pcrypt_aead_encrypt()
166 u32 flags = aead_request_flags(req); pcrypt_aead_encrypt()
175 pcrypt_aead_done, req); pcrypt_aead_encrypt()
176 aead_request_set_crypt(creq, req->src, req->dst, pcrypt_aead_encrypt()
177 req->cryptlen, req->iv); pcrypt_aead_encrypt()
178 aead_request_set_assoc(creq, req->assoc, req->assoclen); pcrypt_aead_encrypt()
190 struct aead_request *req = pcrypt_request_ctx(preq); pcrypt_aead_dec() local
192 padata->info = crypto_aead_decrypt(req); pcrypt_aead_dec()
200 static int pcrypt_aead_decrypt(struct aead_request *req) pcrypt_aead_decrypt() argument
203 struct pcrypt_request *preq = aead_request_ctx(req); pcrypt_aead_decrypt()
206 struct crypto_aead *aead = crypto_aead_reqtfm(req); pcrypt_aead_decrypt()
208 u32 flags = aead_request_flags(req); pcrypt_aead_decrypt()
217 pcrypt_aead_done, req); pcrypt_aead_decrypt()
218 aead_request_set_crypt(creq, req->src, req->dst, pcrypt_aead_decrypt()
219 req->cryptlen, req->iv); pcrypt_aead_decrypt()
220 aead_request_set_assoc(creq, req->assoc, req->assoclen); pcrypt_aead_decrypt()
232 struct aead_givcrypt_request *req = pcrypt_request_ctx(preq); pcrypt_aead_givenc() local
234 padata->info = crypto_aead_givencrypt(req); pcrypt_aead_givenc()
242 static int pcrypt_aead_givencrypt(struct aead_givcrypt_request *req) pcrypt_aead_givencrypt() argument
245 struct aead_request *areq = &req->areq; pcrypt_aead_givencrypt()
249 struct crypto_aead *aead = aead_givcrypt_reqtfm(req); pcrypt_aead_givencrypt()
264 aead_givcrypt_set_giv(creq, req->giv, req->seq); pcrypt_aead_givencrypt()
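
pcrypt's entries show the padata wrapping: the AEAD request is embedded in a pcrypt_request, the actual crypto_aead_encrypt/decrypt call happens in a parallel worker that records its return value in padata->info, and the serial callback later completes the original request in submission order. A condensed model of that parallel/serial split (names are illustrative; the ordering itself is padata's job):

    struct sketch_padata { int info; void *request; };

    static int  do_aead(void *request)                      { (void)request; return 0; }          /* placeholder */
    static void complete_request(void *request, int err)    { (void)request; (void)err; }         /* placeholder */

    /* Parallel step: may run on any CPU; only records the outcome. */
    static void sketch_parallel(struct sketch_padata *p)
    {
        p->info = do_aead(p->request);
    }

    /* Serial step: runs in submission order; reports the recorded outcome. */
    static void sketch_serial(struct sketch_padata *p)
    {
        complete_request(p->request, p->info);
    }
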
/linux-4.1.27/drivers/staging/emxx_udc/
H A Demxx_udc.c187 udc->ep0_req.req.buf = p_buf; _nbu2ss_create_ep0_packet()
188 udc->ep0_req.req.length = length; _nbu2ss_create_ep0_packet()
189 udc->ep0_req.req.dma = 0; _nbu2ss_create_ep0_packet()
190 udc->ep0_req.req.zero = TRUE; _nbu2ss_create_ep0_packet()
191 udc->ep0_req.req.complete = _nbu2ss_ep0_complete; _nbu2ss_create_ep0_packet()
192 udc->ep0_req.req.status = -EINPROGRESS; _nbu2ss_create_ep0_packet()
193 udc->ep0_req.req.context = udc; _nbu2ss_create_ep0_packet()
194 udc->ep0_req.req.actual = 0; _nbu2ss_create_ep0_packet()
483 struct nbu2ss_req *req, _nbu2ss_dma_map_single()
487 if (req->req.dma == DMA_ADDR_INVALID) { _nbu2ss_dma_map_single()
488 if (req->unaligned) _nbu2ss_dma_map_single()
489 req->req.dma = ep->phys_buf; _nbu2ss_dma_map_single()
491 req->req.dma = dma_map_single( _nbu2ss_dma_map_single()
493 req->req.buf, _nbu2ss_dma_map_single()
494 req->req.length, _nbu2ss_dma_map_single()
498 req->mapped = 1; _nbu2ss_dma_map_single()
500 if (!req->unaligned) _nbu2ss_dma_map_single()
503 req->req.dma, _nbu2ss_dma_map_single()
504 req->req.length, _nbu2ss_dma_map_single()
508 req->mapped = 0; _nbu2ss_dma_map_single()
516 struct nbu2ss_req *req, _nbu2ss_dma_unmap_single()
525 count = req->req.actual % 4; _nbu2ss_dma_unmap_single()
527 p = req->req.buf; _nbu2ss_dma_unmap_single()
528 p += (req->req.actual - count); _nbu2ss_dma_unmap_single()
533 if (req->mapped) { _nbu2ss_dma_unmap_single()
534 if (req->unaligned) { _nbu2ss_dma_unmap_single()
536 memcpy(req->req.buf, ep->virt_buf, _nbu2ss_dma_unmap_single()
537 req->req.actual & 0xfffffffc); _nbu2ss_dma_unmap_single()
540 req->req.dma, req->req.length, _nbu2ss_dma_unmap_single()
544 req->req.dma = DMA_ADDR_INVALID; _nbu2ss_dma_unmap_single()
545 req->mapped = 0; _nbu2ss_dma_unmap_single()
547 if (!req->unaligned) _nbu2ss_dma_unmap_single()
549 req->req.dma, req->req.length, _nbu2ss_dma_unmap_single()
556 p = req->req.buf; _nbu2ss_dma_unmap_single()
557 p += (req->req.actual - count); _nbu2ss_dma_unmap_single()
695 struct nbu2ss_req *req _nbu2ss_ep0_in_transfer()
705 if (req->req.actual == req->req.length) { _nbu2ss_ep0_in_transfer()
706 if ((req->req.actual % EP0_PACKETSIZE) == 0) { _nbu2ss_ep0_in_transfer()
707 if (req->zero) { _nbu2ss_ep0_in_transfer()
708 req->zero = false; _nbu2ss_ep0_in_transfer()
724 iRemainSize = req->req.length - req->req.actual; _nbu2ss_ep0_in_transfer()
725 pBuffer = (u8 *)req->req.buf; _nbu2ss_ep0_in_transfer()
726 pBuffer += req->req.actual; _nbu2ss_ep0_in_transfer()
732 req->div_len = result; _nbu2ss_ep0_in_transfer()
743 req->div_len = result; _nbu2ss_ep0_in_transfer()
753 struct nbu2ss_req *req _nbu2ss_ep0_out_transfer()
769 iRemainSize = req->req.length - req->req.actual; _nbu2ss_ep0_out_transfer()
770 pBuffer = (u8 *)req->req.buf; _nbu2ss_ep0_out_transfer()
771 pBuffer += req->req.actual; _nbu2ss_ep0_out_transfer()
778 req->req.actual += result; _nbu2ss_ep0_out_transfer()
787 req->req.actual += result; _nbu2ss_ep0_out_transfer()
795 if (req->req.actual == req->req.length) { _nbu2ss_ep0_out_transfer()
796 if ((req->req.actual % EP0_PACKETSIZE) == 0) { _nbu2ss_ep0_out_transfer()
797 if (req->zero) { _nbu2ss_ep0_out_transfer()
798 req->zero = false; _nbu2ss_ep0_out_transfer()
807 if ((req->req.actual % EP0_PACKETSIZE) != 0) _nbu2ss_ep0_out_transfer()
810 if (req->req.actual > req->req.length) { _nbu2ss_ep0_out_transfer()
831 struct nbu2ss_req *req, _nbu2ss_out_dma()
845 if (req->dma_flag) _nbu2ss_out_dma()
848 req->dma_flag = TRUE; _nbu2ss_out_dma()
849 pBuffer = (u8 *)req->req.dma; _nbu2ss_out_dma()
850 pBuffer += req->req.actual; _nbu2ss_out_dma()
886 req->div_len = result; _nbu2ss_out_dma()
895 struct nbu2ss_req *req, _nbu2ss_epn_out_pio()
908 if (req->dma_flag) _nbu2ss_epn_out_pio()
914 pBuffer = (u8 *)req->req.buf; _nbu2ss_epn_out_pio()
915 pBuf32 = (union usb_reg_access *)(pBuffer + req->req.actual); _nbu2ss_epn_out_pio()
939 req->req.actual += result; _nbu2ss_epn_out_pio()
941 if ((req->req.actual == req->req.length) _nbu2ss_epn_out_pio()
942 || ((req->req.actual % ep->ep.maxpacket) != 0)) { _nbu2ss_epn_out_pio()
954 struct nbu2ss_req *req, _nbu2ss_epn_out_data()
967 iBufSize = min((req->req.length - req->req.actual), data_size); _nbu2ss_epn_out_data()
970 && (req->req.dma != 0) _nbu2ss_epn_out_data()
972 nret = _nbu2ss_out_dma(udc, req, num, iBufSize); _nbu2ss_epn_out_data()
975 nret = _nbu2ss_epn_out_pio(udc, ep, req, iBufSize); _nbu2ss_epn_out_data()
985 struct nbu2ss_req *req _nbu2ss_epn_out_transfer()
1004 result = _nbu2ss_epn_out_data(udc, ep, req, iRecvLength); _nbu2ss_epn_out_transfer()
1007 req->req.actual += result; _nbu2ss_epn_out_transfer()
1012 if ((req->req.actual == req->req.length) _nbu2ss_epn_out_transfer()
1013 || ((req->req.actual % ep->ep.maxpacket) != 0)) { _nbu2ss_epn_out_transfer()
1020 if ((req->req.actual % ep->ep.maxpacket) == 0) { _nbu2ss_epn_out_transfer()
1021 if (req->zero) { _nbu2ss_epn_out_transfer()
1022 req->zero = false; _nbu2ss_epn_out_transfer()
1028 if (req->req.actual > req->req.length) { _nbu2ss_epn_out_transfer()
1031 req->req.actual, req->req.length); _nbu2ss_epn_out_transfer()
1042 struct nbu2ss_req *req, _nbu2ss_in_dma()
1056 if (req->dma_flag) _nbu2ss_in_dma()
1060 if (req->req.actual == 0) _nbu2ss_in_dma()
1061 _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_IN); _nbu2ss_in_dma()
1063 req->dma_flag = TRUE; _nbu2ss_in_dma()
1093 pBuffer = (u8 *)req->req.dma; _nbu2ss_in_dma()
1094 pBuffer += req->req.actual; _nbu2ss_in_dma()
1109 req->div_len = result; _nbu2ss_in_dma()
1118 struct nbu2ss_req *req, _nbu2ss_epn_in_pio()
1131 if (req->dma_flag) _nbu2ss_epn_in_pio()
1135 pBuffer = (u8 *)req->req.buf; _nbu2ss_epn_in_pio()
1136 pBuf32 = (union usb_reg_access *)(pBuffer + req->req.actual); _nbu2ss_epn_in_pio()
1162 req->div_len = result; _nbu2ss_epn_in_pio()
1171 struct nbu2ss_req *req, _nbu2ss_epn_in_data()
1184 && (req->req.dma != 0) _nbu2ss_epn_in_data()
1186 nret = _nbu2ss_in_dma(udc, ep, req, num, data_size); _nbu2ss_epn_in_data()
1189 nret = _nbu2ss_epn_in_pio(udc, ep, req, data_size); _nbu2ss_epn_in_data()
1199 struct nbu2ss_req *req _nbu2ss_epn_in_transfer()
1216 if (req->req.actual == 0) { _nbu2ss_epn_in_transfer()
1227 iBufSize = req->req.length - req->req.actual; _nbu2ss_epn_in_transfer()
1229 result = _nbu2ss_epn_in_data(udc, ep, req, iBufSize); _nbu2ss_epn_in_transfer()
1230 else if (req->req.length == 0) _nbu2ss_epn_in_transfer()
1240 struct nbu2ss_req *req, _nbu2ss_start_transfer()
1245 req->dma_flag = FALSE; _nbu2ss_start_transfer()
1246 req->div_len = 0; _nbu2ss_start_transfer()
1248 if (req->req.length == 0) _nbu2ss_start_transfer()
1249 req->zero = false; _nbu2ss_start_transfer()
1251 if ((req->req.length % ep->ep.maxpacket) == 0) _nbu2ss_start_transfer()
1252 req->zero = req->req.zero; _nbu2ss_start_transfer()
1254 req->zero = false; _nbu2ss_start_transfer()
1261 nret = _nbu2ss_ep0_in_transfer(udc, ep, req); _nbu2ss_start_transfer()
1265 nret = _nbu2ss_ep0_out_transfer(udc, ep, req); _nbu2ss_start_transfer()
1281 nret = _nbu2ss_epn_out_transfer(udc, ep, req); _nbu2ss_start_transfer()
1284 nret = _nbu2ss_epn_in_transfer(udc, ep, req); _nbu2ss_start_transfer()
1296 struct nbu2ss_req *req; _nbu2ss_restert_transfer() local
1299 req = NULL; _nbu2ss_restert_transfer()
1301 req = list_entry(ep->queue.next, struct nbu2ss_req, queue); _nbu2ss_restert_transfer()
1303 if (req == NULL) _nbu2ss_restert_transfer()
1315 _nbu2ss_start_transfer(ep->udc, ep, req, bflag); _nbu2ss_restert_transfer()
1795 struct nbu2ss_req *req; _nbu2ss_ep0_in_data_stage() local
1799 req = NULL; _nbu2ss_ep0_in_data_stage()
1801 req = list_entry(ep->queue.next, struct nbu2ss_req, queue); _nbu2ss_ep0_in_data_stage()
1803 if (req == NULL) _nbu2ss_ep0_in_data_stage()
1804 req = &udc->ep0_req; _nbu2ss_ep0_in_data_stage()
1806 req->req.actual += req->div_len; _nbu2ss_ep0_in_data_stage()
1807 req->div_len = 0; _nbu2ss_ep0_in_data_stage()
1809 nret = _nbu2ss_ep0_in_transfer(udc, ep, req); _nbu2ss_ep0_in_data_stage()
1822 struct nbu2ss_req *req; _nbu2ss_ep0_out_data_stage() local
1826 req = NULL; _nbu2ss_ep0_out_data_stage()
1828 req = list_entry(ep->queue.next, struct nbu2ss_req, queue); _nbu2ss_ep0_out_data_stage()
1830 if (req == NULL) _nbu2ss_ep0_out_data_stage()
1831 req = &udc->ep0_req; _nbu2ss_ep0_out_data_stage()
1833 nret = _nbu2ss_ep0_out_transfer(udc, ep, req); _nbu2ss_ep0_out_data_stage()
1840 req->req.status = nret; _nbu2ss_ep0_out_data_stage()
1849 struct nbu2ss_req *req; _nbu2ss_ep0_status_stage() local
1853 req = NULL; _nbu2ss_ep0_status_stage()
1855 req = list_entry(ep->queue.next, struct nbu2ss_req, queue); _nbu2ss_ep0_status_stage()
1857 if (req == NULL) { _nbu2ss_ep0_status_stage()
1858 req = &udc->ep0_req; _nbu2ss_ep0_status_stage()
1859 if (req->req.complete) _nbu2ss_ep0_status_stage()
1860 req->req.complete(&ep->ep, &req->req); _nbu2ss_ep0_status_stage()
1863 if (req->req.complete) _nbu2ss_ep0_status_stage()
1864 _nbu2ss_ep_done(ep, req, 0); _nbu2ss_ep0_status_stage()
1957 struct nbu2ss_req *req, _nbu2ss_ep_done()
1962 list_del_init(&req->queue); _nbu2ss_ep_done()
1967 if (likely(req->req.status == -EINPROGRESS)) _nbu2ss_ep_done()
1968 req->req.status = status; _nbu2ss_ep_done()
1979 (req->req.dma != 0)) _nbu2ss_ep_done()
1980 _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_OUT); _nbu2ss_ep_done()
1984 req->req.complete(&ep->ep, &req->req); _nbu2ss_ep_done()
1992 struct nbu2ss_req *req) _nbu2ss_epn_in_int()
1999 if (req->dma_flag) _nbu2ss_epn_in_int()
2002 req->req.actual += req->div_len; _nbu2ss_epn_in_int()
2003 req->div_len = 0; _nbu2ss_epn_in_int()
2005 if (req->req.actual != req->req.length) { _nbu2ss_epn_in_int()
2008 result = _nbu2ss_epn_in_transfer(udc, ep, req); _nbu2ss_epn_in_int()
2011 if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) { _nbu2ss_epn_in_int()
2019 req->zero = false; _nbu2ss_epn_in_int()
2029 _nbu2ss_ep_done(ep, req, result); _nbu2ss_epn_in_int()
2037 struct nbu2ss_req *req) _nbu2ss_epn_out_int()
2041 result = _nbu2ss_epn_out_transfer(udc, ep, req); _nbu2ss_epn_out_int()
2043 _nbu2ss_ep_done(ep, req, result); _nbu2ss_epn_out_int()
2050 struct nbu2ss_req *req) _nbu2ss_epn_in_dma_int()
2056 preq = &req->req; _nbu2ss_epn_in_dma_int()
2058 if (req->dma_flag == FALSE) _nbu2ss_epn_in_dma_int()
2061 preq->actual += req->div_len; _nbu2ss_epn_in_dma_int()
2062 req->div_len = 0; _nbu2ss_epn_in_dma_int()
2063 req->dma_flag = FALSE; _nbu2ss_epn_in_dma_int()
2066 _nbu2ss_dma_unmap_single(udc, ep, req, USB_DIR_IN); _nbu2ss_epn_in_dma_int()
2070 _nbu2ss_epn_in_transfer(udc, ep, req); _nbu2ss_epn_in_dma_int()
2078 _nbu2ss_epn_in_int(udc, ep, req); _nbu2ss_epn_in_dma_int()
2087 struct nbu2ss_req *req) _nbu2ss_epn_out_dma_int()
2097 if (req->req.actual == req->req.length) { _nbu2ss_epn_out_dma_int()
2098 if ((req->req.length % ep->ep.maxpacket) && !req->zero) { _nbu2ss_epn_out_dma_int()
2099 req->div_len = 0; _nbu2ss_epn_out_dma_int()
2100 req->dma_flag = FALSE; _nbu2ss_epn_out_dma_int()
2101 _nbu2ss_ep_done(ep, req, 0); _nbu2ss_epn_out_dma_int()
2122 if ((req->div_len % mpkt) == 0) _nbu2ss_epn_out_dma_int()
2123 req->div_len -= mpkt * dmacnt; _nbu2ss_epn_out_dma_int()
2126 if ((req->req.actual % ep->ep.maxpacket) > 0) { _nbu2ss_epn_out_dma_int()
2127 if (req->req.actual == req->div_len) { _nbu2ss_epn_out_dma_int()
2128 req->div_len = 0; _nbu2ss_epn_out_dma_int()
2129 req->dma_flag = FALSE; _nbu2ss_epn_out_dma_int()
2130 _nbu2ss_ep_done(ep, req, 0); _nbu2ss_epn_out_dma_int()
2135 req->req.actual += req->div_len; _nbu2ss_epn_out_dma_int()
2136 req->div_len = 0; _nbu2ss_epn_out_dma_int()
2137 req->dma_flag = FALSE; _nbu2ss_epn_out_dma_int()
2139 _nbu2ss_epn_out_int(udc, ep, req); _nbu2ss_epn_out_dma_int()
2148 struct nbu2ss_req *req; _nbu2ss_epn_int() local
2160 req = NULL; _nbu2ss_epn_int()
2162 req = list_entry(ep->queue.next, struct nbu2ss_req, queue); _nbu2ss_epn_int()
2164 if (req == NULL) { _nbu2ss_epn_int()
2165 /* pr_warn("=== %s(%d) req == NULL\n", __func__, epnum); */ _nbu2ss_epn_int()
2171 _nbu2ss_epn_out_dma_int(udc, ep, req); _nbu2ss_epn_int()
2175 _nbu2ss_epn_out_int(udc, ep, req); _nbu2ss_epn_int()
2179 _nbu2ss_epn_in_dma_int(udc, ep, req); _nbu2ss_epn_int()
2183 _nbu2ss_epn_in_int(udc, ep, req); _nbu2ss_epn_int()
2220 struct nbu2ss_req *req; _nbu2ss_nuke() local
2232 list_for_each_entry(req, &ep->queue, queue) { _nbu2ss_nuke()
2233 _nbu2ss_ep_done(ep, req, status); _nbu2ss_nuke()
2695 struct nbu2ss_req *req; nbu2ss_ep_alloc_request() local
2697 req = kzalloc(sizeof(*req), gfp_flags); nbu2ss_ep_alloc_request()
2698 if (!req) nbu2ss_ep_alloc_request()
2702 req->req.dma = DMA_ADDR_INVALID; nbu2ss_ep_alloc_request()
2704 INIT_LIST_HEAD(&req->queue); nbu2ss_ep_alloc_request()
2706 return &req->req; nbu2ss_ep_alloc_request()
2714 struct nbu2ss_req *req; nbu2ss_ep_free_request() local
2717 req = container_of(_req, struct nbu2ss_req, req); nbu2ss_ep_free_request()
2719 kfree(req); nbu2ss_ep_free_request()
2729 struct nbu2ss_req *req; nbu2ss_ep_queue() local
2747 req = container_of(_req, struct nbu2ss_req, req); nbu2ss_ep_queue()
2750 || !list_empty(&req->queue))) { nbu2ss_ep_queue()
2758 if (!list_empty(&req->queue)) nbu2ss_ep_queue()
2759 pr_err("%s --- !list_empty(&req->queue)\n", __func__); nbu2ss_ep_queue()
2783 if ((u32)req->req.buf & 0x3) nbu2ss_ep_queue()
2784 req->unaligned = TRUE; nbu2ss_ep_queue()
2786 req->unaligned = FALSE; nbu2ss_ep_queue()
2788 if (req->unaligned) { nbu2ss_ep_queue()
2795 memcpy(ep->virt_buf, req->req.buf, nbu2ss_ep_queue()
2796 req->req.length); nbu2ss_ep_queue()
2801 (req->req.dma != 0)) nbu2ss_ep_queue()
2802 _nbu2ss_dma_map_single(udc, ep, req, USB_DIR_OUT); nbu2ss_ep_queue()
2809 list_add_tail(&req->queue, &ep->queue); nbu2ss_ep_queue()
2813 result = _nbu2ss_start_transfer(udc, ep, req, FALSE); nbu2ss_ep_queue()
2817 list_del(&req->queue); nbu2ss_ep_queue()
2820 if (req->req.length < 4 && nbu2ss_ep_queue()
2821 req->req.length == req->req.actual) nbu2ss_ep_queue()
2823 if (req->req.length == req->req.actual) nbu2ss_ep_queue()
2825 _nbu2ss_ep_done(ep, req, result); nbu2ss_ep_queue()
2839 struct nbu2ss_req *req; nbu2ss_ep_dequeue() local
2865 list_for_each_entry(req, &ep->queue, queue) { nbu2ss_ep_dequeue()
2866 if (&req->req == _req) nbu2ss_ep_dequeue()
2869 if (&req->req != _req) { nbu2ss_ep_dequeue()
2875 _nbu2ss_ep_done(ep, req, -ECONNRESET); nbu2ss_ep_dequeue()
480 _nbu2ss_dma_map_single( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u8 direct ) _nbu2ss_dma_map_single() argument
513 _nbu2ss_dma_unmap_single( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u8 direct ) _nbu2ss_dma_unmap_single() argument
829 _nbu2ss_out_dma( struct nbu2ss_udc *udc, struct nbu2ss_req *req, u32 num, u32 length ) _nbu2ss_out_dma() argument
892 _nbu2ss_epn_out_pio( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u32 length ) _nbu2ss_epn_out_pio() argument
951 _nbu2ss_epn_out_data( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u32 data_size ) _nbu2ss_epn_out_data() argument
1039 _nbu2ss_in_dma( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u32 num, u32 length ) _nbu2ss_in_dma() argument
1115 _nbu2ss_epn_in_pio( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u32 length ) _nbu2ss_epn_in_pio() argument
1168 _nbu2ss_epn_in_data( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, u32 data_size ) _nbu2ss_epn_in_data() argument
1237 _nbu2ss_start_transfer( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req, bool bflag) _nbu2ss_start_transfer() argument
1955 _nbu2ss_ep_done( struct nbu2ss_ep *ep, struct nbu2ss_req *req, int status) _nbu2ss_ep_done() argument
1989 _nbu2ss_epn_in_int( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req) _nbu2ss_epn_in_int() argument
2034 _nbu2ss_epn_out_int( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req) _nbu2ss_epn_out_int() argument
2047 _nbu2ss_epn_in_dma_int( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req) _nbu2ss_epn_in_dma_int() argument
2084 _nbu2ss_epn_out_dma_int( struct nbu2ss_udc *udc, struct nbu2ss_ep *ep, struct nbu2ss_req *req) _nbu2ss_epn_out_dma_int() argument
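
Across the emxx_udc hits the gadget request lifecycle is visible: alloc_request zeroes a wrapper, marks req.dma as DMA_ADDR_INVALID and initialises the queue node; ep_queue validates the request, optionally bounces unaligned buffers, maps DMA, links the request onto ep->queue and may start the transfer immediately; _nbu2ss_ep_done unlinks it, fixes up the status, unmaps DMA and calls the gadget's completion. A stripped-down model of that queue/complete pairing (illustrative structures, not the driver's types; the real driver keeps a FIFO list_head, this stand-in is LIFO for brevity):

    #include <errno.h>
    #include <stddef.h>

    struct sketch_udc_req {
        void (*complete)(struct sketch_udc_req *r, int status);
        int status;
        struct sketch_udc_req *next;     /* single-linked stand-in for list_head */
    };

    struct sketch_ep { struct sketch_udc_req *queue; };

    /* Sketch: queue = link the request and (if idle) start the transfer. */
    static void sketch_ep_queue(struct sketch_ep *ep, struct sketch_udc_req *r)
    {
        r->status = -EINPROGRESS;        /* pending while on the endpoint queue */
        r->next = ep->queue;
        ep->queue = r;
        /* ... kick the hardware transfer when the endpoint is idle ... */
    }

    /* Sketch: done = unlink, record the final status, notify the gadget. */
    static void sketch_ep_done(struct sketch_ep *ep, struct sketch_udc_req *r, int status)
    {
        ep->queue = r->next;             /* real code does list_del_init() */
        if (r->status == -EINPROGRESS)
            r->status = status;
        r->complete(r, r->status);
    }
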
/linux-4.1.27/drivers/isdn/hardware/eicon/
H A Ddiddfunc.c54 IDI_SYNC_REQ req; connect_didd() local
63 req.didd_notify.e.Req = 0; connect_didd()
64 req.didd_notify.e.Rc = connect_didd()
66 req.didd_notify.info.callback = (void *)didd_callback; connect_didd()
67 req.didd_notify.info.context = NULL; connect_didd()
68 _DAdapter.request((ENTITY *)&req); connect_didd()
69 if (req.didd_notify.e.Rc != 0xff) connect_didd()
71 notify_handle = req.didd_notify.info.handle; connect_didd()
84 IDI_SYNC_REQ req; disconnect_didd() local
86 req.didd_notify.e.Req = 0; disconnect_didd()
87 req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY; disconnect_didd()
88 req.didd_notify.info.handle = notify_handle; disconnect_didd()
89 _DAdapter.request((ENTITY *)&req); disconnect_didd()
H A Ddivasfunc.c67 IDI_SYNC_REQ req; diva_xdi_didd_register_adapter() local
81 req.didd_remove_adapter.e.Req = 0; diva_xdi_didd_register_adapter()
82 req.didd_add_adapter.e.Rc = IDI_SYNC_REQ_DIDD_ADD_ADAPTER; diva_xdi_didd_register_adapter()
83 req.didd_add_adapter.info.descriptor = (void *) &d; diva_xdi_didd_register_adapter()
84 DAdapter.request((ENTITY *)&req); diva_xdi_didd_register_adapter()
85 if (req.didd_add_adapter.e.Rc != 0xff) { diva_xdi_didd_register_adapter()
97 IDI_SYNC_REQ req; diva_xdi_didd_remove_adapter() local
102 req.didd_remove_adapter.e.Req = 0; diva_xdi_didd_remove_adapter()
103 req.didd_remove_adapter.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER; diva_xdi_didd_remove_adapter()
104 req.didd_remove_adapter.info.p_request = diva_xdi_didd_remove_adapter()
106 DAdapter.request((ENTITY *)&req); diva_xdi_didd_remove_adapter()
160 IDI_SYNC_REQ req; connect_didd() local
169 req.didd_notify.e.Req = 0; connect_didd()
170 req.didd_notify.e.Rc = connect_didd()
172 req.didd_notify.info.callback = (void *)didd_callback; connect_didd()
173 req.didd_notify.info.context = NULL; connect_didd()
174 DAdapter.request((ENTITY *)&req); connect_didd()
175 if (req.didd_notify.e.Rc != 0xff) { connect_didd()
179 notify_handle = req.didd_notify.info.handle; connect_didd()
199 IDI_SYNC_REQ req; disconnect_didd() local
203 req.didd_notify.e.Req = 0; disconnect_didd()
204 req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY; disconnect_didd()
205 req.didd_notify.info.handle = notify_handle; disconnect_didd()
206 DAdapter.request((ENTITY *)&req); disconnect_didd()
H A Dmntfunc.c79 IDI_SYNC_REQ req; connect_didd() local
88 req.didd_notify.e.Req = 0; connect_didd()
89 req.didd_notify.e.Rc = connect_didd()
91 req.didd_notify.info.callback = (void *)didd_callback; connect_didd()
92 req.didd_notify.info.context = NULL; connect_didd()
93 DAdapter.request((ENTITY *)&req); connect_didd()
94 if (req.didd_notify.e.Rc != 0xff) connect_didd()
96 notify_handle = req.didd_notify.info.handle; connect_didd()
98 req.didd_add_adapter.e.Req = 0; connect_didd()
99 req.didd_add_adapter.e.Rc = connect_didd()
101 req.didd_add_adapter.info.descriptor = connect_didd()
103 DAdapter.request((ENTITY *)&req); connect_didd()
104 if (req.didd_add_adapter.e.Rc != 0xff) connect_didd()
119 IDI_SYNC_REQ req; disconnect_didd() local
121 req.didd_notify.e.Req = 0; disconnect_didd()
122 req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY; disconnect_didd()
123 req.didd_notify.info.handle = notify_handle; disconnect_didd()
124 DAdapter.request((ENTITY *)&req); disconnect_didd()
126 req.didd_remove_adapter.e.Req = 0; disconnect_didd()
127 req.didd_remove_adapter.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER; disconnect_didd()
128 req.didd_remove_adapter.info.p_request = disconnect_didd()
130 DAdapter.request((ENTITY *)&req); disconnect_didd()
H A Didifunc.c188 IDI_SYNC_REQ req; connect_didd() local
197 req.didd_notify.e.Req = 0; connect_didd()
198 req.didd_notify.e.Rc = connect_didd()
200 req.didd_notify.info.callback = (void *)didd_callback; connect_didd()
201 req.didd_notify.info.context = NULL; connect_didd()
202 DAdapter.request((ENTITY *)&req); connect_didd()
203 if (req.didd_notify.e.Rc != 0xff) { connect_didd()
207 notify_handle = req.didd_notify.info.handle; connect_didd()
230 IDI_SYNC_REQ req; disconnect_didd() local
234 req.didd_notify.e.Req = 0; disconnect_didd()
235 req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY; disconnect_didd()
236 req.didd_notify.info.handle = notify_handle; disconnect_didd()
237 DAdapter.request((ENTITY *)&req); disconnect_didd()
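The four eicon files above (diddfunc.c, divasfunc.c, mntfunc.c, idifunc.c) all drive DIDD through the same synchronous handshake: zero the Req field, put the request code in Rc, fill the info union, and hand the IDI_SYNC_REQ to the DAdapter entry point. A minimal hedged sketch of the teardown half, which is the part fully visible in the disconnect_didd() hits, is below; DAdapter, notify_handle and the IDI_SYNC_REQ layout are taken as given from the surrounding driver and this is not a standalone buildable unit.

	/* Hedged sketch of the disconnect_didd() pattern shown above. */
	static void disconnect_didd_sketch(void)
	{
		IDI_SYNC_REQ req;

		req.didd_notify.e.Req = 0;
		req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
		req.didd_notify.info.handle = notify_handle;	/* handle saved by connect_didd() */
		DAdapter.request((ENTITY *)&req);		/* synchronous: Rc carries the result */
	}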
/linux-4.1.27/arch/s390/pci/
H A Dpci_insn.c16 static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status) __mpcifc() argument
21 " .insn rxy,0xe300000000d0,%[req],%[fib]\n" __mpcifc()
24 : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib) __mpcifc()
26 *status = req >> 24 & 0xff; __mpcifc()
30 int zpci_mod_fc(u64 req, struct zpci_fib *fib) zpci_mod_fc() argument
35 cc = __mpcifc(req, fib, &status); zpci_mod_fc()
89 static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status) __pcilg() argument
91 register u64 __req asm("2") = req; __pcilg()
97 " .insn rre,0xb9d20000,%[data],%[req]\n" __pcilg()
102 : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req) __pcilg()
112 int zpci_load(u64 *data, u64 req, u64 offset) zpci_load() argument
118 cc = __pcilg(data, req, offset, &status); zpci_load()
124 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", zpci_load()
125 __func__, cc, status, req, offset); zpci_load()
131 static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status) __pcistg() argument
133 register u64 __req asm("2") = req; __pcistg()
138 " .insn rre,0xb9d00000,%[data],%[req]\n" __pcistg()
143 : [cc] "+d" (cc), [req] "+d" (__req) __pcistg()
150 int zpci_store(u64 data, u64 req, u64 offset) zpci_store() argument
156 cc = __pcistg(data, req, offset, &status); zpci_store()
162 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", zpci_store()
163 __func__, cc, status, req, offset); zpci_store()
169 static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status) __pcistb() argument
174 " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n" __pcistb()
179 : [cc] "+d" (cc), [req] "+d" (req) __pcistb()
182 *status = req >> 24 & 0xff; __pcistb()
186 int zpci_store_block(const u64 *data, u64 req, u64 offset) zpci_store_block() argument
192 cc = __pcistb(data, req, offset, &status); zpci_store_block()
198 printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n", zpci_store_block()
199 __func__, cc, status, req, offset); zpci_store_block()
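The pci_insn.c hits show one wrapper shape reused by zpci_mod_fc(), zpci_load(), zpci_store() and zpci_store_block(): issue the inline-asm helper, pull the status byte out of the request word, and report failures with a one-shot printk. A hedged condensation of that shape, patterned on the zpci_load() lines, follows; the retry-on-busy handling present in the real functions is elided.

	/* Hedged sketch of the zpci_load() wrapper shape shown above. */
	int zpci_load_sketch(u64 *data, u64 req, u64 offset)
	{
		u8 status;
		int cc;

		cc = __pcilg(data, req, offset, &status);	/* PCI-load instruction helper */
		if (cc)
			printk_once(KERN_ERR "%s: error cc: %d status: %d req: %Lx offset: %Lx\n",
				    __func__, cc, status, req, offset);
		return cc;
	}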
/linux-4.1.27/drivers/staging/lustre/lustre/mdc/
H A Dmdc_reint.c108 struct ptlrpc_request *req; mdc_setattr() local
124 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_setattr()
126 if (req == NULL) { mdc_setattr()
130 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_setattr()
132 req_capsule_set_size(&req->rq_pill, &RMF_MDT_EPOCH, RCL_CLIENT, mdc_setattr()
134 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, ealen); mdc_setattr()
135 req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_CLIENT, mdc_setattr()
138 rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); mdc_setattr()
140 ptlrpc_request_free(req); mdc_setattr()
151 mdc_setattr_pack(req, op_data, ea, ealen, ea2, ea2len); mdc_setattr()
153 ptlrpc_request_set_replen(req); mdc_setattr()
155 req->rq_import->imp_replayable) { mdc_setattr()
160 DEBUG_REQ(D_ERROR, req, "Can't allocate md_open_data"); mdc_setattr()
162 req->rq_replay = 1; mdc_setattr()
163 req->rq_cb_data = *mod; mdc_setattr()
164 (*mod)->mod_open_req = req; mdc_setattr()
165 req->rq_commit_cb = mdc_commit_open; mdc_setattr()
177 rc = mdc_reint(req, rpc_lock, LUSTRE_IMP_FULL); mdc_setattr()
184 epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); mdc_setattr()
185 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); mdc_setattr()
190 req->rq_replay_cb = mdc_replay_open; mdc_setattr()
197 *request = req; mdc_setattr()
198 if (rc && req->rq_commit_cb) { mdc_setattr()
202 req->rq_commit_cb(req); mdc_setattr()
212 struct ptlrpc_request *req; mdc_create() local
240 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_create()
242 if (req == NULL) { mdc_create()
246 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_create()
247 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, mdc_create()
249 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, mdc_create()
252 rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); mdc_create()
254 ptlrpc_request_free(req); mdc_create()
262 mdc_create_pack(req, op_data, data, datalen, mode, uid, mdc_create()
265 ptlrpc_request_set_replen(req); mdc_create()
269 req->rq_no_retry_einprogress = 1; mdc_create()
272 req->rq_generation_set = 1; mdc_create()
273 req->rq_import_generation = generation; mdc_create()
274 req->rq_sent = get_seconds() + resends; mdc_create()
278 rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, level); mdc_create()
287 ptlrpc_req_finished(req); mdc_create()
304 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); mdc_create()
307 capa = req_capsule_server_get(&req->rq_pill, mdc_create()
314 *request = req; mdc_create()
323 struct ptlrpc_request *req = *request; mdc_unlink() local
326 LASSERT(req == NULL); mdc_unlink()
340 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_unlink()
342 if (req == NULL) { mdc_unlink()
346 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_unlink()
347 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, mdc_unlink()
350 rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); mdc_unlink()
352 ptlrpc_request_free(req); mdc_unlink()
356 mdc_unlink_pack(req, op_data); mdc_unlink()
358 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, mdc_unlink()
360 req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER, mdc_unlink()
362 ptlrpc_request_set_replen(req); mdc_unlink()
364 *request = req; mdc_unlink()
366 rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL); mdc_unlink()
377 struct ptlrpc_request *req; mdc_link() local
391 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK); mdc_link()
392 if (req == NULL) { mdc_link()
396 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_link()
397 mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa2); mdc_link()
398 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, mdc_link()
401 rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); mdc_link()
403 ptlrpc_request_free(req); mdc_link()
407 mdc_link_pack(req, op_data); mdc_link()
408 ptlrpc_request_set_replen(req); mdc_link()
410 rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL); mdc_link()
411 *request = req; mdc_link()
424 struct ptlrpc_request *req; mdc_rename() local
448 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_rename()
450 if (req == NULL) { mdc_rename()
455 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_rename()
456 mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa2); mdc_rename()
457 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, oldlen + 1); mdc_rename()
458 req_capsule_set_size(&req->rq_pill, &RMF_SYMTGT, RCL_CLIENT, newlen+1); mdc_rename()
460 rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); mdc_rename()
462 ptlrpc_request_free(req); mdc_rename()
466 if (exp_connect_cancelset(exp) && req) mdc_rename()
467 ldlm_cli_cancel_list(&cancels, count, req, 0); mdc_rename()
469 mdc_rename_pack(req, op_data, old, oldlen, new, newlen); mdc_rename()
471 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, mdc_rename()
473 req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER, mdc_rename()
475 ptlrpc_request_set_replen(req); mdc_rename()
477 rc = mdc_reint(req, obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL); mdc_rename()
478 *request = req; mdc_rename()
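Every reint path above (mdc_setattr, mdc_create, mdc_unlink, mdc_link, mdc_rename) builds its RPC the same way: allocate the request format, size the client-side capsule fields, prepare it as an ELC request, pack the operation, set the reply length, then hand it to mdc_reint() and return the request to the caller. The hedged outline below condenses that flow using only the calls visible in the hits; buffer sizing, cancel-list handling and error paths are reduced to comments.

	/* Hedged outline of the reint request flow repeated above. */
	static int mdc_reint_flow_sketch(struct obd_export *exp,
					 struct md_op_data *op_data,
					 struct ptlrpc_request **request)
	{
		LIST_HEAD(cancels);
		struct ptlrpc_request *req;
		int rc;

		req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_REINT_LINK);
		if (req == NULL)
			return -ENOMEM;

		mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);
		/* req_capsule_set_size(...) calls for name/EA buffers go here */

		rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, 0);
		if (rc) {
			ptlrpc_request_free(req);
			return rc;
		}

		mdc_link_pack(req, op_data);		/* or the matching *_pack() helper */
		ptlrpc_request_set_replen(req);

		rc = mdc_reint(req, exp->exp_obd->u.cli.cl_rpc_lock, LUSTRE_IMP_FULL);
		*request = req;				/* caller drops it with ptlrpc_req_finished() */
		return rc;
	}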
H A Dmdc_request.c63 static int mdc_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req, mdc_unpack_capa() argument
70 capa = req_capsule_server_get(&req->rq_pill, field); mdc_unpack_capa()
85 static inline int mdc_queue_wait(struct ptlrpc_request *req) mdc_queue_wait() argument
87 struct client_obd *cli = &req->rq_import->imp_obd->u.cli; mdc_queue_wait()
97 rc = ptlrpc_queue_wait(req); mdc_queue_wait()
108 struct ptlrpc_request *req; send_getstatus() local
112 req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_GETSTATUS, send_getstatus()
114 if (req == NULL) send_getstatus()
117 mdc_pack_body(req, NULL, NULL, 0, 0, -1, 0); send_getstatus()
118 lustre_msg_add_flags(req->rq_reqmsg, msg_flags); send_getstatus()
119 req->rq_send_state = level; send_getstatus()
121 ptlrpc_request_set_replen(req); send_getstatus()
123 rc = ptlrpc_queue_wait(req); send_getstatus()
127 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); send_getstatus()
134 rc = mdc_unpack_capa(NULL, req, &RMF_CAPA1, pc); send_getstatus()
143 lustre_msg_get_last_committed(req->rq_repmsg)); send_getstatus()
145 ptlrpc_req_finished(req); send_getstatus()
168 struct ptlrpc_request *req) mdc_getattr_common()
170 struct req_capsule *pill = &req->rq_pill; mdc_getattr_common()
176 rc = ptlrpc_queue_wait(req); mdc_getattr_common()
220 struct ptlrpc_request *req; mdc_getattr() local
229 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR); mdc_getattr()
230 if (req == NULL) mdc_getattr()
233 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_getattr()
235 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR); mdc_getattr()
237 ptlrpc_request_free(req); mdc_getattr()
241 mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1, mdc_getattr()
244 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, mdc_getattr()
248 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, mdc_getattr()
251 ptlrpc_request_set_replen(req); mdc_getattr()
253 rc = mdc_getattr_common(exp, req); mdc_getattr()
255 ptlrpc_req_finished(req); mdc_getattr()
257 *request = req; mdc_getattr()
264 struct ptlrpc_request *req; mdc_getattr_name() local
268 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_getattr_name()
270 if (req == NULL) mdc_getattr_name()
273 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_getattr_name()
274 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, mdc_getattr_name()
277 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR_NAME); mdc_getattr_name()
279 ptlrpc_request_free(req); mdc_getattr_name()
283 mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1, mdc_getattr_name()
288 char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME); mdc_getattr_name()
295 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, mdc_getattr_name()
297 ptlrpc_request_set_replen(req); mdc_getattr_name()
299 rc = mdc_getattr_common(exp, req); mdc_getattr_name()
301 ptlrpc_req_finished(req); mdc_getattr_name()
303 *request = req; mdc_getattr_name()
312 struct ptlrpc_request *req; mdc_is_subdir() local
316 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), mdc_is_subdir()
319 if (req == NULL) mdc_is_subdir()
322 mdc_is_subdir_pack(req, pfid, cfid, 0); mdc_is_subdir()
323 ptlrpc_request_set_replen(req); mdc_is_subdir()
325 rc = ptlrpc_queue_wait(req); mdc_is_subdir()
327 ptlrpc_req_finished(req); mdc_is_subdir()
329 *request = req; mdc_is_subdir()
341 struct ptlrpc_request *req; mdc_xattr_common() local
347 req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt); mdc_xattr_common()
348 if (req == NULL) mdc_xattr_common()
351 mdc_set_capa_size(req, &RMF_CAPA1, oc); mdc_xattr_common()
354 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, mdc_xattr_common()
359 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, mdc_xattr_common()
371 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, mdc_xattr_common()
378 rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count); mdc_xattr_common()
380 ptlrpc_request_free(req); mdc_xattr_common()
384 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode); mdc_xattr_common()
386 ptlrpc_request_free(req); mdc_xattr_common()
396 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); mdc_xattr_common()
409 mdc_pack_capa(req, &RMF_CAPA1, oc); mdc_xattr_common()
411 mdc_pack_body(req, fid, oc, valid, output_size, suppgid, flags); mdc_xattr_common()
415 tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); mdc_xattr_common()
419 tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA); mdc_xattr_common()
423 if (req_capsule_has_field(&req->rq_pill, &RMF_EADATA, RCL_SERVER)) mdc_xattr_common()
424 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, mdc_xattr_common()
426 ptlrpc_request_set_replen(req); mdc_xattr_common()
432 rc = ptlrpc_queue_wait(req); mdc_xattr_common()
438 ptlrpc_req_finished(req); mdc_xattr_common()
440 *request = req; mdc_xattr_common()
467 static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md) mdc_unpack_acl() argument
469 struct req_capsule *pill = &req->rq_pill; mdc_unpack_acl()
504 #define mdc_unpack_acl(req, md) 0
507 int mdc_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req, mdc_get_lustre_md() argument
511 struct req_capsule *pill = &req->rq_pill; mdc_get_lustre_md()
612 rc = mdc_unpack_acl(req, md); mdc_get_lustre_md()
624 rc = mdc_unpack_capa(NULL, req, &RMF_CAPA1, &oc); mdc_get_lustre_md()
633 rc = mdc_unpack_capa(NULL, req, &RMF_CAPA2, &oc); mdc_get_lustre_md()
667 void mdc_replay_open(struct ptlrpc_request *req) mdc_replay_open() argument
669 struct md_open_data *mod = req->rq_cb_data; mdc_replay_open()
676 DEBUG_REQ(D_ERROR, req, mdc_replay_open()
681 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); mdc_replay_open()
713 void mdc_commit_open(struct ptlrpc_request *req) mdc_commit_open() argument
715 struct md_open_data *mod = req->rq_cb_data; mdc_commit_open()
734 ptlrpc_request_addref(req); mdc_commit_open()
735 spin_lock(&req->rq_lock); mdc_commit_open()
736 req->rq_committed = 1; mdc_commit_open()
737 spin_unlock(&req->rq_lock); mdc_commit_open()
738 req->rq_cb_data = NULL; mdc_commit_open()
846 static void mdc_close_handle_reply(struct ptlrpc_request *req, mdc_close_handle_reply() argument
851 if (req && rc == -EAGAIN) { mdc_close_handle_reply()
852 repbody = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); mdc_close_handle_reply()
853 epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); mdc_close_handle_reply()
865 struct ptlrpc_request *req; mdc_close() local
886 req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt); mdc_close()
887 if (req == NULL) mdc_close()
890 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_close()
892 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE); mdc_close()
894 ptlrpc_request_free(req); mdc_close()
901 req->rq_request_portal = MDS_READPAGE_PORTAL; mdc_close()
902 ptlrpc_at_set_req_timeout(req); mdc_close()
910 mod->mod_close_req = req; mdc_close()
920 "couldn't find open req; expecting close error\n"); mdc_close()
923 mdc_close_pack(req, op_data); mdc_close()
925 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, mdc_close()
927 req_capsule_set_size(&req->rq_pill, &RMF_LOGCOOKIES, RCL_SERVER, mdc_close()
930 ptlrpc_request_set_replen(req); mdc_close()
933 rc = ptlrpc_queue_wait(req); mdc_close()
936 if (req->rq_repmsg == NULL) { mdc_close()
937 CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req, mdc_close()
938 req->rq_status); mdc_close()
940 rc = req->rq_status ?: -EIO; mdc_close()
944 rc = lustre_msg_get_status(req->rq_repmsg); mdc_close()
945 if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) { mdc_close()
946 DEBUG_REQ(D_ERROR, req, mdc_close()
951 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); mdc_close()
961 DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc); mdc_close()
972 * thus close req does not keep a reference on mod anymore. */ mdc_close()
975 *request = req; mdc_close()
976 mdc_close_handle_reply(req, op_data, rc); mdc_close()
984 struct ptlrpc_request *req; mdc_done_writing() local
987 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_done_writing()
989 if (req == NULL) mdc_done_writing()
992 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_done_writing()
993 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_DONE_WRITING); mdc_done_writing()
995 ptlrpc_request_free(req); mdc_done_writing()
1004 mod->mod_close_req = req; mdc_done_writing()
1013 mdc_close_pack(req, op_data); mdc_done_writing()
1014 ptlrpc_request_set_replen(req); mdc_done_writing()
1017 rc = ptlrpc_queue_wait(req); mdc_done_writing()
1039 /* Since now, mod is accessed through setattr req only, mdc_done_writing()
1040 * thus DW req does not keep a reference on mod anymore. */ mdc_done_writing()
1044 mdc_close_handle_reply(req, op_data, rc); mdc_done_writing()
1045 ptlrpc_req_finished(req); mdc_done_writing()
1053 struct ptlrpc_request *req; mdc_readpage() local
1065 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE); mdc_readpage()
1066 if (req == NULL) mdc_readpage()
1069 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_readpage()
1071 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE); mdc_readpage()
1073 ptlrpc_request_free(req); mdc_readpage()
1077 req->rq_request_portal = MDS_READPAGE_PORTAL; mdc_readpage()
1078 ptlrpc_at_set_req_timeout(req); mdc_readpage()
1080 desc = ptlrpc_prep_bulk_imp(req, op_data->op_npages, 1, BULK_PUT_SINK, mdc_readpage()
1083 ptlrpc_request_free(req); mdc_readpage()
1087 /* NB req now owns desc and will free it when it gets freed */ mdc_readpage()
1091 mdc_readdir_pack(req, op_data->op_offset, mdc_readpage()
1095 ptlrpc_request_set_replen(req); mdc_readpage()
1096 rc = ptlrpc_queue_wait(req); mdc_readpage()
1098 ptlrpc_req_finished(req); mdc_readpage()
1114 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, mdc_readpage()
1115 req->rq_bulk->bd_nob_transferred); mdc_readpage()
1117 ptlrpc_req_finished(req); mdc_readpage()
1121 if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) { mdc_readpage()
1123 req->rq_bulk->bd_nob_transferred, mdc_readpage()
1125 ptlrpc_req_finished(req); mdc_readpage()
1129 *request = req; mdc_readpage()
1138 struct ptlrpc_request *req; mdc_statfs() local
1154 req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS, mdc_statfs()
1156 if (req == NULL) { mdc_statfs()
1161 ptlrpc_request_set_replen(req); mdc_statfs()
1165 req->rq_no_resend = 1; mdc_statfs()
1166 req->rq_no_delay = 1; mdc_statfs()
1169 rc = ptlrpc_queue_wait(req); mdc_statfs()
1177 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); mdc_statfs()
1185 ptlrpc_req_finished(req); mdc_statfs()
1246 struct ptlrpc_request *req; mdc_ioc_hsm_progress() local
1249 req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS, mdc_ioc_hsm_progress()
1251 if (req == NULL) { mdc_ioc_hsm_progress()
1256 mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0); mdc_ioc_hsm_progress()
1259 req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS); mdc_ioc_hsm_progress()
1268 ptlrpc_request_set_replen(req); mdc_ioc_hsm_progress()
1270 rc = mdc_queue_wait(req); mdc_ioc_hsm_progress()
1273 ptlrpc_req_finished(req); mdc_ioc_hsm_progress()
1280 struct ptlrpc_request *req; mdc_ioc_hsm_ct_register() local
1283 req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER, mdc_ioc_hsm_ct_register()
1286 if (req == NULL) { mdc_ioc_hsm_ct_register()
1291 mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0); mdc_ioc_hsm_ct_register()
1294 archive_mask = req_capsule_client_get(&req->rq_pill, mdc_ioc_hsm_ct_register()
1303 ptlrpc_request_set_replen(req); mdc_ioc_hsm_ct_register()
1305 rc = mdc_queue_wait(req); mdc_ioc_hsm_ct_register()
1308 ptlrpc_req_finished(req); mdc_ioc_hsm_ct_register()
1317 struct ptlrpc_request *req; mdc_ioc_hsm_current_action() local
1320 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_ioc_hsm_current_action()
1322 if (req == NULL) mdc_ioc_hsm_current_action()
1325 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_ioc_hsm_current_action()
1327 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION); mdc_ioc_hsm_current_action()
1329 ptlrpc_request_free(req); mdc_ioc_hsm_current_action()
1333 mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1, mdc_ioc_hsm_current_action()
1336 ptlrpc_request_set_replen(req); mdc_ioc_hsm_current_action()
1338 rc = mdc_queue_wait(req); mdc_ioc_hsm_current_action()
1342 req_hca = req_capsule_server_get(&req->rq_pill, mdc_ioc_hsm_current_action()
1352 ptlrpc_req_finished(req); mdc_ioc_hsm_current_action()
1358 struct ptlrpc_request *req; mdc_ioc_hsm_ct_unregister() local
1361 req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER, mdc_ioc_hsm_ct_unregister()
1364 if (req == NULL) { mdc_ioc_hsm_ct_unregister()
1369 mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0); mdc_ioc_hsm_ct_unregister()
1371 ptlrpc_request_set_replen(req); mdc_ioc_hsm_ct_unregister()
1373 rc = mdc_queue_wait(req); mdc_ioc_hsm_ct_unregister()
1376 ptlrpc_req_finished(req); mdc_ioc_hsm_ct_unregister()
1385 struct ptlrpc_request *req; mdc_ioc_hsm_state_get() local
1388 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_ioc_hsm_state_get()
1390 if (req == NULL) mdc_ioc_hsm_state_get()
1393 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_ioc_hsm_state_get()
1395 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET); mdc_ioc_hsm_state_get()
1397 ptlrpc_request_free(req); mdc_ioc_hsm_state_get()
1401 mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1, mdc_ioc_hsm_state_get()
1404 ptlrpc_request_set_replen(req); mdc_ioc_hsm_state_get()
1406 rc = mdc_queue_wait(req); mdc_ioc_hsm_state_get()
1410 req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE); mdc_ioc_hsm_state_get()
1419 ptlrpc_req_finished(req); mdc_ioc_hsm_state_get()
1428 struct ptlrpc_request *req; mdc_ioc_hsm_state_set() local
1431 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_ioc_hsm_state_set()
1433 if (req == NULL) mdc_ioc_hsm_state_set()
1436 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_ioc_hsm_state_set()
1438 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET); mdc_ioc_hsm_state_set()
1440 ptlrpc_request_free(req); mdc_ioc_hsm_state_set()
1444 mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1, mdc_ioc_hsm_state_set()
1448 req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET); mdc_ioc_hsm_state_set()
1455 ptlrpc_request_set_replen(req); mdc_ioc_hsm_state_set()
1457 rc = mdc_queue_wait(req); mdc_ioc_hsm_state_set()
1461 ptlrpc_req_finished(req); mdc_ioc_hsm_state_set()
1469 struct ptlrpc_request *req; mdc_ioc_hsm_request() local
1475 req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST); mdc_ioc_hsm_request()
1476 if (req == NULL) { mdc_ioc_hsm_request()
1481 req_capsule_set_size(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM, RCL_CLIENT, mdc_ioc_hsm_request()
1484 req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, RCL_CLIENT, mdc_ioc_hsm_request()
1487 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_REQUEST); mdc_ioc_hsm_request()
1489 ptlrpc_request_free(req); mdc_ioc_hsm_request()
1493 mdc_pack_body(req, NULL, NULL, OBD_MD_FLRMTPERM, 0, 0, 0); mdc_ioc_hsm_request()
1496 req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST); mdc_ioc_hsm_request()
1504 req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM); mdc_ioc_hsm_request()
1513 req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA); mdc_ioc_hsm_request()
1520 ptlrpc_request_set_replen(req); mdc_ioc_hsm_request()
1522 rc = mdc_queue_wait(req); mdc_ioc_hsm_request()
1526 ptlrpc_req_finished(req); mdc_ioc_hsm_request()
1693 struct ptlrpc_request *req; mdc_quotacheck() local
1697 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), mdc_quotacheck()
1700 if (req == NULL) mdc_quotacheck()
1703 body = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); mdc_quotacheck()
1706 ptlrpc_request_set_replen(req); mdc_quotacheck()
1711 rc = ptlrpc_queue_wait(req); mdc_quotacheck()
1714 ptlrpc_req_finished(req); mdc_quotacheck()
1737 struct ptlrpc_request *req; mdc_quotactl() local
1741 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), mdc_quotactl()
1744 if (req == NULL) mdc_quotactl()
1747 oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL); mdc_quotactl()
1750 ptlrpc_request_set_replen(req); mdc_quotactl()
1751 ptlrpc_at_set_req_timeout(req); mdc_quotactl()
1752 req->rq_no_resend = 1; mdc_quotactl()
1754 rc = ptlrpc_queue_wait(req); mdc_quotactl()
1758 if (req->rq_repmsg) { mdc_quotactl()
1759 oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL); mdc_quotactl()
1770 ptlrpc_req_finished(req); mdc_quotactl()
1779 struct ptlrpc_request *req; mdc_ioc_swap_layouts() local
1796 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_ioc_swap_layouts()
1798 if (req == NULL) { mdc_ioc_swap_layouts()
1803 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_ioc_swap_layouts()
1804 mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa2); mdc_ioc_swap_layouts()
1806 rc = mdc_prep_elc_req(exp, req, MDS_SWAP_LAYOUTS, &cancels, count); mdc_ioc_swap_layouts()
1808 ptlrpc_request_free(req); mdc_ioc_swap_layouts()
1812 mdc_swap_layouts_pack(req, op_data); mdc_ioc_swap_layouts()
1814 payload = req_capsule_client_get(&req->rq_pill, &RMF_SWAP_LAYOUTS); mdc_ioc_swap_layouts()
1819 ptlrpc_request_set_replen(req); mdc_ioc_swap_layouts()
1821 rc = ptlrpc_queue_wait(req); mdc_ioc_swap_layouts()
1826 ptlrpc_req_finished(req); mdc_ioc_swap_layouts()
1985 struct ptlrpc_request *req; mdc_get_info_rpc() local
1989 req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO); mdc_get_info_rpc()
1990 if (req == NULL) mdc_get_info_rpc()
1993 req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY, mdc_get_info_rpc()
1995 req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VALLEN, mdc_get_info_rpc()
1998 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO); mdc_get_info_rpc()
2000 ptlrpc_request_free(req); mdc_get_info_rpc()
2004 tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY); mdc_get_info_rpc()
2006 tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_VALLEN); mdc_get_info_rpc()
2009 req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VAL, mdc_get_info_rpc()
2011 ptlrpc_request_set_replen(req); mdc_get_info_rpc()
2013 rc = ptlrpc_queue_wait(req); mdc_get_info_rpc()
2017 tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL); mdc_get_info_rpc()
2019 if (ptlrpc_rep_need_swab(req)) { mdc_get_info_rpc()
2024 ptlrpc_req_finished(req); mdc_get_info_rpc()
2272 struct ptlrpc_request *req; mdc_sync() local
2276 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC); mdc_sync()
2277 if (req == NULL) mdc_sync()
2280 mdc_set_capa_size(req, &RMF_CAPA1, oc); mdc_sync()
2282 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC); mdc_sync()
2284 ptlrpc_request_free(req); mdc_sync()
2288 mdc_pack_body(req, fid, oc, 0, 0, -1, 0); mdc_sync()
2290 ptlrpc_request_set_replen(req); mdc_sync()
2292 rc = ptlrpc_queue_wait(req); mdc_sync()
2294 ptlrpc_req_finished(req); mdc_sync()
2296 *request = req; mdc_sync()
2560 struct ptlrpc_request *req; mdc_get_remote_perm() local
2566 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR); mdc_get_remote_perm()
2567 if (req == NULL) mdc_get_remote_perm()
2570 mdc_set_capa_size(req, &RMF_CAPA1, oc); mdc_get_remote_perm()
2572 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR); mdc_get_remote_perm()
2574 ptlrpc_request_free(req); mdc_get_remote_perm()
2578 mdc_pack_body(req, fid, oc, OBD_MD_FLRMTPERM, 0, suppgid, 0); mdc_get_remote_perm()
2580 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, mdc_get_remote_perm()
2583 ptlrpc_request_set_replen(req); mdc_get_remote_perm()
2585 rc = ptlrpc_queue_wait(req); mdc_get_remote_perm()
2587 ptlrpc_req_finished(req); mdc_get_remote_perm()
2589 *request = req; mdc_get_remote_perm()
2594 struct ptlrpc_request *req, void *args, mdc_interpret_renew_capa()
2606 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); mdc_interpret_renew_capa()
2617 capa = req_capsule_server_get(&req->rq_pill, &RMF_CAPA2); mdc_interpret_renew_capa()
2630 struct ptlrpc_request *req; mdc_renew_capa() local
2633 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_GETATTR, mdc_renew_capa()
2635 if (req == NULL) mdc_renew_capa()
2641 mdc_pack_body(req, &oc->c_capa.lc_fid, oc, OBD_MD_FLOSSCAPA, 0, -1, 0); mdc_renew_capa()
2642 ptlrpc_request_set_replen(req); mdc_renew_capa()
2644 CLASSERT(sizeof(*ra) <= sizeof(req->rq_async_args)); mdc_renew_capa()
2645 ra = ptlrpc_req_async_args(req); mdc_renew_capa()
2648 req->rq_interpret_reply = mdc_interpret_renew_capa; mdc_renew_capa()
2649 ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1); mdc_renew_capa()
167 mdc_getattr_common(struct obd_export *exp, struct ptlrpc_request *req) mdc_getattr_common() argument
2593 mdc_interpret_renew_capa(const struct lu_env *env, struct ptlrpc_request *req, void *args, int status) mdc_interpret_renew_capa() argument
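The simpler synchronous RPCs in mdc_request.c above (mdc_sync, mdc_getattr, the mdc_ioc_hsm_* helpers) all reduce to alloc, pack, set_replen, queue_wait, finished. A hedged sketch patterned on the mdc_sync() hits:

	/* Hedged sketch of the alloc/pack/wait/finish cycle shown above. */
	static int mdc_sync_sketch(struct obd_export *exp, const struct lu_fid *fid,
				   struct obd_capa *oc, struct ptlrpc_request **request)
	{
		struct ptlrpc_request *req;
		int rc;

		req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
		if (req == NULL)
			return -ENOMEM;

		mdc_set_capa_size(req, &RMF_CAPA1, oc);
		rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC);
		if (rc) {
			ptlrpc_request_free(req);
			return rc;
		}

		mdc_pack_body(req, fid, oc, 0, 0, -1, 0);	/* valid, size, suppgid, flags */
		ptlrpc_request_set_replen(req);

		rc = ptlrpc_queue_wait(req);			/* synchronous round trip */
		if (rc)
			ptlrpc_req_finished(req);
		else
			*request = req;				/* caller owns the reply */
		return rc;
	}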
H A Dmdc_lib.c55 void mdc_pack_capa(struct ptlrpc_request *req, mdc_pack_capa() argument
59 struct req_capsule *pill = &req->rq_pill; mdc_pack_capa()
73 void mdc_is_subdir_pack(struct ptlrpc_request *req, const struct lu_fid *pfid, mdc_is_subdir_pack() argument
76 struct mdt_body *b = req_capsule_client_get(&req->rq_pill, mdc_is_subdir_pack()
88 void mdc_swap_layouts_pack(struct ptlrpc_request *req, mdc_swap_layouts_pack() argument
91 struct mdt_body *b = req_capsule_client_get(&req->rq_pill, mdc_swap_layouts_pack()
99 mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1); mdc_swap_layouts_pack()
100 mdc_pack_capa(req, &RMF_CAPA2, op_data->op_capa2); mdc_swap_layouts_pack()
103 void mdc_pack_body(struct ptlrpc_request *req, mdc_pack_body() argument
107 struct mdt_body *b = req_capsule_client_get(&req->rq_pill, mdc_pack_body()
117 mdc_pack_capa(req, &RMF_CAPA1, oc); mdc_pack_body()
121 void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, mdc_readdir_pack() argument
124 struct mdt_body *b = req_capsule_client_get(&req->rq_pill, mdc_readdir_pack()
133 mdc_pack_capa(req, &RMF_CAPA1, oc); mdc_readdir_pack()
137 void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data, mdc_create_pack() argument
146 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); mdc_create_pack()
167 mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1); mdc_create_pack()
169 tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); mdc_create_pack()
173 tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA); mdc_create_pack()
209 void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data, mdc_open_pack() argument
218 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); mdc_open_pack()
238 mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1); mdc_open_pack()
243 tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); mdc_open_pack()
251 tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA); mdc_open_pack()
338 void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data, mdc_setattr_pack() argument
347 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); mdc_setattr_pack()
350 mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1); mdc_setattr_pack()
353 epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); mdc_setattr_pack()
360 lum = req_capsule_client_get(&req->rq_pill, &RMF_EADATA); mdc_setattr_pack()
373 memcpy(req_capsule_client_get(&req->rq_pill, &RMF_LOGCOOKIES), ea2, mdc_setattr_pack()
377 void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data) mdc_unlink_pack() argument
383 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); mdc_unlink_pack()
399 mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1); mdc_unlink_pack()
401 tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); mdc_unlink_pack()
406 void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data) mdc_link_pack() argument
412 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); mdc_link_pack()
426 mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1); mdc_link_pack()
427 mdc_pack_capa(req, &RMF_CAPA2, op_data->op_capa2); mdc_link_pack()
429 tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); mdc_link_pack()
433 void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data, mdc_rename_pack() argument
440 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); mdc_rename_pack()
455 mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1); mdc_rename_pack()
456 mdc_pack_capa(req, &RMF_CAPA2, op_data->op_capa2); mdc_rename_pack()
458 tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); mdc_rename_pack()
462 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SYMTGT); mdc_rename_pack()
467 void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags, mdc_getattr_pack() argument
470 struct mdt_body *b = req_capsule_client_get(&req->rq_pill, mdc_getattr_pack()
486 mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1); mdc_getattr_pack()
489 char *tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME); mdc_getattr_pack()
496 static void mdc_hsm_release_pack(struct ptlrpc_request *req, mdc_hsm_release_pack() argument
503 data = req_capsule_client_get(&req->rq_pill, &RMF_CLOSE_DATA); mdc_hsm_release_pack()
518 void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data) mdc_close_pack() argument
523 epoch = req_capsule_client_get(&req->rq_pill, &RMF_MDT_EPOCH); mdc_close_pack()
524 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT); mdc_close_pack()
527 mdc_pack_capa(req, &RMF_CAPA1, op_data->op_capa1); mdc_close_pack()
529 mdc_hsm_release_pack(req, op_data); mdc_close_pack()
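The mdc_lib.c pack helpers above share one shape: fetch the client-side buffer for a capsule field with req_capsule_client_get(), fill it, then append the capability with mdc_pack_capa(). The hedged fragment below shows only that shape; the mdt_body members actually written by mdc_pack_body() are not visible in the hits and are left as a comment.

	/* Hedged sketch of the pack-helper shape shown above. */
	static void mdc_pack_body_sketch(struct ptlrpc_request *req,
					 const struct lu_fid *fid, struct obd_capa *oc)
	{
		struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
							    &RMF_MDT_BODY);

		/* ... fill *b from fid, credentials and valid flags here ... */

		mdc_pack_capa(req, &RMF_CAPA1, oc);
	}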
H A Dmdc_locks.c231 static inline void mdc_clear_replay_flag(struct ptlrpc_request *req, int rc) mdc_clear_replay_flag() argument
234 if (req->rq_replay) { mdc_clear_replay_flag()
235 spin_lock(&req->rq_lock); mdc_clear_replay_flag()
236 req->rq_replay = 0; mdc_clear_replay_flag()
237 spin_unlock(&req->rq_lock); mdc_clear_replay_flag()
239 if (rc && req->rq_transno != 0) { mdc_clear_replay_flag()
240 DEBUG_REQ(D_ERROR, req, "transno returned on error rc %d", rc); mdc_clear_replay_flag()
256 static void mdc_realloc_openmsg(struct ptlrpc_request *req, mdc_realloc_openmsg() argument
262 rc = sptlrpc_cli_enlarge_reqbuf(req, DLM_INTENT_REC_OFF + 4, mdc_realloc_openmsg()
278 struct ptlrpc_request *req; mdc_intent_open_pack() local
318 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_intent_open_pack()
320 if (req == NULL) { mdc_intent_open_pack()
326 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_intent_open_pack()
329 mdc_set_capa_size(req, &RMF_CAPA2, op_data->op_capa1); mdc_intent_open_pack()
331 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, mdc_intent_open_pack()
333 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, mdc_intent_open_pack()
336 rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); mdc_intent_open_pack()
338 ptlrpc_request_free(req); mdc_intent_open_pack()
342 spin_lock(&req->rq_lock); mdc_intent_open_pack()
343 req->rq_replay = req->rq_import->imp_replayable; mdc_intent_open_pack()
344 spin_unlock(&req->rq_lock); mdc_intent_open_pack()
347 lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); mdc_intent_open_pack()
351 mdc_open_pack(req, op_data, it->it_create_mode, 0, it->it_flags, lmm, mdc_intent_open_pack()
356 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, mdc_intent_open_pack()
358 ptlrpc_request_set_replen(req); mdc_intent_open_pack()
359 return req; mdc_intent_open_pack()
367 struct ptlrpc_request *req; mdc_intent_getxattr_pack() local
374 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_intent_getxattr_pack()
376 if (req == NULL) mdc_intent_getxattr_pack()
379 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_intent_getxattr_pack()
381 rc = ldlm_prep_enqueue_req(exp, req, &cancels, count); mdc_intent_getxattr_pack()
383 ptlrpc_request_free(req); mdc_intent_getxattr_pack()
388 lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); mdc_intent_getxattr_pack()
394 mdc_pack_body(req, &op_data->op_fid1, op_data->op_capa1, mdc_intent_getxattr_pack()
397 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, mdc_intent_getxattr_pack()
400 req_capsule_set_size(&req->rq_pill, &RMF_EAVALS, mdc_intent_getxattr_pack()
403 req_capsule_set_size(&req->rq_pill, &RMF_EAVALS_LENS, mdc_intent_getxattr_pack()
406 ptlrpc_request_set_replen(req); mdc_intent_getxattr_pack()
408 return req; mdc_intent_getxattr_pack()
415 struct ptlrpc_request *req; mdc_intent_unlink_pack() local
420 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_intent_unlink_pack()
422 if (req == NULL) mdc_intent_unlink_pack()
425 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_intent_unlink_pack()
426 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, mdc_intent_unlink_pack()
429 rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); mdc_intent_unlink_pack()
431 ptlrpc_request_free(req); mdc_intent_unlink_pack()
436 lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); mdc_intent_unlink_pack()
440 mdc_unlink_pack(req, op_data); mdc_intent_unlink_pack()
442 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, mdc_intent_unlink_pack()
444 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, mdc_intent_unlink_pack()
446 ptlrpc_request_set_replen(req); mdc_intent_unlink_pack()
447 return req; mdc_intent_unlink_pack()
454 struct ptlrpc_request *req; mdc_intent_getattr_pack() local
465 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_intent_getattr_pack()
467 if (req == NULL) mdc_intent_getattr_pack()
470 mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1); mdc_intent_getattr_pack()
471 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT, mdc_intent_getattr_pack()
474 rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); mdc_intent_getattr_pack()
476 ptlrpc_request_free(req); mdc_intent_getattr_pack()
481 lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); mdc_intent_getattr_pack()
490 mdc_getattr_pack(req, valid, it->it_flags, op_data, easize); mdc_intent_getattr_pack()
492 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, easize); mdc_intent_getattr_pack()
494 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, mdc_intent_getattr_pack()
496 ptlrpc_request_set_replen(req); mdc_intent_getattr_pack()
497 return req; mdc_intent_getattr_pack()
505 struct ptlrpc_request *req; mdc_intent_layout_pack() local
510 req = ptlrpc_request_alloc(class_exp2cliimp(exp), mdc_intent_layout_pack()
512 if (req == NULL) mdc_intent_layout_pack()
515 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT, 0); mdc_intent_layout_pack()
516 rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); mdc_intent_layout_pack()
518 ptlrpc_request_free(req); mdc_intent_layout_pack()
523 lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT); mdc_intent_layout_pack()
527 layout = req_capsule_client_get(&req->rq_pill, &RMF_LAYOUT_INTENT); mdc_intent_layout_pack()
532 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, mdc_intent_layout_pack()
534 ptlrpc_request_set_replen(req); mdc_intent_layout_pack()
535 return req; mdc_intent_layout_pack()
541 struct ptlrpc_request *req; mdc_enqueue_pack() local
544 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_LDLM_ENQUEUE); mdc_enqueue_pack()
545 if (req == NULL) mdc_enqueue_pack()
548 rc = ldlm_prep_enqueue_req(exp, req, NULL, 0); mdc_enqueue_pack()
550 ptlrpc_request_free(req); mdc_enqueue_pack()
554 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, lvb_len); mdc_enqueue_pack()
555 ptlrpc_request_set_replen(req); mdc_enqueue_pack()
556 return req; mdc_enqueue_pack()
560 struct ptlrpc_request *req, mdc_finish_enqueue()
566 struct req_capsule *pill = &req->rq_pill; mdc_finish_enqueue()
577 if (req->rq_transno || req->rq_replay) { mdc_finish_enqueue()
607 intent->it_data = req; mdc_finish_enqueue()
611 if ((!req->rq_transno || intent->it_status < 0) && req->rq_replay) mdc_finish_enqueue()
612 mdc_clear_replay_flag(req, intent->it_status); mdc_finish_enqueue()
621 if (it->it_op & IT_OPEN && req->rq_replay && mdc_finish_enqueue()
623 mdc_clear_replay_flag(req, intent->it_status); mdc_finish_enqueue()
625 DEBUG_REQ(D_RPCTRACE, req, "op: %d disposition: %x, status: %d", mdc_finish_enqueue()
677 if ((it->it_op & IT_OPEN) && req->rq_replay) { mdc_finish_enqueue()
683 mdc_realloc_openmsg(req, body); mdc_finish_enqueue()
731 * is packed into RMF_DLM_LVB of req */ mdc_finish_enqueue()
795 struct ptlrpc_request *req; mdc_enqueue() local
831 req = NULL; mdc_enqueue()
833 req = mdc_intent_open_pack(exp, it, op_data, lmm, lmmsize, mdc_enqueue()
839 req = mdc_intent_unlink_pack(exp, it, op_data); mdc_enqueue()
841 req = mdc_intent_getattr_pack(exp, it, op_data); mdc_enqueue()
843 req = mdc_enqueue_pack(exp, 0); mdc_enqueue()
847 req = mdc_intent_layout_pack(exp, it, op_data); mdc_enqueue()
850 req = mdc_intent_getxattr_pack(exp, it, op_data); mdc_enqueue()
856 if (IS_ERR(req)) mdc_enqueue()
857 return PTR_ERR(req); mdc_enqueue()
859 if (req != NULL && it && it->it_op & IT_CREAT) mdc_enqueue()
862 req->rq_no_retry_einprogress = 1; mdc_enqueue()
865 req->rq_generation_set = 1; mdc_enqueue()
866 req->rq_import_generation = generation; mdc_enqueue()
867 req->rq_sent = get_seconds() + resends; mdc_enqueue()
878 mdc_clear_replay_flag(req, 0); mdc_enqueue()
879 ptlrpc_req_finished(req); mdc_enqueue()
884 rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, policy, &flags, NULL, mdc_enqueue()
908 mdc_clear_replay_flag(req, rc); mdc_enqueue()
909 ptlrpc_req_finished(req); mdc_enqueue()
913 lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); mdc_enqueue()
923 mdc_clear_replay_flag(req, rc); mdc_enqueue()
924 ptlrpc_req_finished(req); mdc_enqueue()
939 rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc); mdc_enqueue()
945 ptlrpc_req_finished(req); mdc_enqueue()
1211 struct ptlrpc_request *req, mdc_intent_getattr_async_interpret()
1233 rc = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, 1, einfo->ei_mode, mdc_intent_getattr_async_interpret()
1237 mdc_clear_replay_flag(req, rc); mdc_intent_getattr_async_interpret()
1241 lockrep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); mdc_intent_getattr_async_interpret()
1247 rc = mdc_finish_enqueue(exp, req, einfo, it, lockh, rc); mdc_intent_getattr_async_interpret()
1251 rc = mdc_finish_intent_lock(exp, req, &minfo->mi_data, it, lockh); mdc_intent_getattr_async_interpret()
1255 minfo->mi_cb(req, minfo, rc); mdc_intent_getattr_async_interpret()
1265 struct ptlrpc_request *req; mdc_intent_getattr_async() local
1285 req = mdc_intent_getattr_pack(exp, it, op_data); mdc_intent_getattr_async()
1286 if (IS_ERR(req)) mdc_intent_getattr_async()
1287 return PTR_ERR(req); mdc_intent_getattr_async()
1291 ptlrpc_req_finished(req); mdc_intent_getattr_async()
1295 rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL, mdc_intent_getattr_async()
1299 ptlrpc_req_finished(req); mdc_intent_getattr_async()
1303 CLASSERT(sizeof(*ga) <= sizeof(req->rq_async_args)); mdc_intent_getattr_async()
1304 ga = ptlrpc_req_async_args(req); mdc_intent_getattr_async()
1309 req->rq_interpret_reply = mdc_intent_getattr_async_interpret; mdc_intent_getattr_async()
1310 ptlrpcd_add_req(req, PDL_POLICY_LOCAL, -1); mdc_intent_getattr_async()
559 mdc_finish_enqueue(struct obd_export *exp, struct ptlrpc_request *req, struct ldlm_enqueue_info *einfo, struct lookup_intent *it, struct lustre_handle *lockh, int rc) mdc_finish_enqueue() argument
1210 mdc_intent_getattr_async_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *args, int rc) mdc_intent_getattr_async_interpret() argument
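The mdc_intent_*_pack() builders in mdc_locks.c above (open, getattr, unlink, layout, getxattr) all follow one template: allocate the intent request format, run ldlm_prep_enqueue_req(), fill the RMF_LDLM_INTENT buffer, pack the underlying operation, size the server-side reply fields, and set the reply length. A hedged sketch closest to mdc_intent_unlink_pack() is below; RQF_LDLM_INTENT_UNLINK and the intent-opcode member are assumptions, since the hits only show the buffer being fetched.

	/* Hedged sketch of the intent-request template shared above. */
	static struct ptlrpc_request *intent_pack_sketch(struct obd_export *exp,
							 struct md_op_data *op_data,
							 int easize)
	{
		struct ptlrpc_request *req;
		struct ldlm_intent *lit;
		int rc;

		req = ptlrpc_request_alloc(class_exp2cliimp(exp),
					   &RQF_LDLM_INTENT_UNLINK);	/* assumed format name */
		if (req == NULL)
			return ERR_PTR(-ENOMEM);

		mdc_set_capa_size(req, &RMF_CAPA1, op_data->op_capa1);

		rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);	/* no early lock cancels */
		if (rc) {
			ptlrpc_request_free(req);
			return ERR_PTR(rc);
		}

		lit = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
		/* fill the intent opcode in *lit here (member not shown in the hits) */

		mdc_unlink_pack(req, op_data);			/* reuse the reint pack helper */
		req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER, easize);
		ptlrpc_request_set_replen(req);
		return req;
	}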
H A Dmdc_internal.h52 void mdc_pack_body(struct ptlrpc_request *req, const struct lu_fid *fid,
55 void mdc_pack_capa(struct ptlrpc_request *req,
57 int mdc_pack_req(struct ptlrpc_request *req, int version, int opc);
58 void mdc_is_subdir_pack(struct ptlrpc_request *req, const struct lu_fid *pfid,
60 void mdc_swap_layouts_pack(struct ptlrpc_request *req,
62 void mdc_readdir_pack(struct ptlrpc_request *req, __u64 pgoff, __u32 size,
64 void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
66 void mdc_setattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
68 void mdc_create_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
71 void mdc_open_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
74 void mdc_unlink_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
75 void mdc_getxattr_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
76 void mdc_link_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
77 void mdc_rename_pack(struct ptlrpc_request *req, struct md_op_data *op_data,
79 void mdc_close_pack(struct ptlrpc_request *req, struct md_op_data *op_data);
102 struct ptlrpc_request **req, __u64 extra_lock_flags);
117 int mdc_get_lustre_md(struct obd_export *md_exp, struct ptlrpc_request *req,
129 void mdc_commit_open(struct ptlrpc_request *req);
130 void mdc_replay_open(struct ptlrpc_request *req);
150 static inline void mdc_set_capa_size(struct ptlrpc_request *req, mdc_set_capa_size() argument
155 req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0); mdc_set_capa_size()
174 struct ptlrpc_request *req, int opc, mdc_prep_elc_req()
177 return ldlm_prep_elc_req(exp, req, LUSTRE_MDS_VERSION, opc, 0, cancels, mdc_prep_elc_req()
173 mdc_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req, int opc, struct list_head *cancels, int count) mdc_prep_elc_req() argument
/linux-4.1.27/net/sunrpc/
H A Dbackchannel_rqst.c57 static void xprt_free_allocation(struct rpc_rqst *req) xprt_free_allocation() argument
61 dprintk("RPC: free allocations for req= %p\n", req); xprt_free_allocation()
62 WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); xprt_free_allocation()
63 xbufp = &req->rq_rcv_buf; xprt_free_allocation()
65 xbufp = &req->rq_snd_buf; xprt_free_allocation()
67 kfree(req); xprt_free_allocation()
92 struct rpc_rqst *req, *tmp; xprt_setup_backchannel() local
109 req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL); xprt_setup_backchannel()
110 if (req == NULL) { xprt_setup_backchannel()
116 dprintk("RPC: adding req= %p\n", req); xprt_setup_backchannel()
117 list_add(&req->rq_bc_pa_list, &tmp_list); xprt_setup_backchannel()
119 req->rq_xprt = xprt; xprt_setup_backchannel()
120 INIT_LIST_HEAD(&req->rq_list); xprt_setup_backchannel()
121 INIT_LIST_HEAD(&req->rq_bc_list); xprt_setup_backchannel()
129 xbufp = &req->rq_rcv_buf; xprt_setup_backchannel()
145 xbufp = &req->rq_snd_buf; xprt_setup_backchannel()
170 list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) { xprt_setup_backchannel()
171 list_del(&req->rq_bc_pa_list); xprt_setup_backchannel()
172 xprt_free_allocation(req); xprt_setup_backchannel()
191 struct rpc_rqst *req = NULL, *tmp = NULL; xprt_destroy_backchannel() local
200 list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) { xprt_destroy_backchannel()
201 dprintk("RPC: req=%p\n", req); xprt_destroy_backchannel()
202 list_del(&req->rq_bc_pa_list); xprt_destroy_backchannel()
203 xprt_free_allocation(req); xprt_destroy_backchannel()
217 struct rpc_rqst *req = NULL; xprt_alloc_bc_request() local
223 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, xprt_alloc_bc_request()
225 req->rq_reply_bytes_recvd = 0; xprt_alloc_bc_request()
226 req->rq_bytes_sent = 0; xprt_alloc_bc_request()
227 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, xprt_alloc_bc_request()
228 sizeof(req->rq_private_buf)); xprt_alloc_bc_request()
229 req->rq_xid = xid; xprt_alloc_bc_request()
230 req->rq_connect_cookie = xprt->connect_cookie; xprt_alloc_bc_request()
232 dprintk("RPC: backchannel req=%p\n", req); xprt_alloc_bc_request()
233 return req; xprt_alloc_bc_request()
240 void xprt_free_bc_request(struct rpc_rqst *req) xprt_free_bc_request() argument
242 struct rpc_xprt *xprt = req->rq_xprt; xprt_free_bc_request()
244 dprintk("RPC: free backchannel req=%p\n", req); xprt_free_bc_request()
246 req->rq_connect_cookie = xprt->connect_cookie - 1; xprt_free_bc_request()
248 WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state)); xprt_free_bc_request()
249 clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); xprt_free_bc_request()
259 dprintk("RPC: Last session removed req=%p\n", req); xprt_free_bc_request()
260 xprt_free_allocation(req); xprt_free_bc_request()
269 list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list); xprt_free_bc_request()
286 struct rpc_rqst *req; xprt_lookup_bc_request() local
289 list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) { xprt_lookup_bc_request()
290 if (req->rq_connect_cookie != xprt->connect_cookie) xprt_lookup_bc_request()
292 if (req->rq_xid == xid) xprt_lookup_bc_request()
295 req = xprt_alloc_bc_request(xprt, xid); xprt_lookup_bc_request()
298 return req; xprt_lookup_bc_request()
307 void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied) xprt_complete_bc_request() argument
309 struct rpc_xprt *xprt = req->rq_xprt; xprt_complete_bc_request()
313 list_del(&req->rq_bc_pa_list); xprt_complete_bc_request()
316 req->rq_private_buf.len = copied; xprt_complete_bc_request()
317 set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state); xprt_complete_bc_request()
321 list_add(&req->rq_bc_list, &bc_serv->sv_cb_list); xprt_complete_bc_request()
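The backchannel_rqst.c hits above show how a preallocated backchannel rpc_rqst is claimed: walk bc_pa_list for a slot whose connect cookie matches the live connection and whose xid already matches, otherwise fall back to xprt_alloc_bc_request() for a fresh slot. A hedged condensation of that lookup follows; the bc_pa_lock taken around the list walk in the real code is reduced to a comment.

	/* Hedged condensation of the xprt_lookup_bc_request() walk shown above. */
	static struct rpc_rqst *bc_claim_sketch(struct rpc_xprt *xprt, __be32 xid)
	{
		struct rpc_rqst *req;

		/* xprt->bc_pa_lock is held here in the real code */
		list_for_each_entry(req, &xprt->bc_pa_list, rq_bc_pa_list) {
			if (req->rq_connect_cookie != xprt->connect_cookie)
				continue;		/* stale preallocation, skip it */
			if (req->rq_xid == xid)
				return req;		/* already matched to this xid */
		}
		return xprt_alloc_bc_request(xprt, xid);	/* claim a free slot */
	}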
H A Dxprt.c183 struct rpc_rqst *req = task->tk_rqstp; xprt_reserve_xprt() local
192 if (req != NULL) xprt_reserve_xprt()
193 req->rq_ntrans++; xprt_reserve_xprt()
202 if (req == NULL) xprt_reserve_xprt()
204 else if (!req->rq_ntrans) xprt_reserve_xprt()
234 struct rpc_rqst *req = task->tk_rqstp; xprt_reserve_xprt_cong() local
242 if (req == NULL) { xprt_reserve_xprt_cong()
248 req->rq_ntrans++; xprt_reserve_xprt_cong()
256 if (req == NULL) xprt_reserve_xprt_cong()
258 else if (!req->rq_ntrans) xprt_reserve_xprt_cong()
280 struct rpc_rqst *req; __xprt_lock_write_func() local
282 req = task->tk_rqstp; __xprt_lock_write_func()
284 if (req) __xprt_lock_write_func()
285 req->rq_ntrans++; __xprt_lock_write_func()
302 struct rpc_rqst *req; __xprt_lock_write_cong_func() local
304 req = task->tk_rqstp; __xprt_lock_write_cong_func()
305 if (req == NULL) { __xprt_lock_write_cong_func()
311 req->rq_ntrans++; __xprt_lock_write_cong_func()
332 struct rpc_rqst *req = task->tk_rqstp; xprt_task_clear_bytes_sent() local
333 if (req != NULL) xprt_task_clear_bytes_sent()
334 req->rq_bytes_sent = 0; xprt_task_clear_bytes_sent()
387 struct rpc_rqst *req = task->tk_rqstp; __xprt_get_cong() local
389 if (req->rq_cong) __xprt_get_cong()
395 req->rq_cong = 1; __xprt_get_cong()
405 __xprt_put_cong(struct rpc_xprt *xprt, struct rpc_rqst *req) __xprt_put_cong() argument
407 if (!req->rq_cong) __xprt_put_cong()
409 req->rq_cong = 0; __xprt_put_cong()
422 struct rpc_rqst *req = task->tk_rqstp; xprt_release_rqst_cong() local
424 __xprt_put_cong(req->rq_xprt, req); xprt_release_rqst_cong()
446 struct rpc_rqst *req = task->tk_rqstp; xprt_adjust_cwnd() local
464 __xprt_put_cong(xprt, req); xprt_adjust_cwnd()
494 struct rpc_rqst *req = task->tk_rqstp; xprt_wait_for_buffer_space() local
495 struct rpc_xprt *xprt = req->rq_xprt; xprt_wait_for_buffer_space()
497 task->tk_timeout = RPC_IS_SOFT(task) ? req->rq_timeout : 0; xprt_wait_for_buffer_space()
545 struct rpc_rqst *req = task->tk_rqstp; xprt_set_retrans_timeout_rtt() local
549 task->tk_timeout <<= rpc_ntimeo(rtt, timer) + req->rq_retries; xprt_set_retrans_timeout_rtt()
555 static void xprt_reset_majortimeo(struct rpc_rqst *req) xprt_reset_majortimeo() argument
557 const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout; xprt_reset_majortimeo()
559 req->rq_majortimeo = req->rq_timeout; xprt_reset_majortimeo()
561 req->rq_majortimeo <<= to->to_retries; xprt_reset_majortimeo()
563 req->rq_majortimeo += to->to_increment * to->to_retries; xprt_reset_majortimeo()
564 if (req->rq_majortimeo > to->to_maxval || req->rq_majortimeo == 0) xprt_reset_majortimeo()
565 req->rq_majortimeo = to->to_maxval; xprt_reset_majortimeo()
566 req->rq_majortimeo += jiffies; xprt_reset_majortimeo()
571 * @req: RPC request containing parameters to use for the adjustment
574 int xprt_adjust_timeout(struct rpc_rqst *req) xprt_adjust_timeout() argument
576 struct rpc_xprt *xprt = req->rq_xprt; xprt_adjust_timeout()
577 const struct rpc_timeout *to = req->rq_task->tk_client->cl_timeout; xprt_adjust_timeout()
580 if (time_before(jiffies, req->rq_majortimeo)) { xprt_adjust_timeout()
582 req->rq_timeout <<= 1; xprt_adjust_timeout()
584 req->rq_timeout += to->to_increment; xprt_adjust_timeout()
585 if (to->to_maxval && req->rq_timeout >= to->to_maxval) xprt_adjust_timeout()
586 req->rq_timeout = to->to_maxval; xprt_adjust_timeout()
587 req->rq_retries++; xprt_adjust_timeout()
589 req->rq_timeout = to->to_initval; xprt_adjust_timeout()
590 req->rq_retries = 0; xprt_adjust_timeout()
591 xprt_reset_majortimeo(req); xprt_adjust_timeout()
594 rpc_init_rtt(req->rq_task->tk_client->cl_rtt, to->to_initval); xprt_adjust_timeout()
599 if (req->rq_timeout == 0) { xprt_adjust_timeout()
601 req->rq_timeout = 5 * HZ; xprt_adjust_timeout()
824 struct rpc_rqst *req = task->tk_rqstp; xprt_update_rtt() local
827 long m = usecs_to_jiffies(ktime_to_us(req->rq_rtt)); xprt_update_rtt()
830 if (req->rq_ntrans == 1) xprt_update_rtt()
832 rpc_set_timeo(rtt, timer, req->rq_ntrans - 1); xprt_update_rtt()
845 struct rpc_rqst *req = task->tk_rqstp; xprt_complete_rqst() local
846 struct rpc_xprt *xprt = req->rq_xprt; xprt_complete_rqst()
849 task->tk_pid, ntohl(req->rq_xid), copied); xprt_complete_rqst()
850 trace_xprt_complete_rqst(xprt, req->rq_xid, copied); xprt_complete_rqst()
853 req->rq_rtt = ktime_sub(ktime_get(), req->rq_xtime); xprt_complete_rqst()
857 list_del_init(&req->rq_list); xprt_complete_rqst()
858 req->rq_private_buf.len = copied; xprt_complete_rqst()
860 /* req->rq_reply_bytes_recvd */ xprt_complete_rqst()
862 req->rq_reply_bytes_recvd = copied; xprt_complete_rqst()
869 struct rpc_rqst *req = task->tk_rqstp; xprt_timer() local
870 struct rpc_xprt *xprt = req->rq_xprt; xprt_timer()
877 if (!req->rq_reply_bytes_recvd) { xprt_timer()
897 struct rpc_rqst *req = task->tk_rqstp; xprt_prepare_transmit() local
898 struct rpc_xprt *xprt = req->rq_xprt; xprt_prepare_transmit()
904 if (!req->rq_bytes_sent) { xprt_prepare_transmit()
905 if (req->rq_reply_bytes_recvd) { xprt_prepare_transmit()
906 task->tk_status = req->rq_reply_bytes_recvd; xprt_prepare_transmit()
911 && req->rq_connect_cookie == xprt->connect_cookie) { xprt_prepare_transmit()
940 struct rpc_rqst *req = task->tk_rqstp; xprt_transmit() local
941 struct rpc_xprt *xprt = req->rq_xprt; xprt_transmit()
944 dprintk("RPC: %5u xprt_transmit(%u)\n", task->tk_pid, req->rq_slen); xprt_transmit()
946 if (!req->rq_reply_bytes_recvd) { xprt_transmit()
947 if (list_empty(&req->rq_list) && rpc_reply_expected(task)) { xprt_transmit()
953 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, xprt_transmit()
954 sizeof(req->rq_private_buf)); xprt_transmit()
956 list_add_tail(&req->rq_list, &xprt->recv); xprt_transmit()
958 xprt_reset_majortimeo(req); xprt_transmit()
962 } else if (!req->rq_bytes_sent) xprt_transmit()
965 req->rq_xtime = ktime_get(); xprt_transmit()
967 trace_xprt_transmit(xprt, req->rq_xid, status); xprt_transmit()
996 if (!req->rq_reply_bytes_recvd && rpc_reply_expected(task)) xprt_transmit()
998 req->rq_connect_cookie = xprt->connect_cookie; xprt_transmit()
1033 struct rpc_rqst *req = ERR_PTR(-EAGAIN); xprt_dynamic_alloc_slot() local
1037 req = kzalloc(sizeof(struct rpc_rqst), gfp_flags); xprt_dynamic_alloc_slot()
1038 if (req != NULL) xprt_dynamic_alloc_slot()
1041 req = ERR_PTR(-ENOMEM); xprt_dynamic_alloc_slot()
1043 return req; xprt_dynamic_alloc_slot()
1046 static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) xprt_dynamic_free_slot() argument
1049 kfree(req); xprt_dynamic_free_slot()
1057 struct rpc_rqst *req; xprt_alloc_slot() local
1061 req = list_entry(xprt->free.next, struct rpc_rqst, rq_list); xprt_alloc_slot()
1062 list_del(&req->rq_list); xprt_alloc_slot()
1065 req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN); xprt_alloc_slot()
1066 if (!IS_ERR(req)) xprt_alloc_slot()
1068 switch (PTR_ERR(req)) { xprt_alloc_slot()
1084 task->tk_rqstp = req; xprt_alloc_slot()
1104 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req) xprt_free_slot() argument
1107 if (!xprt_dynamic_free_slot(xprt, req)) { xprt_free_slot()
1108 memset(req, 0, sizeof(*req)); /* mark unused */ xprt_free_slot()
1109 list_add(&req->rq_list, &xprt->free); xprt_free_slot()
1117 struct rpc_rqst *req; xprt_free_all_slots() local
1119 req = list_first_entry(&xprt->free, struct rpc_rqst, rq_list); xprt_free_all_slots()
1120 list_del(&req->rq_list); xprt_free_all_slots()
1121 kfree(req); xprt_free_all_slots()
1130 struct rpc_rqst *req; xprt_alloc() local
1140 req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL); xprt_alloc()
1141 if (!req) xprt_alloc()
1143 list_add(&req->rq_list, &xprt->free); xprt_alloc()
1231 struct rpc_rqst *req = task->tk_rqstp; xprt_request_init() local
1233 INIT_LIST_HEAD(&req->rq_list); xprt_request_init()
1234 req->rq_timeout = task->tk_client->cl_timeout->to_initval; xprt_request_init()
1235 req->rq_task = task; xprt_request_init()
1236 req->rq_xprt = xprt; xprt_request_init()
1237 req->rq_buffer = NULL; xprt_request_init()
1238 req->rq_xid = xprt_alloc_xid(xprt); xprt_request_init()
1239 req->rq_connect_cookie = xprt->connect_cookie - 1; xprt_request_init()
1240 req->rq_bytes_sent = 0; xprt_request_init()
1241 req->rq_snd_buf.len = 0; xprt_request_init()
1242 req->rq_snd_buf.buflen = 0; xprt_request_init()
1243 req->rq_rcv_buf.len = 0; xprt_request_init()
1244 req->rq_rcv_buf.buflen = 0; xprt_request_init()
1245 req->rq_release_snd_buf = NULL; xprt_request_init()
1246 xprt_reset_majortimeo(req); xprt_request_init()
1247 dprintk("RPC: %5u reserved req %p xid %08x\n", task->tk_pid, xprt_request_init()
1248 req, ntohl(req->rq_xid)); xprt_request_init()
1259 struct rpc_rqst *req = task->tk_rqstp; xprt_release() local
1261 if (req == NULL) { xprt_release()
1272 xprt = req->rq_xprt; xprt_release()
1281 if (!list_empty(&req->rq_list)) xprt_release()
1282 list_del(&req->rq_list); xprt_release()
1288 if (req->rq_buffer) xprt_release()
1289 xprt->ops->buf_free(req->rq_buffer); xprt_release()
1290 if (req->rq_cred != NULL) xprt_release()
1291 put_rpccred(req->rq_cred); xprt_release()
1293 if (req->rq_release_snd_buf) xprt_release()
1294 req->rq_release_snd_buf(req); xprt_release()
1296 dprintk("RPC: %5u release request %p\n", task->tk_pid, req); xprt_release()
1297 if (likely(!bc_prealloc(req))) xprt_release()
1298 xprt_free_slot(xprt, req); xprt_release()
1300 xprt_free_bc_request(req); xprt_release()
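
The net/sunrpc/xprt.c hits above show the retransmit backoff in xprt_adjust_timeout(): while the major timeout has not expired, rq_timeout is doubled (or bumped by to_increment) and capped at to_maxval; once it has, the timeout is reset to to_initval and the retry count cleared, with rq_majortimeo bounding the whole retry window. A minimal standalone C sketch of just that step follows; the struct and field names are simplified stand-ins, not the kernel's types.

#include <stdio.h>

/* simplified stand-ins for struct rpc_timeout / struct rpc_rqst */
struct to_params { unsigned long initval, maxval, increment; int exponential; };
struct rqst      { unsigned long timeout; unsigned int retries; };

/* one backoff step, mirroring the branch structure seen in xprt_adjust_timeout() */
static void adjust_timeout(struct rqst *req, const struct to_params *to, int major_expired)
{
	if (!major_expired) {
		if (to->exponential)
			req->timeout <<= 1;             /* exponential growth */
		else
			req->timeout += to->increment;  /* linear growth */
		if (to->maxval && req->timeout >= to->maxval)
			req->timeout = to->maxval;      /* clamp the per-try timeout */
		req->retries++;
	} else {
		req->timeout = to->initval;             /* major timeout hit: start over */
		req->retries = 0;
	}
}

int main(void)
{
	struct to_params to = { .initval = 100, .maxval = 1600, .increment = 0, .exponential = 1 };
	struct rqst req = { .timeout = to.initval, .retries = 0 };
	int i;

	for (i = 0; i < 6; i++) {
		printf("try %u: timeout %lu\n", req.retries, req.timeout);
		adjust_timeout(&req, &to, 0);
	}
	adjust_timeout(&req, &to, 1);
	printf("after major timeout: timeout %lu, retries %u\n", req.timeout, req.retries);
	return 0;
}
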
/linux-4.1.27/fs/ceph/
H A Dexport.c73 struct ceph_mds_request *req; __fh_to_dentry() local
75 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO, __fh_to_dentry()
77 if (IS_ERR(req)) __fh_to_dentry()
78 return ERR_CAST(req); __fh_to_dentry()
80 req->r_ino1 = vino; __fh_to_dentry()
81 req->r_num_caps = 1; __fh_to_dentry()
82 err = ceph_mdsc_do_request(mdsc, NULL, req); __fh_to_dentry()
83 inode = req->r_target_inode; __fh_to_dentry()
86 ceph_mdsc_put_request(req); __fh_to_dentry()
128 struct ceph_mds_request *req; __get_parent() local
133 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPPARENT, __get_parent()
135 if (IS_ERR(req)) __get_parent()
136 return ERR_CAST(req); __get_parent()
139 req->r_inode = d_inode(child); __get_parent()
142 req->r_ino1 = (struct ceph_vino) { __get_parent()
147 req->r_num_caps = 1; __get_parent()
148 err = ceph_mdsc_do_request(mdsc, NULL, req); __get_parent()
149 inode = req->r_target_inode; __get_parent()
152 ceph_mdsc_put_request(req); __get_parent()
209 struct ceph_mds_request *req; ceph_get_name() local
213 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPNAME, ceph_get_name()
215 if (IS_ERR(req)) ceph_get_name()
216 return PTR_ERR(req); ceph_get_name()
220 req->r_inode = d_inode(child); ceph_get_name()
222 req->r_ino2 = ceph_vino(d_inode(parent)); ceph_get_name()
223 req->r_locked_dir = d_inode(parent); ceph_get_name()
224 req->r_num_caps = 2; ceph_get_name()
225 err = ceph_mdsc_do_request(mdsc, NULL, req); ceph_get_name()
230 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; ceph_get_name()
240 ceph_mdsc_put_request(req); ceph_get_name()
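
The fs/ceph/export.c hits above rely on the kernel's ERR_PTR()/IS_ERR()/ERR_CAST() convention: ceph_mdsc_create_request() reports failure by encoding a negative errno in the returned pointer rather than returning NULL plus a separate error code, and ERR_CAST() simply re-types that encoded pointer. A userspace approximation of the convention, for illustration only (these helpers mimic, but are not, the kernel's):

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

/* encode/decode a negative errno in a pointer value, kernel-style */
static inline void *ERR_PTR(long err)      { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline int   IS_ERR(const void *p)  { return (unsigned long)p >= (unsigned long)-4095; }

struct request { int op; };

/* hypothetical allocator: on failure the error travels inside the pointer */
static struct request *create_request(int op)
{
	struct request *req = malloc(sizeof(*req));

	if (!req)
		return ERR_PTR(-ENOMEM);
	req->op = op;
	return req;
}

int main(void)
{
	struct request *req = create_request(42);

	if (IS_ERR(req)) {
		fprintf(stderr, "create failed: %ld\n", PTR_ERR(req));
		return 1;
	}
	printf("created request, op %d\n", req->op);
	free(req);              /* the kernel code drops a reference instead */
	return 0;
}
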
H A Dmds_client.c515 static void put_request_session(struct ceph_mds_request *req) put_request_session() argument
517 if (req->r_session) { put_request_session()
518 ceph_put_mds_session(req->r_session); put_request_session()
519 req->r_session = NULL; put_request_session()
525 struct ceph_mds_request *req = container_of(kref, ceph_mdsc_release_request() local
528 destroy_reply_info(&req->r_reply_info); ceph_mdsc_release_request()
529 if (req->r_request) ceph_mdsc_release_request()
530 ceph_msg_put(req->r_request); ceph_mdsc_release_request()
531 if (req->r_reply) ceph_mdsc_release_request()
532 ceph_msg_put(req->r_reply); ceph_mdsc_release_request()
533 if (req->r_inode) { ceph_mdsc_release_request()
534 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); ceph_mdsc_release_request()
535 iput(req->r_inode); ceph_mdsc_release_request()
537 if (req->r_locked_dir) ceph_mdsc_release_request()
538 ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); ceph_mdsc_release_request()
539 iput(req->r_target_inode); ceph_mdsc_release_request()
540 if (req->r_dentry) ceph_mdsc_release_request()
541 dput(req->r_dentry); ceph_mdsc_release_request()
542 if (req->r_old_dentry) ceph_mdsc_release_request()
543 dput(req->r_old_dentry); ceph_mdsc_release_request()
544 if (req->r_old_dentry_dir) { ceph_mdsc_release_request()
551 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir), ceph_mdsc_release_request()
553 iput(req->r_old_dentry_dir); ceph_mdsc_release_request()
555 kfree(req->r_path1); ceph_mdsc_release_request()
556 kfree(req->r_path2); ceph_mdsc_release_request()
557 if (req->r_pagelist) ceph_mdsc_release_request()
558 ceph_pagelist_release(req->r_pagelist); ceph_mdsc_release_request()
559 put_request_session(req); ceph_mdsc_release_request()
560 ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); ceph_mdsc_release_request()
561 kfree(req); ceph_mdsc_release_request()
572 struct ceph_mds_request *req; __lookup_request() local
576 req = rb_entry(n, struct ceph_mds_request, r_node); __lookup_request()
577 if (tid < req->r_tid) __lookup_request()
579 else if (tid > req->r_tid) __lookup_request()
582 ceph_mdsc_get_request(req); __lookup_request()
583 return req; __lookup_request()
594 struct ceph_mds_request *req = NULL; __insert_request() local
598 req = rb_entry(parent, struct ceph_mds_request, r_node); __insert_request()
599 if (new->r_tid < req->r_tid) __insert_request()
601 else if (new->r_tid > req->r_tid) __insert_request()
618 struct ceph_mds_request *req, __register_request()
621 req->r_tid = ++mdsc->last_tid; __register_request()
622 if (req->r_num_caps) __register_request()
623 ceph_reserve_caps(mdsc, &req->r_caps_reservation, __register_request()
624 req->r_num_caps); __register_request()
625 dout("__register_request %p tid %lld\n", req, req->r_tid); __register_request()
626 ceph_mdsc_get_request(req); __register_request()
627 __insert_request(mdsc, req); __register_request()
629 req->r_uid = current_fsuid(); __register_request()
630 req->r_gid = current_fsgid(); __register_request()
637 req->r_unsafe_dir = dir; __register_request()
638 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops); __register_request()
644 struct ceph_mds_request *req) __unregister_request()
646 dout("__unregister_request %p tid %lld\n", req, req->r_tid); __unregister_request()
647 rb_erase(&req->r_node, &mdsc->request_tree); __unregister_request()
648 RB_CLEAR_NODE(&req->r_node); __unregister_request()
650 if (req->r_unsafe_dir) { __unregister_request()
651 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); __unregister_request()
654 list_del_init(&req->r_unsafe_dir_item); __unregister_request()
657 iput(req->r_unsafe_dir); __unregister_request()
658 req->r_unsafe_dir = NULL; __unregister_request()
661 complete_all(&req->r_safe_completion); __unregister_request()
663 ceph_mdsc_put_request(req); __unregister_request()
688 struct ceph_mds_request *req) __choose_mds()
693 int mode = req->r_direct_mode; __choose_mds()
695 u32 hash = req->r_direct_hash; __choose_mds()
696 bool is_hash = req->r_direct_is_hash; __choose_mds()
702 if (req->r_resend_mds >= 0 && __choose_mds()
703 (__have_session(mdsc, req->r_resend_mds) || __choose_mds()
704 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { __choose_mds()
706 req->r_resend_mds); __choose_mds()
707 return req->r_resend_mds; __choose_mds()
714 if (req->r_inode) { __choose_mds()
715 inode = req->r_inode; __choose_mds()
716 } else if (req->r_dentry) { __choose_mds()
718 struct dentry *parent = req->r_dentry->d_parent; __choose_mds()
723 inode = d_inode(req->r_dentry); __choose_mds()
732 inode = d_inode(req->r_dentry); __choose_mds()
736 hash = ceph_dentry_hash(dir, req->r_dentry); __choose_mds()
1027 struct ceph_mds_request *req; cleanup_session_requests() local
1033 req = list_first_entry(&session->s_unsafe, cleanup_session_requests()
1035 list_del_init(&req->r_unsafe_item); cleanup_session_requests()
1036 pr_info(" dropping unsafe request %llu\n", req->r_tid); cleanup_session_requests()
1037 __unregister_request(mdsc, req); cleanup_session_requests()
1042 req = rb_entry(p, struct ceph_mds_request, r_node); cleanup_session_requests()
1044 if (req->r_session && cleanup_session_requests()
1045 req->r_session->s_mds == session->s_mds) cleanup_session_requests()
1046 req->r_attempts = 0; cleanup_session_requests()
1620 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req, ceph_alloc_readdir_reply_buffer() argument
1624 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; ceph_alloc_readdir_reply_buffer()
1625 struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options; ceph_alloc_readdir_reply_buffer()
1651 req->r_num_caps = num_entries + 1; ceph_alloc_readdir_reply_buffer()
1652 req->r_args.readdir.max_entries = cpu_to_le32(num_entries); ceph_alloc_readdir_reply_buffer()
1653 req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes); ceph_alloc_readdir_reply_buffer()
1663 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); ceph_mdsc_create_request() local
1665 if (!req) ceph_mdsc_create_request()
1668 mutex_init(&req->r_fill_mutex); ceph_mdsc_create_request()
1669 req->r_mdsc = mdsc; ceph_mdsc_create_request()
1670 req->r_started = jiffies; ceph_mdsc_create_request()
1671 req->r_resend_mds = -1; ceph_mdsc_create_request()
1672 INIT_LIST_HEAD(&req->r_unsafe_dir_item); ceph_mdsc_create_request()
1673 req->r_fmode = -1; ceph_mdsc_create_request()
1674 kref_init(&req->r_kref); ceph_mdsc_create_request()
1675 INIT_LIST_HEAD(&req->r_wait); ceph_mdsc_create_request()
1676 init_completion(&req->r_completion); ceph_mdsc_create_request()
1677 init_completion(&req->r_safe_completion); ceph_mdsc_create_request()
1678 INIT_LIST_HEAD(&req->r_unsafe_item); ceph_mdsc_create_request()
1680 req->r_stamp = CURRENT_TIME; ceph_mdsc_create_request()
1682 req->r_op = op; ceph_mdsc_create_request()
1683 req->r_direct_mode = mode; ceph_mdsc_create_request()
1684 return req; ceph_mdsc_create_request()
1702 struct ceph_mds_request *req = __get_oldest_req(mdsc); __get_oldest_tid() local
1704 if (req) __get_oldest_tid()
1705 return req->r_tid; __get_oldest_tid()
1875 struct ceph_mds_request *req, create_request_message()
1890 ret = set_request_path_attr(req->r_inode, req->r_dentry, create_request_message()
1891 req->r_path1, req->r_ino1.ino, create_request_message()
1898 ret = set_request_path_attr(NULL, req->r_old_dentry, create_request_message()
1899 req->r_path2, req->r_ino2.ino, create_request_message()
1912 (!!req->r_inode_drop + !!req->r_dentry_drop + create_request_message()
1913 !!req->r_old_inode_drop + !!req->r_old_dentry_drop); create_request_message()
1914 if (req->r_dentry_drop) create_request_message()
1915 len += req->r_dentry->d_name.len; create_request_message()
1916 if (req->r_old_dentry_drop) create_request_message()
1917 len += req->r_old_dentry->d_name.len; create_request_message()
1926 msg->hdr.tid = cpu_to_le64(req->r_tid); create_request_message()
1933 head->op = cpu_to_le32(req->r_op); create_request_message()
1934 head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); create_request_message()
1935 head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); create_request_message()
1936 head->args = req->r_args; create_request_message()
1942 req->r_request_release_offset = p - msg->front.iov_base; create_request_message()
1946 if (req->r_inode_drop) create_request_message()
1948 req->r_inode ? req->r_inode : d_inode(req->r_dentry), create_request_message()
1949 mds, req->r_inode_drop, req->r_inode_unless, 0); create_request_message()
1950 if (req->r_dentry_drop) create_request_message()
1951 releases += ceph_encode_dentry_release(&p, req->r_dentry, create_request_message()
1952 mds, req->r_dentry_drop, req->r_dentry_unless); create_request_message()
1953 if (req->r_old_dentry_drop) create_request_message()
1954 releases += ceph_encode_dentry_release(&p, req->r_old_dentry, create_request_message()
1955 mds, req->r_old_dentry_drop, req->r_old_dentry_unless); create_request_message()
1956 if (req->r_old_inode_drop) create_request_message()
1958 d_inode(req->r_old_dentry), create_request_message()
1959 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); create_request_message()
1963 p = msg->front.iov_base + req->r_request_release_offset; create_request_message()
1971 ceph_encode_timespec(&ts, &req->r_stamp); create_request_message()
1979 if (req->r_pagelist) { create_request_message()
1980 struct ceph_pagelist *pagelist = req->r_pagelist; create_request_message()
2005 struct ceph_mds_request *req) complete_request()
2007 if (req->r_callback) complete_request()
2008 req->r_callback(mdsc, req); complete_request()
2010 complete_all(&req->r_completion); complete_request()
2017 struct ceph_mds_request *req, __prepare_send_request()
2024 req->r_attempts++; __prepare_send_request()
2025 if (req->r_inode) { __prepare_send_request()
2027 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); __prepare_send_request()
2030 req->r_sent_on_mseq = cap->mseq; __prepare_send_request()
2032 req->r_sent_on_mseq = -1; __prepare_send_request()
2034 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, __prepare_send_request()
2035 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); __prepare_send_request()
2037 if (req->r_got_unsafe) { __prepare_send_request()
2045 msg = req->r_request; __prepare_send_request()
2052 if (req->r_target_inode) __prepare_send_request()
2053 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); __prepare_send_request()
2055 rhead->num_retry = req->r_attempts - 1; __prepare_send_request()
2061 p = msg->front.iov_base + req->r_request_release_offset; __prepare_send_request()
2064 ceph_encode_timespec(&ts, &req->r_stamp); __prepare_send_request()
2073 if (req->r_request) { __prepare_send_request()
2074 ceph_msg_put(req->r_request); __prepare_send_request()
2075 req->r_request = NULL; __prepare_send_request()
2077 msg = create_request_message(mdsc, req, mds, drop_cap_releases); __prepare_send_request()
2079 req->r_err = PTR_ERR(msg); __prepare_send_request()
2080 complete_request(mdsc, req); __prepare_send_request()
2083 req->r_request = msg; __prepare_send_request()
2087 if (req->r_got_unsafe) __prepare_send_request()
2089 if (req->r_locked_dir) __prepare_send_request()
2092 rhead->num_fwd = req->r_num_fwd; __prepare_send_request()
2093 rhead->num_retry = req->r_attempts - 1; __prepare_send_request()
2096 dout(" r_locked_dir = %p\n", req->r_locked_dir); __prepare_send_request()
2104 struct ceph_mds_request *req) __do_request()
2110 if (req->r_err || req->r_got_result) { __do_request()
2111 if (req->r_aborted) __do_request()
2112 __unregister_request(mdsc, req); __do_request()
2116 if (req->r_timeout && __do_request()
2117 time_after_eq(jiffies, req->r_started + req->r_timeout)) { __do_request()
2123 put_request_session(req); __do_request()
2125 mds = __choose_mds(mdsc, req); __do_request()
2129 list_add(&req->r_wait, &mdsc->waiting_for_map); __do_request()
2142 req->r_session = get_session(session); __do_request()
2151 list_add(&req->r_wait, &session->s_waiting); __do_request()
2156 req->r_resend_mds = -1; /* forget any previous mds hint */ __do_request()
2158 if (req->r_request_started == 0) /* note request start time */ __do_request()
2159 req->r_request_started = jiffies; __do_request()
2161 err = __prepare_send_request(mdsc, req, mds, false); __do_request()
2163 ceph_msg_get(req->r_request); __do_request()
2164 ceph_con_send(&session->s_con, req->r_request); __do_request()
2173 req->r_err = err; __do_request()
2174 complete_request(mdsc, req); __do_request()
2184 struct ceph_mds_request *req; __wake_requests() local
2190 req = list_entry(tmp_list.next, __wake_requests()
2192 list_del_init(&req->r_wait); __wake_requests()
2193 dout(" wake request %p tid %llu\n", req, req->r_tid); __wake_requests()
2194 __do_request(mdsc, req); __wake_requests()
2204 struct ceph_mds_request *req; kick_requests() local
2209 req = rb_entry(p, struct ceph_mds_request, r_node); kick_requests()
2211 if (req->r_got_unsafe) kick_requests()
2213 if (req->r_attempts > 0) kick_requests()
2215 if (req->r_session && kick_requests()
2216 req->r_session->s_mds == mds) { kick_requests()
2217 dout(" kicking tid %llu\n", req->r_tid); kick_requests()
2218 list_del_init(&req->r_wait); kick_requests()
2219 __do_request(mdsc, req); kick_requests()
2225 struct ceph_mds_request *req) ceph_mdsc_submit_request()
2227 dout("submit_request on %p\n", req); ceph_mdsc_submit_request()
2229 __register_request(mdsc, req, NULL); ceph_mdsc_submit_request()
2230 __do_request(mdsc, req); ceph_mdsc_submit_request()
2240 struct ceph_mds_request *req) ceph_mdsc_do_request()
2244 dout("do_request on %p\n", req); ceph_mdsc_do_request()
2247 if (req->r_inode) ceph_mdsc_do_request()
2248 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); ceph_mdsc_do_request()
2249 if (req->r_locked_dir) ceph_mdsc_do_request()
2250 ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); ceph_mdsc_do_request()
2251 if (req->r_old_dentry_dir) ceph_mdsc_do_request()
2252 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), ceph_mdsc_do_request()
2257 __register_request(mdsc, req, dir); ceph_mdsc_do_request()
2258 __do_request(mdsc, req); ceph_mdsc_do_request()
2260 if (req->r_err) { ceph_mdsc_do_request()
2261 err = req->r_err; ceph_mdsc_do_request()
2262 __unregister_request(mdsc, req); ceph_mdsc_do_request()
2270 if (req->r_timeout) { ceph_mdsc_do_request()
2272 &req->r_completion, req->r_timeout); ceph_mdsc_do_request()
2275 } else if (req->r_wait_for_completion) { ceph_mdsc_do_request()
2276 err = req->r_wait_for_completion(mdsc, req); ceph_mdsc_do_request()
2278 err = wait_for_completion_killable(&req->r_completion); ceph_mdsc_do_request()
2284 if (req->r_got_result) { ceph_mdsc_do_request()
2285 err = le32_to_cpu(req->r_reply_info.head->result); ceph_mdsc_do_request()
2287 dout("aborted request %lld with %d\n", req->r_tid, err); ceph_mdsc_do_request()
2294 mutex_lock(&req->r_fill_mutex); ceph_mdsc_do_request()
2295 req->r_err = err; ceph_mdsc_do_request()
2296 req->r_aborted = true; ceph_mdsc_do_request()
2297 mutex_unlock(&req->r_fill_mutex); ceph_mdsc_do_request()
2299 if (req->r_locked_dir && ceph_mdsc_do_request()
2300 (req->r_op & CEPH_MDS_OP_WRITE)) ceph_mdsc_do_request()
2301 ceph_invalidate_dir_request(req); ceph_mdsc_do_request()
2303 err = req->r_err; ceph_mdsc_do_request()
2308 dout("do_request %p done, result %d\n", req, err); ceph_mdsc_do_request()
2316 void ceph_invalidate_dir_request(struct ceph_mds_request *req) ceph_invalidate_dir_request() argument
2318 struct inode *inode = req->r_locked_dir; ceph_invalidate_dir_request()
2323 if (req->r_dentry) ceph_invalidate_dir_request()
2324 ceph_invalidate_dentry_lease(req->r_dentry); ceph_invalidate_dir_request()
2325 if (req->r_old_dentry) ceph_invalidate_dir_request()
2326 ceph_invalidate_dentry_lease(req->r_old_dentry); ceph_invalidate_dir_request()
2339 struct ceph_mds_request *req; handle_reply() local
2356 req = __lookup_request(mdsc, tid); handle_reply()
2357 if (!req) { handle_reply()
2362 dout("handle_reply %p\n", req); handle_reply()
2365 if (req->r_session != session) { handle_reply()
2368 req->r_session ? req->r_session->s_mds : -1); handle_reply()
2374 if ((req->r_got_unsafe && !head->safe) || handle_reply()
2375 (req->r_got_safe && head->safe)) { handle_reply()
2381 if (req->r_got_safe && !head->safe) { handle_reply()
2398 dout("got ESTALE on request %llu", req->r_tid); handle_reply()
2399 req->r_resend_mds = -1; handle_reply()
2400 if (req->r_direct_mode != USE_AUTH_MDS) { handle_reply()
2402 req->r_direct_mode = USE_AUTH_MDS; handle_reply()
2403 __do_request(mdsc, req); handle_reply()
2407 int mds = __choose_mds(mdsc, req); handle_reply()
2408 if (mds >= 0 && mds != req->r_session->s_mds) { handle_reply()
2410 __do_request(mdsc, req); handle_reply()
2415 dout("have to return ESTALE on request %llu", req->r_tid); handle_reply()
2420 req->r_got_safe = true; handle_reply()
2421 __unregister_request(mdsc, req); handle_reply()
2423 if (req->r_got_unsafe) { handle_reply()
2432 list_del_init(&req->r_unsafe_item); handle_reply()
2441 req->r_got_unsafe = true; handle_reply()
2442 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); handle_reply()
2446 rinfo = &req->r_reply_info; handle_reply()
2471 mutex_lock(&req->r_fill_mutex); handle_reply()
2472 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); handle_reply()
2474 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || handle_reply()
2475 req->r_op == CEPH_MDS_OP_LSSNAP)) handle_reply()
2476 ceph_readdir_prepopulate(req, req->r_session); handle_reply()
2477 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); handle_reply()
2479 mutex_unlock(&req->r_fill_mutex); handle_reply()
2486 if (!req->r_aborted) { handle_reply()
2488 req->r_err = err; handle_reply()
2490 req->r_reply = msg; handle_reply()
2492 req->r_got_result = true; handle_reply()
2499 ceph_add_cap_releases(mdsc, req->r_session); handle_reply()
2503 complete_request(mdsc, req); handle_reply()
2505 ceph_mdsc_put_request(req); handle_reply()
2518 struct ceph_mds_request *req; handle_forward() local
2531 req = __lookup_request(mdsc, tid); handle_forward()
2532 if (!req) { handle_forward()
2533 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds); handle_forward()
2537 if (req->r_aborted) { handle_forward()
2539 __unregister_request(mdsc, req); handle_forward()
2540 } else if (fwd_seq <= req->r_num_fwd) { handle_forward()
2542 tid, next_mds, req->r_num_fwd, fwd_seq); handle_forward()
2546 BUG_ON(req->r_err); handle_forward()
2547 BUG_ON(req->r_got_result); handle_forward()
2548 req->r_attempts = 0; handle_forward()
2549 req->r_num_fwd = fwd_seq; handle_forward()
2550 req->r_resend_mds = next_mds; handle_forward()
2551 put_request_session(req); handle_forward()
2552 __do_request(mdsc, req); handle_forward()
2554 ceph_mdsc_put_request(req); handle_forward()
2680 struct ceph_mds_request *req, *nreq; replay_unsafe_requests() local
2687 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { replay_unsafe_requests()
2688 err = __prepare_send_request(mdsc, req, session->s_mds, true); replay_unsafe_requests()
2690 ceph_msg_get(req->r_request); replay_unsafe_requests()
2691 ceph_con_send(&session->s_con, req->r_request); replay_unsafe_requests()
2701 req = rb_entry(p, struct ceph_mds_request, r_node); replay_unsafe_requests()
2703 if (req->r_got_unsafe) replay_unsafe_requests()
2705 if (req->r_attempts == 0) replay_unsafe_requests()
2707 if (req->r_session && replay_unsafe_requests()
2708 req->r_session->s_mds == session->s_mds) { replay_unsafe_requests()
2709 err = __prepare_send_request(mdsc, req, replay_unsafe_requests()
2712 ceph_msg_get(req->r_request); replay_unsafe_requests()
2713 ceph_con_send(&session->s_con, req->r_request); replay_unsafe_requests()
3426 struct ceph_mds_request *req; wait_requests() local
3439 while ((req = __get_oldest_req(mdsc))) { wait_requests()
3441 req->r_tid); wait_requests()
3442 __unregister_request(mdsc, req); wait_requests()
3474 struct ceph_mds_request *req = NULL, *nextreq; wait_unsafe_requests() local
3480 req = __get_oldest_req(mdsc); wait_unsafe_requests()
3481 while (req && req->r_tid <= want_tid) { wait_unsafe_requests()
3483 n = rb_next(&req->r_node); wait_unsafe_requests()
3488 if ((req->r_op & CEPH_MDS_OP_WRITE)) { wait_unsafe_requests()
3490 ceph_mdsc_get_request(req); wait_unsafe_requests()
3495 req->r_tid, want_tid); wait_unsafe_requests()
3496 wait_for_completion(&req->r_safe_completion); wait_unsafe_requests()
3498 ceph_mdsc_put_request(req); wait_unsafe_requests()
3508 req = nextreq; wait_unsafe_requests()
617 __register_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req, struct inode *dir) __register_request() argument
643 __unregister_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) __unregister_request() argument
687 __choose_mds(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) __choose_mds() argument
1874 create_request_message(struct ceph_mds_client *mdsc, struct ceph_mds_request *req, int mds, bool drop_cap_releases) create_request_message() argument
2004 complete_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) complete_request() argument
2016 __prepare_send_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req, int mds, bool drop_cap_releases) __prepare_send_request() argument
2103 __do_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) __do_request() argument
2224 ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) ceph_mdsc_submit_request() argument
2238 ceph_mdsc_do_request(struct ceph_mds_client *mdsc, struct inode *dir, struct ceph_mds_request *req) ceph_mdsc_do_request() argument
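
The fs/ceph/mds_client.c hits above pair ceph_mdsc_get_request() with ceph_mdsc_put_request(): the request carries a kref, and the final put runs ceph_mdsc_release_request(), which recovers the request from the embedded kref with container_of() and frees everything it holds. A minimal single-threaded userspace model of that pattern (the real kref uses an atomic counter; the names below are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kref { int refcount; };

static void kref_init(struct kref *k) { k->refcount = 1; }
static void kref_get(struct kref *k)  { k->refcount++; }
static int  kref_put(struct kref *k, void (*release)(struct kref *))
{
	if (--k->refcount == 0) {
		release(k);             /* last reference: free the object */
		return 1;
	}
	return 0;
}

struct mds_request { unsigned long long r_tid; struct kref r_kref; };

static void release_request(struct kref *k)
{
	struct mds_request *req = container_of(k, struct mds_request, r_kref);

	printf("releasing request tid %llu\n", req->r_tid);
	free(req);
}

int main(void)
{
	struct mds_request *req = calloc(1, sizeof(*req));

	if (!req)
		return 1;
	kref_init(&req->r_kref);
	req->r_tid = 1;

	kref_get(&req->r_kref);                     /* e.g. registered in the tid tree */
	kref_put(&req->r_kref, release_request);    /* unregistered */
	kref_put(&req->r_kref, release_request);    /* caller's reference: frees here */
	return 0;
}
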
H A Ddebugfs.c53 struct ceph_mds_request *req; mdsc_show() local
61 req = rb_entry(rp, struct ceph_mds_request, r_node); mdsc_show()
63 if (req->r_request && req->r_session) mdsc_show()
64 seq_printf(s, "%lld\tmds%d\t", req->r_tid, mdsc_show()
65 req->r_session->s_mds); mdsc_show()
66 else if (!req->r_request) mdsc_show()
67 seq_printf(s, "%lld\t(no request)\t", req->r_tid); mdsc_show()
69 seq_printf(s, "%lld\t(no session)\t", req->r_tid); mdsc_show()
71 seq_printf(s, "%s", ceph_mds_op_name(req->r_op)); mdsc_show()
73 if (req->r_got_unsafe) mdsc_show()
78 if (req->r_inode) { mdsc_show()
79 seq_printf(s, " #%llx", ceph_ino(req->r_inode)); mdsc_show()
80 } else if (req->r_dentry) { mdsc_show()
81 path = ceph_mdsc_build_path(req->r_dentry, &pathlen, mdsc_show()
85 spin_lock(&req->r_dentry->d_lock); mdsc_show()
87 ceph_ino(d_inode(req->r_dentry->d_parent)), mdsc_show()
88 req->r_dentry, mdsc_show()
90 spin_unlock(&req->r_dentry->d_lock); mdsc_show()
92 } else if (req->r_path1) { mdsc_show()
93 seq_printf(s, " #%llx/%s", req->r_ino1.ino, mdsc_show()
94 req->r_path1); mdsc_show()
96 seq_printf(s, " #%llx", req->r_ino1.ino); mdsc_show()
99 if (req->r_old_dentry) { mdsc_show()
100 path = ceph_mdsc_build_path(req->r_old_dentry, &pathlen, mdsc_show()
104 spin_lock(&req->r_old_dentry->d_lock); mdsc_show()
106 req->r_old_dentry_dir ? mdsc_show()
107 ceph_ino(req->r_old_dentry_dir) : 0, mdsc_show()
108 req->r_old_dentry, mdsc_show()
110 spin_unlock(&req->r_old_dentry->d_lock); mdsc_show()
112 } else if (req->r_path2) { mdsc_show()
113 if (req->r_ino2.ino) mdsc_show()
114 seq_printf(s, " #%llx/%s", req->r_ino2.ino, mdsc_show()
115 req->r_path2); mdsc_show()
117 seq_printf(s, " %s", req->r_path2); mdsc_show()
H A Ddir.c320 struct ceph_mds_request *req; ceph_readdir() local
332 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); ceph_readdir()
333 if (IS_ERR(req)) ceph_readdir()
334 return PTR_ERR(req); ceph_readdir()
335 err = ceph_alloc_readdir_reply_buffer(req, inode); ceph_readdir()
337 ceph_mdsc_put_request(req); ceph_readdir()
341 req->r_direct_mode = USE_AUTH_MDS; ceph_readdir()
342 req->r_direct_hash = ceph_frag_value(frag); ceph_readdir()
343 req->r_direct_is_hash = true; ceph_readdir()
345 req->r_path2 = kstrdup(fi->last_name, GFP_NOFS); ceph_readdir()
346 if (!req->r_path2) { ceph_readdir()
347 ceph_mdsc_put_request(req); ceph_readdir()
351 req->r_readdir_offset = fi->next_offset; ceph_readdir()
352 req->r_args.readdir.frag = cpu_to_le32(frag); ceph_readdir()
354 req->r_inode = inode; ceph_readdir()
356 req->r_dentry = dget(file->f_path.dentry); ceph_readdir()
357 err = ceph_mdsc_do_request(mdsc, NULL, req); ceph_readdir()
359 ceph_mdsc_put_request(req); ceph_readdir()
364 (int)req->r_reply_info.dir_end, ceph_readdir()
365 (int)req->r_reply_info.dir_complete); ceph_readdir()
367 if (!req->r_did_prepopulate) { ceph_readdir()
374 rinfo = &req->r_reply_info; ceph_readdir()
385 fi->last_readdir = req; ceph_readdir()
387 if (req->r_reply_info.dir_end) { ceph_readdir()
542 int ceph_handle_snapdir(struct ceph_mds_request *req, ceph_handle_snapdir() argument
566 * Mainly, make sure we return the final req->r_dentry (if it already
574 struct dentry *ceph_finish_lookup(struct ceph_mds_request *req, ceph_finish_lookup() argument
580 if (!req->r_reply_info.head->is_dentry) { ceph_finish_lookup()
593 else if (dentry != req->r_dentry) ceph_finish_lookup()
594 dentry = dget(req->r_dentry); /* we got spliced */ ceph_finish_lookup()
615 struct ceph_mds_request *req; ceph_lookup() local
654 req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS); ceph_lookup()
655 if (IS_ERR(req)) ceph_lookup()
656 return ERR_CAST(req); ceph_lookup()
657 req->r_dentry = dget(dentry); ceph_lookup()
658 req->r_num_caps = 2; ceph_lookup()
660 req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE); ceph_lookup()
661 req->r_locked_dir = dir; ceph_lookup()
662 err = ceph_mdsc_do_request(mdsc, NULL, req); ceph_lookup()
663 err = ceph_handle_snapdir(req, dentry, err); ceph_lookup()
664 dentry = ceph_finish_lookup(req, dentry, err); ceph_lookup()
665 ceph_mdsc_put_request(req); /* will dput(dentry) */ ceph_lookup()
702 struct ceph_mds_request *req; ceph_mknod() local
715 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS); ceph_mknod()
716 if (IS_ERR(req)) { ceph_mknod()
717 err = PTR_ERR(req); ceph_mknod()
720 req->r_dentry = dget(dentry); ceph_mknod()
721 req->r_num_caps = 2; ceph_mknod()
722 req->r_locked_dir = dir; ceph_mknod()
723 req->r_args.mknod.mode = cpu_to_le32(mode); ceph_mknod()
724 req->r_args.mknod.rdev = cpu_to_le32(rdev); ceph_mknod()
725 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; ceph_mknod()
726 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; ceph_mknod()
728 req->r_pagelist = acls.pagelist; ceph_mknod()
731 err = ceph_mdsc_do_request(mdsc, dir, req); ceph_mknod()
732 if (!err && !req->r_reply_info.head->is_dentry) ceph_mknod()
734 ceph_mdsc_put_request(req); ceph_mknod()
755 struct ceph_mds_request *req; ceph_symlink() local
762 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS); ceph_symlink()
763 if (IS_ERR(req)) { ceph_symlink()
764 err = PTR_ERR(req); ceph_symlink()
767 req->r_path2 = kstrdup(dest, GFP_NOFS); ceph_symlink()
768 if (!req->r_path2) { ceph_symlink()
770 ceph_mdsc_put_request(req); ceph_symlink()
773 req->r_locked_dir = dir; ceph_symlink()
774 req->r_dentry = dget(dentry); ceph_symlink()
775 req->r_num_caps = 2; ceph_symlink()
776 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; ceph_symlink()
777 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; ceph_symlink()
778 err = ceph_mdsc_do_request(mdsc, dir, req); ceph_symlink()
779 if (!err && !req->r_reply_info.head->is_dentry) ceph_symlink()
781 ceph_mdsc_put_request(req); ceph_symlink()
792 struct ceph_mds_request *req; ceph_mkdir() local
814 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); ceph_mkdir()
815 if (IS_ERR(req)) { ceph_mkdir()
816 err = PTR_ERR(req); ceph_mkdir()
820 req->r_dentry = dget(dentry); ceph_mkdir()
821 req->r_num_caps = 2; ceph_mkdir()
822 req->r_locked_dir = dir; ceph_mkdir()
823 req->r_args.mkdir.mode = cpu_to_le32(mode); ceph_mkdir()
824 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; ceph_mkdir()
825 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; ceph_mkdir()
827 req->r_pagelist = acls.pagelist; ceph_mkdir()
830 err = ceph_mdsc_do_request(mdsc, dir, req); ceph_mkdir()
832 !req->r_reply_info.head->is_target && ceph_mkdir()
833 !req->r_reply_info.head->is_dentry) ceph_mkdir()
835 ceph_mdsc_put_request(req); ceph_mkdir()
850 struct ceph_mds_request *req; ceph_link() local
858 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS); ceph_link()
859 if (IS_ERR(req)) { ceph_link()
861 return PTR_ERR(req); ceph_link()
863 req->r_dentry = dget(dentry); ceph_link()
864 req->r_num_caps = 2; ceph_link()
865 req->r_old_dentry = dget(old_dentry); ceph_link()
866 req->r_locked_dir = dir; ceph_link()
867 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; ceph_link()
868 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; ceph_link()
870 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED; ceph_link()
871 err = ceph_mdsc_do_request(mdsc, dir, req); ceph_link()
874 } else if (!req->r_reply_info.head->is_dentry) { ceph_link()
878 ceph_mdsc_put_request(req); ceph_link()
910 struct ceph_mds_request *req; ceph_unlink() local
925 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); ceph_unlink()
926 if (IS_ERR(req)) { ceph_unlink()
927 err = PTR_ERR(req); ceph_unlink()
930 req->r_dentry = dget(dentry); ceph_unlink()
931 req->r_num_caps = 2; ceph_unlink()
932 req->r_locked_dir = dir; ceph_unlink()
933 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; ceph_unlink()
934 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; ceph_unlink()
935 req->r_inode_drop = drop_caps_for_unlink(inode); ceph_unlink()
936 err = ceph_mdsc_do_request(mdsc, dir, req); ceph_unlink()
937 if (!err && !req->r_reply_info.head->is_dentry) ceph_unlink()
939 ceph_mdsc_put_request(req); ceph_unlink()
949 struct ceph_mds_request *req; ceph_rename() local
963 req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS); ceph_rename()
964 if (IS_ERR(req)) ceph_rename()
965 return PTR_ERR(req); ceph_rename()
967 req->r_dentry = dget(new_dentry); ceph_rename()
968 req->r_num_caps = 2; ceph_rename()
969 req->r_old_dentry = dget(old_dentry); ceph_rename()
970 req->r_old_dentry_dir = old_dir; ceph_rename()
971 req->r_locked_dir = new_dir; ceph_rename()
972 req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED; ceph_rename()
973 req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL; ceph_rename()
974 req->r_dentry_drop = CEPH_CAP_FILE_SHARED; ceph_rename()
975 req->r_dentry_unless = CEPH_CAP_FILE_EXCL; ceph_rename()
977 req->r_old_inode_drop = CEPH_CAP_LINK_SHARED; ceph_rename()
979 req->r_inode_drop = drop_caps_for_unlink(d_inode(new_dentry)); ceph_rename()
980 err = ceph_mdsc_do_request(mdsc, old_dir, req); ceph_rename()
981 if (!err && !req->r_reply_info.head->is_dentry) { ceph_rename()
999 ceph_mdsc_put_request(req); ceph_rename()
1236 struct ceph_mds_request *req; ceph_dir_fsync() local
1250 req = list_entry(head->prev, ceph_dir_fsync()
1252 last_tid = req->r_tid; ceph_dir_fsync()
1255 ceph_mdsc_get_request(req); ceph_dir_fsync()
1259 inode, req->r_tid, last_tid); ceph_dir_fsync()
1260 if (req->r_timeout) { ceph_dir_fsync()
1262 &req->r_safe_completion, ceph_dir_fsync()
1263 req->r_timeout); ceph_dir_fsync()
1269 wait_for_completion(&req->r_safe_completion); ceph_dir_fsync()
1271 ceph_mdsc_put_request(req); ceph_dir_fsync()
1276 req = list_entry(head->next, ceph_dir_fsync()
1278 } while (req->r_tid < last_tid); ceph_dir_fsync()
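
The fs/ceph/dir.c hits above repeat one shape in ceph_mknod(), ceph_mkdir(), ceph_unlink() and the rest: create an MDS request, fill its parameters, submit it with ceph_mdsc_do_request(), and drop the reference on every exit path. A schematic userspace sketch of just that control flow; every identifier below is a hypothetical stand-in for the kernel names.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

struct mds_request { int op; int num_caps; int err; };

static struct mds_request *create_request(int op)
{
	struct mds_request *req = calloc(1, sizeof(*req));

	if (req)
		req->op = op;
	return req;             /* NULL on allocation failure */
}

static int do_request(struct mds_request *req)
{
	return req->err;        /* pretend the MDS replied successfully */
}

static void put_request(struct mds_request *req)
{
	free(req);              /* the kernel drops a kref instead of freeing directly */
}

/* the create / fill / submit / release pattern the dir.c hits keep repeating */
static int dir_op(int op)
{
	struct mds_request *req;
	int err;

	req = create_request(op);
	if (!req)
		return -ENOMEM;

	req->num_caps = 2;      /* fill request parameters */
	err = do_request(req);  /* submit and wait for the reply */
	put_request(req);       /* release exactly once on every path */
	return err;
}

int main(void)
{
	printf("mkdir-like op -> %d\n", dir_op(1));
	return 0;
}
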
H A Dlocks.c13 struct ceph_mds_request *req);
40 struct ceph_mds_request *req; ceph_lock_message() local
48 req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS); ceph_lock_message()
49 if (IS_ERR(req)) ceph_lock_message()
50 return PTR_ERR(req); ceph_lock_message()
51 req->r_inode = inode; ceph_lock_message()
53 req->r_num_caps = 1; ceph_lock_message()
68 req->r_args.filelock_change.rule = lock_type; ceph_lock_message()
69 req->r_args.filelock_change.type = cmd; ceph_lock_message()
70 req->r_args.filelock_change.owner = cpu_to_le64(owner); ceph_lock_message()
71 req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid); ceph_lock_message()
72 req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start); ceph_lock_message()
73 req->r_args.filelock_change.length = cpu_to_le64(length); ceph_lock_message()
74 req->r_args.filelock_change.wait = wait; ceph_lock_message()
77 req->r_wait_for_completion = ceph_lock_wait_for_completion; ceph_lock_message()
79 err = ceph_mdsc_do_request(mdsc, inode, req); ceph_lock_message()
82 fl->fl_pid = le64_to_cpu(req->r_reply_info.filelock_reply->pid); ceph_lock_message()
83 if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type) ceph_lock_message()
85 else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type) ceph_lock_message()
90 fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start); ceph_lock_message()
91 length = le64_to_cpu(req->r_reply_info.filelock_reply->start) + ceph_lock_message()
92 le64_to_cpu(req->r_reply_info.filelock_reply->length); ceph_lock_message()
99 ceph_mdsc_put_request(req); ceph_lock_message()
108 struct ceph_mds_request *req) ceph_lock_wait_for_completion()
111 struct inode *inode = req->r_inode; ceph_lock_wait_for_completion()
114 BUG_ON(req->r_op != CEPH_MDS_OP_SETFILELOCK); ceph_lock_wait_for_completion()
115 if (req->r_args.filelock_change.rule == CEPH_LOCK_FCNTL) ceph_lock_wait_for_completion()
117 else if (req->r_args.filelock_change.rule == CEPH_LOCK_FLOCK) ceph_lock_wait_for_completion()
121 BUG_ON(req->r_args.filelock_change.type == CEPH_LOCK_UNLOCK); ceph_lock_wait_for_completion()
123 err = wait_for_completion_interruptible(&req->r_completion); ceph_lock_wait_for_completion()
128 req->r_tid); ceph_lock_wait_for_completion()
139 intr_req->r_args.filelock_change = req->r_args.filelock_change; ceph_lock_wait_for_completion()
149 wait_for_completion(&req->r_completion); ceph_lock_wait_for_completion()
107 ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc, struct ceph_mds_request *req) ceph_lock_wait_for_completion() argument
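
The fs/ceph/locks.c hits above show ceph_lock_message() marshalling a VFS byte-range lock (inclusive fl_start/fl_end) into the request's filelock_change.start/length pair and decoding the reply back into fl_start/fl_end. A small sketch of the forward conversion, assuming the usual "length 0 means to end of file" encoding; it is an illustration, not a copy of the kernel routine.

#include <stdio.h>
#include <limits.h>

/* encode an inclusive [fl_start, fl_end] range as (start, length) */
static void lock_to_wire(long long fl_start, long long fl_end,
			 unsigned long long *start, unsigned long long *length)
{
	*start = (unsigned long long)fl_start;
	if (fl_end == LLONG_MAX)
		*length = 0;    /* assumed convention: lock to end of file */
	else
		*length = (unsigned long long)(fl_end - fl_start + 1);
}

int main(void)
{
	unsigned long long start, length;

	lock_to_wire(100, 199, &start, &length);        /* 100 bytes at offset 100 */
	printf("bounded:   start %llu length %llu\n", start, length);

	lock_to_wire(0, LLONG_MAX, &start, &length);    /* whole-file lock */
	printf("unbounded: start %llu length %llu\n", start, length);
	return 0;
}
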
H A Dioctl.c67 struct ceph_mds_request *req; ceph_ioctl_set_layout() local
106 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETLAYOUT, ceph_ioctl_set_layout()
108 if (IS_ERR(req)) ceph_ioctl_set_layout()
109 return PTR_ERR(req); ceph_ioctl_set_layout()
110 req->r_inode = inode; ceph_ioctl_set_layout()
112 req->r_num_caps = 1; ceph_ioctl_set_layout()
114 req->r_inode_drop = CEPH_CAP_FILE_SHARED | CEPH_CAP_FILE_EXCL; ceph_ioctl_set_layout()
116 req->r_args.setlayout.layout.fl_stripe_unit = ceph_ioctl_set_layout()
118 req->r_args.setlayout.layout.fl_stripe_count = ceph_ioctl_set_layout()
120 req->r_args.setlayout.layout.fl_object_size = ceph_ioctl_set_layout()
122 req->r_args.setlayout.layout.fl_pg_pool = cpu_to_le32(l.data_pool); ceph_ioctl_set_layout()
124 err = ceph_mdsc_do_request(mdsc, NULL, req); ceph_ioctl_set_layout()
125 ceph_mdsc_put_request(req); ceph_ioctl_set_layout()
138 struct ceph_mds_request *req; ceph_ioctl_set_layout_policy() local
151 req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETDIRLAYOUT, ceph_ioctl_set_layout_policy()
154 if (IS_ERR(req)) ceph_ioctl_set_layout_policy()
155 return PTR_ERR(req); ceph_ioctl_set_layout_policy()
156 req->r_inode = inode; ceph_ioctl_set_layout_policy()
158 req->r_num_caps = 1; ceph_ioctl_set_layout_policy()
160 req->r_args.setlayout.layout.fl_stripe_unit = ceph_ioctl_set_layout_policy()
162 req->r_args.setlayout.layout.fl_stripe_count = ceph_ioctl_set_layout_policy()
164 req->r_args.setlayout.layout.fl_object_size = ceph_ioctl_set_layout_policy()
166 req->r_args.setlayout.layout.fl_pg_pool = ceph_ioctl_set_layout_policy()
169 err = ceph_mdsc_do_request(mdsc, inode, req); ceph_ioctl_set_layout_policy()
170 ceph_mdsc_put_request(req); ceph_ioctl_set_layout_policy()
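
The fs/ceph/ioctl.c hits above convert each user-supplied layout field with cpu_to_le32() before storing it in the wire-format request args, so the on-wire layout is little-endian regardless of the host. A portable userspace equivalent of that conversion (the kernel uses its own byteorder helpers, not this function):

#include <stdio.h>
#include <stdint.h>

/* convert a host-order 32-bit value to little-endian wire order */
static uint32_t to_le32(uint32_t v)
{
	const union { uint32_t u; uint8_t b[4]; } probe = { .u = 1 };

	if (probe.b[0] == 1)
		return v;                       /* little-endian host: already wire order */
	return ((v & 0x000000ffu) << 24) |      /* big-endian host: byte-swap */
	       ((v & 0x0000ff00u) <<  8) |
	       ((v & 0x00ff0000u) >>  8) |
	       ((v & 0xff000000u) >> 24);
}

int main(void)
{
	uint32_t stripe_unit = 65536;           /* example layout parameter */

	printf("wire value: 0x%08x\n", to_le32(stripe_unit));
	return 0;
}
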
/linux-4.1.27/net/ceph/
H A Dosd_client.c32 struct ceph_osd_request *req);
34 struct ceph_osd_request *req);
36 struct ceph_osd_request *req);
37 static void __enqueue_request(struct ceph_osd_request *req);
39 struct ceph_osd_request *req);
309 struct ceph_osd_request *req = container_of(kref, ceph_osdc_release_request() local
313 dout("%s %p (r_request %p r_reply %p)\n", __func__, req, ceph_osdc_release_request()
314 req->r_request, req->r_reply); ceph_osdc_release_request()
315 WARN_ON(!RB_EMPTY_NODE(&req->r_node)); ceph_osdc_release_request()
316 WARN_ON(!list_empty(&req->r_req_lru_item)); ceph_osdc_release_request()
317 WARN_ON(!list_empty(&req->r_osd_item)); ceph_osdc_release_request()
318 WARN_ON(!list_empty(&req->r_linger_item)); ceph_osdc_release_request()
319 WARN_ON(!list_empty(&req->r_linger_osd_item)); ceph_osdc_release_request()
320 WARN_ON(req->r_osd); ceph_osdc_release_request()
322 if (req->r_request) ceph_osdc_release_request()
323 ceph_msg_put(req->r_request); ceph_osdc_release_request()
324 if (req->r_reply) { ceph_osdc_release_request()
325 ceph_msg_revoke_incoming(req->r_reply); ceph_osdc_release_request()
326 ceph_msg_put(req->r_reply); ceph_osdc_release_request()
329 for (which = 0; which < req->r_num_ops; which++) ceph_osdc_release_request()
330 osd_req_op_data_release(req, which); ceph_osdc_release_request()
332 ceph_put_snap_context(req->r_snapc); ceph_osdc_release_request()
333 if (req->r_mempool) ceph_osdc_release_request()
334 mempool_free(req, req->r_osdc->req_mempool); ceph_osdc_release_request()
336 kmem_cache_free(ceph_osd_request_cache, req); ceph_osdc_release_request()
340 void ceph_osdc_get_request(struct ceph_osd_request *req) ceph_osdc_get_request() argument
342 dout("%s %p (was %d)\n", __func__, req, ceph_osdc_get_request()
343 atomic_read(&req->r_kref.refcount)); ceph_osdc_get_request()
344 kref_get(&req->r_kref); ceph_osdc_get_request()
348 void ceph_osdc_put_request(struct ceph_osd_request *req) ceph_osdc_put_request() argument
350 dout("%s %p (was %d)\n", __func__, req, ceph_osdc_put_request()
351 atomic_read(&req->r_kref.refcount)); ceph_osdc_put_request()
352 kref_put(&req->r_kref, ceph_osdc_release_request); ceph_osdc_put_request()
362 struct ceph_osd_request *req; ceph_osdc_alloc_request() local
380 req = mempool_alloc(osdc->req_mempool, gfp_flags); ceph_osdc_alloc_request()
381 memset(req, 0, sizeof(*req)); ceph_osdc_alloc_request()
383 req = kmem_cache_zalloc(ceph_osd_request_cache, gfp_flags); ceph_osdc_alloc_request()
385 if (req == NULL) ceph_osdc_alloc_request()
388 req->r_osdc = osdc; ceph_osdc_alloc_request()
389 req->r_mempool = use_mempool; ceph_osdc_alloc_request()
390 req->r_num_ops = num_ops; ceph_osdc_alloc_request()
392 kref_init(&req->r_kref); ceph_osdc_alloc_request()
393 init_completion(&req->r_completion); ceph_osdc_alloc_request()
394 init_completion(&req->r_safe_completion); ceph_osdc_alloc_request()
395 RB_CLEAR_NODE(&req->r_node); ceph_osdc_alloc_request()
396 INIT_LIST_HEAD(&req->r_unsafe_item); ceph_osdc_alloc_request()
397 INIT_LIST_HEAD(&req->r_linger_item); ceph_osdc_alloc_request()
398 INIT_LIST_HEAD(&req->r_linger_osd_item); ceph_osdc_alloc_request()
399 INIT_LIST_HEAD(&req->r_req_lru_item); ceph_osdc_alloc_request()
400 INIT_LIST_HEAD(&req->r_osd_item); ceph_osdc_alloc_request()
402 req->r_base_oloc.pool = -1; ceph_osdc_alloc_request()
403 req->r_target_oloc.pool = -1; ceph_osdc_alloc_request()
412 ceph_osdc_put_request(req); ceph_osdc_alloc_request()
415 req->r_reply = msg; ceph_osdc_alloc_request()
423 ceph_osdc_put_request(req); ceph_osdc_alloc_request()
429 req->r_request = msg; ceph_osdc_alloc_request()
431 return req; ceph_osdc_alloc_request()
641 static u64 osd_req_encode_op(struct ceph_osd_request *req, osd_req_encode_op() argument
649 BUG_ON(which >= req->r_num_ops); osd_req_encode_op()
650 src = &req->r_ops[which]; osd_req_encode_op()
660 ceph_osdc_msg_data_add(req->r_reply, osd_data); osd_req_encode_op()
676 ceph_osdc_msg_data_add(req->r_request, osd_data); osd_req_encode_op()
678 ceph_osdc_msg_data_add(req->r_reply, osd_data); osd_req_encode_op()
684 ceph_osdc_msg_data_add(req->r_request, osd_data); osd_req_encode_op()
693 ceph_osdc_msg_data_add(req->r_request, osd_data); osd_req_encode_op()
698 ceph_osdc_msg_data_add(req->r_reply, osd_data); osd_req_encode_op()
721 ceph_osdc_msg_data_add(req->r_request, osd_data); osd_req_encode_op()
764 struct ceph_osd_request *req; ceph_osdc_new_request() local
774 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool, ceph_osdc_new_request()
776 if (!req) ceph_osdc_new_request()
779 req->r_flags = flags; ceph_osdc_new_request()
784 ceph_osdc_put_request(req); ceph_osdc_new_request()
789 osd_req_op_init(req, which, opcode); ceph_osdc_new_request()
802 osd_req_op_extent_init(req, which, opcode, objoff, objlen, ceph_osdc_new_request()
806 req->r_base_oloc.pool = ceph_file_layout_pg_pool(*layout); ceph_osdc_new_request()
808 snprintf(req->r_base_oid.name, sizeof(req->r_base_oid.name), ceph_osdc_new_request()
810 req->r_base_oid.name_len = strlen(req->r_base_oid.name); ceph_osdc_new_request()
812 return req; ceph_osdc_new_request()
824 struct ceph_osd_request *req = NULL; __insert_request() local
828 req = rb_entry(parent, struct ceph_osd_request, r_node); __insert_request()
829 if (new->r_tid < req->r_tid) __insert_request()
831 else if (new->r_tid > req->r_tid) __insert_request()
844 struct ceph_osd_request *req; __lookup_request() local
848 req = rb_entry(n, struct ceph_osd_request, r_node); __lookup_request()
849 if (tid < req->r_tid) __lookup_request()
851 else if (tid > req->r_tid) __lookup_request()
854 return req; __lookup_request()
863 struct ceph_osd_request *req; __lookup_request_ge() local
867 req = rb_entry(n, struct ceph_osd_request, r_node); __lookup_request_ge()
868 if (tid < req->r_tid) { __lookup_request_ge()
870 return req; __lookup_request_ge()
872 } else if (tid > req->r_tid) { __lookup_request_ge()
875 return req; __lookup_request_ge()
881 static void __kick_linger_request(struct ceph_osd_request *req) __kick_linger_request() argument
883 struct ceph_osd_client *osdc = req->r_osdc; __kick_linger_request()
884 struct ceph_osd *osd = req->r_osd; __kick_linger_request()
891 ceph_osdc_get_request(req); __kick_linger_request()
892 if (!list_empty(&req->r_linger_item)) __kick_linger_request()
893 __unregister_linger_request(osdc, req); __kick_linger_request()
895 __unregister_request(osdc, req); __kick_linger_request()
896 __register_request(osdc, req); __kick_linger_request()
897 ceph_osdc_put_request(req); __kick_linger_request()
905 WARN_ON(req->r_osd || !osd); __kick_linger_request()
906 req->r_osd = osd; __kick_linger_request()
908 dout("%s requeueing %p tid %llu\n", __func__, req, req->r_tid); __kick_linger_request()
909 __enqueue_request(req); __kick_linger_request()
918 struct ceph_osd_request *req, *nreq; __kick_osd_requests() local
945 list_for_each_entry(req, &osd->o_requests, r_osd_item) { __kick_osd_requests()
946 if (!req->r_sent) __kick_osd_requests()
949 if (!req->r_linger) { __kick_osd_requests()
950 dout("%s requeueing %p tid %llu\n", __func__, req, __kick_osd_requests()
951 req->r_tid); __kick_osd_requests()
952 list_move_tail(&req->r_req_lru_item, &resend); __kick_osd_requests()
953 req->r_flags |= CEPH_OSD_FLAG_RETRY; __kick_osd_requests()
955 list_move_tail(&req->r_req_lru_item, &resend_linger); __kick_osd_requests()
966 list_for_each_entry_safe(req, nreq, &osd->o_linger_requests, __kick_osd_requests()
968 WARN_ON(!list_empty(&req->r_req_lru_item)); __kick_osd_requests()
969 __kick_linger_request(req); __kick_osd_requests()
972 list_for_each_entry_safe(req, nreq, &resend_linger, r_req_lru_item) __kick_osd_requests()
973 __kick_linger_request(req); __kick_osd_requests()
1142 struct ceph_osd_request *req; __reset_osd() local
1147 list_for_each_entry(req, &osd->o_requests, r_osd_item) __reset_osd()
1148 req->r_stamp = jiffies; __reset_osd()
1215 struct ceph_osd_request *req) __register_request()
1217 req->r_tid = ++osdc->last_tid; __register_request()
1218 req->r_request->hdr.tid = cpu_to_le64(req->r_tid); __register_request()
1219 dout("__register_request %p tid %lld\n", req, req->r_tid); __register_request()
1220 __insert_request(osdc, req); __register_request()
1221 ceph_osdc_get_request(req); __register_request()
1233 struct ceph_osd_request *req) __unregister_request()
1235 if (RB_EMPTY_NODE(&req->r_node)) { __unregister_request()
1237 req, req->r_tid); __unregister_request()
1241 dout("__unregister_request %p tid %lld\n", req, req->r_tid); __unregister_request()
1242 rb_erase(&req->r_node, &osdc->requests); __unregister_request()
1243 RB_CLEAR_NODE(&req->r_node); __unregister_request()
1246 if (req->r_osd) { __unregister_request()
1248 ceph_msg_revoke(req->r_request); __unregister_request()
1250 list_del_init(&req->r_osd_item); __unregister_request()
1251 maybe_move_osd_to_lru(osdc, req->r_osd); __unregister_request()
1252 if (list_empty(&req->r_linger_osd_item)) __unregister_request()
1253 req->r_osd = NULL; __unregister_request()
1256 list_del_init(&req->r_req_lru_item); __unregister_request()
1257 ceph_osdc_put_request(req); __unregister_request()
1268 static void __cancel_request(struct ceph_osd_request *req) __cancel_request() argument
1270 if (req->r_sent && req->r_osd) { __cancel_request()
1271 ceph_msg_revoke(req->r_request); __cancel_request()
1272 req->r_sent = 0; __cancel_request()
1277 struct ceph_osd_request *req) __register_linger_request()
1279 dout("%s %p tid %llu\n", __func__, req, req->r_tid); __register_linger_request()
1280 WARN_ON(!req->r_linger); __register_linger_request()
1282 ceph_osdc_get_request(req); __register_linger_request()
1283 list_add_tail(&req->r_linger_item, &osdc->req_linger); __register_linger_request()
1284 if (req->r_osd) __register_linger_request()
1285 list_add_tail(&req->r_linger_osd_item, __register_linger_request()
1286 &req->r_osd->o_linger_requests); __register_linger_request()
1290 struct ceph_osd_request *req) __unregister_linger_request()
1292 WARN_ON(!req->r_linger); __unregister_linger_request()
1294 if (list_empty(&req->r_linger_item)) { __unregister_linger_request()
1295 dout("%s %p tid %llu not registered\n", __func__, req, __unregister_linger_request()
1296 req->r_tid); __unregister_linger_request()
1300 dout("%s %p tid %llu\n", __func__, req, req->r_tid); __unregister_linger_request()
1301 list_del_init(&req->r_linger_item); __unregister_linger_request()
1303 if (req->r_osd) { __unregister_linger_request()
1304 list_del_init(&req->r_linger_osd_item); __unregister_linger_request()
1305 maybe_move_osd_to_lru(osdc, req->r_osd); __unregister_linger_request()
1306 if (list_empty(&req->r_osd_item)) __unregister_linger_request()
1307 req->r_osd = NULL; __unregister_linger_request()
1309 ceph_osdc_put_request(req); __unregister_linger_request()
1313 struct ceph_osd_request *req) ceph_osdc_set_request_linger()
1315 if (!req->r_linger) { ceph_osdc_set_request_linger()
1316 dout("set_request_linger %p\n", req); ceph_osdc_set_request_linger()
1317 req->r_linger = 1; ceph_osdc_set_request_linger()
1329 struct ceph_osd_request *req) __req_should_be_paused()
1334 return (req->r_flags & CEPH_OSD_FLAG_READ && pauserd) || __req_should_be_paused()
1335 (req->r_flags & CEPH_OSD_FLAG_WRITE && pausewr); __req_should_be_paused()
1342 struct ceph_osd_request *req, __calc_request_pg()
1348 if (req->r_target_oloc.pool == -1) { __calc_request_pg()
1349 req->r_target_oloc = req->r_base_oloc; /* struct */ __calc_request_pg()
1352 if (req->r_target_oid.name_len == 0) { __calc_request_pg()
1353 ceph_oid_copy(&req->r_target_oid, &req->r_base_oid); __calc_request_pg()
1358 (req->r_flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) { __calc_request_pg()
1361 pi = ceph_pg_pool_by_id(osdmap, req->r_target_oloc.pool); __calc_request_pg()
1363 if ((req->r_flags & CEPH_OSD_FLAG_READ) && __calc_request_pg()
1365 req->r_target_oloc.pool = pi->read_tier; __calc_request_pg()
1366 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) && __calc_request_pg()
1368 req->r_target_oloc.pool = pi->write_tier; __calc_request_pg()
1373 return ceph_oloc_oid_to_pg(osdmap, &req->r_target_oloc, __calc_request_pg()
1374 &req->r_target_oid, pg_out); __calc_request_pg()
1377 static void __enqueue_request(struct ceph_osd_request *req) __enqueue_request() argument
1379 struct ceph_osd_client *osdc = req->r_osdc; __enqueue_request()
1381 dout("%s %p tid %llu to osd%d\n", __func__, req, req->r_tid, __enqueue_request()
1382 req->r_osd ? req->r_osd->o_osd : -1); __enqueue_request()
1384 if (req->r_osd) { __enqueue_request()
1385 __remove_osd_from_lru(req->r_osd); __enqueue_request()
1386 list_add_tail(&req->r_osd_item, &req->r_osd->o_requests); __enqueue_request()
1387 list_move_tail(&req->r_req_lru_item, &osdc->req_unsent); __enqueue_request()
1389 list_move_tail(&req->r_req_lru_item, &osdc->req_notarget); __enqueue_request()
1404 struct ceph_osd_request *req, int force_resend) __map_request()
1412 dout("map_request %p tid %lld\n", req, req->r_tid); __map_request()
1414 err = __calc_request_pg(osdc->osdmap, req, &pgid); __map_request()
1416 list_move(&req->r_req_lru_item, &osdc->req_notarget); __map_request()
1419 req->r_pgid = pgid; __map_request()
1425 was_paused = req->r_paused; __map_request()
1426 req->r_paused = __req_should_be_paused(osdc, req); __map_request()
1427 if (was_paused && !req->r_paused) __map_request()
1431 req->r_osd && req->r_osd->o_osd == o && __map_request()
1432 req->r_sent >= req->r_osd->o_incarnation && __map_request()
1433 req->r_num_pg_osds == num && __map_request()
1434 memcmp(req->r_pg_osds, acting, sizeof(acting[0])*num) == 0) || __map_request()
1435 (req->r_osd == NULL && o == -1) || __map_request()
1436 req->r_paused) __map_request()
1440 req->r_tid, pgid.pool, pgid.seed, o, __map_request()
1441 req->r_osd ? req->r_osd->o_osd : -1); __map_request()
1444 memcpy(req->r_pg_osds, acting, sizeof(acting[0]) * num); __map_request()
1445 req->r_num_pg_osds = num; __map_request()
1447 if (req->r_osd) { __map_request()
1448 __cancel_request(req); __map_request()
1449 list_del_init(&req->r_osd_item); __map_request()
1450 list_del_init(&req->r_linger_osd_item); __map_request()
1451 req->r_osd = NULL; __map_request()
1454 req->r_osd = __lookup_osd(osdc, o); __map_request()
1455 if (!req->r_osd && o >= 0) { __map_request()
1457 req->r_osd = create_osd(osdc, o); __map_request()
1458 if (!req->r_osd) { __map_request()
1459 list_move(&req->r_req_lru_item, &osdc->req_notarget); __map_request()
1463 dout("map_request osd %p is osd%d\n", req->r_osd, o); __map_request()
1464 __insert_osd(osdc, req->r_osd); __map_request()
1466 ceph_con_open(&req->r_osd->o_con, __map_request()
1471 __enqueue_request(req); __map_request()
1482 struct ceph_osd_request *req) __send_request()
1487 req, req->r_tid, req->r_osd->o_osd, req->r_flags, __send_request()
1488 (unsigned long long)req->r_pgid.pool, req->r_pgid.seed); __send_request()
1491 put_unaligned_le32(osdc->osdmap->epoch, req->r_request_osdmap_epoch); __send_request()
1492 put_unaligned_le32(req->r_flags, req->r_request_flags); __send_request()
1493 put_unaligned_le64(req->r_target_oloc.pool, req->r_request_pool); __send_request()
1494 p = req->r_request_pgid; __send_request()
1495 ceph_encode_64(&p, req->r_pgid.pool); __send_request()
1496 ceph_encode_32(&p, req->r_pgid.seed); __send_request()
1497 put_unaligned_le64(1, req->r_request_attempts); /* FIXME */ __send_request()
1498 memcpy(req->r_request_reassert_version, &req->r_reassert_version, __send_request()
1499 sizeof(req->r_reassert_version)); __send_request()
1501 req->r_stamp = jiffies; __send_request()
1502 list_move_tail(&req->r_req_lru_item, &osdc->req_lru); __send_request()
1504 ceph_msg_get(req->r_request); /* send consumes a ref */ __send_request()
1506 req->r_sent = req->r_osd->o_incarnation; __send_request()
1508 ceph_con_send(&req->r_osd->o_con, req->r_request); __send_request()
1516 struct ceph_osd_request *req, *tmp; __send_queued() local
1519 list_for_each_entry_safe(req, tmp, &osdc->req_unsent, r_req_lru_item) __send_queued()
1520 __send_request(osdc, req); __send_queued()
1527 struct ceph_osd_request *req, __ceph_osdc_start_request()
1532 __register_request(osdc, req); __ceph_osdc_start_request()
1533 req->r_sent = 0; __ceph_osdc_start_request()
1534 req->r_got_reply = 0; __ceph_osdc_start_request()
1535 rc = __map_request(osdc, req, 0); __ceph_osdc_start_request()
1539 " will retry %lld\n", req->r_tid); __ceph_osdc_start_request()
1542 __unregister_request(osdc, req); __ceph_osdc_start_request()
1547 if (req->r_osd == NULL) { __ceph_osdc_start_request()
1548 dout("send_request %p no up osds in pg\n", req); __ceph_osdc_start_request()
1570 struct ceph_osd_request *req; handle_timeout() local
1588 list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) { handle_timeout()
1589 if (time_before(jiffies, req->r_stamp + keepalive)) handle_timeout()
1592 osd = req->r_osd; handle_timeout()
1595 req->r_tid, osd->o_osd); handle_timeout()
1731 static void complete_request(struct ceph_osd_request *req) complete_request() argument
1733 complete_all(&req->r_safe_completion); /* fsync waiter */ complete_request()
1744 struct ceph_osd_request *req; handle_reply() local
1786 req = __lookup_request(osdc, tid); handle_reply()
1787 if (req == NULL) { handle_reply()
1791 ceph_osdc_get_request(req); handle_reply()
1793 dout("handle_reply %p tid %llu req %p result %d\n", msg, tid, handle_reply()
1794 req, result); handle_reply()
1800 if (numops != req->r_num_ops) handle_reply()
1809 req->r_reply_op_len[i] = len; handle_reply()
1824 req->r_reply_op_result[i] = ceph_decode_32(&p); handle_reply()
1840 __unregister_request(osdc, req); handle_reply()
1842 req->r_target_oloc = redir.oloc; /* struct */ handle_reply()
1852 err = __ceph_osdc_start_request(osdc, req, true); handle_reply()
1858 already_completed = req->r_got_reply; handle_reply()
1859 if (!req->r_got_reply) { handle_reply()
1860 req->r_result = result; handle_reply()
1861 dout("handle_reply result %d bytes %d\n", req->r_result, handle_reply()
1863 if (req->r_result == 0) handle_reply()
1864 req->r_result = bytes; handle_reply()
1867 req->r_reassert_version.epoch = cpu_to_le32(reassert_epoch); handle_reply()
1868 req->r_reassert_version.version = cpu_to_le64(reassert_version); handle_reply()
1870 req->r_got_reply = 1; handle_reply()
1878 if (req->r_linger && (flags & CEPH_OSD_FLAG_ONDISK)) handle_reply()
1879 __register_linger_request(osdc, req); handle_reply()
1885 __unregister_request(osdc, req); handle_reply()
1891 if (req->r_unsafe_callback && handle_reply()
1893 req->r_unsafe_callback(req, true); handle_reply()
1894 if (req->r_callback) handle_reply()
1895 req->r_callback(req, msg); handle_reply()
1897 complete_all(&req->r_completion); handle_reply()
1901 if (req->r_unsafe_callback && already_completed) handle_reply()
1902 req->r_unsafe_callback(req, false); handle_reply()
1903 complete_request(req); handle_reply()
1907 dout("req=%p req->r_linger=%d\n", req, req->r_linger); handle_reply()
1908 ceph_osdc_put_request(req); handle_reply()
1916 req->r_result = -EIO; handle_reply()
1917 __unregister_request(osdc, req); handle_reply()
1918 if (req->r_callback) handle_reply()
1919 req->r_callback(req, msg); handle_reply()
1921 complete_all(&req->r_completion); handle_reply()
1922 complete_request(req); handle_reply()
1923 ceph_osdc_put_request(req); handle_reply()
1960 struct ceph_osd_request *req, *nreq; kick_requests() local
1970 req = rb_entry(p, struct ceph_osd_request, r_node); kick_requests()
1981 if (req->r_linger && list_empty(&req->r_linger_item)) { kick_requests()
1983 req, req->r_tid, kick_requests()
1984 req->r_osd ? req->r_osd->o_osd : -1); kick_requests()
1985 ceph_osdc_get_request(req); kick_requests()
1986 __unregister_request(osdc, req); kick_requests()
1987 __register_linger_request(osdc, req); kick_requests()
1988 ceph_osdc_put_request(req); kick_requests()
1994 req->r_flags & CEPH_OSD_FLAG_WRITE); kick_requests()
1995 err = __map_request(osdc, req, force_resend_req); kick_requests()
1998 if (req->r_osd == NULL) { kick_requests()
1999 dout("%p tid %llu maps to no osd\n", req, req->r_tid); kick_requests()
2002 if (!req->r_linger) { kick_requests()
2003 dout("%p tid %llu requeued on osd%d\n", req, kick_requests()
2004 req->r_tid, kick_requests()
2005 req->r_osd ? req->r_osd->o_osd : -1); kick_requests()
2006 req->r_flags |= CEPH_OSD_FLAG_RETRY; kick_requests()
2011 list_for_each_entry_safe(req, nreq, &osdc->req_linger, kick_requests()
2013 dout("linger req=%p req->r_osd=%p\n", req, req->r_osd); kick_requests()
2015 err = __map_request(osdc, req, kick_requests()
2020 if (req->r_osd == NULL || err > 0) { kick_requests()
2021 if (req->r_osd == NULL) { kick_requests()
2023 req, req->r_tid); kick_requests()
2036 dout("kicking lingering %p tid %llu osd%d\n", req, kick_requests()
2037 req->r_tid, req->r_osd ? req->r_osd->o_osd : -1); kick_requests()
2038 __register_request(osdc, req); kick_requests()
2039 __unregister_linger_request(osdc, req); kick_requests()
2388 void ceph_osdc_build_request(struct ceph_osd_request *req, u64 off, ceph_osdc_build_request() argument
2392 struct ceph_msg *msg = req->r_request; ceph_osdc_build_request()
2395 int flags = req->r_flags; ceph_osdc_build_request()
2399 req->r_snapid = snap_id; ceph_osdc_build_request()
2400 req->r_snapc = ceph_get_snap_context(snapc); ceph_osdc_build_request()
2407 req->r_request_osdmap_epoch = p; ceph_osdc_build_request()
2409 req->r_request_flags = p; ceph_osdc_build_request()
2411 if (req->r_flags & CEPH_OSD_FLAG_WRITE) ceph_osdc_build_request()
2414 req->r_request_reassert_version = p; ceph_osdc_build_request()
2421 req->r_request_pool = p; ceph_osdc_build_request()
2427 req->r_request_pgid = p; ceph_osdc_build_request()
2432 ceph_encode_32(&p, req->r_base_oid.name_len); ceph_osdc_build_request()
2433 memcpy(p, req->r_base_oid.name, req->r_base_oid.name_len); ceph_osdc_build_request()
2434 dout("oid '%.*s' len %d\n", req->r_base_oid.name_len, ceph_osdc_build_request()
2435 req->r_base_oid.name, req->r_base_oid.name_len); ceph_osdc_build_request()
2436 p += req->r_base_oid.name_len; ceph_osdc_build_request()
2439 ceph_encode_16(&p, (u16)req->r_num_ops); ceph_osdc_build_request()
2441 for (i = 0; i < req->r_num_ops; i++) { ceph_osdc_build_request()
2442 data_len += osd_req_encode_op(req, p, i); ceph_osdc_build_request()
2447 ceph_encode_64(&p, req->r_snapid); ceph_osdc_build_request()
2448 ceph_encode_64(&p, req->r_snapc ? req->r_snapc->seq : 0); ceph_osdc_build_request()
2449 ceph_encode_32(&p, req->r_snapc ? req->r_snapc->num_snaps : 0); ceph_osdc_build_request()
2450 if (req->r_snapc) { ceph_osdc_build_request()
2452 ceph_encode_64(&p, req->r_snapc->snaps[i]); ceph_osdc_build_request()
2456 req->r_request_attempts = p; ceph_osdc_build_request()
2470 req->r_request->hdr.data_off = cpu_to_le16(data_off); ceph_osdc_build_request()
2472 req->r_request->hdr.data_len = cpu_to_le32(data_len); ceph_osdc_build_request()
2487 struct ceph_osd_request *req, ceph_osdc_start_request()
2495 rc = __ceph_osdc_start_request(osdc, req, nofail); ceph_osdc_start_request()
2509 void ceph_osdc_cancel_request(struct ceph_osd_request *req) ceph_osdc_cancel_request() argument
2511 struct ceph_osd_client *osdc = req->r_osdc; ceph_osdc_cancel_request()
2514 if (req->r_linger) ceph_osdc_cancel_request()
2515 __unregister_linger_request(osdc, req); ceph_osdc_cancel_request()
2516 __unregister_request(osdc, req); ceph_osdc_cancel_request()
2519 dout("%s %p tid %llu canceled\n", __func__, req, req->r_tid); ceph_osdc_cancel_request()
2527 struct ceph_osd_request *req) ceph_osdc_wait_request()
2531 dout("%s %p tid %llu\n", __func__, req, req->r_tid); ceph_osdc_wait_request()
2533 rc = wait_for_completion_interruptible(&req->r_completion); ceph_osdc_wait_request()
2535 dout("%s %p tid %llu interrupted\n", __func__, req, req->r_tid); ceph_osdc_wait_request()
2536 ceph_osdc_cancel_request(req); ceph_osdc_wait_request()
2537 complete_request(req); ceph_osdc_wait_request()
2541 dout("%s %p tid %llu result %d\n", __func__, req, req->r_tid, ceph_osdc_wait_request()
2542 req->r_result); ceph_osdc_wait_request()
2543 return req->r_result; ceph_osdc_wait_request()
2552 struct ceph_osd_request *req; ceph_osdc_sync() local
2558 req = __lookup_request_ge(osdc, next_tid); ceph_osdc_sync()
2559 if (!req) ceph_osdc_sync()
2561 if (req->r_tid > last_tid) ceph_osdc_sync()
2564 next_tid = req->r_tid + 1; ceph_osdc_sync()
2565 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) == 0) ceph_osdc_sync()
2568 ceph_osdc_get_request(req); ceph_osdc_sync()
2571 req->r_tid, last_tid); ceph_osdc_sync()
2572 wait_for_completion(&req->r_safe_completion); ceph_osdc_sync()
2574 ceph_osdc_put_request(req); ceph_osdc_sync()
2684 struct ceph_osd_request *req; ceph_osdc_readpages() local
2689 req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1, ceph_osdc_readpages()
2693 if (IS_ERR(req)) ceph_osdc_readpages()
2694 return PTR_ERR(req); ceph_osdc_readpages()
2698 osd_req_op_extent_osd_data_pages(req, 0, ceph_osdc_readpages()
2704 ceph_osdc_build_request(req, off, NULL, vino.snap, NULL); ceph_osdc_readpages()
2706 rc = ceph_osdc_start_request(osdc, req, false); ceph_osdc_readpages()
2708 rc = ceph_osdc_wait_request(osdc, req); ceph_osdc_readpages()
2710 ceph_osdc_put_request(req); ceph_osdc_readpages()
2727 struct ceph_osd_request *req; ceph_osdc_writepages() local
2732 req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1, ceph_osdc_writepages()
2737 if (IS_ERR(req)) ceph_osdc_writepages()
2738 return PTR_ERR(req); ceph_osdc_writepages()
2741 osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align, ceph_osdc_writepages()
2745 ceph_osdc_build_request(req, off, snapc, CEPH_NOSNAP, mtime); ceph_osdc_writepages()
2747 rc = ceph_osdc_start_request(osdc, req, true); ceph_osdc_writepages()
2749 rc = ceph_osdc_wait_request(osdc, req); ceph_osdc_writepages()
2751 ceph_osdc_put_request(req); ceph_osdc_writepages()
2822 struct ceph_osd_request *req; get_reply() local
2829 req = __lookup_request(osdc, tid); get_reply()
2830 if (!req) { get_reply()
2838 if (req->r_reply->con) get_reply()
2840 req->r_reply, req->r_reply->con); get_reply()
2841 ceph_msg_revoke_incoming(req->r_reply); get_reply()
2843 if (front_len > req->r_reply->front_alloc_len) { get_reply()
2845 front_len, req->r_reply->front_alloc_len, get_reply()
2852 ceph_msg_put(req->r_reply); get_reply()
2853 req->r_reply = m; get_reply()
2855 m = ceph_msg_get(req->r_reply); get_reply()
2865 osd_data = osd_req_op_extent_osd_data(req, 0); get_reply()
1214 __register_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) __register_request() argument
1232 __unregister_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) __unregister_request() argument
1276 __register_linger_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) __register_linger_request() argument
1289 __unregister_linger_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) __unregister_linger_request() argument
1312 ceph_osdc_set_request_linger(struct ceph_osd_client *osdc, struct ceph_osd_request *req) ceph_osdc_set_request_linger() argument
1328 __req_should_be_paused(struct ceph_osd_client *osdc, struct ceph_osd_request *req) __req_should_be_paused() argument
1341 __calc_request_pg(struct ceph_osdmap *osdmap, struct ceph_osd_request *req, struct ceph_pg *pg_out) __calc_request_pg() argument
1403 __map_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, int force_resend) __map_request() argument
1481 __send_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) __send_request() argument
1526 __ceph_osdc_start_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, bool nofail) __ceph_osdc_start_request() argument
2486 ceph_osdc_start_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req, bool nofail) ceph_osdc_start_request() argument
2526 ceph_osdc_wait_request(struct ceph_osd_client *osdc, struct ceph_osd_request *req) ceph_osdc_wait_request() argument
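The osd_client.c hits above follow a request from __calc_request_pg() through __map_request(), __send_request() and handle_reply(). One small rule worth isolating is the cache-tier redirect at the top of __calc_request_pg(): a read may be retargeted to the pool's read_tier and a write to its write_tier before the placement group is computed. The snippet below is a minimal userspace sketch of that rule only; the type, flag and function names are illustrative stand-ins, not the kernel API.

#include <stdint.h>
#include <stdio.h>

#define FLAG_READ  0x1
#define FLAG_WRITE 0x2

/* -1 means "no tier configured", mirroring the >= 0 checks above */
struct pool_info { int64_t read_tier, write_tier; };

static int64_t target_pool(int flags, int64_t base_pool,
                           const struct pool_info *pi)
{
        int64_t pool = base_pool;

        if ((flags & FLAG_READ) && pi->read_tier >= 0)
                pool = pi->read_tier;           /* reads go to the read tier */
        if ((flags & FLAG_WRITE) && pi->write_tier >= 0)
                pool = pi->write_tier;          /* writes go to the write tier */
        return pool;
}

int main(void)
{
        struct pool_info pi = { .read_tier = 7, .write_tier = -1 };

        printf("read  -> pool %lld\n", (long long)target_pool(FLAG_READ, 3, &pi));
        printf("write -> pool %lld\n", (long long)target_pool(FLAG_WRITE, 3, &pi));
        return 0;
}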
H A Dmon_client.c418 struct ceph_mon_generic_request *req; __lookup_generic_req() local
422 req = rb_entry(n, struct ceph_mon_generic_request, node); __lookup_generic_req()
423 if (tid < req->tid) __lookup_generic_req()
425 else if (tid > req->tid) __lookup_generic_req()
428 return req; __lookup_generic_req()
438 struct ceph_mon_generic_request *req = NULL; __insert_generic_request() local
442 req = rb_entry(parent, struct ceph_mon_generic_request, node); __insert_generic_request()
443 if (new->tid < req->tid) __insert_generic_request()
445 else if (new->tid > req->tid) __insert_generic_request()
457 struct ceph_mon_generic_request *req = release_generic_request() local
460 if (req->reply) release_generic_request()
461 ceph_msg_put(req->reply); release_generic_request()
462 if (req->request) release_generic_request()
463 ceph_msg_put(req->request); release_generic_request()
465 kfree(req); release_generic_request()
468 static void put_generic_request(struct ceph_mon_generic_request *req) put_generic_request() argument
470 kref_put(&req->kref, release_generic_request); put_generic_request()
473 static void get_generic_request(struct ceph_mon_generic_request *req) get_generic_request() argument
475 kref_get(&req->kref); get_generic_request()
483 struct ceph_mon_generic_request *req; get_generic_reply() local
488 req = __lookup_generic_req(monc, tid); get_generic_reply()
489 if (!req) { get_generic_reply()
494 dout("get_generic_reply %lld got %p\n", tid, req->reply); get_generic_reply()
496 m = ceph_msg_get(req->reply); get_generic_reply()
508 struct ceph_mon_generic_request *req) __do_generic_request()
513 req->tid = tid != 0 ? tid : ++monc->last_tid; __do_generic_request()
514 req->request->hdr.tid = cpu_to_le64(req->tid); __do_generic_request()
515 __insert_generic_request(monc, req); __do_generic_request()
517 ceph_con_send(&monc->con, ceph_msg_get(req->request)); __do_generic_request()
520 err = wait_for_completion_interruptible(&req->completion); __do_generic_request()
523 rb_erase(&req->node, &monc->generic_request_tree); __do_generic_request()
527 err = req->result; __do_generic_request()
532 struct ceph_mon_generic_request *req) do_generic_request()
537 err = __do_generic_request(monc, 0, req); do_generic_request()
549 struct ceph_mon_generic_request *req; handle_statfs_reply() local
558 req = __lookup_generic_req(monc, tid); handle_statfs_reply()
559 if (req) { handle_statfs_reply()
560 *(struct ceph_statfs *)req->buf = reply->st; handle_statfs_reply()
561 req->result = 0; handle_statfs_reply()
562 get_generic_request(req); handle_statfs_reply()
565 if (req) { handle_statfs_reply()
566 complete_all(&req->completion); handle_statfs_reply()
567 put_generic_request(req); handle_statfs_reply()
581 struct ceph_mon_generic_request *req; ceph_monc_do_statfs() local
585 req = kzalloc(sizeof(*req), GFP_NOFS); ceph_monc_do_statfs()
586 if (!req) ceph_monc_do_statfs()
589 kref_init(&req->kref); ceph_monc_do_statfs()
590 req->buf = buf; ceph_monc_do_statfs()
591 init_completion(&req->completion); ceph_monc_do_statfs()
594 req->request = ceph_msg_new(CEPH_MSG_STATFS, sizeof(*h), GFP_NOFS, ceph_monc_do_statfs()
596 if (!req->request) ceph_monc_do_statfs()
598 req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS, ceph_monc_do_statfs()
600 if (!req->reply) ceph_monc_do_statfs()
604 h = req->request->front.iov_base; ceph_monc_do_statfs()
610 err = do_generic_request(monc, req); ceph_monc_do_statfs()
613 put_generic_request(req); ceph_monc_do_statfs()
621 struct ceph_mon_generic_request *req; handle_get_version_reply() local
635 req = __lookup_generic_req(monc, handle); handle_get_version_reply()
636 if (req) { handle_get_version_reply()
637 *(u64 *)req->buf = ceph_decode_64(&p); handle_get_version_reply()
638 req->result = 0; handle_get_version_reply()
639 get_generic_request(req); handle_get_version_reply()
642 if (req) { handle_get_version_reply()
643 complete_all(&req->completion); handle_get_version_reply()
644 put_generic_request(req); handle_get_version_reply()
661 struct ceph_mon_generic_request *req; ceph_monc_do_get_version() local
666 req = kzalloc(sizeof(*req), GFP_NOFS); ceph_monc_do_get_version()
667 if (!req) ceph_monc_do_get_version()
670 kref_init(&req->kref); ceph_monc_do_get_version()
671 req->buf = newest; ceph_monc_do_get_version()
672 init_completion(&req->completion); ceph_monc_do_get_version()
674 req->request = ceph_msg_new(CEPH_MSG_MON_GET_VERSION, ceph_monc_do_get_version()
677 if (!req->request) { ceph_monc_do_get_version()
682 req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 1024, ceph_monc_do_get_version()
684 if (!req->reply) { ceph_monc_do_get_version()
689 p = req->request->front.iov_base; ceph_monc_do_get_version()
690 end = p + req->request->front_alloc_len; ceph_monc_do_get_version()
698 err = __do_generic_request(monc, tid, req); ceph_monc_do_get_version()
702 put_generic_request(req); ceph_monc_do_get_version()
712 struct ceph_mon_generic_request *req; __resend_generic_request() local
716 req = rb_entry(p, struct ceph_mon_generic_request, node); __resend_generic_request()
717 ceph_msg_revoke(req->request); __resend_generic_request()
718 ceph_msg_revoke_incoming(req->reply); __resend_generic_request()
719 ceph_con_send(&monc->con, ceph_msg_get(req->request)); __resend_generic_request()
507 __do_generic_request(struct ceph_mon_client *monc, u64 tid, struct ceph_mon_generic_request *req) __do_generic_request() argument
531 do_generic_request(struct ceph_mon_client *monc, struct ceph_mon_generic_request *req) do_generic_request() argument
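The mon_client.c fragments show the generic-request machinery: every request gets a tid, __insert_generic_request() files it in a tid-keyed rb-tree, and a reply locates its request again through __lookup_generic_req() before completing it. Below is a plain-C sketch of that keyed insert/lookup, with an ordinary binary search tree standing in for the kernel rb-tree; the structure and function names are invented for illustration.

#include <stdint.h>
#include <stdio.h>

struct generic_req {
        uint64_t tid;
        struct generic_req *left, *right;
};

/* Walk down the tree comparing tids, the same shape as __lookup_generic_req(). */
static struct generic_req *lookup_req(struct generic_req *root, uint64_t tid)
{
        while (root) {
                if (tid < root->tid)
                        root = root->left;
                else if (tid > root->tid)
                        root = root->right;
                else
                        return root;
        }
        return NULL;
}

/* Descend to the empty slot the tid belongs in and hang the node there. */
static void insert_req(struct generic_req **root, struct generic_req *item)
{
        while (*root) {
                if (item->tid < (*root)->tid)
                        root = &(*root)->left;
                else
                        root = &(*root)->right;
        }
        *root = item;
}

int main(void)
{
        struct generic_req a = { .tid = 5 }, b = { .tid = 9 }, *root = NULL;

        insert_req(&root, &a);
        insert_req(&root, &b);
        printf("tid 9 %s\n", lookup_req(root, 9) ? "found" : "missing");
        printf("tid 7 %s\n", lookup_req(root, 7) ? "found" : "missing");
        return 0;
}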
H A Ddebugfs.c112 struct ceph_mon_generic_request *req; monc_show() local
127 req = rb_entry(rp, struct ceph_mon_generic_request, node); monc_show()
128 op = le16_to_cpu(req->request->hdr.type); monc_show()
130 seq_printf(s, "%llu statfs\n", req->tid); monc_show()
132 seq_printf(s, "%llu mon_get_version", req->tid); monc_show()
134 seq_printf(s, "%llu unknown\n", req->tid); monc_show()
149 struct ceph_osd_request *req; osdc_show() local
153 req = rb_entry(p, struct ceph_osd_request, r_node); osdc_show()
155 seq_printf(s, "%lld\tosd%d\t%lld.%x\t", req->r_tid, osdc_show()
156 req->r_osd ? req->r_osd->o_osd : -1, osdc_show()
157 req->r_pgid.pool, req->r_pgid.seed); osdc_show()
159 seq_printf(s, "%.*s", req->r_base_oid.name_len, osdc_show()
160 req->r_base_oid.name); osdc_show()
162 if (req->r_reassert_version.epoch) osdc_show()
164 (unsigned int)le32_to_cpu(req->r_reassert_version.epoch), osdc_show()
165 le64_to_cpu(req->r_reassert_version.version)); osdc_show()
169 for (i = 0; i < req->r_num_ops; i++) { osdc_show()
170 opcode = req->r_ops[i].op; osdc_show()
/linux-4.1.27/drivers/block/drbd/
H A Ddrbd_req.c37 static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req) _drbd_start_io_acct() argument
39 generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9, _drbd_start_io_acct()
44 static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req) _drbd_end_io_acct() argument
46 generic_end_io_acct(bio_data_dir(req->master_bio), _drbd_end_io_acct()
47 &device->vdisk->part0, req->start_jif); _drbd_end_io_acct()
53 struct drbd_request *req; drbd_req_new() local
55 req = mempool_alloc(drbd_request_mempool, GFP_NOIO); drbd_req_new()
56 if (!req) drbd_req_new()
58 memset(req, 0, sizeof(*req)); drbd_req_new()
60 drbd_req_make_private_bio(req, bio_src); drbd_req_new()
61 req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0; drbd_req_new()
62 req->device = device; drbd_req_new()
63 req->master_bio = bio_src; drbd_req_new()
64 req->epoch = 0; drbd_req_new()
66 drbd_clear_interval(&req->i); drbd_req_new()
67 req->i.sector = bio_src->bi_iter.bi_sector; drbd_req_new()
68 req->i.size = bio_src->bi_iter.bi_size; drbd_req_new()
69 req->i.local = true; drbd_req_new()
70 req->i.waiting = false; drbd_req_new()
72 INIT_LIST_HEAD(&req->tl_requests); drbd_req_new()
73 INIT_LIST_HEAD(&req->w.list); drbd_req_new()
74 INIT_LIST_HEAD(&req->req_pending_master_completion); drbd_req_new()
75 INIT_LIST_HEAD(&req->req_pending_local); drbd_req_new()
78 atomic_set(&req->completion_ref, 1); drbd_req_new()
80 kref_init(&req->kref); drbd_req_new()
81 return req; drbd_req_new()
85 struct drbd_request *req) drbd_remove_request_interval()
87 struct drbd_device *device = req->device; drbd_remove_request_interval()
88 struct drbd_interval *i = &req->i; drbd_remove_request_interval()
99 struct drbd_request *req = container_of(kref, struct drbd_request, kref); drbd_req_destroy() local
100 struct drbd_device *device = req->device; drbd_req_destroy()
101 const unsigned s = req->rq_state; drbd_req_destroy()
103 if ((req->master_bio && !(s & RQ_POSTPONED)) || drbd_req_destroy()
104 atomic_read(&req->completion_ref) || drbd_req_destroy()
108 s, atomic_read(&req->completion_ref)); drbd_req_destroy()
114  * req_lock, and req->tl_requests will typically be on ->transfer_log, drbd_req_destroy()
118 * still allowed to unconditionally list_del(&req->tl_requests), drbd_req_destroy()
120 list_del_init(&req->tl_requests); drbd_req_destroy()
124 if (!drbd_interval_empty(&req->i)) { drbd_req_destroy()
131 drbd_remove_request_interval(root, req); drbd_req_destroy()
132 } else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0) drbd_req_destroy()
134 s, (unsigned long long)req->i.sector, req->i.size); drbd_req_destroy()
153 drbd_set_out_of_sync(device, req->i.sector, req->i.size); drbd_req_destroy()
156 drbd_set_in_sync(device, req->i.sector, req->i.size); drbd_req_destroy()
171 drbd_al_complete_io(device, &req->i); drbd_req_destroy()
176 (unsigned long long) req->i.sector, req->i.size); drbd_req_destroy()
181 mempool_free(req, drbd_request_mempool); drbd_req_destroy()
216 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) drbd_req_complete() argument
218 const unsigned s = req->rq_state; drbd_req_complete()
219 struct drbd_device *device = req->device; drbd_req_complete()
239 if (!req->master_bio) { drbd_req_complete()
244 rw = bio_rw(req->master_bio); drbd_req_complete()
260 error = PTR_ERR(req->private_bio); drbd_req_complete()
270 req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr)) drbd_req_complete()
274 _drbd_end_io_acct(device, req); drbd_req_complete()
290 if (!ok && rw == READ && !list_empty(&req->tl_requests)) drbd_req_complete()
291 req->rq_state |= RQ_POSTPONED; drbd_req_complete()
293 if (!(req->rq_state & RQ_POSTPONED)) { drbd_req_complete()
295 m->bio = req->master_bio; drbd_req_complete()
296 req->master_bio = NULL; drbd_req_complete()
301 req->i.completed = true; drbd_req_complete()
304 if (req->i.waiting) drbd_req_complete()
311 list_del_init(&req->req_pending_master_completion); drbd_req_complete()
315 static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) drbd_req_put_completion_ref() argument
317 struct drbd_device *device = req->device; drbd_req_put_completion_ref()
318 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); drbd_req_put_completion_ref()
320 if (!atomic_sub_and_test(put, &req->completion_ref)) drbd_req_put_completion_ref()
323 drbd_req_complete(req, m); drbd_req_put_completion_ref()
325 if (req->rq_state & RQ_POSTPONED) { drbd_req_put_completion_ref()
326 /* don't destroy the req object just yet, drbd_req_put_completion_ref()
328 drbd_restart_request(req); drbd_req_put_completion_ref()
335 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) set_if_null_req_next() argument
341 connection->req_next = req; set_if_null_req_next()
344 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) advance_conn_req_next() argument
349 if (connection->req_next != req) advance_conn_req_next()
351 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { advance_conn_req_next()
352 const unsigned s = req->rq_state; advance_conn_req_next()
356 if (&req->tl_requests == &connection->transfer_log) advance_conn_req_next()
357 req = NULL; advance_conn_req_next()
358 connection->req_next = req; advance_conn_req_next()
361 static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) set_if_null_req_ack_pending() argument
367 connection->req_ack_pending = req; set_if_null_req_ack_pending()
370 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) advance_conn_req_ack_pending() argument
375 if (connection->req_ack_pending != req) advance_conn_req_ack_pending()
377 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { advance_conn_req_ack_pending()
378 const unsigned s = req->rq_state; advance_conn_req_ack_pending()
382 if (&req->tl_requests == &connection->transfer_log) advance_conn_req_ack_pending()
383 req = NULL; advance_conn_req_ack_pending()
384 connection->req_ack_pending = req; advance_conn_req_ack_pending()
387 static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) set_if_null_req_not_net_done() argument
393 connection->req_not_net_done = req; set_if_null_req_not_net_done()
396 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) advance_conn_req_not_net_done() argument
401 if (connection->req_not_net_done != req) advance_conn_req_not_net_done()
403 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { advance_conn_req_not_net_done()
404 const unsigned s = req->rq_state; advance_conn_req_not_net_done()
408 if (&req->tl_requests == &connection->transfer_log) advance_conn_req_not_net_done()
409 req = NULL; advance_conn_req_not_net_done()
410 connection->req_not_net_done = req; advance_conn_req_not_net_done()
414 * req->completion_ref and req->kref. */ mod_rq_state()
415 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, mod_rq_state() argument
418 struct drbd_device *device = req->device; mod_rq_state()
420 unsigned s = req->rq_state; mod_rq_state()
429 req->rq_state &= ~clear; mod_rq_state()
430 req->rq_state |= set; mod_rq_state()
433 if (req->rq_state == s) mod_rq_state()
439 atomic_inc(&req->completion_ref); mod_rq_state()
443 atomic_inc(&req->completion_ref); mod_rq_state()
447 atomic_inc(&req->completion_ref); mod_rq_state()
448 set_if_null_req_next(peer_device, req); mod_rq_state()
452 kref_get(&req->kref); /* wait for the DONE */ mod_rq_state()
457 atomic_add(req->i.size >> 9, &device->ap_in_flight); mod_rq_state()
458 set_if_null_req_not_net_done(peer_device, req); mod_rq_state()
461 set_if_null_req_ack_pending(peer_device, req); mod_rq_state()
465 atomic_inc(&req->completion_ref); mod_rq_state()
473 D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING); mod_rq_state()
475 * we need to keep the req object around. */ mod_rq_state()
476 kref_get(&req->kref); mod_rq_state()
481 if (req->rq_state & RQ_LOCAL_ABORTED) mod_rq_state()
485 list_del_init(&req->req_pending_local); mod_rq_state()
491 req->acked_jif = jiffies; mod_rq_state()
492 advance_conn_req_ack_pending(peer_device, req); mod_rq_state()
497 advance_conn_req_next(peer_device, req); mod_rq_state()
502 atomic_sub(req->i.size >> 9, &device->ap_in_flight); mod_rq_state()
505 req->net_done_jif = jiffies; mod_rq_state()
510 advance_conn_req_next(peer_device, req); mod_rq_state()
511 advance_conn_req_ack_pending(peer_device, req); mod_rq_state()
512 advance_conn_req_not_net_done(peer_device, req); mod_rq_state()
519 * kref_sub below, we need req to be still around then. */ mod_rq_state()
521 int refcount = atomic_read(&req->kref.refcount); mod_rq_state()
525 s, req->rq_state, refcount, at_least); mod_rq_state()
529 if (req->i.waiting) mod_rq_state()
533 k_put += drbd_req_put_completion_ref(req, m, c_put); mod_rq_state()
535 kref_sub(&req->kref, k_put, drbd_req_destroy); mod_rq_state()
538 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) drbd_report_io_error() argument
546 (req->rq_state & RQ_WRITE) ? "WRITE" : "READ", drbd_report_io_error()
547 (unsigned long long)req->i.sector, drbd_report_io_error()
548 req->i.size >> 9, drbd_report_io_error()
558 static inline bool is_pending_write_protocol_A(struct drbd_request *req) is_pending_write_protocol_A() argument
560 return (req->rq_state & is_pending_write_protocol_A()
577 int __req_mod(struct drbd_request *req, enum drbd_req_event what, __req_mod() argument
580 struct drbd_device *const device = req->device; __req_mod()
603 D_ASSERT(device, !(req->rq_state & RQ_NET_MASK)); __req_mod()
608 req->rq_state |= __req_mod()
611 mod_rq_state(req, m, 0, RQ_NET_PENDING); __req_mod()
616 D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK)); __req_mod()
617 mod_rq_state(req, m, 0, RQ_LOCAL_PENDING); __req_mod()
621 if (req->rq_state & RQ_WRITE) __req_mod()
622 device->writ_cnt += req->i.size >> 9; __req_mod()
624 device->read_cnt += req->i.size >> 9; __req_mod()
626 mod_rq_state(req, m, RQ_LOCAL_PENDING, __req_mod()
631 mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED); __req_mod()
635 drbd_report_io_error(device, req); __req_mod()
637 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); __req_mod()
641 drbd_set_out_of_sync(device, req->i.sector, req->i.size); __req_mod()
642 drbd_report_io_error(device, req); __req_mod()
647 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); __req_mod()
654 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); __req_mod()
668 D_ASSERT(device, drbd_interval_empty(&req->i)); __req_mod()
669 drbd_insert_interval(&device->read_requests, &req->i); __req_mod()
673 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); __req_mod()
674 D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0); __req_mod()
675 mod_rq_state(req, m, 0, RQ_NET_QUEUED); __req_mod()
676 req->w.cb = w_send_read_req; __req_mod()
678 &req->w); __req_mod()
687 D_ASSERT(device, drbd_interval_empty(&req->i)); __req_mod()
688 drbd_insert_interval(&device->write_requests, &req->i); __req_mod()
691 * In case the req ended up on the transfer log before being __req_mod()
697 * _req_add_to_epoch(req); this has to be after the __req_mod()
698 * _maybe_start_new_epoch(req); which happened in __req_mod()
702 * Add req to the (now) current epoch (barrier). */ __req_mod()
710 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); __req_mod()
711 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); __req_mod()
712 req->w.cb = w_send_dblock; __req_mod()
714 &req->w); __req_mod()
727 mod_rq_state(req, m, 0, RQ_NET_QUEUED); __req_mod()
728 req->w.cb = w_send_out_of_sync; __req_mod()
730 &req->w); __req_mod()
738 mod_rq_state(req, m, RQ_NET_QUEUED, 0); __req_mod()
743 if (is_pending_write_protocol_A(req)) __req_mod()
746 mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING, __req_mod()
749 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT); __req_mod()
758 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE); __req_mod()
763 mod_rq_state(req, m, __req_mod()
776 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); __req_mod()
777 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); __req_mod()
778 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK); __req_mod()
782 req->rq_state |= RQ_NET_SIS; __req_mod()
793 D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK); __req_mod()
798 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); __req_mod()
802 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); __req_mod()
807 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); __req_mod()
808 req->rq_state |= RQ_POSTPONED; __req_mod()
809 if (req->i.waiting) __req_mod()
817 mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0); __req_mod()
821 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) __req_mod()
823 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); __req_mod()
827 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) __req_mod()
830 mod_rq_state(req, m, __req_mod()
835 if (bio_data_dir(req->master_bio) == WRITE) __req_mod()
839 req->w.cb = w_restart_disk_io; __req_mod()
841 &req->w); __req_mod()
846 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) { __req_mod()
847 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); __req_mod()
856 if (!(req->rq_state & RQ_NET_OK)) { __req_mod()
857 /* FIXME could this possibly be a req->dw.cb == w_send_out_of_sync? __req_mod()
860 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); __req_mod()
861 if (req->w.cb) { __req_mod()
864 &req->w); __req_mod()
865 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; __req_mod()
873 if (!(req->rq_state & RQ_WRITE)) __req_mod()
876 if (req->rq_state & RQ_NET_PENDING) { __req_mod()
886 mod_rq_state(req, m, RQ_COMPLETION_SUSP, __req_mod()
887 (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0); __req_mod()
891 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); __req_mod()
892 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); __req_mod()
897 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); __req_mod()
971 static void complete_conflicting_writes(struct drbd_request *req) complete_conflicting_writes() argument
974 struct drbd_device *device = req->device; complete_conflicting_writes()
976 sector_t sector = req->i.sector; complete_conflicting_writes()
977 int size = req->i.size; complete_conflicting_writes()
1046 /* If this returns false, and req->private_bio is still set,
1049 * If it returns false, but req->private_bio is not set,
1052 * Otherwise, this destroys req->private_bio, if any,
1055 static bool do_remote_read(struct drbd_request *req) do_remote_read() argument
1057 struct drbd_device *device = req->device; do_remote_read()
1060 if (req->private_bio) { do_remote_read()
1062 req->i.sector, req->i.size)) { do_remote_read()
1063 bio_put(req->private_bio); do_remote_read()
1064 req->private_bio = NULL; do_remote_read()
1072 if (req->private_bio == NULL) do_remote_read()
1082 if (rbm == RB_PREFER_LOCAL && req->private_bio) do_remote_read()
1085 if (remote_due_to_read_balancing(device, req->i.sector, rbm)) { do_remote_read()
1086 if (req->private_bio) { do_remote_read()
1087 bio_put(req->private_bio); do_remote_read()
1088 req->private_bio = NULL; do_remote_read()
1100 static int drbd_process_write_request(struct drbd_request *req) drbd_process_write_request() argument
1102 struct drbd_device *device = req->device; drbd_process_write_request()
1114 if (unlikely(req->i.size == 0)) { drbd_process_write_request()
1116 D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH); drbd_process_write_request()
1118 _req_mod(req, QUEUE_AS_DRBD_BARRIER); drbd_process_write_request()
1128 _req_mod(req, TO_BE_SENT); drbd_process_write_request()
1129 _req_mod(req, QUEUE_FOR_NET_WRITE); drbd_process_write_request()
1130 } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size)) drbd_process_write_request()
1131 _req_mod(req, QUEUE_FOR_SEND_OOS); drbd_process_write_request()
1137 drbd_submit_req_private_bio(struct drbd_request *req) drbd_submit_req_private_bio() argument
1139 struct drbd_device *device = req->device; drbd_submit_req_private_bio()
1140 struct bio *bio = req->private_bio; drbd_submit_req_private_bio()
1151 req->pre_submit_jif = jiffies; drbd_submit_req_private_bio()
1164 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) drbd_queue_write() argument
1167 list_add_tail(&req->tl_requests, &device->submit.writes); drbd_queue_write()
1168 list_add_tail(&req->req_pending_master_completion, drbd_queue_write()
1185 struct drbd_request *req; drbd_request_prepare() local
1188 req = drbd_req_new(device, bio); drbd_request_prepare()
1189 if (!req) { drbd_request_prepare()
1193 drbd_err(device, "could not kmalloc() req\n"); drbd_request_prepare()
1197 req->start_jif = start_jif; drbd_request_prepare()
1200 bio_put(req->private_bio); drbd_request_prepare()
1201 req->private_bio = NULL; drbd_request_prepare()
1205 _drbd_start_io_acct(device, req); drbd_request_prepare()
1207 if (rw == WRITE && req->private_bio && req->i.size drbd_request_prepare()
1209 if (!drbd_al_begin_io_fastpath(device, &req->i)) { drbd_request_prepare()
1211 drbd_queue_write(device, req); drbd_request_prepare()
1214 req->rq_state |= RQ_IN_ACT_LOG; drbd_request_prepare()
1215 req->in_actlog_jif = jiffies; drbd_request_prepare()
1218 return req; drbd_request_prepare()
1221 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) drbd_send_and_submit() argument
1224 const int rw = bio_rw(req->master_bio); drbd_send_and_submit()
1234 complete_conflicting_writes(req); drbd_send_and_submit()
1245 req->rq_state |= RQ_POSTPONED; drbd_send_and_submit()
1246 if (req->private_bio) { drbd_send_and_submit()
1247 bio_put(req->private_bio); drbd_send_and_submit()
1248 req->private_bio = NULL; drbd_send_and_submit()
1255 * We must do this before req is registered on any lists. drbd_send_and_submit()
1258 if (!do_remote_read(req) && !req->private_bio) drbd_send_and_submit()
1263 req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr); drbd_send_and_submit()
1267 if (likely(req->i.size!=0)) { drbd_send_and_submit()
1271 list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log); drbd_send_and_submit()
1275 if (!drbd_process_write_request(req)) drbd_send_and_submit()
1280 if (req->private_bio == NULL) { drbd_send_and_submit()
1281 _req_mod(req, TO_BE_SENT); drbd_send_and_submit()
1282 _req_mod(req, QUEUE_FOR_NET_READ); drbd_send_and_submit()
1289 if (list_empty(&req->req_pending_master_completion)) drbd_send_and_submit()
1290 list_add_tail(&req->req_pending_master_completion, drbd_send_and_submit()
1292 if (req->private_bio) { drbd_send_and_submit()
1294 list_add_tail(&req->req_pending_local, drbd_send_and_submit()
1296 _req_mod(req, TO_BE_SUBMITTED); drbd_send_and_submit()
1303 (unsigned long long)req->i.sector, req->i.size >> 9); drbd_send_and_submit()
1309 if (drbd_req_put_completion_ref(req, &m, 1)) drbd_send_and_submit()
1310 kref_put(&req->kref, drbd_req_destroy); drbd_send_and_submit()
1317 * (e.g. remote read), req may already be invalid now. drbd_send_and_submit()
1318 * That's why we cannot check on req->private_bio. */ drbd_send_and_submit()
1320 drbd_submit_req_private_bio(req); drbd_send_and_submit()
1327 struct drbd_request *req = drbd_request_prepare(device, bio, start_jif); __drbd_make_request() local
1328 if (IS_ERR_OR_NULL(req)) __drbd_make_request()
1330 drbd_send_and_submit(device, req); __drbd_make_request()
1335 struct drbd_request *req, *tmp; list_for_each_entry_safe() local
1336 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { list_for_each_entry_safe()
1337 const int rw = bio_data_dir(req->master_bio); list_for_each_entry_safe()
1340 && req->private_bio && req->i.size list_for_each_entry_safe()
1342 if (!drbd_al_begin_io_fastpath(device, &req->i)) list_for_each_entry_safe()
1345 req->rq_state |= RQ_IN_ACT_LOG; list_for_each_entry_safe()
1346 req->in_actlog_jif = jiffies; list_for_each_entry_safe()
1350 list_del_init(&req->tl_requests); list_for_each_entry_safe()
1351 drbd_send_and_submit(device, req); list_for_each_entry_safe()
1360 struct drbd_request *req, *tmp; prepare_al_transaction_nonblock() local
1365 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { list_for_each_entry_safe()
1366 err = drbd_al_begin_io_nonblock(device, &req->i); list_for_each_entry_safe()
1372 list_move_tail(&req->tl_requests, later); list_for_each_entry_safe()
1374 list_move_tail(&req->tl_requests, pending); list_for_each_entry_safe()
1384 struct drbd_request *req, *tmp; send_and_submit_pending() local
1386 list_for_each_entry_safe(req, tmp, pending, tl_requests) { list_for_each_entry_safe()
1387 req->rq_state |= RQ_IN_ACT_LOG; list_for_each_entry_safe()
1388 req->in_actlog_jif = jiffies; list_for_each_entry_safe()
1390 list_del_init(&req->tl_requests); list_for_each_entry_safe()
1391 drbd_send_and_submit(device, req); list_for_each_entry_safe()
84 drbd_remove_request_interval(struct rb_root *root, struct drbd_request *req) drbd_remove_request_interval() argument
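The drbd_req.c results revolve around a two-counter lifetime scheme: req->completion_ref counts the reasons the master bio cannot be completed yet (local I/O, pending network acks, suspension), while req->kref keeps the request object itself alive; drbd_req_put_completion_ref() completes the request when the former reaches zero and drbd_req_destroy() frees it when the latter does. The following userspace sketch shows only that shape, with simplified names and plain integers in place of the kernel's atomics and kref.

#include <stdio.h>
#include <stdlib.h>

struct request_sketch {
        int completion_ref;     /* outstanding reasons not to complete yet */
        int kref;               /* references keeping the object alive */
};

static void req_complete(struct request_sketch *req)
{
        (void)req;
        printf("master completion signalled\n");
}

static void req_destroy(struct request_sketch *req)
{
        printf("request object freed\n");
        free(req);
}

/* Drop one completion reference; the last one triggers completion. */
static void put_completion_ref(struct request_sketch *req)
{
        if (--req->completion_ref == 0)
                req_complete(req);
}

/* Drop one object reference; the last one frees the request. */
static void kref_put_sketch(struct request_sketch *req)
{
        if (--req->kref == 0)
                req_destroy(req);
}

int main(void)
{
        struct request_sketch *req = calloc(1, sizeof(*req));

        if (!req)
                return 1;
        req->completion_ref = 2;        /* e.g. local I/O + network ack */
        req->kref = 1;

        put_completion_ref(req);        /* local I/O finished */
        put_completion_ref(req);        /* ack arrived -> completes */
        kref_put_sketch(req);           /* last holder -> freed */
        return 0;
}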
/linux-4.1.27/arch/powerpc/platforms/52xx/
H A Dmpc52xx_lpbfifo.c53 struct mpc52xx_lpbfifo_request *req; member in struct:mpc52xx_lpbfifo
63 static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) mpc52xx_lpbfifo_kick() argument
65 size_t transfer_size = req->size - req->pos; mpc52xx_lpbfifo_kick()
71 int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); mpc52xx_lpbfifo_kick()
72 int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; mpc52xx_lpbfifo_kick()
73 int poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; mpc52xx_lpbfifo_kick()
96 data = req->data + req->pos; mpc52xx_lpbfifo_kick()
150 bd->data[0] = req->data_phys + req->pos; mpc52xx_lpbfifo_kick()
164 req->offset + req->pos); mpc52xx_lpbfifo_kick()
167 bit_fields = req->cs << 24 | 0x000008; mpc52xx_lpbfifo_kick()
173 if (!lpbfifo.req->defer_xfer_start) mpc52xx_lpbfifo_kick()
222 struct mpc52xx_lpbfifo_request *req; mpc52xx_lpbfifo_irq() local
235 req = lpbfifo.req; mpc52xx_lpbfifo_irq()
236 if (!req) { mpc52xx_lpbfifo_irq()
242 dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); mpc52xx_lpbfifo_irq()
243 write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; mpc52xx_lpbfifo_irq()
244 poll_dma = req->flags & MPC52XX_LPBFIFO_FLAG_POLL_DMA; mpc52xx_lpbfifo_irq()
270 data = req->data + req->pos; mpc52xx_lpbfifo_irq()
276 req->pos += count; mpc52xx_lpbfifo_irq()
279 if (req->size - req->pos) mpc52xx_lpbfifo_irq()
280 mpc52xx_lpbfifo_kick(req); /* more work to do */ mpc52xx_lpbfifo_irq()
301 req->last_byte = ((u8 *)req->data)[req->size - 1]; mpc52xx_lpbfifo_irq()
306 lpbfifo.req = NULL; mpc52xx_lpbfifo_irq()
309 req->irq_count++; mpc52xx_lpbfifo_irq()
311 req->irq_ticks += get_tbl() - ts; mpc52xx_lpbfifo_irq()
315 if (do_callback && req->callback) mpc52xx_lpbfifo_irq()
316 req->callback(req); mpc52xx_lpbfifo_irq()
328 struct mpc52xx_lpbfifo_request *req; mpc52xx_lpbfifo_bcom_irq() local
336 req = lpbfifo.req; mpc52xx_lpbfifo_bcom_irq()
337 if (!req || (req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA)) { mpc52xx_lpbfifo_bcom_irq()
343 req->irq_count++; mpc52xx_lpbfifo_bcom_irq()
348 req->buffer_not_done_cnt++; mpc52xx_lpbfifo_bcom_irq()
349 if ((req->buffer_not_done_cnt % 1000) == 0) mpc52xx_lpbfifo_bcom_irq()
357 req->last_byte = ((u8 *)req->data)[req->size - 1]; mpc52xx_lpbfifo_bcom_irq()
359 req->pos = status & 0x00ffffff; mpc52xx_lpbfifo_bcom_irq()
362 lpbfifo.req = NULL; mpc52xx_lpbfifo_bcom_irq()
365 req->irq_ticks += get_tbl() - ts; mpc52xx_lpbfifo_bcom_irq()
368 if (req->callback) mpc52xx_lpbfifo_bcom_irq()
369 req->callback(req); mpc52xx_lpbfifo_bcom_irq()
379 struct mpc52xx_lpbfifo_request *req = lpbfifo.req; mpc52xx_lpbfifo_poll() local
380 int dma = !(req->flags & MPC52XX_LPBFIFO_FLAG_NO_DMA); mpc52xx_lpbfifo_poll()
381 int write = req->flags & MPC52XX_LPBFIFO_FLAG_WRITE; mpc52xx_lpbfifo_poll()
395 * @req: Pointer to request structure
397 int mpc52xx_lpbfifo_submit(struct mpc52xx_lpbfifo_request *req) mpc52xx_lpbfifo_submit() argument
406 /* If the req pointer is already set, then a transfer is in progress */ mpc52xx_lpbfifo_submit()
407 if (lpbfifo.req) { mpc52xx_lpbfifo_submit()
413 lpbfifo.req = req; mpc52xx_lpbfifo_submit()
414 req->irq_count = 0; mpc52xx_lpbfifo_submit()
415 req->irq_ticks = 0; mpc52xx_lpbfifo_submit()
416 req->buffer_not_done_cnt = 0; mpc52xx_lpbfifo_submit()
417 req->pos = 0; mpc52xx_lpbfifo_submit()
419 mpc52xx_lpbfifo_kick(req); mpc52xx_lpbfifo_submit()
425 int mpc52xx_lpbfifo_start_xfer(struct mpc52xx_lpbfifo_request *req) mpc52xx_lpbfifo_start_xfer() argument
435 * If the req pointer is already set and a transfer was mpc52xx_lpbfifo_start_xfer()
438 if (lpbfifo.req && !lpbfifo.req->defer_xfer_start) { mpc52xx_lpbfifo_start_xfer()
444 * If the req was previously submitted but not mpc52xx_lpbfifo_start_xfer()
447 if (lpbfifo.req && lpbfifo.req == req && mpc52xx_lpbfifo_start_xfer()
448 lpbfifo.req->defer_xfer_start) { mpc52xx_lpbfifo_start_xfer()
457 void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) mpc52xx_lpbfifo_abort() argument
462 if (lpbfifo.req == req) { mpc52xx_lpbfifo_abort()
467 lpbfifo.req = NULL; mpc52xx_lpbfifo_abort()
/linux-4.1.27/block/
H A Dblk-timeout.c75 * @req: request that we are canceling timer for
78 void blk_delete_timer(struct request *req) blk_delete_timer() argument
80 list_del_init(&req->timeout_list); blk_delete_timer()
83 static void blk_rq_timed_out(struct request *req) blk_rq_timed_out() argument
85 struct request_queue *q = req->q; blk_rq_timed_out()
89 ret = q->rq_timed_out_fn(req); blk_rq_timed_out()
92 /* Can we use req->errors here? */ blk_rq_timed_out()
93 __blk_complete_request(req); blk_rq_timed_out()
96 blk_add_timer(req); blk_rq_timed_out()
97 blk_clear_rq_complete(req); blk_rq_timed_out()
150 * @req: pointer to the request of interest
157 void blk_abort_request(struct request *req) blk_abort_request() argument
159 if (blk_mark_rq_complete(req)) blk_abort_request()
161 blk_delete_timer(req); blk_abort_request()
162 if (req->q->mq_ops) blk_abort_request()
163 blk_mq_rq_timed_out(req, false); blk_abort_request()
165 blk_rq_timed_out(req); blk_abort_request()
182 * @req: request that is about to start running.
188 void blk_add_timer(struct request *req) blk_add_timer() argument
190 struct request_queue *q = req->q; blk_add_timer()
193 if (req->cmd_flags & REQ_NO_TIMEOUT) blk_add_timer()
200 BUG_ON(!list_empty(&req->timeout_list)); blk_add_timer()
206 if (!req->timeout) blk_add_timer()
207 req->timeout = q->rq_timeout; blk_add_timer()
209 req->deadline = jiffies + req->timeout; blk_add_timer()
211 list_add_tail(&req->timeout_list, &req->q->timeout_list); blk_add_timer()
218 expiry = blk_rq_timeout(round_jiffies_up(req->deadline)); blk_add_timer()
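The blk-timeout.c fragments boil down to simple deadline bookkeeping: blk_add_timer() lets a request carry its own timeout, falls back to the queue-wide rq_timeout otherwise, sets req->deadline = jiffies + req->timeout, and queues the request on the timeout list. The sketch below reproduces just the deadline arithmetic in userspace, with time() standing in for jiffies and invented type names.

#include <stdio.h>
#include <time.h>

struct queue_sketch { long rq_timeout; };               /* queue default, seconds */
struct req_sketch   { long timeout; time_t deadline; }; /* 0 = use queue default */

static void add_timer_sketch(const struct queue_sketch *q, struct req_sketch *req)
{
        if (!req->timeout)
                req->timeout = q->rq_timeout;   /* inherit the queue default */
        req->deadline = time(NULL) + req->timeout;
}

int main(void)
{
        struct queue_sketch q = { .rq_timeout = 30 };
        struct req_sketch r1 = { .timeout = 0 };        /* no per-request value */
        struct req_sketch r2 = { .timeout = 5 };        /* explicit override */

        add_timer_sketch(&q, &r1);
        add_timer_sketch(&q, &r2);
        printf("r1 timeout %lds, r2 timeout %lds\n", r1.timeout, r2.timeout);
        return 0;
}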
H A Dblk-merge.c287 struct request *req, ll_new_hw_segment()
292 if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) ll_new_hw_segment()
295 if (blk_integrity_merge_bio(q, req, bio) == false) ll_new_hw_segment()
302 req->nr_phys_segments += nr_phys_segs; ll_new_hw_segment()
306 req->cmd_flags |= REQ_NOMERGE; ll_new_hw_segment()
307 if (req == q->last_merge) ll_new_hw_segment()
312 int ll_back_merge_fn(struct request_queue *q, struct request *req, ll_back_merge_fn() argument
315 if (blk_rq_sectors(req) + bio_sectors(bio) > ll_back_merge_fn()
316 blk_rq_get_max_sectors(req)) { ll_back_merge_fn()
317 req->cmd_flags |= REQ_NOMERGE; ll_back_merge_fn()
318 if (req == q->last_merge) ll_back_merge_fn()
322 if (!bio_flagged(req->biotail, BIO_SEG_VALID)) ll_back_merge_fn()
323 blk_recount_segments(q, req->biotail); ll_back_merge_fn()
327 return ll_new_hw_segment(q, req, bio); ll_back_merge_fn()
330 int ll_front_merge_fn(struct request_queue *q, struct request *req, ll_front_merge_fn() argument
333 if (blk_rq_sectors(req) + bio_sectors(bio) > ll_front_merge_fn()
334 blk_rq_get_max_sectors(req)) { ll_front_merge_fn()
335 req->cmd_flags |= REQ_NOMERGE; ll_front_merge_fn()
336 if (req == q->last_merge) ll_front_merge_fn()
342 if (!bio_flagged(req->bio, BIO_SEG_VALID)) ll_front_merge_fn()
343 blk_recount_segments(q, req->bio); ll_front_merge_fn()
345 return ll_new_hw_segment(q, req, bio); ll_front_merge_fn()
349 * blk-mq uses req->special to carry normal driver per-request payload, it
352 static bool req_no_special_merge(struct request *req) req_no_special_merge() argument
354 struct request_queue *q = req->q; req_no_special_merge()
356 return !q->mq_ops && req->special; req_no_special_merge()
359 static int req_gap_to_prev(struct request *req, struct request *next) req_gap_to_prev() argument
361 struct bio *prev = req->biotail; req_gap_to_prev()
367 static int ll_merge_requests_fn(struct request_queue *q, struct request *req, ll_merge_requests_fn() argument
372 req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size; ll_merge_requests_fn()
378 if (req_no_special_merge(req) || req_no_special_merge(next)) ll_merge_requests_fn()
382 req_gap_to_prev(req, next)) ll_merge_requests_fn()
388 if ((blk_rq_sectors(req) + blk_rq_sectors(next)) > ll_merge_requests_fn()
389 blk_rq_get_max_sectors(req)) ll_merge_requests_fn()
392 total_phys_segments = req->nr_phys_segments + next->nr_phys_segments; ll_merge_requests_fn()
393 if (blk_phys_contig_segment(q, req->biotail, next->bio)) { ll_merge_requests_fn()
394 if (req->nr_phys_segments == 1) ll_merge_requests_fn()
395 req->bio->bi_seg_front_size = seg_size; ll_merge_requests_fn()
404 if (blk_integrity_merge_rq(q, req, next) == false) ll_merge_requests_fn()
408 req->nr_phys_segments = total_phys_segments; ll_merge_requests_fn()
442 static void blk_account_io_merge(struct request *req) blk_account_io_merge() argument
444 if (blk_do_io_stat(req)) { blk_account_io_merge()
449 part = req->part; blk_account_io_merge()
452 part_dec_in_flight(part, rq_data_dir(req)); blk_account_io_merge()
462 static int attempt_merge(struct request_queue *q, struct request *req, attempt_merge() argument
465 if (!rq_mergeable(req) || !rq_mergeable(next)) attempt_merge()
468 if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags)) attempt_merge()
474 if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next)) attempt_merge()
477 if (rq_data_dir(req) != rq_data_dir(next) attempt_merge()
478 || req->rq_disk != next->rq_disk attempt_merge()
482 if (req->cmd_flags & REQ_WRITE_SAME && attempt_merge()
483 !blk_write_same_mergeable(req->bio, next->bio)) attempt_merge()
492 if (!ll_merge_requests_fn(q, req, next)) attempt_merge()
501 if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE || attempt_merge()
502 (req->cmd_flags & REQ_FAILFAST_MASK) != attempt_merge()
504 blk_rq_set_mixed_merge(req); attempt_merge()
514 if (time_after(req->start_time, next->start_time)) attempt_merge()
515 req->start_time = next->start_time; attempt_merge()
517 req->biotail->bi_next = next->bio; attempt_merge()
518 req->biotail = next->biotail; attempt_merge()
520 req->__data_len += blk_rq_bytes(next); attempt_merge()
522 elv_merge_requests(q, req, next); attempt_merge()
529 req->ioprio = ioprio_best(req->ioprio, next->ioprio); attempt_merge()
531 req->cpu = next->cpu; attempt_merge()
533  /* ownership of bio passed from next to req */ attempt_merge()
286 ll_new_hw_segment(struct request_queue *q, struct request *req, struct bio *bio) ll_new_hw_segment() argument
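The blk-merge.c hits are all about deciding whether a bio or a neighbouring request may be folded into an existing one: ll_back_merge_fn()/ll_front_merge_fn() reject the merge when the combined sector count would exceed the request's limit, and attempt_merge() additionally insists that next start exactly where req ends and that both carry the same data direction. The userspace sketch below captures those two checks only, with simplified types that are not the block layer API.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct rq_sketch {
        uint64_t pos;           /* first sector */
        unsigned int sectors;   /* length in sectors */
        int dir;                /* 0 = read, 1 = write */
};

/* Size check: the grown request must stay within max_sectors. */
static bool size_ok(const struct rq_sketch *req, unsigned int bio_sectors,
                    unsigned int max_sectors)
{
        return req->sectors + bio_sectors <= max_sectors;
}

/* Contiguity + direction check, as in attempt_merge(). */
static bool can_merge(const struct rq_sketch *req, const struct rq_sketch *next)
{
        if (req->pos + req->sectors != next->pos)
                return false;                   /* not back-to-back on disk */
        return req->dir == next->dir;           /* never mix reads and writes */
}

int main(void)
{
        struct rq_sketch a = { .pos = 0,  .sectors = 8,  .dir = 1 };
        struct rq_sketch b = { .pos = 8,  .sectors = 16, .dir = 1 };
        struct rq_sketch c = { .pos = 32, .sectors = 8,  .dir = 1 };

        printf("a+b mergeable: %d\n", can_merge(&a, &b));       /* 1 */
        printf("a+c mergeable: %d\n", can_merge(&a, &c));       /* 0 */
        printf("a may grow by 8 (max 64): %d\n", size_ok(&a, 8, 64));
        return 0;
}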
H A Dbsg-lib.c55 struct request *req = job->req; bsg_job_done() local
56 struct request *rsp = req->next_rq; bsg_job_done()
59 err = job->req->errors = result; bsg_job_done()
62 job->req->sense_len = sizeof(u32); bsg_job_done()
64 job->req->sense_len = job->reply_len; bsg_job_done()
66 req->resid_len = 0; bsg_job_done()
74 blk_complete_request(req); bsg_job_done()
90 static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req) bsg_map_buffer() argument
92 size_t sz = (sizeof(struct scatterlist) * req->nr_phys_segments); bsg_map_buffer()
94 BUG_ON(!req->nr_phys_segments); bsg_map_buffer()
99 sg_init_table(buf->sg_list, req->nr_phys_segments); bsg_map_buffer()
100 buf->sg_cnt = blk_rq_map_sg(req->q, req, buf->sg_list); bsg_map_buffer()
101 buf->payload_len = blk_rq_bytes(req); bsg_map_buffer()
108 * @req: BSG request that needs a job structure
110 static int bsg_create_job(struct device *dev, struct request *req) bsg_create_job() argument
112 struct request *rsp = req->next_rq; bsg_create_job()
113 struct request_queue *q = req->q; bsg_create_job()
117 BUG_ON(req->special); bsg_create_job()
123 req->special = job; bsg_create_job()
124 job->req = req; bsg_create_job()
127 job->request = req->cmd; bsg_create_job()
128 job->request_len = req->cmd_len; bsg_create_job()
129 job->reply = req->sense; bsg_create_job()
132 if (req->bio) { bsg_create_job()
133 ret = bsg_map_buffer(&job->request_payload, req); bsg_create_job()
159 * that will be set to the req->errors.
166 struct request *req; bsg_request_fn() local
174 req = blk_fetch_request(q); bsg_request_fn()
175 if (!req) bsg_request_fn()
179 ret = bsg_create_job(dev, req); bsg_request_fn()
181 req->errors = ret; bsg_request_fn()
182 blk_end_request_all(req, ret); bsg_request_fn()
187 job = req->special; bsg_request_fn()
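The bsg-lib.c fragments sketch a classic dispatch loop: bsg_request_fn() fetches requests off the queue, bsg_create_job() builds a job for each, and a failure is reported by stuffing the error into req->errors and ending the request immediately. The toy loop below mirrors that shape in userspace; the queue, the failing request and the error value are all invented for illustration.

#include <stdio.h>

#define NREQ 3

struct req_sketch { int id; int errors; };

static struct req_sketch reqs[NREQ] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };
static int next_req;

/* Hand out the queued requests one by one, NULL when the queue is empty. */
static struct req_sketch *fetch_request(void)
{
        return next_req < NREQ ? &reqs[next_req++] : NULL;
}

static int create_job(struct req_sketch *req)
{
        return req->id == 2 ? -12 : 0;  /* pretend request 2 fails (-ENOMEM) */
}

static void end_request(struct req_sketch *req, int err)
{
        req->errors = err;
        printf("req %d completed with error %d\n", req->id, err);
}

int main(void)
{
        struct req_sketch *req;

        while ((req = fetch_request()) != NULL) {
                int ret = create_job(req);

                if (ret) {
                        end_request(req, ret);  /* fail the request early */
                        continue;
                }
                printf("req %d handed to the driver handler\n", req->id);
        }
        return 0;
}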
/linux-4.1.27/drivers/net/ethernet/emulex/benet/
H A Dbe_cmds.c900 struct be_cmd_req_eq_create *req; be_cmd_eq_create() local
908 req = embedded_payload(wrb); be_cmd_eq_create()
910 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_eq_create()
911 OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, be_cmd_eq_create()
918 req->hdr.version = ver; be_cmd_eq_create()
919 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); be_cmd_eq_create()
921 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); be_cmd_eq_create()
923 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); be_cmd_eq_create()
924 AMAP_SET_BITS(struct amap_eq_context, count, req->context, be_cmd_eq_create()
926 be_dws_cpu_to_le(req->context, sizeof(req->context)); be_cmd_eq_create()
928 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_eq_create()
949 struct be_cmd_req_mac_query *req; be_cmd_mac_addr_query() local
959 req = embedded_payload(wrb); be_cmd_mac_addr_query()
961 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_mac_addr_query()
962 OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, be_cmd_mac_addr_query()
964 req->type = MAC_ADDRESS_TYPE_NETWORK; be_cmd_mac_addr_query()
966 req->permanent = 1; be_cmd_mac_addr_query()
968 req->if_id = cpu_to_le16((u16)if_handle); be_cmd_mac_addr_query()
969 req->pmac_id = cpu_to_le32(pmac_id); be_cmd_mac_addr_query()
970 req->permanent = 0; be_cmd_mac_addr_query()
990 struct be_cmd_req_pmac_add *req; be_cmd_pmac_add() local
1000 req = embedded_payload(wrb); be_cmd_pmac_add()
1002 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_pmac_add()
1003 OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, be_cmd_pmac_add()
1006 req->hdr.domain = domain; be_cmd_pmac_add()
1007 req->if_id = cpu_to_le32(if_id); be_cmd_pmac_add()
1008 memcpy(req->mac_address, mac_addr, ETH_ALEN); be_cmd_pmac_add()
1030 struct be_cmd_req_pmac_del *req; be_cmd_pmac_del() local
1043 req = embedded_payload(wrb); be_cmd_pmac_del()
1045 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_pmac_del()
1046 OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), be_cmd_pmac_del()
1049 req->hdr.domain = dom; be_cmd_pmac_del()
1050 req->if_id = cpu_to_le32(if_id); be_cmd_pmac_del()
1051 req->pmac_id = cpu_to_le32(pmac_id); be_cmd_pmac_del()
1065 struct be_cmd_req_cq_create *req; be_cmd_cq_create() local
1074 req = embedded_payload(wrb); be_cmd_cq_create()
1075 ctxt = &req->context; be_cmd_cq_create()
1077 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_cq_create()
1078 OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, be_cmd_cq_create()
1081 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); be_cmd_cq_create()
1094 req->hdr.version = 2; be_cmd_cq_create()
1095 req->page_size = 1; /* 1 for 4K */ be_cmd_cq_create()
1112 be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_cq_create()
1114 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_cq_create()
1143 struct be_cmd_req_mcc_ext_create *req; be_cmd_mccq_ext_create() local
1152 req = embedded_payload(wrb); be_cmd_mccq_ext_create()
1153 ctxt = &req->context; be_cmd_mccq_ext_create()
1155 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_mccq_ext_create()
1156 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, be_cmd_mccq_ext_create()
1159 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); be_cmd_mccq_ext_create()
1166 req->hdr.version = 1; be_cmd_mccq_ext_create()
1167 req->cq_id = cpu_to_le16(cq->id); be_cmd_mccq_ext_create()
1181 req->async_event_bitmap[0] = be_cmd_mccq_ext_create()
1187 be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_mccq_ext_create()
1189 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_mccq_ext_create()
1208 struct be_cmd_req_mcc_create *req; be_cmd_mccq_org_create() local
1217 req = embedded_payload(wrb); be_cmd_mccq_org_create()
1218 ctxt = &req->context; be_cmd_mccq_org_create()
1220 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_mccq_org_create()
1221 OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, be_cmd_mccq_org_create()
1224 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); be_cmd_mccq_org_create()
1231 be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_mccq_org_create()
1233 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_mccq_org_create()
1265 struct be_cmd_req_eth_tx_create *req; be_cmd_txq_create() local
1271 req = embedded_payload(&wrb); be_cmd_txq_create()
1272 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, be_cmd_txq_create()
1273 OPCODE_ETH_TX_CREATE, sizeof(*req), &wrb, NULL); be_cmd_txq_create()
1276 req->hdr.version = 1; be_cmd_txq_create()
1279 req->hdr.version = 2; be_cmd_txq_create()
1281 req->hdr.version = 2; be_cmd_txq_create()
1284 if (req->hdr.version > 0) be_cmd_txq_create()
1285 req->if_id = cpu_to_le16(adapter->if_handle); be_cmd_txq_create()
1286 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); be_cmd_txq_create()
1287 req->ulp_num = BE_ULP1_NUM; be_cmd_txq_create()
1288 req->type = BE_ETH_TX_RING_TYPE_STANDARD; be_cmd_txq_create()
1289 req->cq_id = cpu_to_le16(cq->id); be_cmd_txq_create()
1290 req->queue_size = be_encoded_q_len(txq->len); be_cmd_txq_create()
1291 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_txq_create()
1292 ver = req->hdr.version; be_cmd_txq_create()
1315 struct be_cmd_req_eth_rx_create *req; be_cmd_rxq_create() local
1326 req = embedded_payload(wrb); be_cmd_rxq_create()
1328 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, be_cmd_rxq_create()
1329 OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); be_cmd_rxq_create()
1331 req->cq_id = cpu_to_le16(cq_id); be_cmd_rxq_create()
1332 req->frag_size = fls(frag_size) - 1; be_cmd_rxq_create()
1333 req->num_pages = 2; be_cmd_rxq_create()
1334 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_rxq_create()
1335 req->interface_id = cpu_to_le32(if_id); be_cmd_rxq_create()
1336 req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE); be_cmd_rxq_create()
1337 req->rss_queue = cpu_to_le32(rss); be_cmd_rxq_create()
1360 struct be_cmd_req_q_destroy *req; be_cmd_q_destroy() local
1368 req = embedded_payload(wrb); be_cmd_q_destroy()
1395 be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, be_cmd_q_destroy()
1397 req->id = cpu_to_le16(q->id); be_cmd_q_destroy()
1410 struct be_cmd_req_q_destroy *req; be_cmd_rxq_destroy() local
1420 req = embedded_payload(wrb); be_cmd_rxq_destroy()
1422 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, be_cmd_rxq_destroy()
1423 OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); be_cmd_rxq_destroy()
1424 req->id = cpu_to_le16(q->id); be_cmd_rxq_destroy()
1441 struct be_cmd_req_if_create *req; be_cmd_if_create() local
1444 req = embedded_payload(&wrb); be_cmd_if_create()
1445 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_if_create()
1447 sizeof(*req), &wrb, NULL); be_cmd_if_create()
1448 req->hdr.domain = domain; be_cmd_if_create()
1449 req->capability_flags = cpu_to_le32(cap_flags); be_cmd_if_create()
1450 req->enable_flags = cpu_to_le32(en_flags); be_cmd_if_create()
1451 req->pmac_invalid = true; be_cmd_if_create()
1470 struct be_cmd_req_if_destroy *req; be_cmd_if_destroy() local
1483 req = embedded_payload(wrb); be_cmd_if_destroy()
1485 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_if_destroy()
1487 sizeof(*req), wrb, NULL); be_cmd_if_destroy()
1488 req->hdr.domain = domain; be_cmd_if_destroy()
1489 req->interface_id = cpu_to_le32(interface_id); be_cmd_if_destroy()
1541 struct lancer_cmd_req_pport_stats *req; lancer_cmd_get_pport_stats() local
1555 req = nonemb_cmd->va; lancer_cmd_get_pport_stats()
1557 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, lancer_cmd_get_pport_stats()
1561 req->cmd_params.params.pport_num = cpu_to_le16(adapter->hba_port_num); lancer_cmd_get_pport_stats()
1562 req->cmd_params.params.reset_stats = 0; lancer_cmd_get_pport_stats()
1602 struct be_cmd_req_link_status *req; be_cmd_link_status_query() local
1615 req = embedded_payload(wrb); be_cmd_link_status_query()
1617 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_link_status_query()
1619 sizeof(*req), wrb, NULL); be_cmd_link_status_query()
1623 req->hdr.version = 1; be_cmd_link_status_query()
1625 req->hdr.domain = dom; be_cmd_link_status_query()
1652 struct be_cmd_req_get_cntl_addnl_attribs *req; be_cmd_get_die_temperature() local
1662 req = embedded_payload(wrb); be_cmd_get_die_temperature()
1664 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_die_temperature()
1666 sizeof(*req), wrb, NULL); be_cmd_get_die_temperature()
1679 struct be_cmd_req_get_fat *req; be_cmd_get_reg_len() local
1689 req = embedded_payload(wrb); be_cmd_get_reg_len()
1691 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_reg_len()
1692 OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, be_cmd_get_reg_len()
1694 req->fat_operation = cpu_to_le32(QUERY_FAT); be_cmd_get_reg_len()
1712 struct be_cmd_req_get_fat *req; be_cmd_get_regs() local
1743 req = get_fat_cmd.va; be_cmd_get_regs()
1746 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_regs()
1750 req->fat_operation = cpu_to_le32(RETRIEVE_FAT); be_cmd_get_regs()
1751 req->read_log_offset = cpu_to_le32(log_offset); be_cmd_get_regs()
1752 req->read_log_length = cpu_to_le32(buf_size); be_cmd_get_regs()
1753 req->data_buffer_size = cpu_to_le32(buf_size); be_cmd_get_regs()
1780 struct be_cmd_req_get_fw_version *req; be_cmd_get_fw_ver() local
1791 req = embedded_payload(wrb); be_cmd_get_fw_ver()
1793 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_fw_ver()
1794 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, be_cmd_get_fw_ver()
1817 struct be_cmd_req_modify_eq_delay *req; __be_cmd_modify_eqd() local
1827 req = embedded_payload(wrb); __be_cmd_modify_eqd()
1829 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, __be_cmd_modify_eqd()
1830 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, __be_cmd_modify_eqd()
1833 req->num_eq = cpu_to_le32(num); __be_cmd_modify_eqd()
1835 req->set_eqd[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); __be_cmd_modify_eqd()
1836 req->set_eqd[i].phase = 0; __be_cmd_modify_eqd()
1837 req->set_eqd[i].delay_multiplier = __be_cmd_modify_eqd()
1867 struct be_cmd_req_vlan_config *req; be_cmd_vlan_config() local
1877 req = embedded_payload(wrb); be_cmd_vlan_config()
1879 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_vlan_config()
1880 OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), be_cmd_vlan_config()
1882 req->hdr.domain = domain; be_cmd_vlan_config()
1884 req->interface_id = if_id; be_cmd_vlan_config()
1885 req->untagged = BE_IF_FLAGS_UNTAGGED & be_if_cap_flags(adapter) ? 1 : 0; be_cmd_vlan_config()
1886 req->num_vlan = num; be_cmd_vlan_config()
1887 memcpy(req->normal_vlan, vtag_array, be_cmd_vlan_config()
1888 req->num_vlan * sizeof(vtag_array[0])); be_cmd_vlan_config()
1900 struct be_cmd_req_rx_filter *req = mem->va; __be_cmd_rx_filter() local
1910 memset(req, 0, sizeof(*req)); __be_cmd_rx_filter()
1911 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, __be_cmd_rx_filter()
1912 OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), __be_cmd_rx_filter()
1915 req->if_id = cpu_to_le32(adapter->if_handle); __be_cmd_rx_filter()
1916 req->if_flags_mask = cpu_to_le32(flags); __be_cmd_rx_filter()
1917 req->if_flags = (value == ON) ? req->if_flags_mask : 0; __be_cmd_rx_filter()
1926 req->if_flags_mask |= __be_cmd_rx_filter()
1929 req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); __be_cmd_rx_filter()
1931 memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); __be_cmd_rx_filter()
1958 struct be_cmd_req_set_flow_control *req; be_cmd_set_flow_control() local
1972 req = embedded_payload(wrb); be_cmd_set_flow_control()
1974 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_set_flow_control()
1975 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), be_cmd_set_flow_control()
1978 req->hdr.version = 1; be_cmd_set_flow_control()
1979 req->tx_flow_control = cpu_to_le16((u16)tx_fc); be_cmd_set_flow_control()
1980 req->rx_flow_control = cpu_to_le16((u16)rx_fc); be_cmd_set_flow_control()
1997 struct be_cmd_req_get_flow_control *req; be_cmd_get_flow_control() local
2011 req = embedded_payload(wrb); be_cmd_get_flow_control()
2013 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_flow_control()
2014 OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), be_cmd_get_flow_control()
2035 struct be_cmd_req_query_fw_cfg *req; be_cmd_query_fw_cfg() local
2042 req = embedded_payload(wrb); be_cmd_query_fw_cfg()
2044 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_query_fw_cfg()
2046 sizeof(*req), wrb, NULL); be_cmd_query_fw_cfg()
2069 struct be_cmd_req_hdr *req; be_cmd_reset_function() local
2086 req = embedded_payload(wrb); be_cmd_reset_function()
2088 be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, be_cmd_reset_function()
2089 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, be_cmd_reset_function()
2102 struct be_cmd_req_rss_config *req; be_cmd_rss_config() local
2115 req = embedded_payload(wrb); be_cmd_rss_config()
2117 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, be_cmd_rss_config()
2118 OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); be_cmd_rss_config()
2120 req->if_id = cpu_to_le32(adapter->if_handle); be_cmd_rss_config()
2121 req->enable_rss = cpu_to_le16(rss_hash_opts); be_cmd_rss_config()
2122 req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); be_cmd_rss_config()
2125 req->hdr.version = 1; be_cmd_rss_config()
2127 memcpy(req->cpu_table, rsstable, table_size); be_cmd_rss_config()
2128 memcpy(req->hash, rss_hkey, RSS_HASH_KEY_LEN); be_cmd_rss_config()
2129 be_dws_cpu_to_le(req->hash, sizeof(req->hash)); be_cmd_rss_config()
2142 struct be_cmd_req_enable_disable_beacon *req; be_cmd_set_beacon_state() local
2152 req = embedded_payload(wrb); be_cmd_set_beacon_state()
2154 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_set_beacon_state()
2156 sizeof(*req), wrb, NULL); be_cmd_set_beacon_state()
2158 req->port_num = port_num; be_cmd_set_beacon_state()
2159 req->beacon_state = state; be_cmd_set_beacon_state()
2160 req->beacon_duration = bcn; be_cmd_set_beacon_state()
2161 req->status_duration = sts; be_cmd_set_beacon_state()
2174 struct be_cmd_req_get_beacon_state *req; be_cmd_get_beacon_state() local
2184 req = embedded_payload(wrb); be_cmd_get_beacon_state()
2186 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_beacon_state()
2187 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), be_cmd_get_beacon_state()
2190 req->port_num = port_num; be_cmd_get_beacon_state()
2211 struct be_cmd_req_port_type *req; be_cmd_read_port_transceiver_data() local
2232 req = cmd.va; be_cmd_read_port_transceiver_data()
2234 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_read_port_transceiver_data()
2238 req->port = cpu_to_le32(adapter->hba_port_num); be_cmd_read_port_transceiver_data()
2239 req->page_num = cpu_to_le32(page_num); be_cmd_read_port_transceiver_data()
2258 struct lancer_cmd_req_write_object *req; lancer_cmd_write_object() local
2272 req = embedded_payload(wrb); lancer_cmd_write_object()
2274 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, lancer_cmd_write_object()
2279 ctxt = &req->context; lancer_cmd_write_object()
2290 be_dws_cpu_to_le(ctxt, sizeof(req->context)); lancer_cmd_write_object()
2291 req->write_offset = cpu_to_le32(data_offset); lancer_cmd_write_object()
2292 strlcpy(req->object_name, obj_name, sizeof(req->object_name)); lancer_cmd_write_object()
2293 req->descriptor_count = cpu_to_le32(1); lancer_cmd_write_object()
2294 req->buf_len = cpu_to_le32(data_size); lancer_cmd_write_object()
2295 req->addr_low = cpu_to_le32((cmd->dma + lancer_cmd_write_object()
2298 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + lancer_cmd_write_object()
2370 struct lancer_cmd_req_delete_object *req; lancer_cmd_delete_object() local
2382 req = embedded_payload(wrb); lancer_cmd_delete_object()
2384 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, lancer_cmd_delete_object()
2386 sizeof(*req), wrb, NULL); lancer_cmd_delete_object()
2388 strlcpy(req->object_name, obj_name, sizeof(req->object_name)); lancer_cmd_delete_object()
2401 struct lancer_cmd_req_read_object *req; lancer_cmd_read_object() local
2413 req = embedded_payload(wrb); lancer_cmd_read_object()
2415 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, lancer_cmd_read_object()
2420 req->desired_read_len = cpu_to_le32(data_size); lancer_cmd_read_object()
2421 req->read_offset = cpu_to_le32(data_offset); lancer_cmd_read_object()
2422 strcpy(req->object_name, obj_name); lancer_cmd_read_object()
2423 req->descriptor_count = cpu_to_le32(1); lancer_cmd_read_object()
2424 req->buf_len = cpu_to_le32(data_size); lancer_cmd_read_object()
2425 req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); lancer_cmd_read_object()
2426 req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); lancer_cmd_read_object()
2448 struct be_cmd_write_flashrom *req; be_cmd_write_flashrom() local
2459 req = cmd->va; be_cmd_write_flashrom()
2461 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_write_flashrom()
2465 req->params.op_type = cpu_to_le32(flash_type); be_cmd_write_flashrom()
2467 req->params.offset = cpu_to_le32(img_offset); be_cmd_write_flashrom()
2469 req->params.op_code = cpu_to_le32(flash_opcode); be_cmd_write_flashrom()
2470 req->params.data_buf_size = cpu_to_le32(buf_size); be_cmd_write_flashrom()
2491 struct be_cmd_read_flash_crc *req; be_cmd_get_flash_crc() local
2502 req = embedded_payload(wrb); be_cmd_get_flash_crc()
2504 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_flash_crc()
2505 OPCODE_COMMON_READ_FLASHROM, sizeof(*req), be_cmd_get_flash_crc()
2508 req->params.op_type = cpu_to_le32(img_optype); be_cmd_get_flash_crc()
2510 req->params.offset = cpu_to_le32(img_offset + crc_offset); be_cmd_get_flash_crc()
2512 req->params.offset = cpu_to_le32(crc_offset); be_cmd_get_flash_crc()
2514 req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); be_cmd_get_flash_crc()
2515 req->params.data_buf_size = cpu_to_le32(0x4); be_cmd_get_flash_crc()
2519 memcpy(flashed_crc, req->crc, 4); be_cmd_get_flash_crc()
2530 struct be_cmd_req_acpi_wol_magic_config *req; be_cmd_enable_magic_wol() local
2540 req = nonemb_cmd->va; be_cmd_enable_magic_wol()
2542 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, be_cmd_enable_magic_wol()
2543 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), be_cmd_enable_magic_wol()
2545 memcpy(req->magic_mac, mac, ETH_ALEN); be_cmd_enable_magic_wol()
2558 struct be_cmd_req_set_lmode *req; be_cmd_set_loopback() local
2569 req = embedded_payload(wrb); be_cmd_set_loopback()
2571 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, be_cmd_set_loopback()
2572 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), be_cmd_set_loopback()
2575 req->src_port = port_num; be_cmd_set_loopback()
2576 req->dest_port = port_num; be_cmd_set_loopback()
2577 req->loopback_type = loopback_type; be_cmd_set_loopback()
2578 req->loopback_state = enable; be_cmd_set_loopback()
2591 struct be_cmd_req_loopback_test *req; be_cmd_loopback_test() local
2603 req = embedded_payload(wrb); be_cmd_loopback_test()
2605 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, be_cmd_loopback_test()
2606 OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, be_cmd_loopback_test()
2609 req->hdr.timeout = cpu_to_le32(15); be_cmd_loopback_test()
2610 req->pattern = cpu_to_le64(pattern); be_cmd_loopback_test()
2611 req->src_port = cpu_to_le32(port_num); be_cmd_loopback_test()
2612 req->dest_port = cpu_to_le32(port_num); be_cmd_loopback_test()
2613 req->pkt_size = cpu_to_le32(pkt_size); be_cmd_loopback_test()
2614 req->num_pkts = cpu_to_le32(num_pkts); be_cmd_loopback_test()
2615 req->loopback_type = cpu_to_le32(loopback_type); be_cmd_loopback_test()
2635 struct be_cmd_req_ddrdma_test *req; be_cmd_ddr_dma_test() local
2646 req = cmd->va; be_cmd_ddr_dma_test()
2647 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, be_cmd_ddr_dma_test()
2651 req->pattern = cpu_to_le64(pattern); be_cmd_ddr_dma_test()
2652 req->byte_count = cpu_to_le32(byte_cnt); be_cmd_ddr_dma_test()
2654 req->snd_buff[i] = (u8)(pattern >> (j*8)); be_cmd_ddr_dma_test()
2666 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || be_cmd_ddr_dma_test()
2681 struct be_cmd_req_seeprom_read *req; be_cmd_get_seeprom_data() local
2691 req = nonemb_cmd->va; be_cmd_get_seeprom_data()
2693 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_seeprom_data()
2694 OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, be_cmd_get_seeprom_data()
2707 struct be_cmd_req_get_phy_info *req; be_cmd_get_phy_info() local
2731 req = cmd.va; be_cmd_get_phy_info()
2733 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_phy_info()
2734 OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), be_cmd_get_phy_info()
2767 struct be_cmd_req_set_qos *req; be_cmd_set_qos() local
2778 req = embedded_payload(wrb); be_cmd_set_qos()
2780 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_set_qos()
2781 OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); be_cmd_set_qos()
2783 req->hdr.domain = domain; be_cmd_set_qos()
2784 req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); be_cmd_set_qos()
2785 req->max_bps_nic = cpu_to_le32(bps); be_cmd_set_qos()
2797 struct be_cmd_req_cntl_attribs *req; be_cmd_get_cntl_attributes() local
2800 int payload_len = max(sizeof(*req), sizeof(*resp)); be_cmd_get_cntl_attributes()
2823 req = attribs_cmd.va; be_cmd_get_cntl_attributes()
2825 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_cntl_attributes()
2847 struct be_cmd_req_set_func_cap *req; be_cmd_req_native_mode() local
2859 req = embedded_payload(wrb); be_cmd_req_native_mode()
2861 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_req_native_mode()
2863 sizeof(*req), wrb, NULL); be_cmd_req_native_mode()
2865 req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | be_cmd_req_native_mode()
2867 req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); be_cmd_req_native_mode()
2889 struct be_cmd_req_get_fn_privileges *req; be_cmd_get_fn_privileges() local
2900 req = embedded_payload(wrb); be_cmd_get_fn_privileges()
2902 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_fn_privileges()
2903 OPCODE_COMMON_GET_FN_PRIVILEGES, sizeof(*req), be_cmd_get_fn_privileges()
2906 req->hdr.domain = domain; be_cmd_get_fn_privileges()
2933 struct be_cmd_req_set_fn_privileges *req; be_cmd_set_fn_privileges() local
2944 req = embedded_payload(wrb); be_cmd_set_fn_privileges()
2945 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_set_fn_privileges()
2946 OPCODE_COMMON_SET_FN_PRIVILEGES, sizeof(*req), be_cmd_set_fn_privileges()
2948 req->hdr.domain = domain; be_cmd_set_fn_privileges()
2950 req->privileges_lancer = cpu_to_le32(privileges); be_cmd_set_fn_privileges()
2952 req->privileges = cpu_to_le32(privileges); be_cmd_set_fn_privileges()
2969 struct be_cmd_req_get_mac_list *req; be_cmd_get_mac_from_list() local
2996 req = get_mac_list_cmd.va; be_cmd_get_mac_from_list()
2998 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_mac_from_list()
3001 req->hdr.domain = domain; be_cmd_get_mac_from_list()
3002 req->mac_type = MAC_ADDRESS_TYPE_NETWORK; be_cmd_get_mac_from_list()
3004 req->mac_id = cpu_to_le32(*pmac_id); be_cmd_get_mac_from_list()
3005 req->iface_id = cpu_to_le16(if_handle); be_cmd_get_mac_from_list()
3006 req->perm_override = 0; be_cmd_get_mac_from_list()
3008 req->perm_override = 1; be_cmd_get_mac_from_list()
3101 struct be_cmd_req_set_mac_list *req; be_cmd_set_mac_list() local
3120 req = cmd.va; be_cmd_set_mac_list()
3121 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_set_mac_list()
3122 OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), be_cmd_set_mac_list()
3125 req->hdr.domain = domain; be_cmd_set_mac_list()
3126 req->mac_count = mac_count; be_cmd_set_mac_list()
3128 memcpy(req->mac, mac_array, ETH_ALEN*mac_count); be_cmd_set_mac_list()
3162 struct be_cmd_req_set_hsw_config *req; be_cmd_set_hsw_config() local
3174 req = embedded_payload(wrb); be_cmd_set_hsw_config()
3175 ctxt = &req->context; be_cmd_set_hsw_config()
3177 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_set_hsw_config()
3178 OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, be_cmd_set_hsw_config()
3181 req->hdr.domain = domain; be_cmd_set_hsw_config()
3195 be_dws_cpu_to_le(req->context, sizeof(req->context)); be_cmd_set_hsw_config()
3208 struct be_cmd_req_get_hsw_config *req; be_cmd_get_hsw_config() local
3221 req = embedded_payload(wrb); be_cmd_get_hsw_config()
3222 ctxt = &req->context; be_cmd_get_hsw_config()
3224 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_hsw_config()
3225 OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, be_cmd_get_hsw_config()
3228 req->hdr.domain = domain; be_cmd_get_hsw_config()
3238 be_dws_cpu_to_le(req->context, sizeof(req->context)); be_cmd_get_hsw_config()
3281 struct be_cmd_req_acpi_wol_magic_config_v1 *req; be_cmd_get_acpi_wol_cap() local
3311 req = cmd.va; be_cmd_get_acpi_wol_cap()
3313 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, be_cmd_get_acpi_wol_cap()
3315 sizeof(*req), wrb, &cmd); be_cmd_get_acpi_wol_cap()
3317 req->hdr.version = 1; be_cmd_get_acpi_wol_cap()
3318 req->query_options = BE_GET_WOL_CAP; be_cmd_get_acpi_wol_cap()
3416 struct be_cmd_req_get_ext_fat_caps *req; be_cmd_get_ext_fat_capabilites() local
3428 req = cmd->va; be_cmd_get_ext_fat_capabilites()
3429 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_ext_fat_capabilites()
3432 req->parameter_type = cpu_to_le32(1); be_cmd_get_ext_fat_capabilites()
3445 struct be_cmd_req_set_ext_fat_caps *req; be_cmd_set_ext_fat_capabilites() local
3456 req = cmd->va; be_cmd_set_ext_fat_capabilites()
3457 memcpy(&req->set_params, configs, sizeof(struct be_fat_conf_params)); be_cmd_set_ext_fat_capabilites()
3458 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_set_ext_fat_capabilites()
3470 struct be_cmd_req_get_port_name *req; be_cmd_query_port_name() local
3478 req = embedded_payload(wrb); be_cmd_query_port_name()
3480 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_query_port_name()
3481 OPCODE_COMMON_GET_PORT_NAME, sizeof(*req), wrb, be_cmd_query_port_name()
3484 req->hdr.version = 1; be_cmd_query_port_name()
3596 struct be_cmd_req_get_func_config *req; be_cmd_get_func_config() local
3619 req = cmd.va; be_cmd_get_func_config()
3621 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_func_config()
3626 req->hdr.version = 1; be_cmd_get_func_config()
3656 struct be_cmd_req_get_profile_config *req; be_cmd_get_profile_config() local
3673 req = cmd.va; be_cmd_get_profile_config()
3674 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_profile_config()
3678 req->hdr.domain = domain; be_cmd_get_profile_config()
3680 req->hdr.version = 1; be_cmd_get_profile_config()
3681 req->type = ACTIVE_PROFILE_TYPE; be_cmd_get_profile_config()
3688 req->type |= QUERY_MODIFIABLE_FIELDS_TYPE; be_cmd_get_profile_config()
3724 struct be_cmd_req_set_profile_config *req; be_cmd_set_profile_config() local
3736 req = cmd.va; be_cmd_set_profile_config()
3737 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_set_profile_config()
3740 req->hdr.version = version; be_cmd_set_profile_config()
3741 req->hdr.domain = domain; be_cmd_set_profile_config()
3742 req->desc_count = cpu_to_le32(count); be_cmd_set_profile_config()
3743 memcpy(req->desc, desc, size); be_cmd_set_profile_config()
3919 struct be_cmd_req_manage_iface_filters *req; be_cmd_manage_iface() local
3932 req = embedded_payload(wrb); be_cmd_manage_iface()
3934 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_manage_iface()
3935 OPCODE_COMMON_MANAGE_IFACE_FILTERS, sizeof(*req), be_cmd_manage_iface()
3937 req->op = op; be_cmd_manage_iface()
3938 req->target_iface_id = cpu_to_le32(iface); be_cmd_manage_iface()
3972 struct be_cmd_req_get_iface_list *req; be_cmd_get_if_id() local
3983 req = embedded_payload(wrb); be_cmd_get_if_id()
3985 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_if_id()
3988 req->hdr.domain = vf_num + 1; be_cmd_get_if_id()
3992 resp = (struct be_cmd_resp_get_iface_list *)req; be_cmd_get_if_id()
4085 struct be_cmd_enable_disable_vf *req; be_cmd_enable_vf() local
4099 req = embedded_payload(wrb); be_cmd_enable_vf()
4101 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_enable_vf()
4102 OPCODE_COMMON_ENABLE_DISABLE_VF, sizeof(*req), be_cmd_enable_vf()
4105 req->hdr.domain = domain; be_cmd_enable_vf()
4106 req->enable = 1; be_cmd_enable_vf()
4116 struct be_cmd_req_intr_set *req; be_cmd_intr_set() local
4124 req = embedded_payload(wrb); be_cmd_intr_set()
4126 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_intr_set()
4127 OPCODE_COMMON_SET_INTERRUPT_ENABLE, sizeof(*req), be_cmd_intr_set()
4130 req->intr_enabled = intr_enable; be_cmd_intr_set()
4141 struct be_cmd_req_get_active_profile *req; be_cmd_get_active_profile() local
4154 req = embedded_payload(wrb); be_cmd_get_active_profile()
4156 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_active_profile()
4157 OPCODE_COMMON_GET_ACTIVE_PROFILE, sizeof(*req), be_cmd_get_active_profile()
4177 struct be_cmd_req_set_ll_link *req; be_cmd_set_logical_link_config() local
4191 req = embedded_payload(wrb); be_cmd_set_logical_link_config()
4193 be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_set_logical_link_config()
4195 sizeof(*req), wrb, NULL); be_cmd_set_logical_link_config()
4197 req->hdr.version = 1; be_cmd_set_logical_link_config()
4198 req->hdr.domain = domain; be_cmd_set_logical_link_config()
4201 req->link_config |= 1; be_cmd_set_logical_link_config()
4204 req->link_config |= 1 << PLINK_TRACK_SHIFT; be_cmd_set_logical_link_config()
4218 struct be_cmd_req_hdr *req; be_roce_mcc_cmd() local
4229 req = embedded_payload(wrb); be_roce_mcc_cmd()
4232 be_wrb_cmd_hdr_prepare(req, hdr->subsystem, be_roce_mcc_cmd()
4234 memcpy(req, wrb_payload, wrb_payload_size); be_roce_mcc_cmd()
4235 be_dws_cpu_to_le(req, wrb_payload_size); be_roce_mcc_cmd()
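
A note on the be_cmds.c hits above: they all share one firmware-command pattern — obtain a work request block, treat its embedded payload as the command-specific request struct, stamp the common header with be_wrb_cmd_hdr_prepare() (subsystem, opcode, payload size), then fill the body using cpu_to_le16()/cpu_to_le32() before issuing the command. The stand-alone sketch below mirrors only that header-then-body flow; the struct layout, the mac_query_req name and the put_le16()/put_le32() helpers are simplified assumptions for illustration, not the driver's real definitions.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the driver's header/request layout (assumed, not verbatim). */
struct cmd_hdr {
        uint8_t subsystem;
        uint8_t opcode;
        uint8_t version;
        uint8_t domain;
        uint8_t request_length[4];      /* little-endian on the wire */
};

struct mac_query_req {                  /* hypothetical payload layout */
        struct cmd_hdr hdr;
        uint8_t type;
        uint8_t permanent;
        uint8_t if_id[2];               /* little-endian on the wire */
        uint8_t pmac_id[4];             /* little-endian on the wire */
};

/* Explicit little-endian stores so the sketch is independent of host byte order. */
static void put_le16(uint8_t *p, uint16_t v)
{
        p[0] = v & 0xff;
        p[1] = v >> 8;
}

static void put_le32(uint8_t *p, uint32_t v)
{
        p[0] = v & 0xff;
        p[1] = (v >> 8) & 0xff;
        p[2] = (v >> 16) & 0xff;
        p[3] = (v >> 24) & 0xff;
}

/* Rough analogue of be_wrb_cmd_hdr_prepare(): stamp the common header first. */
static void prepare_cmd_hdr(struct cmd_hdr *hdr, uint8_t subsys, uint8_t opcode, uint32_t len)
{
        memset(hdr, 0, sizeof(*hdr));
        hdr->subsystem = subsys;
        hdr->opcode = opcode;
        put_le32(hdr->request_length, len);
}

int main(void)
{
        struct mac_query_req req;

        /* Header first, then the command-specific body, mirroring the driver's flow. */
        prepare_cmd_hdr(&req.hdr, 1 /* subsys */, 2 /* opcode */,
                        (uint32_t)(sizeof(req) - sizeof(req.hdr)));
        req.type = 1;           /* MAC_ADDRESS_TYPE_NETWORK analogue */
        req.permanent = 1;      /* ask for the factory MAC, as be_cmd_mac_addr_query() does */
        put_le16(req.if_id, 0);
        put_le32(req.pmac_id, 0);

        printf("request prepared: %zu payload bytes\n", sizeof(req));
        return 0;
}

Compiled with a plain cc this only prints the prepared request size; in the driver the same filled payload is handed to the mailbox/MCC queue instead.
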
/linux-4.1.27/drivers/scsi/device_handler/
H A Dscsi_dh_hp_sw.c114 struct request *req; hp_sw_tur() local
118 req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); hp_sw_tur()
119 if (IS_ERR(req)) hp_sw_tur()
122 blk_rq_set_block_pc(req); hp_sw_tur()
123 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | hp_sw_tur()
125 req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); hp_sw_tur()
126 req->cmd[0] = TEST_UNIT_READY; hp_sw_tur()
127 req->timeout = HP_SW_TIMEOUT; hp_sw_tur()
128 req->sense = h->sense; hp_sw_tur()
129 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); hp_sw_tur()
130 req->sense_len = 0; hp_sw_tur()
132 ret = blk_execute_rq(req->q, NULL, req, 1); hp_sw_tur()
134 if (req->sense_len > 0) { hp_sw_tur()
139 HP_SW_NAME, req->errors); hp_sw_tur()
147 blk_put_request(req); hp_sw_tur()
155 blk_put_request(req); hp_sw_tur()
201 static void start_stop_endio(struct request *req, int error) start_stop_endio() argument
203 struct hp_sw_dh_data *h = req->end_io_data; start_stop_endio()
206 if (error || host_byte(req->errors) != DID_OK || start_stop_endio()
207 msg_byte(req->errors) != COMMAND_COMPLETE) { start_stop_endio()
210 HP_SW_NAME, req->errors); start_stop_endio()
215 if (req->sense_len > 0) { start_stop_endio()
220 blk_put_request(req); start_stop_endio()
228 req->end_io_data = NULL; start_stop_endio()
229 __blk_put_request(req->q, req); start_stop_endio()
246 struct request *req; hp_sw_start_stop() local
248 req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC); hp_sw_start_stop()
249 if (IS_ERR(req)) hp_sw_start_stop()
252 blk_rq_set_block_pc(req); hp_sw_start_stop()
253 req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | hp_sw_start_stop()
255 req->cmd_len = COMMAND_SIZE(START_STOP); hp_sw_start_stop()
256 req->cmd[0] = START_STOP; hp_sw_start_stop()
257 req->cmd[4] = 1; /* Start spin cycle */ hp_sw_start_stop()
258 req->timeout = HP_SW_TIMEOUT; hp_sw_start_stop()
259 req->sense = h->sense; hp_sw_start_stop()
260 memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); hp_sw_start_stop()
261 req->sense_len = 0; hp_sw_start_stop()
262 req->end_io_data = h; hp_sw_start_stop()
264 blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio); hp_sw_start_stop()
268 static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) hp_sw_prep_fn() argument
275 req->cmd_flags |= REQ_QUIET; hp_sw_prep_fn()
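
The scsi_dh_hp_sw.c hits show the block-layer passthrough idiom: allocate a request, mark it as a block-PC command, fill the CDB, timeout and sense buffer, then run it either synchronously (blk_execute_rq in hp_sw_tur) or asynchronously with an end_io callback (blk_execute_rq_nowait in hp_sw_start_stop). The minimal sketch below imitates only the request-plus-completion-callback shape in plain C; fake_request, fake_execute and start_stop_done are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical request object loosely modelled on the block-layer usage above. */
struct fake_request {
        unsigned char cmd[16];          /* CDB bytes */
        unsigned int  cmd_len;
        unsigned int  timeout;
        int           errors;           /* result code filled in at completion */
        void         *end_io_data;      /* caller context for the async path */
        void        (*end_io)(struct fake_request *rq, int error);
};

/* Pretend "execute": completes immediately with success and runs the callback. */
static void fake_execute(struct fake_request *rq)
{
        rq->errors = 0;
        if (rq->end_io)
                rq->end_io(rq, 0);      /* async style, like blk_execute_rq_nowait() */
}

static void start_stop_done(struct fake_request *rq, int error)
{
        /* The driver's start_stop_endio() similarly inspects errors, then releases the request. */
        printf("START STOP completed, error=%d errors=%d\n", error, rq->errors);
        free(rq);
}

int main(void)
{
        struct fake_request *rq = calloc(1, sizeof(*rq));

        if (!rq)
                return 1;
        rq->cmd[0] = 0x1b;              /* SCSI START STOP UNIT opcode */
        rq->cmd[4] = 1;                 /* start spin cycle, as in hp_sw_start_stop() */
        rq->cmd_len = 6;
        rq->timeout = 60;
        rq->end_io = start_stop_done;

        fake_execute(rq);               /* hp_sw_tur() would instead wait synchronously */
        return 0;
}
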
/linux-4.1.27/drivers/usb/gadget/legacy/
H A Ddbgp.c27 struct usb_request *req; member in struct:dbgp
94 static void dbgp_complete(struct usb_ep *ep, struct usb_request *req) dbgp_complete() argument
98 int status = req->status; dbgp_complete()
110 dbgp_consume(req->buf, req->actual); dbgp_complete()
112 req->length = DBGP_REQ_LEN; dbgp_complete()
113 err = usb_ep_queue(ep, req, GFP_ATOMIC); dbgp_complete()
122 kfree(req->buf); dbgp_complete()
123 usb_ep_free_request(dbgp.o_ep, req); dbgp_complete()
133 struct usb_request *req; dbgp_enable_ep_req() local
135 req = usb_ep_alloc_request(ep, GFP_KERNEL); dbgp_enable_ep_req()
136 if (!req) { dbgp_enable_ep_req()
142 req->buf = kmalloc(DBGP_REQ_LEN, GFP_KERNEL); dbgp_enable_ep_req()
143 if (!req->buf) { dbgp_enable_ep_req()
149 req->complete = dbgp_complete; dbgp_enable_ep_req()
150 req->length = DBGP_REQ_LEN; dbgp_enable_ep_req()
151 err = usb_ep_queue(ep, req, GFP_ATOMIC); dbgp_enable_ep_req()
160 kfree(req->buf); dbgp_enable_ep_req()
162 usb_ep_free_request(dbgp.o_ep, req); dbgp_enable_ep_req()
165 "enable ep req: failure (%d:%d)\n", stp, err); dbgp_enable_ep_req()
227 if (dbgp.req) { dbgp_unbind()
228 kfree(dbgp.req->buf); dbgp_unbind()
229 usb_ep_free_request(gadget->ep0, dbgp.req); dbgp_unbind()
230 dbgp.req = NULL; dbgp_unbind()
294 dbgp.req = usb_ep_alloc_request(gadget->ep0, GFP_KERNEL); dbgp_bind()
295 if (!dbgp.req) { dbgp_bind()
301 dbgp.req->buf = kmalloc(DBGP_REQ_EP0_LEN, GFP_KERNEL); dbgp_bind()
302 if (!dbgp.req->buf) { dbgp_bind()
308 dbgp.req->length = DBGP_REQ_EP0_LEN; dbgp_bind()
342 struct usb_request *req) dbgp_setup_complete()
345 req->status, req->actual, req->length); dbgp_setup_complete()
351 struct usb_request *req = dbgp.req; dbgp_setup() local
395 req->length = min(length, len); dbgp_setup()
396 req->zero = len < req->length; dbgp_setup()
397 if (data && req->length) dbgp_setup()
398 memcpy(req->buf, data, req->length); dbgp_setup()
400 req->complete = dbgp_setup_complete; dbgp_setup()
401 return usb_ep_queue(gadget->ep0, req, GFP_ATOMIC); dbgp_setup()
405 "setup: failure req %x v %x\n", request, value); dbgp_setup()
341 dbgp_setup_complete(struct usb_ep *ep, struct usb_request *req) dbgp_setup_complete() argument
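
The dbgp.c hits follow the usual gadget endpoint request lifecycle: allocate a usb_request, allocate its buffer, set length and the completion handler, queue it, and in the completion either consume the data and requeue or free everything on error. Below is a minimal sketch of that alloc/queue/complete cycle; fake_req and fake_ep_queue() are invented for illustration and do not correspond to real gadget API calls.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define REQ_LEN 512                     /* illustrative, not DBGP_REQ_LEN */

/* Hypothetical endpoint-request type standing in for struct usb_request. */
struct fake_req {
        void  *buf;
        size_t length;
        size_t actual;
        int    status;
        void (*complete)(struct fake_req *req);
};

/* "Queue" here just simulates the transfer finishing immediately. */
static int fake_ep_queue(struct fake_req *req)
{
        req->actual = req->length;
        req->status = 0;
        memset(req->buf, 'x', req->actual);
        req->complete(req);
        return 0;
}

static void rx_complete(struct fake_req *req)
{
        if (req->status != 0) {
                /* dbgp_complete() frees the buffer and the request on fatal errors. */
                printf("transfer failed: %d\n", req->status);
                return;
        }
        /* On success the real handler consumes the data and requeues the same request. */
        printf("received %zu bytes\n", req->actual);
}

int main(void)
{
        struct fake_req *req = calloc(1, sizeof(*req));

        if (!req)
                return 1;
        req->buf = malloc(REQ_LEN);
        if (!req->buf) {
                free(req);
                return 1;
        }
        req->length = REQ_LEN;
        req->complete = rx_complete;
        fake_ep_queue(req);             /* mirrors dbgp_enable_ep_req(): alloc, arm, queue */

        free(req->buf);
        free(req);
        return 0;
}
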
/linux-4.1.27/fs/nfs/
H A Dpagelist.c61 hdr->req = nfs_list_entry(mirror->pg_list.next); nfs_pgheader_init()
63 hdr->cred = hdr->req->wb_context->cred; nfs_pgheader_init()
64 hdr->io_start = req_offset(hdr->req); nfs_pgheader_init()
155 * @req - request in group that is to be locked
168 nfs_page_group_lock(struct nfs_page *req, bool nonblock) nfs_page_group_lock() argument
170 struct nfs_page *head = req->wb_head; nfs_page_group_lock()
186 * @req - a request in the group
191 nfs_page_group_lock_wait(struct nfs_page *req) nfs_page_group_lock_wait() argument
193 struct nfs_page *head = req->wb_head; nfs_page_group_lock_wait()
203 * @req - request in group that is to be unlocked
206 nfs_page_group_unlock(struct nfs_page *req) nfs_page_group_unlock() argument
208 struct nfs_page *head = req->wb_head; nfs_page_group_unlock()
224 nfs_page_group_sync_on_bit_locked(struct nfs_page *req, unsigned int bit) nfs_page_group_sync_on_bit_locked() argument
226 struct nfs_page *head = req->wb_head; nfs_page_group_sync_on_bit_locked()
230 WARN_ON_ONCE(test_and_set_bit(bit, &req->wb_flags)); nfs_page_group_sync_on_bit_locked()
232 tmp = req->wb_this_page; nfs_page_group_sync_on_bit_locked()
233 while (tmp != req) { nfs_page_group_sync_on_bit_locked()
240 tmp = req; nfs_page_group_sync_on_bit_locked()
244 } while (tmp != req); nfs_page_group_sync_on_bit_locked()
252 * @req - request in page group
255 bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit) nfs_page_group_sync_on_bit() argument
259 nfs_page_group_lock(req, false); nfs_page_group_sync_on_bit()
260 ret = nfs_page_group_sync_on_bit_locked(req, bit); nfs_page_group_sync_on_bit()
261 nfs_page_group_unlock(req); nfs_page_group_sync_on_bit()
267 * nfs_page_group_init - Initialize the page group linkage for @req
268 * @req - a new nfs request
269 * @prev - the previous request in page group, or NULL if @req is the first
273 nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev) nfs_page_group_init() argument
276 WARN_ON_ONCE(prev == req); nfs_page_group_init()
280 req->wb_head = req; nfs_page_group_init()
281 req->wb_this_page = req; nfs_page_group_init()
286 req->wb_head = prev->wb_head; nfs_page_group_init()
287 req->wb_this_page = prev->wb_this_page; nfs_page_group_init()
288 prev->wb_this_page = req; nfs_page_group_init()
292 kref_get(&req->wb_head->wb_kref); nfs_page_group_init()
298 inode = page_file_mapping(req->wb_page)->host; nfs_page_group_init()
299 set_bit(PG_INODE_REF, &req->wb_flags); nfs_page_group_init()
300 kref_get(&req->wb_kref); nfs_page_group_init()
310 * @req - request that no longer needs the page group
318 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); nfs_page_group_destroy() local
322 if (req->wb_head != req) nfs_page_group_destroy()
323 nfs_release_request(req->wb_head); nfs_page_group_destroy()
325 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN)) nfs_page_group_destroy()
328 tmp = req; nfs_page_group_destroy()
336 } while (tmp != req); nfs_page_group_destroy()
356 struct nfs_page *req; nfs_create_request() local
362 req = nfs_page_alloc(); nfs_create_request()
363 if (req == NULL) nfs_create_request()
369 nfs_page_free(req); nfs_create_request()
372 req->wb_lock_context = l_ctx; nfs_create_request()
378 req->wb_page = page; nfs_create_request()
379 req->wb_index = page_file_index(page); nfs_create_request()
381 req->wb_offset = offset; nfs_create_request()
382 req->wb_pgbase = offset; nfs_create_request()
383 req->wb_bytes = count; nfs_create_request()
384 req->wb_context = get_nfs_open_context(ctx); nfs_create_request()
385 kref_init(&req->wb_kref); nfs_create_request()
386 nfs_page_group_init(req, last); nfs_create_request()
387 return req; nfs_create_request()
392 * @req:
394 void nfs_unlock_request(struct nfs_page *req) nfs_unlock_request() argument
396 if (!NFS_WBACK_BUSY(req)) { nfs_unlock_request()
401 clear_bit(PG_BUSY, &req->wb_flags); nfs_unlock_request()
403 wake_up_bit(&req->wb_flags, PG_BUSY); nfs_unlock_request()
408 * @req:
410 void nfs_unlock_and_release_request(struct nfs_page *req) nfs_unlock_and_release_request() argument
412 nfs_unlock_request(req); nfs_unlock_and_release_request()
413 nfs_release_request(req); nfs_unlock_and_release_request()
418 * @req:
423 static void nfs_clear_request(struct nfs_page *req) nfs_clear_request() argument
425 struct page *page = req->wb_page; nfs_clear_request()
426 struct nfs_open_context *ctx = req->wb_context; nfs_clear_request()
427 struct nfs_lock_context *l_ctx = req->wb_lock_context; nfs_clear_request()
431 req->wb_page = NULL; nfs_clear_request()
436 req->wb_lock_context = NULL; nfs_clear_request()
440 req->wb_context = NULL; nfs_clear_request()
446 * @req: request to release
450 void nfs_free_request(struct nfs_page *req) nfs_free_request() argument
452 WARN_ON_ONCE(req->wb_this_page != req); nfs_free_request()
455 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags)); nfs_free_request()
456 WARN_ON_ONCE(test_bit(PG_UNLOCKPAGE, &req->wb_flags)); nfs_free_request()
457 WARN_ON_ONCE(test_bit(PG_UPTODATE, &req->wb_flags)); nfs_free_request()
458 WARN_ON_ONCE(test_bit(PG_WB_END, &req->wb_flags)); nfs_free_request()
459 WARN_ON_ONCE(test_bit(PG_REMOVE, &req->wb_flags)); nfs_free_request()
462 nfs_clear_request(req); nfs_free_request()
463 nfs_page_free(req); nfs_free_request()
466 void nfs_release_request(struct nfs_page *req) nfs_release_request() argument
468 kref_put(&req->wb_kref, nfs_page_group_destroy); nfs_release_request()
473 * @req: request to wait upon.
479 nfs_wait_on_request(struct nfs_page *req) nfs_wait_on_request() argument
481 return wait_on_bit_io(&req->wb_flags, PG_BUSY, nfs_wait_on_request()
489 * @req: this request
491 * Returns zero if @req can be coalesced into @desc, otherwise it returns
495 struct nfs_page *prev, struct nfs_page *req) nfs_generic_pg_test()
510 if (((mirror->pg_count + req->wb_bytes) >> PAGE_SHIFT) * nfs_generic_pg_test()
514 return min(mirror->pg_bsize - mirror->pg_count, (size_t)req->wb_bytes); nfs_generic_pg_test()
570 struct nfs_page *req = hdr->req; nfs_pgio_rpcsetup() local
576 hdr->args.offset = req_offset(req) + offset; nfs_pgio_rpcsetup()
579 hdr->args.pgbase = req->wb_pgbase + offset; nfs_pgio_rpcsetup()
582 hdr->args.context = get_nfs_open_context(req->wb_context); nfs_pgio_rpcsetup()
583 hdr->args.lock_context = req->wb_lock_context; nfs_pgio_rpcsetup()
640 "(req %s/%llu, %u bytes @ offset %llu)\n", nfs_initiate_pgio()
796 struct nfs_page *req; nfs_generic_pgio() local
812 req = nfs_list_entry(head->next); nfs_generic_pgio()
813 nfs_list_remove_request(req); nfs_generic_pgio()
814 nfs_list_add_request(req, &hdr->pages); nfs_generic_pgio()
816 if (!last_page || last_page != req->wb_page) { nfs_generic_pgio()
820 *pages++ = last_page = req->wb_page; nfs_generic_pgio()
869 struct nfs_page *req) nfs_pageio_setup_mirroring()
876 mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req); nfs_pageio_setup_mirroring()
923 * @req: pointer to nfs_page
925 * The nfs_page structures 'prev' and 'req' are compared to ensure that the
932 struct nfs_page *req, nfs_can_coalesce_requests()
939 if (!nfs_match_open_context(req->wb_context, prev->wb_context)) nfs_can_coalesce_requests()
941 flctx = d_inode(req->wb_context->dentry)->i_flctx; nfs_can_coalesce_requests()
945 !nfs_match_lock_context(req->wb_lock_context, nfs_can_coalesce_requests()
948 if (req_offset(req) != req_offset(prev) + prev->wb_bytes) nfs_can_coalesce_requests()
950 if (req->wb_page == prev->wb_page) { nfs_can_coalesce_requests()
951 if (req->wb_pgbase != prev->wb_pgbase + prev->wb_bytes) nfs_can_coalesce_requests()
954 if (req->wb_pgbase != 0 || nfs_can_coalesce_requests()
959 size = pgio->pg_ops->pg_test(pgio, prev, req); nfs_can_coalesce_requests()
960 WARN_ON_ONCE(size > req->wb_bytes); nfs_can_coalesce_requests()
961 if (size && size < req->wb_bytes) nfs_can_coalesce_requests()
962 req->wb_bytes = size; nfs_can_coalesce_requests()
969 * @req: request
971 * Returns true if the request 'req' was successfully coalesced into the
975 struct nfs_page *req) nfs_pageio_do_add_request()
985 desc->pg_ops->pg_init(desc, req); nfs_pageio_do_add_request()
986 mirror->pg_base = req->wb_pgbase; nfs_pageio_do_add_request()
988 if (!nfs_can_coalesce_requests(prev, req, desc)) nfs_pageio_do_add_request()
990 nfs_list_remove_request(req); nfs_pageio_do_add_request()
991 nfs_list_add_request(req, &mirror->pg_list); nfs_pageio_do_add_request()
992 mirror->pg_count += req->wb_bytes; nfs_pageio_do_add_request()
1020 * @req: request
1025 * Returns true if the request 'req' was successfully coalesced into the
1029 struct nfs_page *req) __nfs_pageio_add_request()
1037 nfs_page_group_lock(req, false); __nfs_pageio_add_request()
1039 subreq = req; __nfs_pageio_add_request()
1051 nfs_page_group_unlock(req); __nfs_pageio_add_request()
1059 nfs_page_group_lock(req, false); __nfs_pageio_add_request()
1073 subreq = nfs_create_request(req->wb_context, __nfs_pageio_add_request()
1074 req->wb_page, __nfs_pageio_add_request()
1080 subreq->wb_index = req->wb_index; __nfs_pageio_add_request()
1084 nfs_page_group_unlock(req); __nfs_pageio_add_request()
1088 nfs_page_group_unlock(req); __nfs_pageio_add_request()
1107 struct nfs_page *req; nfs_do_recoalesce() local
1109 req = list_first_entry(&head, struct nfs_page, wb_list); nfs_do_recoalesce()
1110 nfs_list_remove_request(req); nfs_do_recoalesce()
1111 if (__nfs_pageio_add_request(desc, req)) nfs_do_recoalesce()
1125 struct nfs_page *req) nfs_pageio_add_request_mirror()
1130 ret = __nfs_pageio_add_request(desc, req); nfs_pageio_add_request_mirror()
1142 struct nfs_page *req) nfs_pageio_add_request()
1148 pgbase = req->wb_pgbase; nfs_pageio_add_request()
1149 offset = req->wb_offset; nfs_pageio_add_request()
1150 bytes = req->wb_bytes; nfs_pageio_add_request()
1152 nfs_pageio_setup_mirroring(desc, req); nfs_pageio_add_request()
1156 nfs_page_group_lock(req, false); nfs_pageio_add_request()
1159 for (lastreq = req->wb_head; nfs_pageio_add_request()
1160 lastreq->wb_this_page != req->wb_head; nfs_pageio_add_request()
1164 dupreq = nfs_create_request(req->wb_context, nfs_pageio_add_request()
1165 req->wb_page, lastreq, pgbase, bytes); nfs_pageio_add_request()
1168 nfs_page_group_unlock(req); nfs_pageio_add_request()
1173 nfs_page_group_unlock(req); nfs_pageio_add_request()
1175 dupreq->wb_index = req->wb_index; nfs_pageio_add_request()
1177 dupreq = req; nfs_pageio_add_request()
1228 struct nfs_page *req = nfs_list_entry(hdr->pages.next); nfs_pageio_resend() local
1230 nfs_list_remove_request(req); nfs_pageio_resend()
1231 if (!nfs_pageio_add_request(desc, req)) nfs_pageio_resend()
1232 nfs_list_add_request(req, &failed); nfs_pageio_resend()
494 nfs_generic_pg_test(struct nfs_pageio_descriptor *desc, struct nfs_page *prev, struct nfs_page *req) nfs_generic_pg_test() argument
868 nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio, struct nfs_page *req) nfs_pageio_setup_mirroring() argument
931 nfs_can_coalesce_requests(struct nfs_page *prev, struct nfs_page *req, struct nfs_pageio_descriptor *pgio) nfs_can_coalesce_requests() argument
974 nfs_pageio_do_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req) nfs_pageio_do_add_request() argument
1028 __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req) __nfs_pageio_add_request() argument
1124 nfs_pageio_add_request_mirror(struct nfs_pageio_descriptor *desc, struct nfs_page *req) nfs_pageio_add_request_mirror() argument
1141 nfs_pageio_add_request(struct nfs_pageio_descriptor *desc, struct nfs_page *req) nfs_pageio_add_request() argument
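
The pagelist.c hits revolve around nfs_page "page groups": subrequests covering one page are kept on a circular list through wb_this_page, with wb_head pointing at the group head, and group-wide state is synchronised through the PG_* bits. The sketch below reproduces just the circular-list linkage and a whole-group walk under assumed, simplified types (page_req, group_init, group_total_bytes); it illustrates the data structure only, not the NFS implementation.

#include <stdio.h>

/* Hypothetical stand-in for struct nfs_page's group linkage fields. */
struct page_req {
        struct page_req *head;          /* wb_head analogue */
        struct page_req *next;          /* wb_this_page analogue: circular list */
        unsigned int offset;
        unsigned int bytes;
};

/* Mirrors the shape of nfs_page_group_init(): the first request points at itself,
 * later requests are spliced in after 'prev' and inherit the head pointer. */
static void group_init(struct page_req *req, struct page_req *prev)
{
        if (!prev) {
                req->head = req;
                req->next = req;
        } else {
                req->head = prev->head;
                req->next = prev->next;
                prev->next = req;
        }
}

/* Walk the whole circular group, the way the PG_* sync helpers iterate wb_this_page. */
static unsigned int group_total_bytes(struct page_req *req)
{
        struct page_req *head = req->head;
        struct page_req *tmp = head;
        unsigned int total = 0;

        do {
                total += tmp->bytes;
                tmp = tmp->next;
        } while (tmp != head);
        return total;
}

int main(void)
{
        struct page_req a = { .offset = 0,    .bytes = 512 };
        struct page_req b = { .offset = 512,  .bytes = 512 };
        struct page_req c = { .offset = 1024, .bytes = 1024 };

        group_init(&a, NULL);
        group_init(&b, &a);
        group_init(&c, &b);

        /* Any member can reach the whole group through its head pointer. */
        printf("group covers %u bytes\n", group_total_bytes(&c));
        return 0;
}
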
H A Dwrite.c44 static void nfs_redirty_request(struct nfs_page *req);
49 static void nfs_clear_request_commit(struct nfs_page *req);
110 struct nfs_page *req = NULL; nfs_page_find_head_request_locked() local
113 req = (struct nfs_page *)page_private(page); nfs_page_find_head_request_locked()
115 req = nfs_page_search_commits_for_head_request_locked(nfsi, nfs_page_find_head_request_locked()
118 if (req) { nfs_page_find_head_request_locked()
119 WARN_ON_ONCE(req->wb_head != req); nfs_page_find_head_request_locked()
120 kref_get(&req->wb_kref); nfs_page_find_head_request_locked()
123 return req; nfs_page_find_head_request_locked()
134 struct nfs_page *req = NULL; nfs_page_find_head_request() local
137 req = nfs_page_find_head_request_locked(NFS_I(inode), page); nfs_page_find_head_request()
139 return req; nfs_page_find_head_request()
185 struct nfs_page *req; nfs_page_group_search_locked() local
190 req = head; nfs_page_group_search_locked()
192 if (page_offset >= req->wb_pgbase && nfs_page_group_search_locked()
193 page_offset < (req->wb_pgbase + req->wb_bytes)) nfs_page_group_search_locked()
194 return req; nfs_page_group_search_locked()
196 req = req->wb_this_page; nfs_page_group_search_locked()
197 } while (req != head); nfs_page_group_search_locked()
209 static bool nfs_page_group_covers_page(struct nfs_page *req) nfs_page_group_covers_page() argument
213 unsigned int len = nfs_page_length(req->wb_page); nfs_page_group_covers_page()
215 nfs_page_group_lock(req, false); nfs_page_group_covers_page()
218 tmp = nfs_page_group_search_locked(req->wb_head, pos); nfs_page_group_covers_page()
226 nfs_page_group_unlock(req); nfs_page_group_covers_page()
234 static void nfs_mark_uptodate(struct nfs_page *req) nfs_mark_uptodate() argument
236 if (PageUptodate(req->wb_page)) nfs_mark_uptodate()
238 if (!nfs_page_group_covers_page(req)) nfs_mark_uptodate()
240 SetPageUptodate(req->wb_page); nfs_mark_uptodate()
279 static void nfs_end_page_writeback(struct nfs_page *req) nfs_end_page_writeback() argument
281 struct inode *inode = page_file_mapping(req->wb_page)->host; nfs_end_page_writeback()
284 if (!nfs_page_group_sync_on_bit(req, PG_WB_END)) nfs_end_page_writeback()
287 end_page_writeback(req->wb_page); nfs_end_page_writeback()
294 * @req - an nfs request
295 * clears all page group related bits from @req
298 nfs_page_group_clear_bits(struct nfs_page *req) nfs_page_group_clear_bits() argument
300 clear_bit(PG_TEARDOWN, &req->wb_flags); nfs_page_group_clear_bits()
301 clear_bit(PG_UNLOCKPAGE, &req->wb_flags); nfs_page_group_clear_bits()
302 clear_bit(PG_UPTODATE, &req->wb_flags); nfs_page_group_clear_bits()
303 clear_bit(PG_WB_END, &req->wb_flags); nfs_page_group_clear_bits()
304 clear_bit(PG_REMOVE, &req->wb_flags); nfs_page_group_clear_bits()
309 * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
315 * @req - request that couldn't lock and needs to wait on the req bit lock
325 struct nfs_page *req, bool nonblock)
332 for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
335 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
338 kref_get(&req->wb_kref);
347 ret = nfs_wait_on_request(req);
350 nfs_release_request(req); variable
409 * nfs_lock_and_join_requests - join all subreqs to the head req and return
555 struct nfs_page *req; nfs_page_async_flush() local
558 req = nfs_lock_and_join_requests(page, nonblock); nfs_page_async_flush()
559 if (!req) nfs_page_async_flush()
561 ret = PTR_ERR(req); nfs_page_async_flush()
562 if (IS_ERR(req)) nfs_page_async_flush()
566 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); nfs_page_async_flush()
569 if (!nfs_pageio_add_request(pgio, req)) { nfs_page_async_flush()
570 nfs_redirty_request(req); nfs_page_async_flush()
668 static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) nfs_inode_add_request() argument
672 WARN_ON_ONCE(req->wb_this_page != req); nfs_inode_add_request()
675 nfs_lock_request(req); nfs_inode_add_request()
685 if (likely(!PageSwapCache(req->wb_page))) { nfs_inode_add_request()
686 set_bit(PG_MAPPED, &req->wb_flags); nfs_inode_add_request()
687 SetPagePrivate(req->wb_page); nfs_inode_add_request()
688 set_page_private(req->wb_page, (unsigned long)req); nfs_inode_add_request()
695 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags)); nfs_inode_add_request()
696 kref_get(&req->wb_kref); nfs_inode_add_request()
703 static void nfs_inode_remove_request(struct nfs_page *req) nfs_inode_remove_request() argument
705 struct inode *inode = d_inode(req->wb_context->dentry); nfs_inode_remove_request()
709 if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) { nfs_inode_remove_request()
710 head = req->wb_head; nfs_inode_remove_request()
728 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) nfs_inode_remove_request()
729 nfs_release_request(req); nfs_inode_remove_request()
733 nfs_mark_request_dirty(struct nfs_page *req) nfs_mark_request_dirty() argument
735 __set_page_dirty_nobuffers(req->wb_page); nfs_mark_request_dirty()
772 * @req: pointer to a struct nfs_page
784 nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, nfs_request_add_commit_list() argument
787 set_bit(PG_CLEAN, &(req)->wb_flags); nfs_request_add_commit_list()
789 nfs_list_add_request(req, dst); nfs_request_add_commit_list()
793 nfs_mark_page_unstable(req->wb_page); nfs_request_add_commit_list()
799 * @req: pointer to a nfs_page
809 nfs_request_remove_commit_list(struct nfs_page *req, nfs_request_remove_commit_list() argument
812 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) nfs_request_remove_commit_list()
814 nfs_list_remove_request(req); nfs_request_remove_commit_list()
844 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, nfs_mark_request_commit() argument
847 if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx)) nfs_mark_request_commit()
849 nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo); nfs_mark_request_commit()
861 nfs_clear_request_commit(struct nfs_page *req) nfs_clear_request_commit() argument
863 if (test_bit(PG_CLEAN, &req->wb_flags)) { nfs_clear_request_commit()
864 struct inode *inode = d_inode(req->wb_context->dentry); nfs_clear_request_commit()
868 if (!pnfs_clear_request_commit(req, &cinfo)) { nfs_clear_request_commit()
869 nfs_request_remove_commit_list(req, &cinfo); nfs_clear_request_commit()
871 nfs_clear_page_commit(req->wb_page); nfs_clear_request_commit()
891 struct nfs_page *req = nfs_list_entry(hdr->pages.next); nfs_write_completion() local
893 bytes += req->wb_bytes; nfs_write_completion()
894 nfs_list_remove_request(req); nfs_write_completion()
897 nfs_set_pageerror(req->wb_page); nfs_write_completion()
898 nfs_context_set_write_error(req->wb_context, hdr->error); nfs_write_completion()
902 memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); nfs_write_completion()
903 nfs_mark_request_commit(req, hdr->lseg, &cinfo, nfs_write_completion()
908 nfs_inode_remove_request(req); nfs_write_completion()
910 nfs_unlock_request(req); nfs_write_completion()
911 nfs_end_page_writeback(req); nfs_write_completion()
912 nfs_release_request(req); nfs_write_completion()
929 struct nfs_page *req, *tmp; nfs_scan_commit_list() local
932 list_for_each_entry_safe(req, tmp, src, wb_list) { list_for_each_entry_safe()
933 if (!nfs_lock_request(req)) list_for_each_entry_safe()
935 kref_get(&req->wb_kref); list_for_each_entry_safe()
937 list_safe_reset_next(req, tmp, wb_list); list_for_each_entry_safe()
938 nfs_request_remove_commit_list(req, cinfo); list_for_each_entry_safe()
939 nfs_list_add_request(req, dst); list_for_each_entry_safe()
986 struct nfs_page *req; nfs_try_to_update_request() local
998 req = nfs_page_find_head_request_locked(NFS_I(inode), page); nfs_try_to_update_request()
999 if (req == NULL) nfs_try_to_update_request()
1003 WARN_ON_ONCE(req->wb_head != req); nfs_try_to_update_request()
1004 WARN_ON_ONCE(req->wb_this_page != req); nfs_try_to_update_request()
1006 rqend = req->wb_offset + req->wb_bytes; nfs_try_to_update_request()
1014 || end < req->wb_offset) nfs_try_to_update_request()
1017 if (nfs_lock_request(req)) nfs_try_to_update_request()
1022 error = nfs_wait_on_request(req); nfs_try_to_update_request()
1023 nfs_release_request(req); nfs_try_to_update_request()
1030 if (offset < req->wb_offset) { nfs_try_to_update_request()
1031 req->wb_offset = offset; nfs_try_to_update_request()
1032 req->wb_pgbase = offset; nfs_try_to_update_request()
1035 req->wb_bytes = end - req->wb_offset; nfs_try_to_update_request()
1037 req->wb_bytes = rqend - req->wb_offset; nfs_try_to_update_request()
1039 if (req) nfs_try_to_update_request()
1040 nfs_clear_request_commit(req); nfs_try_to_update_request()
1042 return req; nfs_try_to_update_request()
1045 nfs_release_request(req); nfs_try_to_update_request()
1062 struct nfs_page *req; nfs_setup_write_request() local
1064 req = nfs_try_to_update_request(inode, page, offset, bytes); nfs_setup_write_request()
1065 if (req != NULL) nfs_setup_write_request()
1067 req = nfs_create_request(ctx, page, NULL, offset, bytes); nfs_setup_write_request()
1068 if (IS_ERR(req)) nfs_setup_write_request()
1070 nfs_inode_add_request(inode, req); nfs_setup_write_request()
1072 return req; nfs_setup_write_request()
1078 struct nfs_page *req; nfs_writepage_setup() local
1080 req = nfs_setup_write_request(ctx, page, offset, count); nfs_writepage_setup()
1081 if (IS_ERR(req)) nfs_writepage_setup()
1082 return PTR_ERR(req); nfs_writepage_setup()
1085 nfs_mark_uptodate(req); nfs_writepage_setup()
1086 nfs_mark_request_dirty(req); nfs_writepage_setup()
1087 nfs_unlock_and_release_request(req); nfs_writepage_setup()
1096 struct nfs_page *req; nfs_flush_incompatible() local
1107 req = nfs_page_find_head_request(page); nfs_flush_incompatible()
1108 if (req == NULL) nfs_flush_incompatible()
1110 l_ctx = req->wb_lock_context; nfs_flush_incompatible()
1111 do_flush = req->wb_page != page || req->wb_context != ctx; nfs_flush_incompatible()
1113 do_flush |= req->wb_this_page != req; nfs_flush_incompatible()
1120 nfs_release_request(req); nfs_flush_incompatible()
1289 static void nfs_redirty_request(struct nfs_page *req) nfs_redirty_request() argument
1291 nfs_mark_request_dirty(req); nfs_redirty_request()
1292 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); nfs_redirty_request()
1293 nfs_unlock_request(req); nfs_redirty_request()
1294 nfs_end_page_writeback(req); nfs_redirty_request()
1295 nfs_release_request(req); nfs_redirty_request()
1300 struct nfs_page *req; nfs_async_write_error() local
1303 req = nfs_list_entry(head->next); nfs_async_write_error()
1304 nfs_list_remove_request(req); nfs_async_write_error()
1305 nfs_redirty_request(req); nfs_async_write_error()
1583 struct nfs_page *req; nfs_get_lwb() local
1585 list_for_each_entry(req, head, wb_list) nfs_get_lwb()
1586 if (lwb < (req_offset(req) + req->wb_bytes)) nfs_get_lwb()
1587 lwb = req_offset(req) + req->wb_bytes; nfs_get_lwb()
1634 struct nfs_page *req; nfs_retry_commit() local
1637 req = nfs_list_entry(page_list->next); nfs_retry_commit()
1638 nfs_list_remove_request(req); nfs_retry_commit()
1639 nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx); nfs_retry_commit()
1641 nfs_clear_page_commit(req->wb_page); nfs_retry_commit()
1642 nfs_unlock_and_release_request(req); nfs_retry_commit()
1688 struct nfs_page *req; nfs_commit_release_pages() local
1694 req = nfs_list_entry(data->pages.next); nfs_commit_release_pages()
1695 nfs_list_remove_request(req); nfs_commit_release_pages()
1696 nfs_clear_page_commit(req->wb_page); nfs_commit_release_pages()
1699 req->wb_context->dentry->d_sb->s_id, nfs_commit_release_pages()
1700 (unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)), nfs_commit_release_pages()
1701 req->wb_bytes, nfs_commit_release_pages()
1702 (long long)req_offset(req)); nfs_commit_release_pages()
1704 nfs_context_set_write_error(req->wb_context, status); nfs_commit_release_pages()
1705 nfs_inode_remove_request(req); nfs_commit_release_pages()
1712 if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) { nfs_commit_release_pages()
1714 nfs_inode_remove_request(req); nfs_commit_release_pages()
1720 nfs_mark_request_dirty(req); nfs_commit_release_pages()
1721 set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags); nfs_commit_release_pages()
1723 nfs_unlock_and_release_request(req); nfs_commit_release_pages()
1871 struct nfs_page *req; nfs_wb_page_cancel() local
1878 req = nfs_lock_and_join_requests(page, false); nfs_wb_page_cancel()
1880 if (IS_ERR(req)) { nfs_wb_page_cancel()
1881 ret = PTR_ERR(req); nfs_wb_page_cancel()
1882 } else if (req) { nfs_wb_page_cancel()
1887 nfs_inode_remove_request(req); nfs_wb_page_cancel()
1888 nfs_unlock_and_release_request(req); nfs_wb_page_cancel()
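
The write.c hits centre on commit handling: a request is flagged PG_CLEAN and put on a commit list (nfs_request_add_commit_list), and a later scan detaches every such request for the COMMIT RPC (nfs_scan_commit_list), clearing the flag on removal. The sketch below models that flag-plus-list movement with a hypothetical wreq type and an intrusive singly-linked list; locking, kref counting and the pNFS paths are deliberately left out.

#include <stdio.h>

#define PG_CLEAN 0x1                    /* stand-in for the PG_CLEAN request flag */

/* Hypothetical write request with an intrusive singly-linked commit list. */
struct wreq {
        unsigned int flags;
        unsigned int bytes;
        struct wreq *next;
};

/* Analogue of nfs_request_add_commit_list(): mark clean and push onto the commit list. */
static void commit_list_add(struct wreq **list, struct wreq *req)
{
        req->flags |= PG_CLEAN;
        req->next = *list;
        *list = req;
}

/* Analogue of nfs_scan_commit_list(): detach every clean request for the COMMIT RPC. */
static unsigned int commit_list_scan(struct wreq **list, struct wreq **dst)
{
        unsigned int moved = 0;

        while (*list) {
                struct wreq *req = *list;

                *list = req->next;
                req->flags &= ~PG_CLEAN;        /* the remove path clears the flag too */
                req->next = *dst;
                *dst = req;
                moved++;
        }
        return moved;
}

int main(void)
{
        struct wreq a = { .bytes = 4096 }, b = { .bytes = 8192 };
        struct wreq *commit = NULL, *to_send = NULL;

        commit_list_add(&commit, &a);
        commit_list_add(&commit, &b);
        printf("scanned %u requests for COMMIT\n", commit_list_scan(&commit, &to_send));
        return 0;
}
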
H A Dread.c121 static void nfs_readpage_release(struct nfs_page *req) nfs_readpage_release() argument
123 struct inode *inode = d_inode(req->wb_context->dentry); nfs_readpage_release()
126 (unsigned long long)NFS_FILEID(inode), req->wb_bytes, nfs_readpage_release()
127 (long long)req_offset(req)); nfs_readpage_release()
129 if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) { nfs_readpage_release()
130 if (PageUptodate(req->wb_page)) nfs_readpage_release()
131 nfs_readpage_to_fscache(inode, req->wb_page, 0); nfs_readpage_release()
133 unlock_page(req->wb_page); nfs_readpage_release()
135 nfs_release_request(req); nfs_readpage_release()
138 static void nfs_page_group_set_uptodate(struct nfs_page *req) nfs_page_group_set_uptodate() argument
140 if (nfs_page_group_sync_on_bit(req, PG_UPTODATE)) nfs_page_group_set_uptodate()
141 SetPageUptodate(req->wb_page); nfs_page_group_set_uptodate()
151 struct nfs_page *req = nfs_list_entry(hdr->pages.next); nfs_read_completion() local
152 struct page *page = req->wb_page; nfs_read_completion()
153 unsigned long start = req->wb_pgbase; nfs_read_completion()
154 unsigned long end = req->wb_pgbase + req->wb_bytes; nfs_read_completion()
165 } else if (hdr->good_bytes - bytes < req->wb_bytes) { nfs_read_completion()
169 WARN_ON(start < req->wb_pgbase); nfs_read_completion()
173 bytes += req->wb_bytes; nfs_read_completion()
176 nfs_page_group_set_uptodate(req); nfs_read_completion()
178 nfs_page_group_set_uptodate(req); nfs_read_completion()
179 nfs_list_remove_request(req); nfs_read_completion()
180 nfs_readpage_release(req); nfs_read_completion()
201 struct nfs_page *req; nfs_async_read_error() local
204 req = nfs_list_entry(head->next); nfs_async_read_error()
205 nfs_list_remove_request(req); nfs_async_read_error()
206 nfs_readpage_release(req); nfs_async_read_error()
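read.c completes reads through page groups: a page may be covered by several sub-requests, and nfs_page_group_sync_on_bit() returns true only for the last sub-request to set the given bit, so page-wide work such as unlock_page() or SetPageUptodate() runs exactly once. A compressed sketch of that idiom, built only from the helpers visible in the hits:

    /* Sketch: one-shot page-wide completion via the page-group bit. */
    static void example_release(struct nfs_page *req)
    {
        struct inode *inode = d_inode(req->wb_context->dentry);

        if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
            if (PageUptodate(req->wb_page))
                nfs_readpage_to_fscache(inode, req->wb_page, 0);
            unlock_page(req->wb_page);     /* only the last sub-request unlocks the page */
        }
        nfs_release_request(req);          /* every sub-request drops its own reference */
    }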
/linux-4.1.27/net/tipc/
H A Ddiscover.c268 * @req: ptr to link request structure
273 static void disc_update(struct tipc_link_req *req) disc_update() argument
275 if (!req->num_nodes) { disc_update()
276 if ((req->timer_intv == TIPC_LINK_REQ_INACTIVE) || disc_update()
277 (req->timer_intv > TIPC_LINK_REQ_FAST)) { disc_update()
278 req->timer_intv = TIPC_LINK_REQ_INIT; disc_update()
279 mod_timer(&req->timer, jiffies + req->timer_intv); disc_update()
286 * @req: ptr to link request structure
288 void tipc_disc_add_dest(struct tipc_link_req *req) tipc_disc_add_dest() argument
290 spin_lock_bh(&req->lock); tipc_disc_add_dest()
291 req->num_nodes++; tipc_disc_add_dest()
292 spin_unlock_bh(&req->lock); tipc_disc_add_dest()
297 * @req: ptr to link request structure
299 void tipc_disc_remove_dest(struct tipc_link_req *req) tipc_disc_remove_dest() argument
301 spin_lock_bh(&req->lock); tipc_disc_remove_dest()
302 req->num_nodes--; tipc_disc_remove_dest()
303 disc_update(req); tipc_disc_remove_dest()
304 spin_unlock_bh(&req->lock); tipc_disc_remove_dest()
315 struct tipc_link_req *req = (struct tipc_link_req *)data; disc_timeout() local
318 spin_lock_bh(&req->lock); disc_timeout()
321 if (tipc_node(req->domain) && req->num_nodes) { disc_timeout()
322 req->timer_intv = TIPC_LINK_REQ_INACTIVE; disc_timeout()
333 tipc_bearer_send(req->net, req->bearer_id, req->buf, &req->dest); disc_timeout()
336 req->timer_intv *= 2; disc_timeout()
337 if (req->num_nodes) disc_timeout()
341 if (req->timer_intv > max_delay) disc_timeout()
342 req->timer_intv = max_delay; disc_timeout()
344 mod_timer(&req->timer, jiffies + req->timer_intv); disc_timeout()
346 spin_unlock_bh(&req->lock); disc_timeout()
361 struct tipc_link_req *req; tipc_disc_create() local
363 req = kmalloc(sizeof(*req), GFP_ATOMIC); tipc_disc_create()
364 if (!req) tipc_disc_create()
366 req->buf = tipc_buf_acquire(MAX_H_SIZE); tipc_disc_create()
367 if (!req->buf) { tipc_disc_create()
368 kfree(req); tipc_disc_create()
372 tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr); tipc_disc_create()
373 memcpy(&req->dest, dest, sizeof(*dest)); tipc_disc_create()
374 req->net = net; tipc_disc_create()
375 req->bearer_id = b_ptr->identity; tipc_disc_create()
376 req->domain = b_ptr->domain; tipc_disc_create()
377 req->num_nodes = 0; tipc_disc_create()
378 req->timer_intv = TIPC_LINK_REQ_INIT; tipc_disc_create()
379 spin_lock_init(&req->lock); tipc_disc_create()
380 setup_timer(&req->timer, disc_timeout, (unsigned long)req); tipc_disc_create()
381 mod_timer(&req->timer, jiffies + req->timer_intv); tipc_disc_create()
382 b_ptr->link_req = req; tipc_disc_create()
383 tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest); tipc_disc_create()
389 * @req: ptr to link request structure
391 void tipc_disc_delete(struct tipc_link_req *req) tipc_disc_delete() argument
393 del_timer_sync(&req->timer); tipc_disc_delete()
394 kfree_skb(req->buf); tipc_disc_delete()
395 kfree(req); tipc_disc_delete()
406 struct tipc_link_req *req = b_ptr->link_req; tipc_disc_reset() local
408 spin_lock_bh(&req->lock); tipc_disc_reset()
409 tipc_disc_init_msg(net, req->buf, DSC_REQ_MSG, b_ptr); tipc_disc_reset()
410 req->net = net; tipc_disc_reset()
411 req->bearer_id = b_ptr->identity; tipc_disc_reset()
412 req->domain = b_ptr->domain; tipc_disc_reset()
413 req->num_nodes = 0; tipc_disc_reset()
414 req->timer_intv = TIPC_LINK_REQ_INIT; tipc_disc_reset()
415 mod_timer(&req->timer, jiffies + req->timer_intv); tipc_disc_reset()
416 tipc_bearer_send(net, req->bearer_id, req->buf, &req->dest); tipc_disc_reset()
417 spin_unlock_bh(&req->lock); tipc_disc_reset()
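discover.c drives neighbour discovery from a self-rearming timer with exponential backoff: each timeout rebroadcasts the request, doubles req->timer_intv, clamps it, and re-arms the timer. The backoff step, condensed from the disc_timeout() lines above (max_delay is computed elsewhere and is assumed here):

    /* Sketch: the backoff/re-arm step performed by disc_timeout() above. */
    spin_lock_bh(&req->lock);
    tipc_bearer_send(req->net, req->bearer_id, req->buf, &req->dest);
    req->timer_intv *= 2;                          /* exponential backoff */
    if (req->timer_intv > max_delay)
        req->timer_intv = max_delay;               /* clamp to the bearer's limit */
    mod_timer(&req->timer, jiffies + req->timer_intv);
    spin_unlock_bh(&req->lock);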
/linux-4.1.27/drivers/staging/lustre/lustre/lov/
H A Dlov_request.c64 struct lov_request *req = list_entry(pos, lov_finish_set() local
67 list_del_init(&req->rq_link); lov_finish_set()
69 if (req->rq_oi.oi_oa) lov_finish_set()
70 OBDO_FREE(req->rq_oi.oi_oa); lov_finish_set()
71 if (req->rq_oi.oi_md) lov_finish_set()
72 OBD_FREE_LARGE(req->rq_oi.oi_md, req->rq_buflen); lov_finish_set()
73 if (req->rq_oi.oi_osfs) lov_finish_set()
74 OBD_FREE(req->rq_oi.oi_osfs, lov_finish_set()
75 sizeof(*req->rq_oi.oi_osfs)); lov_finish_set()
76 OBD_FREE(req, sizeof(*req)); lov_finish_set()
105 struct lov_request *req, int rc) lov_update_set()
107 req->rq_complete = 1; lov_update_set()
108 req->rq_rc = rc; lov_update_set()
118 struct lov_request *req, int rc) lov_update_common_set()
122 lov_update_set(set, req, rc); lov_update_common_set()
125 if (rc && !(lov->lov_tgts[req->rq_idx] && lov_update_common_set()
126 lov->lov_tgts[req->rq_idx]->ltd_active)) lov_update_common_set()
133 void lov_set_add_req(struct lov_request *req, struct lov_request_set *set) lov_set_add_req() argument
135 list_add_tail(&req->rq_link, &set->set_list); lov_set_add_req()
137 req->rq_rqset = set; lov_set_add_req()
205 struct lov_request *req; common_attr_done() local
224 req = list_entry(pos, struct lov_request, rq_link); common_attr_done()
226 if (!req->rq_complete || req->rq_rc) common_attr_done()
228 if (req->rq_oi.oi_oa->o_valid == 0) /* inactive stripe */ common_attr_done()
230 lov_merge_attrs(tmp_oa, req->rq_oi.oi_oa, common_attr_done()
231 req->rq_oi.oi_oa->o_valid, common_attr_done()
232 set->set_oi->oi_md, req->rq_stripe, &attrset); common_attr_done()
299 struct lov_request *req; lov_prep_getattr_set() local
315 OBD_ALLOC(req, sizeof(*req)); lov_prep_getattr_set()
316 if (req == NULL) { lov_prep_getattr_set()
321 req->rq_stripe = i; lov_prep_getattr_set()
322 req->rq_idx = loi->loi_ost_idx; lov_prep_getattr_set()
324 OBDO_ALLOC(req->rq_oi.oi_oa); lov_prep_getattr_set()
325 if (req->rq_oi.oi_oa == NULL) { lov_prep_getattr_set()
326 OBD_FREE(req, sizeof(*req)); lov_prep_getattr_set()
330 memcpy(req->rq_oi.oi_oa, oinfo->oi_oa, lov_prep_getattr_set()
331 sizeof(*req->rq_oi.oi_oa)); lov_prep_getattr_set()
332 req->rq_oi.oi_oa->o_oi = loi->loi_oi; lov_prep_getattr_set()
333 req->rq_oi.oi_cb_up = cb_getattr_update; lov_prep_getattr_set()
334 req->rq_oi.oi_capa = oinfo->oi_capa; lov_prep_getattr_set()
336 lov_set_add_req(req, set); lov_prep_getattr_set()
387 struct lov_request *req; lov_prep_destroy_set() local
398 OBD_ALLOC(req, sizeof(*req)); lov_prep_destroy_set()
399 if (req == NULL) { lov_prep_destroy_set()
404 req->rq_stripe = i; lov_prep_destroy_set()
405 req->rq_idx = loi->loi_ost_idx; lov_prep_destroy_set()
407 OBDO_ALLOC(req->rq_oi.oi_oa); lov_prep_destroy_set()
408 if (req->rq_oi.oi_oa == NULL) { lov_prep_destroy_set()
409 OBD_FREE(req, sizeof(*req)); lov_prep_destroy_set()
413 memcpy(req->rq_oi.oi_oa, src_oa, sizeof(*req->rq_oi.oi_oa)); lov_prep_destroy_set()
414 req->rq_oi.oi_oa->o_oi = loi->loi_oi; lov_prep_destroy_set()
415 lov_set_add_req(req, set); lov_prep_destroy_set()
445 struct lov_request *req, int rc) lov_update_setattr_set()
447 struct lov_obd *lov = &req->rq_rqset->set_exp->exp_obd->u.lov; lov_update_setattr_set()
448 struct lov_stripe_md *lsm = req->rq_rqset->set_oi->oi_md; lov_update_setattr_set()
450 lov_update_set(set, req, rc); lov_update_setattr_set()
453 if (rc && !(lov->lov_tgts[req->rq_idx] && lov_update_setattr_set()
454 lov->lov_tgts[req->rq_idx]->ltd_active)) lov_update_setattr_set()
458 if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLCTIME) lov_update_setattr_set()
459 lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_ctime = lov_update_setattr_set()
460 req->rq_oi.oi_oa->o_ctime; lov_update_setattr_set()
461 if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLMTIME) lov_update_setattr_set()
462 lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_mtime = lov_update_setattr_set()
463 req->rq_oi.oi_oa->o_mtime; lov_update_setattr_set()
464 if (req->rq_oi.oi_oa->o_valid & OBD_MD_FLATIME) lov_update_setattr_set()
465 lsm->lsm_oinfo[req->rq_stripe]->loi_lvb.lvb_atime = lov_update_setattr_set()
466 req->rq_oi.oi_oa->o_atime; lov_update_setattr_set()
504 struct lov_request *req; lov_prep_setattr_set() local
514 OBD_ALLOC(req, sizeof(*req)); lov_prep_setattr_set()
515 if (req == NULL) { lov_prep_setattr_set()
519 req->rq_stripe = i; lov_prep_setattr_set()
520 req->rq_idx = loi->loi_ost_idx; lov_prep_setattr_set()
522 OBDO_ALLOC(req->rq_oi.oi_oa); lov_prep_setattr_set()
523 if (req->rq_oi.oi_oa == NULL) { lov_prep_setattr_set()
524 OBD_FREE(req, sizeof(*req)); lov_prep_setattr_set()
528 memcpy(req->rq_oi.oi_oa, oinfo->oi_oa, lov_prep_setattr_set()
529 sizeof(*req->rq_oi.oi_oa)); lov_prep_setattr_set()
530 req->rq_oi.oi_oa->o_oi = loi->loi_oi; lov_prep_setattr_set()
531 req->rq_oi.oi_oa->o_stripe_idx = i; lov_prep_setattr_set()
532 req->rq_oi.oi_cb_up = cb_setattr_update; lov_prep_setattr_set()
533 req->rq_oi.oi_capa = oinfo->oi_capa; lov_prep_setattr_set()
538 &req->rq_oi.oi_oa->o_size); lov_prep_setattr_set()
540 if (off < 0 && req->rq_oi.oi_oa->o_size) lov_prep_setattr_set()
541 req->rq_oi.oi_oa->o_size--; lov_prep_setattr_set()
544 i, req->rq_oi.oi_oa->o_size, lov_prep_setattr_set()
547 lov_set_add_req(req, set); lov_prep_setattr_set()
729 struct lov_request *req; lov_prep_statfs_set() local
745 OBD_ALLOC(req, sizeof(*req)); lov_prep_statfs_set()
746 if (req == NULL) { lov_prep_statfs_set()
751 OBD_ALLOC(req->rq_oi.oi_osfs, sizeof(*req->rq_oi.oi_osfs)); lov_prep_statfs_set()
752 if (req->rq_oi.oi_osfs == NULL) { lov_prep_statfs_set()
753 OBD_FREE(req, sizeof(*req)); lov_prep_statfs_set()
758 req->rq_idx = i; lov_prep_statfs_set()
759 req->rq_oi.oi_cb_up = cb_statfs_update; lov_prep_statfs_set()
760 req->rq_oi.oi_flags = oinfo->oi_flags; lov_prep_statfs_set()
762 lov_set_add_req(req, set); lov_prep_statfs_set()
104 lov_update_set(struct lov_request_set *set, struct lov_request *req, int rc) lov_update_set() argument
117 lov_update_common_set(struct lov_request_set *set, struct lov_request *req, int rc) lov_update_common_set() argument
444 lov_update_setattr_set(struct lov_request_set *set, struct lov_request *req, int rc) lov_update_setattr_set() argument
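Every lov_prep_*_set() hit above builds one struct lov_request per stripe: allocate the request and its obdo, copy the caller's attributes, retarget them at the stripe's object, set the update callback, then chain the request onto the set. A per-stripe iteration sketch assembled from the getattr hits (the stripe count, the loi lookup, and the handling of allocation failures are simplified assumptions):

    /* Sketch: per-stripe request preparation in the style of lov_prep_getattr_set(). */
    for (i = 0; i < stripe_count; i++) {
        struct lov_oinfo *loi = oinfo->oi_md->lsm_oinfo[i];   /* assumed lookup */
        struct lov_request *req;

        OBD_ALLOC(req, sizeof(*req));
        if (req == NULL)
            break;                              /* real code records the failure and continues */
        req->rq_stripe = i;
        req->rq_idx = loi->loi_ost_idx;

        OBDO_ALLOC(req->rq_oi.oi_oa);
        if (req->rq_oi.oi_oa == NULL) {
            OBD_FREE(req, sizeof(*req));
            break;
        }
        memcpy(req->rq_oi.oi_oa, oinfo->oi_oa, sizeof(*req->rq_oi.oi_oa));
        req->rq_oi.oi_oa->o_oi = loi->loi_oi;   /* retarget to this stripe's object */
        req->rq_oi.oi_cb_up = cb_getattr_update;

        lov_set_add_req(req, set);
    }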
/linux-4.1.27/drivers/i2c/busses/
H A Di2c-opal.c51 static int i2c_opal_send_request(u32 bus_id, struct opal_i2c_request *req) i2c_opal_send_request() argument
64 rc = opal_i2c_request(token, bus_id, req); i2c_opal_send_request()
89 struct opal_i2c_request req; i2c_opal_master_xfer() local
95 memset(&req, 0, sizeof(req)); i2c_opal_master_xfer()
100 req.type = (msgs[0].flags & I2C_M_RD) ? i2c_opal_master_xfer()
102 req.addr = cpu_to_be16(msgs[0].addr); i2c_opal_master_xfer()
103 req.size = cpu_to_be32(msgs[0].len); i2c_opal_master_xfer()
104 req.buffer_ra = cpu_to_be64(__pa(msgs[0].buf)); i2c_opal_master_xfer()
107 req.type = (msgs[1].flags & I2C_M_RD) ? i2c_opal_master_xfer()
109 req.addr = cpu_to_be16(msgs[0].addr); i2c_opal_master_xfer()
110 req.subaddr_sz = msgs[0].len; i2c_opal_master_xfer()
112 req.subaddr = (req.subaddr << 8) | msgs[0].buf[i]; i2c_opal_master_xfer()
113 req.subaddr = cpu_to_be32(req.subaddr); i2c_opal_master_xfer()
114 req.size = cpu_to_be32(msgs[1].len); i2c_opal_master_xfer()
115 req.buffer_ra = cpu_to_be64(__pa(msgs[1].buf)); i2c_opal_master_xfer()
121 rc = i2c_opal_send_request(opal_id, &req); i2c_opal_master_xfer()
133 struct opal_i2c_request req; i2c_opal_smbus_xfer() local
137 memset(&req, 0, sizeof(req)); i2c_opal_smbus_xfer()
139 req.addr = cpu_to_be16(addr); i2c_opal_smbus_xfer()
142 req.buffer_ra = cpu_to_be64(__pa(&data->byte)); i2c_opal_smbus_xfer()
143 req.size = cpu_to_be32(1); i2c_opal_smbus_xfer()
146 req.type = (read_write == I2C_SMBUS_READ) ? i2c_opal_smbus_xfer()
150 req.buffer_ra = cpu_to_be64(__pa(&data->byte)); i2c_opal_smbus_xfer()
151 req.size = cpu_to_be32(1); i2c_opal_smbus_xfer()
152 req.subaddr = cpu_to_be32(command); i2c_opal_smbus_xfer()
153 req.subaddr_sz = 1; i2c_opal_smbus_xfer()
154 req.type = (read_write == I2C_SMBUS_READ) ? i2c_opal_smbus_xfer()
162 req.buffer_ra = cpu_to_be64(__pa(local)); i2c_opal_smbus_xfer()
163 req.size = cpu_to_be32(2); i2c_opal_smbus_xfer()
164 req.subaddr = cpu_to_be32(command); i2c_opal_smbus_xfer()
165 req.subaddr_sz = 1; i2c_opal_smbus_xfer()
166 req.type = (read_write == I2C_SMBUS_READ) ? i2c_opal_smbus_xfer()
170 req.buffer_ra = cpu_to_be64(__pa(&data->block[1])); i2c_opal_smbus_xfer()
171 req.size = cpu_to_be32(data->block[0]); i2c_opal_smbus_xfer()
172 req.subaddr = cpu_to_be32(command); i2c_opal_smbus_xfer()
173 req.subaddr_sz = 1; i2c_opal_smbus_xfer()
174 req.type = (read_write == I2C_SMBUS_READ) ? i2c_opal_smbus_xfer()
181 rc = i2c_opal_send_request(opal_id, &req); i2c_opal_smbus_xfer()
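i2c-opal.c never touches the bus directly: each transfer is packed into a struct opal_i2c_request with big-endian fields and a physical buffer address, then handed to firmware through i2c_opal_send_request(). A minimal sketch of packing a one-byte SMBus-style transfer, modelled on the i2c_opal_smbus_xfer() hits (the request-type constants are not visible above and are therefore omitted):

    /* Sketch: packing a single-byte transfer for OPAL firmware. */
    struct opal_i2c_request req;
    int rc;

    memset(&req, 0, sizeof(req));
    req.addr      = cpu_to_be16(addr);                   /* 7-bit device address */
    req.size      = cpu_to_be32(1);                      /* one data byte */
    req.buffer_ra = cpu_to_be64(__pa(&data->byte));      /* firmware takes a real address */
    /* req.type would select read vs. write here (constant not shown in the hits). */

    rc = i2c_opal_send_request(opal_id, &req);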
/linux-4.1.27/drivers/usb/isp1760/
H A Disp1760-udc.c30 struct usb_request req; member in struct:isp1760_request
46 static inline struct isp1760_request *req_to_udc_req(struct usb_request *req) req_to_udc_req() argument
48 return container_of(req, struct isp1760_request, req); req_to_udc_req()
127 struct isp1760_request *req, isp1760_udc_request_complete()
134 req, status); isp1760_udc_request_complete()
136 req->ep = NULL; isp1760_udc_request_complete()
137 req->req.status = status; isp1760_udc_request_complete()
138 req->req.complete(&ep->ep, &req->req); isp1760_udc_request_complete()
180 struct isp1760_request *req) isp1760_udc_receive()
191 __func__, len, req->req.actual, req->req.length); isp1760_udc_receive()
193 len = min(len, req->req.length - req->req.actual); isp1760_udc_receive()
208 buf = req->req.buf + req->req.actual; isp1760_udc_receive()
219 req->req.actual += len; isp1760_udc_receive()
227 "%s: req %p actual/length %u/%u maxpacket %u packet size %u\n", isp1760_udc_receive()
228 __func__, req, req->req.actual, req->req.length, ep->maxpacket, isp1760_udc_receive()
237 if (req->req.actual == req->req.length || len < ep->maxpacket) { isp1760_udc_receive()
238 list_del(&req->queue); isp1760_udc_receive()
246 struct isp1760_request *req) isp1760_udc_transmit()
249 u32 *buf = req->req.buf + req->req.actual; isp1760_udc_transmit()
252 req->packet_size = min(req->req.length - req->req.actual, isp1760_udc_transmit()
256 __func__, req->packet_size, req->req.actual, isp1760_udc_transmit()
257 req->req.length); isp1760_udc_transmit()
261 if (req->packet_size) isp1760_udc_transmit()
262 isp1760_udc_write(udc, DC_BUFLEN, req->packet_size); isp1760_udc_transmit()
270 for (i = req->packet_size; i > 2; i -= 4, ++buf) isp1760_udc_transmit()
277 if (!req->packet_size) isp1760_udc_transmit()
284 struct isp1760_request *req; isp1760_ep_rx_ready() local
311 req = list_first_entry(&ep->queue, struct isp1760_request, isp1760_ep_rx_ready()
313 complete = isp1760_udc_receive(ep, req); isp1760_ep_rx_ready()
318 isp1760_udc_request_complete(ep, req, 0); isp1760_ep_rx_ready()
325 struct isp1760_request *req; isp1760_ep_tx_complete() local
355 req = list_first_entry(&ep->queue, struct isp1760_request, isp1760_ep_tx_complete()
357 req->req.actual += req->packet_size; isp1760_ep_tx_complete()
359 need_zlp = req->req.actual == req->req.length && isp1760_ep_tx_complete()
360 !(req->req.length % ep->maxpacket) && isp1760_ep_tx_complete()
361 req->packet_size && req->req.zero; isp1760_ep_tx_complete()
364 "TX IRQ: req %p actual/length %u/%u maxpacket %u packet size %u zero %u need zlp %u\n", isp1760_ep_tx_complete()
365 req, req->req.actual, req->req.length, ep->maxpacket, isp1760_ep_tx_complete()
366 req->packet_size, req->req.zero, need_zlp); isp1760_ep_tx_complete()
372 if (req->req.actual == req->req.length && !need_zlp) { isp1760_ep_tx_complete()
373 complete = req; isp1760_ep_tx_complete()
374 list_del(&req->queue); isp1760_ep_tx_complete()
380 req = list_first_entry(&ep->queue, isp1760_ep_tx_complete()
383 req = NULL; isp1760_ep_tx_complete()
392 if (req) isp1760_ep_tx_complete()
393 isp1760_udc_transmit(ep, req); isp1760_ep_tx_complete()
436 struct isp1760_request *req; __isp1760_udc_set_halt() local
438 req = list_first_entry(&ep->queue, __isp1760_udc_set_halt()
440 isp1760_udc_transmit(ep, req); __isp1760_udc_set_halt()
454 const struct usb_ctrlrequest *req) isp1760_udc_get_status()
459 if (req->wLength != cpu_to_le16(2) || req->wValue != cpu_to_le16(0)) isp1760_udc_get_status()
462 switch (req->bRequestType) { isp1760_udc_get_status()
472 ep = isp1760_udc_find_ep(udc, le16_to_cpu(req->wIndex)); isp1760_udc_get_status()
524 struct usb_ctrlrequest *req) isp1760_ep0_setup_standard()
528 switch (req->bRequest) { isp1760_ep0_setup_standard()
530 return isp1760_udc_get_status(udc, req); isp1760_ep0_setup_standard()
533 switch (req->bRequestType) { isp1760_ep0_setup_standard()
540 u16 index = le16_to_cpu(req->wIndex); isp1760_ep0_setup_standard()
543 if (req->wLength != cpu_to_le16(0) || isp1760_ep0_setup_standard()
544 req->wValue != cpu_to_le16(USB_ENDPOINT_HALT)) isp1760_ep0_setup_standard()
577 switch (req->bRequestType) { isp1760_ep0_setup_standard()
584 u16 index = le16_to_cpu(req->wIndex); isp1760_ep0_setup_standard()
587 if (req->wLength != cpu_to_le16(0) || isp1760_ep0_setup_standard()
588 req->wValue != cpu_to_le16(USB_ENDPOINT_HALT)) isp1760_ep0_setup_standard()
612 if (req->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) isp1760_ep0_setup_standard()
615 return isp1760_udc_set_address(udc, le16_to_cpu(req->wValue)); isp1760_ep0_setup_standard()
618 if (req->bRequestType != (USB_DIR_OUT | USB_RECIP_DEVICE)) isp1760_ep0_setup_standard()
625 stall = udc->driver->setup(&udc->gadget, req) < 0; isp1760_ep0_setup_standard()
629 usb_gadget_set_state(&udc->gadget, req->wValue ? isp1760_ep0_setup_standard()
641 return udc->driver->setup(&udc->gadget, req) < 0; isp1760_ep0_setup_standard()
650 } req; isp1760_ep0_setup() local
659 if (count != sizeof(req)) { isp1760_ep0_setup()
669 req.data[0] = isp1760_udc_read(udc, DC_DATAPORT); isp1760_ep0_setup()
670 req.data[1] = isp1760_udc_read(udc, DC_DATAPORT); isp1760_ep0_setup()
679 if (!req.r.wLength) isp1760_ep0_setup()
681 else if (req.r.bRequestType & USB_DIR_IN) isp1760_ep0_setup()
686 udc->ep0_dir = req.r.bRequestType & USB_DIR_IN; isp1760_ep0_setup()
687 udc->ep0_length = le16_to_cpu(req.r.wLength); isp1760_ep0_setup()
693 __func__, req.r.bRequestType, req.r.bRequest, isp1760_ep0_setup()
694 le16_to_cpu(req.r.wValue), le16_to_cpu(req.r.wIndex), isp1760_ep0_setup()
695 le16_to_cpu(req.r.wLength)); isp1760_ep0_setup()
697 if ((req.r.bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) isp1760_ep0_setup()
698 stall = isp1760_ep0_setup_standard(udc, &req.r); isp1760_ep0_setup()
700 stall = udc->driver->setup(&udc->gadget, &req.r) < 0; isp1760_ep0_setup()
775 struct isp1760_request *req, *nreq; isp1760_ep_disable() local
801 list_for_each_entry_safe(req, nreq, &req_list, queue) { isp1760_ep_disable()
802 list_del(&req->queue); isp1760_ep_disable()
803 isp1760_udc_request_complete(uep, req, -ESHUTDOWN); isp1760_ep_disable()
812 struct isp1760_request *req; isp1760_ep_alloc_request() local
814 req = kzalloc(sizeof(*req), gfp_flags); isp1760_ep_alloc_request()
816 return &req->req; isp1760_ep_alloc_request()
821 struct isp1760_request *req = req_to_udc_req(_req); isp1760_ep_free_request() local
823 kfree(req); isp1760_ep_free_request()
829 struct isp1760_request *req = req_to_udc_req(_req); isp1760_ep_queue() local
842 "%s: req %p (%u bytes%s) ep %p(0x%02x)\n", __func__, _req, isp1760_ep_queue()
845 req->ep = uep; isp1760_ep_queue()
851 "%s: invalid length %u for req %p\n", isp1760_ep_queue()
852 __func__, _req->length, req); isp1760_ep_queue()
859 dev_dbg(udc->isp->dev, "%s: transmitting req %p\n", isp1760_ep_queue()
860 __func__, req); isp1760_ep_queue()
862 list_add_tail(&req->queue, &uep->queue); isp1760_ep_queue()
863 isp1760_udc_transmit(uep, req); isp1760_ep_queue()
867 list_add_tail(&req->queue, &uep->queue); isp1760_ep_queue()
885 list_add_tail(&req->queue, &uep->queue); isp1760_ep_queue()
887 isp1760_udc_transmit(uep, req); isp1760_ep_queue()
889 complete = isp1760_udc_receive(uep, req); isp1760_ep_queue()
899 req->ep = NULL; isp1760_ep_queue()
904 isp1760_udc_request_complete(uep, req, 0); isp1760_ep_queue()
911 struct isp1760_request *req = req_to_udc_req(_req); isp1760_ep_dequeue() local
920 if (req->ep != uep) isp1760_ep_dequeue()
921 req = NULL; isp1760_ep_dequeue()
923 list_del(&req->queue); isp1760_ep_dequeue()
927 if (!req) isp1760_ep_dequeue()
930 isp1760_udc_request_complete(uep, req, -ECONNRESET); isp1760_ep_dequeue()
126 isp1760_udc_request_complete(struct isp1760_ep *ep, struct isp1760_request *req, int status) isp1760_udc_request_complete() argument
179 isp1760_udc_receive(struct isp1760_ep *ep, struct isp1760_request *req) isp1760_udc_receive() argument
245 isp1760_udc_transmit(struct isp1760_ep *ep, struct isp1760_request *req) isp1760_udc_transmit() argument
453 isp1760_udc_get_status(struct isp1760_udc *udc, const struct usb_ctrlrequest *req) isp1760_udc_get_status() argument
523 isp1760_ep0_setup_standard(struct isp1760_udc *udc, struct usb_ctrlrequest *req) isp1760_ep0_setup_standard() argument
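The UDC driver wraps the generic struct usb_request inside its own struct isp1760_request and recovers the container with container_of() whenever the gadget core hands a request back. The alloc/free pair from the hits, shown as one self-contained sketch (the NULL check is assumed, since that line is not part of the hit list):

    /* Sketch: alloc_request/free_request built around the embedded usb_request. */
    static struct usb_request *example_alloc_request(struct usb_ep *ep, gfp_t gfp_flags)
    {
        struct isp1760_request *req = kzalloc(sizeof(*req), gfp_flags);

        if (!req)
            return NULL;             /* assumed; not visible in the hits above */
        return &req->req;            /* the gadget core only ever sees the embedded member */
    }

    static void example_free_request(struct usb_ep *ep, struct usb_request *_req)
    {
        kfree(req_to_udc_req(_req)); /* container_of() back to the wrapper, then free */
    }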
/linux-4.1.27/net/9p/
H A Dclient.c239 struct p9_req_t *req; p9_tag_alloc() local
270 req = &c->reqs[row][col]; p9_tag_alloc()
271 if (!req->wq) { p9_tag_alloc()
272 req->wq = kmalloc(sizeof(wait_queue_head_t), GFP_NOFS); p9_tag_alloc()
273 if (!req->wq) p9_tag_alloc()
275 init_waitqueue_head(req->wq); p9_tag_alloc()
278 if (!req->tc) p9_tag_alloc()
279 req->tc = p9_fcall_alloc(alloc_msize); p9_tag_alloc()
280 if (!req->rc) p9_tag_alloc()
281 req->rc = p9_fcall_alloc(alloc_msize); p9_tag_alloc()
282 if (!req->tc || !req->rc) p9_tag_alloc()
285 p9pdu_reset(req->tc); p9_tag_alloc()
286 p9pdu_reset(req->rc); p9_tag_alloc()
288 req->tc->tag = tag-1; p9_tag_alloc()
289 req->status = REQ_STATUS_ALLOC; p9_tag_alloc()
291 return req; p9_tag_alloc()
295 kfree(req->tc); p9_tag_alloc()
296 kfree(req->rc); p9_tag_alloc()
297 kfree(req->wq); p9_tag_alloc()
298 req->tc = req->rc = NULL; p9_tag_alloc()
299 req->wq = NULL; p9_tag_alloc()
406 p9_debug(P9_DEBUG_MUX, "clnt %p req %p tag: %d\n", c, r, tag); p9_free_req()
416 * req: request received
419 void p9_client_cb(struct p9_client *c, struct p9_req_t *req, int status) p9_client_cb() argument
421 p9_debug(P9_DEBUG_MUX, " tag %d\n", req->tc->tag); p9_client_cb()
424 * This barrier is needed to make sure any change made to req before p9_client_cb()
428 req->status = status; p9_client_cb()
430 wake_up(req->wq); p9_client_cb()
431 p9_debug(P9_DEBUG_MUX, "wakeup: %d\n", req->tc->tag); p9_client_cb()
487 * @req: request to parse and check for error conditions
495 static int p9_check_errors(struct p9_client *c, struct p9_req_t *req) p9_check_errors() argument
501 err = p9_parse_header(req->rc, NULL, &type, NULL, 0); p9_check_errors()
506 trace_9p_protocol_dump(c, req->rc); p9_check_errors()
516 err = p9pdu_readf(req->rc, c->proto_version, "s?d", p9_check_errors()
532 err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode); p9_check_errors()
549 * @req: request to parse and check for error conditions
558 static int p9_check_zc_errors(struct p9_client *c, struct p9_req_t *req, p9_check_zc_errors() argument
566 err = p9_parse_header(req->rc, NULL, &type, NULL, 0); p9_check_zc_errors()
571 trace_9p_protocol_dump(c, req->rc); p9_check_zc_errors()
586 len = req->rc->size - req->rc->offset; p9_check_zc_errors()
592 ename = &req->rc->sdata[req->rc->offset]; p9_check_zc_errors()
603 err = p9pdu_readf(req->rc, c->proto_version, "s?d", p9_check_zc_errors()
619 err = p9pdu_readf(req->rc, c->proto_version, "d", &ecode); p9_check_zc_errors()
648 struct p9_req_t *req; p9_client_flush() local
658 req = p9_client_rpc(c, P9_TFLUSH, "w", oldtag); p9_client_flush()
659 if (IS_ERR(req)) p9_client_flush()
660 return PTR_ERR(req); p9_client_flush()
670 p9_free_req(c, req); p9_client_flush()
679 struct p9_req_t *req; p9_client_prepare_req() local
698 req = p9_tag_alloc(c, tag, req_size); p9_client_prepare_req()
699 if (IS_ERR(req)) p9_client_prepare_req()
700 return req; p9_client_prepare_req()
703 p9pdu_prepare(req->tc, tag, type); p9_client_prepare_req()
704 err = p9pdu_vwritef(req->tc, c->proto_version, fmt, ap); p9_client_prepare_req()
707 p9pdu_finalize(c, req->tc); p9_client_prepare_req()
709 return req; p9_client_prepare_req()
711 p9_free_req(c, req); p9_client_prepare_req()
730 struct p9_req_t *req; p9_client_rpc() local
733 req = p9_client_prepare_req(c, type, c->msize, fmt, ap); p9_client_rpc()
735 if (IS_ERR(req)) p9_client_rpc()
736 return req; p9_client_rpc()
744 err = c->trans_mod->request(c, req); p9_client_rpc()
752 err = wait_event_interruptible(*req->wq, p9_client_rpc()
753 req->status >= REQ_STATUS_RCVD); p9_client_rpc()
756 * Make sure our req is coherent with regard to updates in other p9_client_rpc()
768 if (req->status == REQ_STATUS_ERROR) { p9_client_rpc()
769 p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err); p9_client_rpc()
770 err = req->t_err; p9_client_rpc()
777 if (c->trans_mod->cancel(c, req)) p9_client_rpc()
778 p9_client_flush(c, req); p9_client_rpc()
781 if (req->status == REQ_STATUS_RCVD) p9_client_rpc()
792 err = p9_check_errors(c, req); p9_client_rpc()
793 trace_9p_client_res(c, type, req->rc->tag, err); p9_client_rpc()
795 return req; p9_client_rpc()
797 p9_free_req(c, req); p9_client_rpc()
823 struct p9_req_t *req; p9_client_zc_rpc() local
830 req = p9_client_prepare_req(c, type, P9_ZC_HDR_SZ, fmt, ap); p9_client_zc_rpc()
832 if (IS_ERR(req)) p9_client_zc_rpc()
833 return req; p9_client_zc_rpc()
841 err = c->trans_mod->zc_request(c, req, uidata, uodata, p9_client_zc_rpc()
849 if (req->status == REQ_STATUS_ERROR) { p9_client_zc_rpc()
850 p9_debug(P9_DEBUG_ERROR, "req_status error %d\n", req->t_err); p9_client_zc_rpc()
851 err = req->t_err; p9_client_zc_rpc()
858 if (c->trans_mod->cancel(c, req)) p9_client_zc_rpc()
859 p9_client_flush(c, req); p9_client_zc_rpc()
862 if (req->status == REQ_STATUS_RCVD) p9_client_zc_rpc()
873 err = p9_check_zc_errors(c, req, uidata, in_hdrlen); p9_client_zc_rpc()
874 trace_9p_client_res(c, type, req->rc->tag, err); p9_client_zc_rpc()
876 return req; p9_client_zc_rpc()
878 p9_free_req(c, req); p9_client_zc_rpc()
934 struct p9_req_t *req; p9_client_version() local
943 req = p9_client_rpc(c, P9_TVERSION, "ds", p9_client_version()
947 req = p9_client_rpc(c, P9_TVERSION, "ds", p9_client_version()
951 req = p9_client_rpc(c, P9_TVERSION, "ds", p9_client_version()
958 if (IS_ERR(req)) p9_client_version()
959 return PTR_ERR(req); p9_client_version()
961 err = p9pdu_readf(req->rc, c->proto_version, "ds", &msize, &version); p9_client_version()
964 trace_9p_protocol_dump(c, req->rc); p9_client_version()
985 p9_free_req(c, req); p9_client_version()
1107 struct p9_req_t *req; p9_client_attach() local
1122 req = p9_client_rpc(clnt, P9_TATTACH, "ddss?u", fid->fid, p9_client_attach()
1124 if (IS_ERR(req)) { p9_client_attach()
1125 err = PTR_ERR(req); p9_client_attach()
1129 err = p9pdu_readf(req->rc, clnt->proto_version, "Q", &qid); p9_client_attach()
1131 trace_9p_protocol_dump(clnt, req->rc); p9_client_attach()
1132 p9_free_req(clnt, req); p9_client_attach()
1141 p9_free_req(clnt, req); p9_client_attach()
1158 struct p9_req_t *req; p9_client_walk() local
1180 req = p9_client_rpc(clnt, P9_TWALK, "ddT", oldfid->fid, fid->fid, p9_client_walk()
1182 if (IS_ERR(req)) { p9_client_walk()
1183 err = PTR_ERR(req); p9_client_walk()
1187 err = p9pdu_readf(req->rc, clnt->proto_version, "R", &nwqids, &wqids); p9_client_walk()
1189 trace_9p_protocol_dump(clnt, req->rc); p9_client_walk()
1190 p9_free_req(clnt, req); p9_client_walk()
1193 p9_free_req(clnt, req); p9_client_walk()
1233 struct p9_req_t *req; p9_client_open() local
1246 req = p9_client_rpc(clnt, P9_TLOPEN, "dd", fid->fid, mode); p9_client_open()
1248 req = p9_client_rpc(clnt, P9_TOPEN, "db", fid->fid, mode); p9_client_open()
1249 if (IS_ERR(req)) { p9_client_open()
1250 err = PTR_ERR(req); p9_client_open()
1254 err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit); p9_client_open()
1256 trace_9p_protocol_dump(clnt, req->rc); p9_client_open()
1268 p9_free_req(clnt, req); p9_client_open()
1279 struct p9_req_t *req; p9_client_create_dotl() local
1291 req = p9_client_rpc(clnt, P9_TLCREATE, "dsddg", ofid->fid, name, flags, p9_client_create_dotl()
1293 if (IS_ERR(req)) { p9_client_create_dotl()
1294 err = PTR_ERR(req); p9_client_create_dotl()
1298 err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", qid, &iounit); p9_client_create_dotl()
1300 trace_9p_protocol_dump(clnt, req->rc); p9_client_create_dotl()
1313 p9_free_req(clnt, req); p9_client_create_dotl()
1324 struct p9_req_t *req; p9_client_fcreate() local
1336 req = p9_client_rpc(clnt, P9_TCREATE, "dsdb?s", fid->fid, name, perm, p9_client_fcreate()
1338 if (IS_ERR(req)) { p9_client_fcreate()
1339 err = PTR_ERR(req); p9_client_fcreate()
1343 err = p9pdu_readf(req->rc, clnt->proto_version, "Qd", &qid, &iounit); p9_client_fcreate()
1345 trace_9p_protocol_dump(clnt, req->rc); p9_client_fcreate()
1358 p9_free_req(clnt, req); p9_client_fcreate()
1369 struct p9_req_t *req; p9_client_symlink() local
1375 req = p9_client_rpc(clnt, P9_TSYMLINK, "dssg", dfid->fid, name, symtgt, p9_client_symlink()
1377 if (IS_ERR(req)) { p9_client_symlink()
1378 err = PTR_ERR(req); p9_client_symlink()
1382 err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); p9_client_symlink()
1384 trace_9p_protocol_dump(clnt, req->rc); p9_client_symlink()
1392 p9_free_req(clnt, req); p9_client_symlink()
1401 struct p9_req_t *req; p9_client_link() local
1406 req = p9_client_rpc(clnt, P9_TLINK, "dds", dfid->fid, oldfid->fid, p9_client_link()
1408 if (IS_ERR(req)) p9_client_link()
1409 return PTR_ERR(req); p9_client_link()
1412 p9_free_req(clnt, req); p9_client_link()
1421 struct p9_req_t *req; p9_client_fsync() local
1428 req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync); p9_client_fsync()
1429 if (IS_ERR(req)) { p9_client_fsync()
1430 err = PTR_ERR(req); p9_client_fsync()
1436 p9_free_req(clnt, req); p9_client_fsync()
1447 struct p9_req_t *req; p9_client_clunk() local
1463 req = p9_client_rpc(clnt, P9_TCLUNK, "d", fid->fid); p9_client_clunk()
1464 if (IS_ERR(req)) { p9_client_clunk()
1465 err = PTR_ERR(req); p9_client_clunk()
1471 p9_free_req(clnt, req); p9_client_clunk()
1491 struct p9_req_t *req; p9_client_remove() local
1497 req = p9_client_rpc(clnt, P9_TREMOVE, "d", fid->fid); p9_client_remove()
1498 if (IS_ERR(req)) { p9_client_remove()
1499 err = PTR_ERR(req); p9_client_remove()
1505 p9_free_req(clnt, req); p9_client_remove()
1518 struct p9_req_t *req; p9_client_unlinkat() local
1525 req = p9_client_rpc(clnt, P9_TUNLINKAT, "dsd", dfid->fid, name, flags); p9_client_unlinkat()
1526 if (IS_ERR(req)) { p9_client_unlinkat()
1527 err = PTR_ERR(req); p9_client_unlinkat()
1532 p9_free_req(clnt, req); p9_client_unlinkat()
1542 struct p9_req_t *req; p9_client_read() local
1567 req = p9_client_zc_rpc(clnt, P9_TREAD, to, NULL, rsize, p9_client_read()
1572 req = p9_client_rpc(clnt, P9_TREAD, "dqd", fid->fid, offset, p9_client_read()
1575 if (IS_ERR(req)) { p9_client_read()
1576 *err = PTR_ERR(req); p9_client_read()
1580 *err = p9pdu_readf(req->rc, clnt->proto_version, p9_client_read()
1583 trace_9p_protocol_dump(clnt, req->rc); p9_client_read()
1584 p9_free_req(clnt, req); p9_client_read()
1590 p9_free_req(clnt, req); p9_client_read()
1600 p9_free_req(clnt, req); p9_client_read()
1608 p9_free_req(clnt, req); p9_client_read()
1618 struct p9_req_t *req; p9_client_write() local
1637 req = p9_client_zc_rpc(clnt, P9_TWRITE, NULL, from, 0, p9_client_write()
1641 req = p9_client_rpc(clnt, P9_TWRITE, "dqV", fid->fid, p9_client_write()
1644 if (IS_ERR(req)) { p9_client_write()
1645 *err = PTR_ERR(req); p9_client_write()
1649 *err = p9pdu_readf(req->rc, clnt->proto_version, "d", &count); p9_client_write()
1651 trace_9p_protocol_dump(clnt, req->rc); p9_client_write()
1652 p9_free_req(clnt, req); p9_client_write()
1658 p9_free_req(clnt, req); p9_client_write()
1672 struct p9_req_t *req; p9_client_stat() local
1683 req = p9_client_rpc(clnt, P9_TSTAT, "d", fid->fid); p9_client_stat()
1684 if (IS_ERR(req)) { p9_client_stat()
1685 err = PTR_ERR(req); p9_client_stat()
1689 err = p9pdu_readf(req->rc, clnt->proto_version, "wS", &ignored, ret); p9_client_stat()
1691 trace_9p_protocol_dump(clnt, req->rc); p9_client_stat()
1692 p9_free_req(clnt, req); p9_client_stat()
1709 p9_free_req(clnt, req); p9_client_stat()
1725 struct p9_req_t *req; p9_client_getattr_dotl() local
1736 req = p9_client_rpc(clnt, P9_TGETATTR, "dq", fid->fid, request_mask); p9_client_getattr_dotl()
1737 if (IS_ERR(req)) { p9_client_getattr_dotl()
1738 err = PTR_ERR(req); p9_client_getattr_dotl()
1742 err = p9pdu_readf(req->rc, clnt->proto_version, "A", ret); p9_client_getattr_dotl()
1744 trace_9p_protocol_dump(clnt, req->rc); p9_client_getattr_dotl()
1745 p9_free_req(clnt, req); p9_client_getattr_dotl()
1770 p9_free_req(clnt, req); p9_client_getattr_dotl()
1811 struct p9_req_t *req; p9_client_wstat() local
1831 req = p9_client_rpc(clnt, P9_TWSTAT, "dwS", fid->fid, wst->size+2, wst); p9_client_wstat()
1832 if (IS_ERR(req)) { p9_client_wstat()
1833 err = PTR_ERR(req); p9_client_wstat()
1839 p9_free_req(clnt, req); p9_client_wstat()
1848 struct p9_req_t *req; p9_client_setattr() local
1864 req = p9_client_rpc(clnt, P9_TSETATTR, "dI", fid->fid, p9attr); p9_client_setattr()
1866 if (IS_ERR(req)) { p9_client_setattr()
1867 err = PTR_ERR(req); p9_client_setattr()
1871 p9_free_req(clnt, req); p9_client_setattr()
1880 struct p9_req_t *req; p9_client_statfs() local
1888 req = p9_client_rpc(clnt, P9_TSTATFS, "d", fid->fid); p9_client_statfs()
1889 if (IS_ERR(req)) { p9_client_statfs()
1890 err = PTR_ERR(req); p9_client_statfs()
1894 err = p9pdu_readf(req->rc, clnt->proto_version, "ddqqqqqqd", &sb->type, p9_client_statfs()
1898 trace_9p_protocol_dump(clnt, req->rc); p9_client_statfs()
1899 p9_free_req(clnt, req); p9_client_statfs()
1910 p9_free_req(clnt, req); p9_client_statfs()
1920 struct p9_req_t *req; p9_client_rename() local
1929 req = p9_client_rpc(clnt, P9_TRENAME, "dds", fid->fid, p9_client_rename()
1931 if (IS_ERR(req)) { p9_client_rename()
1932 err = PTR_ERR(req); p9_client_rename()
1938 p9_free_req(clnt, req); p9_client_rename()
1948 struct p9_req_t *req; p9_client_renameat() local
1958 req = p9_client_rpc(clnt, P9_TRENAMEAT, "dsds", olddirfid->fid, p9_client_renameat()
1960 if (IS_ERR(req)) { p9_client_renameat()
1961 err = PTR_ERR(req); p9_client_renameat()
1968 p9_free_req(clnt, req); p9_client_renameat()
1981 struct p9_req_t *req; p9_client_xattrwalk() local
1997 req = p9_client_rpc(clnt, P9_TXATTRWALK, "dds", p9_client_xattrwalk()
1999 if (IS_ERR(req)) { p9_client_xattrwalk()
2000 err = PTR_ERR(req); p9_client_xattrwalk()
2003 err = p9pdu_readf(req->rc, clnt->proto_version, "q", attr_size); p9_client_xattrwalk()
2005 trace_9p_protocol_dump(clnt, req->rc); p9_client_xattrwalk()
2006 p9_free_req(clnt, req); p9_client_xattrwalk()
2009 p9_free_req(clnt, req); p9_client_xattrwalk()
2028 struct p9_req_t *req; p9_client_xattrcreate() local
2036 req = p9_client_rpc(clnt, P9_TXATTRCREATE, "dsqd", p9_client_xattrcreate()
2038 if (IS_ERR(req)) { p9_client_xattrcreate()
2039 err = PTR_ERR(req); p9_client_xattrcreate()
2043 p9_free_req(clnt, req); p9_client_xattrcreate()
2053 struct p9_req_t *req; p9_client_readdir() local
2079 req = p9_client_zc_rpc(clnt, P9_TREADDIR, &to, NULL, rsize, 0, p9_client_readdir()
2083 req = p9_client_rpc(clnt, P9_TREADDIR, "dqd", fid->fid, p9_client_readdir()
2086 if (IS_ERR(req)) { p9_client_readdir()
2087 err = PTR_ERR(req); p9_client_readdir()
2091 err = p9pdu_readf(req->rc, clnt->proto_version, "D", &count, &dataptr); p9_client_readdir()
2093 trace_9p_protocol_dump(clnt, req->rc); p9_client_readdir()
2102 p9_free_req(clnt, req); p9_client_readdir()
2106 p9_free_req(clnt, req); p9_client_readdir()
2117 struct p9_req_t *req; p9_client_mknod_dotl() local
2123 req = p9_client_rpc(clnt, P9_TMKNOD, "dsdddg", fid->fid, name, mode, p9_client_mknod_dotl()
2125 if (IS_ERR(req)) p9_client_mknod_dotl()
2126 return PTR_ERR(req); p9_client_mknod_dotl()
2128 err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); p9_client_mknod_dotl()
2130 trace_9p_protocol_dump(clnt, req->rc); p9_client_mknod_dotl()
2137 p9_free_req(clnt, req); p9_client_mknod_dotl()
2148 struct p9_req_t *req; p9_client_mkdir_dotl() local
2154 req = p9_client_rpc(clnt, P9_TMKDIR, "dsdg", fid->fid, name, mode, p9_client_mkdir_dotl()
2156 if (IS_ERR(req)) p9_client_mkdir_dotl()
2157 return PTR_ERR(req); p9_client_mkdir_dotl()
2159 err = p9pdu_readf(req->rc, clnt->proto_version, "Q", qid); p9_client_mkdir_dotl()
2161 trace_9p_protocol_dump(clnt, req->rc); p9_client_mkdir_dotl()
2168 p9_free_req(clnt, req); p9_client_mkdir_dotl()
2178 struct p9_req_t *req; p9_client_lock_dotl() local
2187 req = p9_client_rpc(clnt, P9_TLOCK, "dbdqqds", fid->fid, flock->type, p9_client_lock_dotl()
2191 if (IS_ERR(req)) p9_client_lock_dotl()
2192 return PTR_ERR(req); p9_client_lock_dotl()
2194 err = p9pdu_readf(req->rc, clnt->proto_version, "b", status); p9_client_lock_dotl()
2196 trace_9p_protocol_dump(clnt, req->rc); p9_client_lock_dotl()
2201 p9_free_req(clnt, req); p9_client_lock_dotl()
2211 struct p9_req_t *req; p9_client_getlock_dotl() local
2219 req = p9_client_rpc(clnt, P9_TGETLOCK, "dbqqds", fid->fid, glock->type, p9_client_getlock_dotl()
2222 if (IS_ERR(req)) p9_client_getlock_dotl()
2223 return PTR_ERR(req); p9_client_getlock_dotl()
2225 err = p9pdu_readf(req->rc, clnt->proto_version, "bqqds", &glock->type, p9_client_getlock_dotl()
2229 trace_9p_protocol_dump(clnt, req->rc); p9_client_getlock_dotl()
2236 p9_free_req(clnt, req); p9_client_getlock_dotl()
2245 struct p9_req_t *req; p9_client_readlink() local
2251 req = p9_client_rpc(clnt, P9_TREADLINK, "d", fid->fid); p9_client_readlink()
2252 if (IS_ERR(req)) p9_client_readlink()
2253 return PTR_ERR(req); p9_client_readlink()
2255 err = p9pdu_readf(req->rc, clnt->proto_version, "s", target); p9_client_readlink()
2257 trace_9p_protocol_dump(clnt, req->rc); p9_client_readlink()
2262 p9_free_req(clnt, req); p9_client_readlink()
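Each p9_client_* helper above follows the same issue/parse/free shape: p9_client_rpc() returns either an ERR_PTR or a completed request, any reply payload is unpacked from req->rc with p9pdu_readf(), and p9_free_req() returns the tag and buffers. Condensed into one function, modelled on the TFSYNC hit (debug prints and error labels omitted):

    /* Sketch: the canonical 9P client RPC shape. */
    static int example_fsync(struct p9_client *clnt, struct p9_fid *fid, int datasync)
    {
        struct p9_req_t *req;

        req = p9_client_rpc(clnt, P9_TFSYNC, "dd", fid->fid, datasync);
        if (IS_ERR(req))
            return PTR_ERR(req);    /* transport error or server Rerror */

        /* Replies that carry data are decoded here with p9pdu_readf(req->rc, ...). */
        p9_free_req(clnt, req);     /* release the tag and fcall buffers */
        return 0;
    }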
/linux-4.1.27/fs/fuse/
H A Ddev.c37 static void fuse_request_init(struct fuse_req *req, struct page **pages, fuse_request_init() argument
41 memset(req, 0, sizeof(*req)); fuse_request_init()
44 INIT_LIST_HEAD(&req->list); fuse_request_init()
45 INIT_LIST_HEAD(&req->intr_entry); fuse_request_init()
46 init_waitqueue_head(&req->waitq); fuse_request_init()
47 atomic_set(&req->count, 1); fuse_request_init()
48 req->pages = pages; fuse_request_init()
49 req->page_descs = page_descs; fuse_request_init()
50 req->max_pages = npages; fuse_request_init()
55 struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, flags); __fuse_request_alloc() local
56 if (req) { __fuse_request_alloc()
61 pages = req->inline_pages; __fuse_request_alloc()
62 page_descs = req->inline_page_descs; __fuse_request_alloc()
72 kmem_cache_free(fuse_req_cachep, req); __fuse_request_alloc()
76 fuse_request_init(req, pages, page_descs, npages); __fuse_request_alloc()
78 return req; __fuse_request_alloc()
92 void fuse_request_free(struct fuse_req *req) fuse_request_free() argument
94 if (req->pages != req->inline_pages) { fuse_request_free()
95 kfree(req->pages); fuse_request_free()
96 kfree(req->page_descs); fuse_request_free()
98 kmem_cache_free(fuse_req_cachep, req); fuse_request_free()
114 void __fuse_get_request(struct fuse_req *req) __fuse_get_request() argument
116 atomic_inc(&req->count); __fuse_get_request()
120 static void __fuse_put_request(struct fuse_req *req) __fuse_put_request() argument
122 BUG_ON(atomic_read(&req->count) < 2); __fuse_put_request()
123 atomic_dec(&req->count); __fuse_put_request()
126 static void fuse_req_init_context(struct fuse_req *req) fuse_req_init_context() argument
128 req->in.h.uid = from_kuid_munged(&init_user_ns, current_fsuid()); fuse_req_init_context()
129 req->in.h.gid = from_kgid_munged(&init_user_ns, current_fsgid()); fuse_req_init_context()
130 req->in.h.pid = current->pid; fuse_req_init_context()
148 struct fuse_req *req; __fuse_get_req() local
171 req = fuse_request_alloc(npages); __fuse_get_req()
173 if (!req) { __fuse_get_req()
179 fuse_req_init_context(req); __fuse_get_req()
180 req->waiting = 1; __fuse_get_req()
181 req->background = for_background; __fuse_get_req()
182 return req; __fuse_get_req()
210 struct fuse_req *req = NULL; get_reserved_req() local
217 req = ff->reserved_req; get_reserved_req()
219 req->stolen_file = get_file(file); get_reserved_req()
222 } while (!req); get_reserved_req()
224 return req; get_reserved_req()
230 static void put_reserved_req(struct fuse_conn *fc, struct fuse_req *req) put_reserved_req() argument
232 struct file *file = req->stolen_file; put_reserved_req()
236 fuse_request_init(req, req->pages, req->page_descs, req->max_pages); put_reserved_req()
238 ff->reserved_req = req; put_reserved_req()
260 struct fuse_req *req; fuse_get_req_nofail_nopages() local
266 req = fuse_request_alloc(0); fuse_get_req_nofail_nopages()
267 if (!req) fuse_get_req_nofail_nopages()
268 req = get_reserved_req(fc, file); fuse_get_req_nofail_nopages()
270 fuse_req_init_context(req); fuse_get_req_nofail_nopages()
271 req->waiting = 1; fuse_get_req_nofail_nopages()
272 req->background = 0; fuse_get_req_nofail_nopages()
273 return req; fuse_get_req_nofail_nopages()
276 void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req) fuse_put_request() argument
278 if (atomic_dec_and_test(&req->count)) { fuse_put_request()
279 if (unlikely(req->background)) { fuse_put_request()
290 if (req->waiting) fuse_put_request()
293 if (req->stolen_file) fuse_put_request()
294 put_reserved_req(fc, req); fuse_put_request()
296 fuse_request_free(req); fuse_put_request()
322 static void queue_request(struct fuse_conn *fc, struct fuse_req *req) queue_request() argument
324 req->in.h.len = sizeof(struct fuse_in_header) + queue_request()
325 len_args(req->in.numargs, (struct fuse_arg *) req->in.args); queue_request()
326 list_add_tail(&req->list, &fc->pending); queue_request()
327 req->state = FUSE_REQ_PENDING; queue_request()
328 if (!req->waiting) { queue_request()
329 req->waiting = 1; queue_request()
358 struct fuse_req *req; flush_bg_queue() local
360 req = list_entry(fc->bg_queue.next, struct fuse_req, list); flush_bg_queue()
361 list_del(&req->list); flush_bg_queue()
363 req->in.h.unique = fuse_get_unique(fc); flush_bg_queue()
364 queue_request(fc, req); flush_bg_queue()
378 static void request_end(struct fuse_conn *fc, struct fuse_req *req)
381 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
382 req->end = NULL;
383 list_del(&req->list);
384 list_del(&req->intr_entry);
385 req->state = FUSE_REQ_FINISHED;
386 if (req->background) {
387 req->background = 0;
406 wake_up(&req->waitq);
408 end(fc, req);
409 fuse_put_request(fc, req);
413 struct fuse_req *req)
421 wait_event_interruptible(req->waitq, req->state == FUSE_REQ_FINISHED);
425 static void queue_interrupt(struct fuse_conn *fc, struct fuse_req *req) queue_interrupt() argument
427 list_add_tail(&req->intr_entry, &fc->interrupts); queue_interrupt()
432 static void request_wait_answer(struct fuse_conn *fc, struct fuse_req *req)
438 wait_answer_interruptible(fc, req);
440 if (req->aborted)
442 if (req->state == FUSE_REQ_FINISHED)
445 req->interrupted = 1;
446 if (req->state == FUSE_REQ_SENT)
447 queue_interrupt(fc, req);
450 if (!req->force) {
455 wait_answer_interruptible(fc, req);
458 if (req->aborted)
460 if (req->state == FUSE_REQ_FINISHED)
464 if (req->state == FUSE_REQ_PENDING) {
465 list_del(&req->list);
466 __fuse_put_request(req); variable
467 req->out.h.error = -EINTR;
477 wait_event(req->waitq, req->state == FUSE_REQ_FINISHED);
480 if (!req->aborted)
484 BUG_ON(req->state != FUSE_REQ_FINISHED);
485 if (req->locked) {
487 being copied to/from the buffers of req. During
492 wait_event(req->waitq, !req->locked);
497 static void __fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) __fuse_request_send() argument
499 BUG_ON(req->background); __fuse_request_send()
502 req->out.h.error = -ENOTCONN; __fuse_request_send()
504 req->out.h.error = -ECONNREFUSED; __fuse_request_send()
506 req->in.h.unique = fuse_get_unique(fc); __fuse_request_send()
507 queue_request(fc, req); __fuse_request_send()
510 __fuse_get_request(req); __fuse_request_send()
512 request_wait_answer(fc, req); __fuse_request_send()
517 void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) fuse_request_send() argument
519 req->isreply = 1; fuse_request_send()
520 __fuse_request_send(fc, req); fuse_request_send()
559 struct fuse_req *req; fuse_simple_request() local
562 req = fuse_get_req(fc, 0); fuse_simple_request()
563 if (IS_ERR(req)) fuse_simple_request()
564 return PTR_ERR(req); fuse_simple_request()
569 req->in.h.opcode = args->in.h.opcode; fuse_simple_request()
570 req->in.h.nodeid = args->in.h.nodeid; fuse_simple_request()
571 req->in.numargs = args->in.numargs; fuse_simple_request()
572 memcpy(req->in.args, args->in.args, fuse_simple_request()
574 req->out.argvar = args->out.argvar; fuse_simple_request()
575 req->out.numargs = args->out.numargs; fuse_simple_request()
576 memcpy(req->out.args, args->out.args, fuse_simple_request()
578 fuse_request_send(fc, req); fuse_simple_request()
579 ret = req->out.h.error; fuse_simple_request()
582 ret = req->out.args[0].size; fuse_simple_request()
584 fuse_put_request(fc, req); fuse_simple_request()
590 struct fuse_req *req) fuse_request_send_nowait_locked()
592 BUG_ON(!req->background); fuse_request_send_nowait_locked()
601 list_add_tail(&req->list, &fc->bg_queue); fuse_request_send_nowait_locked()
605 static void fuse_request_send_nowait(struct fuse_conn *fc, struct fuse_req *req) fuse_request_send_nowait() argument
609 fuse_request_send_nowait_locked(fc, req); fuse_request_send_nowait()
612 req->out.h.error = -ENOTCONN; fuse_request_send_nowait()
613 request_end(fc, req); fuse_request_send_nowait()
617 void fuse_request_send_background(struct fuse_conn *fc, struct fuse_req *req) fuse_request_send_background() argument
619 req->isreply = 1; fuse_request_send_background()
620 fuse_request_send_nowait(fc, req); fuse_request_send_background()
625 struct fuse_req *req, u64 unique) fuse_request_send_notify_reply()
629 req->isreply = 0; fuse_request_send_notify_reply()
630 req->in.h.unique = unique; fuse_request_send_notify_reply()
633 queue_request(fc, req); fuse_request_send_notify_reply()
647 struct fuse_req *req) fuse_request_send_background_locked()
649 req->isreply = 1; fuse_request_send_background_locked()
650 fuse_request_send_nowait_locked(fc, req); fuse_request_send_background_locked()
657 struct fuse_req *req; fuse_force_forget() local
662 req = fuse_get_req_nofail_nopages(fc, file); fuse_force_forget()
663 req->in.h.opcode = FUSE_FORGET; fuse_force_forget()
664 req->in.h.nodeid = nodeid; fuse_force_forget()
665 req->in.numargs = 1; fuse_force_forget()
666 req->in.args[0].size = sizeof(inarg); fuse_force_forget()
667 req->in.args[0].value = &inarg; fuse_force_forget()
668 req->isreply = 0; fuse_force_forget()
669 __fuse_request_send(fc, req); fuse_force_forget()
671 fuse_put_request(fc, req); fuse_force_forget()
679 static int lock_request(struct fuse_conn *fc, struct fuse_req *req) lock_request() argument
682 if (req) { lock_request()
684 if (req->aborted) lock_request()
687 req->locked = 1; lock_request()
698 static void unlock_request(struct fuse_conn *fc, struct fuse_req *req) unlock_request() argument
700 if (req) { unlock_request()
702 req->locked = 0; unlock_request()
703 if (req->aborted) unlock_request()
704 wake_up(&req->waitq); unlock_request()
712 struct fuse_req *req; member in struct:fuse_copy_state
763 unlock_request(cs->fc, cs->req); fuse_copy_fill()
812 return lock_request(cs->fc, cs->req); fuse_copy_fill()
863 unlock_request(cs->fc, cs->req); fuse_try_move_page()
918 if (cs->req->aborted) fuse_try_move_page()
942 err = lock_request(cs->fc, cs->req); fuse_try_move_page()
957 unlock_request(cs->fc, cs->req); fuse_ref_page()
1019 struct fuse_req *req = cs->req; fuse_copy_pages() local
1021 for (i = 0; i < req->num_pages && (nbytes || zeroing); i++) { fuse_copy_pages()
1023 unsigned offset = req->page_descs[i].offset; fuse_copy_pages()
1024 unsigned count = min(nbytes, req->page_descs[i].length); fuse_copy_pages()
1026 err = fuse_copy_page(cs, &req->pages[i], offset, count, fuse_copy_pages()
1109 size_t nbytes, struct fuse_req *req)
1117 list_del_init(&req->intr_entry);
1118 req->intr_unique = fuse_get_unique(fc);
1123 ih.unique = req->intr_unique;
1124 arg.unique = req->in.h.unique;
1265 struct fuse_req *req; fuse_dev_do_read() local
1285 req = list_entry(fc->interrupts.next, struct fuse_req, fuse_dev_do_read()
1287 return fuse_read_interrupt(fc, cs, nbytes, req); fuse_dev_do_read()
1298 req = list_entry(fc->pending.next, struct fuse_req, list); fuse_dev_do_read()
1299 req->state = FUSE_REQ_READING; fuse_dev_do_read()
1300 list_move(&req->list, &fc->io); fuse_dev_do_read()
1302 in = &req->in; fuse_dev_do_read()
1306 req->out.h.error = -EIO; fuse_dev_do_read()
1309 req->out.h.error = -E2BIG; fuse_dev_do_read()
1310 request_end(fc, req); fuse_dev_do_read()
1314 cs->req = req; fuse_dev_do_read()
1321 req->locked = 0; fuse_dev_do_read()
1322 if (req->aborted) { fuse_dev_do_read()
1323 request_end(fc, req); fuse_dev_do_read()
1327 req->out.h.error = -EIO; fuse_dev_do_read()
1328 request_end(fc, req); fuse_dev_do_read()
1331 if (!req->isreply) fuse_dev_do_read()
1332 request_end(fc, req); fuse_dev_do_read()
1334 req->state = FUSE_REQ_SENT; fuse_dev_do_read()
1335 list_move_tail(&req->list, &fc->processing); fuse_dev_do_read()
1336 if (req->interrupted) fuse_dev_do_read()
1337 queue_interrupt(fc, req); fuse_dev_do_read()
1690 static void fuse_retrieve_end(struct fuse_conn *fc, struct fuse_req *req) fuse_retrieve_end() argument
1692 release_pages(req->pages, req->num_pages, false); fuse_retrieve_end()
1700 struct fuse_req *req; fuse_retrieve() local
1720 req = fuse_get_req(fc, num_pages); fuse_retrieve()
1721 if (IS_ERR(req)) fuse_retrieve()
1722 return PTR_ERR(req); fuse_retrieve()
1724 req->in.h.opcode = FUSE_NOTIFY_REPLY; fuse_retrieve()
1725 req->in.h.nodeid = outarg->nodeid; fuse_retrieve()
1726 req->in.numargs = 2; fuse_retrieve()
1727 req->in.argpages = 1; fuse_retrieve()
1728 req->page_descs[0].offset = offset; fuse_retrieve()
1729 req->end = fuse_retrieve_end; fuse_retrieve()
1733 while (num && req->num_pages < num_pages) { fuse_retrieve()
1742 req->pages[req->num_pages] = page; fuse_retrieve()
1743 req->page_descs[req->num_pages].length = this_num; fuse_retrieve()
1744 req->num_pages++; fuse_retrieve()
1751 req->misc.retrieve_in.offset = outarg->offset; fuse_retrieve()
1752 req->misc.retrieve_in.size = total_len; fuse_retrieve()
1753 req->in.args[0].size = sizeof(req->misc.retrieve_in); fuse_retrieve()
1754 req->in.args[0].value = &req->misc.retrieve_in; fuse_retrieve()
1755 req->in.args[1].size = total_len; fuse_retrieve()
1757 err = fuse_request_send_notify_reply(fc, req, outarg->notify_unique); fuse_retrieve()
1759 fuse_retrieve_end(fc, req); fuse_retrieve()
1835 struct fuse_req *req; request_find() local
1837 list_for_each_entry(req, &fc->processing, list) { request_find()
1838 if (req->in.h.unique == unique || req->intr_unique == unique) request_find()
1839 return req; request_find()
1878 struct fuse_req *req; fuse_dev_do_write() local
1910 req = request_find(fc, oh.unique); fuse_dev_do_write()
1911 if (!req) fuse_dev_do_write()
1914 if (req->aborted) { fuse_dev_do_write()
1918 request_end(fc, req); fuse_dev_do_write()
1922 if (req->intr_unique == oh.unique) { fuse_dev_do_write()
1930 queue_interrupt(fc, req); fuse_dev_do_write()
1937 req->state = FUSE_REQ_WRITING; fuse_dev_do_write()
1938 list_move(&req->list, &fc->io); fuse_dev_do_write()
1939 req->out.h = oh; fuse_dev_do_write()
1940 req->locked = 1; fuse_dev_do_write()
1941 cs->req = req; fuse_dev_do_write()
1942 if (!req->out.page_replace) fuse_dev_do_write()
1946 err = copy_out_args(cs, &req->out, nbytes); fuse_dev_do_write()
1950 req->locked = 0; fuse_dev_do_write()
1952 if (req->aborted) fuse_dev_do_write()
1954 } else if (!req->aborted) fuse_dev_do_write()
1955 req->out.h.error = -EIO; fuse_dev_do_write()
1956 request_end(fc, req); fuse_dev_do_write()
2090 struct fuse_req *req; variable in typeref:struct:fuse_req
2091 req = list_entry(head->next, struct fuse_req, list);
2092 req->out.h.error = -ECONNABORTED;
2093 request_end(fc, req);
2114 struct fuse_req *req = variable in typeref:struct:fuse_req
2116 void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
2118 req->aborted = 1;
2119 req->out.h.error = -ECONNABORTED;
2120 req->state = FUSE_REQ_FINISHED;
2121 list_del_init(&req->list);
2122 wake_up(&req->waitq);
2124 req->end = NULL;
2125 __fuse_get_request(req); variable
2127 wait_event(req->waitq, !req->locked);
2128 end(fc, req);
2129 fuse_put_request(fc, req);
2175 * onto the pending list is prevented by req->connected being false.
2178 * prevented by the req->aborted flag being true for these requests.
589 fuse_request_send_nowait_locked(struct fuse_conn *fc, struct fuse_req *req) fuse_request_send_nowait_locked() argument
624 fuse_request_send_notify_reply(struct fuse_conn *fc, struct fuse_req *req, u64 unique) fuse_request_send_notify_reply() argument
646 fuse_request_send_background_locked(struct fuse_conn *fc, struct fuse_req *req) fuse_request_send_background_locked() argument
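fuse/dev.c shows the whole request lifecycle: allocate with fuse_get_req(), fill in.h and the argument array, send with fuse_request_send() (which queues the request and waits in request_wait_answer()), then read out.h.error and drop the reference with fuse_put_request(). A compressed synchronous-call sketch assembled from the fuse_simple_request() and fuse_force_forget() hits (the opcode and the single argument are illustrative):

    /* Sketch: a synchronous FUSE request, in the shape of fuse_simple_request(). */
    struct fuse_req *req;
    int err;

    req = fuse_get_req(fc, 0);                 /* no page payload */
    if (IS_ERR(req))
        return PTR_ERR(req);

    req->in.h.opcode = FUSE_FLUSH;             /* illustrative opcode */
    req->in.h.nodeid = nodeid;
    req->in.numargs = 1;
    req->in.args[0].size = sizeof(inarg);
    req->in.args[0].value = &inarg;

    fuse_request_send(fc, req);                /* blocks until userspace replies */
    err = req->out.h.error;
    fuse_put_request(fc, req);                 /* drop this reference */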
H A Dfile.c84 static void fuse_release_end(struct fuse_conn *fc, struct fuse_req *req) fuse_release_end() argument
86 iput(req->misc.release.inode); fuse_release_end()
92 struct fuse_req *req = ff->reserved_req; fuse_file_put() local
99 req->background = 0; fuse_file_put()
100 iput(req->misc.release.inode); fuse_file_put()
101 fuse_put_request(ff->fc, req); fuse_file_put()
103 req->background = 0; fuse_file_put()
104 fuse_request_send(ff->fc, req); fuse_file_put()
105 iput(req->misc.release.inode); fuse_file_put()
106 fuse_put_request(ff->fc, req); fuse_file_put()
108 req->end = fuse_release_end; fuse_file_put()
109 req->background = 1; fuse_file_put()
110 fuse_request_send_background(ff->fc, req); fuse_file_put()
226 struct fuse_req *req = ff->reserved_req; fuse_prepare_release() local
227 struct fuse_release_in *inarg = &req->misc.release.in; fuse_prepare_release()
239 req->in.h.opcode = opcode; fuse_prepare_release()
240 req->in.h.nodeid = ff->nodeid; fuse_prepare_release()
241 req->in.numargs = 1; fuse_prepare_release()
242 req->in.args[0].size = sizeof(struct fuse_release_in); fuse_prepare_release()
243 req->in.args[0].value = inarg; fuse_prepare_release()
249 struct fuse_req *req; fuse_release_common() local
255 req = ff->reserved_req; fuse_release_common()
259 struct fuse_release_in *inarg = &req->misc.release.in; fuse_release_common()
265 req->misc.release.inode = igrab(file_inode(file)); fuse_release_common()
343 struct fuse_req *req; fuse_range_is_writeback() local
347 list_for_each_entry(req, &fi->writepages, writepages_entry) { fuse_range_is_writeback()
350 BUG_ON(req->inode != inode); fuse_range_is_writeback()
351 curr_index = req->misc.write.in.offset >> PAGE_CACHE_SHIFT; fuse_range_is_writeback()
352 if (idx_from < curr_index + req->num_pages && fuse_range_is_writeback()
402 struct fuse_req *req; fuse_flush() local
420 req = fuse_get_req_nofail_nopages(fc, file); fuse_flush()
424 req->in.h.opcode = FUSE_FLUSH; fuse_flush()
425 req->in.h.nodeid = get_node_id(inode); fuse_flush()
426 req->in.numargs = 1; fuse_flush()
427 req->in.args[0].size = sizeof(inarg); fuse_flush()
428 req->in.args[0].value = &inarg; fuse_flush()
429 req->force = 1; fuse_flush()
430 fuse_request_send(fc, req); fuse_flush()
431 err = req->out.h.error; fuse_flush()
432 fuse_put_request(fc, req); fuse_flush()
499 void fuse_read_fill(struct fuse_req *req, struct file *file, loff_t pos, fuse_read_fill() argument
502 struct fuse_read_in *inarg = &req->misc.read.in; fuse_read_fill()
509 req->in.h.opcode = opcode; fuse_read_fill()
510 req->in.h.nodeid = ff->nodeid; fuse_read_fill()
511 req->in.numargs = 1; fuse_read_fill()
512 req->in.args[0].size = sizeof(struct fuse_read_in); fuse_read_fill()
513 req->in.args[0].value = inarg; fuse_read_fill()
514 req->out.argvar = 1; fuse_read_fill()
515 req->out.numargs = 1; fuse_read_fill()
516 req->out.args[0].size = count; fuse_read_fill()
519 static void fuse_release_user_pages(struct fuse_req *req, int write) fuse_release_user_pages() argument
523 for (i = 0; i < req->num_pages; i++) { fuse_release_user_pages()
524 struct page *page = req->pages[i]; fuse_release_user_pages()
555 * fully completed (req->out.args[0].size == 32K) resulting in pos == -1. The
598 static void fuse_aio_complete_req(struct fuse_conn *fc, struct fuse_req *req) fuse_aio_complete_req() argument
600 struct fuse_io_priv *io = req->io; fuse_aio_complete_req()
603 fuse_release_user_pages(req, !io->write); fuse_aio_complete_req()
606 if (req->misc.write.in.size != req->misc.write.out.size) fuse_aio_complete_req()
607 pos = req->misc.write.in.offset - io->offset + fuse_aio_complete_req()
608 req->misc.write.out.size; fuse_aio_complete_req()
610 if (req->misc.read.in.size != req->out.args[0].size) fuse_aio_complete_req()
611 pos = req->misc.read.in.offset - io->offset + fuse_aio_complete_req()
612 req->out.args[0].size; fuse_aio_complete_req()
615 fuse_aio_complete(io, req->out.h.error, pos); fuse_aio_complete_req()
618 static size_t fuse_async_req_send(struct fuse_conn *fc, struct fuse_req *req, fuse_async_req_send() argument
627 req->io = io; fuse_async_req_send()
628 req->end = fuse_aio_complete_req; fuse_async_req_send()
630 __fuse_get_request(req); fuse_async_req_send()
631 fuse_request_send_background(fc, req); fuse_async_req_send()
636 static size_t fuse_send_read(struct fuse_req *req, struct fuse_io_priv *io, fuse_send_read() argument
643 fuse_read_fill(req, file, pos, count, FUSE_READ); fuse_send_read()
645 struct fuse_read_in *inarg = &req->misc.read.in; fuse_send_read()
652 return fuse_async_req_send(fc, req, count, io); fuse_send_read()
654 fuse_request_send(fc, req); fuse_send_read()
655 return req->out.args[0].size; fuse_send_read()
673 static void fuse_short_read(struct fuse_req *req, struct inode *inode, fuse_short_read() argument
676 size_t num_read = req->out.args[0].size; fuse_short_read()
689 for (i = start_idx; i < req->num_pages; i++) { fuse_short_read()
690 zero_user_segment(req->pages[i], off, PAGE_CACHE_SIZE); fuse_short_read()
694 loff_t pos = page_offset(req->pages[0]) + num_read; fuse_short_read()
704 struct fuse_req *req; fuse_do_readpage() local
718 req = fuse_get_req(fc, 1); fuse_do_readpage()
719 if (IS_ERR(req)) fuse_do_readpage()
720 return PTR_ERR(req); fuse_do_readpage()
724 req->out.page_zeroing = 1; fuse_do_readpage()
725 req->out.argpages = 1; fuse_do_readpage()
726 req->num_pages = 1; fuse_do_readpage()
727 req->pages[0] = page; fuse_do_readpage()
728 req->page_descs[0].length = count; fuse_do_readpage()
729 num_read = fuse_send_read(req, &io, pos, count, NULL); fuse_do_readpage()
730 err = req->out.h.error; fuse_do_readpage()
737 fuse_short_read(req, inode, attr_ver); fuse_do_readpage()
742 fuse_put_request(fc, req); fuse_do_readpage()
763 static void fuse_readpages_end(struct fuse_conn *fc, struct fuse_req *req) fuse_readpages_end() argument
766 size_t count = req->misc.read.in.size; fuse_readpages_end()
767 size_t num_read = req->out.args[0].size; fuse_readpages_end()
770 for (i = 0; mapping == NULL && i < req->num_pages; i++) fuse_readpages_end()
771 mapping = req->pages[i]->mapping; fuse_readpages_end()
779 if (!req->out.h.error && num_read < count) fuse_readpages_end()
780 fuse_short_read(req, inode, req->misc.read.attr_ver); fuse_readpages_end()
785 for (i = 0; i < req->num_pages; i++) { fuse_readpages_end()
786 struct page *page = req->pages[i]; fuse_readpages_end()
787 if (!req->out.h.error) fuse_readpages_end()
794 if (req->ff) fuse_readpages_end()
795 fuse_file_put(req->ff, false); fuse_readpages_end()
798 static void fuse_send_readpages(struct fuse_req *req, struct file *file) fuse_send_readpages() argument
802 loff_t pos = page_offset(req->pages[0]); fuse_send_readpages()
803 size_t count = req->num_pages << PAGE_CACHE_SHIFT; fuse_send_readpages()
805 req->out.argpages = 1; fuse_send_readpages()
806 req->out.page_zeroing = 1; fuse_send_readpages()
807 req->out.page_replace = 1; fuse_send_readpages()
808 fuse_read_fill(req, file, pos, count, FUSE_READ); fuse_send_readpages()
809 req->misc.read.attr_ver = fuse_get_attr_version(fc); fuse_send_readpages()
811 req->ff = fuse_file_get(ff); fuse_send_readpages()
812 req->end = fuse_readpages_end; fuse_send_readpages()
813 fuse_request_send_background(fc, req); fuse_send_readpages()
815 fuse_request_send(fc, req); fuse_send_readpages()
816 fuse_readpages_end(fc, req); fuse_send_readpages()
817 fuse_put_request(fc, req); fuse_send_readpages()
822 struct fuse_req *req; member in struct:fuse_fill_data
831 struct fuse_req *req = data->req; fuse_readpages_fill() local
837 if (req->num_pages && fuse_readpages_fill()
838 (req->num_pages == FUSE_MAX_PAGES_PER_REQ || fuse_readpages_fill()
839 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_read || fuse_readpages_fill()
840 req->pages[req->num_pages - 1]->index + 1 != page->index)) { fuse_readpages_fill()
843 fuse_send_readpages(req, data->file); fuse_readpages_fill()
845 req = fuse_get_req_for_background(fc, nr_alloc); fuse_readpages_fill()
847 req = fuse_get_req(fc, nr_alloc); fuse_readpages_fill()
849 data->req = req; fuse_readpages_fill()
850 if (IS_ERR(req)) { fuse_readpages_fill()
852 return PTR_ERR(req); fuse_readpages_fill()
856 if (WARN_ON(req->num_pages >= req->max_pages)) { fuse_readpages_fill()
857 fuse_put_request(fc, req); fuse_readpages_fill()
862 req->pages[req->num_pages] = page; fuse_readpages_fill()
863 req->page_descs[req->num_pages].length = PAGE_SIZE; fuse_readpages_fill()
864 req->num_pages++; fuse_readpages_fill()
885 data.req = fuse_get_req_for_background(fc, nr_alloc); fuse_readpages()
887 data.req = fuse_get_req(fc, nr_alloc); fuse_readpages()
889 err = PTR_ERR(data.req); fuse_readpages()
890 if (IS_ERR(data.req)) fuse_readpages()
895 if (data.req->num_pages) fuse_readpages()
896 fuse_send_readpages(data.req, file); fuse_readpages()
898 fuse_put_request(fc, data.req); fuse_readpages()
925 static void fuse_write_fill(struct fuse_req *req, struct fuse_file *ff, fuse_write_fill() argument
928 struct fuse_write_in *inarg = &req->misc.write.in; fuse_write_fill()
929 struct fuse_write_out *outarg = &req->misc.write.out; fuse_write_fill()
934 req->in.h.opcode = FUSE_WRITE; fuse_write_fill()
935 req->in.h.nodeid = ff->nodeid; fuse_write_fill()
936 req->in.numargs = 2; fuse_write_fill()
938 req->in.args[0].size = FUSE_COMPAT_WRITE_IN_SIZE; fuse_write_fill()
940 req->in.args[0].size = sizeof(struct fuse_write_in); fuse_write_fill()
941 req->in.args[0].value = inarg; fuse_write_fill()
942 req->in.args[1].size = count; fuse_write_fill()
943 req->out.numargs = 1; fuse_write_fill()
944 req->out.args[0].size = sizeof(struct fuse_write_out); fuse_write_fill()
945 req->out.args[0].value = outarg; fuse_write_fill()
948 static size_t fuse_send_write(struct fuse_req *req, struct fuse_io_priv *io, fuse_send_write() argument
954 struct fuse_write_in *inarg = &req->misc.write.in; fuse_send_write()
956 fuse_write_fill(req, ff, pos, count); fuse_send_write()
964 return fuse_async_req_send(fc, req, count, io); fuse_send_write()
966 fuse_request_send(fc, req); fuse_send_write()
967 return req->misc.write.out.size; fuse_send_write()
987 static size_t fuse_send_write_pages(struct fuse_req *req, struct file *file, fuse_send_write_pages() argument
996 for (i = 0; i < req->num_pages; i++) fuse_send_write_pages()
997 fuse_wait_on_page_writeback(inode, req->pages[i]->index); fuse_send_write_pages()
999 res = fuse_send_write(req, &io, pos, count, NULL); fuse_send_write_pages()
1001 offset = req->page_descs[0].offset; fuse_send_write_pages()
1003 for (i = 0; i < req->num_pages; i++) { fuse_send_write_pages()
1004 struct page *page = req->pages[i]; fuse_send_write_pages()
1006 if (!req->out.h.error && !offset && count >= PAGE_CACHE_SIZE) fuse_send_write_pages()
1022 static ssize_t fuse_fill_write_pages(struct fuse_req *req, fuse_fill_write_pages() argument
1031 req->in.argpages = 1; fuse_fill_write_pages()
1032 req->page_descs[0].offset = offset; fuse_fill_write_pages()
1068 req->pages[req->num_pages] = page; fuse_fill_write_pages()
1069 req->page_descs[req->num_pages].length = tmp; fuse_fill_write_pages()
1070 req->num_pages++; fuse_fill_write_pages()
1081 req->num_pages < req->max_pages && offset == 0); fuse_fill_write_pages()
1111 struct fuse_req *req; fuse_perform_write() local
1115 req = fuse_get_req(fc, nr_pages); fuse_perform_write()
1116 if (IS_ERR(req)) { fuse_perform_write()
1117 err = PTR_ERR(req); fuse_perform_write()
1121 count = fuse_fill_write_pages(req, mapping, ii, pos); fuse_perform_write()
1127 num_written = fuse_send_write_pages(req, file, inode, fuse_perform_write()
1129 err = req->out.h.error; fuse_perform_write()
1139 fuse_put_request(fc, req); fuse_perform_write()
1225 static inline void fuse_page_descs_length_init(struct fuse_req *req, fuse_page_descs_length_init() argument
1231 req->page_descs[i].length = PAGE_SIZE - fuse_page_descs_length_init()
1232 req->page_descs[i].offset; fuse_page_descs_length_init()
1246 static int fuse_get_user_pages(struct fuse_req *req, struct iov_iter *ii, fuse_get_user_pages() argument
1249 size_t nbytes = 0; /* # bytes already packed in req */ fuse_get_user_pages()
1257 req->in.args[1].value = (void *) user_addr; fuse_get_user_pages()
1259 req->out.args[0].value = (void *) user_addr; fuse_get_user_pages()
1266 while (nbytes < *nbytesp && req->num_pages < req->max_pages) { fuse_get_user_pages()
1270 &req->pages[req->num_pages], fuse_get_user_pages()
1272 req->max_pages - req->num_pages, fuse_get_user_pages()
1283 req->page_descs[req->num_pages].offset = start; fuse_get_user_pages()
1284 fuse_page_descs_length_init(req, req->num_pages, npages); fuse_get_user_pages()
1286 req->num_pages += npages; fuse_get_user_pages()
1287 req->page_descs[req->num_pages - 1].length -= fuse_get_user_pages()
1292 req->in.argpages = 1; fuse_get_user_pages()
1294 req->out.argpages = 1; fuse_get_user_pages()
1321 struct fuse_req *req; fuse_direct_io() local
1324 req = fuse_get_req_for_background(fc, fuse_iter_npages(iter)); fuse_direct_io()
1326 req = fuse_get_req(fc, fuse_iter_npages(iter)); fuse_direct_io()
1327 if (IS_ERR(req)) fuse_direct_io()
1328 return PTR_ERR(req); fuse_direct_io()
1342 int err = fuse_get_user_pages(req, iter, &nbytes, write); fuse_direct_io()
1349 nres = fuse_send_write(req, io, pos, nbytes, owner); fuse_direct_io()
1351 nres = fuse_send_read(req, io, pos, nbytes, owner); fuse_direct_io()
1354 fuse_release_user_pages(req, !write); fuse_direct_io()
1355 if (req->out.h.error) { fuse_direct_io()
1357 res = req->out.h.error; fuse_direct_io()
1369 fuse_put_request(fc, req); fuse_direct_io()
1371 req = fuse_get_req_for_background(fc, fuse_direct_io()
1374 req = fuse_get_req(fc, fuse_iter_npages(iter)); fuse_direct_io()
1375 if (IS_ERR(req)) fuse_direct_io()
1379 if (!IS_ERR(req)) fuse_direct_io()
1380 fuse_put_request(fc, req); fuse_direct_io()
1435 static void fuse_writepage_free(struct fuse_conn *fc, struct fuse_req *req) fuse_writepage_free() argument
1439 for (i = 0; i < req->num_pages; i++) fuse_writepage_free()
1440 __free_page(req->pages[i]); fuse_writepage_free()
1442 if (req->ff) fuse_writepage_free()
1443 fuse_file_put(req->ff, false); fuse_writepage_free()
1446 static void fuse_writepage_finish(struct fuse_conn *fc, struct fuse_req *req) fuse_writepage_finish() argument
1448 struct inode *inode = req->inode; fuse_writepage_finish()
1453 list_del(&req->writepages_entry); fuse_writepage_finish()
1454 for (i = 0; i < req->num_pages; i++) { fuse_writepage_finish()
1456 dec_zone_page_state(req->pages[i], NR_WRITEBACK_TEMP); fuse_writepage_finish()
1463 static void fuse_send_writepage(struct fuse_conn *fc, struct fuse_req *req,
1468 struct fuse_inode *fi = get_fuse_inode(req->inode);
1469 struct fuse_write_in *inarg = &req->misc.write.in;
1470 __u64 data_size = req->num_pages * PAGE_CACHE_SIZE;
1484 req->in.args[1].size = inarg->size;
1486 fuse_request_send_background_locked(fc, req);
1490 fuse_writepage_finish(fc, req);
1492 fuse_writepage_free(fc, req);
1493 fuse_put_request(fc, req);
1510 struct fuse_req *req; variable in typeref:struct:fuse_req
1513 req = list_entry(fi->queued_writes.next, struct fuse_req, list);
1514 list_del_init(&req->list);
1515 fuse_send_writepage(fc, req, crop);
1519 static void fuse_writepage_end(struct fuse_conn *fc, struct fuse_req *req) fuse_writepage_end() argument
1521 struct inode *inode = req->inode; fuse_writepage_end()
1524 mapping_set_error(inode->i_mapping, req->out.h.error); fuse_writepage_end()
1526 while (req->misc.write.next) { fuse_writepage_end()
1528 struct fuse_write_in *inarg = &req->misc.write.in; fuse_writepage_end()
1529 struct fuse_req *next = req->misc.write.next; fuse_writepage_end()
1530 req->misc.write.next = next->misc.write.next; fuse_writepage_end()
1532 next->ff = fuse_file_get(req->ff); fuse_writepage_end()
1561 fuse_writepage_finish(fc, req); fuse_writepage_end()
1563 fuse_writepage_free(fc, req); fuse_writepage_end()
1611 struct fuse_req *req; fuse_writepage_locked() local
1617 req = fuse_request_alloc_nofs(1); fuse_writepage_locked()
1618 if (!req) fuse_writepage_locked()
1621 req->background = 1; /* writeback always goes to bg_queue */ fuse_writepage_locked()
1627 req->ff = fuse_write_file_get(fc, fi); fuse_writepage_locked()
1628 if (!req->ff) fuse_writepage_locked()
1631 fuse_write_fill(req, req->ff, page_offset(page), 0); fuse_writepage_locked()
1634 req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; fuse_writepage_locked()
1635 req->misc.write.next = NULL; fuse_writepage_locked()
1636 req->in.argpages = 1; fuse_writepage_locked()
1637 req->num_pages = 1; fuse_writepage_locked()
1638 req->pages[0] = tmp_page; fuse_writepage_locked()
1639 req->page_descs[0].offset = 0; fuse_writepage_locked()
1640 req->page_descs[0].length = PAGE_SIZE; fuse_writepage_locked()
1641 req->end = fuse_writepage_end; fuse_writepage_locked()
1642 req->inode = inode; fuse_writepage_locked()
1648 list_add(&req->writepages_entry, &fi->writepages); fuse_writepage_locked()
1649 list_add_tail(&req->list, &fi->queued_writes); fuse_writepage_locked()
1660 fuse_request_free(req); fuse_writepage_locked()
1689 struct fuse_req *req; member in struct:fuse_fill_wb_data
1697 struct fuse_req *req = data->req; fuse_writepages_send() local
1701 int num_pages = req->num_pages; fuse_writepages_send()
1704 req->ff = fuse_file_get(data->ff); fuse_writepages_send()
1706 list_add_tail(&req->list, &fi->queued_writes); fuse_writepages_send()
1779 struct fuse_req *req = data->req; fuse_writepages_fill() local
1801 if (req && req->num_pages && fuse_writepages_fill()
1802 (is_writeback || req->num_pages == FUSE_MAX_PAGES_PER_REQ || fuse_writepages_fill()
1803 (req->num_pages + 1) * PAGE_CACHE_SIZE > fc->max_write || fuse_writepages_fill()
1804 data->orig_pages[req->num_pages - 1]->index + 1 != page->index)) { fuse_writepages_fill()
1806 data->req = NULL; fuse_writepages_fill()
1822 * request to the fi->writepages list and increment req->num_pages. fuse_writepages_fill()
1826 if (data->req == NULL) { fuse_writepages_fill()
1830 req = fuse_request_alloc_nofs(FUSE_MAX_PAGES_PER_REQ); fuse_writepages_fill()
1831 if (!req) { fuse_writepages_fill()
1836 fuse_write_fill(req, data->ff, page_offset(page), 0); fuse_writepages_fill()
1837 req->misc.write.in.write_flags |= FUSE_WRITE_CACHE; fuse_writepages_fill()
1838 req->misc.write.next = NULL; fuse_writepages_fill()
1839 req->in.argpages = 1; fuse_writepages_fill()
1840 req->background = 1; fuse_writepages_fill()
1841 req->num_pages = 0; fuse_writepages_fill()
1842 req->end = fuse_writepage_end; fuse_writepages_fill()
1843 req->inode = inode; fuse_writepages_fill()
1846 list_add(&req->writepages_entry, &fi->writepages); fuse_writepages_fill()
1849 data->req = req; fuse_writepages_fill()
1854 req->pages[req->num_pages] = tmp_page; fuse_writepages_fill()
1855 req->page_descs[req->num_pages].offset = 0; fuse_writepages_fill()
1856 req->page_descs[req->num_pages].length = PAGE_SIZE; fuse_writepages_fill()
1862 if (is_writeback && fuse_writepage_in_flight(req, page)) { fuse_writepages_fill()
1864 data->req = NULL; fuse_writepages_fill()
1867 data->orig_pages[req->num_pages] = page; fuse_writepages_fill()
1874 req->num_pages++; fuse_writepages_fill()
1895 data.req = NULL; fuse_writepages()
1906 if (data.req) { fuse_writepages()
1908 BUG_ON(!data.req->num_pages); fuse_writepages()
2448 struct fuse_req *req = NULL; fuse_do_ioctl() local
2515 req = fuse_get_req(fc, num_pages); fuse_do_ioctl()
2516 if (IS_ERR(req)) { fuse_do_ioctl()
2517 err = PTR_ERR(req); fuse_do_ioctl()
2518 req = NULL; fuse_do_ioctl()
2521 memcpy(req->pages, pages, sizeof(req->pages[0]) * num_pages); fuse_do_ioctl()
2522 req->num_pages = num_pages; fuse_do_ioctl()
2523 fuse_page_descs_length_init(req, 0, req->num_pages); fuse_do_ioctl()
2526 req->in.h.opcode = FUSE_IOCTL; fuse_do_ioctl()
2527 req->in.h.nodeid = ff->nodeid; fuse_do_ioctl()
2528 req->in.numargs = 1; fuse_do_ioctl()
2529 req->in.args[0].size = sizeof(inarg); fuse_do_ioctl()
2530 req->in.args[0].value = &inarg; fuse_do_ioctl()
2532 req->in.numargs++; fuse_do_ioctl()
2533 req->in.args[1].size = in_size; fuse_do_ioctl()
2534 req->in.argpages = 1; fuse_do_ioctl()
2542 req->out.numargs = 2; fuse_do_ioctl()
2543 req->out.args[0].size = sizeof(outarg); fuse_do_ioctl()
2544 req->out.args[0].value = &outarg; fuse_do_ioctl()
2545 req->out.args[1].size = out_size; fuse_do_ioctl()
2546 req->out.argpages = 1; fuse_do_ioctl()
2547 req->out.argvar = 1; fuse_do_ioctl()
2549 fuse_request_send(fc, req); fuse_do_ioctl()
2550 err = req->out.h.error; fuse_do_ioctl()
2551 transferred = req->out.args[1].size; fuse_do_ioctl()
2552 fuse_put_request(fc, req); fuse_do_ioctl()
2553 req = NULL; fuse_do_ioctl()
2607 if (req) fuse_do_ioctl()
2608 fuse_put_request(fc, req); fuse_do_ioctl()
/linux-4.1.27/drivers/mfd/
H A Dpcf50633-adc.c85 adc_enqueue_request(struct pcf50633 *pcf, struct pcf50633_adc_request *req) adc_enqueue_request() argument
101 adc->queue[tail] = req; adc_enqueue_request()
114 struct pcf50633_adc_sync_request *req = param; pcf50633_adc_sync_read_callback() local
116 req->result = result; pcf50633_adc_sync_read_callback()
117 complete(&req->completion); pcf50633_adc_sync_read_callback()
122 struct pcf50633_adc_sync_request req; pcf50633_adc_sync_read() local
125 init_completion(&req.completion); pcf50633_adc_sync_read()
128 pcf50633_adc_sync_read_callback, &req); pcf50633_adc_sync_read()
132 wait_for_completion(&req.completion); pcf50633_adc_sync_read()
134 return req.result; pcf50633_adc_sync_read()
142 struct pcf50633_adc_request *req; pcf50633_adc_async_read() local
144 /* req is freed when the result is ready, in interrupt handler */ pcf50633_adc_async_read()
145 req = kmalloc(sizeof(*req), GFP_KERNEL); pcf50633_adc_async_read()
146 if (!req) pcf50633_adc_async_read()
149 req->mux = mux; pcf50633_adc_async_read()
150 req->avg = avg; pcf50633_adc_async_read()
151 req->callback = callback; pcf50633_adc_async_read()
152 req->callback_param = callback_param; pcf50633_adc_async_read()
154 return adc_enqueue_request(pcf, req); pcf50633_adc_async_read()
176 struct pcf50633_adc_request *req; pcf50633_adc_irq() local
182 req = adc->queue[head]; pcf50633_adc_irq()
183 if (WARN_ON(!req)) { pcf50633_adc_irq()
197 req->callback(pcf, req->callback_param, res); pcf50633_adc_irq()
198 kfree(req); pcf50633_adc_irq()
/linux-4.1.27/include/net/
H A Drequest_sock.h36 struct request_sock *req);
38 struct request_sock *req);
41 void (*destructor)(struct request_sock *req);
42 void (*syn_ack_timeout)(const struct request_sock *req);
45 int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req);
74 struct request_sock *req = kmem_cache_alloc(ops->slab, GFP_ATOMIC); reqsk_alloc() local
76 if (req) { reqsk_alloc()
77 req->rsk_ops = ops; reqsk_alloc()
79 req->rsk_listener = sk_listener; reqsk_alloc()
84 atomic_set(&req->rsk_refcnt, 0); reqsk_alloc()
86 return req; reqsk_alloc()
94 static inline struct sock *req_to_sk(struct request_sock *req) req_to_sk() argument
96 return (struct sock *)req; req_to_sk()
99 static inline void reqsk_free(struct request_sock *req) reqsk_free() argument
102 WARN_ON_ONCE(atomic_read(&req->rsk_refcnt) != 0); reqsk_free()
104 req->rsk_ops->destructor(req); reqsk_free()
105 if (req->rsk_listener) reqsk_free()
106 sock_put(req->rsk_listener); reqsk_free()
107 kmem_cache_free(req->rsk_ops->slab, req); reqsk_free()
110 static inline void reqsk_put(struct request_sock *req) reqsk_put() argument
112 if (atomic_dec_and_test(&req->rsk_refcnt)) reqsk_put()
113 reqsk_free(req); reqsk_put()
198 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
204 struct request_sock *req = queue->rskq_accept_head; reqsk_queue_yank_acceptq() local
207 return req; reqsk_queue_yank_acceptq()
216 struct request_sock *req, reqsk_queue_add()
220 req->sk = child; reqsk_queue_add()
224 queue->rskq_accept_head = req; reqsk_queue_add()
226 queue->rskq_accept_tail->dl_next = req; reqsk_queue_add()
228 queue->rskq_accept_tail = req; reqsk_queue_add()
229 req->dl_next = NULL; reqsk_queue_add()
234 struct request_sock *req = queue->rskq_accept_head; reqsk_queue_remove() local
236 WARN_ON(req == NULL); reqsk_queue_remove()
238 queue->rskq_accept_head = req->dl_next; reqsk_queue_remove()
242 return req; reqsk_queue_remove()
246 const struct request_sock *req) reqsk_queue_removed()
250 if (req->num_timeout == 0) reqsk_queue_removed()
291 u32 hash, struct request_sock *req,
215 reqsk_queue_add(struct request_sock_queue *queue, struct request_sock *req, struct sock *parent, struct sock *child) reqsk_queue_add() argument
245 reqsk_queue_removed(struct request_sock_queue *queue, const struct request_sock *req) reqsk_queue_removed() argument
/linux-4.1.27/drivers/crypto/qce/
H A Dsha.c40 struct ahash_request *req = ahash_request_cast(async_req); qce_ahash_done() local
41 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); qce_ahash_done()
42 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); qce_ahash_done()
54 qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, qce_ahash_done()
59 if (req->result) qce_ahash_done()
60 memcpy(req->result, result->auth_iv, digestsize); qce_ahash_done()
69 req->src = rctx->src_orig; qce_ahash_done()
70 req->nbytes = rctx->nbytes_orig; qce_ahash_done()
79 struct ahash_request *req = ahash_request_cast(async_req); qce_ahash_async_req_handle() local
80 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); qce_ahash_async_req_handle()
95 rctx->src_nents = qce_countsg(req->src, req->nbytes, qce_ahash_async_req_handle()
97 ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, qce_ahash_async_req_handle()
108 ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents, qce_ahash_async_req_handle()
126 qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE, qce_ahash_async_req_handle()
131 static int qce_ahash_init(struct ahash_request *req) qce_ahash_init() argument
133 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); qce_ahash_init()
134 struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); qce_ahash_init()
146 static int qce_ahash_export(struct ahash_request *req, void *out) qce_ahash_export() argument
148 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); qce_ahash_export()
149 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); qce_ahash_export()
176 static int qce_import_common(struct ahash_request *req, u64 in_count, qce_import_common() argument
179 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); qce_import_common()
180 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); qce_import_common()
211 static int qce_ahash_import(struct ahash_request *req, const void *in) qce_ahash_import() argument
213 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); qce_ahash_import()
221 ret = qce_import_common(req, state->count, state->state, qce_ahash_import()
226 ret = qce_import_common(req, state->count, state->state, qce_ahash_import()
233 static int qce_ahash_update(struct ahash_request *req) qce_ahash_update() argument
235 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); qce_ahash_update()
236 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); qce_ahash_update()
237 struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); qce_ahash_update()
246 rctx->count += req->nbytes; qce_ahash_update()
249 total = req->nbytes + rctx->buflen; qce_ahash_update()
252 scatterwalk_map_and_copy(rctx->buf + rctx->buflen, req->src, qce_ahash_update()
253 0, req->nbytes, 0); qce_ahash_update()
254 rctx->buflen += req->nbytes; qce_ahash_update()
258 /* save the original req structure fields */ qce_ahash_update()
259 rctx->src_orig = req->src; qce_ahash_update()
260 rctx->nbytes_orig = req->nbytes; qce_ahash_update()
272 unsigned int src_offset = req->nbytes - hash_later; qce_ahash_update()
273 scatterwalk_map_and_copy(rctx->buf, req->src, src_offset, qce_ahash_update()
281 sg = sg_last = req->src; qce_ahash_update()
299 scatterwalk_sg_chain(rctx->sg, 2, req->src); qce_ahash_update()
300 req->src = rctx->sg; qce_ahash_update()
303 req->nbytes = nbytes; qce_ahash_update()
306 return qce->async_req_enqueue(tmpl->qce, &req->base); qce_ahash_update()
309 static int qce_ahash_final(struct ahash_request *req) qce_ahash_final() argument
311 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); qce_ahash_final()
312 struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); qce_ahash_final()
320 rctx->src_orig = req->src; qce_ahash_final()
321 rctx->nbytes_orig = req->nbytes; qce_ahash_final()
326 req->src = rctx->sg; qce_ahash_final()
327 req->nbytes = rctx->buflen; qce_ahash_final()
329 return qce->async_req_enqueue(tmpl->qce, &req->base); qce_ahash_final()
332 static int qce_ahash_digest(struct ahash_request *req) qce_ahash_digest() argument
334 struct qce_sha_reqctx *rctx = ahash_request_ctx(req); qce_ahash_digest()
335 struct qce_alg_template *tmpl = to_ahash_tmpl(req->base.tfm); qce_ahash_digest()
339 ret = qce_ahash_init(req); qce_ahash_digest()
343 rctx->src_orig = req->src; qce_ahash_digest()
344 rctx->nbytes_orig = req->nbytes; qce_ahash_digest()
348 return qce->async_req_enqueue(tmpl->qce, &req->base); qce_ahash_digest()
356 static void qce_digest_complete(struct crypto_async_request *req, int error) qce_digest_complete() argument
358 struct qce_ahash_result *result = req->data; qce_digest_complete()
373 struct ahash_request *req; qce_ahash_hmac_setkey() local
401 req = ahash_request_alloc(ahash_tfm, GFP_KERNEL); qce_ahash_hmac_setkey()
402 if (!req) { qce_ahash_hmac_setkey()
408 ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, qce_ahash_hmac_setkey()
420 ahash_request_set_crypt(req, &sg, ctx->authkey, keylen); qce_ahash_hmac_setkey()
422 ret = crypto_ahash_digest(req); qce_ahash_hmac_setkey()
434 ahash_request_free(req); qce_ahash_hmac_setkey()
H A Dablkcipher.c28 struct ablkcipher_request *req = ablkcipher_request_cast(async_req); qce_ablkcipher_done() local
29 struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); qce_ablkcipher_done()
37 diff_dst = (req->src != req->dst) ? true : false; qce_ablkcipher_done()
64 struct ablkcipher_request *req = ablkcipher_request_cast(async_req); qce_ablkcipher_async_req_handle() local
65 struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); qce_ablkcipher_async_req_handle()
66 struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); qce_ablkcipher_async_req_handle()
75 rctx->iv = req->info; qce_ablkcipher_async_req_handle()
77 rctx->cryptlen = req->nbytes; qce_ablkcipher_async_req_handle()
79 diff_dst = (req->src != req->dst) ? true : false; qce_ablkcipher_async_req_handle()
83 rctx->src_nents = qce_countsg(req->src, req->nbytes, qce_ablkcipher_async_req_handle()
86 rctx->dst_nents = qce_countsg(req->dst, req->nbytes, qce_ablkcipher_async_req_handle()
95 gfp = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? qce_ablkcipher_async_req_handle()
104 sg = qce_sgtable_add(&rctx->dst_tbl, req->dst); qce_ablkcipher_async_req_handle()
125 ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, dir_src, qce_ablkcipher_async_req_handle()
129 rctx->src_sg = req->src; qce_ablkcipher_async_req_handle()
142 ret = qce_start(async_req, tmpl->crypto_alg_type, req->nbytes, 0); qce_ablkcipher_async_req_handle()
152 qce_unmapsg(qce->dev, req->src, rctx->src_nents, dir_src, qce_ablkcipher_async_req_handle()
203 static int qce_ablkcipher_crypt(struct ablkcipher_request *req, int encrypt) qce_ablkcipher_crypt() argument
206 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); qce_ablkcipher_crypt()
208 struct qce_cipher_reqctx *rctx = ablkcipher_request_ctx(req); qce_ablkcipher_crypt()
217 ablkcipher_request_set_tfm(req, ctx->fallback); qce_ablkcipher_crypt()
218 ret = encrypt ? crypto_ablkcipher_encrypt(req) : qce_ablkcipher_crypt()
219 crypto_ablkcipher_decrypt(req); qce_ablkcipher_crypt()
220 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); qce_ablkcipher_crypt()
224 return tmpl->qce->async_req_enqueue(tmpl->qce, &req->base); qce_ablkcipher_crypt()
227 static int qce_ablkcipher_encrypt(struct ablkcipher_request *req) qce_ablkcipher_encrypt() argument
229 return qce_ablkcipher_crypt(req, 1); qce_ablkcipher_encrypt()
232 static int qce_ablkcipher_decrypt(struct ablkcipher_request *req) qce_ablkcipher_decrypt() argument
234 return qce_ablkcipher_crypt(req, 0); qce_ablkcipher_decrypt()
/linux-4.1.27/drivers/net/wireless/ti/wl18xx/
H A Dscan.c42 struct cfg80211_scan_request *req) wl18xx_scan_send()
78 WARN_ON(req->n_ssids > 1); wl18xx_scan_send()
86 wlcore_set_scan_chan_params(wl, cmd_channels, req->channels, wl18xx_scan_send()
87 req->n_channels, req->n_ssids, wl18xx_scan_send()
97 if (req->no_cck) wl18xx_scan_send()
102 if (req->n_ssids) { wl18xx_scan_send()
103 cmd->ssid_len = req->ssids[0].ssid_len; wl18xx_scan_send()
104 memcpy(cmd->ssid, req->ssids[0].ssid, cmd->ssid_len); wl18xx_scan_send()
112 req->ssids ? req->ssids[0].ssid : NULL, wl18xx_scan_send()
113 req->ssids ? req->ssids[0].ssid_len : 0, wl18xx_scan_send()
114 req->ie, wl18xx_scan_send()
115 req->ie_len, wl18xx_scan_send()
129 req->ssids ? req->ssids[0].ssid : NULL, wl18xx_scan_send()
130 req->ssids ? req->ssids[0].ssid_len : 0, wl18xx_scan_send()
131 req->ie, wl18xx_scan_send()
132 req->ie_len, wl18xx_scan_send()
167 struct cfg80211_sched_scan_request *req, wl18xx_scan_sched_scan_config()
178 filter_type = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req); wl18xx_scan_sched_scan_config()
221 wlcore_set_scan_chan_params(wl, cmd_channels, req->channels, wl18xx_scan_sched_scan_config()
222 req->n_channels, req->n_ssids, wl18xx_scan_sched_scan_config()
227 cmd->long_cycles_sec = cpu_to_le16(req->interval); wl18xx_scan_sched_scan_config()
242 req->ssids ? req->ssids[0].ssid : NULL, wl18xx_scan_sched_scan_config()
243 req->ssids ? req->ssids[0].ssid_len : 0, wl18xx_scan_sched_scan_config()
259 req->ssids ? req->ssids[0].ssid : NULL, wl18xx_scan_sched_scan_config()
260 req->ssids ? req->ssids[0].ssid_len : 0, wl18xx_scan_sched_scan_config()
287 struct cfg80211_sched_scan_request *req, wl18xx_sched_scan_start()
290 return wl18xx_scan_sched_scan_config(wl, wlvif, req, ies); wl18xx_sched_scan_start()
326 struct cfg80211_scan_request *req) wl18xx_scan_start()
328 return wl18xx_scan_send(wl, wlvif, req); wl18xx_scan_start()
41 wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct cfg80211_scan_request *req) wl18xx_scan_send() argument
165 wl18xx_scan_sched_scan_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) wl18xx_scan_sched_scan_config() argument
286 wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) wl18xx_sched_scan_start() argument
325 wl18xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif, struct cfg80211_scan_request *req) wl18xx_scan_start() argument
/linux-4.1.27/drivers/base/power/
H A Dqos.c133 * @req: Constraint request to apply
141 static int apply_constraint(struct dev_pm_qos_request *req, apply_constraint() argument
144 struct dev_pm_qos *qos = req->dev->power.qos; apply_constraint()
147 switch(req->type) { apply_constraint()
150 &req->data.pnode, action, value); apply_constraint()
155 req); apply_constraint()
160 &req->data.pnode, action, value); apply_constraint()
163 req->dev->power.set_latency_tolerance(req->dev, value); apply_constraint()
167 ret = pm_qos_update_flags(&qos->flags, &req->data.flr, apply_constraint()
237 struct dev_pm_qos_request *req, *tmp; dev_pm_qos_constraints_destroy() local
261 plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { dev_pm_qos_constraints_destroy()
266 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); dev_pm_qos_constraints_destroy()
267 memset(req, 0, sizeof(*req)); dev_pm_qos_constraints_destroy()
270 plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) { dev_pm_qos_constraints_destroy()
271 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); dev_pm_qos_constraints_destroy()
272 memset(req, 0, sizeof(*req)); dev_pm_qos_constraints_destroy()
275 list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) { dev_pm_qos_constraints_destroy()
276 apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); dev_pm_qos_constraints_destroy()
277 memset(req, 0, sizeof(*req)); dev_pm_qos_constraints_destroy()
294 struct dev_pm_qos_request *req) dev_pm_qos_invalid_request()
296 return !req || (req->type == DEV_PM_QOS_LATENCY_TOLERANCE dev_pm_qos_invalid_request()
301 struct dev_pm_qos_request *req, __dev_pm_qos_add_request()
306 if (!dev || dev_pm_qos_invalid_request(dev, req)) __dev_pm_qos_add_request()
309 if (WARN(dev_pm_qos_request_active(req), __dev_pm_qos_add_request()
320 req->dev = dev; __dev_pm_qos_add_request()
321 req->type = type; __dev_pm_qos_add_request()
322 ret = apply_constraint(req, PM_QOS_ADD_REQ, value); __dev_pm_qos_add_request()
330 * @req: pointer to a preallocated handle
349 int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, dev_pm_qos_add_request() argument
355 ret = __dev_pm_qos_add_request(dev, req, type, value); dev_pm_qos_add_request()
363 * @req : PM QoS request to modify.
366 static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req, __dev_pm_qos_update_request() argument
372 if (!req) /*guard against callers passing in null */ __dev_pm_qos_update_request()
375 if (WARN(!dev_pm_qos_request_active(req), __dev_pm_qos_update_request()
379 if (IS_ERR_OR_NULL(req->dev->power.qos)) __dev_pm_qos_update_request()
382 switch(req->type) { __dev_pm_qos_update_request()
385 curr_value = req->data.pnode.prio; __dev_pm_qos_update_request()
388 curr_value = req->data.flr.flags; __dev_pm_qos_update_request()
394 trace_dev_pm_qos_update_request(dev_name(req->dev), req->type, __dev_pm_qos_update_request()
397 ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value); __dev_pm_qos_update_request()
404 * @req : handle to list element holding a dev_pm_qos request to use
420 int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value) dev_pm_qos_update_request() argument
425 ret = __dev_pm_qos_update_request(req, new_value); dev_pm_qos_update_request()
431 static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req) __dev_pm_qos_remove_request() argument
435 if (!req) /*guard against callers passing in null */ __dev_pm_qos_remove_request()
438 if (WARN(!dev_pm_qos_request_active(req), __dev_pm_qos_remove_request()
442 if (IS_ERR_OR_NULL(req->dev->power.qos)) __dev_pm_qos_remove_request()
445 trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type, __dev_pm_qos_remove_request()
447 ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE); __dev_pm_qos_remove_request()
448 memset(req, 0, sizeof(*req)); __dev_pm_qos_remove_request()
454 * @req: handle to request list element
467 int dev_pm_qos_remove_request(struct dev_pm_qos_request *req) dev_pm_qos_remove_request() argument
472 ret = __dev_pm_qos_remove_request(req); dev_pm_qos_remove_request()
571 * @req: Pointer to the preallocated handle.
576 struct dev_pm_qos_request *req, dev_pm_qos_add_ancestor_request()
597 ret = dev_pm_qos_add_request(ancestor, req, type, value); dev_pm_qos_add_ancestor_request()
600 req->dev = NULL; dev_pm_qos_add_ancestor_request()
609 struct dev_pm_qos_request *req = NULL; __dev_pm_qos_drop_user_request() local
613 req = dev->power.qos->resume_latency_req; __dev_pm_qos_drop_user_request()
617 req = dev->power.qos->latency_tolerance_req; __dev_pm_qos_drop_user_request()
621 req = dev->power.qos->flags_req; __dev_pm_qos_drop_user_request()
625 __dev_pm_qos_remove_request(req); __dev_pm_qos_drop_user_request()
626 kfree(req); __dev_pm_qos_drop_user_request()
644 struct dev_pm_qos_request *req; dev_pm_qos_expose_latency_limit() local
650 req = kzalloc(sizeof(*req), GFP_KERNEL); dev_pm_qos_expose_latency_limit()
651 if (!req) dev_pm_qos_expose_latency_limit()
654 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value); dev_pm_qos_expose_latency_limit()
656 kfree(req); dev_pm_qos_expose_latency_limit()
670 __dev_pm_qos_remove_request(req); dev_pm_qos_expose_latency_limit()
671 kfree(req); dev_pm_qos_expose_latency_limit()
675 dev->power.qos->resume_latency_req = req; dev_pm_qos_expose_latency_limit()
720 struct dev_pm_qos_request *req; dev_pm_qos_expose_flags() local
726 req = kzalloc(sizeof(*req), GFP_KERNEL); dev_pm_qos_expose_flags()
727 if (!req) dev_pm_qos_expose_flags()
730 ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val); dev_pm_qos_expose_flags()
732 kfree(req); dev_pm_qos_expose_flags()
747 __dev_pm_qos_remove_request(req); dev_pm_qos_expose_flags()
748 kfree(req); dev_pm_qos_expose_flags()
752 dev->power.qos->flags_req = req; dev_pm_qos_expose_flags()
856 struct dev_pm_qos_request *req; dev_pm_qos_update_user_latency_tolerance() local
862 req = kzalloc(sizeof(*req), GFP_KERNEL); dev_pm_qos_update_user_latency_tolerance()
863 if (!req) { dev_pm_qos_update_user_latency_tolerance()
867 ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val); dev_pm_qos_update_user_latency_tolerance()
869 kfree(req); dev_pm_qos_update_user_latency_tolerance()
872 dev->power.qos->latency_tolerance_req = req; dev_pm_qos_update_user_latency_tolerance()
293 dev_pm_qos_invalid_request(struct device *dev, struct dev_pm_qos_request *req) dev_pm_qos_invalid_request() argument
300 __dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) __dev_pm_qos_add_request() argument
575 dev_pm_qos_add_ancestor_request(struct device *dev, struct dev_pm_qos_request *req, enum dev_pm_qos_req_type type, s32 value) dev_pm_qos_add_ancestor_request() argument
/linux-4.1.27/include/scsi/
H A Dscsi_tcq.h17 struct request *req = NULL; scsi_mq_find_tag() local
20 req = blk_mq_tag_to_rq(shost->tag_set.tags[hwq], scsi_mq_find_tag()
22 return req ? (struct scsi_cmnd *)req->special : NULL; scsi_mq_find_tag()
35 struct request *req; scsi_find_tag() local
41 req = blk_queue_find_tag(sdev->request_queue, tag); scsi_find_tag()
42 return req ? (struct scsi_cmnd *)req->special : NULL; scsi_find_tag()
89 struct request *req; scsi_host_find_tag() local
94 req = blk_map_queue_find_tag(shost->bqt, tag); scsi_host_find_tag()
95 return req ? (struct scsi_cmnd *)req->special : NULL; scsi_host_find_tag()
/linux-4.1.27/drivers/base/
H A Ddevtmpfs.c39 static struct req { struct
40 struct req *next;
85 struct req req; devtmpfs_create_node() local
90 req.mode = 0; devtmpfs_create_node()
91 req.uid = GLOBAL_ROOT_UID; devtmpfs_create_node()
92 req.gid = GLOBAL_ROOT_GID; devtmpfs_create_node()
93 req.name = device_get_devnode(dev, &req.mode, &req.uid, &req.gid, &tmp); devtmpfs_create_node()
94 if (!req.name) devtmpfs_create_node()
97 if (req.mode == 0) devtmpfs_create_node()
98 req.mode = 0600; devtmpfs_create_node()
100 req.mode |= S_IFBLK; devtmpfs_create_node()
102 req.mode |= S_IFCHR; devtmpfs_create_node()
104 req.dev = dev; devtmpfs_create_node()
106 init_completion(&req.done); devtmpfs_create_node()
109 req.next = requests; devtmpfs_create_node()
110 requests = &req; devtmpfs_create_node()
114 wait_for_completion(&req.done); devtmpfs_create_node()
118 return req.err; devtmpfs_create_node()
124 struct req req; devtmpfs_delete_node() local
129 req.name = device_get_devnode(dev, NULL, NULL, NULL, &tmp); devtmpfs_delete_node()
130 if (!req.name) devtmpfs_delete_node()
133 req.mode = 0; devtmpfs_delete_node()
134 req.dev = dev; devtmpfs_delete_node()
136 init_completion(&req.done); devtmpfs_delete_node()
139 req.next = requests; devtmpfs_delete_node()
140 requests = &req; devtmpfs_delete_node()
144 wait_for_completion(&req.done); devtmpfs_delete_node()
147 return req.err; devtmpfs_delete_node()
392 struct req *req = requests; devtmpfsd() local
395 while (req) { devtmpfsd()
396 struct req *next = req->next; devtmpfsd()
397 req->err = handle(req->name, req->mode, devtmpfsd()
398 req->uid, req->gid, req->dev); devtmpfsd()
399 complete(&req->done); devtmpfsd()
400 req = next; devtmpfsd()
/linux-4.1.27/drivers/crypto/
H A Dpicoxcell_crypto.c94 struct crypto_async_request *req; member in struct:spacc_req
100 void (*complete)(struct spacc_req *req);
180 static int spacc_ablk_submit(struct spacc_req *req);
321 static int spacc_aead_make_ddts(struct spacc_req *req, u8 *giv) spacc_aead_make_ddts() argument
323 struct aead_request *areq = container_of(req->req, struct aead_request, spacc_aead_make_ddts()
325 struct spacc_engine *engine = req->engine; spacc_aead_make_ddts()
334 src_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->src_addr); spacc_aead_make_ddts()
338 dst_ddt = dma_pool_alloc(engine->req_pool, GFP_ATOMIC, &req->dst_addr); spacc_aead_make_ddts()
340 dma_pool_free(engine->req_pool, src_ddt, req->src_addr); spacc_aead_make_ddts()
344 req->src_ddt = src_ddt; spacc_aead_make_ddts()
345 req->dst_ddt = dst_ddt; spacc_aead_make_ddts()
366 req->giv_pa = iv_addr; spacc_aead_make_ddts()
374 if (req->is_encrypt) spacc_aead_make_ddts()
380 if (giv || req->is_encrypt) spacc_aead_make_ddts()
404 static void spacc_aead_free_ddts(struct spacc_req *req) spacc_aead_free_ddts() argument
406 struct aead_request *areq = container_of(req->req, struct aead_request, spacc_aead_free_ddts()
408 struct spacc_alg *alg = to_spacc_alg(req->req->tfm->__crt_alg); spacc_aead_free_ddts()
409 struct spacc_ablk_ctx *aead_ctx = crypto_tfm_ctx(req->req->tfm); spacc_aead_free_ddts()
425 dma_unmap_single(engine->dev, req->giv_pa, ivsize, DMA_BIDIRECTIONAL); spacc_aead_free_ddts()
427 dma_pool_free(engine->req_pool, req->src_ddt, req->src_addr); spacc_aead_free_ddts()
428 dma_pool_free(engine->req_pool, req->dst_ddt, req->dst_addr); spacc_aead_free_ddts()
431 static void spacc_free_ddt(struct spacc_req *req, struct spacc_ddt *ddt, spacc_free_ddt() argument
437 dma_unmap_sg(req->engine->dev, payload, nents, dir); spacc_free_ddt()
438 dma_pool_free(req->engine->req_pool, ddt, ddt_addr); spacc_free_ddt()
544 static int spacc_aead_need_fallback(struct spacc_req *req) spacc_aead_need_fallback() argument
547 struct crypto_tfm *tfm = req->req->tfm; spacc_aead_need_fallback()
548 struct crypto_alg *alg = req->req->tfm->__crt_alg; spacc_aead_need_fallback()
552 aead_req = container_of(req->req, struct aead_request, base); spacc_aead_need_fallback()
566 static int spacc_aead_do_fallback(struct aead_request *req, unsigned alg_type, spacc_aead_do_fallback() argument
569 struct crypto_tfm *old_tfm = crypto_aead_tfm(crypto_aead_reqtfm(req)); spacc_aead_do_fallback()
579 aead_request_set_tfm(req, ctx->sw_cipher); spacc_aead_do_fallback()
580 err = is_encrypt ? crypto_aead_encrypt(req) : spacc_aead_do_fallback()
581 crypto_aead_decrypt(req); spacc_aead_do_fallback()
582 aead_request_set_tfm(req, __crypto_aead_cast(old_tfm)); spacc_aead_do_fallback()
589 static void spacc_aead_complete(struct spacc_req *req) spacc_aead_complete() argument
591 spacc_aead_free_ddts(req); spacc_aead_complete()
592 req->req->complete(req->req, req->result); spacc_aead_complete()
595 static int spacc_aead_submit(struct spacc_req *req) spacc_aead_submit() argument
597 struct crypto_tfm *tfm = req->req->tfm; spacc_aead_submit()
599 struct crypto_alg *alg = req->req->tfm->__crt_alg; spacc_aead_submit()
604 container_of(req->req, struct aead_request, base); spacc_aead_submit()
606 req->result = -EINPROGRESS; spacc_aead_submit()
607 req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->cipher_key, spacc_aead_submit()
612 writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET); spacc_aead_submit()
613 writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET); spacc_aead_submit()
623 if (!req->giv) { spacc_aead_submit()
627 proc_len += req->giv_len; spacc_aead_submit()
633 if (!req->is_encrypt) spacc_aead_submit()
642 ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) | spacc_aead_submit()
644 if (req->is_encrypt) spacc_aead_submit()
656 static int spacc_req_submit(struct spacc_req *req);
660 struct spacc_req *req; spacc_push() local
666 req = list_first_entry(&engine->pending, struct spacc_req, spacc_push()
668 list_move_tail(&req->list, &engine->in_progress); spacc_push()
670 req->result = spacc_req_submit(req); spacc_push()
681 static int spacc_aead_setup(struct aead_request *req, u8 *giv, spacc_aead_setup() argument
684 struct crypto_alg *alg = req->base.tfm->__crt_alg; spacc_aead_setup()
686 struct spacc_req *dev_req = aead_request_ctx(req); spacc_aead_setup()
689 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); spacc_aead_setup()
693 dev_req->req = &req->base; spacc_aead_setup()
700 return spacc_aead_do_fallback(req, alg_type, is_encrypt); spacc_aead_setup()
708 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { spacc_aead_setup()
728 static int spacc_aead_encrypt(struct aead_request *req) spacc_aead_encrypt() argument
730 struct crypto_aead *aead = crypto_aead_reqtfm(req); spacc_aead_encrypt()
734 return spacc_aead_setup(req, NULL, alg->type, 1); spacc_aead_encrypt()
737 static int spacc_aead_givencrypt(struct aead_givcrypt_request *req) spacc_aead_givencrypt() argument
739 struct crypto_aead *tfm = aead_givcrypt_reqtfm(req); spacc_aead_givencrypt()
746 memcpy(req->areq.iv, ctx->salt, ivsize); spacc_aead_givencrypt()
749 memset(req->giv, 0, ivsize - sizeof(u64)); spacc_aead_givencrypt()
752 seq = cpu_to_be64(req->seq); spacc_aead_givencrypt()
753 memcpy(req->giv + ivsize - len, &seq, len); spacc_aead_givencrypt()
755 return spacc_aead_setup(&req->areq, req->giv, alg->type, 1); spacc_aead_givencrypt()
758 static int spacc_aead_decrypt(struct aead_request *req) spacc_aead_decrypt() argument
760 struct crypto_aead *aead = crypto_aead_reqtfm(req); spacc_aead_decrypt()
764 return spacc_aead_setup(req, NULL, alg->type, 0); spacc_aead_decrypt()
910 static int spacc_ablk_need_fallback(struct spacc_req *req) spacc_ablk_need_fallback() argument
913 struct crypto_tfm *tfm = req->req->tfm; spacc_ablk_need_fallback()
914 struct crypto_alg *alg = req->req->tfm->__crt_alg; spacc_ablk_need_fallback()
925 static void spacc_ablk_complete(struct spacc_req *req) spacc_ablk_complete() argument
928 container_of(req->req, struct ablkcipher_request, base); spacc_ablk_complete()
931 spacc_free_ddt(req, req->src_ddt, req->src_addr, ablk_req->src, spacc_ablk_complete()
933 spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst, spacc_ablk_complete()
936 spacc_free_ddt(req, req->dst_ddt, req->dst_addr, ablk_req->dst, spacc_ablk_complete()
939 req->req->complete(req->req, req->result); spacc_ablk_complete()
942 static int spacc_ablk_submit(struct spacc_req *req) spacc_ablk_submit() argument
944 struct crypto_tfm *tfm = req->req->tfm; spacc_ablk_submit()
946 struct ablkcipher_request *ablk_req = ablkcipher_request_cast(req->req); spacc_ablk_submit()
947 struct crypto_alg *alg = req->req->tfm->__crt_alg; spacc_ablk_submit()
952 req->ctx_id = spacc_load_ctx(&ctx->generic, ctx->key, spacc_ablk_submit()
956 writel(req->src_addr, engine->regs + SPA_SRC_PTR_REG_OFFSET); spacc_ablk_submit()
957 writel(req->dst_addr, engine->regs + SPA_DST_PTR_REG_OFFSET); spacc_ablk_submit()
965 ctrl = spacc_alg->ctrl_default | (req->ctx_id << SPA_CTRL_CTX_IDX) | spacc_ablk_submit()
966 (req->is_encrypt ? (1 << SPA_CTRL_ENCRYPT_IDX) : spacc_ablk_submit()
976 static int spacc_ablk_do_fallback(struct ablkcipher_request *req, spacc_ablk_do_fallback() argument
980 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); spacc_ablk_do_fallback()
992 ablkcipher_request_set_tfm(req, ctx->sw_cipher); spacc_ablk_do_fallback()
993 err = is_encrypt ? crypto_ablkcipher_encrypt(req) : spacc_ablk_do_fallback()
994 crypto_ablkcipher_decrypt(req); spacc_ablk_do_fallback()
995 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(old_tfm)); spacc_ablk_do_fallback()
1000 static int spacc_ablk_setup(struct ablkcipher_request *req, unsigned alg_type, spacc_ablk_setup() argument
1003 struct crypto_alg *alg = req->base.tfm->__crt_alg; spacc_ablk_setup()
1005 struct spacc_req *dev_req = ablkcipher_request_ctx(req); spacc_ablk_setup()
1009 dev_req->req = &req->base; spacc_ablk_setup()
1016 return spacc_ablk_do_fallback(req, alg_type, is_encrypt); spacc_ablk_setup()
1022 if (req->src != req->dst) { spacc_ablk_setup()
1023 dev_req->src_ddt = spacc_sg_to_ddt(engine, req->src, spacc_ablk_setup()
1024 req->nbytes, DMA_TO_DEVICE, &dev_req->src_addr); spacc_ablk_setup()
1028 dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst, spacc_ablk_setup()
1029 req->nbytes, DMA_FROM_DEVICE, &dev_req->dst_addr); spacc_ablk_setup()
1033 dev_req->dst_ddt = spacc_sg_to_ddt(engine, req->dst, spacc_ablk_setup()
1034 req->nbytes, DMA_BIDIRECTIONAL, &dev_req->dst_addr); spacc_ablk_setup()
1051 if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) { spacc_ablk_setup()
1066 spacc_free_ddt(dev_req, dev_req->dst_ddt, dev_req->dst_addr, req->dst, spacc_ablk_setup()
1067 req->nbytes, req->src == req->dst ? spacc_ablk_setup()
1070 if (req->src != req->dst) spacc_ablk_setup()
1072 req->src, req->nbytes, DMA_TO_DEVICE); spacc_ablk_setup()
1112 static int spacc_ablk_encrypt(struct ablkcipher_request *req) spacc_ablk_encrypt() argument
1114 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req); spacc_ablk_encrypt()
1118 return spacc_ablk_setup(req, alg->type, 1); spacc_ablk_encrypt()
1121 static int spacc_ablk_decrypt(struct ablkcipher_request *req) spacc_ablk_decrypt() argument
1123 struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req); spacc_ablk_decrypt()
1127 return spacc_ablk_setup(req, alg->type, 0); spacc_ablk_decrypt()
1138 struct spacc_req *req; spacc_process_done() local
1144 req = list_first_entry(&engine->in_progress, struct spacc_req, spacc_process_done()
1146 list_move_tail(&req->list, &engine->completed); spacc_process_done()
1151 req->result = (readl(engine->regs + SPA_STATUS_REG_OFFSET) & spacc_process_done()
1158 if (unlikely(req->result)) { spacc_process_done()
1159 switch (req->result) { spacc_process_done()
1161 req->result = -EBADMSG; spacc_process_done()
1167 req->result = -EFAULT; spacc_process_done()
1173 req->result = -EIO; spacc_process_done()
1202 static int spacc_req_submit(struct spacc_req *req) spacc_req_submit() argument
1204 struct crypto_alg *alg = req->req->tfm->__crt_alg; spacc_req_submit()
1207 return spacc_aead_submit(req); spacc_req_submit()
1209 return spacc_ablk_submit(req); spacc_req_submit()
1215 struct spacc_req *req, *tmp; spacc_spacc_complete() local
1228 list_for_each_entry_safe(req, tmp, &completed, list) { spacc_spacc_complete()
1229 list_del(&req->list); spacc_spacc_complete()
1230 req->complete(req); spacc_spacc_complete()
H A Dimg-hash.c132 struct ahash_request *req; member in struct:img_hash_dev
165 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_start()
211 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_dma_callback()
224 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_xmit_dma()
254 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_write_via_cpu()
256 ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg), img_hash_write_via_cpu()
257 ctx->buffer, hdev->req->nbytes); img_hash_write_via_cpu()
259 ctx->total = hdev->req->nbytes; img_hash_write_via_cpu()
269 static int img_hash_finish(struct ahash_request *req) img_hash_finish() argument
271 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); img_hash_finish()
273 if (!req->result) img_hash_finish()
276 memcpy(req->result, ctx->digest, ctx->digsize); img_hash_finish()
281 static void img_hash_copy_hash(struct ahash_request *req) img_hash_copy_hash() argument
283 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); img_hash_copy_hash()
291 static void img_hash_finish_req(struct ahash_request *req, int err) img_hash_finish_req() argument
293 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); img_hash_finish_req()
297 img_hash_copy_hash(req); img_hash_finish_req()
299 err = img_hash_finish(req); img_hash_finish_req()
308 if (req->base.complete) img_hash_finish_req()
309 req->base.complete(&req->base, err); img_hash_finish_req()
314 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_write_via_dma()
359 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_dma_task()
430 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_write_via_dma_stop()
440 struct ahash_request *req = hdev->req; img_hash_process_data() local
441 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); img_hash_process_data()
446 if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) { img_hash_process_data()
448 req->nbytes); img_hash_process_data()
452 req->nbytes); img_hash_process_data()
467 nbits = (u64)hdev->req->nbytes << 3; img_hash_hw_init()
481 static int img_hash_init(struct ahash_request *req) img_hash_init() argument
483 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); img_hash_init()
484 struct img_hash_request_ctx *rctx = ahash_request_ctx(req); img_hash_init()
488 rctx->fallback_req.base.flags = req->base.flags img_hash_init()
495 struct ahash_request *req) img_hash_handle_queue()
504 if (req) img_hash_handle_queue()
505 res = ahash_enqueue_request(&hdev->queue, req); img_hash_handle_queue()
525 req = ahash_request_cast(async_req); img_hash_handle_queue()
526 hdev->req = req; img_hash_handle_queue()
528 ctx = ahash_request_ctx(req); img_hash_handle_queue()
530 dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n", img_hash_handle_queue()
531 ctx->op, req->nbytes); img_hash_handle_queue()
540 img_hash_finish_req(req, err); img_hash_handle_queue()
545 static int img_hash_update(struct ahash_request *req) img_hash_update() argument
547 struct img_hash_request_ctx *rctx = ahash_request_ctx(req); img_hash_update()
548 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); img_hash_update()
552 rctx->fallback_req.base.flags = req->base.flags img_hash_update()
554 rctx->fallback_req.nbytes = req->nbytes; img_hash_update()
555 rctx->fallback_req.src = req->src; img_hash_update()
560 static int img_hash_final(struct ahash_request *req) img_hash_final() argument
562 struct img_hash_request_ctx *rctx = ahash_request_ctx(req); img_hash_final()
563 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); img_hash_final()
567 rctx->fallback_req.base.flags = req->base.flags img_hash_final()
569 rctx->fallback_req.result = req->result; img_hash_final()
574 static int img_hash_finup(struct ahash_request *req) img_hash_finup() argument
576 struct img_hash_request_ctx *rctx = ahash_request_ctx(req); img_hash_finup()
577 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); img_hash_finup()
581 rctx->fallback_req.base.flags = req->base.flags img_hash_finup()
583 rctx->fallback_req.nbytes = req->nbytes; img_hash_finup()
584 rctx->fallback_req.src = req->src; img_hash_finup()
585 rctx->fallback_req.result = req->result; img_hash_finup()
590 static int img_hash_digest(struct ahash_request *req) img_hash_digest() argument
592 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); img_hash_digest()
594 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); img_hash_digest()
636 ctx->total = req->nbytes; img_hash_digest()
637 ctx->sg = req->src; img_hash_digest()
638 ctx->sgfirst = req->src; img_hash_digest()
641 err = img_hash_handle_queue(tctx->hdev, req); img_hash_digest()
868 img_hash_finish_req(hdev->req, err); img_hash_done_task()
494 img_hash_handle_queue(struct img_hash_dev *hdev, struct ahash_request *req) img_hash_handle_queue() argument
H A Dmxs-dcp.c197 struct ablkcipher_request *req, int init) mxs_dcp_run_aes()
201 struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); mxs_dcp_run_aes()
253 struct ablkcipher_request *req = ablkcipher_request_cast(arq); mxs_dcp_aes_block_crypt() local
255 struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); mxs_dcp_aes_block_crypt()
257 struct scatterlist *dst = req->dst; mxs_dcp_aes_block_crypt()
258 struct scatterlist *src = req->src; mxs_dcp_aes_block_crypt()
259 const int nents = sg_nents(req->src); mxs_dcp_aes_block_crypt()
282 memcpy(key + AES_KEYSIZE_128, req->info, AES_KEYSIZE_128); mxs_dcp_aes_block_crypt()
289 for_each_sg(req->src, src, nents, i) { mxs_dcp_aes_block_crypt()
309 ret = mxs_dcp_run_aes(actx, req, init); mxs_dcp_aes_block_crypt()
375 static int mxs_dcp_block_fallback(struct ablkcipher_request *req, int enc) mxs_dcp_block_fallback() argument
378 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); mxs_dcp_block_fallback()
380 crypto_ablkcipher_reqtfm(req)); mxs_dcp_block_fallback()
383 ablkcipher_request_set_tfm(req, ctx->fallback); mxs_dcp_block_fallback()
386 ret = crypto_ablkcipher_encrypt(req); mxs_dcp_block_fallback()
388 ret = crypto_ablkcipher_decrypt(req); mxs_dcp_block_fallback()
390 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); mxs_dcp_block_fallback()
395 static int mxs_dcp_aes_enqueue(struct ablkcipher_request *req, int enc, int ecb) mxs_dcp_aes_enqueue() argument
398 struct crypto_async_request *arq = &req->base; mxs_dcp_aes_enqueue()
400 struct dcp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); mxs_dcp_aes_enqueue()
404 return mxs_dcp_block_fallback(req, enc); mxs_dcp_aes_enqueue()
411 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base); mxs_dcp_aes_enqueue()
419 static int mxs_dcp_aes_ecb_decrypt(struct ablkcipher_request *req) mxs_dcp_aes_ecb_decrypt() argument
421 return mxs_dcp_aes_enqueue(req, 0, 1); mxs_dcp_aes_ecb_decrypt()
424 static int mxs_dcp_aes_ecb_encrypt(struct ablkcipher_request *req) mxs_dcp_aes_ecb_encrypt() argument
426 return mxs_dcp_aes_enqueue(req, 1, 1); mxs_dcp_aes_ecb_encrypt()
429 static int mxs_dcp_aes_cbc_decrypt(struct ablkcipher_request *req) mxs_dcp_aes_cbc_decrypt() argument
431 return mxs_dcp_aes_enqueue(req, 0, 0); mxs_dcp_aes_cbc_decrypt()
434 static int mxs_dcp_aes_cbc_encrypt(struct ablkcipher_request *req) mxs_dcp_aes_cbc_encrypt() argument
436 return mxs_dcp_aes_enqueue(req, 1, 0); mxs_dcp_aes_cbc_encrypt()
509 static int mxs_dcp_run_sha(struct ahash_request *req) mxs_dcp_run_sha() argument
514 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); mxs_dcp_run_sha()
516 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); mxs_dcp_run_sha()
542 digest_phys = dma_map_single(sdcp->dev, req->result, mxs_dcp_run_sha()
563 struct ahash_request *req = ahash_request_cast(arq); dcp_sha_req_to_buf() local
564 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); dcp_sha_req_to_buf()
566 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); dcp_sha_req_to_buf()
568 const int nents = sg_nents(req->src); dcp_sha_req_to_buf()
583 for_each_sg(req->src, src, nents, i) { dcp_sha_req_to_buf()
603 ret = mxs_dcp_run_sha(req); dcp_sha_req_to_buf()
616 if (!req->result) dcp_sha_req_to_buf()
619 ret = mxs_dcp_run_sha(req); dcp_sha_req_to_buf()
627 swap(req->result[i], dcp_sha_req_to_buf()
628 req->result[halg->digestsize - i - 1]); dcp_sha_req_to_buf()
645 struct ahash_request *req; dcp_chan_thread_sha() local
660 req = ahash_request_cast(arq); dcp_chan_thread_sha()
661 rctx = ahash_request_ctx(req); dcp_chan_thread_sha()
676 static int dcp_sha_init(struct ahash_request *req) dcp_sha_init() argument
678 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); dcp_sha_init()
703 static int dcp_sha_update_fx(struct ahash_request *req, int fini) dcp_sha_update_fx() argument
707 struct dcp_sha_req_ctx *rctx = ahash_request_ctx(req); dcp_sha_update_fx()
708 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); dcp_sha_update_fx()
717 if (!req->nbytes && !fini) dcp_sha_update_fx()
730 ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base); dcp_sha_update_fx()
739 static int dcp_sha_update(struct ahash_request *req) dcp_sha_update() argument
741 return dcp_sha_update_fx(req, 0); dcp_sha_update()
744 static int dcp_sha_final(struct ahash_request *req) dcp_sha_final() argument
746 ahash_request_set_crypt(req, NULL, req->result, 0); dcp_sha_final()
747 req->nbytes = 0; dcp_sha_final()
748 return dcp_sha_update_fx(req, 1); dcp_sha_final()
751 static int dcp_sha_finup(struct ahash_request *req) dcp_sha_finup() argument
753 return dcp_sha_update_fx(req, 1); dcp_sha_finup()
756 static int dcp_sha_digest(struct ahash_request *req) dcp_sha_digest() argument
760 ret = dcp_sha_init(req); dcp_sha_digest()
764 return dcp_sha_finup(req); dcp_sha_digest()
196 mxs_dcp_run_aes(struct dcp_async_ctx *actx, struct ablkcipher_request *req, int init) mxs_dcp_run_aes() argument
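
mxs_dcp_block_fallback() is visible in full above and shows the other common fallback idiom for ablkcipher drivers: temporarily re-point the request at a software transform, run it synchronously, then restore the hardware tfm. A sketch of just that swap, with sample_async_ctx as a placeholder for the driver's context and the usual crypto headers assumed:

struct sample_async_ctx {
	struct crypto_ablkcipher *fallback;	/* allocated with CRYPTO_ALG_NEED_FALLBACK */
};

static int sample_block_fallback(struct ablkcipher_request *req, int enc)
{
	struct crypto_ablkcipher *hw = crypto_ablkcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_ablkcipher_tfm(hw);
	struct sample_async_ctx *ctx = crypto_ablkcipher_ctx(hw);
	int ret;

	/* run the request on the software implementation ... */
	ablkcipher_request_set_tfm(req, ctx->fallback);
	ret = enc ? crypto_ablkcipher_encrypt(req)
		  : crypto_ablkcipher_decrypt(req);
	/* ... and hand the request back to the hardware tfm before returning */
	ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm));

	return ret;
}
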
H A Dmv_cesa.c255 struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); mv_process_current_q() local
256 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); mv_process_current_q()
257 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); mv_process_current_q()
270 memcpy(cpg->sram + SRAM_DATA_IV, req->info, 16); mv_process_current_q()
310 struct ablkcipher_request *req = ablkcipher_request_cast(cpg->cur_req); mv_crypto_algo_completion() local
311 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); mv_crypto_algo_completion()
319 memcpy(req->info, cpg->sram + SRAM_DATA_IV_BUF, 16); mv_crypto_algo_completion()
324 struct ahash_request *req = ahash_request_cast(cpg->cur_req); mv_process_hash_current() local
325 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); mv_process_hash_current()
326 struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); mv_process_hash_current()
401 static int mv_hash_final_fallback(struct ahash_request *req) mv_hash_final_fallback() argument
403 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); mv_hash_final_fallback()
404 struct mv_req_hash_ctx *req_ctx = ahash_request_ctx(req); mv_hash_final_fallback()
421 rc = crypto_shash_final(shash, req->result); mv_hash_final_fallback()
437 struct ahash_request *req = ahash_request_cast(cpg->cur_req); mv_hash_algo_completion() local
438 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); mv_hash_algo_completion()
446 memcpy(req->result, cpg->sram + SRAM_DIGEST_BUF, mv_hash_algo_completion()
448 (req))); mv_hash_algo_completion()
451 mv_hash_final_fallback(req); mv_hash_algo_completion()
460 struct crypto_async_request *req = cpg->cur_req; dequeue_complete_req() local
503 req->complete(req, 0); dequeue_complete_req()
525 static void mv_start_new_crypt_req(struct ablkcipher_request *req) mv_start_new_crypt_req() argument
530 cpg->cur_req = &req->base; mv_start_new_crypt_req()
532 p->hw_nbytes = req->nbytes; mv_start_new_crypt_req()
537 num_sgs = count_sgs(req->src, req->nbytes); mv_start_new_crypt_req()
538 sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); mv_start_new_crypt_req()
540 num_sgs = count_sgs(req->dst, req->nbytes); mv_start_new_crypt_req()
541 sg_miter_start(&p->dst_sg_it, req->dst, num_sgs, SG_MITER_TO_SG); mv_start_new_crypt_req()
546 static void mv_start_new_hash_req(struct ahash_request *req) mv_start_new_hash_req() argument
549 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); mv_start_new_hash_req()
551 cpg->cur_req = &req->base; mv_start_new_hash_req()
553 hw_bytes = req->nbytes + ctx->extra_bytes; mv_start_new_hash_req()
563 num_sgs = count_sgs(req->src, req->nbytes); mv_start_new_hash_req()
564 sg_miter_start(&p->src_sg_it, req->src, num_sgs, SG_MITER_FROM_SG); mv_start_new_hash_req()
583 rc = mv_hash_final_fallback(req); mv_start_new_hash_req()
588 req->base.complete(&req->base, rc); mv_start_new_hash_req()
624 struct ablkcipher_request *req = queue_manag() local
626 mv_start_new_crypt_req(req); queue_manag()
628 struct ahash_request *req = queue_manag() local
630 mv_start_new_hash_req(req); queue_manag()
641 static int mv_handle_req(struct crypto_async_request *req) mv_handle_req() argument
647 ret = crypto_enqueue_request(&cpg->queue, req); mv_handle_req()
653 static int mv_enc_aes_ecb(struct ablkcipher_request *req) mv_enc_aes_ecb() argument
655 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); mv_enc_aes_ecb()
660 return mv_handle_req(&req->base); mv_enc_aes_ecb()
663 static int mv_dec_aes_ecb(struct ablkcipher_request *req) mv_dec_aes_ecb() argument
665 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); mv_dec_aes_ecb()
666 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); mv_dec_aes_ecb()
672 return mv_handle_req(&req->base); mv_dec_aes_ecb()
675 static int mv_enc_aes_cbc(struct ablkcipher_request *req) mv_enc_aes_cbc() argument
677 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); mv_enc_aes_cbc()
682 return mv_handle_req(&req->base); mv_enc_aes_cbc()
685 static int mv_dec_aes_cbc(struct ablkcipher_request *req) mv_dec_aes_cbc() argument
687 struct mv_ctx *ctx = crypto_tfm_ctx(req->base.tfm); mv_dec_aes_cbc()
688 struct mv_req_ctx *req_ctx = ablkcipher_request_ctx(req); mv_dec_aes_cbc()
694 return mv_handle_req(&req->base); mv_dec_aes_cbc()
722 static int mv_hash_init(struct ahash_request *req) mv_hash_init() argument
724 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); mv_hash_init()
725 mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 0, 0, mv_hash_init()
730 static int mv_hash_update(struct ahash_request *req) mv_hash_update() argument
732 if (!req->nbytes) mv_hash_update()
735 mv_update_hash_req_ctx(ahash_request_ctx(req), 0, req->nbytes); mv_hash_update()
736 return mv_handle_req(&req->base); mv_hash_update()
739 static int mv_hash_final(struct ahash_request *req) mv_hash_final() argument
741 struct mv_req_hash_ctx *ctx = ahash_request_ctx(req); mv_hash_final()
743 ahash_request_set_crypt(req, NULL, req->result, 0); mv_hash_final()
745 return mv_handle_req(&req->base); mv_hash_final()
748 static int mv_hash_finup(struct ahash_request *req) mv_hash_finup() argument
750 mv_update_hash_req_ctx(ahash_request_ctx(req), 1, req->nbytes); mv_hash_finup()
751 return mv_handle_req(&req->base); mv_hash_finup()
754 static int mv_hash_digest(struct ahash_request *req) mv_hash_digest() argument
756 const struct mv_tfm_hash_ctx *tfm_ctx = crypto_tfm_ctx(req->base.tfm); mv_hash_digest()
757 mv_init_hash_req_ctx(ahash_request_ctx(req), tfm_ctx->op, 1, mv_hash_digest()
758 req->nbytes, tfm_ctx->count_add); mv_hash_digest()
759 return mv_handle_req(&req->base); mv_hash_digest()
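
mv_handle_req() above condenses the asynchronous half of this driver: every entry point just enqueues the crypto_async_request and wakes the queue_manag() thread, which later dequeues and programs the engine. A sketch of that enqueue-and-kick step; cpg and its fields are placeholders mirroring the fragments, and the locking shown here is an assumption about the surrounding driver:

static struct sample_cpg {
	spinlock_t lock;
	struct crypto_queue queue;	/* backlog-capable request queue */
	struct task_struct *queue_th;	/* worker thread that drains the queue */
} *cpg;

static int sample_handle_req(struct crypto_async_request *req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cpg->lock, flags);
	ret = crypto_enqueue_request(&cpg->queue, req);
	spin_unlock_irqrestore(&cpg->lock, flags);
	wake_up_process(cpg->queue_th);

	/* -EINPROGRESS on success, -EBUSY if the request was backlogged */
	return ret;
}
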
H A Dsahara.c577 static int sahara_aes_process(struct ablkcipher_request *req) sahara_aes_process() argument
588 req->nbytes, req->src, req->dst); sahara_aes_process()
591 dev->total = req->nbytes; sahara_aes_process()
592 dev->in_sg = req->src; sahara_aes_process()
593 dev->out_sg = req->dst; sahara_aes_process()
595 rctx = ablkcipher_request_ctx(req); sahara_aes_process()
596 ctx = crypto_ablkcipher_ctx(crypto_ablkcipher_reqtfm(req)); sahara_aes_process()
600 if ((dev->flags & FLAGS_CBC) && req->info) sahara_aes_process()
601 memcpy(dev->iv_base, req->info, AES_KEYSIZE_128); sahara_aes_process()
664 static int sahara_aes_crypt(struct ablkcipher_request *req, unsigned long mode) sahara_aes_crypt() argument
666 struct sahara_aes_reqctx *rctx = ablkcipher_request_ctx(req); sahara_aes_crypt()
671 req->nbytes, !!(mode & FLAGS_ENCRYPT), !!(mode & FLAGS_CBC)); sahara_aes_crypt()
673 if (!IS_ALIGNED(req->nbytes, AES_BLOCK_SIZE)) { sahara_aes_crypt()
682 err = ablkcipher_enqueue_request(&dev->queue, req); sahara_aes_crypt()
690 static int sahara_aes_ecb_encrypt(struct ablkcipher_request *req) sahara_aes_ecb_encrypt() argument
693 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); sahara_aes_ecb_encrypt()
695 crypto_ablkcipher_reqtfm(req)); sahara_aes_ecb_encrypt()
699 ablkcipher_request_set_tfm(req, ctx->fallback); sahara_aes_ecb_encrypt()
700 err = crypto_ablkcipher_encrypt(req); sahara_aes_ecb_encrypt()
701 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); sahara_aes_ecb_encrypt()
705 return sahara_aes_crypt(req, FLAGS_ENCRYPT); sahara_aes_ecb_encrypt()
708 static int sahara_aes_ecb_decrypt(struct ablkcipher_request *req) sahara_aes_ecb_decrypt() argument
711 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); sahara_aes_ecb_decrypt()
713 crypto_ablkcipher_reqtfm(req)); sahara_aes_ecb_decrypt()
717 ablkcipher_request_set_tfm(req, ctx->fallback); sahara_aes_ecb_decrypt()
718 err = crypto_ablkcipher_decrypt(req); sahara_aes_ecb_decrypt()
719 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); sahara_aes_ecb_decrypt()
723 return sahara_aes_crypt(req, 0); sahara_aes_ecb_decrypt()
726 static int sahara_aes_cbc_encrypt(struct ablkcipher_request *req) sahara_aes_cbc_encrypt() argument
729 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); sahara_aes_cbc_encrypt()
731 crypto_ablkcipher_reqtfm(req)); sahara_aes_cbc_encrypt()
735 ablkcipher_request_set_tfm(req, ctx->fallback); sahara_aes_cbc_encrypt()
736 err = crypto_ablkcipher_encrypt(req); sahara_aes_cbc_encrypt()
737 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); sahara_aes_cbc_encrypt()
741 return sahara_aes_crypt(req, FLAGS_ENCRYPT | FLAGS_CBC); sahara_aes_cbc_encrypt()
744 static int sahara_aes_cbc_decrypt(struct ablkcipher_request *req) sahara_aes_cbc_decrypt() argument
747 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); sahara_aes_cbc_decrypt()
749 crypto_ablkcipher_reqtfm(req)); sahara_aes_cbc_decrypt()
753 ablkcipher_request_set_tfm(req, ctx->fallback); sahara_aes_cbc_decrypt()
754 err = crypto_ablkcipher_decrypt(req); sahara_aes_cbc_decrypt()
755 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); sahara_aes_cbc_decrypt()
759 return sahara_aes_crypt(req, FLAGS_CBC); sahara_aes_cbc_decrypt()
868 struct ahash_request *req, sahara_sha_hw_data_descriptor_create()
921 struct ahash_request *req, sahara_sha_hw_context_descriptor_create()
956 static int sahara_sha_prepare_request(struct ahash_request *req) sahara_sha_prepare_request() argument
958 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); sahara_sha_prepare_request()
959 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); sahara_sha_prepare_request()
967 len = rctx->buf_cnt + req->nbytes; sahara_sha_prepare_request()
972 scatterwalk_map_and_copy(rctx->buf + rctx->buf_cnt, req->src, sahara_sha_prepare_request()
973 0, req->nbytes, 0); sahara_sha_prepare_request()
974 rctx->buf_cnt += req->nbytes; sahara_sha_prepare_request()
986 unsigned int offset = req->nbytes - hash_later; sahara_sha_prepare_request()
988 scatterwalk_map_and_copy(rctx->buf, req->src, offset, sahara_sha_prepare_request()
993 req->nbytes = req->nbytes - hash_later; sahara_sha_prepare_request()
995 sahara_walk_and_recalc(req->src, req->nbytes); sahara_sha_prepare_request()
998 if (rctx->buf_cnt && req->nbytes) { sahara_sha_prepare_request()
1002 scatterwalk_sg_chain(rctx->in_sg_chain, 2, req->src); sahara_sha_prepare_request()
1004 rctx->total = req->nbytes + rctx->buf_cnt; sahara_sha_prepare_request()
1008 req->src = rctx->in_sg_chain; sahara_sha_prepare_request()
1011 if (req->src) sahara_sha_prepare_request()
1012 rctx->in_sg = req->src; sahara_sha_prepare_request()
1021 rctx->in_sg = req->src; sahara_sha_prepare_request()
1022 rctx->total = req->nbytes; sahara_sha_prepare_request()
1023 req->src = rctx->in_sg; sahara_sha_prepare_request()
1050 static int sahara_sha_process(struct ahash_request *req) sahara_sha_process() argument
1053 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); sahara_sha_process()
1057 ret = sahara_sha_prepare_request(req); sahara_sha_process()
1062 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 0); sahara_sha_process()
1068 sahara_sha_hw_context_descriptor_create(dev, rctx, req, 0); sahara_sha_process()
1070 sahara_sha_hw_data_descriptor_create(dev, rctx, req, 1); sahara_sha_process()
1093 if (req->result) sahara_sha_process()
1094 memcpy(req->result, rctx->context, rctx->digest_size); sahara_sha_process()
1120 struct ahash_request *req = sahara_queue_manage() local
1123 ret = sahara_sha_process(req); sahara_queue_manage()
1125 struct ablkcipher_request *req = sahara_queue_manage() local
1128 ret = sahara_aes_process(req); sahara_queue_manage()
1142 static int sahara_sha_enqueue(struct ahash_request *req, int last) sahara_sha_enqueue() argument
1144 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); sahara_sha_enqueue()
1148 if (!req->nbytes && !last) sahara_sha_enqueue()
1160 ret = crypto_enqueue_request(&dev->queue, &req->base); sahara_sha_enqueue()
1169 static int sahara_sha_init(struct ahash_request *req) sahara_sha_init() argument
1171 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); sahara_sha_init()
1172 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); sahara_sha_init()
1197 static int sahara_sha_update(struct ahash_request *req) sahara_sha_update() argument
1199 return sahara_sha_enqueue(req, 0); sahara_sha_update()
1202 static int sahara_sha_final(struct ahash_request *req) sahara_sha_final() argument
1204 req->nbytes = 0; sahara_sha_final()
1205 return sahara_sha_enqueue(req, 1); sahara_sha_final()
1208 static int sahara_sha_finup(struct ahash_request *req) sahara_sha_finup() argument
1210 return sahara_sha_enqueue(req, 1); sahara_sha_finup()
1213 static int sahara_sha_digest(struct ahash_request *req) sahara_sha_digest() argument
1215 sahara_sha_init(req); sahara_sha_digest()
1217 return sahara_sha_finup(req); sahara_sha_digest()
1220 static int sahara_sha_export(struct ahash_request *req, void *out) sahara_sha_export() argument
1222 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); sahara_sha_export()
1224 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); sahara_sha_export()
1233 static int sahara_sha_import(struct ahash_request *req, const void *in) sahara_sha_import() argument
1235 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); sahara_sha_import()
1237 struct sahara_sha_reqctx *rctx = ahash_request_ctx(req); sahara_sha_import()
866 sahara_sha_hw_data_descriptor_create(struct sahara_dev *dev, struct sahara_sha_reqctx *rctx, struct ahash_request *req, int index) sahara_sha_hw_data_descriptor_create() argument
919 sahara_sha_hw_context_descriptor_create(struct sahara_dev *dev, struct sahara_sha_reqctx *rctx, struct ahash_request *req, int index) sahara_sha_hw_context_descriptor_create() argument
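
sahara_sha_export()/sahara_sha_import() appear above only by name, but the usual way such drivers implement them (and what the rctx/digest_size references suggest) is to copy the whole per-request context, buffered partial block, byte counts and intermediate digest included, to and from the caller's opaque state buffer. A speculative sketch, with the rctx layout left as a placeholder and the kernel string/hash headers assumed:

struct sample_sha_reqctx {
	/* placeholder: the real layout holds the partial block buffer,
	 * byte counts and the intermediate hardware context/digest */
	u8 state[128];
};

static int sample_sha_export(struct ahash_request *req, void *out)
{
	struct sample_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(out, rctx, sizeof(*rctx));	/* statesize must equal sizeof(*rctx) */
	return 0;
}

static int sample_sha_import(struct ahash_request *req, const void *in)
{
	struct sample_sha_reqctx *rctx = ahash_request_ctx(req);

	memcpy(rctx, in, sizeof(*rctx));
	return 0;
}
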
H A Dixp4xx_crypto.c338 struct aead_request *req = crypt->data.aead_req; finish_scattered_hmac() local
339 struct aead_ctx *req_ctx = aead_request_ctx(req); finish_scattered_hmac()
340 struct crypto_aead *tfm = crypto_aead_reqtfm(req); finish_scattered_hmac()
342 int decryptlen = req->cryptlen - authsize; finish_scattered_hmac()
346 req->src, decryptlen, authsize, 1); finish_scattered_hmac()
364 struct aead_request *req = crypt->data.aead_req; one_packet() local
365 struct aead_ctx *req_ctx = aead_request_ctx(req); one_packet()
371 req->base.complete(&req->base, failed); one_packet()
375 struct ablkcipher_request *req = crypt->data.ablk_req; one_packet() local
376 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); one_packet()
382 req->base.complete(&req->base, failed); one_packet()
866 static int ablk_perform(struct ablkcipher_request *req, int encrypt) ablk_perform() argument
868 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); ablk_perform()
873 unsigned int nbytes = req->nbytes; ablk_perform()
875 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req); ablk_perform()
878 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? ablk_perform()
892 crypt->data.ablk_req = req; ablk_perform()
900 BUG_ON(ivsize && !req->info); ablk_perform()
901 memcpy(crypt->iv, req->info, ivsize); ablk_perform()
902 if (req->src != req->dst) { ablk_perform()
908 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, ablk_perform()
918 if (!chainup_buffers(dev, req->src, nbytes, &src_hook, ablk_perform()
932 if (req->src != req->dst) { ablk_perform()
939 static int ablk_encrypt(struct ablkcipher_request *req) ablk_encrypt() argument
941 return ablk_perform(req, 1); ablk_encrypt()
944 static int ablk_decrypt(struct ablkcipher_request *req) ablk_decrypt() argument
946 return ablk_perform(req, 0); ablk_decrypt()
949 static int ablk_rfc3686_crypt(struct ablkcipher_request *req) ablk_rfc3686_crypt() argument
951 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req); ablk_rfc3686_crypt()
954 u8 *info = req->info; ablk_rfc3686_crypt()
965 req->info = iv; ablk_rfc3686_crypt()
966 ret = ablk_perform(req, 1); ablk_rfc3686_crypt()
967 req->info = info; ablk_rfc3686_crypt()
989 static int aead_perform(struct aead_request *req, int encrypt, aead_perform() argument
992 struct crypto_aead *tfm = crypto_aead_reqtfm(req); aead_perform()
1000 struct aead_ctx *req_ctx = aead_request_ctx(req); aead_perform()
1002 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? aead_perform()
1012 cryptlen = req->cryptlen; aead_perform()
1015 /* req->cryptlen includes the authsize when decrypting */ aead_perform()
1016 cryptlen = req->cryptlen -authsize; aead_perform()
1023 crypt->data.aead_req = req; aead_perform()
1032 crypt->auth_len = req->assoclen + ivsize + cryptlen; aead_perform()
1033 BUG_ON(ivsize && !req->iv); aead_perform()
1034 memcpy(crypt->iv, req->iv, ivsize); aead_perform()
1036 if (req->src != req->dst) { aead_perform()
1041 buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook, aead_perform()
1054 if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) { aead_perform()
1063 req->src, cryptlen, authsize, 0); aead_perform()
1070 buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags, aead_perform()
1173 static int aead_encrypt(struct aead_request *req) aead_encrypt() argument
1175 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); aead_encrypt()
1176 return aead_perform(req, 1, req->assoclen + ivsize, aead_encrypt()
1177 req->cryptlen, req->iv); aead_encrypt()
1180 static int aead_decrypt(struct aead_request *req) aead_decrypt() argument
1182 unsigned ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); aead_decrypt()
1183 return aead_perform(req, 0, req->assoclen + ivsize, aead_decrypt()
1184 req->cryptlen, req->iv); aead_decrypt()
1187 static int aead_givencrypt(struct aead_givcrypt_request *req) aead_givencrypt() argument
1189 struct crypto_aead *tfm = aead_givcrypt_reqtfm(req); aead_givencrypt()
1199 memcpy(req->areq.iv, ctx->salt, ivsize); aead_givencrypt()
1202 memset(req->giv, 0, ivsize - sizeof(u64)); aead_givencrypt()
1205 seq = cpu_to_be64(req->seq); aead_givencrypt()
1206 memcpy(req->giv + ivsize - len, &seq, len); aead_givencrypt()
1207 return aead_perform(&req->areq, 1, req->areq.assoclen, aead_givencrypt()
1208 req->areq.cryptlen +ivsize, req->giv); aead_givencrypt()
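
ablk_rfc3686_crypt() above shows req->info being swapped out and restored around ablk_perform(); the part elided by the search hits is the construction of the 16-byte RFC 3686 counter block. A sketch of that construction, using the CTR_RFC3686_* sizes from crypto/ctr.h; sample_perform() and the nonce field are placeholders standing in for the driver's own helper and setkey-time state:

struct sample_ctx {
	u8 nonce[CTR_RFC3686_NONCE_SIZE];	/* saved from the key tail in setkey() */
};

static int sample_perform(struct ablkcipher_request *req, int encrypt);	/* placeholder */

static int sample_rfc3686_crypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct sample_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	u8 iv[CTR_RFC3686_BLOCK_SIZE];
	u8 *info = req->info;
	int ret;

	/* counter block = 4-byte nonce | 8-byte per-request IV | 32-bit BE counter = 1 */
	memcpy(iv, ctx->nonce, CTR_RFC3686_NONCE_SIZE);
	memcpy(iv + CTR_RFC3686_NONCE_SIZE, info, CTR_RFC3686_IV_SIZE);
	*(__be32 *)(iv + CTR_RFC3686_NONCE_SIZE + CTR_RFC3686_IV_SIZE) = cpu_to_be32(1);

	req->info = iv;			/* hardware sees the full counter block */
	ret = sample_perform(req, 1);	/* stand-in for ablk_perform(req, 1) */
	req->info = info;		/* give the caller back its own IV pointer */

	return ret;
}
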
/linux-4.1.27/drivers/usb/gadget/udc/
H A Dgoku_udc.c268 struct goku_request *req; goku_alloc_request() local
272 req = kzalloc(sizeof *req, gfp_flags); goku_alloc_request()
273 if (!req) goku_alloc_request()
276 INIT_LIST_HEAD(&req->queue); goku_alloc_request()
277 return &req->req; goku_alloc_request()
283 struct goku_request *req; goku_free_request() local
288 req = container_of(_req, struct goku_request, req); goku_free_request()
289 WARN_ON(!list_empty(&req->queue)); goku_free_request()
290 kfree(req); goku_free_request()
296 done(struct goku_ep *ep, struct goku_request *req, int status) done() argument
301 list_del_init(&req->queue); done()
303 if (likely(req->req.status == -EINPROGRESS)) done()
304 req->req.status = status; done()
306 status = req->req.status; done()
311 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); done()
316 VDBG(dev, "complete %s req %p stat %d len %u/%u\n", done()
317 ep->ep.name, &req->req, status, done()
318 req->req.actual, req->req.length); done()
323 usb_gadget_giveback_request(&ep->ep, &req->req); done()
331 write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max) write_packet() argument
335 length = min(req->req.length - req->req.actual, max); write_packet()
336 req->req.actual += length; write_packet()
345 static int write_fifo(struct goku_ep *ep, struct goku_request *req) write_fifo() argument
354 buf = req->req.buf + req->req.actual; write_fifo()
369 count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket); write_fifo()
380 if (likely(req->req.length != req->req.actual) write_fifo()
381 || req->req.zero) write_fifo()
390 req->req.length - req->req.actual, req); write_fifo()
397 done(ep, req, 0); write_fifo()
404 static int read_fifo(struct goku_ep *ep, struct goku_request *req) read_fifo() argument
413 buf = req->req.buf + req->req.actual; read_fifo()
427 bufferspace = req->req.length - req->req.actual; read_fifo()
445 req->req.actual += size; read_fifo()
448 VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n", read_fifo()
450 req, req->req.actual, req->req.length); read_fifo()
460 if (req->req.status != -EOVERFLOW) read_fifo()
463 req->req.status = -EOVERFLOW; read_fifo()
471 if (unlikely(is_short || req->req.actual == req->req.length)) { read_fifo()
484 done(ep, req, 0); read_fifo()
488 req = list_entry(ep->queue.next, read_fifo()
519 struct goku_request *req; pio_advance() local
523 req = list_entry(ep->queue.next, struct goku_request, queue); pio_advance()
524 (ep->is_in ? write_fifo : read_fifo)(ep, req); pio_advance()
531 static int start_dma(struct goku_ep *ep, struct goku_request *req) start_dma() argument
535 u32 start = req->req.dma; start_dma()
536 u32 end = start + req->req.length - 1; start_dma()
551 if (unlikely(req->req.length == 0)) start_dma()
553 else if ((req->req.length % ep->ep.maxpacket) != 0 start_dma()
554 || req->req.zero) start_dma()
588 struct goku_request *req; dma_advance() local
603 req = list_entry(ep->queue.next, struct goku_request, queue); dma_advance()
609 req->req.actual = readl(&regs->in_dma_current); dma_advance()
617 req->req.actual = readl(&regs->out_dma_current); dma_advance()
619 req->req.actual -= req->req.dma; dma_advance()
620 req->req.actual++; dma_advance()
623 VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n", dma_advance()
625 req->req.actual, req->req.length, req); dma_advance()
627 done(ep, req, 0); dma_advance()
630 req = list_entry(ep->queue.next, struct goku_request, queue); dma_advance()
631 (void) start_dma(ep, req); dma_advance()
637 struct goku_request *req; abort_dma() local
648 req = list_entry(ep->queue.next, struct goku_request, queue); abort_dma()
686 req->req.actual = (curr - req->req.dma) + 1; abort_dma()
687 req->req.status = status; abort_dma()
691 req->req.actual, req->req.length); abort_dma()
700 req->req.actual = req->req.length; abort_dma()
701 req->req.status = 0; abort_dma()
709 struct goku_request *req; goku_queue() local
716 req = container_of(_req, struct goku_request, req); goku_queue()
718 || !_req->buf || !list_empty(&req->queue))) goku_queue()
733 status = usb_gadget_map_request(&dev->gadget, &req->req, goku_queue()
740 VDBG(dev, "%s queue req %p, len %u buf %p\n", goku_queue()
762 status = start_dma(ep, req); goku_queue()
764 status = (ep->is_in ? write_fifo : read_fifo)(ep, req); goku_queue()
769 req = NULL; goku_queue()
774 if (likely(req != NULL)) goku_queue()
775 list_add_tail(&req->queue, &ep->queue); goku_queue()
792 struct goku_request *req; nuke() local
800 req = list_entry(ep->queue.next, struct goku_request, queue); nuke()
801 done(ep, req, status); nuke()
808 struct goku_request *req; goku_dequeue() local
832 list_for_each_entry (req, &ep->queue, queue) { goku_dequeue()
833 if (&req->req == _req) goku_dequeue()
836 if (&req->req != _req) { goku_dequeue()
841 if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) { goku_dequeue()
843 done(ep, req, -ECONNRESET); goku_dequeue()
845 } else if (!list_empty(&req->queue)) goku_dequeue()
846 done(ep, req, -ECONNRESET); goku_dequeue()
848 req = NULL; goku_dequeue()
851 return req ? 0 : -EOPNOTSUPP; goku_dequeue()
865 struct goku_request *req; goku_clear_halt() local
869 req = list_entry(ep->queue.next, struct goku_request, goku_clear_halt()
871 (void) start_dma(ep, req); goku_clear_halt()
1164 struct goku_request *req; udc_proc_read() local
1190 list_for_each_entry(req, &ep->queue, queue) { udc_proc_read()
1191 if (ep->dma && req->queue.prev == &ep->queue) { udc_proc_read()
1196 tmp -= req->req.dma; udc_proc_read()
1199 tmp = req->req.actual; udc_proc_read()
1202 &req->req, tmp, req->req.length, udc_proc_read()
1203 req->req.buf); udc_proc_read()
1491 VDBG(dev, "req %02x.%02x protocol STALL; err %d\n", ep0_setup()
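
done() in the goku_udc hits above is the canonical UDC completion helper: unlink the request, latch its final status, undo any DMA mapping, and give it back to the gadget driver with the device lock dropped around the callback (completion handlers are allowed to re-queue). A sketch of that shape; the sample_* types are compressed placeholders for the driver's own ep/request/udc structures:

struct sample_udc { struct usb_gadget gadget; spinlock_t lock; };
struct sample_request { struct usb_request req; struct list_head queue; };
struct sample_ep { struct usb_ep ep; struct sample_udc *dev; unsigned is_in:1, dma:1; };

static void sample_done(struct sample_ep *ep, struct sample_request *req, int status)
{
	struct sample_udc *dev = ep->dev;

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;	/* first completion wins */
	else
		status = req->req.status;	/* e.g. an earlier -EOVERFLOW */

	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	/* don't hold the lock across the callback: it may queue new requests */
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
}
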
H A Dr8a66597-udc.c38 static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req);
40 struct r8a66597_request *req);
45 struct r8a66597_request *req, int status);
572 struct r8a66597_request *req) start_ep0_write()
579 if (req->req.length == 0) { start_ep0_write()
582 transfer_complete(ep, req, 0); start_ep0_write()
585 irq_ep0_write(ep, req); start_ep0_write()
634 struct r8a66597_request *req) sudmac_alloc_channel()
666 return usb_gadget_map_request(&r8a66597->gadget, &req->req, dma->dir); sudmac_alloc_channel()
671 struct r8a66597_request *req) sudmac_free_channel()
676 usb_gadget_unmap_request(&r8a66597->gadget, &req->req, ep->dma->dir); sudmac_free_channel()
689 struct r8a66597_request *req) sudmac_start()
691 BUG_ON(req->req.length == 0); sudmac_start()
694 r8a66597_sudmac_write(r8a66597, req->req.dma, CH0BA); sudmac_start()
695 r8a66597_sudmac_write(r8a66597, req->req.length, CH0BBC); sudmac_start()
702 struct r8a66597_request *req) start_packet_write()
711 if (req->req.length == 0) { start_packet_write()
712 transfer_complete(ep, req, 0); start_packet_write()
715 if (sudmac_alloc_channel(r8a66597, ep, req) < 0) { start_packet_write()
724 irq_packet_write(ep, req); start_packet_write()
731 sudmac_start(r8a66597, ep, req); start_packet_write()
737 struct r8a66597_request *req) start_packet_read()
753 DIV_ROUND_UP(req->req.length, ep->ep.maxpacket), start_packet_read()
758 if (sudmac_alloc_channel(r8a66597, ep, req) < 0) { start_packet_read()
765 sudmac_start(r8a66597, ep, req); start_packet_read()
771 static void start_packet(struct r8a66597_ep *ep, struct r8a66597_request *req) start_packet() argument
774 start_packet_write(ep, req); start_packet()
776 start_packet_read(ep, req); start_packet()
779 static void start_ep0(struct r8a66597_ep *ep, struct r8a66597_request *req) start_ep0() argument
787 start_ep0_write(ep, req); start_ep0()
790 start_packet_read(ep, req); start_ep0()
902 struct r8a66597_request *req, int status)
915 list_del_init(&req->queue);
917 req->req.status = -ESHUTDOWN;
919 req->req.status = status;
925 sudmac_free_channel(ep->r8a66597, ep, req);
928 usb_gadget_giveback_request(&ep->ep, &req->req);
932 req = get_request_from_ep(ep);
934 start_packet(ep, req);
938 static void irq_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req) irq_ep0_write() argument
965 buf = req->req.buf + req->req.actual; irq_ep0_write()
966 size = min(bufsize, req->req.length - req->req.actual); irq_ep0_write()
969 if (req->req.buf) { irq_ep0_write()
977 req->req.actual += size; irq_ep0_write()
980 if ((!req->req.zero && (req->req.actual == req->req.length)) irq_ep0_write()
993 struct r8a66597_request *req) irq_packet_write()
1014 buf = req->req.buf + req->req.actual; irq_packet_write()
1015 size = min(bufsize, req->req.length - req->req.actual); irq_packet_write()
1018 if (req->req.buf) { irq_packet_write()
1028 req->req.actual += size; irq_packet_write()
1030 if ((!req->req.zero && (req->req.actual == req->req.length)) irq_packet_write()
1042 struct r8a66597_request *req) irq_packet_read()
1055 req->req.status = -EPIPE; irq_packet_read()
1066 buf = req->req.buf + req->req.actual; irq_packet_read()
1067 req_len = req->req.length - req->req.actual; irq_packet_read()
1074 req->req.actual += size; irq_packet_read()
1077 if ((!req->req.zero && (req->req.actual == req->req.length)) irq_packet_read()
1086 if (req->req.buf) { irq_packet_read()
1095 transfer_complete(ep, req, 0); irq_packet_read()
1103 struct r8a66597_request *req; irq_pipe_ready() local
1110 req = get_request_from_ep(ep); irq_pipe_ready()
1111 irq_packet_read(ep, req); irq_pipe_ready()
1118 req = get_request_from_ep(ep); irq_pipe_ready()
1120 irq_packet_write(ep, req); irq_pipe_ready()
1122 irq_packet_read(ep, req); irq_pipe_ready()
1134 struct r8a66597_request *req; irq_pipe_empty() local
1140 req = get_request_from_ep(ep); irq_pipe_empty()
1141 irq_ep0_write(ep, req); irq_pipe_empty()
1153 req = get_request_from_ep(ep); irq_pipe_empty()
1155 transfer_complete(ep, req, 0); irq_pipe_empty()
1212 struct r8a66597_request *req; clear_feature() local
1226 req = get_request_from_ep(ep); clear_feature()
1231 start_packet(ep, req); clear_feature()
1374 struct r8a66597_request *req; variable in typeref:struct:r8a66597_request
1376 req = get_request_from_ep(ep);
1377 transfer_complete(ep, req, 0);
1406 struct r8a66597_request *req; sudmac_finish() local
1424 req = get_request_from_ep(ep); sudmac_finish()
1428 req->req.actual += len; sudmac_finish()
1434 if ((!req->req.zero && (req->req.actual == req->req.length)) sudmac_finish()
1442 transfer_complete(ep, req, 0); sudmac_finish()
1568 struct r8a66597_request *req; r8a66597_disable() local
1575 req = get_request_from_ep(ep); r8a66597_disable()
1577 transfer_complete(ep, req, -ECONNRESET); r8a66597_disable()
1588 struct r8a66597_request *req; r8a66597_alloc_request() local
1590 req = kzalloc(sizeof(struct r8a66597_request), gfp_flags); r8a66597_alloc_request()
1591 if (!req) r8a66597_alloc_request()
1594 INIT_LIST_HEAD(&req->queue); r8a66597_alloc_request()
1596 return &req->req; r8a66597_alloc_request()
1601 struct r8a66597_request *req; r8a66597_free_request() local
1603 req = container_of(_req, struct r8a66597_request, req); r8a66597_free_request()
1604 kfree(req); r8a66597_free_request()
1611 struct r8a66597_request *req; r8a66597_queue() local
1616 req = container_of(_req, struct r8a66597_request, req); r8a66597_queue()
1626 list_add_tail(&req->queue, &ep->queue); r8a66597_queue()
1627 req->req.actual = 0; r8a66597_queue()
1628 req->req.status = -EINPROGRESS; r8a66597_queue()
1631 start_ep0(ep, req); r8a66597_queue()
1634 start_packet(ep, req); r8a66597_queue()
1645 struct r8a66597_request *req; r8a66597_dequeue() local
1649 req = container_of(_req, struct r8a66597_request, req); r8a66597_dequeue()
1653 transfer_complete(ep, req, -ECONNRESET); r8a66597_dequeue()
1662 struct r8a66597_request *req; r8a66597_set_halt() local
1667 req = get_request_from_ep(ep); r8a66597_set_halt()
571 start_ep0_write(struct r8a66597_ep *ep, struct r8a66597_request *req) start_ep0_write() argument
632 sudmac_alloc_channel(struct r8a66597 *r8a66597, struct r8a66597_ep *ep, struct r8a66597_request *req) sudmac_alloc_channel() argument
669 sudmac_free_channel(struct r8a66597 *r8a66597, struct r8a66597_ep *ep, struct r8a66597_request *req) sudmac_free_channel() argument
688 sudmac_start(struct r8a66597 *r8a66597, struct r8a66597_ep *ep, struct r8a66597_request *req) sudmac_start() argument
701 start_packet_write(struct r8a66597_ep *ep, struct r8a66597_request *req) start_packet_write() argument
736 start_packet_read(struct r8a66597_ep *ep, struct r8a66597_request *req) start_packet_read() argument
992 irq_packet_write(struct r8a66597_ep *ep, struct r8a66597_request *req) irq_packet_write() argument
1041 irq_packet_read(struct r8a66597_ep *ep, struct r8a66597_request *req) irq_packet_read() argument
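
transfer_complete() in the r8a66597 hits above adds one twist to the same completion helper: after giving the finished request back, the next queued request (if any) is started immediately so the endpoint never sits idle with work pending. A sketch of that complete-and-kick-next ordering, reusing the placeholder types from the goku sketch; sample_start_packet() stands in for the driver's start_packet():

static void sample_start_packet(struct sample_ep *ep, struct sample_request *req);	/* placeholder */

static void sample_transfer_complete(struct sample_ep *ep,
				     struct sample_request *req, int status)
{
	int restart;

	list_del_init(&req->queue);
	restart = !list_empty(&ep->queue);	/* more requests already waiting? */

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;

	usb_gadget_giveback_request(&ep->ep, &req->req);

	if (restart) {
		struct sample_request *next = list_entry(ep->queue.next,
						struct sample_request, queue);
		sample_start_packet(ep, next);	/* keep the endpoint busy */
	}
}
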
H A Dgr_udc.c96 struct gr_request *req) gr_dbgprint_request()
98 int buflen = ep->is_in ? req->req.length : req->req.actual; gr_dbgprint_request()
102 dev_dbg(ep->dev->dev, "%s: 0x%p, %d bytes data%s:\n", str, req, buflen, gr_dbgprint_request()
105 rowlen, 4, req->req.buf, plen, false); gr_dbgprint_request()
117 struct gr_request *req) {}
134 struct gr_request *req; gr_seq_ep_show() local
167 list_for_each_entry(req, &ep->queue, queue) { gr_seq_ep_show()
171 seq_printf(seq, " 0x%p: 0x%p %d %d\n", req, gr_seq_ep_show()
172 &req->req.buf, req->req.actual, req->req.length); gr_seq_ep_show()
174 next = req->first_desc; gr_seq_ep_show()
179 desc == req->curr_desc ? 'c' : ' ', gr_seq_ep_show()
181 } while (desc != req->last_desc); gr_seq_ep_show()
275 static void gr_free_dma_desc_chain(struct gr_udc *dev, struct gr_request *req) gr_free_dma_desc_chain() argument
280 next = req->first_desc; gr_free_dma_desc_chain()
288 } while (desc != req->last_desc); gr_free_dma_desc_chain()
290 req->first_desc = NULL; gr_free_dma_desc_chain()
291 req->curr_desc = NULL; gr_free_dma_desc_chain()
292 req->last_desc = NULL; gr_free_dma_desc_chain()
295 static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req);
303 static void gr_finish_request(struct gr_ep *ep, struct gr_request *req,
310 list_del_init(&req->queue);
312 if (likely(req->req.status == -EINPROGRESS))
313 req->req.status = status;
315 status = req->req.status;
318 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
319 gr_free_dma_desc_chain(dev, req);
321 if (ep->is_in) { /* For OUT, req->req.actual gets updated bit by bit */
322 req->req.actual = req->req.length;
323 } else if (req->oddlen && req->req.actual > req->evenlen) {
329 char *buftail = ((char *)req->req.buf + req->evenlen);
331 memcpy(buftail, ep->tailbuf, req->oddlen);
333 if (req->req.actual > req->req.length) {
337 gr_dbgprint_request("OVFL", ep, req);
338 req->req.status = -EOVERFLOW;
344 gr_dbgprint_request("SENT", ep, req);
346 gr_dbgprint_request("RECV", ep, req);
351 if (req == dev->ep0reqo && !status) {
352 if (req->setup)
353 gr_ep0_setup(dev, req);
357 } else if (req->req.complete) {
360 usb_gadget_giveback_request(&ep->ep, &req->req);
369 struct gr_request *req; gr_alloc_request() local
371 req = kzalloc(sizeof(*req), gfp_flags); gr_alloc_request()
372 if (!req) gr_alloc_request()
375 INIT_LIST_HEAD(&req->queue); gr_alloc_request()
377 return &req->req; gr_alloc_request()
387 struct gr_request *req; gr_start_dma() local
395 req = list_first_entry(&ep->queue, struct gr_request, queue); gr_start_dma()
398 BUG_ON(!req->curr_desc); gr_start_dma()
406 if (!ep->is_in && req->oddlen) gr_start_dma()
407 req->last_desc->data = ep->tailbuf_paddr; gr_start_dma()
412 gr_write32(&ep->regs->dmaaddr, req->curr_desc->paddr); gr_start_dma()
429 struct gr_request *req; gr_dma_advance() local
431 req = list_first_entry(&ep->queue, struct gr_request, queue); gr_dma_advance()
432 gr_finish_request(ep, req, status); gr_dma_advance()
457 static int gr_add_dma_desc(struct gr_ep *ep, struct gr_request *req, gr_add_dma_desc() argument
473 if (!req->first_desc) { gr_add_dma_desc()
474 req->first_desc = desc; gr_add_dma_desc()
475 req->curr_desc = desc; gr_add_dma_desc()
477 req->last_desc->next_desc = desc; gr_add_dma_desc()
478 req->last_desc->next = desc->paddr; gr_add_dma_desc()
479 req->last_desc->ctrl |= GR_DESC_OUT_CTRL_NX; gr_add_dma_desc()
481 req->last_desc = desc; gr_add_dma_desc()
488 * together covers req->req.length bytes of the buffer at DMA address
489 * req->req.dma for the OUT direction.
496 static int gr_setup_out_desc_list(struct gr_ep *ep, struct gr_request *req, gr_setup_out_desc_list() argument
503 req->first_desc = NULL; /* Signals that no allocation is done yet */ gr_setup_out_desc_list()
504 bytes_left = req->req.length; gr_setup_out_desc_list()
507 dma_addr_t start = req->req.dma + bytes_used; gr_setup_out_desc_list()
512 req->evenlen = req->req.length - bytes_left; gr_setup_out_desc_list()
513 req->oddlen = size; gr_setup_out_desc_list()
516 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags); gr_setup_out_desc_list()
524 req->first_desc->ctrl |= GR_DESC_OUT_CTRL_EN; gr_setup_out_desc_list()
529 gr_free_dma_desc_chain(ep->dev, req); gr_setup_out_desc_list()
536 * together covers req->req.length bytes of the buffer at DMA address
537 * req->req.dma for the IN direction.
549 static int gr_setup_in_desc_list(struct gr_ep *ep, struct gr_request *req, gr_setup_in_desc_list() argument
552 u16 bytes_left; /* Bytes left in req to provide descriptors for */ gr_setup_in_desc_list()
553 u16 bytes_used; /* Bytes in req accommodated for */ gr_setup_in_desc_list()
556 req->first_desc = NULL; /* Signals that no allocation is done yet */ gr_setup_in_desc_list()
557 bytes_left = req->req.length; gr_setup_in_desc_list()
560 dma_addr_t start = req->req.dma + bytes_used; gr_setup_in_desc_list()
563 ret = gr_add_dma_desc(ep, req, start, size, gfp_flags); gr_setup_in_desc_list()
573 * available when req->req.zero is set and the data length is even gr_setup_in_desc_list()
576 if (req->req.zero && (req->req.length % ep->ep.maxpacket == 0)) { gr_setup_in_desc_list()
577 ret = gr_add_dma_desc(ep, req, 0, 0, gfp_flags); gr_setup_in_desc_list()
586 req->last_desc->ctrl |= GR_DESC_IN_CTRL_PI; gr_setup_in_desc_list()
591 gr_free_dma_desc_chain(ep->dev, req); gr_setup_in_desc_list()
597 static int gr_queue(struct gr_ep *ep, struct gr_request *req, gfp_t gfp_flags) gr_queue() argument
607 if (unlikely(!req->req.buf || !list_empty(&req->queue))) { gr_queue()
610 ep->ep.name, req->req.buf, list_empty(&req->queue)); gr_queue()
626 ret = usb_gadget_map_request(&dev->gadget, &req->req, ep->is_in); gr_queue()
633 ret = gr_setup_in_desc_list(ep, req, gfp_flags); gr_queue()
635 ret = gr_setup_out_desc_list(ep, req, gfp_flags); gr_queue()
639 req->req.status = -EINPROGRESS; gr_queue()
640 req->req.actual = 0; gr_queue()
641 list_add_tail(&req->queue, &ep->queue); gr_queue()
655 static inline int gr_queue_int(struct gr_ep *ep, struct gr_request *req, gr_queue_int() argument
659 gr_dbgprint_request("RESP", ep, req); gr_queue_int()
661 return gr_queue(ep, req, gfp_flags); gr_queue_int()
674 struct gr_request *req; gr_ep_nuke() local
681 req = list_first_entry(&ep->queue, struct gr_request, queue); gr_ep_nuke()
682 gr_finish_request(ep, req, -ESHUTDOWN); gr_ep_nuke()
842 struct usb_request *req)) gr_ep0_respond()
844 u8 *reqbuf = dev->ep0reqi->req.buf; gr_ep0_respond()
850 dev->ep0reqi->req.length = length; gr_ep0_respond()
851 dev->ep0reqi->req.complete = complete; gr_ep0_respond()
1062 static void gr_ep0_setup(struct gr_udc *dev, struct gr_request *req)
1082 if (!req->req.actual)
1088 if (req->req.actual > 0)
1101 } else if (!req->req.actual) {
1108 for (i = 0; i < req->req.actual; i++)
1109 u.raw[i] = ((u8 *)req->req.buf)[i];
1259 struct gr_request *req; gr_handle_in_ep() local
1261 req = list_first_entry(&ep->queue, struct gr_request, queue); gr_handle_in_ep()
1262 if (!req->last_desc) gr_handle_in_ep()
1265 if (ACCESS_ONCE(req->last_desc->ctrl) & GR_DESC_IN_CTRL_EN) gr_handle_in_ep()
1287 struct gr_request *req; gr_handle_out_ep() local
1290 req = list_first_entry(&ep->queue, struct gr_request, queue); gr_handle_out_ep()
1291 if (!req->curr_desc) gr_handle_out_ep()
1294 ctrl = ACCESS_ONCE(req->curr_desc->ctrl); gr_handle_out_ep()
1300 req->req.actual += len; gr_handle_out_ep()
1302 req->setup = 1; gr_handle_out_ep()
1304 if (len < ep->ep.maxpacket || req->req.actual >= req->req.length) { gr_handle_out_ep()
1321 req->curr_desc = req->curr_desc->next_desc; gr_handle_out_ep()
1322 req->curr_desc->ctrl |= GR_DESC_OUT_CTRL_EN; gr_handle_out_ep()
1658 struct gr_request *req; gr_free_request() local
1662 req = container_of(_req, struct gr_request, req); gr_free_request()
1665 WARN(!list_empty(&req->queue), gr_free_request()
1668 kfree(req); gr_free_request()
1676 struct gr_request *req; gr_queue_ext() local
1684 req = container_of(_req, struct gr_request, req); gr_queue_ext()
1701 gr_dbgprint_request("EXTERN", ep, req); gr_queue_ext()
1703 ret = gr_queue(ep, req, GFP_ATOMIC); gr_queue_ext()
1713 struct gr_request *req; gr_dequeue() local
1733 list_for_each_entry(req, &ep->queue, queue) { gr_dequeue()
1734 if (&req->req == _req) gr_dequeue()
1737 if (&req->req != _req) { gr_dequeue()
1742 if (list_first_entry(&ep->queue, struct gr_request, queue) == req) { gr_dequeue()
1746 gr_finish_request(ep, req, -ECONNRESET); gr_dequeue()
1749 } else if (!list_empty(&req->queue)) { gr_dequeue()
1751 gr_finish_request(ep, req, -ECONNRESET); gr_dequeue()
1981 struct gr_request *req; gr_ep_init() local
2010 req = container_of(_req, struct gr_request, req); gr_ep_init()
2011 req->req.buf = buf; gr_ep_init()
2012 req->req.length = MAX_CTRL_PL_SIZE; gr_ep_init()
2015 dev->ep0reqi = req; /* Complete gets set as used */ gr_ep_init()
2017 dev->ep0reqo = req; /* Completion treated separately */ gr_ep_init()
2113 gr_free_request(&dev->epi[0].ep, &dev->ep0reqi->req); gr_remove()
2114 gr_free_request(&dev->epo[0].ep, &dev->ep0reqo->req); gr_remove()
95 gr_dbgprint_request(const char *str, struct gr_ep *ep, struct gr_request *req) gr_dbgprint_request() argument
116 gr_dbgprint_request(const char *str, struct gr_ep *ep, struct gr_request *req) gr_dbgprint_request() argument
840 gr_ep0_respond(struct gr_udc *dev, u8 *buf, int length, void (*complete)(struct usb_ep *ep, struct usb_request *req)) gr_ep0_respond() argument
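
The gr_udc hits above (gr_add_dma_desc(), gr_setup_out_desc_list()) show a descriptor-chain DMA model: the mapped request buffer is cut into controller-sized chunks and each chunk gets a descriptor linked both by CPU pointer and by bus address to the previous one. A simplified sketch of building such a chain; the descriptor layout, MAX_DESC_PAYLOAD and sample_alloc_dma_desc() are placeholders for the driver's own definitions, and error unwinding of a partial chain is left to the caller as in the fragments:

#define MAX_DESC_PAYLOAD 65528		/* placeholder per-descriptor byte limit */

struct sample_desc {
	u32 ctrl;			/* enable/size bits (simplified) */
	u32 data;			/* bus address of this data chunk */
	u32 next;			/* bus address of the next descriptor */
	dma_addr_t paddr;		/* bus address of this descriptor */
	struct sample_desc *next_desc;	/* CPU-side link for walking/freeing */
};

struct sample_dma_req {
	struct usb_request req;
	struct sample_desc *first_desc, *last_desc;
};

static struct sample_desc *sample_alloc_dma_desc(gfp_t gfp_flags);	/* placeholder */

static int sample_setup_desc_list(struct sample_dma_req *req, gfp_t gfp_flags)
{
	u16 bytes_left = req->req.length;
	u16 bytes_used = 0;

	req->first_desc = NULL;

	while (bytes_left > 0) {
		dma_addr_t start = req->req.dma + bytes_used;
		u16 size = min_t(u16, bytes_left, MAX_DESC_PAYLOAD);
		struct sample_desc *desc = sample_alloc_dma_desc(gfp_flags);

		if (!desc)
			return -ENOMEM;		/* caller frees the partial chain */

		desc->data = start;
		desc->ctrl = size;		/* real ctrl word carries more flags */

		if (!req->first_desc) {
			req->first_desc = desc;
		} else {
			req->last_desc->next_desc = desc;	/* CPU link */
			req->last_desc->next = desc->paddr;	/* hardware link */
		}
		req->last_desc = desc;

		bytes_left -= size;
		bytes_used += size;
	}
	return 0;
}
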
H A Dfusb300_udc.c36 static void done(struct fusb300_ep *ep, struct fusb300_request *req,
254 struct fusb300_request *req; fusb300_disable() local
262 req = list_entry(ep->queue.next, struct fusb300_request, queue); fusb300_disable()
264 done(ep, req, -ECONNRESET); fusb300_disable()
274 struct fusb300_request *req; fusb300_alloc_request() local
276 req = kzalloc(sizeof(struct fusb300_request), gfp_flags); fusb300_alloc_request()
277 if (!req) fusb300_alloc_request()
279 INIT_LIST_HEAD(&req->queue); fusb300_alloc_request()
281 return &req->req; fusb300_alloc_request()
286 struct fusb300_request *req; fusb300_free_request() local
288 req = container_of(_req, struct fusb300_request, req); fusb300_free_request()
289 kfree(req); fusb300_free_request()
334 struct fusb300_request *req) fusb300_wrcxf()
340 u32 length = req->req.length - req->req.actual; fusb300_wrcxf()
342 tmp = req->req.buf + req->req.actual; fusb300_wrcxf()
352 req->req.actual += SS_CTL_MAX_PACKET_SIZE; fusb300_wrcxf()
381 req->req.actual += length; fusb300_wrcxf()
402 static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req) ep0_queue() argument
405 if (req->req.length) { ep0_queue()
406 fusb300_wrcxf(ep, req); ep0_queue()
408 printk(KERN_DEBUG "%s : req->req.length = 0x%x\n", ep0_queue()
409 __func__, req->req.length); ep0_queue()
410 if ((req->req.length == req->req.actual) || ep0_queue()
411 (req->req.actual < ep->ep.maxpacket)) ep0_queue()
412 done(ep, req, 0); ep0_queue()
414 if (!req->req.length) ep0_queue()
415 done(ep, req, 0); ep0_queue()
426 struct fusb300_request *req; fusb300_queue() local
431 req = container_of(_req, struct fusb300_request, req); fusb300_queue()
441 list_add_tail(&req->queue, &ep->queue); fusb300_queue()
443 req->req.actual = 0; fusb300_queue()
444 req->req.status = -EINPROGRESS; fusb300_queue()
447 ep0_queue(ep, req); fusb300_queue()
459 struct fusb300_request *req; fusb300_dequeue() local
463 req = container_of(_req, struct fusb300_request, req); fusb300_dequeue()
467 done(ep, req, -ECONNRESET); fusb300_dequeue()
604 struct fusb300_request *req, fusb300_rdfifo()
612 tmp = req->req.buf + req->req.actual; fusb300_rdfifo()
613 req->req.actual += length; fusb300_rdfifo()
615 if (req->req.actual > req->req.length) fusb300_rdfifo()
616 printk(KERN_DEBUG "req->req.actual > req->req.length\n"); fusb300_rdfifo()
867 static void done(struct fusb300_ep *ep, struct fusb300_request *req, done() argument
870 list_del_init(&req->queue); done()
874 req->req.status = -ESHUTDOWN; done()
876 req->req.status = status; done()
879 usb_gadget_giveback_request(&ep->ep, &req->req); done()
942 struct fusb300_request *req) fusb300_set_idma()
947 &req->req, DMA_TO_DEVICE); fusb300_set_idma()
954 fusb300_fill_idma_prdtbl(ep, req->req.dma, req->req.length); fusb300_set_idma()
959 &req->req, DMA_TO_DEVICE); fusb300_set_idma()
964 struct fusb300_request *req = list_entry(ep->queue.next, in_ep_fifo_handler() local
967 if (req->req.length) in_ep_fifo_handler()
968 fusb300_set_idma(ep, req); in_ep_fifo_handler()
969 done(ep, req, 0); in_ep_fifo_handler()
975 struct fusb300_request *req = list_entry(ep->queue.next, out_ep_fifo_handler() local
980 fusb300_rdfifo(ep, req, length); out_ep_fifo_handler()
983 if ((req->req.length == req->req.actual) || (length < ep->ep.maxpacket)) out_ep_fifo_handler()
984 done(ep, req, 0); out_ep_fifo_handler()
1015 struct fusb300_request *req; fusb300_ep0out() local
1017 req = list_first_entry(&ep->queue, fusb300_ep0out()
1019 if (req->req.length) fusb300_ep0out()
1020 fusb300_rdcxf(ep->fusb300, req->req.buf, fusb300_ep0out()
1021 req->req.length); fusb300_ep0out()
1022 done(ep, req, 0); fusb300_ep0out()
1032 struct fusb300_request *req; fusb300_ep0in() local
1036 req = list_entry(ep->queue.next, fusb300_ep0in()
1038 if (req->req.length) fusb300_ep0in()
1039 fusb300_wrcxf(ep, req); fusb300_ep0in()
1040 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) fusb300_ep0in()
1041 done(ep, req, 0); fusb300_ep0in()
333 fusb300_wrcxf(struct fusb300_ep *ep, struct fusb300_request *req) fusb300_wrcxf() argument
603 fusb300_rdfifo(struct fusb300_ep *ep, struct fusb300_request *req, u32 length) fusb300_rdfifo() argument
941 fusb300_set_idma(struct fusb300_ep *ep, struct fusb300_request *req) fusb300_set_idma() argument
H A Datmel_usba_udc.c40 struct usba_request *req, *req_copy; queue_dbg_open() local
49 list_for_each_entry(req, &ep->queue, queue) { queue_dbg_open()
50 req_copy = kmemdup(req, sizeof(*req_copy), GFP_ATOMIC); queue_dbg_open()
62 list_for_each_entry_safe(req, req_copy, queue_data, queue) { list_for_each_entry_safe()
63 list_del(&req->queue); list_for_each_entry_safe()
64 kfree(req); list_for_each_entry_safe()
88 struct usba_request *req, *tmp_req; queue_dbg_read() local
96 list_for_each_entry_safe(req, tmp_req, queue, queue) { list_for_each_entry_safe()
99 req->req.buf, req->req.length, list_for_each_entry_safe()
100 req->req.no_interrupt ? 'i' : 'I', list_for_each_entry_safe()
101 req->req.zero ? 'Z' : 'z', list_for_each_entry_safe()
102 req->req.short_not_ok ? 's' : 'S', list_for_each_entry_safe()
103 req->req.status, list_for_each_entry_safe()
104 req->submitted ? 'F' : 'f', list_for_each_entry_safe()
105 req->using_dma ? 'D' : 'd', list_for_each_entry_safe()
106 req->last_transaction ? 'L' : 'l'); list_for_each_entry_safe()
111 list_del(&req->queue); list_for_each_entry_safe()
112 kfree(req); list_for_each_entry_safe()
130 struct usba_request *req, *tmp_req; queue_dbg_release() local
132 list_for_each_entry_safe(req, tmp_req, queue_data, queue) { list_for_each_entry_safe()
133 list_del(&req->queue); list_for_each_entry_safe()
134 kfree(req); list_for_each_entry_safe()
359 static void next_fifo_transaction(struct usba_ep *ep, struct usba_request *req) next_fifo_transaction() argument
363 transaction_len = req->req.length - req->req.actual; next_fifo_transaction()
364 req->last_transaction = 1; next_fifo_transaction()
367 req->last_transaction = 0; next_fifo_transaction()
368 } else if (transaction_len == ep->ep.maxpacket && req->req.zero) next_fifo_transaction()
369 req->last_transaction = 0; next_fifo_transaction()
371 DBG(DBG_QUEUE, "%s: submit_transaction, req %p (length %d)%s\n", next_fifo_transaction()
372 ep->ep.name, req, transaction_len, next_fifo_transaction()
373 req->last_transaction ? ", done" : ""); next_fifo_transaction()
375 memcpy_toio(ep->fifo, req->req.buf + req->req.actual, transaction_len); next_fifo_transaction()
377 req->req.actual += transaction_len; next_fifo_transaction()
380 static void submit_request(struct usba_ep *ep, struct usba_request *req) submit_request() argument
382 DBG(DBG_QUEUE, "%s: submit_request: req %p (length %d)\n", submit_request()
383 ep->ep.name, req, req->req.length); submit_request()
385 req->req.actual = 0; submit_request()
386 req->submitted = 1; submit_request()
388 if (req->using_dma) { submit_request()
389 if (req->req.length == 0) { submit_request()
394 if (req->req.zero) submit_request()
399 usba_dma_writel(ep, ADDRESS, req->req.dma); submit_request()
400 usba_dma_writel(ep, CONTROL, req->ctrl); submit_request()
402 next_fifo_transaction(ep, req); submit_request()
403 if (req->last_transaction) { submit_request()
415 struct usba_request *req; submit_next_request() local
422 req = list_entry(ep->queue.next, struct usba_request, queue); submit_next_request()
423 if (!req->submitted) submit_next_request()
424 submit_request(ep, req); submit_next_request()
437 struct usba_request *req; receive_data() local
452 req = list_entry(ep->queue.next, receive_data()
459 if (req->req.actual + bytecount >= req->req.length) { receive_data()
461 bytecount = req->req.length - req->req.actual; receive_data()
464 memcpy_fromio(req->req.buf + req->req.actual, receive_data()
466 req->req.actual += bytecount; receive_data()
472 req->req.status = 0; receive_data()
473 list_del_init(&req->queue); receive_data()
476 usb_gadget_giveback_request(&ep->ep, &req->req); receive_data()
491 request_complete(struct usba_ep *ep, struct usba_request *req, int status) request_complete() argument
495 WARN_ON(!list_empty(&req->queue)); request_complete()
497 if (req->req.status == -EINPROGRESS) request_complete()
498 req->req.status = status; request_complete()
500 if (req->using_dma) request_complete()
501 usb_gadget_unmap_request(&udc->gadget, &req->req, ep->is_in); request_complete()
504 "%s: req %p complete: status %d, actual %u\n", request_complete()
505 ep->ep.name, req, req->req.status, req->req.actual); request_complete()
508 usb_gadget_giveback_request(&ep->ep, &req->req); request_complete()
515 struct usba_request *req, *tmp_req; request_complete_list() local
517 list_for_each_entry_safe(req, tmp_req, list, queue) { list_for_each_entry_safe()
518 list_del_init(&req->queue); list_for_each_entry_safe()
519 request_complete(ep, req, status); list_for_each_entry_safe()
678 struct usba_request *req; usba_ep_alloc_request() local
682 req = kzalloc(sizeof(*req), gfp_flags); usba_ep_alloc_request()
683 if (!req) usba_ep_alloc_request()
686 INIT_LIST_HEAD(&req->queue); usba_ep_alloc_request()
688 return &req->req; usba_ep_alloc_request()
694 struct usba_request *req = to_usba_req(_req); usba_ep_free_request() local
698 kfree(req); usba_ep_free_request()
702 struct usba_request *req, gfp_t gfp_flags) queue_dma()
707 DBG(DBG_DMA, "%s: req l/%u d/%08x %c%c%c\n", queue_dma()
708 ep->ep.name, req->req.length, req->req.dma, queue_dma()
709 req->req.zero ? 'Z' : 'z', queue_dma()
710 req->req.short_not_ok ? 'S' : 's', queue_dma()
711 req->req.no_interrupt ? 'I' : 'i'); queue_dma()
713 if (req->req.length > 0x10000) { queue_dma()
715 DBG(DBG_ERR, "invalid request length %u\n", req->req.length); queue_dma()
719 ret = usb_gadget_map_request(&udc->gadget, &req->req, ep->is_in); queue_dma()
723 req->using_dma = 1; queue_dma()
724 req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length) queue_dma()
729 req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; queue_dma()
740 submit_request(ep, req); queue_dma()
742 list_add_tail(&req->queue, &ep->queue); queue_dma()
753 struct usba_request *req = to_usba_req(_req); usba_ep_queue() local
759 DBG(DBG_GADGET | DBG_QUEUE | DBG_REQ, "%s: queue req %p, len %u\n", usba_ep_queue()
760 ep->ep.name, req, _req->length); usba_ep_queue()
766 req->submitted = 0; usba_ep_queue()
767 req->using_dma = 0; usba_ep_queue()
768 req->last_transaction = 0; usba_ep_queue()
774 return queue_dma(udc, ep, req, gfp_flags); usba_ep_queue()
780 list_add_tail(&req->queue, &ep->queue); usba_ep_queue()
797 usba_update_req(struct usba_ep *ep, struct usba_request *req, u32 status) usba_update_req() argument
799 req->req.actual = req->req.length - USBA_BFEXT(DMA_BUF_LEN, status); usba_update_req()
838 struct usba_request *req; usba_ep_dequeue() local
842 DBG(DBG_GADGET | DBG_QUEUE, "ep_dequeue: %s, req %p\n", usba_ep_dequeue()
843 ep->ep.name, req); usba_ep_dequeue()
847 list_for_each_entry(req, &ep->queue, queue) { usba_ep_dequeue()
848 if (&req->req == _req) usba_ep_dequeue()
852 if (&req->req != _req) { usba_ep_dequeue()
857 if (req->using_dma) { usba_ep_dequeue()
862 if (ep->queue.next == &req->queue) { usba_ep_dequeue()
873 usba_update_req(ep, req, status); usba_ep_dequeue()
881 list_del_init(&req->queue); usba_ep_dequeue()
883 request_complete(ep, req, -ECONNRESET); usba_ep_dequeue()
1040 struct usba_request *req, *tmp_req; reset_all_endpoints() local
1045 list_for_each_entry_safe(req, tmp_req, &ep->queue, queue) { reset_all_endpoints()
1046 list_del_init(&req->queue); reset_all_endpoints()
1047 request_complete(ep, req, -ECONNRESET); reset_all_endpoints()
1348 struct usba_request *req; usba_control_irq() local
1359 req = NULL; usba_control_irq()
1361 req = list_entry(ep->queue.next, usba_control_irq()
1365 if (req->submitted) usba_control_irq()
1366 next_fifo_transaction(ep, req); usba_control_irq()
1368 submit_request(ep, req); usba_control_irq()
1370 if (req->last_transaction) { usba_control_irq()
1393 if (req) { usba_control_irq()
1394 list_del_init(&req->queue); usba_control_irq()
1395 request_complete(ep, req, 0); usba_control_irq()
1423 if (req) { usba_control_irq()
1424 list_del_init(&req->queue); usba_control_irq()
1425 request_complete(ep, req, 0); usba_control_irq()
1473 if (req) { usba_control_irq()
1474 list_del_init(&req->queue); usba_control_irq()
1475 request_complete(ep, req, status); usba_control_irq()
1523 DBG(DBG_BUS, "req %02x.%02x, length %d, state %d, ret %d\n", usba_control_irq()
1536 struct usba_request *req; usba_ep_irq() local
1554 req = list_entry(ep->queue.next, struct usba_request, queue); usba_ep_irq()
1556 if (req->using_dma) { usba_ep_irq()
1562 list_del_init(&req->queue); usba_ep_irq()
1564 request_complete(ep, req, 0); usba_ep_irq()
1566 if (req->submitted) usba_ep_irq()
1567 next_fifo_transaction(ep, req); usba_ep_irq()
1569 submit_request(ep, req); usba_ep_irq()
1571 if (req->last_transaction) { usba_ep_irq()
1572 list_del_init(&req->queue); usba_ep_irq()
1574 request_complete(ep, req, 0); usba_ep_irq()
1589 struct usba_request *req; usba_dma_irq() local
1618 req = list_entry(ep->queue.next, struct usba_request, queue); usba_dma_irq()
1619 usba_update_req(ep, req, status); usba_dma_irq()
1621 list_del_init(&req->queue); usba_dma_irq()
1623 request_complete(ep, req, 0); usba_dma_irq()
701 queue_dma(struct usba_udc *udc, struct usba_ep *ep, struct usba_request *req, gfp_t gfp_flags) queue_dma() argument
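The atmel_usba hits above all come from the request path around queue_dma(): the driver refuses transfers longer than 0x10000 bytes, maps the buffer with usb_gadget_map_request(), and only then marks the request as DMA-owned and queues it. Below is a minimal plain-C sketch of that ordering; struct fake_req and prepare_for_dma() are invented stand-ins, not the driver's struct usba_request.

    #include <stddef.h>

    /* Hypothetical simplified request; the real driver uses struct usba_request. */
    struct fake_req {
        size_t length;      /* bytes the gadget asked to transfer */
        int    using_dma;   /* set only after the buffer has been mapped */
    };

    static int prepare_for_dma(struct fake_req *req, size_t hw_limit)
    {
        if (req->length > hw_limit)   /* usba rejects anything over 0x10000 */
            return -1;                /* -EINVAL in the real driver */
        /* the real code calls usb_gadget_map_request() at this point */
        req->using_dma = 1;
        return 0;
    }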
H A Damd5536udc.c74 static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
75 static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
101 * used if EPIN irq came but no req was available
496 udc_free_request(&ep->ep, &ep->bna_dummy_req->req); udc_ep_disable()
508 struct udc_request *req; udc_alloc_request() local
518 req = kzalloc(sizeof(struct udc_request), gfp); udc_alloc_request()
519 if (!req) udc_alloc_request()
522 req->req.dma = DMA_DONT_USE; udc_alloc_request()
523 INIT_LIST_HEAD(&req->queue); udc_alloc_request()
528 &req->td_phys); udc_alloc_request()
530 kfree(req); udc_alloc_request()
534 VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, " udc_alloc_request()
536 req, dma_desc, udc_alloc_request()
537 (unsigned long)req->td_phys); udc_alloc_request()
543 req->td_data = dma_desc; udc_alloc_request()
544 req->td_data_last = NULL; udc_alloc_request()
545 req->chain_len = 1; udc_alloc_request()
548 return &req->req; udc_alloc_request()
556 struct udc_request *req; udc_free_request() local
562 req = container_of(usbreq, struct udc_request, req); udc_free_request()
563 VDBG(ep->dev, "free_req req=%p\n", req); udc_free_request()
564 BUG_ON(!list_empty(&req->queue)); udc_free_request()
565 if (req->td_data) { udc_free_request()
566 VDBG(ep->dev, "req->td_data=%p\n", req->td_data); udc_free_request()
569 if (req->chain_len > 1) udc_free_request()
570 udc_free_dma_chain(ep->dev, req); udc_free_request()
572 pci_pool_free(ep->dev->data_requests, req->td_data, udc_free_request()
573 req->td_phys); udc_free_request()
575 kfree(req); udc_free_request()
579 static void udc_init_bna_dummy(struct udc_request *req) udc_init_bna_dummy() argument
581 if (req) { udc_init_bna_dummy()
583 req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L); udc_init_bna_dummy()
585 req->td_data->next = req->td_phys; udc_init_bna_dummy()
587 req->td_data->status udc_init_bna_dummy()
588 = AMD_ADDBITS(req->td_data->status, udc_init_bna_dummy()
593 req->td_data, req->td_data->status); udc_init_bna_dummy()
601 struct udc_request *req = NULL; udc_alloc_bna_dummy() local
607 req = container_of(_req, struct udc_request, req); udc_alloc_bna_dummy()
608 ep->bna_dummy_req = req; udc_alloc_bna_dummy()
609 udc_init_bna_dummy(req); udc_alloc_bna_dummy()
611 return req; udc_alloc_bna_dummy()
616 udc_txfifo_write(struct udc_ep *ep, struct usb_request *req) udc_txfifo_write() argument
624 if (!req || !ep) udc_txfifo_write()
627 req_buf = req->buf + req->actual; udc_txfifo_write()
629 remaining = req->length - req->actual; udc_txfifo_write()
689 udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req) udc_rxfifo_read() argument
700 buf_space = req->req.length - req->req.actual; udc_rxfifo_read()
701 buf = req->req.buf + req->req.actual; udc_rxfifo_read()
707 req->req.status = -EOVERFLOW; udc_rxfifo_read()
711 req->req.actual += bytes; udc_rxfifo_read()
715 || ((req->req.actual == req->req.length) && !req->req.zero)) udc_rxfifo_read()
726 static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp) prep_dma() argument
732 VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n", prep_dma()
733 ep->num, req->td_data); prep_dma()
736 req->td_data->bufptr = req->req.dma; prep_dma()
739 req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L); prep_dma()
744 retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp); prep_dma()
751 if (req->req.length == ep->ep.maxpacket) { prep_dma()
753 req->td_data->status = prep_dma()
754 AMD_ADDBITS(req->td_data->status, prep_dma()
764 VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d " prep_dma()
766 use_dma_ppb, req->req.length, prep_dma()
772 if (!use_dma_ppb || req->req.length < ep->ep.maxpacket prep_dma()
776 req->td_data->status = prep_dma()
777 AMD_ADDBITS(req->td_data->status, prep_dma()
778 req->req.length, prep_dma()
781 req->td_data->status = prep_dma()
782 AMD_ADDBITS(req->td_data->status, prep_dma()
787 req->td_data->status = prep_dma()
788 AMD_ADDBITS(req->td_data->status, prep_dma()
794 req->td_data->status = prep_dma()
795 AMD_ADDBITS(req->td_data->status, prep_dma()
816 complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
828 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);
834 if (req->req.status == -EINPROGRESS)
835 req->req.status = sts;
838 list_del_init(&req->queue);
840 VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
841 &req->req, req->req.length, ep->ep.name, sts);
844 usb_gadget_giveback_request(&ep->ep, &req->req);
850 static int udc_free_dma_chain(struct udc *dev, struct udc_request *req) udc_free_dma_chain() argument
858 DBG(dev, "free chain req = %p\n", req); udc_free_dma_chain()
861 td_last = req->td_data; udc_free_dma_chain()
864 for (i = 1; i < req->chain_len; i++) { udc_free_dma_chain()
876 static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req) udc_get_last_dma_desc() argument
880 td = req->td_data; udc_get_last_dma_desc()
889 static u32 udc_get_ppbdu_rxbytes(struct udc_request *req) udc_get_ppbdu_rxbytes() argument
894 td = req->td_data; udc_get_ppbdu_rxbytes()
914 struct udc_request *req, udc_create_dma_chain()
918 unsigned long bytes = req->req.length; udc_create_dma_chain()
933 req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L); udc_create_dma_chain()
936 len = req->req.length / ep->ep.maxpacket; udc_create_dma_chain()
937 if (req->req.length % ep->ep.maxpacket) udc_create_dma_chain()
940 if (len > req->chain_len) { udc_create_dma_chain()
942 if (req->chain_len > 1) udc_create_dma_chain()
943 udc_free_dma_chain(ep->dev, req); udc_create_dma_chain()
944 req->chain_len = len; udc_create_dma_chain()
948 td = req->td_data; udc_create_dma_chain()
963 req->td_data->next); udc_create_dma_chain()
972 td->bufptr = req->req.dma + i; /* assign buffer */ udc_create_dma_chain()
987 req->td_data->next = dma_addr; udc_create_dma_chain()
990 req->td_data->next = virt_to_phys(td); udc_create_dma_chain()
995 req->td_data->status = udc_create_dma_chain()
996 AMD_ADDBITS(req->td_data->status, udc_create_dma_chain()
1024 req->td_data_last = td; udc_create_dma_chain()
1055 struct udc_request *req; udc_queue() local
1060 req = container_of(usbreq, struct udc_request, req); udc_queue()
1063 || !list_empty(&req->queue)) udc_queue()
1078 VDBG(dev, "DMA map req %p\n", req); udc_queue()
1084 VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n", udc_queue()
1086 req->td_data, usbreq->buf); udc_queue()
1091 req->dma_done = 0; udc_queue()
1098 complete_req(ep, req, 0); udc_queue()
1124 retval = prep_dma(ep, req, GFP_ATOMIC); udc_queue()
1130 req->td_data->status = udc_queue()
1131 AMD_ADDBITS(req->td_data->status, udc_queue()
1156 req->td_data, udc_queue()
1161 writel(req->td_phys, &ep->regs->desptr); udc_queue()
1192 retval = prep_dma(ep, req, GFP_ATOMIC); udc_queue()
1199 if (req) { udc_queue()
1201 list_add_tail(&req->queue, &ep->queue); udc_queue()
1206 req->dma_going = 1; udc_queue()
1220 if (udc_rxfifo_read(ep, req)) { udc_queue()
1222 complete_req(ep, req, 0); udc_queue()
1238 struct udc_request *req; empty_req_queue() local
1242 req = list_entry(ep->queue.next, empty_req_queue()
1245 complete_req(ep, req, -ESHUTDOWN); empty_req_queue()
1253 struct udc_request *req; udc_dequeue() local
1262 req = container_of(usbreq, struct udc_request, req); udc_dequeue()
1268 if (ep->queue.next == &req->queue) { udc_dequeue()
1269 if (ep->dma && req->dma_going) { udc_dequeue()
1283 dma_sts = AMD_GETBITS(req->td_data->status, udc_dequeue()
1288 udc_init_bna_dummy(ep->req); udc_dequeue()
1296 complete_req(ep, req, -ECONNRESET); udc_dequeue()
2050 struct udc_request *req; udc_data_out_isr() local
2087 req = list_entry(ep->queue.next, udc_data_out_isr()
2090 req = NULL; udc_data_out_isr()
2093 VDBG(dev, "req = %p\n", req); udc_data_out_isr()
2098 if (req && udc_rxfifo_read(ep, req)) { udc_data_out_isr()
2102 complete_req(ep, req, 0); udc_data_out_isr()
2105 req = list_entry(ep->queue.next, udc_data_out_isr()
2108 req = NULL; udc_data_out_isr()
2112 } else if (!ep->cancel_transfer && req != NULL) { udc_data_out_isr()
2117 dma_done = AMD_GETBITS(req->td_data->status, udc_data_out_isr()
2127 memcpy(req->td_data, ep->bna_dummy_req->td_data, udc_data_out_isr()
2130 udc_init_bna_dummy(ep->req); udc_data_out_isr()
2132 td = udc_get_last_dma_desc(req); udc_data_out_isr()
2139 count = AMD_GETBITS(req->td_data->status, udc_data_out_isr()
2144 VDBG(dev, "req->td_data=%p\n", req->td_data); udc_data_out_isr()
2149 count = udc_get_ppbdu_rxbytes(req); udc_data_out_isr()
2154 if (!count && req->req.length udc_data_out_isr()
2166 tmp = req->req.length - req->req.actual; udc_data_out_isr()
2171 req->req.status = -EOVERFLOW; udc_data_out_isr()
2175 req->req.actual += count; udc_data_out_isr()
2176 req->dma_going = 0; udc_data_out_isr()
2178 complete_req(ep, req, 0); udc_data_out_isr()
2182 req = list_entry(ep->queue.next, udc_data_out_isr()
2191 if (req->dma_going == 0) { udc_data_out_isr()
2193 if (prep_dma(ep, req, GFP_ATOMIC) != 0) udc_data_out_isr()
2196 writel(req->td_phys, udc_data_out_isr()
2198 req->dma_going = 1; udc_data_out_isr()
2265 struct udc_request *req; udc_data_in_isr() local
2304 req = list_entry(ep->queue.next, udc_data_in_isr()
2311 td = udc_get_last_dma_desc(req); udc_data_in_isr()
2317 req->req.actual = req->req.length; udc_data_in_isr()
2321 req->req.actual = req->req.length; udc_data_in_isr()
2324 if (req->req.actual == req->req.length) { udc_data_in_isr()
2325 /* complete req */ udc_data_in_isr()
2326 complete_req(ep, req, 0); udc_data_in_isr()
2327 req->dma_going = 0; udc_data_in_isr()
2349 req = list_entry(ep->queue.next, udc_data_in_isr()
2354 udc_txfifo_write(ep, &req->req); udc_data_in_isr()
2355 len = req->req.length - req->req.actual; udc_data_in_isr()
2358 req->req.actual += len; udc_data_in_isr()
2359 if (req->req.actual == req->req.length udc_data_in_isr()
2361 /* complete req */ udc_data_in_isr()
2362 complete_req(ep, req, 0); udc_data_in_isr()
2365 } else if (req && !req->dma_going) { udc_data_in_isr()
2366 VDBG(dev, "IN DMA : req=%p req->td_data=%p\n", udc_data_in_isr()
2367 req, req->td_data); udc_data_in_isr()
2368 if (req->td_data) { udc_data_in_isr()
2370 req->dma_going = 1; udc_data_in_isr()
2376 if (use_dma_ppb && req->req.length > udc_data_in_isr()
2378 req->td_data->status &= udc_data_in_isr()
2384 writel(req->td_phys, &ep->regs->desptr); udc_data_in_isr()
2387 req->td_data->status = udc_data_in_isr()
2389 req->td_data->status, udc_data_in_isr()
2580 /* no req if 0 packet, just reactivate */
2639 struct udc_request *req; udc_control_in_isr() local
2675 req = list_entry(ep->queue.next, udc_control_in_isr()
2680 writel(req->td_phys, &ep->regs->desptr); udc_control_in_isr()
2682 req->td_data->status = udc_control_in_isr()
2684 req->td_data->status, udc_control_in_isr()
2696 req->req.actual = req->req.length; udc_control_in_isr()
2698 /* complete req */ udc_control_in_isr()
2699 complete_req(ep, req, 0); udc_control_in_isr()
2703 udc_txfifo_write(ep, &req->req); udc_control_in_isr()
2706 len = req->req.length - req->req.actual; udc_control_in_isr()
2710 req->req.actual += len; udc_control_in_isr()
2711 if (req->req.actual == req->req.length udc_control_in_isr()
2713 /* complete req */ udc_control_in_isr()
2714 complete_req(ep, req, 0); udc_control_in_isr()
2877 /* disable ep0 to empty req queue */
2926 /* disable ep0 to empty req queue */
912 udc_create_dma_chain( struct udc_ep *ep, struct udc_request *req, unsigned long buf_len, gfp_t gfp_flags ) udc_create_dma_chain() argument
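One detail worth pulling out of the amd5536udc hits: udc_create_dma_chain() sizes the descriptor chain as a ceiling division of the request length by the endpoint's maxpacket, and only reallocates when the chain must grow. A small stand-alone illustration of that count (names invented, not the driver's):

    #include <assert.h>

    static unsigned int chain_len(unsigned long req_length, unsigned int maxpacket)
    {
        unsigned int len = req_length / maxpacket;

        if (req_length % maxpacket)   /* a trailing short packet needs its own descriptor */
            len++;
        return len;
    }

    /* e.g. a 1025-byte request on a 512-byte bulk endpoint needs three descriptors */
    static void chain_len_example(void)
    {
        assert(chain_len(1025, 512) == 3);
    }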
H A Dfotg210-udc.c61 static void fotg210_done(struct fotg210_ep *ep, struct fotg210_request *req, fotg210_done() argument
64 list_del_init(&req->queue); fotg210_done()
68 req->req.status = -ESHUTDOWN; fotg210_done()
70 req->req.status = status; fotg210_done()
73 usb_gadget_giveback_request(&ep->ep, &req->req); fotg210_done()
215 struct fotg210_request *req; fotg210_ep_disable() local
223 req = list_entry(ep->queue.next, fotg210_ep_disable()
226 fotg210_done(ep, req, -ECONNRESET); fotg210_ep_disable()
236 struct fotg210_request *req; fotg210_ep_alloc_request() local
238 req = kzalloc(sizeof(struct fotg210_request), gfp_flags); fotg210_ep_alloc_request()
239 if (!req) fotg210_ep_alloc_request()
242 INIT_LIST_HEAD(&req->queue); fotg210_ep_alloc_request()
244 return &req->req; fotg210_ep_alloc_request()
250 struct fotg210_request *req; fotg210_ep_free_request() local
252 req = container_of(_req, struct fotg210_request, req); fotg210_ep_free_request()
253 kfree(req); fotg210_ep_free_request()
330 struct fotg210_request *req) fotg210_start_dma()
338 buffer = req->req.buf; fotg210_start_dma()
339 length = req->req.length; fotg210_start_dma()
341 buffer = req->req.buf + req->req.actual; fotg210_start_dma()
347 buffer = req->req.buf + req->req.actual; fotg210_start_dma()
348 if (req->req.length - req->req.actual > ep->ep.maxpacket) fotg210_start_dma()
351 length = req->req.length; fotg210_start_dma()
374 req->req.actual += length; fotg210_start_dma()
380 struct fotg210_request *req) fotg210_ep0_queue()
382 if (!req->req.length) { fotg210_ep0_queue()
383 fotg210_done(ep, req, 0); fotg210_ep0_queue()
387 if (req->req.length) { fotg210_ep0_queue()
388 fotg210_start_dma(ep, req); fotg210_ep0_queue()
390 pr_err("%s : req->req.length = 0x%x\n", fotg210_ep0_queue()
391 __func__, req->req.length); fotg210_ep0_queue()
393 if ((req->req.length == req->req.actual) || fotg210_ep0_queue()
394 (req->req.actual < ep->ep.maxpacket)) fotg210_ep0_queue()
395 fotg210_done(ep, req, 0); fotg210_ep0_queue()
397 if (!req->req.length) { fotg210_ep0_queue()
398 fotg210_done(ep, req, 0); fotg210_ep0_queue()
413 struct fotg210_request *req; fotg210_ep_queue() local
418 req = container_of(_req, struct fotg210_request, req); fotg210_ep_queue()
428 list_add_tail(&req->queue, &ep->queue); fotg210_ep_queue()
430 req->req.actual = 0; fotg210_ep_queue()
431 req->req.status = -EINPROGRESS; fotg210_ep_queue()
434 fotg210_ep0_queue(ep, req); fotg210_ep_queue()
446 struct fotg210_request *req; fotg210_ep_dequeue() local
450 req = container_of(_req, struct fotg210_request, req); fotg210_ep_dequeue()
454 fotg210_done(ep, req, -ECONNRESET); fotg210_ep_dequeue()
812 struct fotg210_request *req; fotg210_ep0out() local
814 req = list_first_entry(&ep->queue, fotg210_ep0out()
817 if (req->req.length) fotg210_ep0out()
818 fotg210_start_dma(ep, req); fotg210_ep0out()
820 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) fotg210_ep0out()
821 fotg210_done(ep, req, 0); fotg210_ep0out()
832 struct fotg210_request *req; fotg210_ep0in() local
834 req = list_entry(ep->queue.next, fotg210_ep0in()
837 if (req->req.length) fotg210_ep0in()
838 fotg210_start_dma(ep, req); fotg210_ep0in()
840 if ((req->req.length - req->req.actual) < ep->ep.maxpacket) fotg210_ep0in()
841 fotg210_done(ep, req, 0); fotg210_ep0in()
857 struct fotg210_request *req = list_entry(ep->queue.next, fotg210_in_fifo_handler() local
860 if (req->req.length) fotg210_in_fifo_handler()
861 fotg210_start_dma(ep, req); fotg210_in_fifo_handler()
862 fotg210_done(ep, req, 0); fotg210_in_fifo_handler()
867 struct fotg210_request *req = list_entry(ep->queue.next, fotg210_out_fifo_handler() local
870 fotg210_start_dma(ep, req); fotg210_out_fifo_handler()
873 if (req->req.length == req->req.actual || fotg210_out_fifo_handler()
874 req->req.actual < ep->ep.maxpacket) fotg210_out_fifo_handler()
875 fotg210_done(ep, req, 0); fotg210_out_fifo_handler()
329 fotg210_start_dma(struct fotg210_ep *ep, struct fotg210_request *req) fotg210_start_dma() argument
379 fotg210_ep0_queue(struct fotg210_ep *ep, struct fotg210_request *req) fotg210_ep0_queue() argument
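The fotg210 hits keep repeating one completion test: a request is done when everything has been transferred, or when the controller delivered a short packet (see fotg210_out_fifo_handler() above). A hedged one-liner of that predicate, with placeholder parameter names:

    #include <stdbool.h>
    #include <stddef.h>

    /* Mirrors the check in the excerpt: all bytes moved, or a short packet arrived. */
    static bool request_done(size_t length, size_t actual, size_t maxpacket)
    {
        return actual == length || actual < maxpacket;
    }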
H A Dm66592-udc.c38 static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req);
39 static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req);
44 struct m66592_request *req, int status);
497 static void start_ep0_write(struct m66592_ep *ep, struct m66592_request *req) start_ep0_write() argument
506 if (req->req.length == 0) { start_ep0_write()
509 transfer_complete(ep, req, 0); start_ep0_write()
512 irq_ep0_write(ep, req); start_ep0_write()
516 static void start_packet_write(struct m66592_ep *ep, struct m66592_request *req) start_packet_write() argument
529 irq_packet_write(ep, req); start_packet_write()
532 static void start_packet_read(struct m66592_ep *ep, struct m66592_request *req) start_packet_read() argument
550 (req->req.length + ep->ep.maxpacket - 1) start_packet_read()
559 static void start_packet(struct m66592_ep *ep, struct m66592_request *req) start_packet() argument
562 start_packet_write(ep, req); start_packet()
564 start_packet_read(ep, req); start_packet()
567 static void start_ep0(struct m66592_ep *ep, struct m66592_request *req) start_ep0() argument
575 start_ep0_write(ep, req); start_ep0()
578 start_packet_read(ep, req); start_ep0()
709 struct m66592_request *req, int status)
722 list_del_init(&req->queue);
724 req->req.status = -ESHUTDOWN;
726 req->req.status = status;
732 usb_gadget_giveback_request(&ep->ep, &req->req);
736 req = list_entry(ep->queue.next, struct m66592_request, queue);
738 start_packet(ep, req);
742 static void irq_ep0_write(struct m66592_ep *ep, struct m66592_request *req) irq_ep0_write() argument
768 buf = req->req.buf + req->req.actual; irq_ep0_write()
769 size = min(bufsize, req->req.length - req->req.actual); irq_ep0_write()
772 if (req->req.buf) { irq_ep0_write()
780 req->req.actual += size; irq_ep0_write()
783 if ((!req->req.zero && (req->req.actual == req->req.length)) irq_ep0_write()
795 static void irq_packet_write(struct m66592_ep *ep, struct m66592_request *req) irq_packet_write() argument
815 buf = req->req.buf + req->req.actual; irq_packet_write()
816 size = min(bufsize, req->req.length - req->req.actual); irq_packet_write()
819 if (req->req.buf) { irq_packet_write()
829 req->req.actual += size; irq_packet_write()
831 if ((!req->req.zero && (req->req.actual == req->req.length)) irq_packet_write()
842 static void irq_packet_read(struct m66592_ep *ep, struct m66592_request *req) irq_packet_read() argument
855 req->req.status = -EPIPE; irq_packet_read()
866 buf = req->req.buf + req->req.actual; irq_packet_read()
867 req_len = req->req.length - req->req.actual; irq_packet_read()
874 req->req.actual += size; irq_packet_read()
877 if ((!req->req.zero && (req->req.actual == req->req.length)) irq_packet_read()
886 if (req->req.buf) { irq_packet_read()
894 transfer_complete(ep, req, 0); irq_packet_read()
902 struct m66592_request *req; irq_pipe_ready() local
910 req = list_entry(ep->queue.next, struct m66592_request, queue); irq_pipe_ready()
911 irq_packet_read(ep, req); irq_pipe_ready()
918 req = list_entry(ep->queue.next, irq_pipe_ready()
921 irq_packet_write(ep, req); irq_pipe_ready()
923 irq_packet_read(ep, req); irq_pipe_ready()
935 struct m66592_request *req; irq_pipe_empty() local
941 req = list_entry(ep->queue.next, struct m66592_request, queue); irq_pipe_empty()
942 irq_ep0_write(ep, req); irq_pipe_empty()
954 req = list_entry(ep->queue.next, irq_pipe_empty()
958 transfer_complete(ep, req, 0); irq_pipe_empty()
1014 struct m66592_request *req; clear_feature() local
1023 req = list_entry(ep->queue.next, clear_feature()
1029 start_packet(ep, req); clear_feature()
1170 struct m66592_request *req; variable in typeref:struct:m66592_request
1172 req = list_entry(ep->queue.next, struct m66592_request, queue);
1173 transfer_complete(ep, req, 0);
1318 struct m66592_request *req; m66592_disable() local
1325 req = list_entry(ep->queue.next, struct m66592_request, queue); m66592_disable()
1327 transfer_complete(ep, req, -ECONNRESET); m66592_disable()
1338 struct m66592_request *req; m66592_alloc_request() local
1340 req = kzalloc(sizeof(struct m66592_request), gfp_flags); m66592_alloc_request()
1341 if (!req) m66592_alloc_request()
1344 INIT_LIST_HEAD(&req->queue); m66592_alloc_request()
1346 return &req->req; m66592_alloc_request()
1351 struct m66592_request *req; m66592_free_request() local
1353 req = container_of(_req, struct m66592_request, req); m66592_free_request()
1354 kfree(req); m66592_free_request()
1361 struct m66592_request *req; m66592_queue() local
1366 req = container_of(_req, struct m66592_request, req); m66592_queue()
1376 list_add_tail(&req->queue, &ep->queue); m66592_queue()
1377 req->req.actual = 0; m66592_queue()
1378 req->req.status = -EINPROGRESS; m66592_queue()
1381 start_ep0(ep, req); m66592_queue()
1384 start_packet(ep, req); m66592_queue()
1395 struct m66592_request *req; m66592_dequeue() local
1399 req = container_of(_req, struct m66592_request, req); m66592_dequeue()
1403 transfer_complete(ep, req, -ECONNRESET); m66592_dequeue()
1412 struct m66592_request *req; m66592_set_halt() local
1417 req = list_entry(ep->queue.next, struct m66592_request, queue); m66592_set_halt()
H A Dpxa25x_udc.c303 struct pxa25x_request *req; pxa25x_ep_alloc_request() local
305 req = kzalloc(sizeof(*req), gfp_flags); pxa25x_ep_alloc_request()
306 if (!req) pxa25x_ep_alloc_request()
309 INIT_LIST_HEAD (&req->queue); pxa25x_ep_alloc_request()
310 return &req->req; pxa25x_ep_alloc_request()
320 struct pxa25x_request *req; pxa25x_ep_free_request() local
322 req = container_of (_req, struct pxa25x_request, req); pxa25x_ep_free_request()
323 WARN_ON(!list_empty (&req->queue)); pxa25x_ep_free_request()
324 kfree(req); pxa25x_ep_free_request()
332 static void done(struct pxa25x_ep *ep, struct pxa25x_request *req, int status) done() argument
336 list_del_init(&req->queue); done()
338 if (likely (req->req.status == -EINPROGRESS)) done()
339 req->req.status = status; done()
341 status = req->req.status; done()
344 DBG(DBG_VERBOSE, "complete %s req %p stat %d len %u/%u\n", done()
345 ep->ep.name, &req->req, status, done()
346 req->req.actual, req->req.length); done()
350 usb_gadget_giveback_request(&ep->ep, &req->req); done()
361 write_packet(volatile u32 *uddr, struct pxa25x_request *req, unsigned max) write_packet() argument
366 buf = req->req.buf + req->req.actual; write_packet()
370 length = min(req->req.length - req->req.actual, max); write_packet()
371 req->req.actual += length; write_packet()
386 write_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) write_fifo() argument
395 count = write_packet(ep->reg_uddr, req, max); write_fifo()
401 if (likely(req->req.length != req->req.actual) write_fifo()
402 || req->req.zero) write_fifo()
413 req->req.length - req->req.actual, req); write_fifo()
425 done (ep, req, 0); write_fifo()
439 /* caller asserts req->pending (ep0 irq status nyet cleared); starts
453 write_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) write_ep0_fifo() argument
458 count = write_packet(&UDDR0, req, EP0_FIFO_SIZE); write_ep0_fifo()
465 req->req.length - req->req.actual, req); write_ep0_fifo()
473 count = req->req.length; write_ep0_fifo()
474 done (ep, req, 0); write_ep0_fifo()
513 read_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) read_fifo() argument
527 buf = req->req.buf + req->req.actual; read_fifo()
529 bufferspace = req->req.length - req->req.actual; read_fifo()
534 req->req.actual += min (count, bufferspace); read_fifo()
538 DBG(DBG_VERY_NOISY, "read %s %02x, %d bytes%s req %p %d/%d\n", read_fifo()
541 req, req->req.actual, req->req.length); read_fifo()
550 if (req->req.status != -EOVERFLOW) read_fifo()
553 req->req.status = -EOVERFLOW; read_fifo()
565 req->req.status = -EHOSTUNREACH; read_fifo()
571 if (is_short || req->req.actual == req->req.length) { read_fifo()
572 done (ep, req, 0); read_fifo()
590 read_ep0_fifo (struct pxa25x_ep *ep, struct pxa25x_request *req) read_ep0_fifo() argument
595 buf = req->req.buf + req->req.actual; read_ep0_fifo()
596 bufferspace = req->req.length - req->req.actual; read_ep0_fifo()
606 if (req->req.status != -EOVERFLOW) read_ep0_fifo()
608 req->req.status = -EOVERFLOW; read_ep0_fifo()
611 req->req.actual++; read_ep0_fifo()
619 if (req->req.actual >= req->req.length) read_ep0_fifo()
631 struct pxa25x_request *req; pxa25x_ep_queue() local
636 req = container_of(_req, struct pxa25x_request, req); pxa25x_ep_queue()
638 || !list_empty(&req->queue))) { pxa25x_ep_queue()
660 && req->req.length > usb_endpoint_maxp(ep->ep.desc))) pxa25x_ep_queue()
663 DBG(DBG_NOISY, "%s queue req %p, len %d buf %p\n", pxa25x_ep_queue()
679 if (write_ep0_fifo(ep, req)) pxa25x_ep_queue()
680 req = NULL; pxa25x_ep_queue()
692 done(ep, req, 0); pxa25x_ep_queue()
700 && read_ep0_fifo(ep, req))) { pxa25x_ep_queue()
702 done(ep, req, 0); pxa25x_ep_queue()
703 req = NULL; pxa25x_ep_queue()
715 && write_fifo(ep, req)) pxa25x_ep_queue()
716 req = NULL; pxa25x_ep_queue()
718 && read_fifo(ep, req)) { pxa25x_ep_queue()
719 req = NULL; pxa25x_ep_queue()
722 if (likely(req && ep->ep.desc)) pxa25x_ep_queue()
727 if (likely(req != NULL)) pxa25x_ep_queue()
728 list_add_tail(&req->queue, &ep->queue); pxa25x_ep_queue()
740 struct pxa25x_request *req; nuke() local
744 req = list_entry(ep->queue.next, nuke()
747 done(ep, req, status); nuke()
758 struct pxa25x_request *req; pxa25x_ep_dequeue() local
768 list_for_each_entry (req, &ep->queue, queue) { pxa25x_ep_dequeue()
769 if (&req->req == _req) pxa25x_ep_dequeue()
772 if (&req->req != _req) { pxa25x_ep_dequeue()
777 done(ep, req, -ECONNRESET); pxa25x_ep_dequeue()
1082 struct pxa25x_request *req; udc_seq_show() local
1105 list_for_each_entry(req, &ep->queue, queue) { udc_seq_show()
1108 &req->req, req->req.actual, udc_seq_show()
1109 req->req.length, req->req.buf); udc_seq_show()
1423 struct pxa25x_request *req; handle_ep0() local
1431 req = NULL; handle_ep0()
1433 req = list_entry(ep->queue.next, struct pxa25x_request, queue); handle_ep0()
1601 if (req) handle_ep0()
1602 done(ep, req, 0); handle_ep0()
1605 if (req) { handle_ep0()
1607 (void) write_ep0_fifo(ep, req); handle_ep0()
1613 if (req) { handle_ep0()
1615 if (read_ep0_fifo(ep, req)) handle_ep0()
1616 done(ep, req, 0); handle_ep0()
1621 if (req) handle_ep0()
1622 done(ep, req, 0); handle_ep0()
1627 if (req) handle_ep0()
1628 done(ep, req, 0); handle_ep0()
1645 struct pxa25x_request *req; handle_ep() local
1653 req = list_entry(ep->queue.next, handle_ep()
1656 req = NULL; handle_ep()
1668 if (req && likely ((udccs & UDCCS_BI_TFS) != 0)) handle_ep()
1669 completed = write_fifo(ep, req); handle_ep()
1681 if (likely(req)) { handle_ep()
1682 completed = read_fifo(ep, req); handle_ep()
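Several of the pxa25x hits show the same PIO bookkeeping: write_packet() copies min(remaining, maxpacket) bytes and advances req.actual. Below is a simplified userspace model of that bookkeeping; the names are invented, and the real driver writes a FIFO register rather than plain memory.

    #include <stddef.h>
    #include <string.h>

    static size_t write_packet(unsigned char *fifo, const unsigned char *buf,
                               size_t length, size_t *actual, size_t maxpacket)
    {
        size_t chunk = length - *actual;   /* bytes still to send */

        if (chunk > maxpacket)
            chunk = maxpacket;             /* at most one packet per call */
        memcpy(fifo, buf + *actual, chunk);
        *actual += chunk;
        return chunk;
    }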
H A Dnet2280.c485 struct net2280_request *req; net2280_alloc_request() local
493 req = kzalloc(sizeof(*req), gfp_flags); net2280_alloc_request()
494 if (!req) net2280_alloc_request()
497 INIT_LIST_HEAD(&req->queue); net2280_alloc_request()
504 &req->td_dma); net2280_alloc_request()
506 kfree(req); net2280_alloc_request()
511 req->td = td; net2280_alloc_request()
513 return &req->req; net2280_alloc_request()
519 struct net2280_request *req; net2280_free_request() local
523 		dev_err(&ep->dev->pdev->dev, "%s: Invalid ep=%p or req=%p\n", net2280_free_request()
528 req = container_of(_req, struct net2280_request, req); net2280_free_request()
529 WARN_ON(!list_empty(&req->queue)); net2280_free_request()
530 if (req->td) net2280_free_request()
531 pci_pool_free(ep->dev->requests, req->td, req->td_dma); net2280_free_request()
532 kfree(req); net2280_free_request()
544 static void write_fifo(struct net2280_ep *ep, struct usb_request *req) write_fifo() argument
553 if (req) { write_fifo()
554 buf = req->buf + req->actual; write_fifo()
556 total = req->length - req->actual; write_fifo()
567 ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n", write_fifo()
570 req); write_fifo()
644 static int read_fifo(struct net2280_ep *ep, struct net2280_request *req) read_fifo() argument
647 u8 *buf = req->req.buf + req->req.actual; read_fifo()
681 tmp = req->req.length - req->req.actual; read_fifo()
688 req->req.status = -EOVERFLOW; read_fifo()
696 req->req.actual += count; read_fifo()
700 ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n", read_fifo()
703 req, req->req.actual, req->req.length); read_fifo()
727 return is_short || ((req->req.actual == req->req.length) && read_fifo()
728 !req->req.zero); read_fifo()
733 struct net2280_request *req, int valid) fill_dma_desc()
735 struct net2280_dma *td = req->td; fill_dma_desc()
736 u32 dmacount = req->req.length; fill_dma_desc()
749 req->valid = valid; fill_dma_desc()
755 td->dmaaddr = cpu_to_le32 (req->req.dma); fill_dma_desc()
809 static void start_dma(struct net2280_ep *ep, struct net2280_request *req) start_dma() argument
831 writel(req->req.dma, &dma->dmaaddr); start_dma()
832 tmp = min(tmp, req->req.length); start_dma()
835 req->td->dmacount = cpu_to_le32(req->req.length - tmp); start_dma()
838 req->td->dmadesc = 0; start_dma()
839 req->valid = 1; start_dma()
854 if (likely((req->req.length % ep->ep.maxpacket) || start_dma()
855 req->req.zero)){ start_dma()
862 /* init req->td, pointing to the current dummy */ start_dma()
863 req->td->dmadesc = cpu_to_le32 (ep->td_dma); start_dma()
864 fill_dma_desc(ep, req, 1); start_dma()
866 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN)); start_dma()
868 start_queue(ep, tmp, req->td_dma); start_dma()
872 queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid) queue_dma() argument
879 ep->dummy = req->td; queue_dma()
880 req->td = end; queue_dma()
883 ep->td_dma = req->td_dma; queue_dma()
884 req->td_dma = tmp; queue_dma()
888 fill_dma_desc(ep, req, valid); queue_dma()
892 done(struct net2280_ep *ep, struct net2280_request *req, int status) done() argument
897 list_del_init(&req->queue); done()
899 if (req->req.status == -EINPROGRESS) done()
900 req->req.status = status; done()
902 status = req->req.status; done()
906 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in); done()
909 ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n", done()
910 ep->ep.name, &req->req, status, done()
911 req->req.actual, req->req.length); done()
916 usb_gadget_giveback_request(&ep->ep, &req->req); done()
926 struct net2280_request *req; net2280_queue() local
940 req = container_of(_req, struct net2280_request, req); net2280_queue()
942 !list_empty(&req->queue)) { net2280_queue()
970 ep_vdbg(dev, "%s queue req %p, len %d buf %p\n", net2280_queue()
985 start_dma(ep, req); net2280_queue()
990 done(ep, req, 0); net2280_queue()
1010 if (read_fifo(ep, req) && net2280_queue()
1012 done(ep, req, 0); net2280_queue()
1015 req = NULL; net2280_queue()
1016 } else if (read_fifo(ep, req) && net2280_queue()
1018 done(ep, req, 0); net2280_queue()
1019 req = NULL; net2280_queue()
1025 if (req && (s & BIT(NAK_OUT_PACKETS))) net2280_queue()
1040 expect = likely(req->req.zero || net2280_queue()
1041 (req->req.length % ep->ep.maxpacket)); net2280_queue()
1045 queue_dma(ep, req, valid); net2280_queue()
1050 if (req) net2280_queue()
1051 list_add_tail(&req->queue, &ep->queue); net2280_queue()
1064 dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount, dma_done() argument
1067 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount); dma_done()
1068 done(ep, req, status); dma_done()
1077 struct net2280_request *req; scan_dma_completions() local
1080 req = list_entry(ep->queue.next, scan_dma_completions()
1082 if (!req->valid) scan_dma_completions()
1085 tmp = le32_to_cpup(&req->td->dmacount); scan_dma_completions()
1093 if (unlikely(req->td->dmadesc == 0)) { scan_dma_completions()
1099 dma_done(ep, req, tmp, 0); scan_dma_completions()
1102 (req->req.length % ep->ep.maxpacket) && scan_dma_completions()
1113 req->req.status = -EOVERFLOW; scan_dma_completions()
1122 req->req.length); scan_dma_completions()
1123 req->req.status = -EOVERFLOW; scan_dma_completions()
1127 dma_done(ep, req, tmp, 0); scan_dma_completions()
1133 struct net2280_request *req; restart_dma() local
1137 req = list_entry(ep->queue.next, struct net2280_request, queue); restart_dma()
1139 start_dma(ep, req); restart_dma()
1157 struct net2280_request *req; nuke() local
1164 req = list_entry(ep->queue.next, nuke()
1167 done(ep, req, -ESHUTDOWN); nuke()
1175 struct net2280_request *req; net2280_dequeue() local
1182 pr_err("%s: Invalid ep=%p or ep->desc or req=%p\n", net2280_dequeue()
1201 list_for_each_entry(req, &ep->queue, queue) { net2280_dequeue()
1202 if (&req->req == _req) net2280_dequeue()
1205 if (&req->req != _req) { net2280_dequeue()
1213 if (ep->queue.next == &req->queue) { net2280_dequeue()
1218 if (likely(ep->queue.next == &req->queue)) { net2280_dequeue()
1220 req->td->dmacount = 0; /* invalidate */ net2280_dequeue()
1221 dma_done(ep, req, net2280_dequeue()
1227 done(ep, req, -ECONNRESET); net2280_dequeue()
1229 req = NULL; net2280_dequeue()
1232 if (req) net2280_dequeue()
1233 done(ep, req, -ECONNRESET); net2280_dequeue()
1242 if (req) net2280_dequeue()
1683 struct net2280_request *req; queues_show() local
1717 list_for_each_entry(req, &ep->queue, queue) { queues_show()
1718 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc)) queues_show()
1722 &req->req, req->req.actual, queues_show()
1723 req->req.length, req->req.buf, queues_show()
1728 &req->req, req->req.actual, queues_show()
1729 req->req.length, req->req.buf); queues_show()
1738 td = req->td; queues_show()
1741 (u32) req->td_dma, queues_show()
2350 struct net2280_request *req; handle_ep_small() local
2356 req = list_entry(ep->queue.next, handle_ep_small()
2359 req = NULL; handle_ep_small()
2365 ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n", handle_ep_small()
2366 ep->ep.name, t, req ? &req->req : NULL); handle_ep_small()
2392 if (!req) handle_ep_small()
2402 !req && !ep->stopped) handle_ep_small()
2415 req && handle_ep_small()
2416 req->req.actual == req->req.length) || handle_ep_small()
2417 (ep->responded && !req)) { handle_ep_small()
2421 if (req) handle_ep_small()
2422 done(ep, req, -EOVERFLOW); handle_ep_small()
2423 req = NULL; handle_ep_small()
2428 if (unlikely(!req)) handle_ep_small()
2450 req = NULL; handle_ep_small()
2453 req = list_entry(ep->queue.next, handle_ep_small()
2463 != req->td_dma) handle_ep_small()
2464 req = NULL; handle_ep_small()
2474 if (likely(req)) { handle_ep_small()
2475 req->td->dmacount = 0; handle_ep_small()
2477 dma_done(ep, req, count, handle_ep_small()
2502 if (read_fifo(ep, req) && ep->num != 0) handle_ep_small()
2509 len = req->req.length - req->req.actual; handle_ep_small()
2512 req->req.actual += len; handle_ep_small()
2516 if ((req->req.actual == req->req.length) && handle_ep_small()
2517 (!req->req.zero || len != ep->ep.maxpacket) && ep->num) handle_ep_small()
2527 done(ep, req, 0); handle_ep_small()
2537 req = NULL; handle_ep_small()
2540 req = list_entry(ep->queue.next, handle_ep_small()
2543 req = NULL; handle_ep_small()
2544 if (req && !ep->is_in) handle_ep_small()
2552 if (req && !ep->stopped) { handle_ep_small()
2556 write_fifo(ep, &req->req); handle_ep_small()
2861 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n", handle_stat0_irqs_superspeed()
2895 struct net2280_request *req; handle_stat0_irqs() local
2923 req = list_entry(ep->queue.next, handle_stat0_irqs()
2925 done(ep, req, (req->req.actual == req->req.length) handle_stat0_irqs()
3085 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n", handle_stat0_irqs()
3272 * less than req->length. NAK_OUT_PACKETS may be set,
732 fill_dma_desc(struct net2280_ep *ep, struct net2280_request *req, int valid) fill_dma_desc() argument
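A rule that shows up twice in the net2280 hits (start_dma() and net2280_queue()): an IN transfer is expected to end in a short or zero-length packet when the gadget set req.zero or when the length is not a multiple of maxpacket. As a stand-alone predicate, with placeholder names:

    #include <stdbool.h>
    #include <stddef.h>

    static bool expect_short_packet(size_t length, bool zero, size_t maxpacket)
    {
        return zero || (length % maxpacket) != 0;
    }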
H A Dfsl_qe_udc.c80 static void done(struct qe_ep *ep, struct qe_req *req, int status) done() argument
85 /* the req->queue pointer is used by ep_queue() func, in which done()
87 * so here the req will be dropped from the ep->queue done()
89 list_del_init(&req->queue); done()
91 /* req.status should be set as -EINPROGRESS in ep_queue() */ done()
92 if (req->req.status == -EINPROGRESS) done()
93 req->req.status = status; done()
95 status = req->req.status; done()
97 if (req->mapped) { done()
99 req->req.dma, req->req.length, done()
103 req->req.dma = DMA_ADDR_INVALID; done()
104 req->mapped = 0; done()
107 req->req.dma, req->req.length, done()
113 dev_vdbg(udc->dev, "complete %s req %p stat %d len %u/%u\n", done()
114 ep->ep.name, &req->req, status, done()
115 req->req.actual, req->req.length); done()
121 usb_gadget_giveback_request(&ep->ep, &req->req); done()
135 struct qe_req *req = NULL; nuke() local
136 req = list_entry(ep->queue.next, struct qe_req, queue); nuke()
138 done(ep, req, status); nuke()
792 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req);
897 struct qe_req *req; qe_ep_rxframe_handle() local
913 req = list_entry(ep->queue.next, struct qe_req, queue); qe_ep_rxframe_handle()
915 cp = (u8 *)(req->req.buf) + req->req.actual; qe_ep_rxframe_handle()
918 req->req.actual += fsize; qe_ep_rxframe_handle()
920 (req->req.actual >= req->req.length)) { qe_ep_rxframe_handle()
922 ep0_req_complete(ep->udc, req); qe_ep_rxframe_handle()
924 done(ep, req, 0); qe_ep_rxframe_handle()
1058 		dev_vdbg(udc->dev, "The rxep has no req queued with %d BDs\n", qe_ep_rx()
1145 struct qe_req *req = ep->tx_req; txcomplete() local
1148 last_len = min_t(unsigned, req->req.length - ep->sent, txcomplete()
1159 	/* zlp needed when req->req.zero is set */ txcomplete()
1160 if (req->req.zero) { txcomplete()
1162 (req->req.length % ep->ep.maxpacket) != 0) txcomplete()
1170 if (((ep->tx_req->req.length - ep->sent) <= 0) && !zlp) { txcomplete()
1198 size = min_t(u32, (ep->tx_req->req.length - ep->sent), qe_usb_senddata()
1200 buf = (u8 *)ep->tx_req->req.buf + ep->sent; qe_usb_senddata()
1203 ep->tx_req->req.actual += size; qe_usb_senddata()
1232 struct qe_req *req = ep->tx_req; frame_create_tx() local
1235 if (req == NULL) frame_create_tx()
1238 if ((req->req.length - ep->sent) > 0) frame_create_tx()
1267 static void ep0_req_complete(struct qe_udc *udc, struct qe_req *req) ep0_req_complete() argument
1274 done(ep, req, 0); ep0_req_complete()
1281 done(ep, req, 0); ep0_req_complete()
1286 done(ep, req, 0); ep0_req_complete()
1293 done(ep, req, 0); ep0_req_complete()
1331 if ((ep->tx_req->req.length - ep->sent) <= 0) { ep0_txcomplete()
1332 ep->tx_req->req.actual = (unsigned int)ep->sent; ep0_txcomplete()
1339 		dev_vdbg(ep->udc->dev, "the ep0_controller has no req\n"); ep0_txcomplete()
1459 static int ep_req_send(struct qe_ep *ep, struct qe_req *req) ep_req_send() argument
1473 static int ep_req_rx(struct qe_ep *ep, struct qe_req *req) ep_req_rx() argument
1485 		dev_vdbg(udc->dev, "the req already finished!\n"); ep_req_rx()
1522 cp = (u8 *)(req->req.buf) + req->req.actual; ep_req_rx()
1525 req->req.actual += fsize; ep_req_rx()
1527 || (req->req.actual >= ep_req_rx()
1528 req->req.length)) { ep_req_rx()
1530 done(ep, req, 0); ep_req_rx()
1563 static int ep_req_receive(struct qe_ep *ep, struct qe_req *req) ep_req_receive() argument
1571 ep_req_rx(ep, req); ep_req_receive()
1674 struct qe_req *req; qe_alloc_request() local
1676 req = kzalloc(sizeof(*req), gfp_flags); qe_alloc_request()
1677 if (!req) qe_alloc_request()
1680 req->req.dma = DMA_ADDR_INVALID; qe_alloc_request()
1682 INIT_LIST_HEAD(&req->queue); qe_alloc_request()
1684 return &req->req; qe_alloc_request()
1689 struct qe_req *req; qe_free_request() local
1691 req = container_of(_req, struct qe_req, req); qe_free_request()
1694 kfree(req); qe_free_request()
1700 struct qe_req *req = container_of(_req, struct qe_req, req); __qe_ep_queue() local
1706 if (!_req || !req->req.complete || !req->req.buf __qe_ep_queue()
1707 || !list_empty(&req->queue)) { __qe_ep_queue()
1719 req->ep = ep; __qe_ep_queue()
1722 if (req->req.dma == DMA_ADDR_INVALID) { __qe_ep_queue()
1723 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, __qe_ep_queue()
1724 req->req.buf, __qe_ep_queue()
1725 req->req.length, __qe_ep_queue()
1729 req->mapped = 1; __qe_ep_queue()
1732 req->req.dma, req->req.length, __qe_ep_queue()
1736 req->mapped = 0; __qe_ep_queue()
1739 req->req.status = -EINPROGRESS; __qe_ep_queue()
1740 req->req.actual = 0; __qe_ep_queue()
1742 list_add_tail(&req->queue, &ep->queue); __qe_ep_queue()
1744 ep->name, req->req.length); __qe_ep_queue()
1748 reval = ep_req_send(ep, req); __qe_ep_queue()
1751 if (ep_index(ep) == 0 && req->req.length > 0) { __qe_ep_queue()
1759 reval = ep_req_receive(ep, req); __qe_ep_queue()
1783 struct qe_req *req; qe_ep_dequeue() local
1792 list_for_each_entry(req, &ep->queue, queue) { qe_ep_dequeue()
1793 if (&req->req == _req) qe_ep_dequeue()
1797 if (&req->req != _req) { qe_ep_dequeue()
1802 done(ep, req, -ECONNRESET); qe_ep_dequeue()
1941 struct qe_req *req = container_of(_req, struct qe_req, req); ownercomplete() local
1943 req->req.buf = NULL; ownercomplete()
1944 kfree(req); ownercomplete()
1951 struct qe_req *req; ch9getstatus() local
1987 req = container_of(qe_alloc_request(&ep->ep, GFP_KERNEL), ch9getstatus()
1988 struct qe_req, req); ch9getstatus()
1989 req->req.length = 2; ch9getstatus()
1990 req->req.buf = udc->statusbuf; ch9getstatus()
1991 *(u16 *)req->req.buf = cpu_to_le16(usb_status); ch9getstatus()
1992 req->req.status = -EINPROGRESS; ch9getstatus()
1993 req->req.actual = 0; ch9getstatus()
1994 req->req.complete = ownercomplete; ch9getstatus()
1999 status = __qe_ep_queue(&ep->ep, &req->req); ch9getstatus()
2435 /* the queue lists any req for this ep */ qe_ep_config()
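The fsl_qe hits around __qe_ep_queue() show the map-or-sync decision several of these drivers make: if the gadget did not hand over a DMA address, the driver maps the buffer itself and remembers that it owns the mapping. A simplified sketch with invented types (FAKE_DMA_ADDR_INVALID is a local stand-in, not the kernel macro):

    #include <stdbool.h>
    #include <stdint.h>

    #define FAKE_DMA_ADDR_INVALID ((uint64_t)~0)

    struct fake_qreq {
        uint64_t dma;
        bool     mapped;   /* true when the driver must unmap on completion */
    };

    static void map_if_needed(struct fake_qreq *req, uint64_t mapped_addr)
    {
        if (req->dma == FAKE_DMA_ADDR_INVALID) {
            req->dma = mapped_addr;   /* dma_map_single() in the real driver */
            req->mapped = true;
        } else {
            req->mapped = false;      /* caller mapped it; only a cache sync is needed */
        }
    }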
H A Dmv_u3d_core.c134 actual = curr_req->req.length; mv_u3d_process_ep_req()
177 curr_req->req.actual = actual; mv_u3d_process_ep_req()
187 void mv_u3d_done(struct mv_u3d_ep *ep, struct mv_u3d_req *req, int status)
193 dev_dbg(u3d->dev, "mv_u3d_done: remove req->queue\n");
194 /* Removed the req from ep queue */
195 list_del_init(&req->queue);
197 /* req.status should be set as -EINPROGRESS in ep_queue() */
198 if (req->req.status == -EINPROGRESS)
199 req->req.status = status;
201 status = req->req.status;
204 if (!req->chain)
206 req->trb_head->trb_hw, req->trb_head->trb_dma);
209 (dma_addr_t)req->trb_head->trb_dma,
210 req->trb_count * sizeof(struct mv_u3d_trb_hw),
212 kfree(req->trb_head->trb_hw);
214 kfree(req->trb_head);
216 usb_gadget_unmap_request(&u3d->gadget, &req->req, mv_u3d_ep_dir(ep));
219 dev_dbg(u3d->dev, "complete %s req %p stat %d len %u/%u",
220 ep->ep.name, &req->req, status,
221 req->req.actual, req->req.length);
226 usb_gadget_giveback_request(&ep->ep, &req->req);
231 static int mv_u3d_queue_trb(struct mv_u3d_ep *ep, struct mv_u3d_req *req) mv_u3d_queue_trb() argument
260 cpu_to_le32(req->trb_head->trb_dma | DCS_ENABLE); mv_u3d_queue_trb()
280 static struct mv_u3d_trb *mv_u3d_build_trb_one(struct mv_u3d_req *req, mv_u3d_build_trb_one() argument
290 *length = req->req.length - req->req.actual; mv_u3d_build_trb_one()
293 u3d = req->ep->u3d; mv_u3d_build_trb_one()
315 temp = (u32)(req->req.dma + req->req.actual); mv_u3d_build_trb_one()
322 if (req->ep->ep_num == 0) mv_u3d_build_trb_one()
327 req->req.actual += *length; mv_u3d_build_trb_one()
329 direction = mv_u3d_ep_dir(req->ep); mv_u3d_build_trb_one()
336 if (!req->req.no_interrupt) mv_u3d_build_trb_one()
345 static int mv_u3d_build_trb_chain(struct mv_u3d_req *req, unsigned *length, mv_u3d_build_trb_chain() argument
353 *length = min(req->req.length - req->req.actual, mv_u3d_build_trb_chain()
356 u3d = req->ep->u3d; mv_u3d_build_trb_chain()
361 temp = (u32)(req->req.dma + req->req.actual); mv_u3d_build_trb_chain()
368 if (req->ep->ep_num == 0) mv_u3d_build_trb_chain()
373 req->req.actual += *length; mv_u3d_build_trb_chain()
375 direction = mv_u3d_ep_dir(req->ep); mv_u3d_build_trb_chain()
381 /* zlp is needed if req->req.zero is set */ mv_u3d_build_trb_chain()
382 if (req->req.zero) { mv_u3d_build_trb_chain()
383 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) mv_u3d_build_trb_chain()
387 } else if (req->req.length == req->req.actual) mv_u3d_build_trb_chain()
393 if (*is_last && !req->req.no_interrupt) mv_u3d_build_trb_chain()
412 static int mv_u3d_req_to_trb(struct mv_u3d_req *req) mv_u3d_req_to_trb() argument
423 u3d = req->ep->u3d; mv_u3d_req_to_trb()
425 INIT_LIST_HEAD(&req->trb_list); mv_u3d_req_to_trb()
427 length = req->req.length - req->req.actual; mv_u3d_req_to_trb()
432 trb = mv_u3d_build_trb_one(req, &count, &dma); mv_u3d_req_to_trb()
433 list_add_tail(&trb->trb_list, &req->trb_list); mv_u3d_req_to_trb()
434 req->trb_head = trb; mv_u3d_req_to_trb()
435 req->trb_count = 1; mv_u3d_req_to_trb()
436 req->chain = 0; mv_u3d_req_to_trb()
454 if (mv_u3d_build_trb_chain(req, &count, mv_u3d_req_to_trb()
462 list_add_tail(&trb->trb_list, &req->trb_list); mv_u3d_req_to_trb()
463 req->trb_count++; mv_u3d_req_to_trb()
468 req->trb_head = list_entry(req->trb_list.next, mv_u3d_req_to_trb()
470 req->trb_head->trb_dma = dma_map_single(u3d->gadget.dev.parent, mv_u3d_req_to_trb()
471 req->trb_head->trb_hw, mv_u3d_req_to_trb()
475 req->chain = 1; mv_u3d_req_to_trb()
485 struct mv_u3d_req *req; mv_u3d_start_queue() local
489 req = list_entry(ep->req_list.next, struct mv_u3d_req, list); mv_u3d_start_queue()
496 ret = usb_gadget_map_request(&u3d->gadget, &req->req, mv_u3d_start_queue()
501 req->req.status = -EINPROGRESS; mv_u3d_start_queue()
502 req->req.actual = 0; mv_u3d_start_queue()
503 req->trb_count = 0; mv_u3d_start_queue()
506 if (!mv_u3d_req_to_trb(req)) { mv_u3d_start_queue()
507 ret = mv_u3d_queue_trb(ep, req); mv_u3d_start_queue()
519 if (req) mv_u3d_start_queue()
520 list_add_tail(&req->queue, &ep->queue); mv_u3d_start_queue()
681 struct mv_u3d_req *req = NULL; mv_u3d_alloc_request() local
683 req = kzalloc(sizeof *req, gfp_flags); mv_u3d_alloc_request()
684 if (!req) mv_u3d_alloc_request()
687 INIT_LIST_HEAD(&req->queue); mv_u3d_alloc_request()
689 return &req->req; mv_u3d_alloc_request()
694 struct mv_u3d_req *req = container_of(_req, struct mv_u3d_req, req); mv_u3d_free_request() local
696 kfree(req); mv_u3d_free_request()
786 struct mv_u3d_req *req; mv_u3d_ep_queue() local
797 req = container_of(_req, struct mv_u3d_req, req); mv_u3d_ep_queue()
807 dev_dbg(u3d->dev, "%s: %s, req: 0x%p\n", mv_u3d_ep_queue()
808 __func__, _ep->name, req); mv_u3d_ep_queue()
811 if (!req->req.complete || !req->req.buf mv_u3d_ep_queue()
812 || !list_empty(&req->queue)) { mv_u3d_ep_queue()
815 "req->req.complete: 0x%p, req->req.buf: 0x%p," mv_u3d_ep_queue()
818 req->req.complete, req->req.buf, mv_u3d_ep_queue()
819 list_empty(&req->queue)); mv_u3d_ep_queue()
827 if (req->req.length > ep->ep.maxpacket) mv_u3d_ep_queue()
837 req->ep = ep; mv_u3d_ep_queue()
842 list_add_tail(&req->list, &ep->req_list); mv_u3d_ep_queue()
860 struct mv_u3d_req *req; mv_u3d_ep_dequeue() local
877 list_for_each_entry(req, &ep->queue, queue) { mv_u3d_ep_dequeue()
878 if (&req->req == _req) mv_u3d_ep_dequeue()
881 if (&req->req != _req) { mv_u3d_ep_dequeue()
887 if (ep->queue.next == &req->queue) { mv_u3d_ep_dequeue()
892 if (req->queue.next != &ep->queue) { mv_u3d_ep_dequeue()
896 next_req = list_entry(req->queue.next, mv_u3d_ep_dequeue()
912 mv_u3d_done(ep, req, -ECONNRESET); mv_u3d_ep_dequeue()
914 /* remove the req from the ep req list */ mv_u3d_ep_dequeue()
919 if (curr_req == req) { mv_u3d_ep_dequeue()
920 list_del_init(&req->list); mv_u3d_ep_dequeue()
1372 struct mv_u3d_req *req = NULL; mv_u3d_nuke() local
1373 req = list_entry(ep->queue.next, struct mv_u3d_req, queue); mv_u3d_nuke()
1374 mv_u3d_done(ep, req, status); mv_u3d_nuke()
1643 	/* remove req from the ep request list after completion */ mv_u3d_irq_process_tr_complete()
1647 struct mv_u3d_req *req; mv_u3d_irq_process_tr_complete() local
1648 req = list_entry(curr_ep->req_list.next, mv_u3d_irq_process_tr_complete()
1650 list_del_init(&req->list); mv_u3d_irq_process_tr_complete()
1655 		/* process the req queue until an incomplete request */ mv_u3d_irq_process_tr_complete()
1661 /* write back status to req */ mv_u3d_irq_process_tr_complete()
1662 curr_req->req.status = status; mv_u3d_irq_process_tr_complete()
1908 u3d->status_req->req.buf = (char *)u3d->status_req mv_u3d_probe()
1910 u3d->status_req->req.dma = virt_to_phys(u3d->status_req->req.buf); mv_u3d_probe()
H A Ds3c2410_udc.c255 struct s3c2410_request *req, int status) s3c2410_udc_done()
259 list_del_init(&req->queue); s3c2410_udc_done()
261 if (likely(req->req.status == -EINPROGRESS)) s3c2410_udc_done()
262 req->req.status = status; s3c2410_udc_done()
264 status = req->req.status; s3c2410_udc_done()
267 usb_gadget_giveback_request(&ep->ep, &req->req); s3c2410_udc_done()
279 struct s3c2410_request *req; s3c2410_udc_nuke() local
280 req = list_entry(ep->queue.next, struct s3c2410_request, s3c2410_udc_nuke()
282 s3c2410_udc_done(ep, req, status); s3c2410_udc_nuke()
299 struct s3c2410_request *req, s3c2410_udc_write_packet()
302 unsigned len = min(req->req.length - req->req.actual, max); s3c2410_udc_write_packet()
303 u8 *buf = req->req.buf + req->req.actual; s3c2410_udc_write_packet()
308 req->req.actual, req->req.length, len, req->req.actual + len); s3c2410_udc_write_packet()
310 req->req.actual += len; s3c2410_udc_write_packet()
323 struct s3c2410_request *req) s3c2410_udc_write_fifo()
352 count = s3c2410_udc_write_packet(fifo_reg, req, ep->ep.maxpacket); s3c2410_udc_write_fifo()
357 else if (req->req.length != req->req.actual || req->req.zero) s3c2410_udc_write_fifo()
366 idx, count, req->req.actual, req->req.length, s3c2410_udc_write_fifo()
367 is_last, req->req.zero); s3c2410_udc_write_fifo()
387 s3c2410_udc_done(ep, req, 0); s3c2410_udc_write_fifo()
408 struct s3c2410_request *req, unsigned avail) s3c2410_udc_read_packet()
412 len = min(req->req.length - req->req.actual, avail); s3c2410_udc_read_packet()
413 req->req.actual += len; s3c2410_udc_read_packet()
423 struct s3c2410_request *req) s3c2410_udc_read_fifo()
456 if (!req->req.length) s3c2410_udc_read_fifo()
459 buf = req->req.buf + req->req.actual; s3c2410_udc_read_fifo()
460 bufferspace = req->req.length - req->req.actual; s3c2410_udc_read_fifo()
476 fifo_count = s3c2410_udc_read_packet(fifo_reg, buf, req, avail); s3c2410_udc_read_fifo()
485 req->req.status = -EOVERFLOW; s3c2410_udc_read_fifo()
487 is_last = (req->req.length <= req->req.actual) ? 1 : 0; s3c2410_udc_read_fifo()
510 s3c2410_udc_done(ep, req, 0); s3c2410_udc_read_fifo()
745 struct s3c2410_request *req; s3c2410_udc_handle_ep0() local
749 req = NULL; s3c2410_udc_handle_ep0()
751 req = list_entry(ep->queue.next, struct s3c2410_request, queue); s3c2410_udc_handle_ep0()
786 if (!(ep0csr & S3C2410_UDC_EP0_CSR_IPKRDY) && req) s3c2410_udc_handle_ep0()
787 s3c2410_udc_write_fifo(ep, req); s3c2410_udc_handle_ep0()
792 if ((ep0csr & S3C2410_UDC_EP0_CSR_OPKRDY) && req) s3c2410_udc_handle_ep0()
793 s3c2410_udc_read_fifo(ep, req); s3c2410_udc_handle_ep0()
814 struct s3c2410_request *req; s3c2410_udc_handle_ep() local
820 req = list_entry(ep->queue.next, s3c2410_udc_handle_ep()
823 req = NULL; s3c2410_udc_handle_ep()
831 idx, ep_csr1, req ? 1 : 0); s3c2410_udc_handle_ep()
841 if (!(ep_csr1 & S3C2410_UDC_ICSR1_PKTRDY) && req) s3c2410_udc_handle_ep()
842 s3c2410_udc_write_fifo(ep, req); s3c2410_udc_handle_ep()
855 if ((ep_csr1 & S3C2410_UDC_OCSR1_PKTRDY) && req) s3c2410_udc_handle_ep()
856 s3c2410_udc_read_fifo(ep, req); s3c2410_udc_handle_ep()
1023 static inline struct s3c2410_request *to_s3c2410_req(struct usb_request *req) to_s3c2410_req() argument
1025 return container_of(req, struct s3c2410_request, req); to_s3c2410_req()
1149 struct s3c2410_request *req; s3c2410_udc_alloc_request() local
1156 req = kzalloc(sizeof(struct s3c2410_request), mem_flags); s3c2410_udc_alloc_request()
1157 if (!req) s3c2410_udc_alloc_request()
1160 INIT_LIST_HEAD(&req->queue); s3c2410_udc_alloc_request()
1161 return &req->req; s3c2410_udc_alloc_request()
1171 struct s3c2410_request *req = to_s3c2410_req(_req); s3c2410_udc_free_request() local
1178 WARN_ON(!list_empty(&req->queue)); s3c2410_udc_free_request()
1179 kfree(req); s3c2410_udc_free_request()
1188 struct s3c2410_request *req = to_s3c2410_req(_req); s3c2410_udc_queue() local
1209 || !_req->buf || !list_empty(&req->queue))) { s3c2410_udc_queue()
1215 !list_empty(&req->queue)); s3c2410_udc_queue()
1248 req)) { s3c2410_udc_queue()
1250 req = NULL; s3c2410_udc_queue()
1258 req))) { s3c2410_udc_queue()
1260 req = NULL; s3c2410_udc_queue()
1270 && s3c2410_udc_write_fifo(ep, req)) { s3c2410_udc_queue()
1271 req = NULL; s3c2410_udc_queue()
1274 && s3c2410_udc_read_fifo(ep, req)) { s3c2410_udc_queue()
1275 req = NULL; s3c2410_udc_queue()
1280 if (likely(req)) s3c2410_udc_queue()
1281 list_add_tail(&req->queue, &ep->queue); s3c2410_udc_queue()
1298 struct s3c2410_request *req = NULL; s3c2410_udc_dequeue() local
1312 list_for_each_entry(req, &ep->queue, queue) { s3c2410_udc_dequeue()
1313 if (&req->req == _req) { s3c2410_udc_dequeue()
1314 list_del_init(&req->queue); s3c2410_udc_dequeue()
1323 "dequeued req %p from %s, len %d buf %p\n", s3c2410_udc_dequeue()
1324 req, _ep->name, _req->length, _req->buf); s3c2410_udc_dequeue()
1326 s3c2410_udc_done(ep, req, -ECONNRESET); s3c2410_udc_dequeue()
254 s3c2410_udc_done(struct s3c2410_ep *ep, struct s3c2410_request *req, int status) s3c2410_udc_done() argument
298 s3c2410_udc_write_packet(int fifo, struct s3c2410_request *req, unsigned max) s3c2410_udc_write_packet() argument
322 s3c2410_udc_write_fifo(struct s3c2410_ep *ep, struct s3c2410_request *req) s3c2410_udc_write_fifo() argument
407 s3c2410_udc_read_packet(int fifo, u8 *buf, struct s3c2410_request *req, unsigned avail) s3c2410_udc_read_packet() argument
422 s3c2410_udc_read_fifo(struct s3c2410_ep *ep, struct s3c2410_request *req) s3c2410_udc_read_fifo() argument
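By this point the pattern common to every file in the listing should be clear: s3c2410_udc_done(), fotg210_done(), net2272_done() and the rest all finish a request by unlinking it, replacing -EINPROGRESS with the final status, and invoking the gadget's completion callback. A generic plain-C model of that shape, with invented types and a stand-in errno value, not any one driver's code:

    #define FAKE_EINPROGRESS (-115)   /* stand-in for -EINPROGRESS */

    struct fake_request {
        int status;
        void (*complete)(struct fake_request *req);
    };

    static void finish_request(struct fake_request *req, int status)
    {
        /* the real drivers also do list_del_init(&req->queue) here */
        if (req->status == FAKE_EINPROGRESS)
            req->status = status;
        if (req->complete)
            req->complete(req);       /* usb_gadget_giveback_request() */
    }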
H A Dnet2272.c333 struct net2272_request *req; net2272_alloc_request() local
339 req = kzalloc(sizeof(*req), gfp_flags); net2272_alloc_request()
340 if (!req) net2272_alloc_request()
343 INIT_LIST_HEAD(&req->queue); net2272_alloc_request()
345 return &req->req; net2272_alloc_request()
352 struct net2272_request *req; net2272_free_request() local
358 req = container_of(_req, struct net2272_request, req); net2272_free_request()
359 WARN_ON(!list_empty(&req->queue)); net2272_free_request()
360 kfree(req); net2272_free_request()
364 net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status) net2272_done() argument
377 list_del_init(&req->queue); net2272_done()
379 if (req->req.status == -EINPROGRESS) net2272_done()
380 req->req.status = status; net2272_done()
382 status = req->req.status; net2272_done()
386 usb_gadget_unmap_request(&dev->gadget, &req->req, net2272_done()
390 dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n", net2272_done()
391 ep->ep.name, &req->req, status, net2272_done()
392 req->req.actual, req->req.length, req->req.buf); net2272_done()
397 usb_gadget_giveback_request(&ep->ep, &req->req); net2272_done()
404 struct net2272_request *req, unsigned max) net2272_write_packet()
411 length = min(req->req.length - req->req.actual, max); net2272_write_packet()
412 req->req.actual += length; net2272_write_packet()
414 dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n", net2272_write_packet()
415 ep->ep.name, req, max, length, net2272_write_packet()
440 net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req) net2272_write_fifo() argument
447 ep->ep.name, req->req.actual, req->req.length); net2272_write_fifo()
461 buf = req->req.buf + req->req.actual; net2272_write_fifo()
474 count = net2272_write_packet(ep, buf, req, max); net2272_write_fifo()
476 if (req->req.length == req->req.actual) { net2272_write_fifo()
480 net2272_done(ep, req, 0); net2272_write_fifo()
483 req = list_entry(ep->queue.next, net2272_write_fifo()
486 status = net2272_kick_dma(ep, req); net2272_write_fifo()
512 struct net2272_request *req, unsigned avail) net2272_read_packet()
518 req->req.actual += avail; net2272_read_packet()
520 dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n", net2272_read_packet()
521 ep->ep.name, req, avail, net2272_read_packet()
553 net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req) net2272_read_fifo() argument
563 ep->ep.name, req->req.actual, req->req.length); net2272_read_fifo()
567 buf = req->req.buf + req->req.actual; net2272_read_fifo()
577 tmp = req->req.length - req->req.actual; net2272_read_fifo()
589 is_short = net2272_read_packet(ep, buf, req, count); net2272_read_fifo()
593 ((req->req.actual == req->req.length) net2272_read_fifo()
594 && !req->req.zero))) { net2272_read_fifo()
598 net2272_done(ep, req, -EOVERFLOW); net2272_read_fifo()
600 net2272_done(ep, req, 0); net2272_read_fifo()
613 req = list_entry(ep->queue.next, net2272_read_fifo()
615 status = net2272_kick_dma(ep, req); net2272_read_fifo()
630 struct net2272_request *req; net2272_pio_advance() local
635 req = list_entry(ep->queue.next, struct net2272_request, queue); net2272_pio_advance()
636 (ep->is_in ? net2272_write_fifo : net2272_read_fifo)(ep, req); net2272_pio_advance()
720 net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req) net2272_kick_dma() argument
731 if (req->req.length & 1) net2272_kick_dma()
734 dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n", net2272_kick_dma()
735 ep->ep.name, req, (unsigned long long) req->req.dma); net2272_kick_dma()
746 size = req->req.length; net2272_kick_dma()
752 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0)) net2272_kick_dma()
755 req->req.actual += size; net2272_kick_dma()
762 if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1)) net2272_kick_dma()
828 struct net2272_request *req; net2272_queue() local
835 req = container_of(_req, struct net2272_request, req); net2272_queue()
837 || !list_empty(&req->queue)) net2272_queue()
854 dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n", net2272_queue()
867 net2272_done(ep, req, 0); net2272_queue()
883 status = net2272_read_fifo(ep, req); net2272_queue()
891 status = net2272_kick_dma(ep, req); net2272_queue()
900 status = net2272_write_fifo(ep, req); net2272_queue()
904 status = net2272_read_fifo(ep, req); net2272_queue()
910 req = NULL; net2272_queue()
914 if (likely(req)) net2272_queue()
915 list_add_tail(&req->queue, &ep->queue); net2272_queue()
929 struct net2272_request *req; net2272_dequeue_all() local
935 req = list_entry(ep->queue.next, net2272_dequeue_all()
938 net2272_done(ep, req, -ESHUTDOWN); net2272_dequeue_all()
947 struct net2272_request *req; net2272_dequeue() local
960 list_for_each_entry(req, &ep->queue, queue) { net2272_dequeue()
961 if (&req->req == _req) net2272_dequeue()
964 if (&req->req != _req) { net2272_dequeue()
970 if (ep->queue.next == &req->queue) { net2272_dequeue()
972 net2272_done(ep, req, -ECONNRESET); net2272_dequeue()
974 req = NULL; net2272_dequeue()
1223 t1 & (1 << DMA_REQUEST) ? "req " : "", registers_show()
1520 struct net2272_request *req; net2272_handle_dma() local
1525 req = list_entry(ep->queue.next, net2272_handle_dma()
1528 req = NULL; net2272_handle_dma()
1530 dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req); net2272_handle_dma()
1552 if ((req->req.length % ep->ep.maxpacket != 0) || net2272_handle_dma()
1553 req->req.zero) net2272_handle_dma()
1556 net2272_done(ep, req, 0); net2272_handle_dma()
1558 req = list_entry(ep->queue.next, net2272_handle_dma()
1560 status = net2272_kick_dma(ep, req); net2272_handle_dma()
1586 req->req.actual += len; net2272_handle_dma()
1598 struct net2272_request *req; net2272_handle_ep() local
1602 req = list_entry(ep->queue.next, net2272_handle_ep()
1605 req = NULL; net2272_handle_ep()
1612 dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n", net2272_handle_ep()
1613 ep->ep.name, stat0, stat1, req ? &req->req : NULL); net2272_handle_ep()
1728 struct net2272_request *req; net2272_handle_stat0_irqs() local
1745 req = list_entry(ep->queue.next, net2272_handle_stat0_irqs()
1747 net2272_done(ep, req, net2272_handle_stat0_irqs()
1748 (req->req.actual == req->req.length) ? 0 : -EPROTO); net2272_handle_stat0_irqs()
1932 dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n", net2272_handle_stat0_irqs()
403 net2272_write_packet(struct net2272_ep *ep, u8 *buf, struct net2272_request *req, unsigned max) net2272_write_packet() argument
511 net2272_read_packet(struct net2272_ep *ep, u8 *buf, struct net2272_request *req, unsigned avail) net2272_read_packet() argument
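
The net2272 matches above (net2272_done, net2272_write_fifo, net2272_dequeue) all funnel into the same request-completion step that the other UDC drivers below repeat: unlink the request from the endpoint queue, latch the final status only if it is still -EINPROGRESS, then hand the request back to the gadget driver. A minimal standalone sketch of that pattern, using hypothetical stub types (my_ep, my_request) instead of the real struct usb_request machinery:

/* Illustrative sketch only; stub types stand in for the driver structures. */
#include <errno.h>
#include <stddef.h>

struct my_request {
	struct my_request *next;        /* simplified queue link */
	int status;                     /* -EINPROGRESS while queued */
	void (*complete)(struct my_request *req);
};

struct my_ep {
	struct my_request *queue;       /* head of pending requests */
};

/* Mirror of the done()/net2272_done() shape visible above:
 * unlink, latch status once, then give the request back. */
static void ep_request_done(struct my_ep *ep, struct my_request *req, int status)
{
	ep->queue = req->next;          /* list_del_init() in the real drivers */
	req->next = NULL;

	if (req->status == -EINPROGRESS)
		req->status = status;   /* only the first completion status sticks */
	else
		status = req->status;

	/* the real drivers also unmap DMA here before calling back */
	if (req->complete)
		req->complete(req);     /* usb_gadget_giveback_request() equivalent */
}
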
H A Domap_udc.c270 struct omap_req *req; omap_alloc_request() local
272 req = kzalloc(sizeof(*req), gfp_flags); omap_alloc_request()
273 if (!req) omap_alloc_request()
276 INIT_LIST_HEAD(&req->queue); omap_alloc_request()
278 return &req->req; omap_alloc_request()
284 struct omap_req *req = container_of(_req, struct omap_req, req); omap_free_request() local
286 kfree(req); omap_free_request()
292 done(struct omap_ep *ep, struct omap_req *req, int status) done() argument
297 list_del_init(&req->queue); done()
299 if (req->req.status == -EINPROGRESS) done()
300 req->req.status = status; done()
302 status = req->req.status; done()
305 usb_gadget_unmap_request(&udc->gadget, &req->req, done()
311 VDBG("complete %s req %p stat %d len %u/%u\n", done()
312 ep->ep.name, &req->req, status, done()
313 req->req.actual, req->req.length); done()
318 usb_gadget_giveback_request(&ep->ep, &req->req); done()
332 write_packet(u8 *buf, struct omap_req *req, unsigned max) write_packet() argument
337 len = min(req->req.length - req->req.actual, max); write_packet()
338 req->req.actual += len; write_packet()
358 static int write_fifo(struct omap_ep *ep, struct omap_req *req) write_fifo() argument
365 buf = req->req.buf + req->req.actual; write_fifo()
374 count = write_packet(buf, req, count); write_fifo()
381 else if (req->req.length == req->req.actual write_fifo()
382 && !req->req.zero) write_fifo()
392 done(ep, req, 0); write_fifo()
397 read_packet(u8 *buf, struct omap_req *req, unsigned avail) read_packet() argument
402 len = min(req->req.length - req->req.actual, avail); read_packet()
403 req->req.actual += len; read_packet()
420 static int read_fifo(struct omap_ep *ep, struct omap_req *req) read_fifo() argument
426 buf = req->req.buf + req->req.actual; read_fifo()
447 count = read_packet(buf, req, avail); read_fifo()
454 req->req.status = -EOVERFLOW; read_fifo()
459 } else if (req->req.length == req->req.actual) read_fifo()
467 done(ep, req, 0); read_fifo()
517 static void next_in_dma(struct omap_ep *ep, struct omap_req *req) next_in_dma() argument
520 unsigned length = req->req.length - req->req.actual; next_in_dma()
542 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual, next_in_dma()
551 req->dma_bytes = length; next_in_dma()
554 static void finish_in_dma(struct omap_ep *ep, struct omap_req *req, int status) finish_in_dma() argument
559 req->req.actual += req->dma_bytes; finish_in_dma()
562 if (req->req.actual < req->req.length) finish_in_dma()
564 if (req->req.zero finish_in_dma()
565 && req->dma_bytes != 0 finish_in_dma()
566 && (req->req.actual % ep->maxpacket) == 0) finish_in_dma()
569 req->req.actual += dma_src_len(ep, req->req.dma finish_in_dma()
570 + req->req.actual); finish_in_dma()
577 done(ep, req, status); finish_in_dma()
580 static void next_out_dma(struct omap_ep *ep, struct omap_req *req) next_out_dma() argument
582 unsigned packets = req->req.length - req->req.actual; next_out_dma()
589 req->dma_bytes = packets * ep->ep.maxpacket; next_out_dma()
595 OMAP_DMA_AMODE_POST_INC, req->req.dma + req->req.actual, next_out_dma()
610 finish_out_dma(struct omap_ep *ep, struct omap_req *req, int status, int one) finish_out_dma() argument
615 ep->dma_counter = (u16) (req->req.dma + req->req.actual); finish_out_dma()
616 count = dma_dest_len(ep, req->req.dma + req->req.actual); finish_out_dma()
617 count += req->req.actual; finish_out_dma()
620 if (count <= req->req.length) finish_out_dma()
621 req->req.actual = count; finish_out_dma()
623 if (count != req->dma_bytes || status) finish_out_dma()
627 else if (req->req.actual < req->req.length) finish_out_dma()
634 done(ep, req, status); finish_out_dma()
641 struct omap_req *req; dma_irq() local
649 req = container_of(ep->queue.next, dma_irq()
651 finish_in_dma(ep, req, 0); dma_irq()
656 req = container_of(ep->queue.next, dma_irq()
658 next_in_dma(ep, req); dma_irq()
668 req = container_of(ep->queue.next, dma_irq()
670 finish_out_dma(ep, req, 0, dman_stat & UDC_DMA_RX_SB); dma_irq()
675 req = container_of(ep->queue.next, dma_irq()
677 next_out_dma(ep, req); dma_irq()
791 struct omap_req *req; dma_channel_claim() local
792 req = container_of(ep->queue.next, struct omap_req, queue); dma_channel_claim()
794 (is_in ? next_in_dma : next_out_dma)(ep, req); dma_channel_claim()
797 (is_in ? write_fifo : read_fifo)(ep, req); dma_channel_claim()
812 struct omap_req *req; dma_channel_release() local
817 req = container_of(ep->queue.next, struct omap_req, queue); dma_channel_release()
819 req = NULL; dma_channel_release()
826 ep->dma_channel - 1, req); dma_channel_release()
837 if (req) { dma_channel_release()
838 finish_in_dma(ep, req, -ECONNRESET); dma_channel_release()
854 if (req) dma_channel_release()
855 finish_out_dma(ep, req, -ECONNRESET, 0); dma_channel_release()
870 struct omap_req *req = container_of(_req, struct omap_req, req); omap_ep_queue() local
876 if (!_req || !req->req.complete || !req->req.buf omap_ep_queue()
877 || !list_empty(&req->queue)) { omap_ep_queue()
886 if (req->req.length > ep->ep.maxpacket) omap_ep_queue()
898 && (req->req.length % ep->ep.maxpacket) != 0) { omap_ep_queue()
908 usb_gadget_map_request(&udc->gadget, &req->req, omap_ep_queue()
911 VDBG("%s queue req %p, len %d buf %p\n", omap_ep_queue()
916 req->req.status = -EINPROGRESS; omap_ep_queue()
917 req->req.actual = 0; omap_ep_queue()
937 if (!req->req.length) { omap_ep_queue()
965 done(ep, req, 0); omap_ep_queue()
966 req = NULL; omap_ep_queue()
985 (is_in ? next_in_dma : next_out_dma)(ep, req); omap_ep_queue()
986 else if (req) { omap_ep_queue()
987 if ((is_in ? write_fifo : read_fifo)(ep, req) == 1) omap_ep_queue()
988 req = NULL; omap_ep_queue()
1000 if (req != NULL) omap_ep_queue()
1001 list_add_tail(&req->queue, &ep->queue); omap_ep_queue()
1010 struct omap_req *req; omap_ep_dequeue() local
1019 list_for_each_entry(req, &ep->queue, queue) { omap_ep_dequeue()
1020 if (&req->req == _req) omap_ep_dequeue()
1023 if (&req->req != _req) { omap_ep_dequeue()
1028 if (use_dma && ep->dma_channel && ep->queue.next == &req->queue) { omap_ep_dequeue()
1037 done(ep, req, -ECONNRESET); omap_ep_dequeue()
1333 struct omap_req *req; nuke() local
1346 req = list_entry(ep->queue.next, struct omap_req, queue); nuke()
1347 done(ep, req, status); nuke()
1396 struct omap_req *req = NULL; ep0_irq() local
1422 req = container_of(ep0->queue.next, struct omap_req, queue); ep0_irq()
1436 if (req) ep0_irq()
1437 stat = write_fifo(ep0, req); ep0_irq()
1439 if (!req && udc->ep0_pending) { ep0_irq()
1449 if (req) ep0_irq()
1450 done(ep0, req, 0); ep0_irq()
1452 req = NULL; ep0_irq()
1474 stat = read_fifo(ep0, req); ep0_irq()
1475 if (!req || stat < 0) { ep0_irq()
1485 done(ep0, req, 0); ep0_irq()
1497 if (req) ep0_irq()
1498 done(ep0, req, 0); ep0_irq()
1706 VDBG("req %02x.%02x protocol STALL; stat %d\n", ep0_irq()
1874 struct omap_req *req; pio_out_timer() local
1877 req = container_of(ep->queue.next, pio_out_timer()
1879 (void) read_fifo(ep, req); pio_out_timer()
1897 struct omap_req *req; omap_udc_pio_irq() local
1918 req = container_of(ep->queue.next, omap_udc_pio_irq()
1920 stat = read_fifo(ep, req); omap_udc_pio_irq()
1952 req = container_of(ep->queue.next, omap_udc_pio_irq()
1954 (void) write_fifo(ep, req); omap_udc_pio_irq()
1981 struct omap_req *req; omap_udc_iso_irq() local
1985 req = list_entry(ep->queue.next, struct omap_req, queue); omap_udc_iso_irq()
1995 /* done(ep, req, -EPROTO) */; omap_udc_iso_irq()
1997 write_fifo(ep, req); omap_udc_iso_irq()
2009 /* done(ep, req, status) */; omap_udc_iso_irq()
2011 read_fifo(ep, req); omap_udc_iso_irq()
2147 struct omap_req *req; proc_ep_show() local
2197 list_for_each_entry(req, &ep->queue, queue) { proc_ep_show()
2198 unsigned length = req->req.actual; proc_ep_show()
2203 (ep, req->req.dma + length); proc_ep_show()
2207 &req->req, length, proc_ep_show()
2208 req->req.length, req->req.buf); proc_ep_show()
2325 seq_printf(s, "ULPD control %04x req %04x status %04x\n", proc_udc_show()
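
omap_ep_dequeue(), like net2272_dequeue() above and pxa_ep_dequeue()/xudc_ep_dequeue() further down, walks the endpoint queue looking for the caller's request and, if found, completes it with -ECONNRESET. A hedged sketch of that lookup, again with simplified stand-in types rather than the kernel list API:

#include <errno.h>
#include <stddef.h>

struct my_request {
	struct my_request *next;
	int status;
	void (*complete)(struct my_request *req);
};

struct my_ep {
	struct my_request *queue;
};

/* Find _req on the endpoint queue and cancel it if present.
 * Returns 0 on success, -EINVAL if the request was never queued,
 * mirroring the dequeue handlers matched above. */
static int ep_dequeue(struct my_ep *ep, struct my_request *_req)
{
	struct my_request **link = &ep->queue;

	while (*link && *link != _req)
		link = &(*link)->next;
	if (*link != _req)
		return -EINVAL;

	*link = _req->next;             /* unlink */
	_req->next = NULL;
	if (_req->status == -EINPROGRESS)
		_req->status = -ECONNRESET;
	if (_req->complete)
		_req->complete(_req);
	return 0;
}
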
H A Dmv_udc_core.c143 actual = curr_req->req.length; process_ep_req()
203 curr_req->req.actual = actual; process_ep_req()
213 static void done(struct mv_ep *ep, struct mv_req *req, int status)
223 /* Removed the req from fsl_ep->queue */
224 list_del_init(&req->queue);
226 /* req.status should be set as -EINPROGRESS in ep_queue() */
227 if (req->req.status == -EINPROGRESS)
228 req->req.status = status;
230 status = req->req.status;
233 next_td = req->head;
234 for (j = 0; j < req->dtd_count; j++) {
236 if (j != req->dtd_count - 1)
241 usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep));
244 dev_info(&udc->dev->dev, "complete %s req %p stat %d len %u/%u",
245 ep->ep.name, &req->req, status,
246 req->req.actual, req->req.length);
252 usb_gadget_giveback_request(&ep->ep, &req->req);
258 static int queue_dtd(struct mv_ep *ep, struct mv_req *req) queue_dtd() argument
277 req->head->td_dma & EP_QUEUE_HEAD_NEXT_POINTER_MASK; queue_dtd()
325 dqh->next_dtd_ptr = req->head->td_dma queue_dtd()
341 static struct mv_dtd *build_dtd(struct mv_req *req, unsigned *length, build_dtd() argument
350 if (usb_endpoint_xfer_isoc(req->ep->ep.desc)) { build_dtd()
351 dqh = req->ep->dqh; build_dtd()
354 *length = min(req->req.length - req->req.actual, build_dtd()
355 (unsigned)(mult * req->ep->ep.maxpacket)); build_dtd()
357 *length = min(req->req.length - req->req.actual, build_dtd()
360 udc = req->ep->udc; build_dtd()
372 temp = (u32)(req->req.dma + req->req.actual); build_dtd()
380 req->req.actual += *length; build_dtd()
382 /* zlp is needed if req->req.zero is set */ build_dtd()
383 if (req->req.zero) { build_dtd()
384 if (*length == 0 || (*length % req->ep->ep.maxpacket) != 0) build_dtd()
388 } else if (req->req.length == req->req.actual) build_dtd()
397 if (*is_last && !req->req.no_interrupt) build_dtd()
410 static int req_to_dtd(struct mv_req *req) req_to_dtd() argument
418 udc = req->ep->udc; req_to_dtd()
421 dtd = build_dtd(req, &count, &dma, &is_last); req_to_dtd()
427 req->head = dtd; req_to_dtd()
433 req->dtd_count++; req_to_dtd()
439 req->tail = dtd; req_to_dtd()
470 * driver handles zero length packet through req->req.zero mv_ep_enable()
611 struct mv_req *req = NULL; mv_alloc_request() local
613 req = kzalloc(sizeof *req, gfp_flags); mv_alloc_request()
614 if (!req) mv_alloc_request()
617 req->req.dma = DMA_ADDR_INVALID; mv_alloc_request()
618 INIT_LIST_HEAD(&req->queue); mv_alloc_request()
620 return &req->req; mv_alloc_request()
625 struct mv_req *req = NULL; mv_free_request() local
627 req = container_of(_req, struct mv_req, req); mv_free_request()
630 kfree(req); mv_free_request()
698 struct mv_req *req = container_of(_req, struct mv_req, req); mv_ep_queue() local
704 if (!_req || !req->req.complete || !req->req.buf mv_ep_queue()
705 || !list_empty(&req->queue)) { mv_ep_queue()
718 req->ep = ep; mv_ep_queue()
725 req->req.status = -EINPROGRESS; mv_ep_queue()
726 req->req.actual = 0; mv_ep_queue()
727 req->dtd_count = 0; mv_ep_queue()
732 if (!req_to_dtd(req)) { mv_ep_queue()
733 retval = queue_dtd(ep, req); mv_ep_queue()
751 list_add_tail(&req->queue, &ep->queue); mv_ep_queue()
762 static void mv_prime_ep(struct mv_ep *ep, struct mv_req *req) mv_prime_ep() argument
768 dqh->next_dtd_ptr = req->head->td_dma mv_prime_ep()
787 struct mv_req *req; mv_ep_dequeue() local
809 list_for_each_entry(req, &ep->queue, queue) { mv_ep_dequeue()
810 if (&req->req == _req) mv_ep_dequeue()
813 if (&req->req != _req) { mv_ep_dequeue()
819 if (ep->queue.next == &req->queue) { mv_ep_dequeue()
824 if (req->queue.next != &ep->queue) { mv_ep_dequeue()
827 next_req = list_entry(req->queue.next, mv_ep_dequeue()
844 prev_req = list_entry(req->queue.prev, struct mv_req, queue); mv_ep_dequeue()
845 writel(readl(&req->tail->dtd_next), mv_ep_dequeue()
850 done(ep, req, -ECONNRESET); mv_ep_dequeue()
1304 struct mv_req *req = NULL; nuke() local
1305 req = list_entry(ep->queue.next, struct mv_req, queue); nuke()
1306 done(ep, req, status); nuke()
1425 struct mv_req *req = container_of(_req, struct mv_req, req); prime_status_complete() local
1431 dev_info(&udc->dev->dev, "switch to test mode %d\n", req->test_mode); prime_status_complete()
1434 if (req->test_mode) { prime_status_complete()
1435 mv_set_ptc(udc, req->test_mode); prime_status_complete()
1436 req->test_mode = 0; prime_status_complete()
1445 struct mv_req *req; udc_prime_status() local
1452 req = udc->status_req; udc_prime_status()
1456 *((u16 *) req->req.buf) = cpu_to_le16(status); udc_prime_status()
1457 req->req.length = 2; udc_prime_status()
1459 req->req.length = 0; udc_prime_status()
1461 req->ep = ep; udc_prime_status()
1462 req->req.status = -EINPROGRESS; udc_prime_status()
1463 req->req.actual = 0; udc_prime_status()
1465 req->req.complete = prime_status_complete; udc_prime_status()
1466 req->test_mode = udc->test_mode; udc_prime_status()
1469 req->req.complete = NULL; udc_prime_status()
1470 req->dtd_count = 0; udc_prime_status()
1472 if (req->req.dma == DMA_ADDR_INVALID) { udc_prime_status()
1473 req->req.dma = dma_map_single(ep->udc->gadget.dev.parent, udc_prime_status()
1474 req->req.buf, req->req.length, udc_prime_status()
1476 req->mapped = 1; udc_prime_status()
1480 if (!req_to_dtd(req)) { udc_prime_status()
1481 retval = queue_dtd(ep, req); udc_prime_status()
1494 list_add_tail(&req->queue, &ep->queue); udc_prime_status()
1498 usb_gadget_unmap_request(&udc->gadget, &req->req, ep_dir(ep)); udc_prime_status()
1722 struct mv_ep *ep0, struct mv_req *req) ep0_req_complete()
1733 done(ep0, req, 0); ep0_req_complete()
1835 /* process the req queue until an uncomplete request */ irq_process_tr_complete()
1842 /* write back status to req */ irq_process_tr_complete()
1843 curr_req->req.status = status; irq_process_tr_complete()
2235 udc->status_req->req.buf = kzalloc(8, GFP_KERNEL); mv_udc_probe()
2236 udc->status_req->req.dma = DMA_ADDR_INVALID; mv_udc_probe()
1721 ep0_req_complete(struct mv_udc *udc, struct mv_ep *ep0, struct mv_req *req) ep0_req_complete() argument
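
build_dtd() above clamps each descriptor to the space left in the request (min(length - actual, ...)) and then decides whether the chunk is the last one, including the case where req.zero demands a trailing zero-length packet. The branch bodies are clipped in the listing, so the following is a plausible reading expressed as a standalone helper with hypothetical names, not the Marvell driver itself:

#include <stdbool.h>

/* Decide how many bytes the next descriptor carries and whether it is the
 * last one, following the clamping and zlp checks visible in build_dtd(). */
static unsigned next_chunk(unsigned length, unsigned *actual,
			   unsigned max_chunk, unsigned maxpacket,
			   bool zero, bool *is_last)
{
	unsigned remaining = length - *actual;
	unsigned chunk = remaining < max_chunk ? remaining : max_chunk;

	*actual += chunk;

	if (zero)
		/* a short (or empty) final chunk already terminates the
		 * transfer; a full multiple of maxpacket still owes a ZLP */
		*is_last = (chunk == 0 || (chunk % maxpacket) != 0);
	else
		*is_last = (*actual == length);

	return chunk;
}
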
H A Dpxa27x_udc.c142 struct pxa27x_request *req; queues_dbg_show() local
160 list_for_each_entry(req, &ep->queue, queue) { queues_dbg_show()
162 &req->req, req->req.actual, queues_dbg_show()
163 req->req.length, req->req.buf); queues_dbg_show()
571 * @req: usb request
647 struct pxa27x_request *req; pxa_ep_alloc_request() local
649 req = kzalloc(sizeof *req, gfp_flags); pxa_ep_alloc_request()
650 if (!req) pxa_ep_alloc_request()
653 INIT_LIST_HEAD(&req->queue); pxa_ep_alloc_request()
654 req->in_use = 0; pxa_ep_alloc_request()
655 req->udc_usb_ep = container_of(_ep, struct udc_usb_ep, usb_ep); pxa_ep_alloc_request()
657 return &req->req; pxa_ep_alloc_request()
669 struct pxa27x_request *req; pxa_ep_free_request() local
671 req = container_of(_req, struct pxa27x_request, req); pxa_ep_free_request()
672 WARN_ON(!list_empty(&req->queue)); pxa_ep_free_request()
673 kfree(req); pxa_ep_free_request()
679 * @req: usb request
686 static void ep_add_request(struct pxa_ep *ep, struct pxa27x_request *req) ep_add_request() argument
688 if (unlikely(!req)) ep_add_request()
690 ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req, ep_add_request()
691 req->req.length, udc_ep_readl(ep, UDCCSR)); ep_add_request()
693 req->in_use = 1; ep_add_request()
694 list_add_tail(&req->queue, &ep->queue); ep_add_request()
701 * @req: usb request
709 static void ep_del_request(struct pxa_ep *ep, struct pxa27x_request *req) ep_del_request() argument
711 if (unlikely(!req)) ep_del_request()
713 ep_vdbg(ep, "req:%p, lg=%d, udccsr=0x%03x\n", req, ep_del_request()
714 req->req.length, udc_ep_readl(ep, UDCCSR)); ep_del_request()
716 list_del_init(&req->queue); ep_del_request()
717 req->in_use = 0; ep_del_request()
725 * @req: pxa request
733 static void req_done(struct pxa_ep *ep, struct pxa27x_request *req, int status, req_done() argument
738 ep_del_request(ep, req); req_done()
739 if (likely(req->req.status == -EINPROGRESS)) req_done()
740 req->req.status = status; req_done()
742 status = req->req.status; req_done()
745 ep_dbg(ep, "complete req %p stat %d len %u/%u\n", req_done()
746 &req->req, status, req_done()
747 req->req.actual, req->req.length); req_done()
752 usb_gadget_giveback_request(&req->udc_usb_ep->usb_ep, &req->req); req_done()
761 * @req: pxa request
768 static void ep_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, ep_end_out_req() argument
772 req_done(ep, req, 0, pflags); ep_end_out_req()
778 * @req: pxa request
786 static void ep0_end_out_req(struct pxa_ep *ep, struct pxa27x_request *req, ep0_end_out_req() argument
790 ep_end_out_req(ep, req, pflags); ep0_end_out_req()
797 * @req: pxa request
804 static void ep_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, ep_end_in_req() argument
808 req_done(ep, req, 0, pflags); ep_end_in_req()
814 * @req: pxa request
822 static void ep0_end_in_req(struct pxa_ep *ep, struct pxa27x_request *req, ep0_end_in_req() argument
826 ep_end_in_req(ep, req, pflags); ep0_end_in_req()
841 struct pxa27x_request *req; nuke() local
846 req = list_entry(ep->queue.next, struct pxa27x_request, queue); nuke()
847 req_done(ep, req, status, &flags); nuke()
855 * @req: usb request
863 static int read_packet(struct pxa_ep *ep, struct pxa27x_request *req) read_packet() argument
869 bufferspace = req->req.length - req->req.actual; read_packet()
871 buf = (u32 *)(req->req.buf + req->req.actual); read_packet()
881 req->req.actual += count; read_packet()
891 * @req: usb request
900 static int write_packet(struct pxa_ep *ep, struct pxa27x_request *req, write_packet() argument
907 buf = (u32 *)(req->req.buf + req->req.actual); write_packet()
910 length = min(req->req.length - req->req.actual, max); write_packet()
911 req->req.actual += length; write_packet()
931 * @req: usb request
942 static int read_fifo(struct pxa_ep *ep, struct pxa27x_request *req) read_fifo() argument
947 count = read_packet(ep, req); read_fifo()
951 ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n", read_fifo()
953 &req->req, req->req.actual, req->req.length); read_fifo()
956 if (is_short || req->req.actual == req->req.length) { read_fifo()
968 * @req: pxa usb request
977 static int write_fifo(struct pxa_ep *ep, struct pxa27x_request *req) write_fifo() argument
999 count = write_packet(ep, req, max); write_fifo()
1008 if (likely(req->req.length > req->req.actual) write_fifo()
1009 || req->req.zero) write_fifo()
1027 ep_dbg(ep, "wrote count:%d bytes%s%s, left:%d req=%p\n", write_fifo()
1029 req->req.length - req->req.actual, &req->req); write_fifo()
1037 * @req: pxa usb request
1045 static int read_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req) read_ep0_fifo() argument
1050 count = read_packet(ep, req); read_ep0_fifo()
1055 ep_dbg(ep, "read udccsr:%03x, count:%d bytes%s req %p %d/%d\n", read_ep0_fifo()
1057 &req->req, req->req.actual, req->req.length); read_ep0_fifo()
1059 if (is_short || req->req.actual >= req->req.length) { read_ep0_fifo()
1071 * @req: request
1083 static int write_ep0_fifo(struct pxa_ep *ep, struct pxa27x_request *req) write_ep0_fifo() argument
1088 count = write_packet(ep, req, EP0_FIFO_SIZE); write_ep0_fifo()
1098 ep_dbg(ep, "in %d bytes%s%s, %d left, req=%p, udccsr0=0x%03x\n", write_ep0_fifo()
1100 req->req.length - req->req.actual, write_ep0_fifo()
1101 &req->req, udc_ep_readl(ep, UDCCSR)); write_ep0_fifo()
1123 struct pxa27x_request *req; pxa_ep_queue() local
1131 req = container_of(_req, struct pxa27x_request, req); pxa_ep_queue()
1155 && req->req.length > ep->fifo_size)) pxa_ep_queue()
1162 ep_dbg(ep, "queue req %p(first=%s), len %d buf %p\n", pxa_ep_queue()
1172 if (req->in_use) { pxa_ep_queue()
1173 ep_err(ep, "refusing to queue req %p (already queued)\n", req); pxa_ep_queue()
1181 ep_add_request(ep, req); pxa_ep_queue()
1188 ep_end_in_req(ep, req, NULL); pxa_ep_queue()
1193 ep_del_request(ep, req); pxa_ep_queue()
1200 if (write_ep0_fifo(ep, req)) pxa_ep_queue()
1201 ep0_end_in_req(ep, req, NULL); pxa_ep_queue()
1205 if (read_ep0_fifo(ep, req)) pxa_ep_queue()
1206 ep0_end_out_req(ep, req, NULL); pxa_ep_queue()
1211 ep_del_request(ep, req); pxa_ep_queue()
1238 struct pxa27x_request *req; pxa_ep_dequeue() local
1252 list_for_each_entry(req, &ep->queue, queue) { pxa_ep_dequeue()
1253 if (&req->req == _req) { pxa_ep_dequeue()
1261 req_done(ep, req, -ECONNRESET, NULL); pxa_ep_dequeue()
1863 * @req: control request
1866 struct pxa27x_request *req) handle_ep0_ctrl_req()
1985 struct pxa27x_request *req = NULL; handle_ep0() local
1989 req = list_entry(ep->queue.next, struct pxa27x_request, queue); handle_ep0()
1992 ep_dbg(ep, "state=%s, req=%p, udccsr0=0x%03x, udcbcr=%d, irq_msk=%x\n", handle_ep0()
1993 EP0_STNAME(udc), req, udccsr0, udc_ep_readl(ep, UDCBCR), handle_ep0()
2020 handle_ep0_ctrl_req(udc, req); handle_ep0()
2025 if (req && !ep_is_full(ep)) handle_ep0()
2026 completed = write_ep0_fifo(ep, req); handle_ep0()
2028 ep0_end_in_req(ep, req, NULL); handle_ep0()
2031 if (epout_has_pkt(ep) && req) handle_ep0()
2032 completed = read_ep0_fifo(ep, req); handle_ep0()
2034 ep0_end_out_req(ep, req, NULL); handle_ep0()
2068 struct pxa27x_request *req; handle_ep() local
2085 req = list_entry(ep->queue.next, handle_ep()
2088 req = NULL; handle_ep()
2090 ep_dbg(ep, "req:%p, udccsr 0x%03x loop=%d\n", handle_ep()
2091 req, udccsr, loop++); handle_ep()
2096 if (!req) handle_ep()
2101 completed = write_fifo(ep, req); handle_ep()
2104 completed = read_fifo(ep, req); handle_ep()
2109 ep_end_in_req(ep, req, &flags); handle_ep()
2111 ep_end_out_req(ep, req, &flags); handle_ep()
2130 struct usb_ctrlrequest req ; pxa27x_change_configuration() local
2138 req.bRequestType = 0; pxa27x_change_configuration()
2139 req.bRequest = USB_REQ_SET_CONFIGURATION; pxa27x_change_configuration()
2140 req.wValue = config; pxa27x_change_configuration()
2141 req.wIndex = 0; pxa27x_change_configuration()
2142 req.wLength = 0; pxa27x_change_configuration()
2145 udc->driver->setup(&udc->gadget, &req); pxa27x_change_configuration()
2160 struct usb_ctrlrequest req; pxa27x_change_interface() local
2167 req.bRequestType = USB_RECIP_INTERFACE; pxa27x_change_interface()
2168 req.bRequest = USB_REQ_SET_INTERFACE; pxa27x_change_interface()
2169 req.wValue = alt; pxa27x_change_interface()
2170 req.wIndex = iface; pxa27x_change_interface()
2171 req.wLength = 0; pxa27x_change_interface()
2174 udc->driver->setup(&udc->gadget, &req); pxa27x_change_interface()
1865 handle_ep0_ctrl_req(struct pxa_udc *udc, struct pxa27x_request *req) handle_ep0_ctrl_req() argument
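
pxa27x_change_configuration() and pxa27x_change_interface() above synthesize a zero-length control request and forward it to the gadget driver's setup() callback. A small standalone sketch of the SET_CONFIGURATION case, using a simplified stand-in for struct usb_ctrlrequest (field layout per the USB spec; the kernel type uses little-endian fields):

#include <stdint.h>

/* Simplified stand-in for struct usb_ctrlrequest. */
struct ctrlreq {
	uint8_t  bRequestType;
	uint8_t  bRequest;
	uint16_t wValue;
	uint16_t wIndex;
	uint16_t wLength;
};

#define USB_REQ_SET_CONFIGURATION 9     /* standard request code */

/* Build the zero-length SET_CONFIGURATION request that
 * pxa27x_change_configuration() hands to the gadget driver. */
static struct ctrlreq make_set_configuration(uint16_t config)
{
	struct ctrlreq req = {
		.bRequestType = 0,      /* host-to-device, standard, device */
		.bRequest     = USB_REQ_SET_CONFIGURATION,
		.wValue       = config,
		.wIndex       = 0,
		.wLength      = 0,
	};
	return req;
}
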
H A Dudc-xilinx.c112 #define to_xusb_req(req) container_of((req), struct xusb_req, usb_req)
170 * @req: pointer to dummy request for get status command
187 struct xusb_req *req; member in struct:xusb_udc
371 * @req: pointer to the usb request structure.
380 static int xudc_dma_send(struct xusb_ep *ep, struct xusb_req *req, xudc_dma_send() argument
388 src = req->usb_req.dma + req->usb_req.actual; xudc_dma_send()
389 if (req->usb_req.length) xudc_dma_send()
425 * @req: pointer to the usb request structure.
434 static int xudc_dma_receive(struct xusb_ep *ep, struct xusb_req *req, xudc_dma_receive() argument
442 dst = req->usb_req.dma + req->usb_req.actual; xudc_dma_receive()
474 * @req: pointer to the usb request structure.
483 static int xudc_eptxrx(struct xusb_ep *ep, struct xusb_req *req, xudc_eptxrx() argument
494 rc = xudc_dma_send(ep, req, bufferptr, bufferlen); xudc_eptxrx()
496 rc = xudc_dma_receive(ep, req, bufferptr, bufferlen); xudc_eptxrx()
545 * @req: pointer to the usb request structure.
551 static void xudc_done(struct xusb_ep *ep, struct xusb_req *req, int status) xudc_done() argument
555 list_del_init(&req->queue); xudc_done()
557 if (req->usb_req.status == -EINPROGRESS) xudc_done()
558 req->usb_req.status = status; xudc_done()
560 status = req->usb_req.status; xudc_done()
564 ep->ep_usb.name, req, status); xudc_done()
566 if (udc->dma_enabled && ep->epnumber && req->usb_req.length) xudc_done()
567 usb_gadget_unmap_request(&udc->gadget, &req->usb_req, xudc_done()
570 if (req->usb_req.complete) { xudc_done()
572 req->usb_req.complete(&ep->ep_usb, &req->usb_req); xudc_done()
580 * @req: pointer to the usb request structure.
586 static int xudc_read_fifo(struct xusb_ep *ep, struct xusb_req *req) xudc_read_fifo() argument
611 buf = req->usb_req.buf + req->usb_req.actual; xudc_read_fifo()
613 bufferspace = req->usb_req.length - req->usb_req.actual; xudc_read_fifo()
622 if (req->usb_req.status != -EOVERFLOW) xudc_read_fifo()
625 req->usb_req.status = -EOVERFLOW; xudc_read_fifo()
626 xudc_done(ep, req, -EOVERFLOW); xudc_read_fifo()
630 ret = xudc_eptxrx(ep, req, buf, count); xudc_read_fifo()
633 req->usb_req.actual += min(count, bufferspace); xudc_read_fifo()
634 dev_dbg(udc->dev, "read %s, %d bytes%s req %p %d/%d\n", xudc_read_fifo()
635 ep->ep_usb.name, count, is_short ? "/S" : "", req, xudc_read_fifo()
636 req->usb_req.actual, req->usb_req.length); xudc_read_fifo()
639 if ((req->usb_req.actual == req->usb_req.length) || is_short) { xudc_read_fifo()
640 if (udc->dma_enabled && req->usb_req.length) xudc_read_fifo()
642 req->usb_req.dma, xudc_read_fifo()
643 req->usb_req.actual, xudc_read_fifo()
645 xudc_done(ep, req, 0); xudc_read_fifo()
659 xudc_done(ep, req, -ECONNRESET); xudc_read_fifo()
670 * @req: pointer to the usb request structure.
676 static int xudc_write_fifo(struct xusb_ep *ep, struct xusb_req *req) xudc_write_fifo() argument
687 buf = req->usb_req.buf + req->usb_req.actual; xudc_write_fifo()
689 length = req->usb_req.length - req->usb_req.actual; xudc_write_fifo()
692 ret = xudc_eptxrx(ep, req, buf, length); xudc_write_fifo()
695 req->usb_req.actual += length; xudc_write_fifo()
699 if (likely(req->usb_req.length != xudc_write_fifo()
700 req->usb_req.actual) || req->usb_req.zero) xudc_write_fifo()
708 req->usb_req.length - req->usb_req.actual, req); xudc_write_fifo()
711 xudc_done(ep, req, 0); xudc_write_fifo()
721 xudc_done(ep, req, -ECONNRESET); xudc_write_fifo()
736 struct xusb_req *req; xudc_nuke() local
739 req = list_first_entry(&ep->queue, struct xusb_req, queue); xudc_nuke()
740 xudc_done(ep, req, status); xudc_nuke()
972 struct xusb_req *req; xudc_ep_alloc_request() local
975 req = kzalloc(sizeof(*req), gfp_flags); xudc_ep_alloc_request()
976 if (!req) { xudc_ep_alloc_request()
981 req->ep = ep; xudc_ep_alloc_request()
982 INIT_LIST_HEAD(&req->queue); xudc_ep_alloc_request()
983 return &req->usb_req; xudc_ep_alloc_request()
993 struct xusb_req *req = to_xusb_req(_req); xudc_free_request() local
995 kfree(req); xudc_free_request()
1001 * @req: pointer to the xusb request structure.
1005 static int __xudc_ep0_queue(struct xusb_ep *ep0, struct xusb_req *req) __xudc_ep0_queue() argument
1020 req->usb_req.status = -EINPROGRESS; __xudc_ep0_queue()
1021 req->usb_req.actual = 0; __xudc_ep0_queue()
1023 list_add_tail(&req->queue, &ep0->queue); __xudc_ep0_queue()
1026 prefetch(req->usb_req.buf); __xudc_ep0_queue()
1027 length = req->usb_req.length; __xudc_ep0_queue()
1030 length = req->usb_req.actual = min_t(u32, length, __xudc_ep0_queue()
1032 memcpy(corebuf, req->usb_req.buf, length); __xudc_ep0_queue()
1059 struct xusb_req *req = to_xusb_req(_req); xudc_ep0_queue() local
1066 ret = __xudc_ep0_queue(ep0, req); xudc_ep0_queue()
1083 struct xusb_req *req = to_xusb_req(_req); xudc_ep_queue() local
1106 ret = usb_gadget_map_request(&udc->gadget, &req->usb_req, xudc_ep_queue()
1119 if (!xudc_write_fifo(ep, req)) xudc_ep_queue()
1120 req = NULL; xudc_ep_queue()
1123 if (!xudc_read_fifo(ep, req)) xudc_ep_queue()
1124 req = NULL; xudc_ep_queue()
1128 if (req != NULL) xudc_ep_queue()
1129 list_add_tail(&req->queue, &ep->queue); xudc_ep_queue()
1145 struct xusb_req *req = to_xusb_req(_req); xudc_ep_dequeue() local
1151 list_for_each_entry(req, &ep->queue, queue) { xudc_ep_dequeue()
1152 if (&req->usb_req == _req) xudc_ep_dequeue()
1155 if (&req->usb_req != _req) { xudc_ep_dequeue()
1159 xudc_done(ep, req, -ECONNRESET); xudc_ep_dequeue()
1576 struct xusb_req *req = udc->req; xudc_setaddress() local
1579 req->usb_req.length = 0; xudc_setaddress()
1580 ret = __xudc_ep0_queue(ep0, req); xudc_setaddress()
1597 struct xusb_req *req = udc->req; xudc_getstatus() local
1633 req->usb_req.length = 2; xudc_getstatus()
1634 *(u16 *)req->usb_req.buf = cpu_to_le16(status); xudc_getstatus()
1635 ret = __xudc_ep0_queue(ep0, req); xudc_getstatus()
1652 struct xusb_req *req = udc->req; xudc_set_clear_feature() local
1720 req->usb_req.length = 0; xudc_set_clear_feature()
1721 ret = __xudc_ep0_queue(ep0, req); xudc_set_clear_feature()
1804 struct xusb_req *req; xudc_ep0_out() local
1809 req = list_first_entry(&ep0->queue, struct xusb_req, queue); xudc_ep0_out()
1819 req->usb_req.actual = req->usb_req.length; xudc_ep0_out()
1820 xudc_done(ep0, req, 0); xudc_ep0_out()
1828 buffer = req->usb_req.buf + req->usb_req.actual; xudc_ep0_out()
1829 req->usb_req.actual = req->usb_req.actual + bytes_to_rx; xudc_ep0_out()
1832 if (req->usb_req.length == req->usb_req.actual) { xudc_ep0_out()
1853 struct xusb_req *req; xudc_ep0_in() local
1862 req = list_first_entry(&ep0->queue, struct xusb_req, queue); xudc_ep0_in()
1863 bytes_to_tx = req->usb_req.length - req->usb_req.actual; xudc_ep0_in()
1884 req->usb_req.actual = req->usb_req.length; xudc_ep0_in()
1885 xudc_done(ep0, req, 0); xudc_ep0_in()
1904 buffer = req->usb_req.buf + req->usb_req.actual; xudc_ep0_in()
1905 req->usb_req.actual = req->usb_req.actual + length; xudc_ep0_in()
1949 struct xusb_req *req; xudc_nonctrl_ep_handler() local
1962 req = list_first_entry(&ep->queue, struct xusb_req, queue); xudc_nonctrl_ep_handler()
1965 xudc_write_fifo(ep, req); xudc_nonctrl_ep_handler()
1967 xudc_read_fifo(ep, req); xudc_nonctrl_ep_handler()
2060 udc->req = devm_kzalloc(&pdev->dev, sizeof(struct xusb_req), xudc_probe()
2062 if (!udc->req) xudc_probe()
2069 udc->req->usb_req.buf = buff; xudc_probe()
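
xudc_read_fifo() above, like the pxa27x and omap read paths, computes how much room is left in the request buffer (length - actual) and reports -EOVERFLOW when the endpoint delivers more data than the gadget driver asked for. A standalone sketch of just that check, with hypothetical parameter names:

#include <errno.h>
#include <string.h>

/* Copy one packet's worth of data into the request buffer, reporting
 * -EOVERFLOW if the hardware produced more than the request can hold.
 * Mirrors the bufferspace handling in the read_fifo() matches above. */
static int copy_out_packet(unsigned char *req_buf, unsigned req_len,
			   unsigned *req_actual,
			   const unsigned char *fifo_data, unsigned count)
{
	unsigned bufferspace = req_len - *req_actual;

	if (count > bufferspace) {
		memcpy(req_buf + *req_actual, fifo_data, bufferspace);
		*req_actual = req_len;
		return -EOVERFLOW;      /* extra bytes are dropped */
	}

	memcpy(req_buf + *req_actual, fifo_data, count);
	*req_actual += count;
	return 0;
}
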
/linux-4.1.27/arch/arm64/crypto/
H A Daes-ce-ccm-glue.c68 static int ccm_init_mac(struct aead_request *req, u8 maciv[], u32 msglen) ccm_init_mac() argument
70 struct crypto_aead *aead = crypto_aead_reqtfm(req); ccm_init_mac()
72 u32 l = req->iv[0] + 1; ccm_init_mac()
89 memcpy(maciv, req->iv, AES_BLOCK_SIZE - l); ccm_init_mac()
99 if (req->assoclen) ccm_init_mac()
102 memset(&req->iv[AES_BLOCK_SIZE - l], 0, l); ccm_init_mac()
106 static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[]) ccm_calculate_auth_mac() argument
108 struct crypto_aead *aead = crypto_aead_reqtfm(req); ccm_calculate_auth_mac()
112 u32 len = req->assoclen; ccm_calculate_auth_mac()
127 scatterwalk_start(&walk, req->assoc); ccm_calculate_auth_mac()
148 static int ccm_encrypt(struct aead_request *req) ccm_encrypt() argument
150 struct crypto_aead *aead = crypto_aead_reqtfm(req); ccm_encrypt()
152 struct blkcipher_desc desc = { .info = req->iv }; ccm_encrypt()
156 u32 len = req->cryptlen; ccm_encrypt()
159 err = ccm_init_mac(req, mac, len); ccm_encrypt()
165 if (req->assoclen) ccm_encrypt()
166 ccm_calculate_auth_mac(req, mac); ccm_encrypt()
169 memcpy(buf, req->iv, AES_BLOCK_SIZE); ccm_encrypt()
171 blkcipher_walk_init(&walk, req->dst, req->src, len); ccm_encrypt()
197 scatterwalk_map_and_copy(mac, req->dst, req->cryptlen, ccm_encrypt()
203 static int ccm_decrypt(struct aead_request *req) ccm_decrypt() argument
205 struct crypto_aead *aead = crypto_aead_reqtfm(req); ccm_decrypt()
208 struct blkcipher_desc desc = { .info = req->iv }; ccm_decrypt()
212 u32 len = req->cryptlen - authsize; ccm_decrypt()
215 err = ccm_init_mac(req, mac, len); ccm_decrypt()
221 if (req->assoclen) ccm_decrypt()
222 ccm_calculate_auth_mac(req, mac); ccm_decrypt()
225 memcpy(buf, req->iv, AES_BLOCK_SIZE); ccm_decrypt()
227 blkcipher_walk_init(&walk, req->dst, req->src, len); ccm_decrypt()
253 scatterwalk_map_and_copy(buf, req->src, req->cryptlen - authsize, ccm_decrypt()
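
In ccm_init_mac() above, l = req->iv[0] + 1 recovers the CCM length-field size from the flags byte, the nonce occupies the next AES_BLOCK_SIZE - l bytes, and the trailing l bytes are zeroed to form the initial counter. A hedged, standalone sketch of building such a 16-byte counter block from a nonce, assuming the RFC 3610 layout those lines imply (this is an illustrative helper, not the kernel routine):

#include <string.h>
#include <stdint.h>

#define AES_BLOCK_SIZE 16

/* Build the initial CCM counter block A_0:
 *   byte 0          : flags = L - 1 (size of the length field)
 *   bytes 1..15-L   : nonce
 *   last L bytes    : counter, starting at 0
 * Returns -1 for an out-of-range L. */
static int ccm_build_ctr0(uint8_t block[AES_BLOCK_SIZE],
			  const uint8_t *nonce, unsigned l)
{
	if (l < 2 || l > 8)
		return -1;

	block[0] = (uint8_t)(l - 1);
	memcpy(&block[1], nonce, AES_BLOCK_SIZE - 1 - l);
	memset(&block[AES_BLOCK_SIZE - l], 0, l);
	return 0;
}
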
/linux-4.1.27/include/crypto/internal/
H A Dskcipher.h58 int skcipher_null_givencrypt(struct skcipher_givcrypt_request *req);
59 int skcipher_null_givdecrypt(struct skcipher_givcrypt_request *req);
88 struct skcipher_givcrypt_request *req) skcipher_givcrypt_reqctx()
90 return ablkcipher_request_ctx(&req->creq); skcipher_givcrypt_reqctx()
93 static inline void ablkcipher_request_complete(struct ablkcipher_request *req, ablkcipher_request_complete() argument
96 req->base.complete(&req->base, err); ablkcipher_request_complete()
100 struct skcipher_givcrypt_request *req, int err) skcipher_givcrypt_complete()
102 ablkcipher_request_complete(&req->creq, err); skcipher_givcrypt_complete()
105 static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req) ablkcipher_request_flags() argument
107 return req->base.flags; ablkcipher_request_flags()
87 skcipher_givcrypt_reqctx( struct skcipher_givcrypt_request *req) skcipher_givcrypt_reqctx() argument
99 skcipher_givcrypt_complete( struct skcipher_givcrypt_request *req, int err) skcipher_givcrypt_complete() argument
/linux-4.1.27/drivers/crypto/nx/
H A Dnx-aes-ccm.c173 struct aead_request *req, generate_pat()
203 if (!req->assoclen) { generate_pat()
205 } else if (req->assoclen <= 14) { generate_pat()
211 iauth_len = req->assoclen; generate_pat()
212 } else if (req->assoclen <= 65280) { generate_pat()
226 rc = generate_b0(iv, req->assoclen, authsize, nbytes, b0); generate_pat()
236 if (req->assoclen <= 65280) { generate_pat()
237 *(u16 *)b1 = (u16)req->assoclen; generate_pat()
238 scatterwalk_map_and_copy(b1 + 2, req->assoc, 0, generate_pat()
242 *(u32 *)&b1[2] = (u32)req->assoclen; generate_pat()
243 scatterwalk_map_and_copy(b1 + 6, req->assoc, 0, generate_pat()
249 if (!req->assoclen) { generate_pat()
251 } else if (req->assoclen <= 14) { generate_pat()
278 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); generate_pat()
283 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); generate_pat()
297 to_process = min_t(u32, req->assoclen - processed, generate_pat()
302 req->assoc, processed, generate_pat()
305 if ((to_process + processed) < req->assoclen) { generate_pat()
320 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); generate_pat()
331 atomic64_add(req->assoclen, generate_pat()
335 } while (processed < req->assoclen); generate_pat()
345 static int ccm_nx_decrypt(struct aead_request *req, ccm_nx_decrypt() argument
348 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); ccm_nx_decrypt()
350 unsigned int nbytes = req->cryptlen; ccm_nx_decrypt()
351 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); ccm_nx_decrypt()
363 req->src, nbytes, authsize, ccm_nx_decrypt()
366 rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, ccm_nx_decrypt()
385 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, ccm_nx_decrypt()
392 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); ccm_nx_decrypt()
422 static int ccm_nx_encrypt(struct aead_request *req, ccm_nx_encrypt() argument
425 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); ccm_nx_encrypt()
427 unsigned int nbytes = req->cryptlen; ccm_nx_encrypt()
428 unsigned int authsize = crypto_aead_authsize(crypto_aead_reqtfm(req)); ccm_nx_encrypt()
435 rc = generate_pat(desc->info, req, nx_ctx, authsize, nbytes, ccm_nx_encrypt()
453 rc = nx_build_sg_lists(nx_ctx, desc, req->dst, req->src, ccm_nx_encrypt()
460 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); ccm_nx_encrypt()
486 req->dst, nbytes, authsize, ccm_nx_encrypt()
494 static int ccm4309_aes_nx_encrypt(struct aead_request *req) ccm4309_aes_nx_encrypt() argument
496 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); ccm4309_aes_nx_encrypt()
497 struct nx_gcm_rctx *rctx = aead_request_ctx(req); ccm4309_aes_nx_encrypt()
503 memcpy(iv + 4, req->iv, 8); ccm4309_aes_nx_encrypt()
506 desc.tfm = (struct crypto_blkcipher *)req->base.tfm; ccm4309_aes_nx_encrypt()
508 return ccm_nx_encrypt(req, &desc); ccm4309_aes_nx_encrypt()
511 static int ccm_aes_nx_encrypt(struct aead_request *req) ccm_aes_nx_encrypt() argument
516 desc.info = req->iv; ccm_aes_nx_encrypt()
517 desc.tfm = (struct crypto_blkcipher *)req->base.tfm; ccm_aes_nx_encrypt()
523 return ccm_nx_encrypt(req, &desc); ccm_aes_nx_encrypt()
526 static int ccm4309_aes_nx_decrypt(struct aead_request *req) ccm4309_aes_nx_decrypt() argument
528 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); ccm4309_aes_nx_decrypt()
529 struct nx_gcm_rctx *rctx = aead_request_ctx(req); ccm4309_aes_nx_decrypt()
535 memcpy(iv + 4, req->iv, 8); ccm4309_aes_nx_decrypt()
538 desc.tfm = (struct crypto_blkcipher *)req->base.tfm; ccm4309_aes_nx_decrypt()
540 return ccm_nx_decrypt(req, &desc); ccm4309_aes_nx_decrypt()
543 static int ccm_aes_nx_decrypt(struct aead_request *req) ccm_aes_nx_decrypt() argument
548 desc.info = req->iv; ccm_aes_nx_decrypt()
549 desc.tfm = (struct crypto_blkcipher *)req->base.tfm; ccm_aes_nx_decrypt()
555 return ccm_nx_decrypt(req, &desc); ccm_aes_nx_decrypt()
172 generate_pat(u8 *iv, struct aead_request *req, struct nx_crypto_ctx *nx_ctx, unsigned int authsize, unsigned int nbytes, u8 *out) generate_pat() argument
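
generate_pat() above distinguishes three associated-data cases: none, lengths up to 65280 bytes (a two-byte prefix written into b1), and larger lengths (a longer prefix). A hedged sketch of the standard CCM associated-data length encoding those branches appear to follow (per RFC 3610, where the long form starts with the 0xFF 0xFE marker), written as a standalone helper with explicit big-endian output; the exact boundary handling in the NX driver is not fully visible in the fragments:

#include <stdint.h>
#include <stddef.h>

/* Encode the CCM associated-data length prefix into out[] and return the
 * number of prefix bytes written (0, 2 or 6). */
static size_t ccm_encode_adata_len(uint8_t out[6], uint32_t assoclen)
{
	if (assoclen == 0)
		return 0;

	if (assoclen < 0xFF00) {                /* two-byte big-endian length */
		out[0] = (uint8_t)(assoclen >> 8);
		out[1] = (uint8_t)assoclen;
		return 2;
	}

	out[0] = 0xFF;                          /* marker for the 32-bit form */
	out[1] = 0xFE;
	out[2] = (uint8_t)(assoclen >> 24);
	out[3] = (uint8_t)(assoclen >> 16);
	out[4] = (uint8_t)(assoclen >> 8);
	out[5] = (uint8_t)assoclen;
	return 6;
}
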
H A Dnx-aes-gcm.c125 struct aead_request *req, nx_gca()
132 unsigned int nbytes = req->assoclen; nx_gca()
137 scatterwalk_start(&walk, req->assoc); nx_gca()
162 req->assoc, processed, &to_process); nx_gca()
173 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); nx_gca()
183 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); nx_gca()
193 static int gmac(struct aead_request *req, struct blkcipher_desc *desc) gmac() argument
196 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); gmac()
199 unsigned int nbytes = req->assoclen; gmac()
228 req->assoc, processed, &to_process); gmac()
242 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); gmac()
254 atomic64_add(req->assoclen, &(nx_ctx->stats->aes_bytes)); gmac()
265 static int gcm_empty(struct aead_request *req, struct blkcipher_desc *desc, gcm_empty() argument
269 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); gcm_empty()
316 crypto_aead_authsize(crypto_aead_reqtfm(req))); gcm_empty()
330 static int gcm_aes_nx_crypt(struct aead_request *req, int enc) gcm_aes_nx_crypt() argument
332 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); gcm_aes_nx_crypt()
333 struct nx_gcm_rctx *rctx = aead_request_ctx(req); gcm_aes_nx_crypt()
336 unsigned int nbytes = req->cryptlen; gcm_aes_nx_crypt()
348 if (req->assoclen == 0) gcm_aes_nx_crypt()
349 rc = gcm_empty(req, &desc, enc); gcm_aes_nx_crypt()
351 rc = gmac(req, &desc); gcm_aes_nx_crypt()
359 csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8; gcm_aes_nx_crypt()
360 if (req->assoclen) { gcm_aes_nx_crypt()
361 rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad); gcm_aes_nx_crypt()
372 nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req)); gcm_aes_nx_crypt()
379 desc.tfm = (struct crypto_blkcipher *) req->base.tfm; gcm_aes_nx_crypt()
380 rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, gcm_aes_nx_crypt()
381 req->src, &to_process, processed, gcm_aes_nx_crypt()
394 req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP); gcm_aes_nx_crypt()
417 req->dst, nbytes, gcm_aes_nx_crypt()
418 crypto_aead_authsize(crypto_aead_reqtfm(req)), gcm_aes_nx_crypt()
424 scatterwalk_map_and_copy(itag, req->src, nbytes, gcm_aes_nx_crypt()
425 crypto_aead_authsize(crypto_aead_reqtfm(req)), gcm_aes_nx_crypt()
428 crypto_aead_authsize(crypto_aead_reqtfm(req))) ? gcm_aes_nx_crypt()
436 static int gcm_aes_nx_encrypt(struct aead_request *req) gcm_aes_nx_encrypt() argument
438 struct nx_gcm_rctx *rctx = aead_request_ctx(req); gcm_aes_nx_encrypt()
441 memcpy(iv, req->iv, 12); gcm_aes_nx_encrypt()
443 return gcm_aes_nx_crypt(req, 1); gcm_aes_nx_encrypt()
446 static int gcm_aes_nx_decrypt(struct aead_request *req) gcm_aes_nx_decrypt() argument
448 struct nx_gcm_rctx *rctx = aead_request_ctx(req); gcm_aes_nx_decrypt()
451 memcpy(iv, req->iv, 12); gcm_aes_nx_decrypt()
453 return gcm_aes_nx_crypt(req, 0); gcm_aes_nx_decrypt()
456 static int gcm4106_aes_nx_encrypt(struct aead_request *req) gcm4106_aes_nx_encrypt() argument
458 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); gcm4106_aes_nx_encrypt()
459 struct nx_gcm_rctx *rctx = aead_request_ctx(req); gcm4106_aes_nx_encrypt()
464 memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); gcm4106_aes_nx_encrypt()
466 return gcm_aes_nx_crypt(req, 1); gcm4106_aes_nx_encrypt()
469 static int gcm4106_aes_nx_decrypt(struct aead_request *req) gcm4106_aes_nx_decrypt() argument
471 struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm); gcm4106_aes_nx_decrypt()
472 struct nx_gcm_rctx *rctx = aead_request_ctx(req); gcm4106_aes_nx_decrypt()
477 memcpy(iv + NX_GCM4106_NONCE_LEN, req->iv, 8); gcm4106_aes_nx_decrypt()
479 return gcm_aes_nx_crypt(req, 0); gcm4106_aes_nx_decrypt()
124 nx_gca(struct nx_crypto_ctx *nx_ctx, struct aead_request *req, u8 *out) nx_gca() argument
/linux-4.1.27/net/ipv4/
H A Dudp_diag.c22 const struct inet_diag_req_v2 *req, sk_diag_dump()
28 return inet_sk_diag_fill(sk, NULL, skb, req, sk_diag_dump()
36 const struct inet_diag_req_v2 *req) udp_dump_one()
43 if (req->sdiag_family == AF_INET) udp_dump_one()
45 req->id.idiag_src[0], req->id.idiag_sport, udp_dump_one()
46 req->id.idiag_dst[0], req->id.idiag_dport, udp_dump_one()
47 req->id.idiag_if, tbl); udp_dump_one()
49 else if (req->sdiag_family == AF_INET6) udp_dump_one()
51 (struct in6_addr *)req->id.idiag_src, udp_dump_one()
52 req->id.idiag_sport, udp_dump_one()
53 (struct in6_addr *)req->id.idiag_dst, udp_dump_one()
54 req->id.idiag_dport, udp_dump_one()
55 req->id.idiag_if, tbl); udp_dump_one()
64 err = sock_diag_check_cookie(sk, req->id.idiag_cookie); udp_dump_one()
75 err = inet_sk_diag_fill(sk, NULL, rep, req, udp_dump_one()
156 const struct inet_diag_req_v2 *req) udp_diag_dump_one()
158 return udp_dump_one(&udp_table, in_skb, nlh, req); udp_diag_dump_one()
183 const struct inet_diag_req_v2 *req) udplite_diag_dump_one()
185 return udp_dump_one(&udplite_table, in_skb, nlh, req); udplite_diag_dump_one()
20 sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct netlink_callback *cb, const struct inet_diag_req_v2 *req, struct nlattr *bc) sk_diag_dump() argument
34 udp_dump_one(struct udp_table *tbl, struct sk_buff *in_skb, const struct nlmsghdr *nlh, const struct inet_diag_req_v2 *req) udp_dump_one() argument
155 udp_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, const struct inet_diag_req_v2 *req) udp_diag_dump_one() argument
182 udplite_diag_dump_one(struct sk_buff *in_skb, const struct nlmsghdr *nlh, const struct inet_diag_req_v2 *req) udplite_diag_dump_one() argument
H A Dinet_connection_sock.c298 struct request_sock *req; inet_csk_accept() local
324 req = reqsk_queue_remove(queue); inet_csk_accept()
325 newsk = req->sk; inet_csk_accept()
329 tcp_rsk(req)->tfo_listener && inet_csk_accept()
332 if (tcp_rsk(req)->tfo_listener) { inet_csk_accept()
334 * so can't free req now. Instead, we set req->sk to inet_csk_accept()
336 * so reqsk_fastopen_remove() will free the req inet_csk_accept()
339 req->sk = NULL; inet_csk_accept()
340 req = NULL; inet_csk_accept()
346 if (req) inet_csk_accept()
347 reqsk_put(req); inet_csk_accept()
351 req = NULL; inet_csk_accept()
404 const struct request_sock *req) inet_csk_route_req()
406 const struct inet_request_sock *ireq = inet_rsk(req); inet_csk_route_req()
417 security_req_classify_flow(req, flowi4_to_flowi(fl4)); inet_csk_route_req()
435 const struct request_sock *req) inet_csk_route_child_sock()
437 const struct inet_request_sock *ireq = inet_rsk(req); inet_csk_route_child_sock()
454 security_req_classify_flow(req, flowi4_to_flowi(fl4)); inet_csk_route_child_sock()
485 * req sock will no longer be in listener hash table
494 struct request_sock *req; inet_csk_search_req() local
499 for (req = lopt->syn_table[hash]; req != NULL; req = req->dl_next) { inet_csk_search_req()
500 const struct inet_request_sock *ireq = inet_rsk(req); inet_csk_search_req()
505 AF_INET_FAMILY(req->rsk_ops->family)) { inet_csk_search_req()
506 atomic_inc(&req->rsk_refcnt); inet_csk_search_req()
507 WARN_ON(req->sk); inet_csk_search_req()
513 return req; inet_csk_search_req()
517 void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req, inet_csk_reqsk_queue_hash_add() argument
522 const u32 h = inet_synq_hash(inet_rsk(req)->ir_rmt_addr, inet_csk_reqsk_queue_hash_add()
523 inet_rsk(req)->ir_rmt_port, inet_csk_reqsk_queue_hash_add()
526 reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout); inet_csk_reqsk_queue_hash_add()
536 static inline void syn_ack_recalc(struct request_sock *req, const int thresh, syn_ack_recalc() argument
542 *expire = req->num_timeout >= thresh; syn_ack_recalc()
546 *expire = req->num_timeout >= thresh && syn_ack_recalc()
547 (!inet_rsk(req)->acked || req->num_timeout >= max_retries); syn_ack_recalc()
553 *resend = !inet_rsk(req)->acked || syn_ack_recalc()
554 req->num_timeout >= rskq_defer_accept - 1; syn_ack_recalc()
557 int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req) inet_rtx_syn_ack() argument
559 int err = req->rsk_ops->rtx_syn_ack(parent, req); inet_rtx_syn_ack()
562 req->num_retrans++; inet_rtx_syn_ack()
567 /* return true if req was found in the syn_table[] */ reqsk_queue_unlink()
569 struct request_sock *req) reqsk_queue_unlink()
578 for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL; reqsk_queue_unlink()
580 if (*prev == req) { reqsk_queue_unlink()
581 *prev = req->dl_next; reqsk_queue_unlink()
588 if (timer_pending(&req->rsk_timer) && del_timer_sync(&req->rsk_timer)) reqsk_queue_unlink()
589 reqsk_put(req); reqsk_queue_unlink()
593 void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req) inet_csk_reqsk_queue_drop() argument
595 if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) { inet_csk_reqsk_queue_drop()
596 reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req); inet_csk_reqsk_queue_drop()
597 reqsk_put(req); inet_csk_reqsk_queue_drop()
604 struct request_sock *req = (struct request_sock *)data; reqsk_timer_handler() local
605 struct sock *sk_listener = req->rsk_listener; reqsk_timer_handler()
614 reqsk_put(req); reqsk_timer_handler()
651 syn_ack_recalc(req, thresh, max_retries, defer_accept, reqsk_timer_handler()
653 req->rsk_ops->syn_ack_timeout(req); reqsk_timer_handler()
656 !inet_rtx_syn_ack(sk_listener, req) || reqsk_timer_handler()
657 inet_rsk(req)->acked)) { reqsk_timer_handler()
660 if (req->num_timeout++ == 0) reqsk_timer_handler()
662 timeo = min(TCP_TIMEOUT_INIT << req->num_timeout, TCP_RTO_MAX); reqsk_timer_handler()
663 mod_timer_pinned(&req->rsk_timer, jiffies + timeo); reqsk_timer_handler()
666 inet_csk_reqsk_queue_drop(sk_listener, req); reqsk_timer_handler()
667 reqsk_put(req); reqsk_timer_handler()
671 u32 hash, struct request_sock *req, reqsk_queue_hash_req()
676 req->num_retrans = 0; reqsk_queue_hash_req()
677 req->num_timeout = 0; reqsk_queue_hash_req()
678 req->sk = NULL; reqsk_queue_hash_req()
680 setup_timer(&req->rsk_timer, reqsk_timer_handler, (unsigned long)req); reqsk_queue_hash_req()
681 mod_timer_pinned(&req->rsk_timer, jiffies + timeout); reqsk_queue_hash_req()
682 req->rsk_hash = hash; reqsk_queue_hash_req()
684 /* before letting lookups find us, make sure all req fields reqsk_queue_hash_req()
688 atomic_set(&req->rsk_refcnt, 2); reqsk_queue_hash_req()
691 req->dl_next = lopt->syn_table[hash]; reqsk_queue_hash_req()
692 lopt->syn_table[hash] = req; reqsk_queue_hash_req()
700 * @req: request_sock
706 const struct request_sock *req, inet_csk_clone_lock()
717 inet_sk(newsk)->inet_dport = inet_rsk(req)->ir_rmt_port; inet_csk_clone_lock()
718 inet_sk(newsk)->inet_num = inet_rsk(req)->ir_num; inet_csk_clone_lock()
719 inet_sk(newsk)->inet_sport = htons(inet_rsk(req)->ir_num); inet_csk_clone_lock()
722 newsk->sk_mark = inet_rsk(req)->ir_mark; inet_csk_clone_lock()
724 atomic64_read(&inet_rsk(req)->ir_cookie)); inet_csk_clone_lock()
733 security_inet_csk_clone(newsk, req); inet_csk_clone_lock()
829 struct request_sock *req; inet_csk_listen_stop() local
844 while ((req = acc_req) != NULL) { inet_csk_listen_stop()
845 struct sock *child = req->sk; inet_csk_listen_stop()
847 acc_req = req->dl_next; inet_csk_listen_stop()
860 if (sk->sk_protocol == IPPROTO_TCP && tcp_rsk(req)->tfo_listener) { inet_csk_listen_stop()
861 BUG_ON(tcp_sk(child)->fastopen_rsk != req); inet_csk_listen_stop()
862 BUG_ON(sk != req->rsk_listener); inet_csk_listen_stop()
879 reqsk_put(req); inet_csk_listen_stop()
887 while ((req = acc_req) != NULL) { inet_csk_listen_stop()
888 acc_req = req->dl_next; inet_csk_listen_stop()
889 reqsk_put(req); inet_csk_listen_stop()
402 inet_csk_route_req(struct sock *sk, struct flowi4 *fl4, const struct request_sock *req) inet_csk_route_req() argument
433 inet_csk_route_child_sock(struct sock *sk, struct sock *newsk, const struct request_sock *req) inet_csk_route_child_sock() argument
568 reqsk_queue_unlink(struct request_sock_queue *queue, struct request_sock *req) reqsk_queue_unlink() argument
670 reqsk_queue_hash_req(struct request_sock_queue *queue, u32 hash, struct request_sock *req, unsigned long timeout) reqsk_queue_hash_req() argument
705 inet_csk_clone_lock(const struct sock *sk, const struct request_sock *req, const gfp_t priority) inet_csk_clone_lock() argument
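
reqsk_timer_handler() above rearms the SYN-ACK retransmit timer with min(TCP_TIMEOUT_INIT << num_timeout, TCP_RTO_MAX), i.e. exponential backoff clamped to a ceiling. A tiny standalone sketch of that computation with arbitrary units (the kernel works in jiffies):

/* Exponential backoff with a ceiling, as used when rearming the
 * request-socket timer above. */
static unsigned long backoff_timeout(unsigned long initial,
				     unsigned int num_timeout,
				     unsigned long max)
{
	unsigned long timeo;

	/* avoid an undefined over-wide shift before clamping */
	if (num_timeout >= sizeof(timeo) * 8)
		return max;

	timeo = initial << num_timeout;
	return timeo > max ? max : timeo;
}
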
/linux-4.1.27/drivers/scsi/qla2xxx/
H A Dqla_mid.c486 vha->req = base_vha->req; qla24xx_create_vhost()
487 host->can_queue = base_vha->req->length + 128; qla24xx_create_vhost()
517 qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req) qla25xx_free_req_que() argument
520 uint16_t que_id = req->id; qla25xx_free_req_que()
522 dma_free_coherent(&ha->pdev->dev, (req->length + 1) * qla25xx_free_req_que()
523 sizeof(request_t), req->ring, req->dma); qla25xx_free_req_que()
524 req->ring = NULL; qla25xx_free_req_que()
525 req->dma = 0; qla25xx_free_req_que()
532 kfree(req->outstanding_cmds); qla25xx_free_req_que()
533 kfree(req); qla25xx_free_req_que()
534 req = NULL; qla25xx_free_req_que()
563 qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) qla25xx_delete_req_que() argument
567 if (req) { qla25xx_delete_req_que()
568 req->options |= BIT_0; qla25xx_delete_req_que()
569 ret = qla25xx_init_req_que(vha, req); qla25xx_delete_req_que()
572 qla25xx_free_req_que(vha, req); qla25xx_delete_req_que()
597 struct req_que *req = NULL; qla25xx_delete_queues() local
603 req = ha->req_q_map[cnt]; qla25xx_delete_queues()
604 if (req && test_bit(cnt, ha->req_qid_map)) { qla25xx_delete_queues()
605 ret = qla25xx_delete_req_que(vha, req); qla25xx_delete_queues()
608 "Couldn't delete req que %d.\n", qla25xx_delete_queues()
609 req->id); qla25xx_delete_queues()
636 struct req_que *req = NULL; qla25xx_create_req_que() local
642 req = kzalloc(sizeof(struct req_que), GFP_KERNEL); qla25xx_create_req_que()
643 if (req == NULL) { qla25xx_create_req_que()
649 req->length = REQUEST_ENTRY_CNT_24XX; qla25xx_create_req_que()
650 req->ring = dma_alloc_coherent(&ha->pdev->dev, qla25xx_create_req_que()
651 (req->length + 1) * sizeof(request_t), qla25xx_create_req_que()
652 &req->dma, GFP_KERNEL); qla25xx_create_req_que()
653 if (req->ring == NULL) { qla25xx_create_req_que()
659 ret = qla2x00_alloc_outstanding_cmds(ha, req); qla25xx_create_req_que()
672 ha->req_q_map[que_id] = req; qla25xx_create_req_que()
673 req->rid = rid; qla25xx_create_req_que()
674 req->vp_idx = vp_idx; qla25xx_create_req_que()
675 req->qos = qos; qla25xx_create_req_que()
679 que_id, req->rid, req->vp_idx, req->qos); qla25xx_create_req_que()
682 que_id, req->rid, req->vp_idx, req->qos); qla25xx_create_req_que()
684 req->rsp = NULL; qla25xx_create_req_que()
686 req->rsp = ha->rsp_q_map[rsp_que]; qla25xx_create_req_que()
688 if (MSB(req->rid)) qla25xx_create_req_que()
691 if (LSB(req->rid)) qla25xx_create_req_que()
693 req->options = options; qla25xx_create_req_que()
696 "options=0x%x.\n", req->options); qla25xx_create_req_que()
698 "options=0x%x.\n", req->options); qla25xx_create_req_que()
699 for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) qla25xx_create_req_que()
700 req->outstanding_cmds[cnt] = NULL; qla25xx_create_req_que()
701 req->current_outstanding_cmd = 1; qla25xx_create_req_que()
703 req->ring_ptr = req->ring; qla25xx_create_req_que()
704 req->ring_index = 0; qla25xx_create_req_que()
705 req->cnt = req->length; qla25xx_create_req_que()
706 req->id = que_id; qla25xx_create_req_que()
708 req->req_q_in = &reg->isp25mq.req_q_in; qla25xx_create_req_que()
709 req->req_q_out = &reg->isp25mq.req_q_out; qla25xx_create_req_que()
710 req->max_q_depth = ha->req_q_map[0]->max_q_depth; qla25xx_create_req_que()
711 req->out_ptr = (void *)(req->ring + req->length); qla25xx_create_req_que()
716 req->ring_ptr, req->ring_index, qla25xx_create_req_que()
717 req->cnt, req->id, req->max_q_depth); qla25xx_create_req_que()
721 req->ring_ptr, req->ring_index, req->cnt, qla25xx_create_req_que()
722 req->id, req->max_q_depth); qla25xx_create_req_que()
724 ret = qla25xx_init_req_que(base_vha, req); qla25xx_create_req_que()
734 return req->id; qla25xx_create_req_que()
737 qla25xx_free_req_que(base_vha, req); qla25xx_create_req_que()
758 uint8_t vp_idx, uint16_t rid, int req) qla25xx_create_rsp_que()
845 if (req >= 0) qla25xx_create_rsp_que()
846 rsp->req = ha->req_q_map[req]; qla25xx_create_rsp_que()
848 rsp->req = NULL; qla25xx_create_rsp_que()
757 qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, uint8_t vp_idx, uint16_t rid, int req) qla25xx_create_rsp_que() argument
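The hits above trace the request-queue lifecycle in qla25xx_create_req_que() and qla25xx_free_req_que(): allocate the descriptor, allocate a ring with one spare entry, initialise the producer index and free-slot count, and unwind in reverse order on any failure. Below is a minimal userspace sketch of that shape, assuming invented demo_* names and plain calloc() in place of dma_alloc_coherent(); it illustrates the pattern only, not the driver itself.

#include <stdlib.h>

typedef struct { unsigned char raw[64]; } demo_request_t;   /* stand-in for request_t */

struct demo_req_que {
    demo_request_t *ring;
    void          **outstanding_cmds;
    unsigned short  num_outstanding_cmds;
    unsigned short  length;
    unsigned short  ring_index;
    unsigned short  cnt;                 /* free entries, starts at length */
};

void demo_free_req_que(struct demo_req_que *req)
{
    if (!req)
        return;
    free(req->ring);                     /* dma_free_coherent() in the driver */
    free(req->outstanding_cmds);
    free(req);
}

struct demo_req_que *demo_create_req_que(unsigned short length,
                                         unsigned short outstanding)
{
    struct demo_req_que *req = calloc(1, sizeof(*req));

    if (!req)
        return NULL;

    req->length = length;
    /* one spare entry, mirroring "(req->length + 1) * sizeof(request_t)" */
    req->ring = calloc(length + 1u, sizeof(demo_request_t));
    if (!req->ring)
        goto fail;

    req->outstanding_cmds = calloc(outstanding, sizeof(void *));
    if (!req->outstanding_cmds)
        goto fail;
    req->num_outstanding_cmds = outstanding;

    req->ring_index = 0;                 /* producer starts at the ring base */
    req->cnt = req->length;              /* every slot is initially free */
    return req;

fail:
    demo_free_req_que(req);              /* same unwind path the driver uses */
    return NULL;
}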
H A Dqla_iocb.c98 struct req_que *req = vha->req; qla2x00_prep_cont_type0_iocb() local
100 req->ring_index++; qla2x00_prep_cont_type0_iocb()
101 if (req->ring_index == req->length) { qla2x00_prep_cont_type0_iocb()
102 req->ring_index = 0; qla2x00_prep_cont_type0_iocb()
103 req->ring_ptr = req->ring; qla2x00_prep_cont_type0_iocb()
105 req->ring_ptr++; qla2x00_prep_cont_type0_iocb()
108 cont_pkt = (cont_entry_t *)req->ring_ptr; qla2x00_prep_cont_type0_iocb()
124 qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha, struct req_que *req) qla2x00_prep_cont_type1_iocb() argument
129 req->ring_index++; qla2x00_prep_cont_type1_iocb()
130 if (req->ring_index == req->length) { qla2x00_prep_cont_type1_iocb()
131 req->ring_index = 0; qla2x00_prep_cont_type1_iocb()
132 req->ring_ptr = req->ring; qla2x00_prep_cont_type1_iocb()
134 req->ring_ptr++; qla2x00_prep_cont_type1_iocb()
137 cont_pkt = (cont_a64_entry_t *)req->ring_ptr; qla2x00_prep_cont_type1_iocb()
291 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); scsi_for_each_sg()
326 struct req_que *req; qla2x00_start_scsi() local
335 req = ha->req_q_map[0]; qla2x00_start_scsi()
342 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != qla2x00_start_scsi()
353 handle = req->current_outstanding_cmd; qla2x00_start_scsi()
354 for (index = 1; index < req->num_outstanding_cmds; index++) { qla2x00_start_scsi()
356 if (handle == req->num_outstanding_cmds) qla2x00_start_scsi()
358 if (!req->outstanding_cmds[handle]) qla2x00_start_scsi()
361 if (index == req->num_outstanding_cmds) qla2x00_start_scsi()
377 if (req->cnt < (req_cnt + 2)) { qla2x00_start_scsi()
379 if (req->ring_index < cnt) qla2x00_start_scsi()
380 req->cnt = cnt - req->ring_index; qla2x00_start_scsi()
382 req->cnt = req->length - qla2x00_start_scsi()
383 (req->ring_index - cnt); qla2x00_start_scsi()
385 if (req->cnt < (req_cnt + 2)) qla2x00_start_scsi()
390 req->current_outstanding_cmd = handle; qla2x00_start_scsi()
391 req->outstanding_cmds[handle] = sp; qla2x00_start_scsi()
394 req->cnt -= req_cnt; qla2x00_start_scsi()
396 cmd_pkt = (cmd_entry_t *)req->ring_ptr; qla2x00_start_scsi()
420 req->ring_index++; qla2x00_start_scsi()
421 if (req->ring_index == req->length) { qla2x00_start_scsi()
422 req->ring_index = 0; qla2x00_start_scsi()
423 req->ring_ptr = req->ring; qla2x00_start_scsi()
425 req->ring_ptr++; qla2x00_start_scsi()
430 WRT_REG_WORD(ISP_REQ_Q_IN(ha, reg), req->ring_index); qla2x00_start_scsi()
454 qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) qla2x00_start_iocbs() argument
457 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); qla2x00_start_iocbs()
463 req->ring_index++; qla2x00_start_iocbs()
464 if (req->ring_index == req->length) { qla2x00_start_iocbs()
465 req->ring_index = 0; qla2x00_start_iocbs()
466 req->ring_ptr = req->ring; qla2x00_start_iocbs()
468 req->ring_ptr++; qla2x00_start_iocbs()
472 WRT_REG_DWORD(req->req_q_in, req->ring_index); qla2x00_start_iocbs()
475 WRT_REG_DWORD(&reg->ispfx00.req_q_in, req->ring_index); qla2x00_start_iocbs()
479 WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index); qla2x00_start_iocbs()
483 req->ring_index); qla2x00_start_iocbs()
501 __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, __qla2x00_marker() argument
511 req = ha->req_q_map[0]; __qla2x00_marker()
529 mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle); __qla2x00_marker()
537 qla2x00_start_iocbs(vha, req); __qla2x00_marker()
543 qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req, qla2x00_marker() argument
551 ret = __qla2x00_marker(vha, req, rsp, loop_id, lun, type); qla2x00_marker()
567 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0, qla2x00_issue_marker()
571 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0, qla2x00_issue_marker()
713 struct req_que *req; qla24xx_build_scsi_iocbs() local
728 req = vha->req; qla24xx_build_scsi_iocbs()
759 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); scsi_for_each_sg()
1454 struct req_que *req = NULL; qla24xx_start_scsi() local
1464 req = vha->req; qla24xx_start_scsi()
1471 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != qla24xx_start_scsi()
1481 handle = req->current_outstanding_cmd; qla24xx_start_scsi()
1482 for (index = 1; index < req->num_outstanding_cmds; index++) { qla24xx_start_scsi()
1484 if (handle == req->num_outstanding_cmds) qla24xx_start_scsi()
1486 if (!req->outstanding_cmds[handle]) qla24xx_start_scsi()
1489 if (index == req->num_outstanding_cmds) qla24xx_start_scsi()
1503 if (req->cnt < (req_cnt + 2)) { qla24xx_start_scsi()
1504 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : qla24xx_start_scsi()
1505 RD_REG_DWORD_RELAXED(req->req_q_out); qla24xx_start_scsi()
1506 if (req->ring_index < cnt) qla24xx_start_scsi()
1507 req->cnt = cnt - req->ring_index; qla24xx_start_scsi()
1509 req->cnt = req->length - qla24xx_start_scsi()
1510 (req->ring_index - cnt); qla24xx_start_scsi()
1511 if (req->cnt < (req_cnt + 2)) qla24xx_start_scsi()
1516 req->current_outstanding_cmd = handle; qla24xx_start_scsi()
1517 req->outstanding_cmds[handle] = sp; qla24xx_start_scsi()
1520 req->cnt -= req_cnt; qla24xx_start_scsi()
1522 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; qla24xx_start_scsi()
1523 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); qla24xx_start_scsi()
1558 req->ring_index++; qla24xx_start_scsi()
1559 if (req->ring_index == req->length) { qla24xx_start_scsi()
1560 req->ring_index = 0; qla24xx_start_scsi()
1561 req->ring_ptr = req->ring; qla24xx_start_scsi()
1563 req->ring_ptr++; qla24xx_start_scsi()
1568 WRT_REG_DWORD(req->req_q_in, req->ring_index); qla24xx_start_scsi()
1607 struct req_que *req = NULL; qla24xx_dif_start_scsi() local
1626 req = vha->req; qla24xx_dif_start_scsi()
1633 if (qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL) != qla24xx_dif_start_scsi()
1643 handle = req->current_outstanding_cmd; qla24xx_dif_start_scsi()
1644 for (index = 1; index < req->num_outstanding_cmds; index++) { qla24xx_dif_start_scsi()
1646 if (handle == req->num_outstanding_cmds) qla24xx_dif_start_scsi()
1648 if (!req->outstanding_cmds[handle]) qla24xx_dif_start_scsi()
1652 if (index == req->num_outstanding_cmds) qla24xx_dif_start_scsi()
1707 if (req->cnt < (req_cnt + 2)) { qla24xx_dif_start_scsi()
1708 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : qla24xx_dif_start_scsi()
1709 RD_REG_DWORD_RELAXED(req->req_q_out); qla24xx_dif_start_scsi()
1710 if (req->ring_index < cnt) qla24xx_dif_start_scsi()
1711 req->cnt = cnt - req->ring_index; qla24xx_dif_start_scsi()
1713 req->cnt = req->length - qla24xx_dif_start_scsi()
1714 (req->ring_index - cnt); qla24xx_dif_start_scsi()
1715 if (req->cnt < (req_cnt + 2)) qla24xx_dif_start_scsi()
1722 req->current_outstanding_cmd = handle; qla24xx_dif_start_scsi()
1723 req->outstanding_cmds[handle] = sp; qla24xx_dif_start_scsi()
1726 req->cnt -= req_cnt; qla24xx_dif_start_scsi()
1729 cmd_pkt = (struct cmd_type_crc_2 *)req->ring_ptr; qla24xx_dif_start_scsi()
1730 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); qla24xx_dif_start_scsi()
1749 req->ring_ptr, tot_dsds, tot_prot_dsds, fw_prot_opts) != qla24xx_dif_start_scsi()
1760 req->ring_index++; qla24xx_dif_start_scsi()
1761 if (req->ring_index == req->length) { qla24xx_dif_start_scsi()
1762 req->ring_index = 0; qla24xx_dif_start_scsi()
1763 req->ring_ptr = req->ring; qla24xx_dif_start_scsi()
1765 req->ring_ptr++; qla24xx_dif_start_scsi()
1768 WRT_REG_DWORD(req->req_q_in, req->ring_index); qla24xx_dif_start_scsi()
1782 req->outstanding_cmds[handle] = NULL; qla24xx_dif_start_scsi()
1783 req->cnt += req_cnt; qla24xx_dif_start_scsi()
1821 struct req_que *req = ha->req_q_map[0]; qla2x00_alloc_iocbs() local
1822 device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id); qla2x00_alloc_iocbs()
1835 handle = req->current_outstanding_cmd; qla2x00_alloc_iocbs()
1836 for (index = 1; index < req->num_outstanding_cmds; index++) { qla2x00_alloc_iocbs()
1838 if (handle == req->num_outstanding_cmds) qla2x00_alloc_iocbs()
1840 if (!req->outstanding_cmds[handle]) qla2x00_alloc_iocbs()
1843 if (index == req->num_outstanding_cmds) { qla2x00_alloc_iocbs()
1850 req->current_outstanding_cmd = handle; qla2x00_alloc_iocbs()
1851 req->outstanding_cmds[handle] = sp; qla2x00_alloc_iocbs()
1860 if (req->cnt < req_cnt + 2) { qla2x00_alloc_iocbs()
1873 if (req->ring_index < cnt) qla2x00_alloc_iocbs()
1874 req->cnt = cnt - req->ring_index; qla2x00_alloc_iocbs()
1876 req->cnt = req->length - qla2x00_alloc_iocbs()
1877 (req->ring_index - cnt); qla2x00_alloc_iocbs()
1879 if (req->cnt < req_cnt + 2) qla2x00_alloc_iocbs()
1883 req->cnt -= req_cnt; qla2x00_alloc_iocbs()
1884 pkt = req->ring_ptr; qla2x00_alloc_iocbs()
2013 struct req_que *req = vha->req; qla24xx_tm_iocb() local
2020 tsk->handle = MAKE_HANDLE(req->id, tsk->handle); qla24xx_tm_iocb()
2259 struct req_que *req = NULL; qla82xx_start_scsi() local
2266 req = vha->req; qla82xx_start_scsi()
2276 if (qla2x00_marker(vha, req, qla82xx_start_scsi()
2289 handle = req->current_outstanding_cmd; qla82xx_start_scsi()
2290 for (index = 1; index < req->num_outstanding_cmds; index++) { qla82xx_start_scsi()
2292 if (handle == req->num_outstanding_cmds) qla82xx_start_scsi()
2294 if (!req->outstanding_cmds[handle]) qla82xx_start_scsi()
2297 if (index == req->num_outstanding_cmds) qla82xx_start_scsi()
2356 if (req->cnt < (req_cnt + 2)) { qla82xx_start_scsi()
2359 if (req->ring_index < cnt) qla82xx_start_scsi()
2360 req->cnt = cnt - req->ring_index; qla82xx_start_scsi()
2362 req->cnt = req->length - qla82xx_start_scsi()
2363 (req->ring_index - cnt); qla82xx_start_scsi()
2364 if (req->cnt < (req_cnt + 2)) qla82xx_start_scsi()
2406 cmd_pkt = (struct cmd_type_6 *)req->ring_ptr; qla82xx_start_scsi()
2407 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); qla82xx_start_scsi()
2467 if (req->cnt < (req_cnt + 2)) { qla82xx_start_scsi()
2470 if (req->ring_index < cnt) qla82xx_start_scsi()
2471 req->cnt = cnt - req->ring_index; qla82xx_start_scsi()
2473 req->cnt = req->length - qla82xx_start_scsi()
2474 (req->ring_index - cnt); qla82xx_start_scsi()
2476 if (req->cnt < (req_cnt + 2)) qla82xx_start_scsi()
2479 cmd_pkt = (struct cmd_type_7 *)req->ring_ptr; qla82xx_start_scsi()
2480 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); qla82xx_start_scsi()
2521 req->current_outstanding_cmd = handle; qla82xx_start_scsi()
2522 req->outstanding_cmds[handle] = sp; qla82xx_start_scsi()
2525 req->cnt -= req_cnt; qla82xx_start_scsi()
2529 req->ring_index++; qla82xx_start_scsi()
2530 if (req->ring_index == req->length) { qla82xx_start_scsi()
2531 req->ring_index = 0; qla82xx_start_scsi()
2532 req->ring_ptr = req->ring; qla82xx_start_scsi()
2534 req->ring_ptr++; qla82xx_start_scsi()
2540 dbval = dbval | (req->id << 8) | (req->ring_index << 16); qla82xx_start_scsi()
2584 struct req_que *req = vha->req; qla24xx_abort_iocb() local
2589 abt_iocb->handle = cpu_to_le32(MAKE_HANDLE(req->id, sp->handle)); qla24xx_abort_iocb()
2592 cpu_to_le32(MAKE_HANDLE(req->id, aio->u.abt.cmd_hndl)); qla24xx_abort_iocb()
2597 abt_iocb->req_que_no = cpu_to_le16(req->id); qla24xx_abort_iocb()
2726 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); qla25xx_build_bidir_iocb()
2752 cont_pkt = qla2x00_prep_cont_type1_iocb(vha, vha->req); qla25xx_build_bidir_iocb()
2780 struct req_que *req; qla2x00_start_bidir() local
2786 req = vha->req; qla2x00_start_bidir()
2790 if (qla2x00_marker(vha, req, qla2x00_start_bidir()
2800 handle = req->current_outstanding_cmd; qla2x00_start_bidir()
2801 for (index = 1; index < req->num_outstanding_cmds; index++) { qla2x00_start_bidir()
2803 if (handle == req->num_outstanding_cmds) qla2x00_start_bidir()
2805 if (!req->outstanding_cmds[handle]) qla2x00_start_bidir()
2809 if (index == req->num_outstanding_cmds) { qla2x00_start_bidir()
2818 if (req->cnt < req_cnt + 2) { qla2x00_start_bidir()
2819 cnt = IS_SHADOW_REG_CAPABLE(ha) ? *req->out_ptr : qla2x00_start_bidir()
2820 RD_REG_DWORD_RELAXED(req->req_q_out); qla2x00_start_bidir()
2821 if (req->ring_index < cnt) qla2x00_start_bidir()
2822 req->cnt = cnt - req->ring_index; qla2x00_start_bidir()
2824 req->cnt = req->length - qla2x00_start_bidir()
2825 (req->ring_index - cnt); qla2x00_start_bidir()
2827 if (req->cnt < req_cnt + 2) { qla2x00_start_bidir()
2832 cmd_pkt = (struct cmd_bidir *)req->ring_ptr; qla2x00_start_bidir()
2833 cmd_pkt->handle = MAKE_HANDLE(req->id, handle); qla2x00_start_bidir()
2849 req->current_outstanding_cmd = handle; qla2x00_start_bidir()
2850 req->outstanding_cmds[handle] = sp; qla2x00_start_bidir()
2852 req->cnt -= req_cnt; qla2x00_start_bidir()
2856 qla2x00_start_iocbs(vha, req); qla2x00_start_bidir()
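Every *_start_scsi() variant above repeats the same three bookkeeping steps: scan the outstanding-command table for a free handle starting just after the last one issued, recompute the free-entry count from the consumer index the hardware reports, and advance the producer index with wrap-around. The following is a compact userspace sketch of those steps with made-up demo_* names; the arithmetic mirrors the excerpts, while the locking and register writes are omitted.

struct demo_ring {
    void         **outstanding;          /* slot 0 is never handed out */
    unsigned int   num_outstanding;
    unsigned int   current_handle;
    unsigned int   ring_index;           /* producer index */
    unsigned int   length;               /* entries in the ring */
    unsigned int   cnt;                  /* cached free-entry count */
};

/* Find a free handle, starting just after the last one handed out. */
int demo_find_handle(struct demo_ring *r, unsigned int *out)
{
    unsigned int handle = r->current_handle;
    unsigned int index;

    for (index = 1; index < r->num_outstanding; index++) {
        handle++;
        if (handle == r->num_outstanding)
            handle = 1;                  /* wrap, skipping slot 0 */
        if (!r->outstanding[handle]) {
            *out = handle;
            return 0;
        }
    }
    return -1;                           /* every slot is busy */
}

/*
 * Recompute free entries from the consumer index the hardware reports
 * (req_q_out / shadow out_ptr), then check there is room for req_cnt
 * entries plus the usual two-entry slack.
 */
int demo_room(struct demo_ring *r, unsigned int hw_out, unsigned int req_cnt)
{
    if (r->cnt < req_cnt + 2) {
        if (r->ring_index < hw_out)
            r->cnt = hw_out - r->ring_index;
        else
            r->cnt = r->length - (r->ring_index - hw_out);
    }
    return r->cnt >= req_cnt + 2;
}

/* Advance the producer index, wrapping back to the start of the ring. */
void demo_advance(struct demo_ring *r)
{
    r->ring_index++;
    if (r->ring_index == r->length)
        r->ring_index = 0;
}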
/linux-4.1.27/drivers/block/
H A Dnbd.c99 static void nbd_end_request(struct nbd_device *nbd, struct request *req) nbd_end_request() argument
101 int error = req->errors ? -EIO : 0; nbd_end_request()
102 struct request_queue *q = req->q; nbd_end_request()
105 dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req, nbd_end_request() local
109 __blk_end_request_all(req, error); nbd_end_request()
228 static int nbd_send_req(struct nbd_device *nbd, struct request *req) nbd_send_req() argument
232 unsigned long size = blk_rq_bytes(req); nbd_send_req()
236 request.type = htonl(nbd_cmd(req)); nbd_send_req()
238 if (nbd_cmd(req) != NBD_CMD_FLUSH && nbd_cmd(req) != NBD_CMD_DISC) { nbd_send_req()
239 request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); nbd_send_req()
242 memcpy(request.handle, &req, sizeof(req)); nbd_send_req()
245 req, nbdcmd_to_ascii(nbd_cmd(req)), nbd_send_req() local
246 (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); nbd_send_req()
248 (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); nbd_send_req()
255 if (nbd_cmd(req) == NBD_CMD_WRITE) { nbd_send_req()
262 rq_for_each_segment(bvec, req, iter) { rq_for_each_segment()
267 req, bvec.bv_len); rq_for_each_segment() local
283 struct request *req, *tmp; nbd_find_request() local
291 list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) { nbd_find_request()
292 if (req != xreq) nbd_find_request()
294 list_del_init(&req->queuelist); nbd_find_request()
296 return req; nbd_find_request()
318 struct request *req; nbd_read_stat() local
335 req = nbd_find_request(nbd, *(struct request **)reply.handle); nbd_read_stat()
336 if (IS_ERR(req)) { nbd_read_stat()
337 result = PTR_ERR(req); nbd_read_stat()
350 req->errors++; nbd_read_stat()
351 return req; nbd_read_stat()
354 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); nbd_read_stat() local
355 if (nbd_cmd(req) == NBD_CMD_READ) { nbd_read_stat()
359 rq_for_each_segment(bvec, req, iter) { rq_for_each_segment()
364 req->errors++; rq_for_each_segment()
365 return req; rq_for_each_segment()
368 req, bvec.bv_len); rq_for_each_segment() local
371 return req;
393 struct request *req; nbd_do_it() local
407 while ((req = nbd_read_stat(nbd)) != NULL) nbd_do_it()
408 nbd_end_request(nbd, req); nbd_do_it()
417 struct request *req; nbd_clear_que() local
433 req = list_entry(nbd->queue_head.next, struct request, nbd_clear_que()
435 list_del_init(&req->queuelist); nbd_clear_que()
436 req->errors++; nbd_clear_que()
437 nbd_end_request(nbd, req); nbd_clear_que()
441 req = list_entry(nbd->waiting_queue.next, struct request, nbd_clear_que()
443 list_del_init(&req->queuelist); nbd_clear_que()
444 req->errors++; nbd_clear_que()
445 nbd_end_request(nbd, req); nbd_clear_que()
450 static void nbd_handle_req(struct nbd_device *nbd, struct request *req) nbd_handle_req() argument
452 if (req->cmd_type != REQ_TYPE_FS) nbd_handle_req()
455 nbd_cmd(req) = NBD_CMD_READ; nbd_handle_req()
456 if (rq_data_dir(req) == WRITE) { nbd_handle_req()
457 if ((req->cmd_flags & REQ_DISCARD)) { nbd_handle_req()
459 nbd_cmd(req) = NBD_CMD_TRIM; nbd_handle_req()
461 nbd_cmd(req) = NBD_CMD_WRITE; nbd_handle_req()
469 if (req->cmd_flags & REQ_FLUSH) { nbd_handle_req()
470 BUG_ON(unlikely(blk_rq_sectors(req))); nbd_handle_req()
471 nbd_cmd(req) = NBD_CMD_FLUSH; nbd_handle_req()
474 req->errors = 0; nbd_handle_req()
484 nbd->active_req = req; nbd_handle_req()
486 if (nbd_send_req(nbd, req) != 0) { nbd_handle_req()
488 req->errors++; nbd_handle_req()
489 nbd_end_request(nbd, req); nbd_handle_req()
492 list_add_tail(&req->queuelist, &nbd->queue_head); nbd_handle_req()
503 req->errors++; nbd_handle_req()
504 nbd_end_request(nbd, req); nbd_handle_req()
510 struct request *req; nbd_thread() local
524 req = list_entry(nbd->waiting_queue.next, struct request, nbd_thread()
526 list_del_init(&req->queuelist); nbd_thread()
530 nbd_handle_req(nbd, req); nbd_thread()
538 * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK))
539 * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); }
545 struct request *req; variable in typeref:struct:request
547 while ((req = blk_fetch_request(q)) != NULL) {
552 nbd = req->rq_disk->private_data;
557 req, req->cmd_type); variable
562 req->errors++;
563 nbd_end_request(nbd, req);
569 list_add_tail(&req->queuelist, &nbd->waiting_queue);
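The nbd.c hits show the core of the client side: nbd_handle_req() maps a block-layer request onto an NBD command (read, write, trim, or flush), nbd_send_req() serialises it to the socket with the request pointer used as the reply handle, and nbd_read_stat() matches replies back to queued requests. A minimal sketch of just the classification step, assuming simplified boolean inputs in place of rq_data_dir()/cmd_flags and an illustrative enum rather than the protocol constants:

enum demo_nbd_cmd { DEMO_CMD_READ, DEMO_CMD_WRITE, DEMO_CMD_TRIM, DEMO_CMD_FLUSH };

enum demo_nbd_cmd demo_classify_request(int is_write, int is_discard, int is_flush)
{
    enum demo_nbd_cmd cmd = DEMO_CMD_READ;       /* default, as in the driver */

    if (is_write)
        cmd = is_discard ? DEMO_CMD_TRIM : DEMO_CMD_WRITE;

    /* an empty flush overrides the data direction (the REQ_FLUSH path above) */
    if (is_flush)
        cmd = DEMO_CMD_FLUSH;

    return cmd;
}

The driver additionally rejects requests whose cmd_type is not REQ_TYPE_FS before any of this runs, and counts any send or receive failure in req->errors so the request completes with -EIO.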
H A Dps3disk.c47 struct request *req; member in struct:ps3disk_private
93 struct request *req, int gather) ps3disk_scatter_gather()
102 rq_for_each_segment(bvec, req, iter) { rq_for_each_segment()
122 struct request *req) ps3disk_submit_request_sg()
125 int write = rq_data_dir(req), res; ps3disk_submit_request_sg()
135 rq_for_each_segment(bv, req, iter) ps3disk_submit_request_sg()
138 "%s:%u: %s req has %u bvecs for %u sectors\n", ps3disk_submit_request_sg()
139 __func__, __LINE__, op, n, blk_rq_sectors(req)); ps3disk_submit_request_sg()
142 start_sector = blk_rq_pos(req) * priv->blocking_factor; ps3disk_submit_request_sg()
143 sectors = blk_rq_sectors(req) * priv->blocking_factor; ps3disk_submit_request_sg()
148 ps3disk_scatter_gather(dev, req, 1); ps3disk_submit_request_sg()
161 __blk_end_request_all(req, -EIO); ps3disk_submit_request_sg()
165 priv->req = req; ps3disk_submit_request_sg()
170 struct request *req) ps3disk_submit_flush_request()
183 __blk_end_request_all(req, -EIO); ps3disk_submit_flush_request()
187 priv->req = req; ps3disk_submit_flush_request()
194 struct request *req; ps3disk_do_request() local
198 while ((req = blk_fetch_request(q))) { ps3disk_do_request()
199 if (req->cmd_flags & REQ_FLUSH) { ps3disk_do_request()
200 if (ps3disk_submit_flush_request(dev, req)) ps3disk_do_request()
202 } else if (req->cmd_type == REQ_TYPE_FS) { ps3disk_do_request()
203 if (ps3disk_submit_request_sg(dev, req)) ps3disk_do_request()
206 blk_dump_rq_flags(req, DEVICE_NAME " bad request"); ps3disk_do_request()
207 __blk_end_request_all(req, -EIO); ps3disk_do_request()
218 if (priv->req) { ps3disk_request()
230 struct request *req; ps3disk_interrupt() local
249 req = priv->req; ps3disk_interrupt()
250 if (!req) { ps3disk_interrupt()
259 if (req->cmd_flags & REQ_FLUSH) { ps3disk_interrupt()
263 read = !rq_data_dir(req); ps3disk_interrupt()
275 ps3disk_scatter_gather(dev, req, 0); ps3disk_interrupt()
279 __blk_end_request_all(req, error); ps3disk_interrupt()
280 priv->req = NULL; ps3disk_interrupt()
92 ps3disk_scatter_gather(struct ps3_storage_device *dev, struct request *req, int gather) ps3disk_scatter_gather() argument
121 ps3disk_submit_request_sg(struct ps3_storage_device *dev, struct request *req) ps3disk_submit_request_sg() argument
169 ps3disk_submit_flush_request(struct ps3_storage_device *dev, struct request *req) ps3disk_submit_flush_request() argument
/linux-4.1.27/fs/nilfs2/
H A Ddat.c54 struct nilfs_palloc_req *req, int create) nilfs_dat_prepare_entry()
56 return nilfs_palloc_get_entry_block(dat, req->pr_entry_nr, nilfs_dat_prepare_entry()
57 create, &req->pr_entry_bh); nilfs_dat_prepare_entry()
61 struct nilfs_palloc_req *req) nilfs_dat_commit_entry()
63 mark_buffer_dirty(req->pr_entry_bh); nilfs_dat_commit_entry()
65 brelse(req->pr_entry_bh); nilfs_dat_commit_entry()
69 struct nilfs_palloc_req *req) nilfs_dat_abort_entry()
71 brelse(req->pr_entry_bh); nilfs_dat_abort_entry()
74 int nilfs_dat_prepare_alloc(struct inode *dat, struct nilfs_palloc_req *req) nilfs_dat_prepare_alloc() argument
78 ret = nilfs_palloc_prepare_alloc_entry(dat, req); nilfs_dat_prepare_alloc()
82 ret = nilfs_dat_prepare_entry(dat, req, 1); nilfs_dat_prepare_alloc()
84 nilfs_palloc_abort_alloc_entry(dat, req); nilfs_dat_prepare_alloc()
89 void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req) nilfs_dat_commit_alloc() argument
94 kaddr = kmap_atomic(req->pr_entry_bh->b_page); nilfs_dat_commit_alloc()
95 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_alloc()
96 req->pr_entry_bh, kaddr); nilfs_dat_commit_alloc()
102 nilfs_palloc_commit_alloc_entry(dat, req); nilfs_dat_commit_alloc()
103 nilfs_dat_commit_entry(dat, req); nilfs_dat_commit_alloc()
106 void nilfs_dat_abort_alloc(struct inode *dat, struct nilfs_palloc_req *req) nilfs_dat_abort_alloc() argument
108 nilfs_dat_abort_entry(dat, req); nilfs_dat_abort_alloc()
109 nilfs_palloc_abort_alloc_entry(dat, req); nilfs_dat_abort_alloc()
113 struct nilfs_palloc_req *req) nilfs_dat_commit_free()
118 kaddr = kmap_atomic(req->pr_entry_bh->b_page); nilfs_dat_commit_free()
119 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_free()
120 req->pr_entry_bh, kaddr); nilfs_dat_commit_free()
126 nilfs_dat_commit_entry(dat, req); nilfs_dat_commit_free()
127 nilfs_palloc_commit_free_entry(dat, req); nilfs_dat_commit_free()
130 int nilfs_dat_prepare_start(struct inode *dat, struct nilfs_palloc_req *req) nilfs_dat_prepare_start() argument
134 ret = nilfs_dat_prepare_entry(dat, req, 0); nilfs_dat_prepare_start()
139 void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req, nilfs_dat_commit_start() argument
145 kaddr = kmap_atomic(req->pr_entry_bh->b_page); nilfs_dat_commit_start()
146 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_start()
147 req->pr_entry_bh, kaddr); nilfs_dat_commit_start()
152 nilfs_dat_commit_entry(dat, req); nilfs_dat_commit_start()
155 int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req) nilfs_dat_prepare_end() argument
163 ret = nilfs_dat_prepare_entry(dat, req, 0); nilfs_dat_prepare_end()
169 kaddr = kmap_atomic(req->pr_entry_bh->b_page); nilfs_dat_prepare_end()
170 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_prepare_end()
171 req->pr_entry_bh, kaddr); nilfs_dat_prepare_end()
177 ret = nilfs_palloc_prepare_free_entry(dat, req); nilfs_dat_prepare_end()
179 nilfs_dat_abort_entry(dat, req); nilfs_dat_prepare_end()
187 void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req, nilfs_dat_commit_end() argument
195 kaddr = kmap_atomic(req->pr_entry_bh->b_page); nilfs_dat_commit_end()
196 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_commit_end()
197 req->pr_entry_bh, kaddr); nilfs_dat_commit_end()
208 nilfs_dat_commit_free(dat, req); nilfs_dat_commit_end()
210 nilfs_dat_commit_entry(dat, req); nilfs_dat_commit_end()
213 void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req) nilfs_dat_abort_end() argument
220 kaddr = kmap_atomic(req->pr_entry_bh->b_page); nilfs_dat_abort_end()
221 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, nilfs_dat_abort_end()
222 req->pr_entry_bh, kaddr); nilfs_dat_abort_end()
228 nilfs_palloc_abort_free_entry(dat, req); nilfs_dat_abort_end()
229 nilfs_dat_abort_entry(dat, req); nilfs_dat_abort_end()
279 struct nilfs_palloc_req req; nilfs_dat_mark_dirty() local
282 req.pr_entry_nr = vblocknr; nilfs_dat_mark_dirty()
283 ret = nilfs_dat_prepare_entry(dat, &req, 0); nilfs_dat_mark_dirty()
285 nilfs_dat_commit_entry(dat, &req); nilfs_dat_mark_dirty()
53 nilfs_dat_prepare_entry(struct inode *dat, struct nilfs_palloc_req *req, int create) nilfs_dat_prepare_entry() argument
60 nilfs_dat_commit_entry(struct inode *dat, struct nilfs_palloc_req *req) nilfs_dat_commit_entry() argument
68 nilfs_dat_abort_entry(struct inode *dat, struct nilfs_palloc_req *req) nilfs_dat_abort_entry() argument
112 nilfs_dat_commit_free(struct inode *dat, struct nilfs_palloc_req *req) nilfs_dat_commit_free() argument
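All of the dat.c hits follow one protocol: nilfs_dat_prepare_entry() pins the on-disk entry's buffer into the request, the commit helpers apply the change, mark the buffer dirty and release it, and the abort helpers only release it, leaving the entry untouched. A generic sketch of that prepare/commit/abort shape, with invented demo_* types standing in for buffer heads and palloc requests:

#include <stddef.h>

struct demo_buffer {
    int  pinned;
    int  dirty;
    long value;
};

struct demo_req {
    struct demo_buffer *bh;     /* plays the role of req->pr_entry_bh */
};

int demo_prepare_entry(struct demo_buffer *pool, long nr, struct demo_req *req)
{
    struct demo_buffer *bh = &pool[nr];

    bh->pinned++;               /* like getting the entry block */
    req->bh = bh;
    return 0;
}

void demo_commit_entry(struct demo_req *req, long new_value)
{
    req->bh->value = new_value; /* the actual update */
    req->bh->dirty = 1;         /* mark_buffer_dirty() */
    req->bh->pinned--;          /* brelse() */
    req->bh = NULL;
}

void demo_abort_entry(struct demo_req *req)
{
    req->bh->pinned--;          /* just drop the reference */
    req->bh = NULL;
}

The value of the split is that a multi-step update, such as nilfs_dat_prepare_end() in the excerpts, can be abandoned at any intermediate point without leaving a half-applied entry behind.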
H A Difile.c68 struct nilfs_palloc_req req; nilfs_ifile_create_inode() local
71 req.pr_entry_nr = 0; /* 0 says find free inode from beginning of nilfs_ifile_create_inode()
73 req.pr_entry_bh = NULL; nilfs_ifile_create_inode()
75 ret = nilfs_palloc_prepare_alloc_entry(ifile, &req); nilfs_ifile_create_inode()
77 ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 1, nilfs_ifile_create_inode()
78 &req.pr_entry_bh); nilfs_ifile_create_inode()
80 nilfs_palloc_abort_alloc_entry(ifile, &req); nilfs_ifile_create_inode()
83 brelse(req.pr_entry_bh); nilfs_ifile_create_inode()
86 nilfs_palloc_commit_alloc_entry(ifile, &req); nilfs_ifile_create_inode()
87 mark_buffer_dirty(req.pr_entry_bh); nilfs_ifile_create_inode()
89 *out_ino = (ino_t)req.pr_entry_nr; nilfs_ifile_create_inode()
90 *out_bh = req.pr_entry_bh; nilfs_ifile_create_inode()
110 struct nilfs_palloc_req req = { nilfs_ifile_delete_inode() local
117 ret = nilfs_palloc_prepare_free_entry(ifile, &req); nilfs_ifile_delete_inode()
119 ret = nilfs_palloc_get_entry_block(ifile, req.pr_entry_nr, 0, nilfs_ifile_delete_inode()
120 &req.pr_entry_bh); nilfs_ifile_delete_inode()
122 nilfs_palloc_abort_free_entry(ifile, &req); nilfs_ifile_delete_inode()
125 brelse(req.pr_entry_bh); nilfs_ifile_delete_inode()
129 kaddr = kmap_atomic(req.pr_entry_bh->b_page); nilfs_ifile_delete_inode()
130 raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr, nilfs_ifile_delete_inode()
131 req.pr_entry_bh, kaddr); nilfs_ifile_delete_inode()
135 mark_buffer_dirty(req.pr_entry_bh); nilfs_ifile_delete_inode()
136 brelse(req.pr_entry_bh); nilfs_ifile_delete_inode()
138 nilfs_palloc_commit_free_entry(ifile, &req); nilfs_ifile_delete_inode()
H A Dalloc.c467 * @req: nilfs_palloc_req structure exchanged for the allocation
470 struct nilfs_palloc_req *req) nilfs_palloc_prepare_alloc_entry()
484 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); nilfs_palloc_prepare_alloc_entry()
492 maxgroup = nilfs_palloc_group(inode, req->pr_entry_nr, nilfs_palloc_prepare_alloc_entry()
519 req->pr_entry_nr = nilfs_palloc_prepare_alloc_entry()
524 req->pr_desc_bh = desc_bh; nilfs_palloc_prepare_alloc_entry()
525 req->pr_bitmap_bh = bitmap_bh; nilfs_palloc_prepare_alloc_entry()
551 * @req: nilfs_palloc_req structure exchanged for the allocation
554 struct nilfs_palloc_req *req) nilfs_palloc_commit_alloc_entry()
556 mark_buffer_dirty(req->pr_bitmap_bh); nilfs_palloc_commit_alloc_entry()
557 mark_buffer_dirty(req->pr_desc_bh); nilfs_palloc_commit_alloc_entry()
560 brelse(req->pr_bitmap_bh); nilfs_palloc_commit_alloc_entry()
561 brelse(req->pr_desc_bh); nilfs_palloc_commit_alloc_entry()
567 * @req: nilfs_palloc_req structure exchanged for the removal
570 struct nilfs_palloc_req *req) nilfs_palloc_commit_free_entry()
577 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); nilfs_palloc_commit_free_entry()
578 desc_kaddr = kmap(req->pr_desc_bh->b_page); nilfs_palloc_commit_free_entry()
580 req->pr_desc_bh, desc_kaddr); nilfs_palloc_commit_free_entry()
581 bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page); nilfs_palloc_commit_free_entry()
582 bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh); nilfs_palloc_commit_free_entry()
587 __func__, (unsigned long long)req->pr_entry_nr); nilfs_palloc_commit_free_entry()
591 kunmap(req->pr_bitmap_bh->b_page); nilfs_palloc_commit_free_entry()
592 kunmap(req->pr_desc_bh->b_page); nilfs_palloc_commit_free_entry()
594 mark_buffer_dirty(req->pr_desc_bh); nilfs_palloc_commit_free_entry()
595 mark_buffer_dirty(req->pr_bitmap_bh); nilfs_palloc_commit_free_entry()
598 brelse(req->pr_bitmap_bh); nilfs_palloc_commit_free_entry()
599 brelse(req->pr_desc_bh); nilfs_palloc_commit_free_entry()
605 * @req: nilfs_palloc_req structure exchanged for the allocation
608 struct nilfs_palloc_req *req) nilfs_palloc_abort_alloc_entry()
615 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); nilfs_palloc_abort_alloc_entry()
616 desc_kaddr = kmap(req->pr_desc_bh->b_page); nilfs_palloc_abort_alloc_entry()
618 req->pr_desc_bh, desc_kaddr); nilfs_palloc_abort_alloc_entry()
619 bitmap_kaddr = kmap(req->pr_bitmap_bh->b_page); nilfs_palloc_abort_alloc_entry()
620 bitmap = bitmap_kaddr + bh_offset(req->pr_bitmap_bh); nilfs_palloc_abort_alloc_entry()
624 __func__, (unsigned long long)req->pr_entry_nr); nilfs_palloc_abort_alloc_entry()
628 kunmap(req->pr_bitmap_bh->b_page); nilfs_palloc_abort_alloc_entry()
629 kunmap(req->pr_desc_bh->b_page); nilfs_palloc_abort_alloc_entry()
631 brelse(req->pr_bitmap_bh); nilfs_palloc_abort_alloc_entry()
632 brelse(req->pr_desc_bh); nilfs_palloc_abort_alloc_entry()
634 req->pr_entry_nr = 0; nilfs_palloc_abort_alloc_entry()
635 req->pr_bitmap_bh = NULL; nilfs_palloc_abort_alloc_entry()
636 req->pr_desc_bh = NULL; nilfs_palloc_abort_alloc_entry()
642 * @req: nilfs_palloc_req structure exchanged for the removal
645 struct nilfs_palloc_req *req) nilfs_palloc_prepare_free_entry()
651 group = nilfs_palloc_group(inode, req->pr_entry_nr, &group_offset); nilfs_palloc_prepare_free_entry()
661 req->pr_desc_bh = desc_bh; nilfs_palloc_prepare_free_entry()
662 req->pr_bitmap_bh = bitmap_bh; nilfs_palloc_prepare_free_entry()
669 * @req: nilfs_palloc_req structure exchanged for the removal
672 struct nilfs_palloc_req *req) nilfs_palloc_abort_free_entry()
674 brelse(req->pr_bitmap_bh); nilfs_palloc_abort_free_entry()
675 brelse(req->pr_desc_bh); nilfs_palloc_abort_free_entry()
677 req->pr_entry_nr = 0; nilfs_palloc_abort_free_entry()
678 req->pr_bitmap_bh = NULL; nilfs_palloc_abort_free_entry()
679 req->pr_desc_bh = NULL; nilfs_palloc_abort_free_entry()
469 nilfs_palloc_prepare_alloc_entry(struct inode *inode, struct nilfs_palloc_req *req) nilfs_palloc_prepare_alloc_entry() argument
553 nilfs_palloc_commit_alloc_entry(struct inode *inode, struct nilfs_palloc_req *req) nilfs_palloc_commit_alloc_entry() argument
569 nilfs_palloc_commit_free_entry(struct inode *inode, struct nilfs_palloc_req *req) nilfs_palloc_commit_free_entry() argument
607 nilfs_palloc_abort_alloc_entry(struct inode *inode, struct nilfs_palloc_req *req) nilfs_palloc_abort_alloc_entry() argument
644 nilfs_palloc_prepare_free_entry(struct inode *inode, struct nilfs_palloc_req *req) nilfs_palloc_prepare_free_entry() argument
671 nilfs_palloc_abort_free_entry(struct inode *inode, struct nilfs_palloc_req *req) nilfs_palloc_abort_free_entry() argument
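The alloc.c hits revolve around nilfs_palloc_group(), which splits an entry number into a group and an offset, and around the bitmap/descriptor pair each group keeps: committing a free and aborting an allocation both clear the entry's bit and bump the group's free counter, while committing an allocation only marks the already-updated buffers dirty. A small sketch of the group arithmetic and the bit bookkeeping, with an illustrative group size that is not nilfs2's real value:

#include <limits.h>
#include <stdint.h>

#define DEMO_ENTRIES_PER_GROUP 1024u          /* illustrative only */

struct demo_group {
    uint32_t      nfrees;                     /* free entries in this group */
    unsigned char bitmap[DEMO_ENTRIES_PER_GROUP / CHAR_BIT];
};

unsigned long demo_palloc_group(unsigned long entry_nr, unsigned long *offset)
{
    *offset = entry_nr % DEMO_ENTRIES_PER_GROUP;
    return entry_nr / DEMO_ENTRIES_PER_GROUP;
}

/*
 * Clearing the bit and incrementing nfrees is the common step behind both
 * "commit free" and "abort alloc" in the excerpts above.
 */
void demo_release_entry(struct demo_group *groups, unsigned long entry_nr)
{
    unsigned long offset;
    struct demo_group *g = &groups[demo_palloc_group(entry_nr, &offset)];

    g->bitmap[offset / CHAR_BIT] &= (unsigned char)~(1u << (offset % CHAR_BIT));
    g->nfrees++;
}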
/linux-4.1.27/drivers/crypto/ccp/
H A Dccp-crypto-aes.c27 struct ablkcipher_request *req = ablkcipher_request_cast(async_req); ccp_aes_complete() local
28 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ccp_aes_complete()
29 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); ccp_aes_complete()
35 memcpy(req->info, rctx->iv, AES_BLOCK_SIZE); ccp_aes_complete()
70 static int ccp_aes_crypt(struct ablkcipher_request *req, bool encrypt) ccp_aes_crypt() argument
72 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ccp_aes_crypt()
73 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); ccp_aes_crypt()
84 (req->nbytes & (AES_BLOCK_SIZE - 1))) ccp_aes_crypt()
88 if (!req->info) ccp_aes_crypt()
91 memcpy(rctx->iv, req->info, AES_BLOCK_SIZE); ccp_aes_crypt()
108 rctx->cmd.u.aes.src = req->src; ccp_aes_crypt()
109 rctx->cmd.u.aes.src_len = req->nbytes; ccp_aes_crypt()
110 rctx->cmd.u.aes.dst = req->dst; ccp_aes_crypt()
112 ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); ccp_aes_crypt()
117 static int ccp_aes_encrypt(struct ablkcipher_request *req) ccp_aes_encrypt() argument
119 return ccp_aes_crypt(req, true); ccp_aes_encrypt()
122 static int ccp_aes_decrypt(struct ablkcipher_request *req) ccp_aes_decrypt() argument
124 return ccp_aes_crypt(req, false); ccp_aes_decrypt()
146 struct ablkcipher_request *req = ablkcipher_request_cast(async_req); ccp_aes_rfc3686_complete() local
147 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); ccp_aes_rfc3686_complete()
150 req->info = rctx->rfc3686_info; ccp_aes_rfc3686_complete()
169 static int ccp_aes_rfc3686_crypt(struct ablkcipher_request *req, bool encrypt) ccp_aes_rfc3686_crypt() argument
171 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ccp_aes_rfc3686_crypt()
172 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); ccp_aes_rfc3686_crypt()
180 memcpy(iv, req->info, CTR_RFC3686_IV_SIZE); ccp_aes_rfc3686_crypt()
186 rctx->rfc3686_info = req->info; ccp_aes_rfc3686_crypt()
187 req->info = rctx->rfc3686_iv; ccp_aes_rfc3686_crypt()
189 return ccp_aes_crypt(req, encrypt); ccp_aes_rfc3686_crypt()
192 static int ccp_aes_rfc3686_encrypt(struct ablkcipher_request *req) ccp_aes_rfc3686_encrypt() argument
194 return ccp_aes_rfc3686_crypt(req, true); ccp_aes_rfc3686_encrypt()
197 static int ccp_aes_rfc3686_decrypt(struct ablkcipher_request *req) ccp_aes_rfc3686_decrypt() argument
199 return ccp_aes_rfc3686_crypt(req, false); ccp_aes_rfc3686_decrypt()
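ccp_aes_rfc3686_crypt() above stashes the caller's IV and substitutes a full RFC 3686 counter block before handing the request to the plain CTR path. For background, RFC 3686 defines that 16-byte block as a 4-byte nonce (carried at the end of the key), the 8-byte per-request IV, and a 32-bit big-endian block counter starting at 1. The sketch below builds such a block directly; it is derived from the standard, not from the driver:

#include <string.h>

#define DEMO_NONCE_SIZE 4
#define DEMO_IV_SIZE    8

void demo_rfc3686_counter_block(unsigned char out[16],
                                const unsigned char nonce[DEMO_NONCE_SIZE],
                                const unsigned char iv[DEMO_IV_SIZE])
{
    memcpy(out, nonce, DEMO_NONCE_SIZE);                 /* bytes 0..3  */
    memcpy(out + DEMO_NONCE_SIZE, iv, DEMO_IV_SIZE);     /* bytes 4..11 */
    /* initial block counter = 1, big-endian, bytes 12..15 */
    out[12] = 0;
    out[13] = 0;
    out[14] = 0;
    out[15] = 1;
}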
H A Dccp-crypto-sha.c28 struct ahash_request *req = ahash_request_cast(async_req); ccp_sha_complete() local
29 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ccp_sha_complete()
30 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); ccp_sha_complete()
48 if (req->result) ccp_sha_complete()
49 memcpy(req->result, rctx->ctx, digest_size); ccp_sha_complete()
57 static int ccp_do_sha_update(struct ahash_request *req, unsigned int nbytes, ccp_do_sha_update() argument
60 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ccp_do_sha_update()
62 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); ccp_do_sha_update()
74 scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src, ccp_do_sha_update()
81 rctx->src = req->src; ccp_do_sha_update()
101 gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? ccp_do_sha_update()
103 sg_count = sg_nents(req->src) + 1; ccp_do_sha_update()
110 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); ccp_do_sha_update()
119 sg = req->src; ccp_do_sha_update()
142 ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); ccp_do_sha_update()
147 static int ccp_sha_init(struct ahash_request *req) ccp_sha_init() argument
149 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ccp_sha_init()
151 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); ccp_sha_init()
171 static int ccp_sha_update(struct ahash_request *req) ccp_sha_update() argument
173 return ccp_do_sha_update(req, req->nbytes, 0); ccp_sha_update()
176 static int ccp_sha_final(struct ahash_request *req) ccp_sha_final() argument
178 return ccp_do_sha_update(req, 0, 1); ccp_sha_final()
181 static int ccp_sha_finup(struct ahash_request *req) ccp_sha_finup() argument
183 return ccp_do_sha_update(req, req->nbytes, 1); ccp_sha_finup()
186 static int ccp_sha_digest(struct ahash_request *req) ccp_sha_digest() argument
190 ret = ccp_sha_init(req); ccp_sha_digest()
194 return ccp_sha_finup(req); ccp_sha_digest()
197 static int ccp_sha_export(struct ahash_request *req, void *out) ccp_sha_export() argument
199 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); ccp_sha_export()
218 static int ccp_sha_import(struct ahash_request *req, const void *in) ccp_sha_import() argument
220 struct ccp_sha_req_ctx *rctx = ahash_request_ctx(req); ccp_sha_import()
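ccp_do_sha_update() above buffers any bytes that do not fill a whole hash block in rctx->buf (tracked by buf_count) and prepends them to the next update or the final pass. The same pattern, stripped of scatterlists and the hardware queue, looks like the sketch below; names and the fixed 64-byte block size are illustrative:

#include <stddef.h>
#include <string.h>

#define DEMO_BLOCK_SIZE 64   /* SHA-1/SHA-256 block size */

struct demo_hash_ctx {
    unsigned char buf[DEMO_BLOCK_SIZE];
    size_t        buf_count;             /* bytes already buffered */
};

/* Returns how many bytes were consumed into full blocks; the remainder
 * stays buffered for the next update or the final call. */
size_t demo_hash_update(struct demo_hash_ctx *ctx,
                        const unsigned char *data, size_t len,
                        void (*process_block)(const unsigned char *blk))
{
    size_t consumed = 0;

    /* Top up a previously buffered partial block first. */
    if (ctx->buf_count) {
        size_t need = DEMO_BLOCK_SIZE - ctx->buf_count;
        size_t take = len < need ? len : need;

        memcpy(ctx->buf + ctx->buf_count, data, take);
        ctx->buf_count += take;
        data += take;
        len -= take;

        if (ctx->buf_count == DEMO_BLOCK_SIZE) {
            process_block(ctx->buf);
            ctx->buf_count = 0;
            consumed += DEMO_BLOCK_SIZE;
        }
    }

    /* Feed whole blocks straight through. */
    while (len >= DEMO_BLOCK_SIZE) {
        process_block(data);
        data += DEMO_BLOCK_SIZE;
        len -= DEMO_BLOCK_SIZE;
        consumed += DEMO_BLOCK_SIZE;
    }

    /* Buffer whatever is left. */
    memcpy(ctx->buf + ctx->buf_count, data, len);
    ctx->buf_count += len;

    return consumed;
}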
H A Dccp-crypto-aes-xts.c86 struct ablkcipher_request *req = ablkcipher_request_cast(async_req); ccp_aes_xts_complete() local
87 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); ccp_aes_xts_complete()
92 memcpy(req->info, rctx->iv, AES_BLOCK_SIZE); ccp_aes_xts_complete()
117 static int ccp_aes_xts_crypt(struct ablkcipher_request *req, ccp_aes_xts_crypt() argument
121 crypto_ablkcipher_tfm(crypto_ablkcipher_reqtfm(req)); ccp_aes_xts_crypt()
122 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ccp_aes_xts_crypt()
123 struct ccp_aes_req_ctx *rctx = ablkcipher_request_ctx(req); ccp_aes_xts_crypt()
131 if (req->nbytes & (AES_BLOCK_SIZE - 1)) ccp_aes_xts_crypt()
134 if (!req->info) ccp_aes_xts_crypt()
138 if (req->nbytes <= unit_size_map[0].size) { ccp_aes_xts_crypt()
140 if (!(req->nbytes & (unit_size_map[unit].size - 1))) { ccp_aes_xts_crypt()
152 ablkcipher_request_set_tfm(req, ctx->u.aes.tfm_ablkcipher); ccp_aes_xts_crypt()
153 ret = (encrypt) ? crypto_ablkcipher_encrypt(req) : ccp_aes_xts_crypt()
154 crypto_ablkcipher_decrypt(req); ccp_aes_xts_crypt()
155 ablkcipher_request_set_tfm(req, __crypto_ablkcipher_cast(tfm)); ccp_aes_xts_crypt()
160 memcpy(rctx->iv, req->info, AES_BLOCK_SIZE); ccp_aes_xts_crypt()
173 rctx->cmd.u.xts.src = req->src; ccp_aes_xts_crypt()
174 rctx->cmd.u.xts.src_len = req->nbytes; ccp_aes_xts_crypt()
175 rctx->cmd.u.xts.dst = req->dst; ccp_aes_xts_crypt()
177 ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); ccp_aes_xts_crypt()
182 static int ccp_aes_xts_encrypt(struct ablkcipher_request *req) ccp_aes_xts_encrypt() argument
184 return ccp_aes_xts_crypt(req, 1); ccp_aes_xts_encrypt()
187 static int ccp_aes_xts_decrypt(struct ablkcipher_request *req) ccp_aes_xts_decrypt() argument
189 return ccp_aes_xts_crypt(req, 0); ccp_aes_xts_decrypt()
H A Dccp-crypto-aes-cmac.c29 struct ahash_request *req = ahash_request_cast(async_req); ccp_aes_cmac_complete() local
30 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ccp_aes_cmac_complete()
31 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); ccp_aes_cmac_complete()
49 if (req->result) ccp_aes_cmac_complete()
50 memcpy(req->result, rctx->iv, digest_size); ccp_aes_cmac_complete()
58 static int ccp_do_cmac_update(struct ahash_request *req, unsigned int nbytes, ccp_do_cmac_update() argument
61 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ccp_do_cmac_update()
63 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); ccp_do_cmac_update()
81 scatterwalk_map_and_copy(rctx->buf + rctx->buf_count, req->src, ccp_do_cmac_update()
88 rctx->src = req->src; ccp_do_cmac_update()
110 sg_count = (nbytes) ? sg_nents(req->src) + 2 : 2; ccp_do_cmac_update()
111 gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? ccp_do_cmac_update()
124 sg = ccp_crypto_sg_table_add(&rctx->data_sg, req->src); ccp_do_cmac_update()
163 ret = ccp_crypto_enqueue_request(&req->base, &rctx->cmd); ccp_do_cmac_update()
168 static int ccp_aes_cmac_init(struct ahash_request *req) ccp_aes_cmac_init() argument
170 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); ccp_aes_cmac_init()
179 static int ccp_aes_cmac_update(struct ahash_request *req) ccp_aes_cmac_update() argument
181 return ccp_do_cmac_update(req, req->nbytes, 0); ccp_aes_cmac_update()
184 static int ccp_aes_cmac_final(struct ahash_request *req) ccp_aes_cmac_final() argument
186 return ccp_do_cmac_update(req, 0, 1); ccp_aes_cmac_final()
189 static int ccp_aes_cmac_finup(struct ahash_request *req) ccp_aes_cmac_finup() argument
191 return ccp_do_cmac_update(req, req->nbytes, 1); ccp_aes_cmac_finup()
194 static int ccp_aes_cmac_digest(struct ahash_request *req) ccp_aes_cmac_digest() argument
198 ret = ccp_aes_cmac_init(req); ccp_aes_cmac_digest()
202 return ccp_aes_cmac_finup(req); ccp_aes_cmac_digest()
205 static int ccp_aes_cmac_export(struct ahash_request *req, void *out) ccp_aes_cmac_export() argument
207 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); ccp_aes_cmac_export()
224 static int ccp_aes_cmac_import(struct ahash_request *req, const void *in) ccp_aes_cmac_import() argument
226 struct ccp_aes_cmac_req_ctx *rctx = ahash_request_ctx(req); ccp_aes_cmac_import()
H A Dccp-crypto-main.c66 struct crypto_async_request *req; member in struct:ccp_crypto_cmd
140 struct crypto_async_request *req = crypto_cmd->req; ccp_crypto_complete() local
141 struct ccp_ctx *ctx = crypto_tfm_ctx(req->tfm); ccp_crypto_complete()
148 req->complete(req, -EINPROGRESS); ccp_crypto_complete()
161 backlog->req->complete(backlog->req, -EINPROGRESS); ccp_crypto_complete()
166 req->complete(req, -EINPROGRESS); ccp_crypto_complete()
171 ret = ctx->complete(req, ret); ccp_crypto_complete()
172 req->complete(req, ret); ccp_crypto_complete()
185 ctx = crypto_tfm_ctx(held->req->tfm); ccp_crypto_complete()
187 ret = ctx->complete(held->req, ret); ccp_crypto_complete()
188 held->req->complete(held->req, ret); ccp_crypto_complete()
193 backlog->req->complete(backlog->req, -EINPROGRESS); ccp_crypto_complete()
265 * @req: crypto_async_request struct to be processed
268 int ccp_crypto_enqueue_request(struct crypto_async_request *req, ccp_crypto_enqueue_request() argument
274 gfp = req->flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL : GFP_ATOMIC; ccp_crypto_enqueue_request()
281 * crypto_async_request (req) pointer because it is used after ccp_crypto_enqueue_request()
282 * completion callback for the request and the req pointer ccp_crypto_enqueue_request()
286 crypto_cmd->req = req; ccp_crypto_enqueue_request()
287 crypto_cmd->tfm = req->tfm; ccp_crypto_enqueue_request()
292 if (req->flags & CRYPTO_TFM_REQ_MAY_BACKLOG) ccp_crypto_enqueue_request()
/linux-4.1.27/drivers/target/sbp/
H A Dsbp_target.c70 static int read_peer_guid(u64 *guid, const struct sbp_management_request *req) read_peer_guid() argument
75 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, read_peer_guid()
76 req->node_addr, req->generation, req->speed, read_peer_guid()
82 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST, read_peer_guid()
83 req->node_addr, req->generation, req->speed, read_peer_guid()
293 struct sbp_management_agent *agent, struct sbp_management_request *req, sbp_management_request_login()
307 LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); sbp_management_request_login()
310 LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc))); sbp_management_request_login()
312 req->status.status = cpu_to_be32( sbp_management_request_login()
318 ret = read_peer_guid(&guid, req); sbp_management_request_login()
322 req->status.status = cpu_to_be32( sbp_management_request_login()
360 if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) && sbp_management_request_login()
364 req->status.status = cpu_to_be32( sbp_management_request_login()
377 req->status.status = cpu_to_be32( sbp_management_request_login()
391 req->status.status = cpu_to_be32( sbp_management_request_login()
409 req->status.status = cpu_to_be32( sbp_management_request_login()
416 sess->node_id = req->node_addr; sbp_management_request_login()
417 sess->card = fw_card_get(req->card); sbp_management_request_login()
418 sess->generation = req->generation; sbp_management_request_login()
419 sess->speed = req->speed; sbp_management_request_login()
427 1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)), sbp_management_request_login()
436 req->status.status = cpu_to_be32( sbp_management_request_login()
444 login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo); sbp_management_request_login()
445 login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)); sbp_management_request_login()
456 req->status.status = cpu_to_be32( sbp_management_request_login()
473 req->status.status = cpu_to_be32( sbp_management_request_login()
480 LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)), sbp_management_request_login()
491 sbp2_pointer_to_addr(&req->orb.ptr2), response, sbp_management_request_login()
499 req->status.status = cpu_to_be32( sbp_management_request_login()
507 req->status.status = cpu_to_be32( sbp_management_request_login()
513 struct sbp_management_agent *agent, struct sbp_management_request *req, sbp_management_request_query_logins()
519 req->status.status = cpu_to_be32( sbp_management_request_query_logins()
525 struct sbp_management_agent *agent, struct sbp_management_request *req, sbp_management_request_reconnect()
534 ret = read_peer_guid(&guid, req); sbp_management_request_reconnect()
538 req->status.status = cpu_to_be32( sbp_management_request_reconnect()
547 RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc))); sbp_management_request_reconnect()
552 req->status.status = cpu_to_be32( sbp_management_request_reconnect()
561 req->status.status = cpu_to_be32( sbp_management_request_reconnect()
572 login->sess->generation = req->generation; sbp_management_request_reconnect()
573 login->sess->node_id = req->node_addr; sbp_management_request_reconnect()
574 login->sess->card = fw_card_get(req->card); sbp_management_request_reconnect()
575 login->sess->speed = req->speed; sbp_management_request_reconnect()
578 req->status.status = cpu_to_be32( sbp_management_request_reconnect()
584 struct sbp_management_agent *agent, struct sbp_management_request *req, sbp_management_request_logout()
592 id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); sbp_management_request_logout()
598 req->status.status = cpu_to_be32( sbp_management_request_logout()
607 if (req->node_addr != login->sess->node_id) { sbp_management_request_logout()
610 req->status.status = cpu_to_be32( sbp_management_request_logout()
618 req->status.status = cpu_to_be32( sbp_management_request_logout()
891 struct sbp_target_request *req = tgt_agent_process_work() local
895 req->orb_pointer, tgt_agent_process_work()
896 sbp2_pointer_to_addr(&req->orb.next_orb), tgt_agent_process_work()
897 sbp2_pointer_to_addr(&req->orb.data_descriptor), tgt_agent_process_work()
898 be32_to_cpu(req->orb.misc)); tgt_agent_process_work()
900 if (req->orb_pointer >> 32) tgt_agent_process_work()
903 switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) { tgt_agent_process_work()
905 sbp_handle_command(req); tgt_agent_process_work()
909 req->status.status |= cpu_to_be32( tgt_agent_process_work()
916 sbp_send_status(req); tgt_agent_process_work()
917 sbp_free_request(req); tgt_agent_process_work()
920 req->status.status |= cpu_to_be32( tgt_agent_process_work()
927 sbp_send_status(req); tgt_agent_process_work()
928 sbp_free_request(req); tgt_agent_process_work()
952 struct sbp_target_request *req; tgt_agent_fetch_work() local
958 req = kzalloc(sizeof(*req), GFP_KERNEL); tgt_agent_fetch_work()
959 if (!req) { tgt_agent_fetch_work()
966 req->login = agent->login; tgt_agent_fetch_work()
967 req->orb_pointer = next_orb; tgt_agent_fetch_work()
969 req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH( tgt_agent_fetch_work()
970 req->orb_pointer >> 32)); tgt_agent_fetch_work()
971 req->status.orb_low = cpu_to_be32( tgt_agent_fetch_work()
972 req->orb_pointer & 0xfffffffc); tgt_agent_fetch_work()
977 req->orb_pointer, &req->orb, sizeof(req->orb)); tgt_agent_fetch_work()
980 req->status.status |= cpu_to_be32( tgt_agent_fetch_work()
993 sbp_send_status(req); tgt_agent_fetch_work()
994 sbp_free_request(req); tgt_agent_fetch_work()
999 if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) { tgt_agent_fetch_work()
1001 req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( tgt_agent_fetch_work()
1004 next_orb = sbp2_pointer_to_addr(&req->orb.next_orb); tgt_agent_fetch_work()
1005 req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC( tgt_agent_fetch_work()
1010 INIT_WORK(&req->work, tgt_agent_process_work); tgt_agent_fetch_work()
1011 queue_work(system_unbound_wq, &req->work); tgt_agent_fetch_work()
1014 sbp_free_request(req); tgt_agent_fetch_work()
1103 static int sbp_run_request_transaction(struct sbp_target_request *req, sbp_run_request_transaction() argument
1107 struct sbp_login_descriptor *login = req->login; sbp_run_request_transaction()
1127 static int sbp_fetch_command(struct sbp_target_request *req) sbp_fetch_command() argument
1131 cmd_len = scsi_command_size(req->orb.command_block); sbp_fetch_command()
1133 req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL); sbp_fetch_command()
1134 if (!req->cmd_buf) sbp_fetch_command()
1137 memcpy(req->cmd_buf, req->orb.command_block, sbp_fetch_command()
1138 min_t(int, cmd_len, sizeof(req->orb.command_block))); sbp_fetch_command()
1140 if (cmd_len > sizeof(req->orb.command_block)) { sbp_fetch_command()
1142 copy_len = cmd_len - sizeof(req->orb.command_block); sbp_fetch_command()
1144 ret = sbp_run_request_transaction(req, sbp_fetch_command()
1146 req->orb_pointer + sizeof(req->orb), sbp_fetch_command()
1147 req->cmd_buf + sizeof(req->orb.command_block), sbp_fetch_command()
1156 static int sbp_fetch_page_table(struct sbp_target_request *req) sbp_fetch_page_table() argument
1161 if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc))) sbp_fetch_page_table()
1164 pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) * sbp_fetch_page_table()
1171 ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST, sbp_fetch_page_table()
1172 sbp2_pointer_to_addr(&req->orb.data_descriptor), sbp_fetch_page_table()
1179 req->pg_tbl = pg_tbl; sbp_fetch_page_table()
1183 static void sbp_calc_data_length_direction(struct sbp_target_request *req, sbp_calc_data_length_direction() argument
1188 data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)); sbp_calc_data_length_direction()
1189 direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc)); sbp_calc_data_length_direction()
1199 if (req->pg_tbl) { sbp_calc_data_length_direction()
1203 req->pg_tbl[idx].segment_length); sbp_calc_data_length_direction()
1210 static void sbp_handle_command(struct sbp_target_request *req) sbp_handle_command() argument
1212 struct sbp_login_descriptor *login = req->login; sbp_handle_command()
1218 ret = sbp_fetch_command(req); sbp_handle_command()
1224 ret = sbp_fetch_page_table(req); sbp_handle_command()
1231 unpacked_lun = req->login->lun->unpacked_lun; sbp_handle_command()
1232 sbp_calc_data_length_direction(req, &data_length, &data_dir); sbp_handle_command()
1235 req->orb_pointer, unpacked_lun, data_length, data_dir); sbp_handle_command()
1237 if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf, sbp_handle_command()
1238 req->sense_buf, unpacked_lun, data_length, sbp_handle_command()
1245 req->status.status |= cpu_to_be32( sbp_handle_command()
1250 sbp_send_status(req); sbp_handle_command()
1251 sbp_free_request(req); sbp_handle_command()
1258 static int sbp_rw_data(struct sbp_target_request *req) sbp_rw_data() argument
1260 struct sbp_session *sess = req->login->sess; sbp_rw_data()
1269 if (req->se_cmd.data_direction == DMA_FROM_DEVICE) { sbp_rw_data()
1277 max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc)); sbp_rw_data()
1278 speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc)); sbp_rw_data()
1280 pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc)); sbp_rw_data()
1292 if (req->pg_tbl) { sbp_rw_data()
1293 pte = req->pg_tbl; sbp_rw_data()
1294 num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)); sbp_rw_data()
1302 offset = sbp2_pointer_to_addr(&req->orb.data_descriptor); sbp_rw_data()
1303 length = req->se_cmd.data_length; sbp_rw_data()
1306 sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents, sbp_rw_data()
1348 static int sbp_send_status(struct sbp_target_request *req) sbp_send_status() argument
1351 struct sbp_login_descriptor *login = req->login; sbp_send_status()
1353 length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4; sbp_send_status()
1355 ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST, sbp_send_status()
1356 login->status_fifo_addr, &req->status, length); sbp_send_status()
1363 req->orb_pointer); sbp_send_status()
1368 static void sbp_sense_mangle(struct sbp_target_request *req) sbp_sense_mangle() argument
1370 struct se_cmd *se_cmd = &req->se_cmd; sbp_sense_mangle()
1371 u8 *sense = req->sense_buf; sbp_sense_mangle()
1372 u8 *status = req->status.data; sbp_sense_mangle()
1392 req->status.status |= cpu_to_be32( sbp_sense_mangle()
1428 req->status.status |= cpu_to_be32( sbp_sense_mangle()
1435 static int sbp_send_sense(struct sbp_target_request *req) sbp_send_sense() argument
1437 struct se_cmd *se_cmd = &req->se_cmd; sbp_send_sense()
1440 sbp_sense_mangle(req); sbp_send_sense()
1442 req->status.status |= cpu_to_be32( sbp_send_sense()
1449 return sbp_send_status(req); sbp_send_sense()
1452 static void sbp_free_request(struct sbp_target_request *req) sbp_free_request() argument
1454 kfree(req->pg_tbl); sbp_free_request()
1455 kfree(req->cmd_buf); sbp_free_request()
1456 kfree(req); sbp_free_request()
1463 struct sbp_management_request *req = agent->request; sbp_mgt_agent_process() local
1468 ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST, sbp_mgt_agent_process()
1469 req->node_addr, req->generation, req->speed, sbp_mgt_agent_process()
1470 agent->orb_offset, &req->orb, sizeof(req->orb)); sbp_mgt_agent_process()
1477 sbp2_pointer_to_addr(&req->orb.ptr1), sbp_mgt_agent_process()
1478 sbp2_pointer_to_addr(&req->orb.ptr2), sbp_mgt_agent_process()
1479 be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length), sbp_mgt_agent_process()
1480 sbp2_pointer_to_addr(&req->orb.status_fifo)); sbp_mgt_agent_process()
1482 if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) || sbp_mgt_agent_process()
1483 ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) { sbp_mgt_agent_process()
1488 switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) { sbp_mgt_agent_process()
1490 sbp_management_request_login(agent, req, &status_data_len); sbp_mgt_agent_process()
1494 sbp_management_request_query_logins(agent, req, sbp_mgt_agent_process()
1499 sbp_management_request_reconnect(agent, req, &status_data_len); sbp_mgt_agent_process()
1505 req->status.status = cpu_to_be32( sbp_mgt_agent_process()
1512 sbp_management_request_logout(agent, req, &status_data_len); sbp_mgt_agent_process()
1518 req->status.status = cpu_to_be32( sbp_mgt_agent_process()
1527 req->status.status = cpu_to_be32( sbp_mgt_agent_process()
1536 req->status.status = cpu_to_be32( sbp_mgt_agent_process()
1545 req->status.status = cpu_to_be32( sbp_mgt_agent_process()
1553 MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))); sbp_mgt_agent_process()
1555 req->status.status = cpu_to_be32( sbp_mgt_agent_process()
1562 req->status.status |= cpu_to_be32( sbp_mgt_agent_process()
1566 req->status.orb_low = cpu_to_be32(agent->orb_offset); sbp_mgt_agent_process()
1569 ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST, sbp_mgt_agent_process()
1570 req->node_addr, req->generation, req->speed, sbp_mgt_agent_process()
1571 sbp2_pointer_to_addr(&req->orb.status_fifo), sbp_mgt_agent_process()
1572 &req->status, 8 + status_data_len); sbp_mgt_agent_process()
1579 fw_card_put(req->card); sbp_mgt_agent_process()
1580 kfree(req); sbp_mgt_agent_process()
1603 struct sbp_management_request *req; sbp_mgt_agent_rw() local
1617 req = kzalloc(sizeof(*req), GFP_ATOMIC); sbp_mgt_agent_rw()
1618 if (!req) { sbp_mgt_agent_rw()
1623 req->card = fw_card_get(card); sbp_mgt_agent_rw()
1624 req->generation = generation; sbp_mgt_agent_rw()
1625 req->node_addr = source; sbp_mgt_agent_rw()
1626 req->speed = fw_get_request_speed(request); sbp_mgt_agent_rw()
1629 agent->request = req; sbp_mgt_agent_rw()
1744 struct sbp_target_request *req = container_of(se_cmd, sbp_release_cmd() local
1747 sbp_free_request(req); sbp_release_cmd()
1767 struct sbp_target_request *req = container_of(se_cmd, sbp_write_pending() local
1771 ret = sbp_rw_data(req); sbp_write_pending()
1773 req->status.status |= cpu_to_be32( sbp_write_pending()
1780 sbp_send_status(req); sbp_write_pending()
1800 struct sbp_target_request *req = container_of(se_cmd, sbp_get_task_tag() local
1804 return (u32)req->orb_pointer; sbp_get_task_tag()
1814 struct sbp_target_request *req = container_of(se_cmd, sbp_queue_data_in() local
1818 ret = sbp_rw_data(req); sbp_queue_data_in()
1820 req->status.status |= cpu_to_be32( sbp_queue_data_in()
1825 sbp_send_status(req); sbp_queue_data_in()
1829 return sbp_send_sense(req); sbp_queue_data_in()
1838 struct sbp_target_request *req = container_of(se_cmd, sbp_queue_status() local
1841 return sbp_send_sense(req); sbp_queue_status()
1855 struct sbp_target_request *req = container_of(se_cmd, sbp_check_stop_free() local
1858 transport_generic_free_cmd(&req->se_cmd, 0); sbp_check_stop_free()
292 sbp_management_request_login( struct sbp_management_agent *agent, struct sbp_management_request *req, int *status_data_size) sbp_management_request_login() argument
512 sbp_management_request_query_logins( struct sbp_management_agent *agent, struct sbp_management_request *req, int *status_data_size) sbp_management_request_query_logins() argument
524 sbp_management_request_reconnect( struct sbp_management_agent *agent, struct sbp_management_request *req, int *status_data_size) sbp_management_request_reconnect() argument
583 sbp_management_request_logout( struct sbp_management_agent *agent, struct sbp_management_request *req, int *status_data_size) sbp_management_request_logout() argument
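Among the sbp_target.c hits, sbp_fetch_command() isolates a useful pattern: the first bytes of the SCSI CDB arrive inline inside the ORB, and only if scsi_command_size() says the CDB is longer does the target issue a second read to fetch the tail from the initiator's memory. A sketch of that split, where demo_read_remote_fn is an assumed callback standing in for sbp_run_request_transaction() and the inline size is illustrative (the real code offsets the tail from the end of the whole ORB):

#include <stdlib.h>
#include <string.h>

#define DEMO_INLINE_CDB 12                    /* illustrative inline CDB bytes */

typedef int (*demo_read_remote_fn)(unsigned long long addr, void *buf, size_t len);

unsigned char *demo_fetch_command(const unsigned char inline_cdb[DEMO_INLINE_CDB],
                                  size_t cmd_len, unsigned long long tail_addr,
                                  demo_read_remote_fn read_remote)
{
    unsigned char *cmd = malloc(cmd_len);

    if (!cmd)
        return NULL;

    /* copy what already travelled inside the ORB */
    memcpy(cmd, inline_cdb,
           cmd_len < DEMO_INLINE_CDB ? cmd_len : (size_t)DEMO_INLINE_CDB);

    /* fetch the remainder from the initiator if the CDB did not fit */
    if (cmd_len > DEMO_INLINE_CDB &&
        read_remote(tail_addr, cmd + DEMO_INLINE_CDB,
                    cmd_len - DEMO_INLINE_CDB) != 0) {
        free(cmd);
        return NULL;
    }
    return cmd;
}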
/linux-4.1.27/fs/lockd/
H A Dclntproc.c124 static void nlmclnt_setlockargs(struct nlm_rqst *req, struct file_lock *fl) nlmclnt_setlockargs() argument
126 struct nlm_args *argp = &req->a_args; nlmclnt_setlockargs()
128 char *nodename = req->a_host->h_rpcclnt->cl_nodename; nlmclnt_setlockargs()
133 lock->oh.data = req->a_owner; nlmclnt_setlockargs()
134 lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s", nlmclnt_setlockargs()
143 static void nlmclnt_release_lockargs(struct nlm_rqst *req) nlmclnt_release_lockargs() argument
145 WARN_ON_ONCE(req->a_args.lock.fl.fl_ops != NULL); nlmclnt_release_lockargs()
249 nlmclnt_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc) nlmclnt_call() argument
251 struct nlm_host *host = req->a_host; nlmclnt_call()
253 struct nlm_args *argp = &req->a_args; nlmclnt_call()
254 struct nlm_res *resp = &req->a_res; nlmclnt_call()
327 static struct rpc_task *__nlm_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) __nlm_async_call() argument
329 struct nlm_host *host = req->a_host; __nlm_async_call()
334 .callback_data = req, __nlm_async_call()
351 tk_ops->rpc_release(req); __nlm_async_call()
355 static int nlm_do_async_call(struct nlm_rqst *req, u32 proc, struct rpc_message *msg, const struct rpc_call_ops *tk_ops) nlm_do_async_call() argument
359 task = __nlm_async_call(req, proc, msg, tk_ops); nlm_do_async_call()
369 int nlm_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) nlm_async_call() argument
372 .rpc_argp = &req->a_args, nlm_async_call()
373 .rpc_resp = &req->a_res, nlm_async_call()
375 return nlm_do_async_call(req, proc, &msg, tk_ops); nlm_async_call()
378 int nlm_async_reply(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) nlm_async_reply() argument
381 .rpc_argp = &req->a_res, nlm_async_reply()
383 return nlm_do_async_call(req, proc, &msg, tk_ops); nlm_async_reply()
394 static int nlmclnt_async_call(struct rpc_cred *cred, struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops) nlmclnt_async_call() argument
397 .rpc_argp = &req->a_args, nlmclnt_async_call()
398 .rpc_resp = &req->a_res, nlmclnt_async_call()
404 task = __nlm_async_call(req, proc, &msg, tk_ops); nlmclnt_async_call()
416 nlmclnt_test(struct nlm_rqst *req, struct file_lock *fl) nlmclnt_test() argument
420 status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_TEST); nlmclnt_test()
424 switch (req->a_res.status) { nlmclnt_test()
432 fl->fl_start = req->a_res.lock.fl.fl_start; nlmclnt_test()
433 fl->fl_end = req->a_res.lock.fl.fl_end; nlmclnt_test()
434 fl->fl_type = req->a_res.lock.fl.fl_type; nlmclnt_test()
438 status = nlm_stat_to_errno(req->a_res.status); nlmclnt_test()
441 nlmclnt_release_call(req); nlmclnt_test()
512 nlmclnt_lock(struct nlm_rqst *req, struct file_lock *fl) nlmclnt_lock() argument
515 struct nlm_host *host = req->a_host; nlmclnt_lock()
516 struct nlm_res *resp = &req->a_res; nlmclnt_lock()
524 req->a_args.state = nsm_local_state; nlmclnt_lock()
542 status = nlmclnt_call(cred, req, NLMPROC_LOCK); nlmclnt_lock()
551 status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); nlmclnt_lock()
562 if (!req->a_args.block) nlmclnt_lock()
564 if (nlmclnt_cancel(host, req->a_args.block, fl) == 0) nlmclnt_lock()
597 nlmclnt_release_call(req); nlmclnt_lock()
611 nlmclnt_async_call(cred, req, NLMPROC_UNLOCK, &nlmclnt_unlock_ops); nlmclnt_lock()
620 struct nlm_rqst *req) nlmclnt_reclaim()
624 memset(req, 0, sizeof(*req)); nlmclnt_reclaim()
625 locks_init_lock(&req->a_args.lock.fl); nlmclnt_reclaim()
626 locks_init_lock(&req->a_res.lock.fl); nlmclnt_reclaim()
627 req->a_host = host; nlmclnt_reclaim()
630 nlmclnt_setlockargs(req, fl); nlmclnt_reclaim()
631 req->a_args.reclaim = 1; nlmclnt_reclaim()
633 status = nlmclnt_call(nfs_file_cred(fl->fl_file), req, NLMPROC_LOCK); nlmclnt_reclaim()
634 if (status >= 0 && req->a_res.status == nlm_granted) nlmclnt_reclaim()
639 status, ntohl(req->a_res.status)); nlmclnt_reclaim()
660 nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl) nlmclnt_unlock() argument
662 struct nlm_host *host = req->a_host; nlmclnt_unlock()
663 struct nlm_res *resp = &req->a_res; nlmclnt_unlock()
682 atomic_inc(&req->a_count); nlmclnt_unlock()
683 status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, nlmclnt_unlock()
697 nlmclnt_release_call(req); nlmclnt_unlock()
703 struct nlm_rqst *req = data; nlmclnt_unlock_callback() local
704 u32 status = ntohl(req->a_res.status); nlmclnt_unlock_callback()
728 nlm_rebind_host(req->a_host); nlmclnt_unlock_callback()
745 struct nlm_rqst *req; nlmclnt_cancel() local
751 req = nlm_alloc_call(host); nlmclnt_cancel()
752 if (!req) nlmclnt_cancel()
754 req->a_flags = RPC_TASK_ASYNC; nlmclnt_cancel()
756 nlmclnt_setlockargs(req, fl); nlmclnt_cancel()
757 req->a_args.block = block; nlmclnt_cancel()
759 atomic_inc(&req->a_count); nlmclnt_cancel()
760 status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req, nlmclnt_cancel()
762 if (status == 0 && req->a_res.status == nlm_lck_denied) nlmclnt_cancel()
764 nlmclnt_release_call(req); nlmclnt_cancel()
770 struct nlm_rqst *req = data; nlmclnt_cancel_callback() local
771 u32 status = ntohl(req->a_res.status); nlmclnt_cancel_callback()
804 if (req->a_retries++ >= NLMCLNT_MAX_RETRIES) nlmclnt_cancel_callback()
806 nlm_rebind_host(req->a_host); nlmclnt_cancel_callback()
619 nlmclnt_reclaim(struct nlm_host *host, struct file_lock *fl, struct nlm_rqst *req) nlmclnt_reclaim() argument
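Taken together, the clntproc.c hits above show the async NLM request pattern: allocate an nlm_rqst with nlm_alloc_call(), fill the lock arguments, take an extra reference for the RPC completion path, fire nlmclnt_async_call(), then drop the caller's reference with nlmclnt_release_call(). A minimal sketch modelled on the cancel path follows; the NLMPROC_CANCEL constant, the nlmclnt_cancel_ops callback table and the -ENOLCK mapping are assumptions drawn from the kernel's cancel handling rather than from the lines shown.

static int example_nlm_cancel(struct nlm_host *host, int block,
			      struct file_lock *fl)
{
	struct nlm_rqst *req;
	int status;

	req = nlm_alloc_call(host);
	if (!req)
		return -ENOMEM;
	req->a_flags = RPC_TASK_ASYNC;		/* completion runs from rpciod */

	nlmclnt_setlockargs(req, fl);
	req->a_args.block = block;

	atomic_inc(&req->a_count);		/* extra ref for the async callback */
	status = nlmclnt_async_call(nfs_file_cred(fl->fl_file), req,
				    NLMPROC_CANCEL, &nlmclnt_cancel_ops);
	if (status == 0 && req->a_res.status == nlm_lck_denied)
		status = -ENOLCK;		/* server refused the cancel */
	nlmclnt_release_call(req);
	return status;
}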
/linux-4.1.27/net/bluetooth/
H A Dhci_request.h33 void hci_req_init(struct hci_request *req, struct hci_dev *hdev);
34 int hci_req_run(struct hci_request *req, hci_req_complete_t complete);
35 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete);
36 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
38 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
47 void hci_req_add_le_scan_disable(struct hci_request *req);
48 void hci_req_add_le_passive_scan(struct hci_request *req);
51 void __hci_update_page_scan(struct hci_request *req);
53 int hci_update_random_address(struct hci_request *req, bool require_privacy,
57 void __hci_update_background_scan(struct hci_request *req);
H A Dhci_request.c30 void hci_req_init(struct hci_request *req, struct hci_dev *hdev) hci_req_init() argument
32 skb_queue_head_init(&req->cmd_q); hci_req_init()
33 req->hdev = hdev; hci_req_init()
34 req->err = 0; hci_req_init()
37 static int req_run(struct hci_request *req, hci_req_complete_t complete, req_run() argument
40 struct hci_dev *hdev = req->hdev; req_run()
44 BT_DBG("length %u", skb_queue_len(&req->cmd_q)); req_run()
49 if (req->err) { req_run()
50 skb_queue_purge(&req->cmd_q); req_run()
51 return req->err; req_run()
55 if (skb_queue_empty(&req->cmd_q)) req_run()
58 skb = skb_peek_tail(&req->cmd_q); req_run()
59 bt_cb(skb)->req.complete = complete; req_run()
60 bt_cb(skb)->req.complete_skb = complete_skb; req_run()
63 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q); req_run()
71 int hci_req_run(struct hci_request *req, hci_req_complete_t complete) hci_req_run() argument
73 return req_run(req, complete, NULL); hci_req_run()
76 int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete) hci_req_run_skb() argument
78 return req_run(req, NULL, complete); hci_req_run_skb()
108 void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, hci_req_add_ev() argument
111 struct hci_dev *hdev = req->hdev; hci_req_add_ev()
119 if (req->err) hci_req_add_ev()
126 req->err = -ENOMEM; hci_req_add_ev()
130 if (skb_queue_empty(&req->cmd_q)) hci_req_add_ev()
131 bt_cb(skb)->req.start = true; hci_req_add_ev()
133 bt_cb(skb)->req.event = event; hci_req_add_ev()
135 skb_queue_tail(&req->cmd_q, skb); hci_req_add_ev()
138 void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, hci_req_add() argument
141 hci_req_add_ev(req, opcode, plen, param, 0); hci_req_add()
144 void hci_req_add_le_scan_disable(struct hci_request *req) hci_req_add_le_scan_disable() argument
150 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); hci_req_add_le_scan_disable()
153 static void add_to_white_list(struct hci_request *req, add_to_white_list() argument
161 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp); add_to_white_list()
164 static u8 update_white_list(struct hci_request *req) update_white_list() argument
166 struct hci_dev *hdev = req->hdev; update_white_list()
191 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST, update_white_list()
222 add_to_white_list(req, params); update_white_list()
246 add_to_white_list(req, params); update_white_list()
253 void hci_req_add_le_passive_scan(struct hci_request *req) hci_req_add_le_passive_scan() argument
257 struct hci_dev *hdev = req->hdev; hci_req_add_le_passive_scan()
267 if (hci_update_random_address(req, false, &own_addr_type)) hci_req_add_le_passive_scan()
274 filter_policy = update_white_list(req); hci_req_add_le_passive_scan()
295 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp), hci_req_add_le_passive_scan()
301 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp), hci_req_add_le_passive_scan()
305 static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) set_random_addr() argument
307 struct hci_dev *hdev = req->hdev; set_random_addr()
326 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); set_random_addr()
329 int hci_update_random_address(struct hci_request *req, bool require_privacy, hci_update_random_address() argument
332 struct hci_dev *hdev = req->hdev; hci_update_random_address()
354 set_random_addr(req, &hdev->rpa); hci_update_random_address()
385 set_random_addr(req, &nrpa); hci_update_random_address()
404 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, hci_update_random_address()
435 void __hci_update_page_scan(struct hci_request *req) __hci_update_page_scan() argument
437 struct hci_dev *hdev = req->hdev; __hci_update_page_scan()
461 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); __hci_update_page_scan()
466 struct hci_request req; hci_update_page_scan() local
468 hci_req_init(&req, hdev); hci_update_page_scan()
469 __hci_update_page_scan(&req); hci_update_page_scan()
470 hci_req_run(&req, NULL); hci_update_page_scan()
479 void __hci_update_background_scan(struct hci_request *req) __hci_update_background_scan() argument
481 struct hci_dev *hdev = req->hdev; __hci_update_background_scan()
520 hci_req_add_le_scan_disable(req); __hci_update_background_scan()
540 hci_req_add_le_scan_disable(req); __hci_update_background_scan()
542 hci_req_add_le_passive_scan(req); __hci_update_background_scan()
559 struct hci_request req; hci_update_background_scan() local
561 hci_req_init(&req, hdev); hci_update_background_scan()
563 __hci_update_background_scan(&req); hci_update_background_scan()
565 err = hci_req_run(&req, update_background_scan_complete); hci_update_background_scan()
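hci_request.c above builds HCI commands on a caller-owned struct hci_request and submits them as one batch, as hci_update_page_scan() and hci_update_background_scan() do. A minimal sketch of that build-then-run pattern, assuming only the helpers declared in hci_request.h; the function name is illustrative and the completion callback is left NULL:

static void example_disable_le_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);		/* bind the request to the device */
	hci_req_add_le_scan_disable(&req);	/* queue the scan-disable command */
	hci_req_run(&req, NULL);		/* splice req.cmd_q onto hdev->cmd_q */
}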
H A Da2mp.c139 struct a2mp_discov_req *req = (void *) skb->data; a2mp_discover_req() local
146 if (len < sizeof(*req)) a2mp_discover_req()
149 skb_pull(skb, sizeof(*req)); a2mp_discover_req()
151 ext_feat = le16_to_cpu(req->ext_feat); a2mp_discover_req()
153 BT_DBG("mtu %d efm 0x%4.4x", le16_to_cpu(req->mtu), ext_feat); a2mp_discover_req()
232 struct a2mp_info_req req; a2mp_discover_rsp() local
235 req.id = cl->id; a2mp_discover_rsp()
237 sizeof(req), &req); a2mp_discover_rsp()
292 struct a2mp_info_req *req = (void *) skb->data; a2mp_getinfo_req() local
295 if (le16_to_cpu(hdr->len) < sizeof(*req)) a2mp_getinfo_req()
298 BT_DBG("id %d", req->id); a2mp_getinfo_req()
300 hdev = hci_dev_get(req->id); a2mp_getinfo_req()
304 rsp.id = req->id; a2mp_getinfo_req()
320 skb_pull(skb, sizeof(*req)); a2mp_getinfo_req()
328 struct a2mp_amp_assoc_req req; a2mp_getinfo_rsp() local
343 req.id = rsp->id; a2mp_getinfo_rsp()
344 a2mp_send(mgr, A2MP_GETAMPASSOC_REQ, __next_ident(mgr), sizeof(req), a2mp_getinfo_rsp()
345 &req); a2mp_getinfo_rsp()
354 struct a2mp_amp_assoc_req *req = (void *) skb->data; a2mp_getampassoc_req() local
358 if (le16_to_cpu(hdr->len) < sizeof(*req)) a2mp_getampassoc_req()
361 BT_DBG("id %d", req->id); a2mp_getampassoc_req()
366 hdev = hci_dev_get(req->id); a2mp_getampassoc_req()
369 rsp.id = req->id; a2mp_getampassoc_req()
390 skb_pull(skb, sizeof(*req)); a2mp_getampassoc_req()
458 struct a2mp_physlink_req *req = (void *) skb->data; a2mp_createphyslink_req() local
465 if (le16_to_cpu(hdr->len) < sizeof(*req)) a2mp_createphyslink_req()
468 BT_DBG("local_id %d, remote_id %d", req->local_id, req->remote_id); a2mp_createphyslink_req()
470 rsp.local_id = req->remote_id; a2mp_createphyslink_req()
471 rsp.remote_id = req->local_id; a2mp_createphyslink_req()
473 hdev = hci_dev_get(req->remote_id); a2mp_createphyslink_req()
491 size_t assoc_len = le16_to_cpu(hdr->len) - sizeof(*req); a2mp_createphyslink_req()
494 assoc = kmemdup(req->amp_assoc, assoc_len, GFP_KERNEL); a2mp_createphyslink_req()
508 hcon = phylink_add(hdev, mgr, req->local_id, false); a2mp_createphyslink_req()
538 struct a2mp_physlink_req *req = (void *) skb->data; a2mp_discphyslink_req() local
543 if (le16_to_cpu(hdr->len) < sizeof(*req)) a2mp_discphyslink_req()
546 BT_DBG("local_id %d remote_id %d", req->local_id, req->remote_id); a2mp_discphyslink_req()
548 rsp.local_id = req->remote_id; a2mp_discphyslink_req()
549 rsp.remote_id = req->local_id; a2mp_discphyslink_req()
552 hdev = hci_dev_get(req->remote_id); a2mp_discphyslink_req()
574 skb_pull(skb, sizeof(*req)); a2mp_discphyslink_req()
947 struct a2mp_physlink_req *req; a2mp_send_create_phy_link_req() local
955 len = sizeof(*req) + loc_assoc->len; a2mp_send_create_phy_link_req()
959 req = kzalloc(len, GFP_KERNEL); a2mp_send_create_phy_link_req()
960 if (!req) { a2mp_send_create_phy_link_req()
969 req->local_id = hdev->id; a2mp_send_create_phy_link_req()
970 req->remote_id = bredr_chan->remote_amp_id; a2mp_send_create_phy_link_req()
971 memcpy(req->amp_assoc, loc_assoc->data, loc_assoc->len); a2mp_send_create_phy_link_req()
973 a2mp_send(mgr, A2MP_CREATEPHYSLINK_REQ, __next_ident(mgr), len, req); a2mp_send_create_phy_link_req()
977 kfree(req); a2mp_send_create_phy_link_req()
1010 struct a2mp_discov_req req; a2mp_discover_amp() local
1022 req.mtu = cpu_to_le16(L2CAP_A2MP_DEFAULT_MTU); a2mp_discover_amp()
1023 req.ext_feat = 0; a2mp_discover_amp()
1024 a2mp_send(mgr, A2MP_DISCOVER_REQ, 1, sizeof(req), &req); a2mp_discover_amp()
/linux-4.1.27/drivers/staging/lustre/lustre/osc/
H A Dosc_request.c99 struct ptlrpc_request *req, void *data, int rc);
193 static inline void osc_pack_capa(struct ptlrpc_request *req, osc_pack_capa() argument
202 c = req_capsule_client_get(&req->rq_pill, &RMF_CAPA1); osc_pack_capa()
209 static inline void osc_pack_req_body(struct ptlrpc_request *req, osc_pack_req_body() argument
214 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); osc_pack_req_body()
217 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, osc_pack_req_body()
219 osc_pack_capa(req, body, oinfo->oi_capa); osc_pack_req_body()
222 static inline void osc_set_capa_size(struct ptlrpc_request *req, osc_set_capa_size() argument
227 req_capsule_set_size(&req->rq_pill, field, RCL_CLIENT, 0); osc_set_capa_size()
234 struct ptlrpc_request *req, osc_getattr_interpret()
242 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); osc_getattr_interpret()
245 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, osc_getattr_interpret()
264 struct ptlrpc_request *req; osc_getattr_async() local
268 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); osc_getattr_async()
269 if (req == NULL) osc_getattr_async()
272 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa); osc_getattr_async()
273 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); osc_getattr_async()
275 ptlrpc_request_free(req); osc_getattr_async()
279 osc_pack_req_body(req, oinfo); osc_getattr_async()
281 ptlrpc_request_set_replen(req); osc_getattr_async()
282 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_getattr_interpret; osc_getattr_async()
284 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); osc_getattr_async()
285 aa = ptlrpc_req_async_args(req); osc_getattr_async()
288 ptlrpc_set_add_req(set, req); osc_getattr_async()
295 struct ptlrpc_request *req; osc_getattr() local
299 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_GETATTR); osc_getattr()
300 if (req == NULL) osc_getattr()
303 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa); osc_getattr()
304 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GETATTR); osc_getattr()
306 ptlrpc_request_free(req); osc_getattr()
310 osc_pack_req_body(req, oinfo); osc_getattr()
312 ptlrpc_request_set_replen(req); osc_getattr()
314 rc = ptlrpc_queue_wait(req); osc_getattr()
318 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); osc_getattr()
325 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa, osc_getattr()
332 ptlrpc_req_finished(req); osc_getattr()
339 struct ptlrpc_request *req; osc_setattr() local
345 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); osc_setattr()
346 if (req == NULL) osc_setattr()
349 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa); osc_setattr()
350 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); osc_setattr()
352 ptlrpc_request_free(req); osc_setattr()
356 osc_pack_req_body(req, oinfo); osc_setattr()
358 ptlrpc_request_set_replen(req); osc_setattr()
360 rc = ptlrpc_queue_wait(req); osc_setattr()
364 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); osc_setattr()
370 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oinfo->oi_oa, osc_setattr()
374 ptlrpc_req_finished(req); osc_setattr()
379 struct ptlrpc_request *req, osc_setattr_interpret()
387 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); osc_setattr_interpret()
393 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, sa->sa_oa, osc_setattr_interpret()
405 struct ptlrpc_request *req; osc_setattr_async_base() local
409 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SETATTR); osc_setattr_async_base()
410 if (req == NULL) osc_setattr_async_base()
413 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa); osc_setattr_async_base()
414 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SETATTR); osc_setattr_async_base()
416 ptlrpc_request_free(req); osc_setattr_async_base()
423 osc_pack_req_body(req, oinfo); osc_setattr_async_base()
425 ptlrpc_request_set_replen(req); osc_setattr_async_base()
430 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); osc_setattr_async_base()
432 req->rq_interpret_reply = osc_setattr_async_base()
435 CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args)); osc_setattr_async_base()
436 sa = ptlrpc_req_async_args(req); osc_setattr_async_base()
442 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); osc_setattr_async_base()
444 ptlrpc_set_add_req(rqset, req); osc_setattr_async_base()
461 struct ptlrpc_request *req; osc_real_create() local
476 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_CREATE); osc_real_create()
477 if (req == NULL) { osc_real_create()
482 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_CREATE); osc_real_create()
484 ptlrpc_request_free(req); osc_real_create()
488 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); osc_real_create()
491 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); osc_real_create()
493 ptlrpc_request_set_replen(req); osc_real_create()
497 DEBUG_REQ(D_HA, req, osc_real_create()
499 /* Don't resend the delorphan req */ osc_real_create()
500 req->rq_no_resend = req->rq_no_delay = 1; osc_real_create()
503 rc = ptlrpc_queue_wait(req); osc_real_create()
507 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); osc_real_create()
514 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, oa, &body->oa); osc_real_create()
527 oti->oti_transno = lustre_msg_get_transno(req->rq_repmsg); osc_real_create()
537 lustre_msg_get_transno(req->rq_repmsg)); osc_real_create()
539 ptlrpc_req_finished(req); osc_real_create()
550 struct ptlrpc_request *req; osc_punch_base() local
555 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_PUNCH); osc_punch_base()
556 if (req == NULL) osc_punch_base()
559 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa); osc_punch_base()
560 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_PUNCH); osc_punch_base()
562 ptlrpc_request_free(req); osc_punch_base()
565 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ osc_punch_base()
566 ptlrpc_at_set_req_timeout(req); osc_punch_base()
568 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); osc_punch_base()
570 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, osc_punch_base()
572 osc_pack_capa(req, body, oinfo->oi_capa); osc_punch_base()
574 ptlrpc_request_set_replen(req); osc_punch_base()
576 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_setattr_interpret; osc_punch_base()
577 CLASSERT (sizeof(*sa) <= sizeof(req->rq_async_args)); osc_punch_base()
578 sa = ptlrpc_req_async_args(req); osc_punch_base()
583 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); osc_punch_base()
585 ptlrpc_set_add_req(rqset, req); osc_punch_base()
591 struct ptlrpc_request *req, osc_sync_interpret()
600 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); osc_sync_interpret()
617 struct ptlrpc_request *req; osc_sync_base() local
622 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_SYNC); osc_sync_base()
623 if (req == NULL) osc_sync_base()
626 osc_set_capa_size(req, &RMF_CAPA1, oinfo->oi_capa); osc_sync_base()
627 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SYNC); osc_sync_base()
629 ptlrpc_request_free(req); osc_sync_base()
634 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); osc_sync_base()
636 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, osc_sync_base()
638 osc_pack_capa(req, body, oinfo->oi_capa); osc_sync_base()
640 ptlrpc_request_set_replen(req); osc_sync_base()
641 req->rq_interpret_reply = osc_sync_interpret; osc_sync_base()
643 CLASSERT(sizeof(*fa) <= sizeof(req->rq_async_args)); osc_sync_base()
644 fa = ptlrpc_req_async_args(req); osc_sync_base()
650 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); osc_sync_base()
652 ptlrpc_set_add_req(rqset, req); osc_sync_base()
692 struct ptlrpc_request *req, void *data, osc_destroy_interpret()
695 struct client_obd *cli = &req->rq_import->imp_obd->u.cli; osc_destroy_interpret()
760 struct ptlrpc_request *req; osc_destroy() local
773 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_OST_DESTROY); osc_destroy()
774 if (req == NULL) { osc_destroy()
779 osc_set_capa_size(req, &RMF_CAPA1, (struct obd_capa *)capa); osc_destroy()
780 rc = ldlm_prep_elc_req(exp, req, LUSTRE_OST_VERSION, OST_DESTROY, osc_destroy()
783 ptlrpc_request_free(req); osc_destroy()
787 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ osc_destroy()
788 ptlrpc_at_set_req_timeout(req); osc_destroy()
792 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY); osc_destroy()
794 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); osc_destroy()
796 osc_pack_capa(req, body, (struct obd_capa *)capa); osc_destroy()
797 ptlrpc_request_set_replen(req); osc_destroy()
804 req->rq_interpret_reply = osc_destroy_interpret; osc_destroy()
819 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); osc_destroy()
896 struct ptlrpc_request *req, osc_shrink_grant_interpret()
899 struct client_obd *cli = &req->rq_import->imp_obd->u.cli; osc_shrink_grant_interpret()
908 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); osc_shrink_grant_interpret()
1129 static int check_write_rcs(struct ptlrpc_request *req, check_write_rcs() argument
1136 remote_rcs = req_capsule_server_sized_get(&req->rq_pill, &RMF_RCS, check_write_rcs()
1150 CDEBUG(D_INFO, "rc[%d] invalid (%d) req %p\n", check_write_rcs()
1151 i, remote_rcs[i], req); check_write_rcs()
1156 if (req->rq_bulk->bd_nob_transferred != requested_nob) { check_write_rcs()
1158 req->rq_bulk->bd_nob_transferred, requested_nob); check_write_rcs()
1252 struct ptlrpc_request *req; osc_brw_prep_request() local
1269 req = ptlrpc_request_alloc_pool(cli->cl_import, osc_brw_prep_request()
1274 req = ptlrpc_request_alloc(cli->cl_import, &RQF_OST_BRW_READ); osc_brw_prep_request()
1276 if (req == NULL) osc_brw_prep_request()
1284 pill = &req->rq_pill; osc_brw_prep_request()
1289 osc_set_capa_size(req, &RMF_CAPA1, ocapa); osc_brw_prep_request()
1291 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, opc); osc_brw_prep_request()
1293 ptlrpc_request_free(req); osc_brw_prep_request()
1296 req->rq_request_portal = OST_IO_PORTAL; /* bug 7198 */ osc_brw_prep_request()
1297 ptlrpc_at_set_req_timeout(req); osc_brw_prep_request()
1300 req->rq_no_retry_einprogress = 1; osc_brw_prep_request()
1302 desc = ptlrpc_prep_bulk_imp(req, page_count, osc_brw_prep_request()
1318 lustre_set_wire_obdo(&req->rq_import->imp_connect_data, &body->oa, oa); osc_brw_prep_request()
1328 osc_pack_capa(req, body, ocapa); osc_brw_prep_request()
1368 req_capsule_client_get(&req->rq_pill, &RMF_NIOBUF_REMOTE), osc_brw_prep_request()
1369 "want %p - real %p\n", req_capsule_client_get(&req->rq_pill, osc_brw_prep_request()
1387 !sptlrpc_flavor_has_bulk(&req->rq_flvr)) { osc_brw_prep_request()
1418 !sptlrpc_flavor_has_bulk(&req->rq_flvr)) { osc_brw_prep_request()
1425 ptlrpc_request_set_replen(req); osc_brw_prep_request()
1427 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); osc_brw_prep_request()
1428 aa = ptlrpc_req_async_args(req); osc_brw_prep_request()
1440 *reqp = req; osc_brw_prep_request()
1444 ptlrpc_req_finished(req); osc_brw_prep_request()
1494 static int osc_brw_fini_request(struct ptlrpc_request *req, int rc) osc_brw_fini_request() argument
1496 struct osc_brw_async_args *aa = (void *)&req->rq_async_args; osc_brw_fini_request()
1498 &req->rq_import->imp_connection->c_peer; osc_brw_fini_request()
1504 DEBUG_REQ(D_INFO, req, "Failed request with rc = %d\n", rc); osc_brw_fini_request()
1508 LASSERTF(req->rq_repmsg != NULL, "rc = %d\n", rc); osc_brw_fini_request()
1509 body = req_capsule_server_get(&req->rq_pill, &RMF_OST_BODY); osc_brw_fini_request()
1511 DEBUG_REQ(D_INFO, req, "Can't unpack body\n"); osc_brw_fini_request()
1516 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE && osc_brw_fini_request()
1534 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) { osc_brw_fini_request()
1539 LASSERT(req->rq_bulk->bd_nob == aa->aa_requested_nob); osc_brw_fini_request()
1541 if (sptlrpc_cli_unwrap_bulk_write(req, req->rq_bulk)) osc_brw_fini_request()
1551 rc = check_write_rcs(req, aa->aa_requested_nob, osc_brw_fini_request()
1560 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk, rc); osc_brw_fini_request()
1572 if (rc != req->rq_bulk->bd_nob_transferred) { osc_brw_fini_request()
1574 rc, req->rq_bulk->bd_nob_transferred); osc_brw_fini_request()
1594 if (peer->nid == req->rq_bulk->bd_sender) { osc_brw_fini_request()
1598 router = libcfs_nid2str(req->rq_bulk->bd_sender); osc_brw_fini_request()
1603 req->rq_import->imp_obd->obd_name, osc_brw_fini_request()
1639 lustre_get_wire_obdo(&req->rq_import->imp_connect_data, osc_brw_fini_request()
1755 struct ptlrpc_request *req, void *data, int rc) brw_interpret()
1763 rc = osc_brw_fini_request(req, rc); brw_interpret()
1764 CDEBUG(D_INODE, "request %p aa %p rc %d\n", req, aa, rc); brw_interpret()
1768 if (req->rq_import_generation != brw_interpret()
1769 req->rq_import->imp_generation) { brw_interpret()
1771 req->rq_import->imp_obd->obd_name, brw_interpret()
1775 rc = osc_brw_redo_request(req, aa, rc); brw_interpret()
1778 req->rq_import->imp_obd->obd_name, brw_interpret()
1837 req->rq_bulk->bd_nob_transferred); brw_interpret()
1839 ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred); brw_interpret()
1845 if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) brw_interpret()
1864 struct ptlrpc_request *req = NULL; osc_build_rpc() local
1969 pga, &req, crattr->cra_capa, 1, 0);
1975 req->rq_interpret_reply = brw_interpret;
1978 req->rq_memalloc = 1;
1985 body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
1990 lustre_msg_set_jobid(req->rq_reqmsg, crattr->cra_jobid);
1992 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
1993 aa = ptlrpc_req_async_args(req);
2007 if (oap->oap_interrupted && !req->rq_intr) {
2008 CDEBUG(D_INODE, "oap %p in req %p interrupted\n",
2009 oap, req);
2010 ptlrpc_mark_interrupted(req);
2014 tmp->oap_request = ptlrpc_request_addref(req);
2033 DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
2049 ptlrpcd_add_req(req, pol, -1);
2062 LASSERT(req == NULL);
2143 static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb, osc_enqueue_fini() argument
2153 rep = req_capsule_server_get(&req->rq_pill, osc_enqueue_fini()
2177 struct ptlrpc_request *req, osc_enqueue_interpret()
2214 rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1, osc_enqueue_interpret()
2217 rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie, osc_enqueue_interpret()
2231 LASSERTF(lock != NULL, "lockh %p, req %p, aa %p - client evicted?\n", osc_enqueue_interpret()
2232 aa->oa_lockh, req, aa); osc_enqueue_interpret()
2256 struct ptlrpc_request *req = NULL; osc_enqueue_base() local
2337 req = ptlrpc_request_alloc(class_exp2cliimp(exp), osc_enqueue_base()
2339 if (req == NULL) osc_enqueue_base()
2342 rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0); osc_enqueue_base()
2344 ptlrpc_request_free(req); osc_enqueue_base()
2348 req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER, osc_enqueue_base()
2350 ptlrpc_request_set_replen(req); osc_enqueue_base()
2356 rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb, osc_enqueue_base()
2361 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args)); osc_enqueue_base()
2362 aa = ptlrpc_req_async_args(req); osc_enqueue_base()
2372 req->rq_interpret_reply = osc_enqueue_base()
2375 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); osc_enqueue_base()
2377 ptlrpc_set_add_req(rqset, req); osc_enqueue_base()
2379 ptlrpc_req_finished(req); osc_enqueue_base()
2384 rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc); osc_enqueue_base()
2386 ptlrpc_req_finished(req); osc_enqueue_base()
2445 struct ptlrpc_request *req, osc_statfs_interpret()
2467 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); osc_statfs_interpret()
2484 struct ptlrpc_request *req; osc_statfs_async() local
2494 req = ptlrpc_request_alloc(obd->u.cli.cl_import, &RQF_OST_STATFS); osc_statfs_async()
2495 if (req == NULL) osc_statfs_async()
2498 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS); osc_statfs_async()
2500 ptlrpc_request_free(req); osc_statfs_async()
2503 ptlrpc_request_set_replen(req); osc_statfs_async()
2504 req->rq_request_portal = OST_CREATE_PORTAL; osc_statfs_async()
2505 ptlrpc_at_set_req_timeout(req); osc_statfs_async()
2509 req->rq_no_resend = 1; osc_statfs_async()
2510 req->rq_no_delay = 1; osc_statfs_async()
2513 req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret; osc_statfs_async()
2514 CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args)); osc_statfs_async()
2515 aa = ptlrpc_req_async_args(req); osc_statfs_async()
2518 ptlrpc_set_add_req(rqset, req); osc_statfs_async()
2527 struct ptlrpc_request *req; osc_statfs() local
2546 req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS); osc_statfs()
2550 if (req == NULL) osc_statfs()
2553 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS); osc_statfs()
2555 ptlrpc_request_free(req); osc_statfs()
2558 ptlrpc_request_set_replen(req); osc_statfs()
2559 req->rq_request_portal = OST_CREATE_PORTAL; osc_statfs()
2560 ptlrpc_at_set_req_timeout(req); osc_statfs()
2564 req->rq_no_resend = 1; osc_statfs()
2565 req->rq_no_delay = 1; osc_statfs()
2568 rc = ptlrpc_queue_wait(req); osc_statfs()
2572 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS); osc_statfs()
2581 ptlrpc_req_finished(req); osc_statfs()
2751 struct ptlrpc_request *req; osc_get_info() local
2756 req = ptlrpc_request_alloc(class_exp2cliimp(exp), osc_get_info()
2758 if (req == NULL) osc_get_info()
2761 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, osc_get_info()
2763 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO); osc_get_info()
2765 ptlrpc_request_free(req); osc_get_info()
2769 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY); osc_get_info()
2772 req->rq_no_delay = req->rq_no_resend = 1; osc_get_info()
2773 ptlrpc_request_set_replen(req); osc_get_info()
2774 rc = ptlrpc_queue_wait(req); osc_get_info()
2778 reply = req_capsule_server_get(&req->rq_pill, &RMF_OBD_ID); osc_get_info()
2786 ptlrpc_req_finished(req); osc_get_info()
2795 struct ptlrpc_request *req; osc_get_info() local
2831 req = ptlrpc_request_alloc(class_exp2cliimp(exp), osc_get_info()
2833 if (req == NULL) { osc_get_info()
2838 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_KEY, osc_get_info()
2840 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, osc_get_info()
2842 req_capsule_set_size(&req->rq_pill, &RMF_FIEMAP_VAL, osc_get_info()
2845 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_GET_INFO); osc_get_info()
2847 ptlrpc_request_free(req); osc_get_info()
2851 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_KEY); osc_get_info()
2853 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FIEMAP_VAL); osc_get_info()
2856 ptlrpc_request_set_replen(req); osc_get_info()
2857 rc = ptlrpc_queue_wait(req); osc_get_info()
2861 reply = req_capsule_server_get(&req->rq_pill, &RMF_FIEMAP_VAL); osc_get_info()
2869 ptlrpc_req_finished(req); osc_get_info()
2883 struct ptlrpc_request *req; osc_set_info_async() local
2945 req = ptlrpc_request_alloc(imp, KEY_IS(KEY_GRANT_SHRINK) ? osc_set_info_async()
2948 if (req == NULL) osc_set_info_async()
2951 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_KEY, osc_set_info_async()
2954 req_capsule_set_size(&req->rq_pill, &RMF_SETINFO_VAL, osc_set_info_async()
2956 rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_SET_INFO); osc_set_info_async()
2958 ptlrpc_request_free(req); osc_set_info_async()
2962 tmp = req_capsule_client_get(&req->rq_pill, &RMF_SETINFO_KEY); osc_set_info_async()
2964 tmp = req_capsule_client_get(&req->rq_pill, KEY_IS(KEY_GRANT_SHRINK) ? osc_set_info_async()
2973 CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args)); osc_set_info_async()
2974 aa = ptlrpc_req_async_args(req); osc_set_info_async()
2977 ptlrpc_req_finished(req); osc_set_info_async()
2982 req->rq_interpret_reply = osc_shrink_grant_interpret; osc_set_info_async()
2985 ptlrpc_request_set_replen(req); osc_set_info_async()
2988 ptlrpc_set_add_req(set, req); osc_set_info_async()
2991 ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1); osc_set_info_async()
233 osc_getattr_interpret(const struct lu_env *env, struct ptlrpc_request *req, struct osc_async_args *aa, int rc) osc_getattr_interpret() argument
378 osc_setattr_interpret(const struct lu_env *env, struct ptlrpc_request *req, struct osc_setattr_args *sa, int rc) osc_setattr_interpret() argument
590 osc_sync_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *arg, int rc) osc_sync_interpret() argument
691 osc_destroy_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *data, int rc) osc_destroy_interpret() argument
895 osc_shrink_grant_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *aa, int rc) osc_shrink_grant_interpret() argument
1754 brw_interpret(const struct lu_env *env, struct ptlrpc_request *req, void *data, int rc) brw_interpret() argument
2176 osc_enqueue_interpret(const struct lu_env *env, struct ptlrpc_request *req, struct osc_enqueue_args *aa, int rc) osc_enqueue_interpret() argument
2444 osc_statfs_interpret(const struct lu_env *env, struct ptlrpc_request *req, struct osc_async_args *aa, int rc) osc_statfs_interpret() argument
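Nearly every osc_request.c helper above follows one ptlrpc request lifecycle: allocate from a request format, pack, set the expected reply length, send (synchronously with ptlrpc_queue_wait() or asynchronously via a request set or ptlrpcd), pull fields out of the reply capsule, and drop the reference. A hedged sketch of the synchronous form, modelled on osc_statfs(); the wrapper name is illustrative:

static int example_ost_statfs(struct obd_import *imp, struct obd_statfs *osfs)
{
	struct ptlrpc_request *req;
	struct obd_statfs *msfs;
	int rc;

	req = ptlrpc_request_alloc(imp, &RQF_OST_STATFS);
	if (req == NULL)
		return -ENOMEM;

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, OST_STATFS);
	if (rc) {
		ptlrpc_request_free(req);	/* unpacked requests are freed, not finished */
		return rc;
	}

	ptlrpc_request_set_replen(req);		/* size the reply buffer */

	rc = ptlrpc_queue_wait(req);		/* send and wait for the reply */
	if (rc == 0) {
		msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
		if (msfs == NULL)
			rc = -EPROTO;
		else
			*osfs = *msfs;
	}

	ptlrpc_req_finished(req);		/* drop the request reference */
	return rc;
}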
/linux-4.1.27/drivers/crypto/amcc/
H A Dcrypto4xx_alg.c73 int crypto4xx_encrypt(struct ablkcipher_request *req) crypto4xx_encrypt() argument
75 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); crypto4xx_encrypt()
82 return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, crypto4xx_encrypt()
83 req->nbytes, req->info, crypto4xx_encrypt()
87 int crypto4xx_decrypt(struct ablkcipher_request *req) crypto4xx_decrypt() argument
89 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); crypto4xx_decrypt()
96 return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, crypto4xx_decrypt()
97 req->nbytes, req->info, crypto4xx_decrypt()
237 int crypto4xx_hash_init(struct ahash_request *req) crypto4xx_hash_init() argument
239 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); crypto4xx_hash_init()
245 __crypto_ahash_cast(req->base.tfm)); crypto4xx_hash_init()
254 int crypto4xx_hash_update(struct ahash_request *req) crypto4xx_hash_update() argument
256 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); crypto4xx_hash_update()
263 return crypto4xx_build_pd(&req->base, ctx, req->src, crypto4xx_hash_update()
264 (struct scatterlist *) req->result, crypto4xx_hash_update()
265 req->nbytes, NULL, 0); crypto4xx_hash_update()
268 int crypto4xx_hash_final(struct ahash_request *req) crypto4xx_hash_final() argument
273 int crypto4xx_hash_digest(struct ahash_request *req) crypto4xx_hash_digest() argument
275 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); crypto4xx_hash_digest()
281 return crypto4xx_build_pd(&req->base, ctx, req->src, crypto4xx_hash_digest()
282 (struct scatterlist *) req->result, crypto4xx_hash_digest()
283 req->nbytes, NULL, 0); crypto4xx_hash_digest()
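crypto4xx_encrypt(), crypto4xx_decrypt() and the hash entry points above receive requests that a crypto API consumer has built and merely translate them into packet descriptors via crypto4xx_build_pd(). As context, a hedged sketch of how a caller typically constructs such an ablkcipher request against the 4.1-era API; the algorithm name, in-place buffer handling and function name are illustrative, and real callers must also handle -EINPROGRESS/-EBUSY through a completion callback:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int example_cbc_aes_encrypt(u8 *buf, unsigned int len,
				   const u8 *key, unsigned int keylen, u8 *iv)
{
	struct crypto_ablkcipher *tfm;
	struct ablkcipher_request *req;
	struct scatterlist sg;
	int ret;

	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	ret = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (ret)
		goto out_tfm;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		ret = -ENOMEM;
		goto out_tfm;
	}

	sg_init_one(&sg, buf, len);			/* encrypt in place */
	ablkcipher_request_set_crypt(req, &sg, &sg, len, iv);

	ret = crypto_ablkcipher_encrypt(req);		/* may return -EINPROGRESS */

	ablkcipher_request_free(req);
out_tfm:
	crypto_free_ablkcipher(tfm);
	return ret;
}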
/linux-4.1.27/fs/coda/
H A Dpsdev.c101 struct upc_req *req = NULL; coda_psdev_write() local
153 req = tmp; coda_psdev_write()
154 list_del(&req->uc_chain); coda_psdev_write()
160 if (!req) { coda_psdev_write()
168 if (req->uc_outSize < nbytes) { coda_psdev_write()
170 __func__, req->uc_outSize, (long)nbytes, coda_psdev_write()
172 nbytes = req->uc_outSize; /* don't have more space! */ coda_psdev_write()
174 if (copy_from_user(req->uc_data, buf, nbytes)) { coda_psdev_write()
175 req->uc_flags |= CODA_REQ_ABORT; coda_psdev_write()
176 wake_up(&req->uc_sleep); coda_psdev_write()
182 req->uc_outSize = nbytes; coda_psdev_write()
183 req->uc_flags |= CODA_REQ_WRITE; coda_psdev_write()
187 if (req->uc_opcode == CODA_OPEN_BY_FD) { coda_psdev_write()
189 (struct coda_open_by_fd_out *)req->uc_data; coda_psdev_write()
194 wake_up(&req->uc_sleep); coda_psdev_write()
208 struct upc_req *req; coda_psdev_read() local
239 req = list_entry(vcp->vc_pending.next, struct upc_req,uc_chain); coda_psdev_read()
240 list_del(&req->uc_chain); coda_psdev_read()
243 count = req->uc_inSize; coda_psdev_read()
244 if (nbytes < req->uc_inSize) { coda_psdev_read()
246 __func__, (long)nbytes, req->uc_inSize); coda_psdev_read()
250 if (copy_to_user(buf, req->uc_data, count)) coda_psdev_read()
254 if (!(req->uc_flags & CODA_REQ_ASYNC)) { coda_psdev_read()
255 req->uc_flags |= CODA_REQ_READ; coda_psdev_read()
256 list_add_tail(&(req->uc_chain), &vcp->vc_processing); coda_psdev_read()
260 CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr)); coda_psdev_read()
261 kfree(req); coda_psdev_read()
307 struct upc_req *req, *tmp; coda_psdev_release() local
317 list_for_each_entry_safe(req, tmp, &vcp->vc_pending, uc_chain) { coda_psdev_release()
318 list_del(&req->uc_chain); coda_psdev_release()
321 if (req->uc_flags & CODA_REQ_ASYNC) { coda_psdev_release()
322 CODA_FREE(req->uc_data, sizeof(struct coda_in_hdr)); coda_psdev_release()
323 kfree(req); coda_psdev_release()
326 req->uc_flags |= CODA_REQ_ABORT; coda_psdev_release()
327 wake_up(&req->uc_sleep); coda_psdev_release()
330 list_for_each_entry_safe(req, tmp, &vcp->vc_processing, uc_chain) { coda_psdev_release()
331 list_del(&req->uc_chain); coda_psdev_release()
333 req->uc_flags |= CODA_REQ_ABORT; coda_psdev_release()
334 wake_up(&req->uc_sleep); coda_psdev_release()
/linux-4.1.27/drivers/infiniband/core/
H A Daddr.c185 static void queue_req(struct addr_req *req) queue_req() argument
191 if (time_after_eq(req->timeout, temp_req->timeout)) queue_req()
195 list_add(&req->list, &temp_req->list); queue_req()
197 if (req_list.next == &req->list) queue_req()
198 set_timeout(req->timeout); queue_req()
336 struct addr_req *req, *temp_req; process_req() local
343 list_for_each_entry_safe(req, temp_req, &req_list, list) { process_req()
344 if (req->status == -ENODATA) { process_req()
345 src_in = (struct sockaddr *) &req->src_addr; process_req()
346 dst_in = (struct sockaddr *) &req->dst_addr; process_req()
347 req->status = addr_resolve(src_in, dst_in, req->addr); process_req()
348 if (req->status && time_after_eq(jiffies, req->timeout)) process_req()
349 req->status = -ETIMEDOUT; process_req()
350 else if (req->status == -ENODATA) process_req()
353 list_move_tail(&req->list, &done_list); process_req()
357 req = list_entry(req_list.next, struct addr_req, list); process_req()
358 set_timeout(req->timeout); process_req()
362 list_for_each_entry_safe(req, temp_req, &done_list, list) { process_req()
363 list_del(&req->list); process_req()
364 req->callback(req->status, (struct sockaddr *) &req->src_addr, process_req()
365 req->addr, req->context); process_req()
366 put_client(req->client); process_req()
367 kfree(req); process_req()
379 struct addr_req *req; rdma_resolve_ip() local
382 req = kzalloc(sizeof *req, GFP_KERNEL); rdma_resolve_ip()
383 if (!req) rdma_resolve_ip()
386 src_in = (struct sockaddr *) &req->src_addr; rdma_resolve_ip()
387 dst_in = (struct sockaddr *) &req->dst_addr; rdma_resolve_ip()
401 req->addr = addr; rdma_resolve_ip()
402 req->callback = callback; rdma_resolve_ip()
403 req->context = context; rdma_resolve_ip()
404 req->client = client; rdma_resolve_ip()
407 req->status = addr_resolve(src_in, dst_in, addr); rdma_resolve_ip()
408 switch (req->status) { rdma_resolve_ip()
410 req->timeout = jiffies; rdma_resolve_ip()
411 queue_req(req); rdma_resolve_ip()
414 req->timeout = msecs_to_jiffies(timeout_ms) + jiffies; rdma_resolve_ip()
415 queue_req(req); rdma_resolve_ip()
418 ret = req->status; rdma_resolve_ip()
424 kfree(req); rdma_resolve_ip()
431 struct addr_req *req, *temp_req; rdma_addr_cancel() local
434 list_for_each_entry_safe(req, temp_req, &req_list, list) { rdma_addr_cancel()
435 if (req->addr == addr) { rdma_addr_cancel()
436 req->status = -ECANCELED; rdma_addr_cancel()
437 req->timeout = jiffies; rdma_addr_cancel()
438 list_move(&req->list, &req_list); rdma_addr_cancel()
439 set_timeout(req->timeout); rdma_addr_cancel()
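queue_req() and process_req() above keep pending address resolutions on a single list ordered by absolute timeout, so the delayed work only ever needs to look at the head entry. A generic reconstruction of that ordered insert follows; the type and function names are illustrative, not the driver's symbols. Note the trick: when the reverse walk runs off the front of the list, the cursor aliases the list head, so the unconditional list_add() still inserts the new entry first (the real code then re-arms the timer for exactly that case).

#include <linux/list.h>
#include <linux/jiffies.h>

struct pending_req {
	struct list_head list;
	unsigned long timeout;		/* absolute jiffies */
};

static void queue_pending(struct list_head *queue, struct pending_req *nreq)
{
	struct pending_req *p;

	/* walk from the tail: most existing entries expire later */
	list_for_each_entry_reverse(p, queue, list) {
		if (time_after_eq(nreq->timeout, p->timeout))
			break;		/* insert after the first earlier-or-equal entry */
	}
	list_add(&nreq->list, &p->list);
}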
/linux-4.1.27/fs/ecryptfs/
H A Dkthread.c61 struct ecryptfs_open_req *req; ecryptfs_threadfn() local
73 req = list_first_entry(&ecryptfs_kthread_ctl.req_list, ecryptfs_threadfn()
76 list_del(&req->kthread_ctl_list); ecryptfs_threadfn()
77 *req->lower_file = dentry_open(&req->path, ecryptfs_threadfn()
79 complete(&req->done); ecryptfs_threadfn()
106 struct ecryptfs_open_req *req, *tmp; ecryptfs_destroy_kthread() local
110 list_for_each_entry_safe(req, tmp, &ecryptfs_kthread_ctl.req_list, ecryptfs_destroy_kthread()
112 list_del(&req->kthread_ctl_list); ecryptfs_destroy_kthread()
113 *req->lower_file = ERR_PTR(-EIO); ecryptfs_destroy_kthread()
114 complete(&req->done); ecryptfs_destroy_kthread()
136 struct ecryptfs_open_req req; ecryptfs_privileged_open() local
140 init_completion(&req.done); ecryptfs_privileged_open()
141 req.lower_file = lower_file; ecryptfs_privileged_open()
142 req.path.dentry = lower_dentry; ecryptfs_privileged_open()
143 req.path.mnt = lower_mnt; ecryptfs_privileged_open()
149 (*lower_file) = dentry_open(&req.path, flags, cred); ecryptfs_privileged_open()
165 list_add_tail(&req.kthread_ctl_list, &ecryptfs_kthread_ctl.req_list); ecryptfs_privileged_open()
168 wait_for_completion(&req.done); ecryptfs_privileged_open()
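ecryptfs_privileged_open() above hands an on-stack request to the ecryptfs kthread: the caller queues it and sleeps on a completion, and the worker performs the open and calls complete(). A generic sketch of that hand-off pattern under stated assumptions; the struct, lock and function names here are illustrative, not ecryptfs symbols:

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/completion.h>

struct open_req {
	struct list_head list;
	int status;			/* filled in by the worker */
	struct completion done;
};

static void worker_handle(struct open_req *req)
{
	req->status = 0;		/* the worker does the privileged work here */
	complete(&req->done);		/* wake the sleeping requester */
}

static int submit_and_wait(struct list_head *queue, struct mutex *lock,
			   struct open_req *req)
{
	init_completion(&req->done);

	mutex_lock(lock);
	list_add_tail(&req->list, queue);	/* worker pops from the head */
	mutex_unlock(lock);

	wait_for_completion(&req->done);	/* worker_handle() ends the wait */
	return req->status;
}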
/linux-4.1.27/drivers/net/usb/
H A Dcdc-phonet.c55 static void tx_complete(struct urb *req);
56 static void rx_complete(struct urb *req);
64 struct urb *req = NULL; usbpn_xmit() local
71 req = usb_alloc_urb(0, GFP_ATOMIC); usbpn_xmit()
72 if (!req) usbpn_xmit()
74 usb_fill_bulk_urb(req, pnd->usb, pnd->tx_pipe, skb->data, skb->len, usbpn_xmit()
76 req->transfer_flags = URB_ZERO_PACKET; usbpn_xmit()
77 err = usb_submit_urb(req, GFP_ATOMIC); usbpn_xmit()
79 usb_free_urb(req); usbpn_xmit()
96 static void tx_complete(struct urb *req) tx_complete() argument
98 struct sk_buff *skb = req->context; tx_complete()
101 int status = req->status; tx_complete()
124 usb_free_urb(req); tx_complete()
127 static int rx_submit(struct usbpn_dev *pnd, struct urb *req, gfp_t gfp_flags) rx_submit() argument
137 usb_fill_bulk_urb(req, pnd->usb, pnd->rx_pipe, page_address(page), rx_submit()
139 req->transfer_flags = 0; rx_submit()
140 err = usb_submit_urb(req, gfp_flags); rx_submit()
148 static void rx_complete(struct urb *req) rx_complete() argument
150 struct net_device *dev = req->context; rx_complete()
152 struct page *page = virt_to_page(req->transfer_buffer); rx_complete()
155 int status = req->status; rx_complete()
167 page, 1, req->actual_length, rx_complete()
173 page, 0, req->actual_length, rx_complete()
177 if (req->actual_length < PAGE_SIZE) rx_complete()
197 req = NULL; rx_complete()
214 if (req) rx_complete()
215 rx_submit(pnd, req, GFP_ATOMIC); rx_complete()
232 struct urb *req = usb_alloc_urb(0, GFP_KERNEL); usbpn_open() local
234 if (!req || rx_submit(pnd, req, GFP_KERNEL)) { usbpn_open()
235 usb_free_urb(req); usbpn_open()
239 pnd->urbs[i] = req; usbpn_open()
255 struct urb *req = pnd->urbs[i]; usbpn_close() local
257 if (!req) usbpn_close()
259 usb_kill_urb(req); usbpn_close()
260 usb_free_urb(req); usbpn_close()
269 struct if_phonet_req *req = (struct if_phonet_req *)ifr; usbpn_ioctl() local
273 req->ifr_phonet_autoconf.device = PN_DEV_PC; usbpn_ioctl()
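usbpn_xmit() and tx_complete() above follow the standard bulk-URB lifecycle: allocate the URB, fill it, submit it, and free it in the completion handler (or immediately if submission fails). A minimal sketch of that lifecycle; the device, pipe and buffer parameters are assumptions standing in for the driver's pnd->usb and pnd->tx_pipe fields:

#include <linux/usb.h>

static void example_tx_complete(struct urb *urb)
{
	/* urb->status carries the result, urb->context whatever was passed below */
	usb_free_urb(urb);
}

static int example_bulk_tx(struct usb_device *udev, unsigned int pipe,
			   void *buf, int len, void *ctx)
{
	struct urb *urb;
	int err;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	usb_fill_bulk_urb(urb, udev, pipe, buf, len, example_tx_complete, ctx);
	urb->transfer_flags = URB_ZERO_PACKET;	/* terminate with a short packet */

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err)
		usb_free_urb(urb);		/* completion will not run */
	return err;
}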
/linux-4.1.27/drivers/scsi/be2iscsi/
H A Dbe_mgmt.c163 struct be_cmd_req_modify_eq_delay *req; be_cmd_modify_eq_delay() local
175 req = embedded_payload(wrb); be_cmd_modify_eq_delay()
178 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_modify_eq_delay()
179 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_modify_eq_delay()
180 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req)); be_cmd_modify_eq_delay()
182 req->num_eq = cpu_to_le32(num); be_cmd_modify_eq_delay()
184 req->delay[i].eq_id = cpu_to_le32(set_eqd[i].eq_id); be_cmd_modify_eq_delay()
185 req->delay[i].phase = 0; be_cmd_modify_eq_delay()
186 req->delay[i].delay_multiplier = be_cmd_modify_eq_delay()
211 struct be_cmd_reopen_session_req *req; mgmt_reopen_session() local
226 req = embedded_payload(wrb); mgmt_reopen_session()
228 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); mgmt_reopen_session()
229 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, mgmt_reopen_session()
234 req->reopen_type = reopen_type; mgmt_reopen_session()
235 req->session_handle = sess_handle; mgmt_reopen_session()
246 struct be_cmd_get_boot_target_req *req; mgmt_get_boot_target() local
261 req = embedded_payload(wrb); mgmt_get_boot_target()
263 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); mgmt_get_boot_target()
264 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, mgmt_get_boot_target()
280 struct be_cmd_get_session_req *req; mgmt_get_session_info() local
296 req = nonemb_cmd->va; mgmt_get_session_info()
297 memset(req, 0, sizeof(*req)); mgmt_get_session_info()
304 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); mgmt_get_session_info()
305 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, mgmt_get_session_info()
308 req->session_handle = boot_session_handle; mgmt_get_session_info()
334 struct be_fw_cfg *req = embedded_payload(wrb); mgmt_get_fw_config() local
340 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); mgmt_get_fw_config()
342 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, mgmt_get_fw_config()
349 pfw_cfg = req; mgmt_get_fw_config()
427 struct be_mgmt_controller_attributes *req; mgmt_check_supported_fw() local
441 req = nonemb_cmd.va; mgmt_check_supported_fw()
442 memset(req, 0, sizeof(*req)); mgmt_check_supported_fw()
445 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); mgmt_check_supported_fw()
446 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, mgmt_check_supported_fw()
447 OPCODE_COMMON_GET_CNTL_ATTRIBUTES, sizeof(*req)); mgmt_check_supported_fw()
491 struct be_bsg_vendor_cmd *req = nonemb_cmd->va; mgmt_vendor_specific_fw_cmd() local
501 req->region = region; mgmt_vendor_specific_fw_cmd()
502 req->sector = sector; mgmt_vendor_specific_fw_cmd()
503 req->offset = offset; mgmt_vendor_specific_fw_cmd()
509 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, mgmt_vendor_specific_fw_cmd()
510 OPCODE_COMMON_WRITE_FLASH, sizeof(*req)); mgmt_vendor_specific_fw_cmd()
516 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, mgmt_vendor_specific_fw_cmd()
517 OPCODE_COMMON_READ_FLASH, sizeof(*req)); mgmt_vendor_specific_fw_cmd()
562 struct iscsi_cleanup_req *req = embedded_payload(wrb); mgmt_epfw_cleanup() local
567 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); mgmt_epfw_cleanup()
568 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, mgmt_epfw_cleanup()
569 OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req)); mgmt_epfw_cleanup()
571 req->chute = (1 << ulp_num); mgmt_epfw_cleanup()
572 req->hdr_ring_id = cpu_to_le16(HWI_GET_DEF_HDRQ_ID(phba, ulp_num)); mgmt_epfw_cleanup()
573 req->data_ring_id = cpu_to_le16(HWI_GET_DEF_BUFQ_ID(phba, ulp_num)); mgmt_epfw_cleanup()
592 struct invalidate_commands_params_in *req; mgmt_invalidate_icds() local
602 req = nonemb_cmd->va; mgmt_invalidate_icds()
603 memset(req, 0, sizeof(*req)); mgmt_invalidate_icds()
608 be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1); mgmt_invalidate_icds()
609 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, mgmt_invalidate_icds()
611 sizeof(*req)); mgmt_invalidate_icds()
612 req->ref_handle = 0; mgmt_invalidate_icds()
613 req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE; mgmt_invalidate_icds()
615 req->table[i].icd = inv_tbl->icd; mgmt_invalidate_icds()
616 req->table[i].cid = inv_tbl->cid; mgmt_invalidate_icds()
617 req->icd_count++; mgmt_invalidate_icds()
637 struct iscsi_invalidate_connection_params_in *req; mgmt_invalidate_connection() local
648 req = embedded_payload(wrb); mgmt_invalidate_connection()
650 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); mgmt_invalidate_connection()
651 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, mgmt_invalidate_connection()
653 sizeof(*req)); mgmt_invalidate_connection()
654 req->session_handle = beiscsi_ep->fw_handle; mgmt_invalidate_connection()
655 req->cid = cid; mgmt_invalidate_connection()
657 req->cleanup_type = CMD_ISCSI_CONNECTION_ISSUE_TCP_RST; mgmt_invalidate_connection()
659 req->cleanup_type = CMD_ISCSI_CONNECTION_INVALIDATE; mgmt_invalidate_connection()
660 req->save_cfg = savecfg_flag; mgmt_invalidate_connection()
671 struct tcp_upload_params_in *req; mgmt_upload_connection() local
681 req = embedded_payload(wrb); mgmt_upload_connection()
684 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); mgmt_upload_connection()
685 be_cmd_hdr_prepare(&req->hdr, CMD_COMMON_TCP_UPLOAD, mgmt_upload_connection()
686 OPCODE_COMMON_TCP_UPLOAD, sizeof(*req)); mgmt_upload_connection()
687 req->id = (unsigned short)cid; mgmt_upload_connection()
688 req->upload_type = (unsigned char)upload_flag; mgmt_upload_connection()
715 struct tcp_connect_and_offload_in_v1 *req; mgmt_open_connection() local
744 req = nonemb_cmd->va; mgmt_open_connection()
745 memset(req, 0, sizeof(*req)); mgmt_open_connection()
749 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, mgmt_open_connection()
754 req->ip_address.ip_type = BE2_IPV4; mgmt_open_connection()
755 req->ip_address.addr[0] = s_addr & 0x000000ff; mgmt_open_connection()
756 req->ip_address.addr[1] = (s_addr & 0x0000ff00) >> 8; mgmt_open_connection()
757 req->ip_address.addr[2] = (s_addr & 0x00ff0000) >> 16; mgmt_open_connection()
758 req->ip_address.addr[3] = (s_addr & 0xff000000) >> 24; mgmt_open_connection()
759 req->tcp_port = ntohs(daddr_in->sin_port); mgmt_open_connection()
764 req->ip_address.ip_type = BE2_IPV6; mgmt_open_connection()
765 memcpy(&req->ip_address.addr, mgmt_open_connection()
767 req->tcp_port = ntohs(daddr_in6->sin6_port); mgmt_open_connection()
781 req->cid = cid; mgmt_open_connection()
785 req->cq_id = phwi_context->be_cq[i].id; mgmt_open_connection()
787 "BG_%d : i=%d cq_id=%d\n", i, req->cq_id); mgmt_open_connection()
788 req->defq_id = def_hdr_id; mgmt_open_connection()
789 req->hdr_ring_id = def_hdr_id; mgmt_open_connection()
790 req->data_ring_id = def_data_id; mgmt_open_connection()
791 req->do_offload = 1; mgmt_open_connection()
792 req->dataout_template_pa.lo = ptemplate_address->lo; mgmt_open_connection()
793 req->dataout_template_pa.hi = ptemplate_address->hi; mgmt_open_connection()
799 req->hdr.version = MBX_CMD_VER1; mgmt_open_connection()
800 req->tcp_window_size = 0; mgmt_open_connection()
801 req->tcp_window_scale_count = 2; mgmt_open_connection()
813 struct be_cmd_get_all_if_id_req *req = embedded_payload(wrb); mgmt_get_all_if_id() local
814 struct be_cmd_get_all_if_id_req *pbe_allid = req; mgmt_get_all_if_id()
821 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); mgmt_get_all_if_id()
822 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, mgmt_get_all_if_id()
824 sizeof(*req)); mgmt_get_all_if_id()
921 struct be_cmd_set_ip_addr_req *req; mgmt_static_ip_modify() local
928 sizeof(*req)); mgmt_static_ip_modify()
935 req = nonemb_cmd.va; mgmt_static_ip_modify()
936 req->ip_params.record_entry_count = 1; mgmt_static_ip_modify()
937 req->ip_params.ip_record.action = ip_action; mgmt_static_ip_modify()
938 req->ip_params.ip_record.interface_hndl = mgmt_static_ip_modify()
940 req->ip_params.ip_record.ip_addr.size_of_structure = mgmt_static_ip_modify()
942 req->ip_params.ip_record.ip_addr.ip_type = ip_type; mgmt_static_ip_modify()
945 memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value, mgmt_static_ip_modify()
946 sizeof(req->ip_params.ip_record.ip_addr.addr)); mgmt_static_ip_modify()
949 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, mgmt_static_ip_modify()
951 sizeof(req->ip_params.ip_record.ip_addr.subnet_mask)); mgmt_static_ip_modify()
953 memcpy(req->ip_params.ip_record.ip_addr.addr, mgmt_static_ip_modify()
955 sizeof(req->ip_params.ip_record.ip_addr.addr)); mgmt_static_ip_modify()
957 memcpy(req->ip_params.ip_record.ip_addr.subnet_mask, mgmt_static_ip_modify()
959 sizeof(req->ip_params.ip_record.ip_addr.subnet_mask)); mgmt_static_ip_modify()
972 struct be_cmd_set_def_gateway_req *req; mgmt_modify_gateway() local
979 sizeof(*req)); mgmt_modify_gateway()
983 req = nonemb_cmd.va; mgmt_modify_gateway()
984 req->action = gtway_action; mgmt_modify_gateway()
985 req->ip_addr.ip_type = BE2_IPV4; mgmt_modify_gateway()
987 memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr)); mgmt_modify_gateway()
1151 struct be_cmd_get_def_gateway_req *req; mgmt_get_gateway() local
1161 req = nonemb_cmd.va; mgmt_get_gateway()
1162 req->ip_type = ip_type; mgmt_get_gateway()
1171 struct be_cmd_get_if_info_req *req; mgmt_get_if_info() local
1186 req = nonemb_cmd.va; mgmt_get_if_info()
1187 req->interface_hndl = phba->interface_handle; mgmt_get_if_info()
1188 req->ip_type = ip_type; mgmt_get_if_info()
1250 struct be_cmd_hba_name *req; be_cmd_get_initname() local
1261 req = embedded_payload(wrb); be_cmd_get_initname()
1263 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_get_initname()
1264 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI_INI, be_cmd_get_initname()
1266 sizeof(*req)); be_cmd_get_initname()
1277 struct be_cmd_ntwk_link_status_req *req; be_cmd_get_port_speed() local
1288 req = embedded_payload(wrb); be_cmd_get_port_speed()
1290 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_get_port_speed()
1291 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_get_port_speed()
1293 sizeof(*req)); be_cmd_get_port_speed()
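The mgmt_* helpers above share one shape for embedded mailbox commands: treat the WRB's embedded payload as the request, prepare the WRB and command headers, then fill command-specific fields before posting. A sketch of just that prepare-and-fill step, modelled on mgmt_epfw_cleanup(); obtaining the WRB and posting it are elided because they differ per caller, and the ring-id parameters stand in for the HWI_GET_DEF_* lookups used in the real code:

static void example_prepare_cleanup(struct be_mcc_wrb *wrb, u16 hdr_ring_id,
				    u16 data_ring_id, unsigned int ulp_num)
{
	struct iscsi_cleanup_req *req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);		/* embedded, no SGEs */
	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI,
			   OPCODE_COMMON_ISCSI_CLEANUP, sizeof(*req));

	req->chute = (1 << ulp_num);
	req->hdr_ring_id = cpu_to_le16(hdr_ring_id);
	req->data_ring_id = cpu_to_le16(data_ring_id);
}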
H A Dbe_cmds.c827 struct be_cmd_req_eq_create *req = embedded_payload(wrb); beiscsi_cmd_eq_create() local
835 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); beiscsi_cmd_eq_create()
837 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, beiscsi_cmd_eq_create()
838 OPCODE_COMMON_EQ_CREATE, sizeof(*req)); beiscsi_cmd_eq_create()
840 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); beiscsi_cmd_eq_create()
842 AMAP_SET_BITS(struct amap_eq_context, func, req->context, beiscsi_cmd_eq_create()
844 AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); beiscsi_cmd_eq_create()
845 AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); beiscsi_cmd_eq_create()
846 AMAP_SET_BITS(struct amap_eq_context, count, req->context, beiscsi_cmd_eq_create()
848 AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, beiscsi_cmd_eq_create()
850 be_dws_cpu_to_le(req->context, sizeof(req->context)); beiscsi_cmd_eq_create()
852 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); beiscsi_cmd_eq_create()
949 struct be_cmd_req_cq_create *req = embedded_payload(wrb); beiscsi_cmd_cq_create() local
953 void *ctxt = &req->context; beiscsi_cmd_cq_create()
959 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); beiscsi_cmd_cq_create()
961 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, beiscsi_cmd_cq_create()
962 OPCODE_COMMON_CQ_CREATE, sizeof(*req)); beiscsi_cmd_cq_create()
964 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); beiscsi_cmd_cq_create()
979 req->hdr.version = MBX_CMD_VER2; beiscsi_cmd_cq_create()
980 req->page_size = 1; beiscsi_cmd_cq_create()
993 be_dws_cpu_to_le(ctxt, sizeof(req->context)); beiscsi_cmd_cq_create()
995 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); beiscsi_cmd_cq_create()
1024 struct be_cmd_req_mcc_create *req; beiscsi_cmd_mccq_create() local
1034 req = embedded_payload(wrb); beiscsi_cmd_mccq_create()
1035 ctxt = &req->context; beiscsi_cmd_mccq_create()
1037 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); beiscsi_cmd_mccq_create()
1039 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, beiscsi_cmd_mccq_create()
1040 OPCODE_COMMON_MCC_CREATE, sizeof(*req)); beiscsi_cmd_mccq_create()
1042 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); beiscsi_cmd_mccq_create()
1051 be_dws_cpu_to_le(ctxt, sizeof(req->context)); beiscsi_cmd_mccq_create()
1053 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); beiscsi_cmd_mccq_create()
1070 struct be_cmd_req_q_destroy *req = embedded_payload(wrb); beiscsi_cmd_q_destroy() local
1081 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); beiscsi_cmd_q_destroy()
1113 be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req)); beiscsi_cmd_q_destroy()
1115 req->id = cpu_to_le16(q->id); beiscsi_cmd_q_destroy()
1148 struct be_defq_create_req *req = embedded_payload(wrb); be_cmd_create_default_pdu_queue() local
1151 void *ctxt = &req->context; be_cmd_create_default_pdu_queue()
1157 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_create_default_pdu_queue()
1159 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, be_cmd_create_default_pdu_queue()
1160 OPCODE_COMMON_ISCSI_DEFQ_CREATE, sizeof(*req)); be_cmd_create_default_pdu_queue()
1162 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); be_cmd_create_default_pdu_queue()
1164 req->ulp_num = ulp_num; be_cmd_create_default_pdu_queue()
1165 req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT); be_cmd_create_default_pdu_queue()
1166 req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT); be_cmd_create_default_pdu_queue()
1199 be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_create_default_pdu_queue()
1201 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_create_default_pdu_queue()
1249 struct be_wrbq_create_req *req = embedded_payload(wrb); be_cmd_wrbq_create() local
1257 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_wrbq_create()
1259 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, be_cmd_wrbq_create()
1260 OPCODE_COMMON_ISCSI_WRBQ_CREATE, sizeof(*req)); be_cmd_wrbq_create()
1261 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); be_cmd_wrbq_create()
1264 req->ulp_num = ulp_num; be_cmd_wrbq_create()
1265 req->dua_feature |= (1 << BEISCSI_DUAL_ULP_AWARE_BIT); be_cmd_wrbq_create()
1266 req->dua_feature |= (1 << BEISCSI_BIND_Q_TO_ULP_BIT); be_cmd_wrbq_create()
1269 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_wrbq_create()
1293 struct be_post_template_pages_req *req = embedded_payload(wrb); be_cmd_iscsi_post_template_hdr() local
1299 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_iscsi_post_template_hdr()
1300 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_iscsi_post_template_hdr()
1302 sizeof(*req)); be_cmd_iscsi_post_template_hdr()
1304 req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); be_cmd_iscsi_post_template_hdr()
1305 req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; be_cmd_iscsi_post_template_hdr()
1306 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); be_cmd_iscsi_post_template_hdr()
1316 struct be_remove_template_pages_req *req = embedded_payload(wrb); be_cmd_iscsi_remove_template_hdr() local
1322 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_iscsi_remove_template_hdr()
1323 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, be_cmd_iscsi_remove_template_hdr()
1325 sizeof(*req)); be_cmd_iscsi_remove_template_hdr()
1327 req->type = BEISCSI_TEMPLATE_HDR_TYPE_ISCSI; be_cmd_iscsi_remove_template_hdr()
1339 struct be_post_sgl_pages_req *req = embedded_payload(wrb); be_cmd_iscsi_post_sgl_pages() local
1352 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); be_cmd_iscsi_post_sgl_pages()
1353 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, be_cmd_iscsi_post_sgl_pages()
1355 sizeof(*req)); be_cmd_iscsi_post_sgl_pages()
1358 req->num_pages = min(num_pages, curr_pages); be_cmd_iscsi_post_sgl_pages()
1359 req->page_offset = page_offset; be_cmd_iscsi_post_sgl_pages()
1360 be_cmd_page_addrs_prepare(req->pages, req->num_pages, q_mem); be_cmd_iscsi_post_sgl_pages()
1361 q_mem->dma = q_mem->dma + (req->num_pages * PAGE_SIZE); be_cmd_iscsi_post_sgl_pages()
1362 internal_page_offset += req->num_pages; be_cmd_iscsi_post_sgl_pages()
1363 page_offset += req->num_pages; be_cmd_iscsi_post_sgl_pages()
1364 num_pages -= req->num_pages; be_cmd_iscsi_post_sgl_pages()
1367 req->num_pages = temp_num_pages; be_cmd_iscsi_post_sgl_pages()
1388 struct be_post_sgl_pages_req *req = embedded_payload(wrb); beiscsi_cmd_reset_function() local
1393 req = embedded_payload(wrb); beiscsi_cmd_reset_function()
1394 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0); beiscsi_cmd_reset_function()
1395 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, beiscsi_cmd_reset_function()
1396 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req)); beiscsi_cmd_reset_function()
1418 struct be_cmd_set_vlan_req *req; be_cmd_set_vlan() local
1429 req = embedded_payload(wrb); be_cmd_set_vlan()
1432 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ISCSI, be_cmd_set_vlan()
1434 sizeof(*req)); be_cmd_set_vlan()
1436 req->interface_hndl = phba->interface_handle; be_cmd_set_vlan()
1437 req->vlan_priority = vlan_tag; be_cmd_set_vlan()
/linux-4.1.27/include/trace/events/
H A Dhswadsp.h245 struct sst_hsw_audio_data_format_ipc *req),
247 TP_ARGS(stream, req),
262 __entry->frequency = req->frequency;
263 __entry->bitdepth = req->bitdepth;
264 __entry->map = req->map;
265 __entry->config = req->config;
266 __entry->style = req->style;
267 __entry->ch_num = req->ch_num;
268 __entry->valid_bit = req->valid_bit;
281 struct sst_hsw_ipc_stream_alloc_req *req),
283 TP_ARGS(stream, req),
294 __entry->path_id = req->path_id;
295 __entry->stream_type = req->stream_type;
296 __entry->format_id = req->format_id;
307 struct sst_hsw_ipc_stream_free_req *req),
309 TP_ARGS(stream, req),
318 __entry->stream_id = req->stream_id;
328 struct sst_hsw_ipc_volume_req *req),
330 TP_ARGS(stream, req),
342 __entry->channel = req->channel;
343 __entry->target_volume = req->target_volume;
344 __entry->curve_duration = req->curve_duration;
345 __entry->curve_type = req->curve_type;
357 TP_PROTO(struct sst_hsw_ipc_device_config_req *req),
359 TP_ARGS(req),
369 __entry->ssp = req->ssp_interface;
370 __entry->clock_freq = req->clock_frequency;
371 __entry->mode = req->mode;
372 __entry->clock_divider = req->clock_divider;
/linux-4.1.27/arch/powerpc/platforms/powermac/
H A Dtime.c107 struct adb_request req; cuda_get_time() local
110 if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0) cuda_get_time()
112 while (!req.complete) cuda_get_time()
114 if (req.reply_len != 7) cuda_get_time()
116 req.reply_len); cuda_get_time()
117 now = (req.reply[3] << 24) + (req.reply[4] << 16) cuda_get_time()
118 + (req.reply[5] << 8) + req.reply[6]; cuda_get_time()
127 struct adb_request req; cuda_set_rtc_time() local
130 if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME, cuda_set_rtc_time()
134 while (!req.complete) cuda_set_rtc_time()
136 if ((req.reply_len != 3) && (req.reply_len != 7)) cuda_set_rtc_time()
138 req.reply_len); cuda_set_rtc_time()
151 struct adb_request req; pmu_get_time() local
154 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) pmu_get_time()
156 pmu_wait_complete(&req); pmu_get_time()
157 if (req.reply_len != 4) pmu_get_time()
159 req.reply_len); pmu_get_time()
160 now = (req.reply[0] << 24) + (req.reply[1] << 16) pmu_get_time()
161 + (req.reply[2] << 8) + req.reply[3]; pmu_get_time()
170 struct adb_request req; pmu_set_rtc_time() local
173 if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24, pmu_set_rtc_time()
176 pmu_wait_complete(&req); pmu_set_rtc_time()
177 if (req.reply_len != 0) pmu_set_rtc_time()
179 req.reply_len); pmu_set_rtc_time()
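
The cuda_get_time()/pmu_get_time() hits above share one shape: fill an adb_request on the stack, submit it, spin on req.complete, then decode req.reply. A minimal sketch of that shape, assuming the reply layout shown in the listing (seven bytes, time starting at reply[3]); example_cuda_read_time() is an illustrative name, not from the source:

#include <linux/adb.h>          /* struct adb_request */
#include <linux/cuda.h>         /* cuda_request(), cuda_poll(), CUDA_PACKET, CUDA_GET_TIME */

static unsigned long example_cuda_read_time(void)
{
        struct adb_request req;

        if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0)
                return 0;
        while (!req.complete)           /* poll until the CUDA chip answers */
                cuda_poll();
        if (req.reply_len != 7)         /* expected reply length per the listing */
                return 0;
        return (req.reply[3] << 24) + (req.reply[4] << 16)
             + (req.reply[5] << 8) + req.reply[6];
}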
/linux-4.1.27/kernel/power/
H A Dqos.c192 struct pm_qos_request *req; pm_qos_dbg_show_requests() local
229 plist_for_each_entry(req, &c->list, node) { pm_qos_dbg_show_requests()
232 if ((req->node).prio != c->default_value) { pm_qos_dbg_show_requests()
238 (req->node).prio, state); pm_qos_dbg_show_requests()
328 * @req: Request to remove from the set.
331 struct pm_qos_flags_request *req) pm_qos_flags_remove_req()
335 list_del(&req->node); pm_qos_flags_remove_req()
336 list_for_each_entry(req, &pqf->list, node) pm_qos_flags_remove_req()
337 val |= req->flags; pm_qos_flags_remove_req()
345 * @req: Request to add to the set, to modify, or to remove from the set.
354 struct pm_qos_flags_request *req, pm_qos_update_flags()
366 pm_qos_flags_remove_req(pqf, req); pm_qos_update_flags()
369 pm_qos_flags_remove_req(pqf, req); pm_qos_update_flags()
371 req->flags = val; pm_qos_update_flags()
372 INIT_LIST_HEAD(&req->node); pm_qos_update_flags()
373 list_add_tail(&req->node, &pqf->list); pm_qos_update_flags()
401 int pm_qos_request_active(struct pm_qos_request *req) pm_qos_request_active() argument
403 return req->pm_qos_class != 0; pm_qos_request_active()
407 static void __pm_qos_update_request(struct pm_qos_request *req, __pm_qos_update_request() argument
410 trace_pm_qos_update_request(req->pm_qos_class, new_value); __pm_qos_update_request()
412 if (new_value != req->node.prio) __pm_qos_update_request()
414 pm_qos_array[req->pm_qos_class]->constraints, __pm_qos_update_request()
415 &req->node, PM_QOS_UPDATE_REQ, new_value); __pm_qos_update_request()
426 struct pm_qos_request *req = container_of(to_delayed_work(work), pm_qos_work_fn() local
430 __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE); pm_qos_work_fn()
435 * @req: pointer to a preallocated handle
446 void pm_qos_add_request(struct pm_qos_request *req, pm_qos_add_request() argument
449 if (!req) /*guard against callers passing in null */ pm_qos_add_request()
452 if (pm_qos_request_active(req)) { pm_qos_add_request()
456 req->pm_qos_class = pm_qos_class; pm_qos_add_request()
457 INIT_DELAYED_WORK(&req->work, pm_qos_work_fn); pm_qos_add_request()
460 &req->node, PM_QOS_ADD_REQ, value); pm_qos_add_request()
466 * @req : handle to list element holding a pm_qos request to use
474 void pm_qos_update_request(struct pm_qos_request *req, pm_qos_update_request() argument
477 if (!req) /*guard against callers passing in null */ pm_qos_update_request()
480 if (!pm_qos_request_active(req)) { pm_qos_update_request()
485 cancel_delayed_work_sync(&req->work); pm_qos_update_request()
486 __pm_qos_update_request(req, new_value); pm_qos_update_request()
492 * @req : handle to list element holding a pm_qos request to use
498 void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value, pm_qos_update_request_timeout() argument
501 if (!req) pm_qos_update_request_timeout()
503 if (WARN(!pm_qos_request_active(req), pm_qos_update_request_timeout()
507 cancel_delayed_work_sync(&req->work); pm_qos_update_request_timeout()
509 trace_pm_qos_update_request_timeout(req->pm_qos_class, pm_qos_update_request_timeout()
511 if (new_value != req->node.prio) pm_qos_update_request_timeout()
513 pm_qos_array[req->pm_qos_class]->constraints, pm_qos_update_request_timeout()
514 &req->node, PM_QOS_UPDATE_REQ, new_value); pm_qos_update_request_timeout()
516 schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us)); pm_qos_update_request_timeout()
521 * @req: handle to request list element
527 void pm_qos_remove_request(struct pm_qos_request *req) pm_qos_remove_request() argument
529 if (!req) /*guard against callers passing in null */ pm_qos_remove_request()
533 if (!pm_qos_request_active(req)) { pm_qos_remove_request()
538 cancel_delayed_work_sync(&req->work); pm_qos_remove_request()
540 trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE); pm_qos_remove_request()
541 pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints, pm_qos_remove_request()
542 &req->node, PM_QOS_REMOVE_REQ, pm_qos_remove_request()
544 memset(req, 0, sizeof(*req)); pm_qos_remove_request()
622 struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL); pm_qos_power_open() local
623 if (!req) pm_qos_power_open()
626 pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE); pm_qos_power_open()
627 filp->private_data = req; pm_qos_power_open()
636 struct pm_qos_request *req; pm_qos_power_release() local
638 req = filp->private_data; pm_qos_power_release()
639 pm_qos_remove_request(req); pm_qos_power_release()
640 kfree(req); pm_qos_power_release()
651 struct pm_qos_request *req = filp->private_data; pm_qos_power_read() local
653 if (!req) pm_qos_power_read()
655 if (!pm_qos_request_active(req)) pm_qos_power_read()
659 value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints); pm_qos_power_read()
669 struct pm_qos_request *req; pm_qos_power_write() local
682 req = filp->private_data; pm_qos_power_write()
683 pm_qos_update_request(req, value); pm_qos_power_write()
330 pm_qos_flags_remove_req(struct pm_qos_flags *pqf, struct pm_qos_flags_request *req) pm_qos_flags_remove_req() argument
353 pm_qos_update_flags(struct pm_qos_flags *pqf, struct pm_qos_flags_request *req, enum pm_qos_req_action action, s32 val) pm_qos_update_flags() argument
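
The qos.c hits above document the three calls a kernel user makes against a pm_qos request: add it with a class and target value, update it when the constraint changes, and remove it on teardown. A minimal sketch of that lifecycle; the example_* names and the 20 µs value are illustrative, only the pm_qos_* calls and constants come from the listing and pm_qos.h:

#include <linux/pm_qos.h>

/* Illustrative only: one latency constraint held for the lifetime of a device. */
static struct pm_qos_request example_latency_req;

static void example_start_io(void)
{
        /* Register the request; it must not already be active (see the
         * pm_qos_request_active() check in pm_qos_add_request() above). */
        pm_qos_add_request(&example_latency_req, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void example_relax(void)
{
        /* Loosen the constraint without dropping the request. */
        pm_qos_update_request(&example_latency_req, PM_QOS_DEFAULT_VALUE);
}

static void example_stop_io(void)
{
        /* Drop the request entirely; qos.c memsets it back to zero. */
        pm_qos_remove_request(&example_latency_req);
}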
/linux-4.1.27/drivers/memstick/host/
H A Dtifm_ms.c74 struct memstick_request *req; member in struct:tifm_ms
192 if (host->req->long_data) { tifm_ms_transfer_data()
193 length = host->req->sg.length - host->block_pos; tifm_ms_transfer_data()
194 off = host->req->sg.offset + host->block_pos; tifm_ms_transfer_data()
196 length = host->req->data_len - host->block_pos; tifm_ms_transfer_data()
205 if (host->req->long_data) { tifm_ms_transfer_data()
206 pg = nth_page(sg_page(&host->req->sg), tifm_ms_transfer_data()
215 buf = host->req->data + host->block_pos; tifm_ms_transfer_data()
216 p_cnt = host->req->data_len - host->block_pos; tifm_ms_transfer_data()
219 t_size = host->req->data_dir == WRITE tifm_ms_transfer_data()
223 if (host->req->long_data) { tifm_ms_transfer_data()
236 if (!length && (host->req->data_dir == WRITE)) { tifm_ms_transfer_data()
266 data = host->req->data; tifm_ms_issue_cmd()
270 if (host->req->long_data) { tifm_ms_issue_cmd()
271 data_len = host->req->sg.length; tifm_ms_issue_cmd()
275 data_len = host->req->data_len; tifm_ms_issue_cmd()
285 if (1 != tifm_map_sg(sock, &host->req->sg, 1, tifm_ms_issue_cmd()
286 host->req->data_dir == READ tifm_ms_issue_cmd()
289 host->req->error = -ENOMEM; tifm_ms_issue_cmd()
290 return host->req->error; tifm_ms_issue_cmd()
292 data_len = sg_dma_len(&host->req->sg); tifm_ms_issue_cmd()
299 if (host->req->data_dir == WRITE) tifm_ms_issue_cmd()
305 writel(sg_dma_address(&host->req->sg), tifm_ms_issue_cmd()
319 host->req->error = 0; tifm_ms_issue_cmd()
331 cmd = (host->req->tpc & 0xf) << 12; tifm_ms_issue_cmd()
347 host->req->int_reg = readl(sock->addr + SOCK_MS_STATUS) & 0xff; tifm_ms_complete_cmd()
348 host->req->int_reg = (host->req->int_reg & 1) tifm_ms_complete_cmd()
349 | ((host->req->int_reg << 4) & 0xe0); tifm_ms_complete_cmd()
356 tifm_unmap_sg(sock, &host->req->sg, 1, tifm_ms_complete_cmd()
357 host->req->data_dir == READ tifm_ms_complete_cmd()
367 rc = memstick_next_req(msh, &host->req); tifm_ms_complete_cmd()
373 if (!host->req->error) { tifm_ms_check_status()
378 if (host->req->need_card_int tifm_ms_check_status()
400 if (host->req) { tifm_ms_data_event()
434 if (host->req) { tifm_ms_card_event()
436 host->req->error = -ETIME; tifm_ms_card_event()
438 host->req->error = -EILSEQ; tifm_ms_card_event()
469 if (!host->req) { tifm_ms_req_tasklet()
472 rc = memstick_next_req(msh, &host->req); tifm_ms_req_tasklet()
474 host->req->error = -ETIME; tifm_ms_req_tasklet()
481 rc = memstick_next_req(msh, &host->req); tifm_ms_req_tasklet()
550 dev_name(&host->dev->dev), host->req ? host->req->tpc : 0, tifm_ms_abort()
607 if (host->req) { tifm_ms_remove()
613 tifm_unmap_sg(sock, &host->req->sg, 1, tifm_ms_remove()
614 host->req->data_dir == READ tifm_ms_remove()
617 host->req->error = -ETIME; tifm_ms_remove()
620 rc = memstick_next_req(msh, &host->req); tifm_ms_remove()
622 host->req->error = -ETIME; tifm_ms_remove()
H A Djmb38x_ms.c62 struct memstick_request *req; member in struct:jmb38x_ms_host
309 if (host->req->long_data) { jmb38x_ms_transfer_data()
310 length = host->req->sg.length - host->block_pos; jmb38x_ms_transfer_data()
311 off = host->req->sg.offset + host->block_pos; jmb38x_ms_transfer_data()
313 length = host->req->data_len - host->block_pos; jmb38x_ms_transfer_data()
320 if (host->req->long_data) { jmb38x_ms_transfer_data()
321 pg = nth_page(sg_page(&host->req->sg), jmb38x_ms_transfer_data()
330 buf = host->req->data + host->block_pos; jmb38x_ms_transfer_data()
331 p_cnt = host->req->data_len - host->block_pos; jmb38x_ms_transfer_data()
334 if (host->req->data_dir == WRITE) jmb38x_ms_transfer_data()
343 if (host->req->long_data) { jmb38x_ms_transfer_data()
355 if (!length && host->req->data_dir == WRITE) { jmb38x_ms_transfer_data()
375 host->req->error = -ETIME; jmb38x_ms_issue_cmd()
376 return host->req->error; jmb38x_ms_issue_cmd()
389 cmd = host->req->tpc << 16; jmb38x_ms_issue_cmd()
392 if (host->req->data_dir == READ) jmb38x_ms_issue_cmd()
395 if (host->req->need_card_int) { jmb38x_ms_issue_cmd()
402 data = host->req->data; jmb38x_ms_issue_cmd()
407 if (host->req->long_data) { jmb38x_ms_issue_cmd()
408 data_len = host->req->sg.length; jmb38x_ms_issue_cmd()
410 data_len = host->req->data_len; jmb38x_ms_issue_cmd()
422 if (1 != pci_map_sg(host->chip->pdev, &host->req->sg, 1, jmb38x_ms_issue_cmd()
423 host->req->data_dir == READ jmb38x_ms_issue_cmd()
426 host->req->error = -ENOMEM; jmb38x_ms_issue_cmd()
427 return host->req->error; jmb38x_ms_issue_cmd()
429 data_len = sg_dma_len(&host->req->sg); jmb38x_ms_issue_cmd()
430 writel(sg_dma_address(&host->req->sg), jmb38x_ms_issue_cmd()
441 t_val |= host->req->data_dir == READ jmb38x_ms_issue_cmd()
452 if (host->req->data_dir == WRITE) { jmb38x_ms_issue_cmd()
462 host->req->error = 0; jmb38x_ms_issue_cmd()
484 host->req->int_reg = readl(host->addr + STATUS) & 0xff; jmb38x_ms_complete_cmd()
490 pci_unmap_sg(host->chip->pdev, &host->req->sg, 1, jmb38x_ms_complete_cmd()
491 host->req->data_dir == READ jmb38x_ms_complete_cmd()
495 if (host->req->data_dir == READ) jmb38x_ms_complete_cmd()
509 rc = memstick_next_req(msh, &host->req); jmb38x_ms_complete_cmd()
513 rc = memstick_next_req(msh, &host->req); jmb38x_ms_complete_cmd()
515 host->req->error = -ETIME; jmb38x_ms_complete_cmd()
534 if (host->req) { jmb38x_ms_isr()
537 host->req->error = -EILSEQ; jmb38x_ms_isr()
542 host->req->error = -ETIME; jmb38x_ms_isr()
561 if (host->req->data_dir == READ) { jmb38x_ms_isr()
585 if (host->req jmb38x_ms_isr()
588 || host->req->error)) jmb38x_ms_isr()
603 if (host->req) { jmb38x_ms_abort()
604 host->req->error = -ETIME; jmb38x_ms_abort()
618 if (!host->req) { jmb38x_ms_req_tasklet()
620 rc = memstick_next_req(msh, &host->req); jmb38x_ms_req_tasklet()
621 dev_dbg(&host->chip->pdev->dev, "tasklet req %d\n", rc); jmb38x_ms_req_tasklet()
1013 if (host->req) { jmb38x_ms_remove()
1014 host->req->error = -ETIME; jmb38x_ms_remove()
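
Both memstick hosts above drive the same loop: memstick_next_req() hands out one request at a time, the host executes it and reports the result in mrq->error before asking for the next. A sketch of that loop; example_execute() is a hypothetical stand-in for the hardware-specific issue/complete path (tifm_ms_issue_cmd()/jmb38x_ms_issue_cmd() in the hits above):

#include <linux/memstick.h>

/* Hypothetical hardware path: issue mrq->tpc, move mrq->data or mrq->sg, wait. */
static int example_execute(struct memstick_host *msh,
                           struct memstick_request *mrq)
{
        return 0;
}

static void example_request_loop(struct memstick_host *msh)
{
        struct memstick_request *mrq = NULL;

        /* memstick_next_req() returns 0 while the core has another request;
         * the previous request's error is carried back in on the next call. */
        while (!memstick_next_req(msh, &mrq))
                mrq->error = example_execute(msh, mrq);
}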
/linux-4.1.27/arch/arm64/lib/
H A Dmemmove.S40 dstin .req x0
41 src .req x1
42 count .req x2
43 tmp1 .req x3
44 tmp1w .req w3
45 tmp2 .req x4
46 tmp2w .req w4
47 tmp3 .req x5
48 tmp3w .req w5
49 dst .req x6
51 A_l .req x7
52 A_h .req x8
53 B_l .req x9
54 B_h .req x10
55 C_l .req x11
56 C_h .req x12
57 D_l .req x13
58 D_h .req x14
H A Dstrlen.S38 srcin .req x0
39 len .req x0
42 src .req x1
43 data1 .req x2
44 data2 .req x3
45 data2a .req x4
46 has_nul1 .req x5
47 has_nul2 .req x6
48 tmp1 .req x7
49 tmp2 .req x8
50 tmp3 .req x9
51 tmp4 .req x10
52 zeroones .req x11
53 pos .req x12
/linux-4.1.27/drivers/net/wireless/iwlwifi/mvm/
H A Dscan.c168 * If req->n_ssids > 0, it means we should do an active scan.
175 * req->n_ssids - 1 bits in addition to the first bit.
393 static void iwl_scan_offload_build_ssid(struct cfg80211_sched_scan_request *req, iwl_scan_offload_build_ssid() argument
405 for (i = 0; i < req->n_match_sets && i < PROBE_OPTION_MAX; i++) { iwl_scan_offload_build_ssid()
407 if (!req->match_sets[i].ssid.ssid_len) iwl_scan_offload_build_ssid()
410 direct_scan[i].len = req->match_sets[i].ssid.ssid_len; iwl_scan_offload_build_ssid()
411 memcpy(direct_scan[i].ssid, req->match_sets[i].ssid.ssid, iwl_scan_offload_build_ssid()
417 for (j = 0; j < req->n_ssids && i < PROBE_OPTION_MAX; j++) { iwl_scan_offload_build_ssid()
418 index = iwl_ssid_exist(req->ssids[j].ssid, iwl_scan_offload_build_ssid()
419 req->ssids[j].ssid_len, iwl_scan_offload_build_ssid()
422 if (!req->ssids[j].ssid_len && basic_ssid) iwl_scan_offload_build_ssid()
425 direct_scan[i].len = req->ssids[j].ssid_len; iwl_scan_offload_build_ssid()
426 memcpy(direct_scan[i].ssid, req->ssids[j].ssid, iwl_scan_offload_build_ssid()
437 struct cfg80211_sched_scan_request *req) iwl_mvm_config_sched_scan_profiles()
452 if (WARN_ON(req->n_match_sets > IWL_SCAN_MAX_PROFILES)) iwl_mvm_config_sched_scan_profiles()
476 profile_cfg->num_profiles = req->n_match_sets; iwl_mvm_config_sched_scan_profiles()
480 if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len) iwl_mvm_config_sched_scan_profiles()
483 for (i = 0; i < req->n_match_sets; i++) { iwl_mvm_config_sched_scan_profiles()
505 struct cfg80211_sched_scan_request *req) iwl_mvm_scan_pass_all()
507 if (req->n_match_sets && req->match_sets[0].ssid.ssid_len) { iwl_mvm_scan_pass_all()
510 req->n_match_sets); iwl_mvm_scan_pass_all()
520 struct cfg80211_sched_scan_request *req, iwl_mvm_scan_offload_start()
526 ret = iwl_mvm_config_sched_scan_profiles(mvm, req); iwl_mvm_scan_offload_start()
529 ret = iwl_mvm_sched_scan_umac(mvm, vif, req, ies); iwl_mvm_scan_offload_start()
532 ret = iwl_mvm_config_sched_scan_profiles(mvm, req); iwl_mvm_scan_offload_start()
535 ret = iwl_mvm_unified_sched_scan_lmac(mvm, vif, req, ies); iwl_mvm_scan_offload_start()
785 struct ieee80211_scan_request *req) iwl_mvm_unified_scan_lmac()
809 if (req->req.n_ssids > PROBE_OPTION_MAX || iwl_mvm_unified_scan_lmac()
810 req->ies.common_ie_len + req->ies.len[NL80211_BAND_2GHZ] + iwl_mvm_unified_scan_lmac()
811 req->ies.len[NL80211_BAND_5GHZ] > iwl_mvm_unified_scan_lmac()
813 req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) iwl_mvm_unified_scan_lmac()
818 iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags, iwl_mvm_unified_scan_lmac()
823 cmd->n_channels = (u8)req->req.n_channels; iwl_mvm_unified_scan_lmac()
827 if (req->req.n_ssids == 1 && req->req.ssids[0].ssid_len != 0) iwl_mvm_unified_scan_lmac()
833 if (req->req.n_ssids == 0) iwl_mvm_unified_scan_lmac()
838 cmd->flags = iwl_mvm_scan_rxon_flags(req->req.channels[0]->band); iwl_mvm_unified_scan_lmac()
841 iwl_mvm_unified_scan_fill_tx_cmd(mvm, cmd->tx_cmd, req->req.no_cck); iwl_mvm_unified_scan_lmac()
842 iwl_mvm_scan_fill_ssids(cmd->direct_scan, req->req.ssids, iwl_mvm_unified_scan_lmac()
843 req->req.n_ssids, 0); iwl_mvm_unified_scan_lmac()
868 for (i = 1; i <= req->req.n_ssids; i++) iwl_mvm_unified_scan_lmac()
871 iwl_mvm_lmac_scan_cfg_channels(mvm, req->req.channels, iwl_mvm_unified_scan_lmac()
872 req->req.n_channels, ssid_bitmap, iwl_mvm_unified_scan_lmac()
878 iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, preq, iwl_mvm_unified_scan_lmac()
879 req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? iwl_mvm_unified_scan_lmac()
880 req->req.mac_addr : NULL, iwl_mvm_unified_scan_lmac()
881 req->req.mac_addr_mask); iwl_mvm_unified_scan_lmac()
901 struct cfg80211_sched_scan_request *req, iwl_mvm_unified_sched_scan_lmac()
925 if (req->n_ssids > PROBE_OPTION_MAX || iwl_mvm_unified_sched_scan_lmac()
929 req->n_channels > mvm->fw->ucode_capa.n_scan_channels) iwl_mvm_unified_sched_scan_lmac()
932 iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, 0, &params); iwl_mvm_unified_sched_scan_lmac()
936 cmd->n_channels = (u8)req->n_channels; iwl_mvm_unified_sched_scan_lmac()
938 cmd->delay = cpu_to_le32(req->delay); iwl_mvm_unified_sched_scan_lmac()
940 if (iwl_mvm_scan_pass_all(mvm, req)) iwl_mvm_unified_sched_scan_lmac()
945 if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) iwl_mvm_unified_sched_scan_lmac()
951 if (req->n_ssids == 0) iwl_mvm_unified_sched_scan_lmac()
961 cmd->flags = iwl_mvm_scan_rxon_flags(req->channels[0]->band); iwl_mvm_unified_sched_scan_lmac()
965 iwl_scan_offload_build_ssid(req, cmd->direct_scan, &ssid_bitmap, false); iwl_mvm_unified_sched_scan_lmac()
967 cmd->schedule[0].delay = cpu_to_le16(req->interval / MSEC_PER_SEC); iwl_mvm_unified_sched_scan_lmac()
971 cmd->schedule[1].delay = cpu_to_le16(req->interval / MSEC_PER_SEC); iwl_mvm_unified_sched_scan_lmac()
991 iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels, iwl_mvm_unified_sched_scan_lmac()
998 req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? iwl_mvm_unified_sched_scan_lmac()
999 req->mac_addr : NULL, iwl_mvm_unified_sched_scan_lmac()
1000 req->mac_addr_mask); iwl_mvm_unified_sched_scan_lmac()
1276 struct ieee80211_scan_request *req) iwl_mvm_scan_umac()
1303 if (WARN_ON(req->req.n_ssids > PROBE_OPTION_MAX || iwl_mvm_scan_umac()
1304 req->ies.common_ie_len + iwl_mvm_scan_umac()
1305 req->ies.len[NL80211_BAND_2GHZ] + iwl_mvm_scan_umac()
1306 req->ies.len[NL80211_BAND_5GHZ] + 24 + 2 > iwl_mvm_scan_umac()
1307 SCAN_OFFLOAD_PROBE_REQ_SIZE || req->req.n_channels > iwl_mvm_scan_umac()
1311 iwl_mvm_scan_calc_params(mvm, vif, req->req.n_ssids, req->req.flags, iwl_mvm_scan_umac()
1322 flags = iwl_mvm_scan_umac_common_flags(mvm, req->req.n_ssids, iwl_mvm_scan_umac()
1323 req->req.ssids, iwl_mvm_scan_umac()
1336 cmd->n_channels = req->req.n_channels; iwl_mvm_scan_umac()
1338 for (i = 0; i < req->req.n_ssids; i++) iwl_mvm_scan_umac()
1341 iwl_mvm_umac_scan_cfg_channels(mvm, req->req.channels, iwl_mvm_scan_umac()
1342 req->req.n_channels, ssid_bitmap, cmd); iwl_mvm_scan_umac()
1347 iwl_mvm_build_unified_scan_probe(mvm, vif, &req->ies, &sec_part->preq, iwl_mvm_scan_umac()
1348 req->req.flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? iwl_mvm_scan_umac()
1349 req->req.mac_addr : NULL, iwl_mvm_scan_umac()
1350 req->req.mac_addr_mask); iwl_mvm_scan_umac()
1352 iwl_mvm_scan_fill_ssids(sec_part->direct_scan, req->req.ssids, iwl_mvm_scan_umac()
1353 req->req.n_ssids, 0); iwl_mvm_scan_umac()
1371 struct cfg80211_sched_scan_request *req, iwl_mvm_sched_scan_umac()
1400 if (WARN_ON(req->n_ssids > PROBE_OPTION_MAX || iwl_mvm_sched_scan_umac()
1403 SCAN_OFFLOAD_PROBE_REQ_SIZE || req->n_channels > iwl_mvm_sched_scan_umac()
1407 iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, req->flags, iwl_mvm_sched_scan_umac()
1420 flags = iwl_mvm_scan_umac_common_flags(mvm, req->n_ssids, req->ssids, iwl_mvm_sched_scan_umac()
1425 if (iwl_mvm_scan_pass_all(mvm, req)) iwl_mvm_sched_scan_umac()
1438 cmd->n_channels = req->n_channels; iwl_mvm_sched_scan_umac()
1440 iwl_scan_offload_build_ssid(req, sec_part->direct_scan, &ssid_bitmap, iwl_mvm_sched_scan_umac()
1446 iwl_mvm_umac_scan_cfg_channels(mvm, req->channels, req->n_channels, iwl_mvm_sched_scan_umac()
1450 cpu_to_le16(req->interval / MSEC_PER_SEC); iwl_mvm_sched_scan_umac()
1453 if (req->delay > U16_MAX) { iwl_mvm_sched_scan_umac()
1458 sec_part->delay = cpu_to_le16(req->delay); iwl_mvm_sched_scan_umac()
1462 req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR ? iwl_mvm_sched_scan_umac()
1463 req->mac_addr : NULL, iwl_mvm_sched_scan_umac()
1464 req->mac_addr_mask); iwl_mvm_sched_scan_umac()
436 iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req) iwl_mvm_config_sched_scan_profiles() argument
504 iwl_mvm_scan_pass_all(struct iwl_mvm *mvm, struct cfg80211_sched_scan_request *req) iwl_mvm_scan_pass_all() argument
518 iwl_mvm_scan_offload_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) iwl_mvm_scan_offload_start() argument
783 iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_scan_request *req) iwl_mvm_unified_scan_lmac() argument
899 iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) iwl_mvm_unified_sched_scan_lmac() argument
1275 iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_scan_request *req) iwl_mvm_scan_umac() argument
1370 iwl_mvm_sched_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct cfg80211_sched_scan_request *req, struct ieee80211_scan_ies *ies) iwl_mvm_sched_scan_umac() argument
/linux-4.1.27/drivers/input/touchscreen/
H A Dad7877.c215 struct ser_req *req; ad7877_read() local
218 req = kzalloc(sizeof *req, GFP_KERNEL); ad7877_read()
219 if (!req) ad7877_read()
222 spi_message_init(&req->msg); ad7877_read()
224 req->command = (u16) (AD7877_WRITEADD(AD7877_REG_CTRL1) | ad7877_read()
226 req->xfer[0].tx_buf = &req->command; ad7877_read()
227 req->xfer[0].len = 2; ad7877_read()
228 req->xfer[0].cs_change = 1; ad7877_read()
230 req->xfer[1].rx_buf = &req->sample; ad7877_read()
231 req->xfer[1].len = 2; ad7877_read()
233 spi_message_add_tail(&req->xfer[0], &req->msg); ad7877_read()
234 spi_message_add_tail(&req->xfer[1], &req->msg); ad7877_read()
236 status = spi_sync(spi, &req->msg); ad7877_read()
237 ret = status ? : req->sample; ad7877_read()
239 kfree(req); ad7877_read()
246 struct ser_req *req; ad7877_write() local
249 req = kzalloc(sizeof *req, GFP_KERNEL); ad7877_write()
250 if (!req) ad7877_write()
253 spi_message_init(&req->msg); ad7877_write()
255 req->command = (u16) (AD7877_WRITEADD(reg) | (val & MAX_12BIT)); ad7877_write()
256 req->xfer[0].tx_buf = &req->command; ad7877_write()
257 req->xfer[0].len = 2; ad7877_write()
259 spi_message_add_tail(&req->xfer[0], &req->msg); ad7877_write()
261 status = spi_sync(spi, &req->msg); ad7877_write()
263 kfree(req); ad7877_write()
271 struct ser_req *req; ad7877_read_adc() local
276 req = kzalloc(sizeof *req, GFP_KERNEL); ad7877_read_adc()
277 if (!req) ad7877_read_adc()
280 spi_message_init(&req->msg); ad7877_read_adc()
283 req->ref_on = AD7877_WRITEADD(AD7877_REG_CTRL2) | ad7877_read_adc()
288 req->reset = AD7877_WRITEADD(AD7877_REG_CTRL1) | AD7877_MODE_NOC; ad7877_read_adc()
290 req->command = (u16) command; ad7877_read_adc()
292 req->xfer[0].tx_buf = &req->reset; ad7877_read_adc()
293 req->xfer[0].len = 2; ad7877_read_adc()
294 req->xfer[0].cs_change = 1; ad7877_read_adc()
296 req->xfer[1].tx_buf = &req->ref_on; ad7877_read_adc()
297 req->xfer[1].len = 2; ad7877_read_adc()
298 req->xfer[1].delay_usecs = ts->vref_delay_usecs; ad7877_read_adc()
299 req->xfer[1].cs_change = 1; ad7877_read_adc()
301 req->xfer[2].tx_buf = &req->command; ad7877_read_adc()
302 req->xfer[2].len = 2; ad7877_read_adc()
303 req->xfer[2].delay_usecs = ts->vref_delay_usecs; ad7877_read_adc()
304 req->xfer[2].cs_change = 1; ad7877_read_adc()
306 req->xfer[3].rx_buf = &req->sample; ad7877_read_adc()
307 req->xfer[3].len = 2; ad7877_read_adc()
308 req->xfer[3].cs_change = 1; ad7877_read_adc()
310 req->xfer[4].tx_buf = &ts->cmd_crtl2; /*REF OFF*/ ad7877_read_adc()
311 req->xfer[4].len = 2; ad7877_read_adc()
312 req->xfer[4].cs_change = 1; ad7877_read_adc()
314 req->xfer[5].tx_buf = &ts->cmd_crtl1; /*DEFAULT*/ ad7877_read_adc()
315 req->xfer[5].len = 2; ad7877_read_adc()
321 spi_message_add_tail(&req->xfer[i], &req->msg); ad7877_read_adc()
323 status = spi_sync(spi, &req->msg); ad7877_read_adc()
324 sample = req->sample; ad7877_read_adc()
326 kfree(req); ad7877_read_adc()
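
ad7877_read() above is the usual one-shot SPI request: the command word, the sample, the transfers and the message all live in one kzalloc'd structure so the buffers handed to spi_sync() are DMA-safe (stack buffers are not). A stripped-down sketch of the same read, with hypothetical example_* names:

#include <linux/spi/spi.h>
#include <linux/slab.h>

struct example_ser_req {
        u16                     command;
        u16                     sample;
        struct spi_message      msg;
        struct spi_transfer     xfer[2];
};

static int example_spi_read_word(struct spi_device *spi, u16 command, u16 *out)
{
        struct example_ser_req *req;
        int status;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        spi_message_init(&req->msg);

        req->command = command;
        req->xfer[0].tx_buf = &req->command;
        req->xfer[0].len = 2;
        req->xfer[0].cs_change = 1;     /* drop CS between command and read-back */
        spi_message_add_tail(&req->xfer[0], &req->msg);

        req->xfer[1].rx_buf = &req->sample;
        req->xfer[1].len = 2;
        spi_message_add_tail(&req->xfer[1], &req->msg);

        status = spi_sync(spi, &req->msg);
        if (!status)
                *out = req->sample;

        kfree(req);
        return status;
}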
/linux-4.1.27/drivers/media/usb/as102/
H A Das10x_cmd_stream.c38 sizeof(pcmd->body.add_pid_filter.req)); as10x_cmd_add_PID_filter()
41 pcmd->body.add_pid_filter.req.proc_id = as10x_cmd_add_PID_filter()
43 pcmd->body.add_pid_filter.req.pid = cpu_to_le16(filter->pid); as10x_cmd_add_PID_filter()
44 pcmd->body.add_pid_filter.req.stream_type = filter->type; as10x_cmd_add_PID_filter()
47 pcmd->body.add_pid_filter.req.idx = filter->idx; as10x_cmd_add_PID_filter()
49 pcmd->body.add_pid_filter.req.idx = 0xFF; as10x_cmd_add_PID_filter()
54 sizeof(pcmd->body.add_pid_filter.req) as10x_cmd_add_PID_filter()
95 sizeof(pcmd->body.del_pid_filter.req)); as10x_cmd_del_PID_filter()
98 pcmd->body.del_pid_filter.req.proc_id = as10x_cmd_del_PID_filter()
100 pcmd->body.del_pid_filter.req.pid = cpu_to_le16(pid_value); as10x_cmd_del_PID_filter()
105 sizeof(pcmd->body.del_pid_filter.req) as10x_cmd_del_PID_filter()
139 sizeof(pcmd->body.start_streaming.req)); as10x_cmd_start_streaming()
142 pcmd->body.start_streaming.req.proc_id = as10x_cmd_start_streaming()
148 sizeof(pcmd->body.start_streaming.req) as10x_cmd_start_streaming()
182 sizeof(pcmd->body.stop_streaming.req)); as10x_cmd_stop_streaming()
185 pcmd->body.stop_streaming.req.proc_id = as10x_cmd_stop_streaming()
191 sizeof(pcmd->body.stop_streaming.req) as10x_cmd_stop_streaming()
H A Das10x_cmd_cfg.c43 sizeof(pcmd->body.context.req)); as10x_cmd_get_context()
46 pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT); as10x_cmd_get_context()
47 pcmd->body.context.req.tag = cpu_to_le16(tag); as10x_cmd_get_context()
48 pcmd->body.context.req.type = cpu_to_le16(GET_CONTEXT_DATA); as10x_cmd_get_context()
54 sizeof(pcmd->body.context.req) as10x_cmd_get_context()
99 sizeof(pcmd->body.context.req)); as10x_cmd_set_context()
102 pcmd->body.context.req.proc_id = cpu_to_le16(CONTROL_PROC_CONTEXT); as10x_cmd_set_context()
103 /* pcmd->body.context.req.reg_val.mode initialization is not required */ as10x_cmd_set_context()
104 pcmd->body.context.req.reg_val.u.value32 = (__force u32)cpu_to_le32(value); as10x_cmd_set_context()
105 pcmd->body.context.req.tag = cpu_to_le16(tag); as10x_cmd_set_context()
106 pcmd->body.context.req.type = cpu_to_le16(SET_CONTEXT_DATA); as10x_cmd_set_context()
112 sizeof(pcmd->body.context.req) as10x_cmd_set_context()
153 sizeof(pcmd->body.cfg_change_mode.req)); as10x_cmd_eLNA_change_mode()
156 pcmd->body.cfg_change_mode.req.proc_id = as10x_cmd_eLNA_change_mode()
158 pcmd->body.cfg_change_mode.req.mode = mode; as10x_cmd_eLNA_change_mode()
163 sizeof(pcmd->body.cfg_change_mode.req) as10x_cmd_eLNA_change_mode()
H A Das10x_cmd.c37 sizeof(pcmd->body.turn_on.req)); as10x_cmd_turn_on()
40 pcmd->body.turn_on.req.proc_id = cpu_to_le16(CONTROL_PROC_TURNON); as10x_cmd_turn_on()
45 sizeof(pcmd->body.turn_on.req) + as10x_cmd_turn_on()
78 sizeof(pcmd->body.turn_off.req)); as10x_cmd_turn_off()
81 pcmd->body.turn_off.req.proc_id = cpu_to_le16(CONTROL_PROC_TURNOFF); as10x_cmd_turn_off()
87 sizeof(pcmd->body.turn_off.req) + HEADER_SIZE, as10x_cmd_turn_off()
120 sizeof(preq->body.set_tune.req)); as10x_cmd_set_tune()
123 preq->body.set_tune.req.proc_id = cpu_to_le16(CONTROL_PROC_SETTUNE); as10x_cmd_set_tune()
124 preq->body.set_tune.req.args.freq = (__force __u32)cpu_to_le32(ptune->freq); as10x_cmd_set_tune()
125 preq->body.set_tune.req.args.bandwidth = ptune->bandwidth; as10x_cmd_set_tune()
126 preq->body.set_tune.req.args.hier_select = ptune->hier_select; as10x_cmd_set_tune()
127 preq->body.set_tune.req.args.modulation = ptune->modulation; as10x_cmd_set_tune()
128 preq->body.set_tune.req.args.hierarchy = ptune->hierarchy; as10x_cmd_set_tune()
129 preq->body.set_tune.req.args.interleaving_mode = as10x_cmd_set_tune()
131 preq->body.set_tune.req.args.code_rate = ptune->code_rate; as10x_cmd_set_tune()
132 preq->body.set_tune.req.args.guard_interval = ptune->guard_interval; as10x_cmd_set_tune()
133 preq->body.set_tune.req.args.transmission_mode = as10x_cmd_set_tune()
140 sizeof(preq->body.set_tune.req) as10x_cmd_set_tune()
175 sizeof(preq->body.get_tune_status.req)); as10x_cmd_get_tune_status()
178 preq->body.get_tune_status.req.proc_id = as10x_cmd_get_tune_status()
186 sizeof(preq->body.get_tune_status.req) + HEADER_SIZE, as10x_cmd_get_tune_status()
227 sizeof(pcmd->body.get_tps.req)); as10x_cmd_get_tps()
230 pcmd->body.get_tune_status.req.proc_id = as10x_cmd_get_tps()
237 sizeof(pcmd->body.get_tps.req) + as10x_cmd_get_tps()
286 sizeof(pcmd->body.get_demod_stats.req)); as10x_cmd_get_demod_stats()
289 pcmd->body.get_demod_stats.req.proc_id = as10x_cmd_get_demod_stats()
296 sizeof(pcmd->body.get_demod_stats.req) as10x_cmd_get_demod_stats()
346 sizeof(pcmd->body.get_impulse_rsp.req)); as10x_cmd_get_impulse_resp()
349 pcmd->body.get_impulse_rsp.req.proc_id = as10x_cmd_get_impulse_resp()
356 sizeof(pcmd->body.get_impulse_rsp.req) as10x_cmd_get_impulse_resp()
/linux-4.1.27/include/linux/
H A Dnfs_page.h52 struct nfs_page *wb_head; /* head pointer for req list */
112 #define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
137 struct nfs_page *req);
139 extern void nfs_unlock_request(struct nfs_page *req);
150 nfs_lock_request(struct nfs_page *req) nfs_lock_request() argument
152 return !test_and_set_bit(PG_BUSY, &req->wb_flags); nfs_lock_request()
157 * @req: request
161 nfs_list_add_request(struct nfs_page *req, struct list_head *head) nfs_list_add_request() argument
163 list_add_tail(&req->wb_list, head); nfs_list_add_request()
169 * @req: request
172 nfs_list_remove_request(struct nfs_page *req) nfs_list_remove_request() argument
174 if (list_empty(&req->wb_list)) nfs_list_remove_request()
176 list_del_init(&req->wb_list); nfs_list_remove_request()
186 loff_t req_offset(struct nfs_page *req) req_offset() argument
188 return (((loff_t)req->wb_index) << PAGE_CACHE_SHIFT) + req->wb_offset; req_offset()
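
req_offset() above converts a request's page index and intra-page offset into an absolute file offset. A small sketch using it to print the byte range each queued request covers; wb_bytes (the request length field of struct nfs_page) is assumed from the header and does not appear in the hits above:

#include <linux/nfs_page.h>
#include <linux/printk.h>

static void example_dump_ranges(struct list_head *head)
{
        struct nfs_page *req;

        /* head is a private list built with nfs_list_add_request() above. */
        list_for_each_entry(req, head, wb_list) {
                loff_t start = req_offset(req);          /* (wb_index << PAGE_CACHE_SHIFT) + wb_offset */
                loff_t end   = start + req->wb_bytes;    /* assumed length field */

                pr_debug("req %p: [%lld, %lld)\n", req, start, end);
        }
}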
/linux-4.1.27/arch/s390/include/asm/
H A Dpci_io.h36 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
40 rc = zpci_load(&data, req, ZPCI_OFFSET(addr)); \
51 u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, LENGTH); \
54 zpci_store(data, req, ZPCI_OFFSET(addr)); \
66 static inline int zpci_write_single(u64 req, const u64 *data, u64 offset, u8 len) zpci_write_single() argument
87 return zpci_store(val, req, offset); zpci_write_single()
90 static inline int zpci_read_single(u64 req, u64 *dst, u64 offset, u8 len) zpci_read_single() argument
95 cc = zpci_load(&data, req, offset); zpci_read_single()
117 static inline int zpci_write_block(u64 req, const u64 *data, u64 offset) zpci_write_block() argument
119 return zpci_store_block(data, req, offset); zpci_write_block()
139 u64 req, offset = ZPCI_OFFSET(src); zpci_memcpy_fromio() local
145 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size); zpci_memcpy_fromio()
146 rc = zpci_read_single(req, dst, offset, size); zpci_memcpy_fromio()
160 u64 req, offset = ZPCI_OFFSET(dst); zpci_memcpy_toio() local
169 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size); zpci_memcpy_toio()
172 rc = zpci_write_block(req, src, offset); zpci_memcpy_toio()
174 rc = zpci_write_single(req, src, offset, size); zpci_memcpy_toio()
/linux-4.1.27/drivers/net/ethernet/broadcom/bnx2x/
H A Dbnx2x_vfpf.c53 first_tlv->resp_msg_offset = sizeof(bp->vf2pf_mbox->req); bnx2x_vfpf_prep()
224 struct vfpf_acquire_tlv *req = &bp->vf2pf_mbox->req.acquire; bnx2x_vfpf_acquire() local
232 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_ACQUIRE, sizeof(*req)); bnx2x_vfpf_acquire()
239 req->vfdev_info.vf_id = vf_id; bnx2x_vfpf_acquire()
240 req->vfdev_info.vf_os = 0; bnx2x_vfpf_acquire()
241 req->vfdev_info.fp_hsi_ver = ETH_FP_HSI_VERSION; bnx2x_vfpf_acquire()
243 req->resc_request.num_rxqs = rx_count; bnx2x_vfpf_acquire()
244 req->resc_request.num_txqs = tx_count; bnx2x_vfpf_acquire()
245 req->resc_request.num_sbs = bp->igu_sb_cnt; bnx2x_vfpf_acquire()
246 req->resc_request.num_mac_filters = VF_ACQUIRE_MAC_FILTERS; bnx2x_vfpf_acquire()
247 req->resc_request.num_mc_filters = VF_ACQUIRE_MC_FILTERS; bnx2x_vfpf_acquire()
250 req->bulletin_addr = bp->pf2vf_bulletin_mapping; bnx2x_vfpf_acquire()
253 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, bnx2x_vfpf_acquire()
257 req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN; bnx2x_vfpf_acquire()
260 bnx2x_add_tlv(bp, req, bnx2x_vfpf_acquire()
261 req->first_tlv.tl.length + sizeof(struct channel_tlv), bnx2x_vfpf_acquire()
266 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_acquire()
298 req->resc_request.num_txqs = bnx2x_vfpf_acquire()
299 min(req->resc_request.num_txqs, bnx2x_vfpf_acquire()
301 req->resc_request.num_rxqs = bnx2x_vfpf_acquire()
302 min(req->resc_request.num_rxqs, bnx2x_vfpf_acquire()
304 req->resc_request.num_sbs = bnx2x_vfpf_acquire()
305 min(req->resc_request.num_sbs, bnx2x_vfpf_acquire()
307 req->resc_request.num_mac_filters = bnx2x_vfpf_acquire()
308 min(req->resc_request.num_mac_filters, bnx2x_vfpf_acquire()
310 req->resc_request.num_vlan_filters = bnx2x_vfpf_acquire()
311 min(req->resc_request.num_vlan_filters, bnx2x_vfpf_acquire()
313 req->resc_request.num_mc_filters = bnx2x_vfpf_acquire()
314 min(req->resc_request.num_mc_filters, bnx2x_vfpf_acquire()
355 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_acquire()
385 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_acquire()
391 struct vfpf_release_tlv *req = &bp->vf2pf_mbox->req.release; bnx2x_vfpf_release() local
396 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_RELEASE, sizeof(*req)); bnx2x_vfpf_release()
403 req->vf_id = vf_id; bnx2x_vfpf_release()
406 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_release()
410 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_release()
430 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_release()
438 struct vfpf_init_tlv *req = &bp->vf2pf_mbox->req.init; bnx2x_vfpf_init() local
443 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_INIT, sizeof(*req)); bnx2x_vfpf_init()
447 req->sb_addr[i] = (dma_addr_t)bnx2x_fp(bp, i, bnx2x_vfpf_init()
451 req->stats_addr = bp->fw_stats_data_mapping + bnx2x_vfpf_init()
454 req->stats_stride = sizeof(struct per_queue_stats); bnx2x_vfpf_init()
457 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_init()
461 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_init()
476 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_init()
484 struct vfpf_close_tlv *req = &bp->vf2pf_mbox->req.close; bnx2x_vfpf_close_vf() local
503 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_CLOSE, sizeof(*req)); bnx2x_vfpf_close_vf()
505 req->vf_id = vf_id; bnx2x_vfpf_close_vf()
508 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_close_vf()
512 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_close_vf()
523 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_close_vf()
587 struct vfpf_setup_q_tlv *req = &bp->vf2pf_mbox->req.setup_q; bnx2x_vfpf_setup_q() local
594 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req)); bnx2x_vfpf_setup_q()
614 req->vf_qid = fp_idx; bnx2x_vfpf_setup_q()
615 req->param_valid = VFPF_RXQ_VALID | VFPF_TXQ_VALID; bnx2x_vfpf_setup_q()
618 req->rxq.rcq_addr = fp->rx_comp_mapping; bnx2x_vfpf_setup_q()
619 req->rxq.rcq_np_addr = fp->rx_comp_mapping + BCM_PAGE_SIZE; bnx2x_vfpf_setup_q()
620 req->rxq.rxq_addr = fp->rx_desc_mapping; bnx2x_vfpf_setup_q()
621 req->rxq.sge_addr = fp->rx_sge_mapping; bnx2x_vfpf_setup_q()
622 req->rxq.vf_sb = fp_idx; bnx2x_vfpf_setup_q()
623 req->rxq.sb_index = HC_INDEX_ETH_RX_CQ_CONS; bnx2x_vfpf_setup_q()
624 req->rxq.hc_rate = bp->rx_ticks ? 1000000/bp->rx_ticks : 0; bnx2x_vfpf_setup_q()
625 req->rxq.mtu = bp->dev->mtu; bnx2x_vfpf_setup_q()
626 req->rxq.buf_sz = fp->rx_buf_size; bnx2x_vfpf_setup_q()
627 req->rxq.sge_buf_sz = BCM_PAGE_SIZE * PAGES_PER_SGE; bnx2x_vfpf_setup_q()
628 req->rxq.tpa_agg_sz = tpa_agg_size; bnx2x_vfpf_setup_q()
629 req->rxq.max_sge_pkt = SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT; bnx2x_vfpf_setup_q()
630 req->rxq.max_sge_pkt = ((req->rxq.max_sge_pkt + PAGES_PER_SGE - 1) & bnx2x_vfpf_setup_q()
632 req->rxq.flags = flags; bnx2x_vfpf_setup_q()
633 req->rxq.drop_flags = 0; bnx2x_vfpf_setup_q()
634 req->rxq.cache_line_log = BNX2X_RX_ALIGN_SHIFT; bnx2x_vfpf_setup_q()
635 req->rxq.stat_id = -1; /* No stats at the moment */ bnx2x_vfpf_setup_q()
638 req->txq.txq_addr = fp->txdata_ptr[FIRST_TX_COS_INDEX]->tx_desc_mapping; bnx2x_vfpf_setup_q()
639 req->txq.vf_sb = fp_idx; bnx2x_vfpf_setup_q()
640 req->txq.sb_index = HC_INDEX_ETH_TX_CQ_CONS_COS0; bnx2x_vfpf_setup_q()
641 req->txq.hc_rate = bp->tx_ticks ? 1000000/bp->tx_ticks : 0; bnx2x_vfpf_setup_q()
642 req->txq.flags = flags; bnx2x_vfpf_setup_q()
643 req->txq.traffic_type = LLFC_TRAFFIC_TYPE_NW; bnx2x_vfpf_setup_q()
646 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_setup_q()
650 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_setup_q()
663 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_setup_q()
670 struct vfpf_q_op_tlv *req = &bp->vf2pf_mbox->req.q_op; bnx2x_vfpf_teardown_queue() local
675 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_TEARDOWN_Q, bnx2x_vfpf_teardown_queue()
676 sizeof(*req)); bnx2x_vfpf_teardown_queue()
678 req->vf_qid = qidx; bnx2x_vfpf_teardown_queue()
681 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_teardown_queue()
685 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_teardown_queue()
703 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_teardown_queue()
711 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; bnx2x_vfpf_config_mac() local
717 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, bnx2x_vfpf_config_mac()
718 sizeof(*req)); bnx2x_vfpf_config_mac()
720 req->flags = VFPF_SET_Q_FILTERS_MAC_VLAN_CHANGED; bnx2x_vfpf_config_mac()
721 req->vf_qid = vf_qid; bnx2x_vfpf_config_mac()
722 req->n_mac_vlan_filters = 1; bnx2x_vfpf_config_mac()
724 req->filters[0].flags = VFPF_Q_FILTER_DEST_MAC_VALID; bnx2x_vfpf_config_mac()
726 req->filters[0].flags |= VFPF_Q_FILTER_SET_MAC; bnx2x_vfpf_config_mac()
732 memcpy(req->filters[0].mac, addr, ETH_ALEN); bnx2x_vfpf_config_mac()
735 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_config_mac()
739 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_config_mac()
759 memcpy(req->filters[0].mac, bp->dev->dev_addr, bnx2x_vfpf_config_mac()
776 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_config_mac()
786 struct vfpf_rss_tlv *req = &bp->vf2pf_mbox->req.update_rss; bnx2x_vfpf_config_rss() local
790 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_UPDATE_RSS, bnx2x_vfpf_config_rss()
791 sizeof(*req)); bnx2x_vfpf_config_rss()
794 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_config_rss()
797 memcpy(req->ind_table, params->ind_table, T_ETH_INDIRECTION_TABLE_SIZE); bnx2x_vfpf_config_rss()
798 memcpy(req->rss_key, params->rss_key, sizeof(params->rss_key)); bnx2x_vfpf_config_rss()
799 req->ind_table_size = T_ETH_INDIRECTION_TABLE_SIZE; bnx2x_vfpf_config_rss()
800 req->rss_key_size = T_ETH_RSS_KEY; bnx2x_vfpf_config_rss()
801 req->rss_result_mask = params->rss_result_mask; bnx2x_vfpf_config_rss()
805 req->rss_flags |= VFPF_RSS_MODE_DISABLED; bnx2x_vfpf_config_rss()
807 req->rss_flags |= VFPF_RSS_MODE_REGULAR; bnx2x_vfpf_config_rss()
809 req->rss_flags |= VFPF_RSS_SET_SRCH; bnx2x_vfpf_config_rss()
811 req->rss_flags |= VFPF_RSS_IPV4; bnx2x_vfpf_config_rss()
813 req->rss_flags |= VFPF_RSS_IPV4_TCP; bnx2x_vfpf_config_rss()
815 req->rss_flags |= VFPF_RSS_IPV4_UDP; bnx2x_vfpf_config_rss()
817 req->rss_flags |= VFPF_RSS_IPV6; bnx2x_vfpf_config_rss()
819 req->rss_flags |= VFPF_RSS_IPV6_TCP; bnx2x_vfpf_config_rss()
821 req->rss_flags |= VFPF_RSS_IPV6_UDP; bnx2x_vfpf_config_rss()
823 DP(BNX2X_MSG_IOV, "rss flags %x\n", req->rss_flags); bnx2x_vfpf_config_rss()
826 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_config_rss()
845 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_config_rss()
853 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; bnx2x_vfpf_set_mcast() local
864 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, bnx2x_vfpf_set_mcast()
865 sizeof(*req)); bnx2x_vfpf_set_mcast()
873 memcpy(req->multicast[i], bnx2x_mc_addr(ha), ETH_ALEN); netdev_for_each_mc_addr()
887 req->n_multicast = i;
888 req->flags |= VFPF_SET_Q_FILTERS_MULTICAST_CHANGED;
889 req->vf_qid = 0;
892 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END,
896 bnx2x_dp_tlv_list(bp, req);
909 bnx2x_vfpf_finalize(bp, &req->first_tlv);
917 struct vfpf_set_q_filters_tlv *req = &bp->vf2pf_mbox->req.set_q_filters; bnx2x_vfpf_storm_rx_mode() local
922 bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SET_Q_FILTERS, bnx2x_vfpf_storm_rx_mode()
923 sizeof(*req)); bnx2x_vfpf_storm_rx_mode()
929 req->rx_mask = VFPF_RX_MASK_ACCEPT_NONE; bnx2x_vfpf_storm_rx_mode()
934 req->rx_mask = VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST; bnx2x_vfpf_storm_rx_mode()
935 req->rx_mask |= VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST; bnx2x_vfpf_storm_rx_mode()
936 req->rx_mask |= VFPF_RX_MASK_ACCEPT_BROADCAST; bnx2x_vfpf_storm_rx_mode()
939 req->flags |= VFPF_SET_Q_FILTERS_RX_MASK_CHANGED; bnx2x_vfpf_storm_rx_mode()
940 req->vf_qid = 0; bnx2x_vfpf_storm_rx_mode()
943 bnx2x_add_tlv(bp, req, req->first_tlv.tl.length, CHANNEL_TLV_LIST_END, bnx2x_vfpf_storm_rx_mode()
947 bnx2x_dp_tlv_list(bp, req); bnx2x_vfpf_storm_rx_mode()
958 bnx2x_vfpf_finalize(bp, &req->first_tlv); bnx2x_vfpf_storm_rx_mode()
1264 if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
1305 if (bnx2x_search_tlv_list(bp, &mbx->msg->req, bnx2x_vf_mbx_acquire_chk_dorq()
1310 if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) bnx2x_vf_mbx_acquire_chk_dorq()
1320 struct vfpf_acquire_tlv *acquire = &mbx->msg->req.acquire; bnx2x_vf_mbx_acquire()
1346 if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire)) bnx2x_vf_mbx_acquire()
1381 struct vfpf_init_tlv *init = &mbx->msg->req.init; bnx2x_vf_mbx_init_vf()
1433 struct vfpf_setup_q_tlv *setup_q = &mbx->msg->req.setup_q; bnx2x_vf_mbx_setup_q()
1638 &BP_VF_MBX(bp, vf->index)->msg->req.set_q_filters; bnx2x_vf_mbx_qfilters()
1790 struct vfpf_set_q_filters_tlv *filters = &mbx->msg->req.set_q_filters; bnx2x_vf_mbx_set_q_filters()
1816 int qid = mbx->msg->req.q_op.vf_qid; bnx2x_vf_mbx_teardown_q()
1852 struct vfpf_rss_tlv *rss_tlv = &mbx->msg->req.update_rss; bnx2x_vf_mbx_update_rss()
1936 struct vfpf_tpa_tlv *tpa_tlv = &mbx->msg->req.update_tpa; bnx2x_vf_mbx_update_tpa()
2029 mbx->msg->req.tlv_buf_size.tlv_buffer[i]); bnx2x_vf_mbx_request()
2120 mbx->first_tlv = mbx->msg->req.first_tlv; for_each_vf()
/linux-4.1.27/drivers/usb/gadget/function/
H A Df_printer.c237 struct usb_request *req; printer_req_alloc() local
239 req = usb_ep_alloc_request(ep, gfp_flags); printer_req_alloc()
241 if (req != NULL) { printer_req_alloc()
242 req->length = len; printer_req_alloc()
243 req->buf = kmalloc(len, gfp_flags); printer_req_alloc()
244 if (req->buf == NULL) { printer_req_alloc()
245 usb_ep_free_request(ep, req); printer_req_alloc()
250 return req; printer_req_alloc()
254 printer_req_free(struct usb_ep *ep, struct usb_request *req) printer_req_free() argument
256 if (ep != NULL && req != NULL) { printer_req_free()
257 kfree(req->buf); printer_req_free()
258 usb_ep_free_request(ep, req); printer_req_free()
264 static void rx_complete(struct usb_ep *ep, struct usb_request *req) rx_complete() argument
267 int status = req->status; rx_complete()
272 	list_del_init(&req->list);	/* Remove from Active List */ rx_complete()
278 if (req->actual > 0) { rx_complete()
279 list_add_tail(&req->list, &dev->rx_buffers); rx_complete()
280 DBG(dev, "G_Printer : rx length %d\n", req->actual); rx_complete()
282 list_add(&req->list, &dev->rx_reqs); rx_complete()
290 list_add(&req->list, &dev->rx_reqs); rx_complete()
296 list_add(&req->list, &dev->rx_reqs); rx_complete()
305 list_add(&req->list, &dev->rx_reqs); rx_complete()
313 static void tx_complete(struct usb_ep *ep, struct usb_request *req) tx_complete() argument
317 switch (req->status) { tx_complete()
319 VDBG(dev, "tx err %d\n", req->status); tx_complete()
332 list_del_init(&req->list); tx_complete()
333 list_add(&req->list, &dev->tx_reqs); tx_complete()
390 struct usb_request *req; setup_rx_reqs() local
395 req = container_of(dev->rx_reqs.next, setup_rx_reqs()
397 list_del_init(&req->list); setup_rx_reqs()
405 req->length = USB_BUFSIZE; setup_rx_reqs()
406 req->complete = rx_complete; setup_rx_reqs()
410 error = usb_ep_queue(dev->out_ep, req, GFP_ATOMIC); setup_rx_reqs()
414 list_add(&req->list, &dev->rx_reqs); setup_rx_reqs()
417 /* if the req is empty, then add it into dev->rx_reqs_active. */ setup_rx_reqs()
418 else if (list_empty(&req->list)) setup_rx_reqs()
419 list_add(&req->list, &dev->rx_reqs_active); setup_rx_reqs()
430 struct usb_request *req; printer_read() local
491 req = container_of(dev->rx_buffers.next, printer_read()
493 list_del_init(&req->list); printer_read()
495 if (req->actual && req->buf) { printer_read()
496 current_rx_req = req; printer_read()
497 current_rx_bytes = req->actual; printer_read()
498 current_rx_buf = req->buf; printer_read()
500 list_add(&req->list, &dev->rx_reqs); printer_read()
566 struct usb_request *req; printer_write() local
606 req = container_of(dev->tx_reqs.next, struct usb_request, printer_write()
608 list_del_init(&req->list); printer_write()
610 req->complete = tx_complete; printer_write()
611 req->length = size; printer_write()
616 req->zero = 0; printer_write()
621 req->zero = ((len % dev->in_ep->maxpacket) == 0); printer_write()
626 if (copy_from_user(req->buf, buf, size)) { printer_write()
627 list_add(&req->list, &dev->tx_reqs); printer_write()
638 /* We've disconnected or reset so free the req and buffer */ printer_write()
640 list_add(&req->list, &dev->tx_reqs); printer_write()
646 if (usb_ep_queue(dev->in_ep, req, GFP_ATOMIC)) { printer_write()
647 list_add(&req->list, &dev->tx_reqs); printer_write()
653 list_add(&req->list, &dev->tx_reqs_active); printer_write()
845 struct usb_request *req; printer_soft_reset() local
863 req = container_of(dev->rx_buffers.next, struct usb_request, printer_soft_reset()
865 list_del_init(&req->list); printer_soft_reset()
866 list_add(&req->list, &dev->rx_reqs); printer_soft_reset()
870 req = container_of(dev->rx_buffers.next, struct usb_request, printer_soft_reset()
872 list_del_init(&req->list); printer_soft_reset()
873 list_add(&req->list, &dev->rx_reqs); printer_soft_reset()
877 req = container_of(dev->tx_reqs_active.next, printer_soft_reset()
879 list_del_init(&req->list); printer_soft_reset()
880 list_add(&req->list, &dev->tx_reqs); printer_soft_reset()
939 struct usb_request *req = cdev->req; printer_func_setup() local
945 DBG(dev, "ctrl req%02x.%02x v%04x i%04x l%d\n", printer_func_setup()
957 memcpy(req->buf, dev->pnp_string, value); printer_func_setup()
967 *(u8 *)req->buf = dev->printer_status; printer_func_setup()
989 "unknown ctrl req%02x.%02x v%04x i%04x l%d\n", printer_func_setup()
996 req->length = value; printer_func_setup()
997 req->zero = value < wLength; printer_func_setup()
998 value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); printer_func_setup()
1001 req->status = 0; printer_func_setup()
1016 struct usb_request *req; printer_func_bind() local
1061 req = printer_req_alloc(dev->in_ep, USB_BUFSIZE, GFP_KERNEL); printer_func_bind()
1062 if (!req) printer_func_bind()
1064 list_add(&req->list, &dev->tx_reqs); printer_func_bind()
1068 req = printer_req_alloc(dev->out_ep, USB_BUFSIZE, GFP_KERNEL); printer_func_bind()
1069 if (!req) printer_func_bind()
1071 list_add(&req->list, &dev->rx_reqs); printer_func_bind()
1103 req = container_of(dev->rx_reqs.next, struct usb_request, list); printer_func_bind()
1104 list_del(&req->list); printer_func_bind()
1105 printer_req_free(dev->out_ep, req); printer_func_bind()
1110 req = container_of(dev->tx_reqs.next, struct usb_request, list); printer_func_bind()
1111 list_del(&req->list); printer_func_bind()
1112 printer_req_free(dev->in_ep, req); printer_func_bind()
1336 struct usb_request *req; printer_func_unbind() local
1351 req = container_of(dev->tx_reqs.next, struct usb_request, printer_func_unbind()
1353 list_del(&req->list); printer_func_unbind()
1354 printer_req_free(dev->in_ep, req); printer_func_unbind()
1361 req = container_of(dev->rx_reqs.next, printer_func_unbind()
1363 list_del(&req->list); printer_func_unbind()
1364 printer_req_free(dev->out_ep, req); printer_func_unbind()
1368 req = container_of(dev->rx_buffers.next, printer_func_unbind()
1370 list_del(&req->list); printer_func_unbind()
1371 printer_req_free(dev->out_ep, req); printer_func_unbind()
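
The f_printer.c hits show the usual gadget request recycling: a completed OUT request is either moved to a list of filled buffers or put straight back on the free list for requeueing. A hedged sketch of that completion handler; struct example_dev and its two lists are hypothetical stand-ins for struct printer_dev in the listing:

#include <linux/usb/gadget.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct example_dev {
        spinlock_t       lock;
        struct list_head rx_filled;     /* requests carrying received data */
        struct list_head rx_idle;       /* requests ready to be requeued */
};

static void example_rx_complete(struct usb_ep *ep, struct usb_request *req)
{
        struct example_dev *dev = req->context;
        unsigned long flags;

        spin_lock_irqsave(&dev->lock, flags);
        list_del_init(&req->list);      /* remove from the active list */

        if (req->status == 0 && req->actual > 0)
                list_add_tail(&req->list, &dev->rx_filled);
        else
                list_add(&req->list, &dev->rx_idle);    /* error or empty: recycle */

        spin_unlock_irqrestore(&dev->lock, flags);
}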
H A Duvc_video.c62 uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video, uvc_video_encode_bulk() argument
65 void *mem = req->buf; uvc_video_encode_bulk()
84 req->length = video->req_size - len; uvc_video_encode_bulk()
85 req->zero = video->payload_size == video->max_payload_size; uvc_video_encode_bulk()
102 uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video, uvc_video_encode_isoc() argument
105 void *mem = req->buf; uvc_video_encode_isoc()
118 req->length = video->req_size - len; uvc_video_encode_isoc()
163 uvc_video_complete(struct usb_ep *ep, struct usb_request *req) uvc_video_complete() argument
165 struct uvc_video *video = req->context; uvc_video_complete()
171 switch (req->status) { uvc_video_complete()
182 req->status); uvc_video_complete()
194 video->encode(req, video, buf); uvc_video_complete()
196 if ((ret = usb_ep_queue(ep, req, GFP_ATOMIC)) < 0) { uvc_video_complete()
209 list_add_tail(&req->list, &video->req_free); uvc_video_complete()
219 if (video->req[i]) { uvc_video_free_requests()
220 usb_ep_free_request(video->ep, video->req[i]); uvc_video_free_requests()
221 video->req[i] = NULL; uvc_video_free_requests()
253 video->req[i] = usb_ep_alloc_request(video->ep, GFP_KERNEL); uvc_video_alloc_requests()
254 if (video->req[i] == NULL) uvc_video_alloc_requests()
257 video->req[i]->buf = video->req_buffer[i]; uvc_video_alloc_requests()
258 video->req[i]->length = 0; uvc_video_alloc_requests()
259 video->req[i]->complete = uvc_video_complete; uvc_video_alloc_requests()
260 video->req[i]->context = video; uvc_video_alloc_requests()
262 list_add_tail(&video->req[i]->list, &video->req_free); uvc_video_alloc_requests()
287 struct usb_request *req; uvcg_video_pump() local
305 req = list_first_entry(&video->req_free, struct usb_request, uvcg_video_pump()
307 list_del(&req->list); uvcg_video_pump()
320 video->encode(req, video, buf); uvcg_video_pump()
323 ret = usb_ep_queue(video->ep, req, GFP_ATOMIC); uvcg_video_pump()
335 list_add_tail(&req->list, &video->req_free); uvcg_video_pump()
356 if (video->req[i]) uvcg_video_enable()
357 usb_ep_dequeue(video->ep, video->req[i]); uvcg_video_enable()
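The printer_write(), printer_soft_reset() and uvc_video excerpts above all follow the same request-recycling shape: requests are pre-allocated onto a free list at bind time, a writer takes one, fills it and queues it on the endpoint, and the completion handler puts it back on the free list. Below is a minimal, userspace-compilable sketch of that shape; every name (fake_request, fake_dev, fake_tx_complete) is an invented stand-in, not the kernel's struct usb_request or list_head.

/*
 * Simplified sketch of the gadget request-recycling idiom shown above.
 * Stand-in types only; the real code uses struct usb_request, list_head,
 * usb_ep_queue() and a ->complete callback.
 */
#include <stdio.h>
#include <string.h>

struct fake_request {
	struct fake_request *next;	/* single-linked stand-in for list_head */
	char buf[64];
	size_t length;
};

struct fake_dev {
	struct fake_request *free_reqs;	/* stand-in for dev->tx_reqs */
};

/* Take a request off the free list, as printer_write() does before queueing. */
static struct fake_request *get_free_req(struct fake_dev *dev)
{
	struct fake_request *req = dev->free_reqs;

	if (req)
		dev->free_reqs = req->next;
	return req;
}

/* "Completion handler": return the request to the free list for reuse. */
static void fake_tx_complete(struct fake_dev *dev, struct fake_request *req)
{
	req->next = dev->free_reqs;
	dev->free_reqs = req;
}

int main(void)
{
	static struct fake_request pool[2];	/* printer_req_alloc() analog */
	struct fake_dev dev = { .free_reqs = NULL };

	for (int i = 0; i < 2; i++)
		fake_tx_complete(&dev, &pool[i]);	/* seed the free list */

	struct fake_request *req = get_free_req(&dev);
	if (req) {
		strcpy(req->buf, "payload");
		req->length = strlen(req->buf);
		/* ... usb_ep_queue() would submit it here ... */
		fake_tx_complete(&dev, req);		/* recycle on completion */
	}
	printf("requests back on free list: %s\n", dev.free_reqs ? "yes" : "no");
	return 0;
}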
/linux-4.1.27/drivers/scsi/csiostor/
H A Dcsio_scsi.c157 * @req: IO req structure.
163 csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr) csio_scsi_fcp_cmnd() argument
166 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); csio_scsi_fcp_cmnd()
178 if (req->nsge) csio_scsi_fcp_cmnd()
179 if (req->datadir == DMA_TO_DEVICE) csio_scsi_fcp_cmnd()
194 * @req: IO req structure.
201 csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size) csio_scsi_init_cmd_wr() argument
203 struct csio_hw *hw = req->lnode->hwp; csio_scsi_init_cmd_wr()
204 struct csio_rnode *rn = req->rnode; csio_scsi_init_cmd_wr()
215 wr->cookie = (uintptr_t) req; csio_scsi_init_cmd_wr()
216 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); csio_scsi_init_cmd_wr()
217 wr->tmo_val = (uint8_t) req->tmo; csio_scsi_init_cmd_wr()
222 dma_buf = &req->dma_buf; csio_scsi_init_cmd_wr()
236 csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr + csio_scsi_init_cmd_wr()
249 * @req: IO req structure.
255 csio_scsi_cmd(struct csio_ioreq *req) csio_scsi_cmd() argument
258 struct csio_hw *hw = req->lnode->hwp; csio_scsi_cmd()
262 req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); csio_scsi_cmd()
263 if (unlikely(req->drv_status != 0)) csio_scsi_cmd()
268 csio_scsi_init_cmd_wr(req, wrp.addr1, size); csio_scsi_cmd()
270 uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); csio_scsi_cmd()
276 csio_scsi_init_cmd_wr(req, (void *)tmpwr, size); csio_scsi_cmd()
285 * @req: IO request
290 csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req, csio_scsi_init_ultptx_dsgl() argument
299 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); csio_scsi_init_ultptx_dsgl()
302 ULPTX_NSGE_V(req->nsge)); csio_scsi_init_ultptx_dsgl()
304 if (likely(!req->dcopy)) { csio_scsi_init_ultptx_dsgl()
305 scsi_for_each_sg(scmnd, sgel, req->nsge, i) { csio_scsi_init_ultptx_dsgl()
328 list_for_each(tmp, &req->gen_list) { csio_scsi_init_ultptx_dsgl()
353 * @req: IO req structure.
360 csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size) csio_scsi_init_read_wr() argument
362 struct csio_hw *hw = req->lnode->hwp; csio_scsi_init_read_wr()
363 struct csio_rnode *rn = req->rnode; csio_scsi_init_read_wr()
368 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); csio_scsi_init_read_wr()
374 wr->cookie = (uintptr_t)req; csio_scsi_init_read_wr()
375 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); csio_scsi_init_read_wr()
376 wr->tmo_val = (uint8_t)(req->tmo); csio_scsi_init_read_wr()
381 dma_buf = &req->dma_buf; csio_scsi_init_read_wr()
393 csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp + csio_scsi_init_read_wr()
401 csio_scsi_init_ultptx_dsgl(hw, req, sgl); csio_scsi_init_read_wr()
406 * @req: IO req structure.
413 csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size) csio_scsi_init_write_wr() argument
415 struct csio_hw *hw = req->lnode->hwp; csio_scsi_init_write_wr()
416 struct csio_rnode *rn = req->rnode; csio_scsi_init_write_wr()
421 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); csio_scsi_init_write_wr()
427 wr->cookie = (uintptr_t)req; csio_scsi_init_write_wr()
428 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); csio_scsi_init_write_wr()
429 wr->tmo_val = (uint8_t)(req->tmo); csio_scsi_init_write_wr()
434 dma_buf = &req->dma_buf; csio_scsi_init_write_wr()
446 csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp + csio_scsi_init_write_wr()
454 csio_scsi_init_ultptx_dsgl(hw, req, sgl); csio_scsi_init_write_wr()
458 #define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm) \
464 if (unlikely((req)->nsge > 1)) \
466 (ALIGN(((req)->nsge - 1), 2) / 2)); \
472 * @req: IO req structure.
479 csio_scsi_read(struct csio_ioreq *req) csio_scsi_read() argument
483 struct csio_hw *hw = req->lnode->hwp; csio_scsi_read()
486 CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len); csio_scsi_read()
489 req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); csio_scsi_read()
490 if (likely(req->drv_status == 0)) { csio_scsi_read()
493 csio_scsi_init_read_wr(req, wrp.addr1, size); csio_scsi_read()
495 uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); csio_scsi_read()
500 csio_scsi_init_read_wr(req, (void *)tmpwr, size); csio_scsi_read()
509 * @req: IO req structure.
516 csio_scsi_write(struct csio_ioreq *req) csio_scsi_write() argument
520 struct csio_hw *hw = req->lnode->hwp; csio_scsi_write()
523 CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len); csio_scsi_write()
526 req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); csio_scsi_write()
527 if (likely(req->drv_status == 0)) { csio_scsi_write()
530 csio_scsi_init_write_wr(req, wrp.addr1, size); csio_scsi_write()
532 uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); csio_scsi_write()
537 csio_scsi_init_write_wr(req, (void *)tmpwr, size); csio_scsi_write()
546 * @req: IO req structure.
553 csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req) csio_setup_ddp() argument
556 struct csio_hw *hw = req->lnode->hwp; csio_setup_ddp()
559 struct scsi_cmnd *scmnd = csio_scsi_cmnd(req); csio_setup_ddp()
569 scsi_for_each_sg(scmnd, sgel, req->nsge, i) { csio_setup_ddp()
583 if ((i != (req->nsge - 1)) && csio_setup_ddp()
593 req->dcopy = 0; csio_setup_ddp()
594 csio_scsi_read(req); csio_setup_ddp()
604 req->dcopy = 1; csio_setup_ddp()
607 INIT_LIST_HEAD(&req->gen_list); csio_setup_ddp()
615 req->drv_status = -EBUSY; csio_setup_ddp()
619 /* Added to IO req */ csio_setup_ddp()
620 list_add_tail(&dma_buf->list, &req->gen_list); csio_setup_ddp()
624 if (!req->drv_status) { csio_setup_ddp()
626 req->nsge = i; csio_setup_ddp()
627 csio_scsi_read(req); csio_setup_ddp()
633 csio_put_scsi_ddp_list(scsim, &req->gen_list, i); csio_setup_ddp()
638 * @req: IO req structure.
646 csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size, csio_scsi_init_abrt_cls_wr() argument
649 struct csio_hw *hw = req->lnode->hwp; csio_scsi_init_abrt_cls_wr()
650 struct csio_rnode *rn = req->rnode; csio_scsi_init_abrt_cls_wr()
658 wr->cookie = (uintptr_t) req; csio_scsi_init_abrt_cls_wr()
659 wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx)); csio_scsi_init_abrt_cls_wr()
660 wr->tmo_val = (uint8_t) req->tmo; csio_scsi_init_abrt_cls_wr()
670 wr->t_cookie = (uintptr_t) req; csio_scsi_init_abrt_cls_wr()
674 csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort) csio_scsi_abrt_cls() argument
677 struct csio_hw *hw = req->lnode->hwp; csio_scsi_abrt_cls()
680 req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp); csio_scsi_abrt_cls()
681 if (req->drv_status != 0) csio_scsi_abrt_cls()
686 csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort); csio_scsi_abrt_cls()
688 uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx); csio_scsi_abrt_cls()
693 csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort); csio_scsi_abrt_cls()
703 csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt) csio_scsis_uninit() argument
705 struct csio_hw *hw = req->lnode->hwp; csio_scsis_uninit()
711 if (req->nsge) { csio_scsis_uninit()
712 if (req->datadir == DMA_TO_DEVICE) { csio_scsis_uninit()
713 req->dcopy = 0; csio_scsis_uninit()
714 csio_scsi_write(req); csio_scsis_uninit()
716 csio_setup_ddp(scsim, req); csio_scsis_uninit()
718 csio_scsi_cmd(req); csio_scsis_uninit()
721 if (likely(req->drv_status == 0)) { csio_scsis_uninit()
723 csio_set_state(&req->sm, csio_scsis_io_active); csio_scsis_uninit()
724 list_add_tail(&req->sm.sm_list, &scsim->active_q); csio_scsis_uninit()
725 csio_wr_issue(hw, req->eq_idx, false); csio_scsis_uninit()
733 csio_scsi_cmd(req); csio_scsis_uninit()
734 if (req->drv_status == 0) { csio_scsis_uninit()
743 csio_set_state(&req->sm, csio_scsis_tm_active); csio_scsis_uninit()
744 list_add_tail(&req->sm.sm_list, &scsim->active_q); csio_scsis_uninit()
745 csio_wr_issue(hw, req->eq_idx, false); csio_scsis_uninit()
762 req->drv_status = -EINVAL; csio_scsis_uninit()
763 csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req); csio_scsis_uninit()
767 csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); csio_scsis_uninit()
773 csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt) csio_scsis_io_active() argument
775 struct csio_hw *hw = req->lnode->hwp; csio_scsis_io_active()
782 list_del_init(&req->sm.sm_list); csio_scsis_io_active()
783 csio_set_state(&req->sm, csio_scsis_uninit); csio_scsis_io_active()
799 if (unlikely(req->wr_status != FW_SUCCESS)) { csio_scsis_io_active()
800 rn = req->rnode; csio_scsis_io_active()
805 if (csio_scsi_itnexus_loss_error(req->wr_status) && csio_scsis_io_active()
807 csio_set_state(&req->sm, csio_scsis_io_active()
809 list_add_tail(&req->sm.sm_list, csio_scsis_io_active()
817 csio_scsi_abrt_cls(req, SCSI_ABORT); csio_scsis_io_active()
818 if (req->drv_status == 0) { csio_scsis_io_active()
819 csio_wr_issue(hw, req->eq_idx, false); csio_scsis_io_active()
820 csio_set_state(&req->sm, csio_scsis_aborting); csio_scsis_io_active()
825 csio_scsi_abrt_cls(req, SCSI_CLOSE); csio_scsis_io_active()
826 if (req->drv_status == 0) { csio_scsis_io_active()
827 csio_wr_issue(hw, req->eq_idx, false); csio_scsis_io_active()
828 csio_set_state(&req->sm, csio_scsis_closing); csio_scsis_io_active()
833 req->wr_status = FW_HOSTERROR; csio_scsis_io_active()
835 csio_set_state(&req->sm, csio_scsis_uninit); csio_scsis_io_active()
839 csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); csio_scsis_io_active()
845 csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt) csio_scsis_tm_active() argument
847 struct csio_hw *hw = req->lnode->hwp; csio_scsis_tm_active()
853 list_del_init(&req->sm.sm_list); csio_scsis_tm_active()
854 csio_set_state(&req->sm, csio_scsis_uninit); csio_scsis_tm_active()
859 csio_scsi_abrt_cls(req, SCSI_ABORT); csio_scsis_tm_active()
860 if (req->drv_status == 0) { csio_scsis_tm_active()
861 csio_wr_issue(hw, req->eq_idx, false); csio_scsis_tm_active()
862 csio_set_state(&req->sm, csio_scsis_aborting); csio_scsis_tm_active()
868 csio_scsi_abrt_cls(req, SCSI_CLOSE); csio_scsis_tm_active()
869 if (req->drv_status == 0) { csio_scsis_tm_active()
870 csio_wr_issue(hw, req->eq_idx, false); csio_scsis_tm_active()
871 csio_set_state(&req->sm, csio_scsis_closing); csio_scsis_tm_active()
876 req->wr_status = FW_HOSTERROR; csio_scsis_tm_active()
878 csio_set_state(&req->sm, csio_scsis_uninit); csio_scsis_tm_active()
882 csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); csio_scsis_tm_active()
888 csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt) csio_scsis_aborting() argument
890 struct csio_hw *hw = req->lnode->hwp; csio_scsis_aborting()
897 "in aborting st\n", req, req->wr_status); csio_scsis_aborting()
907 req->drv_status = -ECANCELED; csio_scsis_aborting()
917 req, req->wr_status, req->drv_status); csio_scsis_aborting()
922 if (req->drv_status != -ECANCELED) { csio_scsis_aborting()
925 " req:%p\n", req); csio_scsis_aborting()
952 if ((req->wr_status == FW_SUCCESS) || csio_scsis_aborting()
953 (req->wr_status == FW_EINVAL) || csio_scsis_aborting()
954 csio_scsi_itnexus_loss_error(req->wr_status)) csio_scsis_aborting()
955 req->wr_status = FW_SCSI_ABORT_REQUESTED; csio_scsis_aborting()
958 list_del_init(&req->sm.sm_list); csio_scsis_aborting()
959 csio_set_state(&req->sm, csio_scsis_uninit); csio_scsis_aborting()
963 req->wr_status = FW_HOSTERROR; csio_scsis_aborting()
965 csio_set_state(&req->sm, csio_scsis_uninit); csio_scsis_aborting()
979 csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); csio_scsis_aborting()
985 csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt) csio_scsis_closing() argument
987 struct csio_hw *hw = req->lnode->hwp; csio_scsis_closing()
994 "in closing st\n", req, req->wr_status); csio_scsis_closing()
1004 req->drv_status = -ECANCELED; csio_scsis_closing()
1012 if (req->drv_status != -ECANCELED) { csio_scsis_closing()
1015 " req:%p\n", req); csio_scsis_closing()
1024 CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) || csio_scsis_closing()
1025 (req->wr_status == FW_EINVAL)); csio_scsis_closing()
1026 req->wr_status = FW_SCSI_CLOSE_REQUESTED; csio_scsis_closing()
1029 list_del_init(&req->sm.sm_list); csio_scsis_closing()
1030 csio_set_state(&req->sm, csio_scsis_uninit); csio_scsis_closing()
1037 req->wr_status = FW_HOSTERROR; csio_scsis_closing()
1039 csio_set_state(&req->sm, csio_scsis_uninit); csio_scsis_closing()
1043 csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req); csio_scsis_closing()
1049 csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt) csio_scsis_shost_cmpl_await() argument
1070 req->drv_status = 0; csio_scsis_shost_cmpl_await()
1073 csio_set_state(&req->sm, csio_scsis_uninit); csio_scsis_shost_cmpl_await()
1076 csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n", csio_scsis_shost_cmpl_await()
1077 evt, req); csio_scsis_shost_cmpl_await()
1489 csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req) csio_scsi_copy_to_sgl() argument
1491 struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); csio_scsi_copy_to_sgl()
1504 dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list); csio_scsi_copy_to_sgl()
1531 sg, req); csio_scsi_copy_to_sgl()
1554 * @req: IO request.
1558 csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) csio_scsi_err_handler() argument
1560 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); csio_scsi_err_handler()
1571 switch (req->wr_status) { csio_scsi_err_handler()
1581 dma_buf = &req->dma_buf; csio_scsi_err_handler()
1647 csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd, csio_scsi_err_handler()
1649 (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ? csio_scsi_err_handler()
1656 if (req->wr_status == FW_SCSI_CLOSE_REQUESTED) csio_scsi_err_handler()
1664 csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n", csio_scsi_err_handler()
1665 req, cmnd, req->wr_status); csio_scsi_err_handler()
1706 csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n", csio_scsi_err_handler()
1707 req->wr_status, req, cmnd); csio_scsi_err_handler()
1716 if (req->nsge > 0) csio_scsi_err_handler()
1723 csio_scsi_cmnd(req) = NULL; csio_scsi_err_handler()
1724 complete_all(&req->cmplobj); csio_scsi_err_handler()
1730 * @req: IO request.
1734 csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req) csio_scsi_cbfn() argument
1736 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); csio_scsi_cbfn()
1740 if (likely(req->wr_status == FW_SUCCESS)) { csio_scsi_cbfn()
1741 if (req->nsge > 0) { csio_scsi_cbfn()
1743 if (req->dcopy) csio_scsi_cbfn()
1744 host_status = csio_scsi_copy_to_sgl(hw, req); csio_scsi_cbfn()
1749 csio_scsi_cmnd(req) = NULL; csio_scsi_cbfn()
1753 csio_scsi_err_handler(hw, req); csio_scsi_cbfn()
1807 /* Get req->nsge, if there are SG elements to be mapped */ csio_queuecommand()
1974 csio_err(hw, "Abort timed out -- req: %p\n", ioreq); csio_eh_abort_handler()
2010 * @req: IO request.
2017 csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req) csio_tm_cbfn() argument
2019 struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); csio_tm_cbfn()
2025 csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n", csio_tm_cbfn()
2026 req, req->wr_status); csio_tm_cbfn()
2029 cmnd->SCp.Status = req->wr_status; csio_tm_cbfn()
2042 if (req->wr_status == FW_SCSI_RSP_ERR) { csio_tm_cbfn()
2043 dma_buf = &req->dma_buf; csio_tm_cbfn()
2058 csio_scsi_cmnd(req) = NULL; csio_tm_cbfn()
2155 csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n", csio_eh_lun_reset_handler()
/linux-4.1.27/drivers/net/wireless/ath/wil6210/
H A Drx_reorder.c257 struct wil_back_rx *req = kzalloc(sizeof(*req), GFP_KERNEL); wil_addba_rx_request() local
259 if (!req) wil_addba_rx_request()
262 req->cidxtid = cidxtid; wil_addba_rx_request()
263 req->dialog_token = dialog_token; wil_addba_rx_request()
264 req->ba_param_set = le16_to_cpu(ba_param_set); wil_addba_rx_request()
265 req->ba_timeout = le16_to_cpu(ba_timeout); wil_addba_rx_request()
266 req->ba_seq_ctrl = le16_to_cpu(ba_seq_ctrl); wil_addba_rx_request()
269 list_add_tail(&req->list, &wil->back_rx_pending); wil_addba_rx_request()
278 struct wil_back_rx *req)
289 u16 req_agg_wsize = WIL_GET_BITS(req->ba_param_set, 6, 15);
290 bool agg_amsdu = !!(req->ba_param_set & BIT(0));
291 int ba_policy = req->ba_param_set & BIT(1);
292 u16 agg_timeout = req->ba_timeout;
294 u16 ssn = req->ba_seq_ctrl >> 4;
299 parse_cidxtid(req->cidxtid, &cid, &tid);
315 cid, sta->addr, tid, req_agg_wsize, req->ba_timeout,
316 agg_amsdu ? "+" : "-", !!ba_policy, req->dialog_token, ssn);
326 rc = wmi_addba_rx_resp(wil, cid, tid, req->dialog_token, status,
389 struct wil_back_tx *req) wil_back_tx_handle()
391 struct vring_tx_data *txdata = &wil->vring_tx_data[req->ringid]; wil_back_tx_handle()
396 req->ringid); wil_back_tx_handle()
402 req->ringid, txdata->agg_wsize); wil_back_tx_handle()
406 rc = wmi_addba(wil, req->ringid, req->agg_wsize, req->agg_timeout); wil_back_tx_handle()
460 struct wil_back_tx *req = kzalloc(sizeof(*req), GFP_KERNEL); wil_addba_tx_request() local
462 if (!req) wil_addba_tx_request()
465 req->ringid = ringid; wil_addba_tx_request()
466 req->agg_wsize = wil_agg_size(wil, wsize); wil_addba_tx_request()
467 req->agg_timeout = 0; wil_addba_tx_request()
470 list_add_tail(&req->list, &wil->back_tx_pending); wil_addba_tx_request()
388 wil_back_tx_handle(struct wil6210_priv *wil, struct wil_back_tx *req) wil_back_tx_handle() argument
/linux-4.1.27/drivers/misc/sgi-gru/
H A Dgrukdump.c190 struct gru_dump_chiplet_state_req req; gru_dump_chiplet_request() local
195 if (copy_from_user(&req, (void __user *)arg, sizeof(req))) gru_dump_chiplet_request()
199 if (req.gid >= gru_max_gids || req.gid < 0) gru_dump_chiplet_request()
202 gru = GID_TO_GRU(req.gid); gru_dump_chiplet_request()
203 ubuf = req.buf; gru_dump_chiplet_request()
204 ubufend = req.buf + req.buflen; gru_dump_chiplet_request()
217 if (req.ctxnum == ctxnum || req.ctxnum < 0) { gru_dump_chiplet_request()
219 req.data_opt, req.lock_cch, gru_dump_chiplet_request()
220 req.flush_cbrs); gru_dump_chiplet_request()
228 if (copy_to_user((void __user *)arg, &req, sizeof(req))) gru_dump_chiplet_request()
/linux-4.1.27/arch/x86/crypto/
H A Dghash-clmulni-intel_glue.c165 static int ghash_async_init(struct ahash_request *req) ghash_async_init() argument
167 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ghash_async_init()
169 struct ahash_request *cryptd_req = ahash_request_ctx(req); ghash_async_init()
173 memcpy(cryptd_req, req, sizeof(*req)); ghash_async_init()
181 desc->flags = req->base.flags; ghash_async_init()
186 static int ghash_async_update(struct ahash_request *req) ghash_async_update() argument
188 struct ahash_request *cryptd_req = ahash_request_ctx(req); ghash_async_update()
191 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ghash_async_update()
195 memcpy(cryptd_req, req, sizeof(*req)); ghash_async_update()
200 return shash_ahash_update(req, desc); ghash_async_update()
204 static int ghash_async_final(struct ahash_request *req) ghash_async_final() argument
206 struct ahash_request *cryptd_req = ahash_request_ctx(req); ghash_async_final()
209 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ghash_async_final()
213 memcpy(cryptd_req, req, sizeof(*req)); ghash_async_final()
218 return crypto_shash_final(desc, req->result); ghash_async_final()
222 static int ghash_async_digest(struct ahash_request *req) ghash_async_digest() argument
224 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ghash_async_digest()
226 struct ahash_request *cryptd_req = ahash_request_ctx(req); ghash_async_digest()
230 memcpy(cryptd_req, req, sizeof(*req)); ghash_async_digest()
238 desc->flags = req->base.flags; ghash_async_digest()
239 return shash_ahash_digest(req, desc); ghash_async_digest()
/linux-4.1.27/arch/powerpc/perf/req-gen/
H A D_begin.h11 #define REQ_GEN_PREFIX req-gen
/linux-4.1.27/arch/m68k/mac/
H A Dmisc.c39 struct adb_request req; cuda_read_time() local
42 if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME) < 0) cuda_read_time()
44 while (!req.complete) cuda_read_time()
47 time = (req.reply[3] << 24) | (req.reply[4] << 16) cuda_read_time()
48 | (req.reply[5] << 8) | req.reply[6]; cuda_read_time()
54 struct adb_request req; cuda_write_time() local
56 if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME, cuda_write_time()
60 while (!req.complete) cuda_write_time()
66 struct adb_request req; cuda_read_pram() local
67 if (cuda_request(&req, NULL, 4, CUDA_PACKET, CUDA_GET_PRAM, cuda_read_pram()
70 while (!req.complete) cuda_read_pram()
72 return req.reply[3]; cuda_read_pram()
77 struct adb_request req; cuda_write_pram() local
78 if (cuda_request(&req, NULL, 5, CUDA_PACKET, CUDA_SET_PRAM, cuda_write_pram()
81 while (!req.complete) cuda_write_pram()
94 struct adb_request req; pmu_read_time() local
97 if (pmu_request(&req, NULL, 1, PMU_READ_RTC) < 0) pmu_read_time()
99 while (!req.complete) pmu_read_time()
102 time = (req.reply[1] << 24) | (req.reply[2] << 16) pmu_read_time()
103 | (req.reply[3] << 8) | req.reply[4]; pmu_read_time()
109 struct adb_request req; pmu_write_time() local
111 if (pmu_request(&req, NULL, 5, PMU_SET_RTC, pmu_write_time()
115 while (!req.complete) pmu_write_time()
121 struct adb_request req; pmu_read_pram() local
122 if (pmu_request(&req, NULL, 3, PMU_READ_NVRAM, pmu_read_pram()
125 while (!req.complete) pmu_read_pram()
127 return req.reply[3]; pmu_read_pram()
132 struct adb_request req; pmu_write_pram() local
133 if (pmu_request(&req, NULL, 4, PMU_WRITE_NVRAM, pmu_write_pram()
136 while (!req.complete) pmu_write_pram()
147 extern int maciisi_request(struct adb_request *req,
152 struct adb_request req;
155 if (maciisi_request(&req, NULL, 2, CUDA_PACKET, CUDA_GET_TIME))
158 time = (req.reply[3] << 24) | (req.reply[4] << 16)
159 | (req.reply[5] << 8) | req.reply[6];
165 struct adb_request req;
167 maciisi_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
174 struct adb_request req;
175 if (maciisi_request(&req, NULL, 4, CUDA_PACKET, CUDA_GET_PRAM,
178 return req.reply[3];
183 struct adb_request req;
184 maciisi_request(&req, NULL, 5, CUDA_PACKET, CUDA_SET_PRAM,
399 struct adb_request req; cuda_restart() local
400 if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM) < 0) cuda_restart()
402 while (!req.complete) cuda_restart()
408 struct adb_request req; cuda_shutdown() local
409 if (cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN) < 0) cuda_shutdown()
411 while (!req.complete) cuda_shutdown()
421 struct adb_request req; pmu_restart() local
422 if (pmu_request(&req, NULL, pmu_restart()
425 while (!req.complete) pmu_restart()
427 if (pmu_request(&req, NULL, 1, PMU_RESET) < 0) pmu_restart()
429 while (!req.complete) pmu_restart()
435 struct adb_request req; pmu_shutdown() local
436 if (pmu_request(&req, NULL, pmu_shutdown()
439 while (!req.complete) pmu_shutdown()
441 if (pmu_request(&req, NULL, 5, PMU_SHUTDOWN, 'M', 'A', 'T', 'T') < 0) pmu_shutdown()
443 while (!req.complete) pmu_shutdown()
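The arch/m68k/mac/misc.c excerpts use one synchronous pattern throughout: submit an adb_request, then poll until its ->complete flag is set before reading the reply. A minimal userspace analog of that wait loop is sketched below; fake_adb_submit() and fake_adb_poll() are invented stand-ins for cuda_request()/pmu_request() and cuda_poll()/pmu_poll().

/*
 * Submit-and-poll sketch: the "controller" replies a few polls after the
 * request is queued, and the caller spins on req.complete.
 */
#include <stdio.h>

struct fake_adb_request {
	int complete;		/* set once the "controller" has replied */
	unsigned char reply[8];
};

static struct fake_adb_request *pending;
static int polls_left;

static int fake_adb_submit(struct fake_adb_request *req)
{
	req->complete = 0;
	pending = req;
	polls_left = 3;
	return 0;
}

static void fake_adb_poll(void)
{
	if (pending && --polls_left == 0) {
		pending->reply[3] = 0x12;	/* fake RTC byte */
		pending->complete = 1;
		pending = NULL;
	}
}

int main(void)
{
	struct fake_adb_request req;

	if (fake_adb_submit(&req) < 0)
		return 1;
	while (!req.complete)	/* same shape as "while (!req.complete) cuda_poll();" */
		fake_adb_poll();

	printf("reply[3] = 0x%02x\n", req.reply[3]);
	return 0;
}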
/linux-4.1.27/arch/arm/crypto/
H A Dghash-ce-glue.c151 static int ghash_async_init(struct ahash_request *req) ghash_async_init() argument
153 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ghash_async_init()
155 struct ahash_request *cryptd_req = ahash_request_ctx(req); ghash_async_init()
159 memcpy(cryptd_req, req, sizeof(*req)); ghash_async_init()
167 desc->flags = req->base.flags; ghash_async_init()
172 static int ghash_async_update(struct ahash_request *req) ghash_async_update() argument
174 struct ahash_request *cryptd_req = ahash_request_ctx(req); ghash_async_update()
177 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ghash_async_update()
181 memcpy(cryptd_req, req, sizeof(*req)); ghash_async_update()
186 return shash_ahash_update(req, desc); ghash_async_update()
190 static int ghash_async_final(struct ahash_request *req) ghash_async_final() argument
192 struct ahash_request *cryptd_req = ahash_request_ctx(req); ghash_async_final()
195 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ghash_async_final()
199 memcpy(cryptd_req, req, sizeof(*req)); ghash_async_final()
204 return crypto_shash_final(desc, req->result); ghash_async_final()
208 static int ghash_async_digest(struct ahash_request *req) ghash_async_digest() argument
210 struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); ghash_async_digest()
212 struct ahash_request *cryptd_req = ahash_request_ctx(req); ghash_async_digest()
216 memcpy(cryptd_req, req, sizeof(*req)); ghash_async_digest()
224 desc->flags = req->base.flags; ghash_async_digest()
225 return shash_ahash_digest(req, desc); ghash_async_digest()
/linux-4.1.27/fs/ext4/
H A Dcrypto_key.c20 static void derive_crypt_complete(struct crypto_async_request *req, int rc) derive_crypt_complete() argument
22 struct ext4_completion_result *ecr = req->data; derive_crypt_complete()
44 struct ablkcipher_request *req = NULL; ext4_derive_key_aes() local
56 req = ablkcipher_request_alloc(tfm, GFP_NOFS); ext4_derive_key_aes()
57 if (!req) { ext4_derive_key_aes()
61 ablkcipher_request_set_callback(req, ext4_derive_key_aes()
70 ablkcipher_request_set_crypt(req, &src_sg, &dst_sg, ext4_derive_key_aes()
72 res = crypto_ablkcipher_encrypt(req); ext4_derive_key_aes()
74 BUG_ON(req->base.data != &ecr); ext4_derive_key_aes()
80 if (req) ext4_derive_key_aes()
81 ablkcipher_request_free(req); ext4_derive_key_aes()
/linux-4.1.27/drivers/mtd/ubi/
H A Dcdev.c440 struct ubi_leb_change_req req; vol_cdev_ioctl() local
442 err = copy_from_user(&req, argp, vol_cdev_ioctl()
457 if (req.lnum < 0 || req.lnum >= vol->reserved_pebs || vol_cdev_ioctl()
458 req.bytes < 0 || req.bytes > vol->usable_leb_size) vol_cdev_ioctl()
465 err = ubi_start_leb_change(ubi, vol, &req); vol_cdev_ioctl()
466 if (req.bytes == 0) vol_cdev_ioctl()
505 struct ubi_map_req req; vol_cdev_ioctl() local
507 err = copy_from_user(&req, argp, sizeof(struct ubi_map_req)); vol_cdev_ioctl()
512 err = ubi_leb_map(desc, req.lnum); vol_cdev_ioctl()
547 struct ubi_set_vol_prop_req req; vol_cdev_ioctl() local
549 err = copy_from_user(&req, argp, vol_cdev_ioctl()
555 switch (req.property) { vol_cdev_ioctl()
558 desc->vol->direct_writes = !!req.value; vol_cdev_ioctl()
598 * @req: the request to check
603 const struct ubi_mkvol_req *req) verify_mkvol_req()
607 if (req->bytes < 0 || req->alignment < 0 || req->vol_type < 0 || verify_mkvol_req()
608 req->name_len < 0) verify_mkvol_req()
611 if ((req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) && verify_mkvol_req()
612 req->vol_id != UBI_VOL_NUM_AUTO) verify_mkvol_req()
615 if (req->alignment == 0) verify_mkvol_req()
618 if (req->bytes == 0) verify_mkvol_req()
621 if (req->vol_type != UBI_DYNAMIC_VOLUME && verify_mkvol_req()
622 req->vol_type != UBI_STATIC_VOLUME) verify_mkvol_req()
625 if (req->alignment > ubi->leb_size) verify_mkvol_req()
628 n = req->alignment & (ubi->min_io_size - 1); verify_mkvol_req()
629 if (req->alignment != 1 && n) verify_mkvol_req()
632 if (!req->name[0] || !req->name_len) verify_mkvol_req()
635 if (req->name_len > UBI_VOL_NAME_MAX) { verify_mkvol_req()
640 n = strnlen(req->name, req->name_len + 1); verify_mkvol_req()
641 if (n != req->name_len) verify_mkvol_req()
648 ubi_dump_mkvol_req(req); verify_mkvol_req()
655 * @req: the request to check
660 const struct ubi_rsvol_req *req) verify_rsvol_req()
662 if (req->bytes <= 0) verify_rsvol_req()
665 if (req->vol_id < 0 || req->vol_id >= ubi->vtbl_slots) verify_rsvol_req()
674 * @req: volumes re-name request
682 struct ubi_rnvol_req *req) rename_volumes()
688 if (req->count < 0 || req->count > UBI_MAX_RNVOL) rename_volumes()
691 if (req->count == 0) rename_volumes()
695 for (i = 0; i < req->count; i++) { rename_volumes()
696 if (req->ents[i].vol_id < 0 || rename_volumes()
697 req->ents[i].vol_id >= ubi->vtbl_slots) rename_volumes()
699 if (req->ents[i].name_len < 0) rename_volumes()
701 if (req->ents[i].name_len > UBI_VOL_NAME_MAX) rename_volumes()
703 req->ents[i].name[req->ents[i].name_len] = '\0'; rename_volumes()
704 n = strlen(req->ents[i].name); rename_volumes()
705 if (n != req->ents[i].name_len) rename_volumes()
710 for (i = 0; i < req->count - 1; i++) { rename_volumes()
711 for (n = i + 1; n < req->count; n++) { rename_volumes()
712 if (req->ents[i].vol_id == req->ents[n].vol_id) { rename_volumes()
714 req->ents[i].vol_id); rename_volumes()
717 if (!strcmp(req->ents[i].name, req->ents[n].name)) { rename_volumes()
719 req->ents[i].name); rename_volumes()
727 for (i = 0; i < req->count; i++) { rename_volumes()
728 int vol_id = req->ents[i].vol_id; rename_volumes()
729 int name_len = req->ents[i].name_len; rename_volumes()
730 const char *name = req->ents[i].name; rename_volumes()
774 * removed, unless it is also re-named in the request (@req). rename_volumes()
852 struct ubi_mkvol_req req; ubi_cdev_ioctl() local
855 err = copy_from_user(&req, argp, sizeof(struct ubi_mkvol_req)); ubi_cdev_ioctl()
861 err = verify_mkvol_req(ubi, &req); ubi_cdev_ioctl()
866 err = ubi_create_volume(ubi, &req); ubi_cdev_ioctl()
871 err = put_user(req.vol_id, (__user int32_t *)argp); ubi_cdev_ioctl()
913 struct ubi_rsvol_req req; ubi_cdev_ioctl() local
916 err = copy_from_user(&req, argp, sizeof(struct ubi_rsvol_req)); ubi_cdev_ioctl()
922 err = verify_rsvol_req(ubi, &req); ubi_cdev_ioctl()
926 desc = ubi_open_volume(ubi->ubi_num, req.vol_id, UBI_EXCLUSIVE); ubi_cdev_ioctl()
932 pebs = div_u64(req.bytes + desc->vol->usable_leb_size - 1, ubi_cdev_ioctl()
945 struct ubi_rnvol_req *req; ubi_cdev_ioctl() local
948 req = kmalloc(sizeof(struct ubi_rnvol_req), GFP_KERNEL); ubi_cdev_ioctl()
949 if (!req) { ubi_cdev_ioctl()
954 err = copy_from_user(req, argp, sizeof(struct ubi_rnvol_req)); ubi_cdev_ioctl()
957 kfree(req); ubi_cdev_ioctl()
961 err = rename_volumes(ubi, req); ubi_cdev_ioctl()
962 kfree(req); ubi_cdev_ioctl()
988 struct ubi_attach_req req; ctrl_cdev_ioctl() local
992 err = copy_from_user(&req, argp, sizeof(struct ubi_attach_req)); ctrl_cdev_ioctl()
998 if (req.mtd_num < 0 || ctrl_cdev_ioctl()
999 (req.ubi_num < 0 && req.ubi_num != UBI_DEV_NUM_AUTO)) { ctrl_cdev_ioctl()
1004 mtd = get_mtd_device(NULL, req.mtd_num); ctrl_cdev_ioctl()
1015 err = ubi_attach_mtd_dev(mtd, req.ubi_num, req.vid_hdr_offset, ctrl_cdev_ioctl()
1016 req.max_beb_per1024); ctrl_cdev_ioctl()
602 verify_mkvol_req(const struct ubi_device *ubi, const struct ubi_mkvol_req *req) verify_mkvol_req() argument
659 verify_rsvol_req(const struct ubi_device *ubi, const struct ubi_rsvol_req *req) verify_rsvol_req() argument
681 rename_volumes(struct ubi_device *ubi, struct ubi_rnvol_req *req) rename_volumes() argument
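The UBI cdev.c excerpts (like the sgi-gru dump ioctl above) copy a fixed-size request struct from user space, validate every field with a verify_*_req() helper, and only then act on it. The sketch below keeps just that copy-in/validate shape; struct fake_mkvol_req and its limits are invented for illustration and are not UBI's real ubi_mkvol_req.

/*
 * Copy-in and validation sketch for an ioctl-style request.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

#define FAKE_NAME_MAX 16

struct fake_mkvol_req {
	long long bytes;
	int alignment;
	int name_len;
	char name[FAKE_NAME_MAX + 1];
};

/* Mirrors the shape of verify_mkvol_req(): every field checked up front. */
static int verify_fake_req(const struct fake_mkvol_req *req)
{
	if (req->bytes <= 0 || req->alignment <= 0 || req->name_len <= 0)
		return -EINVAL;
	if (req->name_len > FAKE_NAME_MAX)
		return -EINVAL;
	if (strlen(req->name) != (size_t)req->name_len)
		return -EINVAL;
	return 0;
}

/* Stand-in for the ioctl path: copy the caller's struct, then verify it. */
static int fake_ioctl_mkvol(const struct fake_mkvol_req *user_arg)
{
	struct fake_mkvol_req req;

	memcpy(&req, user_arg, sizeof(req));	/* copy_from_user() analog */
	req.name[FAKE_NAME_MAX] = '\0';		/* never trust termination */
	return verify_fake_req(&req);
}

int main(void)
{
	struct fake_mkvol_req good = { .bytes = 4096, .alignment = 1,
				       .name_len = 4, .name = "data" };
	struct fake_mkvol_req bad = good;

	bad.name_len = 2;	/* inconsistent with the actual name */
	printf("good: %d, bad: %d\n",
	       fake_ioctl_mkvol(&good), fake_ioctl_mkvol(&bad));
	return 0;
}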
/linux-4.1.27/net/unix/
H A Ddiag.c74 struct sock *req, *peer; sk_diag_dump_icons() local
76 req = skb->sk; sk_diag_dump_icons()
82 unix_state_lock_nested(req); sk_diag_dump_icons()
83 peer = unix_sk(req)->peer; sk_diag_dump_icons()
85 unix_state_unlock(req); sk_diag_dump_icons()
112 static int sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, sk_diag_fill() argument
131 if ((req->udiag_show & UDIAG_SHOW_NAME) && sk_diag_fill()
135 if ((req->udiag_show & UDIAG_SHOW_VFS) && sk_diag_fill()
139 if ((req->udiag_show & UDIAG_SHOW_PEER) && sk_diag_fill()
143 if ((req->udiag_show & UDIAG_SHOW_ICONS) && sk_diag_fill()
147 if ((req->udiag_show & UDIAG_SHOW_RQLEN) && sk_diag_fill()
151 if ((req->udiag_show & UDIAG_SHOW_MEMINFO) && sk_diag_fill()
166 static int sk_diag_dump(struct sock *sk, struct sk_buff *skb, struct unix_diag_req *req, sk_diag_dump() argument
178 return sk_diag_fill(sk, skb, req, portid, seq, flags, sk_ino); sk_diag_dump()
183 struct unix_diag_req *req; unix_diag_dump() local
187 req = nlmsg_data(cb->nlh); unix_diag_dump()
204 if (!(req->udiag_states & (1 << sk->sk_state))) unix_diag_dump()
206 if (sk_diag_dump(sk, skb, req, unix_diag_dump()
245 struct unix_diag_req *req) unix_diag_get_exact()
253 if (req->udiag_ino == 0) unix_diag_get_exact()
256 sk = unix_lookup_by_ino(req->udiag_ino); unix_diag_get_exact()
261 err = sock_diag_check_cookie(sk, req->udiag_cookie); unix_diag_get_exact()
272 err = sk_diag_fill(sk, rep, req, NETLINK_CB(in_skb).portid, unix_diag_get_exact()
273 nlh->nlmsg_seq, 0, req->udiag_ino); unix_diag_get_exact()
243 unix_diag_get_exact(struct sk_buff *in_skb, const struct nlmsghdr *nlh, struct unix_diag_req *req) unix_diag_get_exact() argument
/linux-4.1.27/drivers/media/usb/dvb-usb/
H A Daz6027.c301 static int az6027_usb_in_op(struct dvb_usb_device *d, u8 req, az6027_usb_in_op() argument
310 req, az6027_usb_in_op()
324 deb_xfer("in: req. %02x, val: %04x, ind: %04x, buffer: ", req, value, index); az6027_usb_in_op()
332 u8 req, az6027_usb_out_op()
340 deb_xfer("out: req. %02x, val: %04x, ind: %04x, buffer: ", req, value, index); az6027_usb_out_op()
348 req, az6027_usb_out_op()
369 u8 req; az6027_streaming_ctrl() local
376 req = 0xBC; az6027_streaming_ctrl()
381 ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); az6027_streaming_ctrl()
416 u8 req; az6027_ci_read_attribute_mem() local
431 req = 0xC1; az6027_ci_read_attribute_mem()
436 ret = az6027_usb_in_op(d, req, value, index, b, blen); az6027_ci_read_attribute_mem()
458 u8 req; az6027_ci_write_attribute_mem() local
468 req = 0xC2; az6027_ci_write_attribute_mem()
473 ret = az6027_usb_out_op(d, req, value1, index, NULL, blen); az6027_ci_write_attribute_mem()
489 u8 req; az6027_ci_read_cam_control() local
504 req = 0xC3; az6027_ci_read_cam_control()
509 ret = az6027_usb_in_op(d, req, value, index, b, blen); az6027_ci_read_cam_control()
535 u8 req; az6027_ci_write_cam_control() local
544 req = 0xC4; az6027_ci_write_cam_control()
549 ret = az6027_usb_out_op(d, req, value1, index, NULL, blen); az6027_ci_write_cam_control()
565 u8 req; CI_CamReady() local
575 req = 0xC8; CI_CamReady()
580 ret = az6027_usb_in_op(d, req, value, index, b, blen); CI_CamReady()
597 u8 req; az6027_ci_slot_reset() local
604 req = 0xC6; az6027_ci_slot_reset()
609 ret = az6027_usb_out_op(d, req, value, index, NULL, blen); az6027_ci_slot_reset()
616 req = 0xC6; az6027_ci_slot_reset()
621 ret = az6027_usb_out_op(d, req, value, index, NULL, blen); az6027_ci_slot_reset()
653 u8 req; az6027_ci_slot_ts_enable() local
660 req = 0xC7; az6027_ci_slot_ts_enable()
665 ret = az6027_usb_out_op(d, req, value, index, NULL, blen); az6027_ci_slot_ts_enable()
681 u8 req; az6027_ci_poll_slot_status() local
692 req = 0xC5; az6027_ci_poll_slot_status()
697 ret = az6027_usb_in_op(d, req, value, index, b, blen); az6027_ci_poll_slot_status()
825 u8 req; az6027_frontend_poweron() local
830 req = 0xBC; az6027_frontend_poweron()
835 ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); az6027_frontend_poweron()
844 u8 req; az6027_frontend_reset() local
850 req = 0xC0; az6027_frontend_reset()
855 ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); az6027_frontend_reset()
859 req = 0xC0; az6027_frontend_reset()
865 ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); az6027_frontend_reset()
871 req = 0xC0; az6027_frontend_reset()
876 ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); az6027_frontend_reset()
887 u8 req; az6027_frontend_tsbypass() local
893 req = 0xC7; az6027_frontend_tsbypass()
898 ret = az6027_usb_out_op(adap->dev, req, value, index, NULL, blen); az6027_frontend_tsbypass()
959 u8 req; az6027_i2c_xfer() local
977 req = 0xBE; az6027_i2c_xfer()
981 az6027_usb_out_op(d, req, value, index, data, length); az6027_i2c_xfer()
987 req = 0xB9; az6027_i2c_xfer()
991 az6027_usb_in_op(d, req, value, index, data, length); az6027_i2c_xfer()
1000 req = 0xBD; az6027_i2c_xfer()
1007 az6027_usb_out_op(d, req, value, index, data, length); az6027_i2c_xfer()
1014 req = 0xB9; az6027_i2c_xfer()
1018 az6027_usb_in_op(d, req, value, index, data, length); az6027_i2c_xfer()
1025 req = 0xBD; az6027_i2c_xfer()
1034 az6027_usb_out_op(d, req, value, index, data, length); az6027_i2c_xfer()
331 az6027_usb_out_op(struct dvb_usb_device *d, u8 req, u16 value, u16 index, u8 *b, int blen) az6027_usb_out_op() argument
/linux-4.1.27/drivers/staging/lustre/lnet/selftest/
H A Dping_test.c92 srpc_ping_reqst_t *req; ping_client_prep_rpc() local
105 req = &(*rpc)->crpc_reqstmsg.msg_body.ping_reqst; ping_client_prep_rpc()
107 req->pnr_magic = LST_PING_TEST_MAGIC; ping_client_prep_rpc()
110 req->pnr_seq = lst_ping_data.pnd_counter++; ping_client_prep_rpc()
114 req->pnr_time_sec = tv.tv_sec; ping_client_prep_rpc()
115 req->pnr_time_usec = tv.tv_usec; ping_client_prep_rpc()
177 srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst; ping_server_handle() local
185 __swab32s(&req->pnr_seq); ping_server_handle()
186 __swab32s(&req->pnr_magic); ping_server_handle()
187 __swab64s(&req->pnr_time_sec); ping_server_handle()
188 __swab64s(&req->pnr_time_usec); ping_server_handle()
192 if (req->pnr_magic != LST_PING_TEST_MAGIC) { ping_server_handle()
194 req->pnr_magic, libcfs_id2str(rpc->srpc_peer)); ping_server_handle()
198 rep->pnr_seq = req->pnr_seq; ping_server_handle()
210 req->pnr_seq, libcfs_id2str(rpc->srpc_peer)); ping_server_handle()
/linux-4.1.27/fs/cifs/
H A Dsmb2pdu.c326 struct smb2_negotiate_req *req; SMB2_negotiate() local
343 rc = small_smb2_init(SMB2_NEGOTIATE, NULL, (void **) &req); SMB2_negotiate()
347 req->hdr.SessionId = 0; SMB2_negotiate()
349 req->Dialects[0] = cpu_to_le16(ses->server->vals->protocol_id); SMB2_negotiate()
351 req->DialectCount = cpu_to_le16(1); /* One vers= at a time for now */ SMB2_negotiate()
352 inc_rfc1001_len(req, 2); SMB2_negotiate()
356 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_REQUIRED); SMB2_negotiate()
358 req->SecurityMode = cpu_to_le16(SMB2_NEGOTIATE_SIGNING_ENABLED); SMB2_negotiate()
360 req->SecurityMode = 0; SMB2_negotiate()
362 req->Capabilities = cpu_to_le32(ses->server->vals->req_capabilities); SMB2_negotiate()
366 memset(req->ClientGUID, 0, SMB2_CLIENT_GUID_SIZE); SMB2_negotiate()
368 memcpy(req->ClientGUID, server->client_guid, SMB2_negotiate()
371 iov[0].iov_base = (char *)req; SMB2_negotiate()
373 iov[0].iov_len = get_rfc1002_length(req) + 4; SMB2_negotiate()
525 struct smb2_sess_setup_req *req; SMB2_sess_setup() local
569 rc = small_smb2_init(SMB2_SESSION_SETUP, NULL, (void **) &req); SMB2_sess_setup()
573 req->hdr.SessionId = 0; /* First session, not a reauthenticate */ SMB2_sess_setup()
574 req->VcNumber = 0; /* MBZ */ SMB2_sess_setup()
576 req->hdr.CreditRequest = cpu_to_le16(3); SMB2_sess_setup()
580 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_REQUIRED; SMB2_sess_setup()
582 req->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED; SMB2_sess_setup()
584 req->SecurityMode = 0; SMB2_sess_setup()
586 req->Capabilities = 0; SMB2_sess_setup()
587 req->Channel = 0; /* MBZ */ SMB2_sess_setup()
589 iov[0].iov_base = (char *)req; SMB2_sess_setup()
591 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; SMB2_sess_setup()
659 req->hdr.SessionId = ses->Suid; SMB2_sess_setup()
694 req->SecurityBufferOffset = SMB2_sess_setup()
697 req->SecurityBufferLength = cpu_to_le16(blob_length); SMB2_sess_setup()
699 inc_rfc1001_len(req, blob_length - 1 /* pad */); SMB2_sess_setup()
794 struct smb2_logoff_req *req; /* response is also trivial struct */ SMB2_logoff() local
809 rc = small_smb2_init(SMB2_LOGOFF, NULL, (void **) &req); SMB2_logoff()
814 req->hdr.SessionId = ses->Suid; SMB2_logoff()
816 req->hdr.Flags |= SMB2_FLAGS_SIGNED; SMB2_logoff()
818 rc = SendReceiveNoRsp(xid, ses, (char *) &req->hdr, 0); SMB2_logoff()
847 struct smb2_tree_connect_req *req; SMB2_tcon() local
877 rc = small_smb2_init(SMB2_TREE_CONNECT, tcon, (void **) &req); SMB2_tcon()
885 req->hdr.SessionId = ses->Suid; SMB2_tcon()
887 req->hdr.Flags |= SMB2_FLAGS_SIGNED; */ SMB2_tcon()
890 iov[0].iov_base = (char *)req; SMB2_tcon()
892 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; SMB2_tcon()
895 req->PathOffset = cpu_to_le16(sizeof(struct smb2_tree_connect_req) SMB2_tcon()
897 req->PathLength = cpu_to_le16(unc_path_len - 2); SMB2_tcon()
901 inc_rfc1001_len(req, unc_path_len - 1 /* pad */); SMB2_tcon()
964 struct smb2_tree_disconnect_req *req; /* response is trivial */ SMB2_tdis() local
979 rc = small_smb2_init(SMB2_TREE_DISCONNECT, tcon, (void **) &req); SMB2_tdis()
983 rc = SendReceiveNoRsp(xid, ses, (char *)&req->hdr, 0); SMB2_tdis()
1072 struct smb2_create_req *req = iov[0].iov_base; add_lease_context() local
1079 req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_LEASE; add_lease_context()
1080 if (!req->CreateContextsOffset) add_lease_context()
1081 req->CreateContextsOffset = cpu_to_le32( add_lease_context()
1084 le32_add_cpu(&req->CreateContextsLength, add_lease_context()
1086 inc_rfc1001_len(&req->hdr, server->vals->create_lease_size); add_lease_context()
1095 struct smb2_create_req *req = iov[0].iov_base; add_durable_context() local
1107 if (!req->CreateContextsOffset) add_durable_context()
1108 req->CreateContextsOffset = add_durable_context()
1111 le32_add_cpu(&req->CreateContextsLength, sizeof(struct create_durable)); add_durable_context()
1112 inc_rfc1001_len(&req->hdr, sizeof(struct create_durable)); add_durable_context()
1122 struct smb2_create_req *req; SMB2_open() local
1144 rc = small_smb2_init(SMB2_CREATE, tcon, (void **) &req); SMB2_open()
1153 req->ImpersonationLevel = IL_IMPERSONATION; SMB2_open()
1154 req->DesiredAccess = cpu_to_le32(oparms->desired_access); SMB2_open()
1156 req->FileAttributes = cpu_to_le32(file_attributes); SMB2_open()
1157 req->ShareAccess = FILE_SHARE_ALL_LE; SMB2_open()
1158 req->CreateDisposition = cpu_to_le32(oparms->disposition); SMB2_open()
1159 req->CreateOptions = cpu_to_le32(oparms->create_options & CREATE_OPTIONS_MASK); SMB2_open()
1162 req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req) - 4); SMB2_open()
1164 iov[0].iov_base = (char *)req; SMB2_open()
1166 iov[0].iov_len = get_rfc1002_length(req) + 4; SMB2_open()
1169 req->NameLength = cpu_to_le16(uni_path_len - 2); SMB2_open()
1189 inc_rfc1001_len(req, uni_path_len - 1); SMB2_open()
1196 req->RequestedOplockLevel = *oplock; SMB2_open()
1200 cifs_small_buf_release(req); SMB2_open()
1217 cifs_small_buf_release(req); SMB2_open()
1268 struct smb2_ioctl_req *req; SMB2_ioctl() local
1296 rc = small_smb2_init(SMB2_IOCTL, tcon, (void **) &req); SMB2_ioctl()
1300 req->CtlCode = cpu_to_le32(opcode); SMB2_ioctl()
1301 req->PersistentFileId = persistent_fid; SMB2_ioctl()
1302 req->VolatileFileId = volatile_fid; SMB2_ioctl()
1305 req->InputCount = cpu_to_le32(indatalen); SMB2_ioctl()
1307 req->InputOffset = SMB2_ioctl()
1315 req->OutputOffset = 0; SMB2_ioctl()
1316 req->OutputCount = 0; /* MBZ */ SMB2_ioctl()
1324 req->MaxOutputResponse = cpu_to_le32(0xFF00); /* < 64K uses 1 credit */ SMB2_ioctl()
1327 req->Flags = cpu_to_le32(SMB2_0_IOCTL_IS_FSCTL); SMB2_ioctl()
1329 req->Flags = 0; SMB2_ioctl()
1331 iov[0].iov_base = (char *)req; SMB2_ioctl()
1344 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; SMB2_ioctl()
1345 inc_rfc1001_len(req, indatalen - 1); SMB2_ioctl()
1347 iov[0].iov_len = get_rfc1002_length(req) + 4; SMB2_ioctl()
1430 struct smb2_close_req *req; SMB2_close() local
1445 rc = small_smb2_init(SMB2_CLOSE, tcon, (void **) &req); SMB2_close()
1449 req->PersistentFileId = persistent_fid; SMB2_close()
1450 req->VolatileFileId = volatile_fid; SMB2_close()
1452 iov[0].iov_base = (char *)req; SMB2_close()
1454 iov[0].iov_len = get_rfc1002_length(req) + 4; SMB2_close()
1533 struct smb2_query_info_req *req; query_info() local
1548 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req); query_info()
1552 req->InfoType = SMB2_O_INFO_FILE; query_info()
1553 req->FileInfoClass = info_class; query_info()
1554 req->PersistentFileId = persistent_fid; query_info()
1555 req->VolatileFileId = volatile_fid; query_info()
1557 req->InputBufferOffset = query_info()
1559 req->OutputBufferLength = cpu_to_le32(output_len); query_info()
1561 iov[0].iov_base = (char *)req; query_info()
1563 iov[0].iov_len = get_rfc1002_length(req) + 4; query_info()
1627 struct smb2_echo_req *req; SMB2_echo() local
1635 rc = small_smb2_init(SMB2_ECHO, NULL, (void **)&req); SMB2_echo()
1639 req->hdr.CreditRequest = cpu_to_le16(1); SMB2_echo()
1641 iov.iov_base = (char *)req; SMB2_echo()
1643 iov.iov_len = get_rfc1002_length(req) + 4; SMB2_echo()
1650 cifs_small_buf_release(req); SMB2_echo()
1658 struct smb2_flush_req *req; SMB2_flush() local
1672 rc = small_smb2_init(SMB2_FLUSH, tcon, (void **) &req); SMB2_flush()
1676 req->PersistentFileId = persistent_fid; SMB2_flush()
1677 req->VolatileFileId = volatile_fid; SMB2_flush()
1679 iov[0].iov_base = (char *)req; SMB2_flush()
1681 iov[0].iov_len = get_rfc1002_length(req) + 4; SMB2_flush()
1701 struct smb2_read_req *req = NULL; smb2_new_read_req() local
1703 rc = small_smb2_init(SMB2_READ, io_parms->tcon, (void **) &req); smb2_new_read_req()
1709 req->hdr.ProcessId = cpu_to_le32(io_parms->pid); smb2_new_read_req()
1711 req->PersistentFileId = io_parms->persistent_fid; smb2_new_read_req()
1712 req->VolatileFileId = io_parms->volatile_fid; smb2_new_read_req()
1713 req->ReadChannelInfoOffset = 0; /* reserved */ smb2_new_read_req()
1714 req->ReadChannelInfoLength = 0; /* reserved */ smb2_new_read_req()
1715 req->Channel = 0; /* reserved */ smb2_new_read_req()
1716 req->MinimumCount = 0; smb2_new_read_req()
1717 req->Length = cpu_to_le32(io_parms->length); smb2_new_read_req()
1718 req->Offset = cpu_to_le64(io_parms->offset); smb2_new_read_req()
1723 req->hdr.NextCommand = smb2_new_read_req()
1724 cpu_to_le32(get_rfc1002_length(req) + 4); smb2_new_read_req()
1726 req->hdr.NextCommand = 0; smb2_new_read_req()
1728 req->hdr.Flags |= SMB2_FLAGS_RELATED_OPERATIONS; smb2_new_read_req()
1733 req->hdr.SessionId = 0xFFFFFFFF; smb2_new_read_req()
1734 req->hdr.TreeId = 0xFFFFFFFF; smb2_new_read_req()
1735 req->PersistentFileId = 0xFFFFFFFF; smb2_new_read_req()
1736 req->VolatileFileId = 0xFFFFFFFF; smb2_new_read_req()
1740 req->RemainingBytes = cpu_to_le32(remaining_bytes); smb2_new_read_req()
1742 req->RemainingBytes = 0; smb2_new_read_req()
1744 iov[0].iov_base = (char *)req; smb2_new_read_req()
1746 iov[0].iov_len = get_rfc1002_length(req) + 4; smb2_new_read_req()
1836 /* reduce in_flight value since we won't send the req */ smb2_async_readv()
1981 struct smb2_write_req *req = NULL; smb2_async_writev() local
1987 rc = small_smb2_init(SMB2_WRITE, tcon, (void **) &req); smb2_async_writev()
1992 /* reduce in_flight value since we won't send the req */ smb2_async_writev()
2000 req->hdr.ProcessId = cpu_to_le32(wdata->cfile->pid); smb2_async_writev()
2002 req->PersistentFileId = wdata->cfile->fid.persistent_fid; smb2_async_writev()
2003 req->VolatileFileId = wdata->cfile->fid.volatile_fid; smb2_async_writev()
2004 req->WriteChannelInfoOffset = 0; smb2_async_writev()
2005 req->WriteChannelInfoLength = 0; smb2_async_writev()
2006 req->Channel = 0; smb2_async_writev()
2007 req->Offset = cpu_to_le64(wdata->offset); smb2_async_writev()
2009 req->DataOffset = cpu_to_le16( smb2_async_writev()
2011 req->RemainingBytes = 0; smb2_async_writev()
2014 iov.iov_len = get_rfc1002_length(req) + 4 - 1; smb2_async_writev()
2015 iov.iov_base = req; smb2_async_writev()
2027 req->Length = cpu_to_le32(wdata->bytes); smb2_async_writev()
2029 inc_rfc1001_len(&req->hdr, wdata->bytes - 1 /* Buffer */); smb2_async_writev()
2032 req->hdr.CreditCharge = cpu_to_le16(DIV_ROUND_UP(wdata->bytes, smb2_async_writev()
2036 le16_to_cpu(req->hdr.CreditCharge); smb2_async_writev()
2052 cifs_small_buf_release(req); smb2_async_writev()
2067 struct smb2_write_req *req = NULL; SMB2_write() local
2075 rc = small_smb2_init(SMB2_WRITE, io_parms->tcon, (void **) &req); SMB2_write()
2082 req->hdr.ProcessId = cpu_to_le32(io_parms->pid); SMB2_write()
2084 req->PersistentFileId = io_parms->persistent_fid; SMB2_write()
2085 req->VolatileFileId = io_parms->volatile_fid; SMB2_write()
2086 req->WriteChannelInfoOffset = 0; SMB2_write()
2087 req->WriteChannelInfoLength = 0; SMB2_write()
2088 req->Channel = 0; SMB2_write()
2089 req->Length = cpu_to_le32(io_parms->length); SMB2_write()
2090 req->Offset = cpu_to_le64(io_parms->offset); SMB2_write()
2092 req->DataOffset = cpu_to_le16( SMB2_write()
2094 req->RemainingBytes = 0; SMB2_write()
2096 iov[0].iov_base = (char *)req; SMB2_write()
2098 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; SMB2_write()
2101 inc_rfc1001_len(req, io_parms->length - 1 /* Buffer */); SMB2_write()
2165 struct smb2_query_directory_req *req; SMB2_query_directory() local
2184 rc = small_smb2_init(SMB2_QUERY_DIRECTORY, tcon, (void **) &req); SMB2_query_directory()
2190 req->FileInformationClass = FILE_DIRECTORY_INFORMATION; SMB2_query_directory()
2194 req->FileInformationClass = FILEID_FULL_DIRECTORY_INFORMATION; SMB2_query_directory()
2204 req->FileIndex = cpu_to_le32(index); SMB2_query_directory()
2205 req->PersistentFileId = persistent_fid; SMB2_query_directory()
2206 req->VolatileFileId = volatile_fid; SMB2_query_directory()
2209 bufptr = req->Buffer; SMB2_query_directory()
2212 req->FileNameOffset = SMB2_query_directory()
2214 req->FileNameLength = cpu_to_le16(len); SMB2_query_directory()
2221 req->OutputBufferLength = cpu_to_le32(output_size); SMB2_query_directory()
2223 iov[0].iov_base = (char *)req; SMB2_query_directory()
2225 iov[0].iov_len = get_rfc1002_length(req) + 4 - 1; SMB2_query_directory()
2227 iov[1].iov_base = (char *)(req->Buffer); SMB2_query_directory()
2230 inc_rfc1001_len(req, len - 1 /* Buffer */); SMB2_query_directory()
2289 struct smb2_set_info_req *req; send_set_info() local
2310 rc = small_smb2_init(SMB2_SET_INFO, tcon, (void **) &req); send_set_info()
2316 req->hdr.ProcessId = cpu_to_le32(pid); send_set_info()
2318 req->InfoType = SMB2_O_INFO_FILE; send_set_info()
2319 req->FileInfoClass = info_class; send_set_info()
2320 req->PersistentFileId = persistent_fid; send_set_info()
2321 req->VolatileFileId = volatile_fid; send_set_info()
2324 req->BufferOffset = send_set_info()
2326 req->BufferLength = cpu_to_le32(*size); send_set_info()
2328 inc_rfc1001_len(req, *size - 1 /* Buffer */); send_set_info()
2330 memcpy(req->Buffer, *data, *size); send_set_info()
2332 iov[0].iov_base = (char *)req; send_set_info()
2334 iov[0].iov_len = get_rfc1002_length(req) + 4; send_set_info()
2337 inc_rfc1001_len(req, size[i]); send_set_info()
2338 le32_add_cpu(&req->BufferLength, size[i]); send_set_info()
2471 struct smb2_oplock_break *req = NULL; SMB2_oplock_break() local
2474 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); SMB2_oplock_break()
2479 req->VolatileFid = volatile_fid; SMB2_oplock_break()
2480 req->PersistentFid = persistent_fid; SMB2_oplock_break()
2481 req->OplockLevel = oplock_level; SMB2_oplock_break()
2482 req->hdr.CreditRequest = cpu_to_le16(1); SMB2_oplock_break()
2484 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP); SMB2_oplock_break()
2512 struct smb2_query_info_req *req; build_qfs_info_req() local
2519 rc = small_smb2_init(SMB2_QUERY_INFO, tcon, (void **) &req); build_qfs_info_req()
2523 req->InfoType = SMB2_O_INFO_FILESYSTEM; build_qfs_info_req()
2524 req->FileInfoClass = level; build_qfs_info_req()
2525 req->PersistentFileId = persistent_fid; build_qfs_info_req()
2526 req->VolatileFileId = volatile_fid; build_qfs_info_req()
2528 req->InputBufferOffset = build_qfs_info_req()
2530 req->OutputBufferLength = cpu_to_le32( build_qfs_info_req()
2533 iov->iov_base = (char *)req; build_qfs_info_req()
2535 iov->iov_len = get_rfc1002_length(req) + 4; build_qfs_info_req()
2645 struct smb2_lock_req *req = NULL; smb2_lockv() local
2652 rc = small_smb2_init(SMB2_LOCK, tcon, (void **) &req); smb2_lockv()
2656 req->hdr.ProcessId = cpu_to_le32(pid); smb2_lockv()
2657 req->LockCount = cpu_to_le16(num_lock); smb2_lockv()
2659 req->PersistentFileId = persist_fid; smb2_lockv()
2660 req->VolatileFileId = volatile_fid; smb2_lockv()
2663 inc_rfc1001_len(req, count - sizeof(struct smb2_lock_element)); smb2_lockv()
2665 iov[0].iov_base = (char *)req; smb2_lockv()
2667 iov[0].iov_len = get_rfc1002_length(req) + 4 - count; smb2_lockv()
2703 struct smb2_lease_ack *req = NULL; SMB2_lease_break() local
2706 rc = small_smb2_init(SMB2_OPLOCK_BREAK, tcon, (void **) &req); SMB2_lease_break()
2711 req->hdr.CreditRequest = cpu_to_le16(1); SMB2_lease_break()
2712 req->StructureSize = cpu_to_le16(36); SMB2_lease_break()
2713 inc_rfc1001_len(req, 12); SMB2_lease_break()
2715 memcpy(req->LeaseKey, lease_key, 16); SMB2_lease_break()
2716 req->LeaseState = lease_state; SMB2_lease_break()
2718 rc = SendReceiveNoRsp(xid, tcon->ses, (char *) req, CIFS_OBREAK_OP); SMB2_lease_break()
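Throughout the smb2pdu.c excerpts a request is allocated with a fixed-size header, variable-length data (paths, blobs) is appended afterwards, and inc_rfc1001_len() bumps the on-wire length to match. The sketch below shows only that length-bookkeeping idiom with an invented frame layout (fake_frame, fake_inc_len); it is not the real SMB2 framing.

/*
 * Length-bookkeeping sketch: the header's length field covers the fixed
 * part, and every appended blob bumps it, keeping header and payload in sync.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct fake_frame {
	uint32_t wire_len;	/* bytes that follow this field */
	uint8_t body[128];
};

static void fake_inc_len(struct fake_frame *f, uint32_t extra)
{
	f->wire_len += extra;
}

static int fake_append(struct fake_frame *f, const void *data, uint32_t len)
{
	if (f->wire_len + len > sizeof(f->body))
		return -1;
	memcpy(f->body + f->wire_len, data, len);
	fake_inc_len(f, len);	/* keep the header's length in sync */
	return 0;
}

int main(void)
{
	struct fake_frame frame = { .wire_len = 8 };	/* fixed header part */
	const char path[] = "\\\\server\\share";

	memset(frame.body, 0, 8);			/* fixed fields */
	if (fake_append(&frame, path, sizeof(path)) == 0)
		printf("wire length now %u bytes\n", frame.wire_len);
	return 0;
}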
/linux-4.1.27/drivers/infiniband/hw/mlx4/
H A Dmcg.c539 struct mcast_req *req = NULL; mlx4_ib_mcg_timeout_handler() local
546 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); mlx4_ib_mcg_timeout_handler()
547 list_del(&req->group_list); mlx4_ib_mcg_timeout_handler()
548 list_del(&req->func_list); mlx4_ib_mcg_timeout_handler()
549 --group->func[req->func].num_pend_reqs; mlx4_ib_mcg_timeout_handler()
551 kfree(req); mlx4_ib_mcg_timeout_handler()
581 struct mcast_req *req) handle_leave_req()
585 if (req->clean) handle_leave_req()
586 leave_mask = group->func[req->func].join_state; handle_leave_req()
588 status = check_leave(group, req->func, leave_mask); handle_leave_req()
590 leave_group(group, req->func, leave_mask); handle_leave_req()
592 if (!req->clean) handle_leave_req()
593 send_reply_to_slave(req->func, group, &req->sa_mad, status); handle_leave_req()
594 --group->func[req->func].num_pend_reqs; handle_leave_req()
595 list_del(&req->group_list); handle_leave_req()
596 list_del(&req->func_list); handle_leave_req()
597 kfree(req); handle_leave_req()
602 struct mcast_req *req) handle_join_req()
607 struct ib_sa_mcmember_data *sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; handle_join_req()
611 status = cmp_rec(&group->rec, sa_data, req->sa_mad.sa_hdr.comp_mask); handle_join_req()
613 join_group(group, req->func, join_mask); handle_join_req()
615 --group->func[req->func].num_pend_reqs; handle_join_req()
616 send_reply_to_slave(req->func, group, &req->sa_mad, status); handle_join_req()
617 list_del(&req->group_list); handle_join_req()
618 list_del(&req->func_list); handle_join_req()
619 kfree(req); handle_join_req()
624 if (send_join_to_wire(group, &req->sa_mad)) { handle_join_req()
625 --group->func[req->func].num_pend_reqs; handle_join_req()
626 list_del(&req->group_list); handle_join_req()
627 list_del(&req->func_list); handle_join_req()
628 kfree(req); handle_join_req()
641 struct mcast_req *req = NULL; mlx4_ib_mcg_work_handler() local
670 req = list_first_entry(&group->pending_list, mlx4_ib_mcg_work_handler()
673 if (req) { mlx4_ib_mcg_work_handler()
674 send_reply_to_slave(req->func, group, &req->sa_mad, status); mlx4_ib_mcg_work_handler()
675 --group->func[req->func].num_pend_reqs; mlx4_ib_mcg_work_handler()
676 list_del(&req->group_list); mlx4_ib_mcg_work_handler()
677 list_del(&req->func_list); mlx4_ib_mcg_work_handler()
678 kfree(req); mlx4_ib_mcg_work_handler()
706 req = list_first_entry(&group->pending_list, struct mcast_req, mlx4_ib_mcg_work_handler()
708 sa_data = (struct ib_sa_mcmember_data *)req->sa_mad.data; mlx4_ib_mcg_work_handler()
714 if (req->sa_mad.mad_hdr.method == IB_SA_METHOD_DELETE) mlx4_ib_mcg_work_handler()
715 rc += handle_leave_req(group, req_join_state, req); mlx4_ib_mcg_work_handler()
717 rc += handle_join_req(group, req_join_state, req); mlx4_ib_mcg_work_handler()
747 struct mcast_req *req; search_relocate_mgid0_group() local
765 req = list_first_entry(&group->pending_list, search_relocate_mgid0_group()
767 --group->func[req->func].num_pend_reqs; search_relocate_mgid0_group()
768 list_del(&req->group_list); search_relocate_mgid0_group()
769 list_del(&req->func_list); search_relocate_mgid0_group()
770 kfree(req); search_relocate_mgid0_group()
869 static void queue_req(struct mcast_req *req) queue_req() argument
871 struct mcast_group *group = req->group; queue_req()
875 list_add_tail(&req->group_list, &group->pending_list); queue_req()
876 list_add_tail(&req->func_list, &group->func[req->func].pending); queue_req()
938 struct mcast_req *req; mlx4_ib_mcg_multiplex_handler() local
948 req = kzalloc(sizeof *req, GFP_KERNEL); mlx4_ib_mcg_multiplex_handler()
949 if (!req) mlx4_ib_mcg_multiplex_handler()
952 req->func = slave; mlx4_ib_mcg_multiplex_handler()
953 req->sa_mad = *sa_mad; mlx4_ib_mcg_multiplex_handler()
959 kfree(req); mlx4_ib_mcg_multiplex_handler()
968 kfree(req); mlx4_ib_mcg_multiplex_handler()
972 req->group = group; mlx4_ib_mcg_multiplex_handler()
973 queue_req(req); mlx4_ib_mcg_multiplex_handler()
994 struct mcast_req *req = NULL; sysfs_show_group() local
1009 req = list_first_entry(&group->pending_list, struct mcast_req, group_list); sysfs_show_group()
1011 be64_to_cpu(req->sa_mad.mad_hdr.tid)); sysfs_show_group()
1061 struct mcast_req *req, *tmp force_clean_group() local
1063 list_for_each_entry_safe(req, tmp, &group->pending_list, group_list) { force_clean_group()
1064 list_del(&req->group_list); force_clean_group()
1065 kfree(req); force_clean_group()
1154 static void build_leave_mad(struct mcast_req *req) build_leave_mad() argument
1156 struct ib_sa_mad *mad = &req->sa_mad; build_leave_mad()
1164 struct mcast_req *req, *tmp, *group_first = NULL; clear_pending_reqs() local
1171 list_for_each_entry_safe(req, tmp, &group->func[vf].pending, func_list) { clear_pending_reqs()
1173 if (group_first == req && clear_pending_reqs()
1182 list_del(&req->group_list); clear_pending_reqs()
1183 list_del(&req->func_list); clear_pending_reqs()
1184 kfree(req); clear_pending_reqs()
1197 struct mcast_req *req; push_deleteing_req() local
1203 req = kzalloc(sizeof *req, GFP_KERNEL); push_deleteing_req()
1204 if (!req) { push_deleteing_req()
1212 kfree(req); push_deleteing_req()
1217 req->clean = 1; push_deleteing_req()
1218 req->func = slave; push_deleteing_req()
1219 req->group = group; push_deleteing_req()
1221 build_leave_mad(req); push_deleteing_req()
1222 queue_req(req); push_deleteing_req()
580 handle_leave_req(struct mcast_group *group, u8 leave_mask, struct mcast_req *req) handle_leave_req() argument
601 handle_join_req(struct mcast_group *group, u8 join_mask, struct mcast_req *req) handle_join_req() argument
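
The mcg.c hits above all follow one bookkeeping pattern: queue_req() links a mcast_req onto both the group's pending_list and the owning function's pending list, and every completion or error path undoes exactly that -- list_del() from both lists, decrement num_pend_reqs, kfree(). Below is a minimal user-space sketch of that dual-list bookkeeping; the list helpers and the pending_req/group types are hypothetical stand-ins, not the driver's structures (the driver also does the counting at its call sites rather than inside queue_req).

/* Illustrative only: dual-list bookkeeping for pending requests,
 * loosely modeled on queue_req() and the handle_*_req() paths above. */
#include <stdio.h>
#include <stdlib.h>

struct list_node { struct list_node *prev, *next; };

static void list_init(struct list_node *h) { h->prev = h->next = h; }
static void list_add_tail(struct list_node *n, struct list_node *h)
{
    n->prev = h->prev; n->next = h;
    h->prev->next = n; h->prev = n;
}
static void list_del(struct list_node *n)
{
    n->prev->next = n->next; n->next->prev = n->prev;
    n->prev = n->next = NULL;
}

struct pending_req {
    int func;                     /* which slave/function queued it   */
    struct list_node group_list;  /* link in the group's pending list */
    struct list_node func_list;   /* link in that function's list     */
};

struct slave_slot { struct list_node pending; int num_pend_reqs; };

struct group {
    struct list_node pending_list;
    struct slave_slot func[4];
};

/* Link into both lists and count it. */
static void queue_req(struct group *g, struct pending_req *req)
{
    g->func[req->func].num_pend_reqs++;
    list_add_tail(&req->group_list, &g->pending_list);
    list_add_tail(&req->func_list, &g->func[req->func].pending);
}

/* Every completion path: unlink from both lists, uncount, free. */
static void complete_req(struct group *g, struct pending_req *req)
{
    g->func[req->func].num_pend_reqs--;
    list_del(&req->group_list);
    list_del(&req->func_list);
    free(req);
}

int main(void)
{
    struct group g;
    struct pending_req *req = calloc(1, sizeof(*req));
    int i;

    list_init(&g.pending_list);
    for (i = 0; i < 4; i++) {
        list_init(&g.func[i].pending);
        g.func[i].num_pend_reqs = 0;
    }
    req->func = 2;
    queue_req(&g, req);
    printf("pending on func 2: %d\n", g.func[2].num_pend_reqs);
    complete_req(&g, req);
    printf("pending on func 2: %d\n", g.func[2].num_pend_reqs);
    return 0;
}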
/linux-4.1.27/drivers/gpu/drm/nouveau/
H A Dnouveau_gem.c259 struct drm_nouveau_gem_new *req = data; nouveau_gem_ioctl_new() local
263 if (!pfb->memtype_valid(pfb, req->info.tile_flags)) { nouveau_gem_ioctl_new()
264 NV_PRINTK(error, cli, "bad page flags: 0x%08x\n", req->info.tile_flags); nouveau_gem_ioctl_new()
268 ret = nouveau_gem_new(dev, req->info.size, req->align, nouveau_gem_ioctl_new()
269 req->info.domain, req->info.tile_mode, nouveau_gem_ioctl_new()
270 req->info.tile_flags, &nvbo); nouveau_gem_ioctl_new()
274 ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle); nouveau_gem_ioctl_new()
276 ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info); nouveau_gem_ioctl_new()
278 drm_gem_handle_delete(file_priv, req->info.handle); nouveau_gem_ioctl_new()
589 struct drm_nouveau_gem_pushbuf *req, nouveau_gem_pushbuf_reloc_apply()
596 reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc)); nouveau_gem_pushbuf_reloc_apply()
600 for (i = 0; i < req->nr_relocs; i++) { nouveau_gem_pushbuf_reloc_apply()
606 if (unlikely(r->bo_index > req->nr_buffers)) { nouveau_gem_pushbuf_reloc_apply()
616 if (unlikely(r->reloc_bo_index > req->nr_buffers)) { nouveau_gem_pushbuf_reloc_apply()
676 struct drm_nouveau_gem_pushbuf *req = data; nouveau_gem_ioctl_pushbuf() local
688 if (temp->chan->object->handle == (NVDRM_CHAN | req->channel)) { nouveau_gem_ioctl_pushbuf()
697 req->vram_available = drm->gem.vram_available; nouveau_gem_ioctl_pushbuf()
698 req->gart_available = drm->gem.gart_available; nouveau_gem_ioctl_pushbuf()
699 if (unlikely(req->nr_push == 0)) nouveau_gem_ioctl_pushbuf()
702 if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) { nouveau_gem_ioctl_pushbuf()
704 req->nr_push, NOUVEAU_GEM_MAX_PUSH); nouveau_gem_ioctl_pushbuf()
708 if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) { nouveau_gem_ioctl_pushbuf()
710 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS); nouveau_gem_ioctl_pushbuf()
714 if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) { nouveau_gem_ioctl_pushbuf()
716 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS); nouveau_gem_ioctl_pushbuf()
720 push = u_memcpya(req->push, req->nr_push, sizeof(*push)); nouveau_gem_ioctl_pushbuf()
724 bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo)); nouveau_gem_ioctl_pushbuf()
731 for (i = 0; i < req->nr_push; i++) { nouveau_gem_ioctl_pushbuf()
732 if (push[i].bo_index >= req->nr_buffers) { nouveau_gem_ioctl_pushbuf()
740 ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers, nouveau_gem_ioctl_pushbuf()
741 req->nr_buffers, &op, &do_reloc); nouveau_gem_ioctl_pushbuf()
750 ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo); nouveau_gem_ioctl_pushbuf()
758 ret = nouveau_dma_wait(chan, req->nr_push + 1, 16); nouveau_gem_ioctl_pushbuf()
764 for (i = 0; i < req->nr_push; i++) { nouveau_gem_ioctl_pushbuf()
773 ret = RING_SPACE(chan, req->nr_push * 2); nouveau_gem_ioctl_pushbuf()
779 for (i = 0; i < req->nr_push; i++) { nouveau_gem_ioctl_pushbuf()
787 ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS)); nouveau_gem_ioctl_pushbuf()
793 for (i = 0; i < req->nr_push; i++) { nouveau_gem_ioctl_pushbuf()
800 if (unlikely(cmd != req->suffix0)) { nouveau_gem_ioctl_pushbuf()
842 req->suffix0 = 0x00000000; nouveau_gem_ioctl_pushbuf()
843 req->suffix1 = 0x00000000; nouveau_gem_ioctl_pushbuf()
846 req->suffix0 = 0x00020000; nouveau_gem_ioctl_pushbuf()
847 req->suffix1 = 0x00000000; nouveau_gem_ioctl_pushbuf()
849 req->suffix0 = 0x20000000 | nouveau_gem_ioctl_pushbuf()
851 req->suffix1 = 0x00000000; nouveau_gem_ioctl_pushbuf()
861 struct drm_nouveau_gem_cpu_prep *req = data; nouveau_gem_ioctl_cpu_prep() local
864 bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT); nouveau_gem_ioctl_cpu_prep()
865 bool write = !!(req->flags & NOUVEAU_GEM_CPU_PREP_WRITE); nouveau_gem_ioctl_cpu_prep()
868 gem = drm_gem_object_lookup(dev, file_priv, req->handle); nouveau_gem_ioctl_cpu_prep()
896 struct drm_nouveau_gem_cpu_fini *req = data; nouveau_gem_ioctl_cpu_fini() local
900 gem = drm_gem_object_lookup(dev, file_priv, req->handle); nouveau_gem_ioctl_cpu_fini()
914 struct drm_nouveau_gem_info *req = data; nouveau_gem_ioctl_info() local
918 gem = drm_gem_object_lookup(dev, file_priv, req->handle); nouveau_gem_ioctl_info()
922 ret = nouveau_gem_info(file_priv, gem, req); nouveau_gem_ioctl_info()
588 nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, struct drm_nouveau_gem_pushbuf *req, struct drm_nouveau_gem_pushbuf_bo *bo) nouveau_gem_pushbuf_reloc_apply() argument
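
nouveau_gem_ioctl_pushbuf() shows the usual ioctl hygiene around a req that arrives from user space: clamp every caller-supplied count against a compile-time maximum before allocating and copying the arrays, then re-check each embedded index (bo_index, reloc_bo_index) against the counts that were already validated. The sketch below shows that validate-then-copy shape; the MAX_* limits and push_entry layout are invented, and a plain memcpy stands in for the copy-from-user step.

/* Illustrative only: bounds-check user-supplied counts before copying
 * the arrays they describe, then re-check embedded indices. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_PUSH    512
#define MAX_BUFFERS 1024

struct push_entry { unsigned bo_index; unsigned offset; };

struct pushbuf_req {
    unsigned nr_push;
    unsigned nr_buffers;
    const struct push_entry *push;   /* "user-supplied" array */
};

static int handle_pushbuf(const struct pushbuf_req *req)
{
    struct push_entry *push;
    unsigned i;

    /* Reject oversized counts before touching the arrays at all. */
    if (req->nr_push > MAX_PUSH || req->nr_buffers > MAX_BUFFERS)
        return -EINVAL;
    if (req->nr_push == 0)
        return 0;

    /* Copy the array in (copy_from_user in the real driver). */
    push = malloc(req->nr_push * sizeof(*push));
    if (!push)
        return -ENOMEM;
    memcpy(push, req->push, req->nr_push * sizeof(*push));

    /* Every index inside the copied data must also be range-checked. */
    for (i = 0; i < req->nr_push; i++) {
        if (push[i].bo_index >= req->nr_buffers) {
            free(push);
            return -EINVAL;
        }
    }

    printf("validated %u push entries\n", req->nr_push);
    free(push);
    return 0;
}

int main(void)
{
    struct push_entry e = { .bo_index = 0, .offset = 0x100 };
    struct pushbuf_req req = { .nr_push = 1, .nr_buffers = 1, .push = &e };

    return handle_pushbuf(&req) ? 1 : 0;
}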
/linux-4.1.27/drivers/s390/char/
H A Dsclp_cpi_sys.c71 static void cpi_callback(struct sclp_req *req, void *data) cpi_callback() argument
80 struct sclp_req *req; cpi_prepare_req() local
84 req = kzalloc(sizeof(struct sclp_req), GFP_KERNEL); cpi_prepare_req()
85 if (!req) cpi_prepare_req()
89 kfree(req); cpi_prepare_req()
112 req->command = SCLP_CMDW_WRITE_EVENT_DATA; cpi_prepare_req()
113 req->sccb = sccb; cpi_prepare_req()
114 req->status = SCLP_REQ_FILLED; cpi_prepare_req()
115 req->callback = cpi_callback; cpi_prepare_req()
116 return req; cpi_prepare_req()
119 static void cpi_free_req(struct sclp_req *req) cpi_free_req() argument
121 free_page((unsigned long) req->sccb); cpi_free_req()
122 kfree(req); cpi_free_req()
128 struct sclp_req *req; cpi_req() local
140 req = cpi_prepare_req(); cpi_req()
141 if (IS_ERR(req)) { cpi_req()
142 rc = PTR_ERR(req); cpi_req()
147 req->callback_data = &completion; cpi_req()
150 rc = sclp_add_request(req); cpi_req()
156 if (req->status != SCLP_REQ_DONE) { cpi_req()
158 req->status); cpi_req()
163 response = ((struct cpi_sccb *) req->sccb)->header.response_code; cpi_req()
171 cpi_free_req(req); cpi_req()
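
The sclp_cpi_sys.c hits are the classic way to run an asynchronous request synchronously: the callback does nothing but complete a struct completion the submitter is waiting on, and the submitter then inspects req->status before freeing everything. Below is a user-space sketch of the same shape (compile with -pthread); the "firmware" thread and every name are hypothetical stand-ins, not the SCLP interfaces.

/* Illustrative only: the callback signals a completion the submitter
 * waits on, as cpi_req()/cpi_callback() do above. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct completion {
    pthread_mutex_t lock;
    pthread_cond_t  cond;
    int             done;
};

static void init_completion(struct completion *c)
{
    pthread_mutex_init(&c->lock, NULL);
    pthread_cond_init(&c->cond, NULL);
    c->done = 0;
}

static void complete(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    c->done = 1;
    pthread_cond_signal(&c->cond);
    pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
    pthread_mutex_lock(&c->lock);
    while (!c->done)
        pthread_cond_wait(&c->cond, &c->lock);
    pthread_mutex_unlock(&c->lock);
}

struct request {
    int   status;                        /* filled in by the "firmware" */
    void (*callback)(struct request *, void *);
    void *callback_data;
};

static void request_done(struct request *req, void *data)
{
    complete(data);                      /* just wake the submitter */
}

static void *firmware_thread(void *arg)  /* simulated async completion */
{
    struct request *req = arg;

    req->status = 0;                     /* pretend the command succeeded */
    req->callback(req, req->callback_data);
    return NULL;
}

int main(void)
{
    struct completion done;
    struct request *req = calloc(1, sizeof(*req));
    pthread_t fw;

    init_completion(&done);
    req->callback = request_done;
    req->callback_data = &done;

    pthread_create(&fw, NULL, firmware_thread, req);  /* "submit" */
    wait_for_completion(&done);                       /* block until callback */
    pthread_join(fw, NULL);

    printf("request finished, status=%d\n", req->status);
    free(req);
    return 0;
}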
/linux-4.1.27/drivers/scsi/
H A Dhptiop.c49 struct hpt_iop_request_scsi_command *req);
56 u32 req = 0; iop_wait_ready_itl() local
60 req = readl(&hba->u.itl.iop->inbound_queue); iop_wait_ready_itl()
61 if (req != IOPMU_QUEUE_EMPTY) iop_wait_ready_itl()
66 if (req != IOPMU_QUEUE_EMPTY) { iop_wait_ready_itl()
67 writel(req, &hba->u.itl.iop->outbound_queue); iop_wait_ready_itl()
96 u32 req; hptiop_drain_outbound_queue_itl() local
98 while ((req = readl(&hba->u.itl.iop->outbound_queue)) != hptiop_drain_outbound_queue_itl()
101 if (req & IOPMU_QUEUE_MASK_HOST_BITS) hptiop_drain_outbound_queue_itl()
102 hptiop_request_callback_itl(hba, req); hptiop_drain_outbound_queue_itl()
107 ((char __iomem *)hba->u.itl.iop + req); hptiop_drain_outbound_queue_itl()
111 hptiop_request_callback_itl(hba, req); hptiop_drain_outbound_queue_itl()
116 hptiop_request_callback_itl(hba, req); hptiop_drain_outbound_queue_itl()
186 struct hpt_iop_request_scsi_command *req; hptiop_request_callback_mv() local
199 req = hba->reqs[tag >> 8].req_virt; hptiop_request_callback_mv()
201 req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS); hptiop_request_callback_mv()
203 hptiop_finish_scsi_req(hba, tag>>8, req); hptiop_request_callback_mv()
241 struct hpt_iop_request_scsi_command *req; hptiop_request_callback_mvfrey() local
250 req = hba->reqs[(_tag >> 4) & 0xff].req_virt; hptiop_request_callback_mvfrey()
252 req->header.result = IOP_RESULT_SUCCESS; hptiop_request_callback_mvfrey()
253 hptiop_finish_scsi_req(hba, (_tag >> 4) & 0xff, req); hptiop_request_callback_mvfrey()
309 struct hpt_iop_request_header __iomem *req = _req; iop_send_sync_request_itl() local
312 writel(readl(&req->flags) | IOP_REQUEST_FLAG_SYNC_REQUEST, &req->flags); iop_send_sync_request_itl()
313 writel(0, &req->context); iop_send_sync_request_itl()
314 writel((unsigned long)req - (unsigned long)hba->u.itl.iop, iop_send_sync_request_itl()
320 if (readl(&req->context)) iop_send_sync_request_itl()
412 struct hpt_iop_request_get_config __iomem *req; iop_get_config_itl() local
418 req = (struct hpt_iop_request_get_config __iomem *) iop_get_config_itl()
421 writel(0, &req->header.flags); iop_get_config_itl()
422 writel(IOP_REQUEST_TYPE_GET_CONFIG, &req->header.type); iop_get_config_itl()
423 writel(sizeof(struct hpt_iop_request_get_config), &req->header.size); iop_get_config_itl()
424 writel(IOP_RESULT_PENDING, &req->header.result); iop_get_config_itl()
426 if (iop_send_sync_request_itl(hba, req, 20000)) { iop_get_config_itl()
431 memcpy_fromio(config, req, sizeof(*config)); iop_get_config_itl()
439 struct hpt_iop_request_get_config *req = hba->u.mv.internal_req; iop_get_config_mv() local
441 req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); iop_get_config_mv()
442 req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG); iop_get_config_mv()
443 req->header.size = iop_get_config_mv()
445 req->header.result = cpu_to_le32(IOP_RESULT_PENDING); iop_get_config_mv()
446 req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_GET_CONFIG<<5); iop_get_config_mv()
447 req->header.context_hi32 = 0; iop_get_config_mv()
454 memcpy(config, req, sizeof(struct hpt_iop_request_get_config)); iop_get_config_mv()
484 struct hpt_iop_request_set_config __iomem *req; iop_set_config_itl() local
490 req = (struct hpt_iop_request_set_config __iomem *) iop_set_config_itl()
493 memcpy_toio((u8 __iomem *)req + sizeof(struct hpt_iop_request_header), iop_set_config_itl()
498 writel(0, &req->header.flags); iop_set_config_itl()
499 writel(IOP_REQUEST_TYPE_SET_CONFIG, &req->header.type); iop_set_config_itl()
500 writel(sizeof(struct hpt_iop_request_set_config), &req->header.size); iop_set_config_itl()
501 writel(IOP_RESULT_PENDING, &req->header.result); iop_set_config_itl()
503 if (iop_send_sync_request_itl(hba, req, 20000)) { iop_set_config_itl()
515 struct hpt_iop_request_set_config *req = hba->u.mv.internal_req; iop_set_config_mv() local
517 memcpy(req, config, sizeof(struct hpt_iop_request_set_config)); iop_set_config_mv()
518 req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); iop_set_config_mv()
519 req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG); iop_set_config_mv()
520 req->header.size = iop_set_config_mv()
522 req->header.result = cpu_to_le32(IOP_RESULT_PENDING); iop_set_config_mv()
523 req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5); iop_set_config_mv()
524 req->header.context_hi32 = 0; iop_set_config_mv()
537 struct hpt_iop_request_set_config *req = iop_set_config_mvfrey() local
540 memcpy(req, config, sizeof(struct hpt_iop_request_set_config)); iop_set_config_mvfrey()
541 req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); iop_set_config_mvfrey()
542 req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG); iop_set_config_mvfrey()
543 req->header.size = iop_set_config_mvfrey()
545 req->header.result = cpu_to_le32(IOP_RESULT_PENDING); iop_set_config_mvfrey()
546 req->header.context = cpu_to_le32(IOP_REQUEST_TYPE_SET_CONFIG<<5); iop_set_config_mvfrey()
547 req->header.context_hi32 = 0; iop_set_config_mvfrey()
708 dprintk("get_req : req=%p\n", hba->req_list); get_req()
717 static void free_req(struct hptiop_hba *hba, struct hptiop_request *req) free_req() argument
719 dprintk("free_req(%d, %p)\n", req->index, req); free_req()
720 req->next = hba->req_list; free_req()
721 hba->req_list = req; free_req()
725 struct hpt_iop_request_scsi_command *req) hptiop_finish_scsi_req()
729 dprintk("hptiop_finish_scsi_req: req=%p, type=%d, " hptiop_finish_scsi_req()
731 req, req->header.type, req->header.result, hptiop_finish_scsi_req()
732 req->header.context, tag); hptiop_finish_scsi_req()
734 BUG_ON(!req->header.result); hptiop_finish_scsi_req()
735 BUG_ON(req->header.type != cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND)); hptiop_finish_scsi_req()
742 switch (le32_to_cpu(req->header.result)) { hptiop_finish_scsi_req()
745 scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); hptiop_finish_scsi_req()
765 scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); hptiop_finish_scsi_req()
767 memcpy(scp->sense_buffer, &req->sg_list, hptiop_finish_scsi_req()
769 le32_to_cpu(req->dataxfer_length))); hptiop_finish_scsi_req()
779 scsi_bufflen(scp) - le32_to_cpu(req->dataxfer_length)); hptiop_finish_scsi_req()
789 struct hpt_iop_request_scsi_command *req; hptiop_host_request_callback_itl() local
794 req = hba->reqs[tag].req_virt; hptiop_host_request_callback_itl()
796 req->header.result = cpu_to_le32(IOP_RESULT_SUCCESS); hptiop_host_request_callback_itl()
799 req = hba->reqs[tag].req_virt; hptiop_host_request_callback_itl()
802 hptiop_finish_scsi_req(hba, tag, req); hptiop_host_request_callback_itl()
807 struct hpt_iop_request_header __iomem *req; hptiop_iop_request_callback_itl() local
811 req = (struct hpt_iop_request_header __iomem *) hptiop_iop_request_callback_itl()
813 dprintk("hptiop_iop_request_callback_itl: req=%p, type=%d, " hptiop_iop_request_callback_itl()
815 req, readl(&req->type), readl(&req->result), hptiop_iop_request_callback_itl()
816 readl(&req->context), tag); hptiop_iop_request_callback_itl()
818 BUG_ON(!readl(&req->result)); hptiop_iop_request_callback_itl()
819 BUG_ON(readl(&req->type) != IOP_REQUEST_TYPE_IOCTL_COMMAND); hptiop_iop_request_callback_itl()
821 p = (struct hpt_iop_request_ioctl_command __iomem *)req; hptiop_iop_request_callback_itl()
823 (readl(&req->context) | hptiop_iop_request_callback_itl()
824 ((u64)readl(&req->context_hi32)<<32)); hptiop_iop_request_callback_itl()
826 if (readl(&req->result) == IOP_RESULT_SUCCESS) { hptiop_iop_request_callback_itl()
1012 struct hpt_iop_request_scsi_command *req; hptiop_queuecommand_lck() local
1021 dprintk("hptiop_queuecmd : no free req\n"); hptiop_queuecommand_lck()
1028 "req_index=%d, req=%p\n", hptiop_queuecommand_lck()
1047 req = _req->req_virt; hptiop_queuecommand_lck()
1050 sg_count = hptiop_buildsgl(scp, req->sg_list); hptiop_queuecommand_lck()
1054 req->header.flags = cpu_to_le32(IOP_REQUEST_FLAG_OUTPUT_CONTEXT); hptiop_queuecommand_lck()
1055 req->header.type = cpu_to_le32(IOP_REQUEST_TYPE_SCSI_COMMAND); hptiop_queuecommand_lck()
1056 req->header.result = cpu_to_le32(IOP_RESULT_PENDING); hptiop_queuecommand_lck()
1057 req->dataxfer_length = cpu_to_le32(scsi_bufflen(scp)); hptiop_queuecommand_lck()
1058 req->channel = scp->device->channel; hptiop_queuecommand_lck()
1059 req->target = scp->device->id; hptiop_queuecommand_lck()
1060 req->lun = scp->device->lun; hptiop_queuecommand_lck()
1061 req->header.size = cpu_to_le32( hptiop_queuecommand_lck()
1066 memcpy(req->cdb, scp->cmnd, sizeof(req->cdb)); hptiop_queuecommand_lck()
724 hptiop_finish_scsi_req(struct hptiop_hba *hba, u32 tag, struct hpt_iop_request_scsi_command *req) hptiop_finish_scsi_req() argument
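
get_req()/free_req() above manage the controller's fixed pool of request slots as a simple LIFO free list threaded through a next pointer: allocation pops the list head, completion pushes the slot back. Here is a self-contained sketch of that free list; the slot type and pool size are invented for the example.

/* Illustrative only: a LIFO free list of preallocated request slots,
 * in the style of hptiop's get_req()/free_req(). */
#include <stdio.h>

#define NR_SLOTS 4

struct req_slot {
    int index;
    struct req_slot *next;   /* free-list link */
};

static struct req_slot slots[NR_SLOTS];
static struct req_slot *free_list;

static void pool_init(void)
{
    int i;

    free_list = NULL;
    for (i = NR_SLOTS - 1; i >= 0; i--) {
        slots[i].index = i;
        slots[i].next = free_list;   /* push, so slot 0 ends up on top */
        free_list = &slots[i];
    }
}

static struct req_slot *get_req(void)
{
    struct req_slot *req = free_list;

    if (req)
        free_list = req->next;       /* pop the head */
    return req;
}

static void free_req(struct req_slot *req)
{
    req->next = free_list;           /* push back onto the head */
    free_list = req;
}

int main(void)
{
    struct req_slot *a, *b;

    pool_init();
    a = get_req();
    b = get_req();
    printf("got slots %d and %d\n", a->index, b->index);
    free_req(a);
    free_req(b);
    return 0;
}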
/linux-4.1.27/drivers/macintosh/ams/
H A Dams-pmu.c46 static void ams_pmu_req_complete(struct adb_request *req) ams_pmu_req_complete() argument
48 complete((struct completion *)req->arg); ams_pmu_req_complete()
54 static struct adb_request req; ams_pmu_set_register() local
57 req.arg = &req_complete; ams_pmu_set_register()
58 if (pmu_request(&req, ams_pmu_req_complete, 4, ams_pmu_cmd, 0x00, reg, value)) ams_pmu_set_register()
67 static struct adb_request req; ams_pmu_get_register() local
70 req.arg = &req_complete; ams_pmu_get_register()
71 if (pmu_request(&req, ams_pmu_req_complete, 3, ams_pmu_cmd, 0x01, reg)) ams_pmu_get_register()
76 if (req.reply_len > 0) ams_pmu_get_register()
77 return req.reply[0]; ams_pmu_get_register()
/linux-4.1.27/arch/ia64/hp/sim/boot/
H A Dbootloader.c61 struct disk_req req; start_bootloader() local
108 req.len = sizeof(mem); start_bootloader()
109 req.addr = (long) mem; start_bootloader()
110 ssc(fd, 1, (long) &req, off, SSC_READ); start_bootloader()
136 req.len = sizeof(*elf_phdr); start_bootloader()
137 req.addr = (long) mem; start_bootloader()
138 ssc(fd, 1, (long) &req, e_phoff, SSC_READ); start_bootloader()
151 req.len = elf_phdr->p_filesz; start_bootloader()
152 req.addr = __pa(elf_phdr->p_paddr); start_bootloader()
153 ssc(fd, 1, (long) &req, elf_phdr->p_offset, SSC_READ); start_bootloader()
/linux-4.1.27/drivers/staging/lustre/lustre/include/
H A Dlustre_sec.h395 * \pre req->rq_reqmsg point to request message.
396 * \pre req->rq_reqlen is the request message length.
397 * \post req->rq_reqbuf point to request message with signature.
398 * \post req->rq_reqdata_len is set to the final request message size.
403 struct ptlrpc_request *req);
408 * \pre req->rq_repdata point to reply message with signature.
409 * \pre req->rq_repdata_len is the total reply message length.
410 * \post req->rq_repmsg point to reply message without signature.
411 * \post req->rq_replen is the reply message length.
416 struct ptlrpc_request *req);
421 * \pre req->rq_reqmsg point to request message in clear text.
422 * \pre req->rq_reqlen is the request message length.
423 * \post req->rq_reqbuf point to request message.
424 * \post req->rq_reqdata_len is set to the final request message size.
429 struct ptlrpc_request *req);
434 * \pre req->rq_repdata point to encrypted reply message.
435 * \pre req->rq_repdata_len is the total cipher text length.
436 * \post req->rq_repmsg point to reply message in clear text.
437 * \post req->rq_replen is the reply message length in clear text.
442 struct ptlrpc_request *req);
465 struct ptlrpc_request *req,
482 struct ptlrpc_request *req,
618 * To allocate request buffer for \a req.
620 * \pre req->rq_reqmsg == NULL.
621 * \pre req->rq_reqbuf == NULL, otherwise it must be pre-allocated,
623 * \post if success, req->rq_reqmsg point to a buffer with size
629 struct ptlrpc_request *req,
633 * To free request buffer for \a req.
635 * \pre req->rq_reqbuf != NULL.
640 struct ptlrpc_request *req);
643 * To allocate reply buffer for \a req.
645 * \pre req->rq_repbuf == NULL.
646 * \post if success, req->rq_repbuf point to a buffer with size
647 * req->rq_repbuf_len, the size should be large enough to receive
653 struct ptlrpc_request *req,
657 * To free reply buffer for \a req.
659 * \pre req->rq_repbuf != NULL.
660 * \post req->rq_repbuf == NULL.
661 * \post req->rq_repbuf_len == 0.
666 struct ptlrpc_request *req);
669 * To expand the request buffer of \a req, thus the \a segment in
670 * the request message pointed by req->rq_reqmsg can accommodate
673 * \pre req->rq_reqmsg->lm_buflens[segment] < newsize.
680 struct ptlrpc_request *req,
696 * \pre request message is pointed by req->rq_reqbuf, size is
697 * req->rq_reqdata_len; and the message has been unpacked to
700 * \retval SECSVC_OK success, req->rq_reqmsg point to request message
701 * in clear text, size is req->rq_reqlen; req->rq_svc_ctx is set;
702 * req->rq_sp_from is decoded from request.
704 * processed, and reply message has been prepared; req->rq_sp_from is
710 int (*accept) (struct ptlrpc_request *req);
715 * \pre reply message is pointed by req->rq_reply_state->rs_msg, size
716 * is req->rq_replen.
717 * \post req->rs_repdata_len is the final message size.
718 * \post req->rq_reply_off is set.
722 int (*authorize) (struct ptlrpc_request *req);
736 * \pre if req->rq_reply_state != NULL, then it's pre-allocated, we
739 * \post req->rq_reply_state != NULL;
740 * \post req->rq_reply_state->rs_msg != NULL;
744 int (*alloc_rs) (struct ptlrpc_request *req,
775 int (*prep_bulk) (struct ptlrpc_request *req,
783 int (*unwrap_bulk) (struct ptlrpc_request *req,
791 int (*wrap_bulk) (struct ptlrpc_request *req,
1019 int sptlrpc_cli_wrap_request(struct ptlrpc_request *req);
1020 int sptlrpc_cli_unwrap_reply(struct ptlrpc_request *req);
1021 int sptlrpc_cli_alloc_reqbuf(struct ptlrpc_request *req, int msgsize);
1022 void sptlrpc_cli_free_reqbuf(struct ptlrpc_request *req);
1023 int sptlrpc_cli_alloc_repbuf(struct ptlrpc_request *req, int msgsize);
1024 void sptlrpc_cli_free_repbuf(struct ptlrpc_request *req);
1025 int sptlrpc_cli_enlarge_reqbuf(struct ptlrpc_request *req,
1027 int sptlrpc_cli_unwrap_early_reply(struct ptlrpc_request *req,
1031 void sptlrpc_request_out_callback(struct ptlrpc_request *req);
1046 int sptlrpc_req_get_ctx(struct ptlrpc_request *req);
1047 void sptlrpc_req_put_ctx(struct ptlrpc_request *req, int sync);
1048 int sptlrpc_req_refresh_ctx(struct ptlrpc_request *req, long timeout);
1049 int sptlrpc_req_replace_dead_ctx(struct ptlrpc_request *req);
1050 void sptlrpc_req_set_flavor(struct ptlrpc_request *req, int opcode);
1083 int sptlrpc_svc_unwrap_request(struct ptlrpc_request *req);
1084 int sptlrpc_svc_alloc_rs(struct ptlrpc_request *req, int msglen);
1085 int sptlrpc_svc_wrap_reply(struct ptlrpc_request *req);
1087 void sptlrpc_svc_ctx_addref(struct ptlrpc_request *req);
1088 void sptlrpc_svc_ctx_decref(struct ptlrpc_request *req);
1089 void sptlrpc_svc_ctx_invalidate(struct ptlrpc_request *req);
1092 struct ptlrpc_request *req);
1110 int sptlrpc_cli_wrap_bulk(struct ptlrpc_request *req,
1112 int sptlrpc_cli_unwrap_bulk_read(struct ptlrpc_request *req,
1115 int sptlrpc_cli_unwrap_bulk_write(struct ptlrpc_request *req,
1132 int sptlrpc_unpack_user_desc(struct lustre_msg *req, int offset, int swabbed);
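
The lustre_sec.h comments above spell out the client-side buffer contract: the request buffer is allocated before the message is packed (afterwards rq_reqmsg points into it), wrapping for the wire records the final size in rq_reqdata_len, and the buffers are released once the exchange is over. The toy model below only mirrors that ordering and those pre/post conditions; the types are invented and the "wrap" step is an empty placeholder, not any real security flavor or the ptlrpc API.

/* Illustrative only: allocate the request buffer, pack, wrap, release,
 * following the pre/post conditions documented above. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct toy_request {
    char  *reqbuf;        /* wire buffer owned by the "policy"       */
    size_t reqbuf_len;
    char  *reqmsg;        /* packed request, points into reqbuf      */
    size_t reqlen;
    size_t reqdata_len;   /* final on-wire size after wrapping       */
};

static int alloc_reqbuf(struct toy_request *req, size_t msgsize)
{
    req->reqbuf = calloc(1, msgsize);  /* pre: reqmsg == NULL */
    if (!req->reqbuf)
        return -1;
    req->reqbuf_len = msgsize;
    req->reqmsg = req->reqbuf;         /* post: reqmsg points into the buffer */
    return 0;
}

static void wrap_request(struct toy_request *req)
{
    /* A real policy would sign or encrypt here; this placeholder only
     * records the final wire length, as the post-condition requires. */
    req->reqdata_len = req->reqlen;
}

static void free_reqbuf(struct toy_request *req)
{
    free(req->reqbuf);
    req->reqbuf = NULL;
    req->reqbuf_len = 0;
    req->reqmsg = NULL;
}

int main(void)
{
    struct toy_request req = { 0 };
    const char *msg = "ping";

    if (alloc_reqbuf(&req, 64))
        return 1;
    memcpy(req.reqmsg, msg, strlen(msg) + 1);
    req.reqlen = strlen(msg) + 1;
    wrap_request(&req);
    printf("wire size %zu bytes\n", req.reqdata_len);
    free_reqbuf(&req);
    return 0;
}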
/linux-4.1.27/drivers/misc/genwqe/
H A Dcard_ddcb.c204 struct ddcb_requ *req; ddcb_requ_alloc() local
206 req = kzalloc(sizeof(*req), GFP_ATOMIC); ddcb_requ_alloc()
207 if (!req) ddcb_requ_alloc()
210 return &req->cmd; ddcb_requ_alloc()
215 struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); ddcb_requ_free() local
217 kfree(req); ddcb_requ_free()
220 static inline enum genwqe_requ_state ddcb_requ_get_state(struct ddcb_requ *req) ddcb_requ_get_state() argument
222 return req->req_state; ddcb_requ_get_state()
225 static inline void ddcb_requ_set_state(struct ddcb_requ *req, ddcb_requ_set_state() argument
228 req->req_state = new_state; ddcb_requ_set_state()
231 static inline int ddcb_requ_collect_debug_data(struct ddcb_requ *req) ddcb_requ_collect_debug_data() argument
233 return req->cmd.ddata_addr != 0x0; ddcb_requ_collect_debug_data()
239 * @req: DDCB work request
250 static int ddcb_requ_finished(struct genwqe_dev *cd, struct ddcb_requ *req) ddcb_requ_finished() argument
252 return (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) || ddcb_requ_finished()
337 static void copy_ddcb_results(struct ddcb_requ *req, int ddcb_no) copy_ddcb_results() argument
339 struct ddcb_queue *queue = req->queue; copy_ddcb_results()
340 struct ddcb *pddcb = &queue->ddcb_vaddr[req->num]; copy_ddcb_results()
342 memcpy(&req->cmd.asv[0], &pddcb->asv[0], DDCB_ASV_LENGTH); copy_ddcb_results()
345 req->cmd.vcrc = be16_to_cpu(pddcb->vcrc_16); copy_ddcb_results()
346 req->cmd.deque_ts = be64_to_cpu(pddcb->deque_ts_64); copy_ddcb_results()
347 req->cmd.cmplt_ts = be64_to_cpu(pddcb->cmplt_ts_64); copy_ddcb_results()
349 req->cmd.attn = be16_to_cpu(pddcb->attn_16); copy_ddcb_results()
350 req->cmd.progress = be32_to_cpu(pddcb->progress_32); copy_ddcb_results()
351 req->cmd.retc = be16_to_cpu(pddcb->retc_16); copy_ddcb_results()
353 if (ddcb_requ_collect_debug_data(req)) { copy_ddcb_results()
358 memcpy(&req->debug_data.ddcb_finished, pddcb, copy_ddcb_results()
359 sizeof(req->debug_data.ddcb_finished)); copy_ddcb_results()
360 memcpy(&req->debug_data.ddcb_prev, prev_pddcb, copy_ddcb_results()
361 sizeof(req->debug_data.ddcb_prev)); copy_ddcb_results()
384 struct ddcb_requ *req; genwqe_check_ddcb_queue() local
396 req = queue->ddcb_req[queue->ddcb_act]; genwqe_check_ddcb_queue()
397 if (req == NULL) { genwqe_check_ddcb_queue()
426 copy_ddcb_results(req, queue->ddcb_act); genwqe_check_ddcb_queue()
429 dev_dbg(&pci_dev->dev, "FINISHED DDCB#%d\n", req->num); genwqe_check_ddcb_queue()
436 VCRC_LENGTH(req->cmd.asv_length), genwqe_check_ddcb_queue()
443 pddcb->pre, VCRC_LENGTH(req->cmd.asv_length), genwqe_check_ddcb_queue()
447 ddcb_requ_set_state(req, GENWQE_REQU_FINISHED); genwqe_check_ddcb_queue()
469  * @req: pointer to requested DDCB parameters __genwqe_wait_ddcb()
480 * purge_ddcb() is being called to get the &req removed from the
483 int __genwqe_wait_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) __genwqe_wait_ddcb() argument
490 if (req == NULL) __genwqe_wait_ddcb()
493 queue = req->queue; __genwqe_wait_ddcb()
497 ddcb_no = req->num; __genwqe_wait_ddcb()
502 ddcb_requ_finished(cd, req), __genwqe_wait_ddcb()
512 struct ddcb_queue *queue = req->queue; __genwqe_wait_ddcb()
520 genwqe_check_ddcb_queue(cd, req->queue); __genwqe_wait_ddcb()
521 if (ddcb_requ_finished(cd, req)) __genwqe_wait_ddcb()
525 "[%s] err: DDCB#%d timeout rc=%d state=%d req @ %p\n", __genwqe_wait_ddcb()
526 __func__, req->num, rc, ddcb_requ_get_state(req), __genwqe_wait_ddcb()
527 req); __genwqe_wait_ddcb()
532 pddcb = &queue->ddcb_vaddr[req->num]; __genwqe_wait_ddcb()
535 print_ddcb_info(cd, req->queue); __genwqe_wait_ddcb()
548 __func__, req->num, rc, ddcb_requ_get_state(req)); __genwqe_wait_ddcb()
556 __func__, req->num, rc); __genwqe_wait_ddcb()
612 * @req: DDCB request
624 int __genwqe_purge_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req) __genwqe_purge_ddcb() argument
629 struct ddcb_queue *queue = req->queue; __genwqe_purge_ddcb()
642 pddcb = &queue->ddcb_vaddr[req->num]; __genwqe_purge_ddcb()
648 /* Check if req was meanwhile finished */ __genwqe_purge_ddcb()
649 if (ddcb_requ_get_state(req) == GENWQE_REQU_FINISHED) __genwqe_purge_ddcb()
674 * time. It will mark the req finished and free it up __genwqe_purge_ddcb()
678 copy_ddcb_results(req, req->num); /* for the failing case */ __genwqe_purge_ddcb()
683 copy_ddcb_results(req, req->num); __genwqe_purge_ddcb()
684 ddcb_requ_set_state(req, GENWQE_REQU_FINISHED); __genwqe_purge_ddcb()
686 queue->ddcb_req[req->num] = NULL; /* delete from array */ __genwqe_purge_ddcb()
701 (queue->ddcb_act == req->num)) { __genwqe_purge_ddcb()
716 dev_dbg(&pci_dev->dev, "UN/FINISHED DDCB#%d\n", req->num); __genwqe_purge_ddcb()
721 __func__, req->num, genwqe_ddcb_software_timeout, __genwqe_purge_ddcb()
724 print_ddcb_info(cd, req->queue); __genwqe_purge_ddcb()
751 * @req: pointer to DDCB execution request
758 int __genwqe_enqueue_ddcb(struct genwqe_dev *cd, struct ddcb_requ *req, __genwqe_enqueue_ddcb() argument
772 __func__, req->num); __genwqe_enqueue_ddcb()
776 queue = req->queue = &cd->queue; __genwqe_enqueue_ddcb()
791 pddcb = get_next_ddcb(cd, queue, &req->num); /* get ptr and num */ __genwqe_enqueue_ddcb()
813 if (queue->ddcb_req[req->num] != NULL) { __genwqe_enqueue_ddcb()
817 "[%s] picked DDCB %d with req=%p still in use!!\n", __genwqe_enqueue_ddcb()
818 __func__, req->num, req); __genwqe_enqueue_ddcb()
821 ddcb_requ_set_state(req, GENWQE_REQU_ENQUEUED); __genwqe_enqueue_ddcb()
822 queue->ddcb_req[req->num] = req; __genwqe_enqueue_ddcb()
824 pddcb->cmdopts_16 = cpu_to_be16(req->cmd.cmdopts); __genwqe_enqueue_ddcb()
825 pddcb->cmd = req->cmd.cmd; __genwqe_enqueue_ddcb()
826 pddcb->acfunc = req->cmd.acfunc; /* functional unit */ __genwqe_enqueue_ddcb()
842 pddcb->psp = (((req->cmd.asiv_length / 8) << 4) | __genwqe_enqueue_ddcb()
843 ((req->cmd.asv_length / 8))); __genwqe_enqueue_ddcb()
844 pddcb->disp_ts_64 = cpu_to_be64(req->cmd.disp_ts); __genwqe_enqueue_ddcb()
849 * req->cmd.asiv_length. But simulation benefits from some __genwqe_enqueue_ddcb()
861 &req->cmd.__asiv[0], /* source */ __genwqe_enqueue_ddcb()
862 DDCB_ASIV_LENGTH); /* req->cmd.asiv_length */ __genwqe_enqueue_ddcb()
864 pddcb->n.ats_64 = cpu_to_be64(req->cmd.ats); __genwqe_enqueue_ddcb()
866 &req->cmd.asiv[0], /* source */ __genwqe_enqueue_ddcb()
867 DDCB_ASIV_LENGTH_ATS); /* req->cmd.asiv_length */ __genwqe_enqueue_ddcb()
877 ICRC_LENGTH(req->cmd.asiv_length), 0xffff); __genwqe_enqueue_ddcb()
884 dev_dbg(&pci_dev->dev, "INPUT DDCB#%d\n", req->num); __genwqe_enqueue_ddcb()
887 if (ddcb_requ_collect_debug_data(req)) { __genwqe_enqueue_ddcb()
891 genwqe_init_debug_data(cd, &req->debug_data); __genwqe_enqueue_ddcb()
892 memcpy(&req->debug_data.ddcb_before, pddcb, __genwqe_enqueue_ddcb()
893 sizeof(req->debug_data.ddcb_before)); __genwqe_enqueue_ddcb()
896 enqueue_ddcb(cd, queue, pddcb, req->num); __genwqe_enqueue_ddcb()
902 ddcb_requ_set_state(req, GENWQE_REQU_TAPPED); __genwqe_enqueue_ddcb()
912 * @req: user provided DDCB request
921 struct ddcb_requ *req = container_of(cmd, struct ddcb_requ, cmd); __genwqe_execute_raw_ddcb() local
933 rc = __genwqe_enqueue_ddcb(cd, req, f_flags); __genwqe_execute_raw_ddcb()
937 rc = __genwqe_wait_ddcb(cd, req); __genwqe_execute_raw_ddcb()
941 if (ddcb_requ_collect_debug_data(req)) { __genwqe_execute_raw_ddcb()
944 &req->debug_data, __genwqe_execute_raw_ddcb()
963 __genwqe_purge_ddcb(cd, req); __genwqe_execute_raw_ddcb()
965 if (ddcb_requ_collect_debug_data(req)) { __genwqe_execute_raw_ddcb()
968 &req->debug_data, __genwqe_execute_raw_ddcb()
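
card_ddcb.c tracks each DDCB work request through an explicit state field (enqueued, tapped, finished) and builds the synchronous __genwqe_execute_raw_ddcb() out of enqueue + wait + purge-on-error. The condensed sketch below keeps only that lifecycle; the state enum, the polling loop, and the fake completion step are all hypothetical, not the GenWQE interfaces.

/* Illustrative only: enqueue / wait / purge with an explicit state
 * field, loosely following __genwqe_enqueue_ddcb(), __genwqe_wait_ddcb()
 * and __genwqe_purge_ddcb(). */
#include <stdio.h>

enum req_state { REQU_NEW, REQU_ENQUEUED, REQU_FINISHED };

struct work_req {
    enum req_state state;
    int            retc;     /* return code filled in on completion */
};

static void enqueue_req(struct work_req *req)
{
    req->state = REQU_ENQUEUED;          /* handed to the "hardware" */
}

static void hardware_completes(struct work_req *req)
{
    req->retc = 0;                       /* simulated successful completion */
    req->state = REQU_FINISHED;
}

/* Returns 0 once the request finished, -1 if we gave up waiting. */
static int wait_req(struct work_req *req, int max_polls)
{
    int i;

    for (i = 0; i < max_polls; i++) {
        if (i == 1)
            hardware_completes(req);     /* stands in for the IRQ path */
        if (req->state == REQU_FINISHED)
            return 0;
    }
    return -1;
}

/* On timeout the request must be taken back so its slot can be reused. */
static void purge_req(struct work_req *req)
{
    req->state = REQU_FINISHED;
    req->retc = -1;
}

static int execute_req(struct work_req *req)
{
    enqueue_req(req);
    if (wait_req(req, 8)) {
        purge_req(req);
        return -1;
    }
    return req->retc;
}

int main(void)
{
    struct work_req req = { .state = REQU_NEW };

    printf("execute_req() -> %d\n", execute_req(&req));
    return 0;
}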
/linux-4.1.27/drivers/scsi/libsas/
H A Dsas_host_smp.c228 int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req, sas_smp_host_handler() argument
236 if (blk_rq_bytes(req) < 8 || blk_rq_bytes(rsp) < 8) sas_smp_host_handler()
239 if (bio_offset(req->bio) + blk_rq_bytes(req) > PAGE_SIZE || sas_smp_host_handler()
246 req_data = kzalloc(blk_rq_bytes(req), GFP_KERNEL); sas_smp_host_handler()
258 buf = kmap_atomic(bio_page(req->bio)); sas_smp_host_handler()
259 memcpy(req_data, buf, blk_rq_bytes(req)); sas_smp_host_handler()
260 kunmap_atomic(buf - bio_offset(req->bio)); sas_smp_host_handler()
277 req->resid_len -= 8; sas_smp_host_handler()
284 req->resid_len -= 8; sas_smp_host_handler()
298 req->resid_len -= 16; sas_smp_host_handler()
299 if ((int)req->resid_len < 0) { sas_smp_host_handler()
300 req->resid_len = 0; sas_smp_host_handler()
314 req->resid_len -= 16; sas_smp_host_handler()
315 if ((int)req->resid_len < 0) { sas_smp_host_handler()
316 req->resid_len = 0; sas_smp_host_handler()
333 if (blk_rq_bytes(req) < base_frame_size + to_write * 4 || sas_smp_host_handler()
334 req->resid_len < base_frame_size + to_write * 4) { sas_smp_host_handler()
341 req->resid_len -= base_frame_size + to_write * 4; sas_smp_host_handler()
351 req->resid_len -= 44; sas_smp_host_handler()
352 if ((int)req->resid_len < 0) { sas_smp_host_handler()
353 req->resid_len = 0; sas_smp_host_handler()
/linux-4.1.27/net/core/
H A Drequest_sock.c101 struct request_sock *req; reqsk_queue_destroy() local
104 while ((req = lopt->syn_table[i]) != NULL) { reqsk_queue_destroy()
105 lopt->syn_table[i] = req->dl_next; reqsk_queue_destroy()
112 if (del_timer_sync(&req->rsk_timer)) reqsk_queue_destroy()
113 reqsk_put(req); reqsk_queue_destroy()
114 reqsk_put(req); reqsk_queue_destroy()
136 * until 3WHS is either completed or aborted. Afterwards the req will stay
142 * When a child socket is accepted, its corresponding req->sk is set to
143 * NULL since it's no longer needed. More importantly, "req->sk == NULL"
149 * with its socket lock held. But a request_sock (req) can be accessed by
171 void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req, reqsk_fastopen_remove() argument
174 struct sock *lsk = req->rsk_listener; reqsk_fastopen_remove()
182 tcp_rsk(req)->tfo_listener = false; reqsk_fastopen_remove()
183 if (req->sk) /* the child socket hasn't been accepted yet */ reqsk_fastopen_remove()
191 reqsk_put(req); reqsk_fastopen_remove()
194 /* Wait for 60secs before removing a req that has triggered RST. reqsk_fastopen_remove()
196 * counting the req against fastopen.max_qlen, and disabling reqsk_fastopen_remove()
201 req->rsk_timer.expires = jiffies + 60*HZ; reqsk_fastopen_remove()
203 fastopenq->rskq_rst_head = req; reqsk_fastopen_remove()
205 fastopenq->rskq_rst_tail->dl_next = req; reqsk_fastopen_remove()
207 req->dl_next = NULL; reqsk_fastopen_remove()
208 fastopenq->rskq_rst_tail = req; reqsk_fastopen_remove()
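
reqsk_fastopen_remove() parks a request that drew a RST on a singly linked queue kept as a head/tail pointer pair (rskq_rst_head/rskq_rst_tail), appending at the tail so the oldest entry is reaped first when its timer fires. The head/tail append below is a stripped-down sketch of that queue with hypothetical names; the timer side is left out.

/* Illustrative only: tail insertion into a singly linked queue kept as
 * head/tail pointers, as reqsk_fastopen_remove() does for the RST list. */
#include <stdio.h>
#include <stdlib.h>

struct rst_req {
    int             id;
    struct rst_req *dl_next;
};

struct rst_queue {
    struct rst_req *head;
    struct rst_req *tail;
};

static void rst_queue_add(struct rst_queue *q, struct rst_req *req)
{
    if (q->head == NULL)
        q->head = req;              /* first entry becomes the head */
    else
        q->tail->dl_next = req;     /* otherwise chain after the tail */
    req->dl_next = NULL;
    q->tail = req;
}

/* Reap from the head: the oldest entry is always in front. */
static struct rst_req *rst_queue_pop(struct rst_queue *q)
{
    struct rst_req *req = q->head;

    if (req) {
        q->head = req->dl_next;
        if (q->head == NULL)
            q->tail = NULL;
    }
    return req;
}

int main(void)
{
    struct rst_queue q = { NULL, NULL };
    struct rst_req *req;
    int i;

    for (i = 1; i <= 3; i++) {
        req = calloc(1, sizeof(*req));
        req->id = i;
        rst_queue_add(&q, req);
    }
    while ((req = rst_queue_pop(&q)) != NULL) {
        printf("reaping req %d\n", req->id);
        free(req);
    }
    return 0;
}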
/linux-4.1.27/drivers/crypto/caam/
H A Dcaamhash.c131 int (*update)(struct ahash_request *req);
132 int (*final)(struct ahash_request *req);
133 int (*finup)(struct ahash_request *req);
156 /* Map req->result, and append seq_out_ptr command that points to it */ map_seq_out_ptr_result()
181 /* Map req->src and put it in link table */ src_map_to_sec4_sg()
258 * or req->result
291 * Load from buf and/or src and write to req->result or state->context ahash_ctx_data_to_out()
306 * Load from buf and/or src and write to req->result or state->context ahash_data_to_out()
586 * @dst_dma: physical mapped address of req->result
606 struct ahash_request *req, int dst_len) ahash_unmap()
609 dma_unmap_sg_chained(dev, req->src, edesc->src_nents, ahash_unmap()
621 struct ahash_request *req, int dst_len, u32 flag) ahash_unmap_ctx()
623 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_unmap_ctx()
625 struct caam_hash_state *state = ahash_request_ctx(req); ahash_unmap_ctx()
629 ahash_unmap(dev, edesc, req, dst_len); ahash_unmap_ctx()
635 struct ahash_request *req = context; ahash_done() local
637 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_done()
641 struct caam_hash_state *state = ahash_request_ctx(req); ahash_done()
651 ahash_unmap(jrdev, edesc, req, digestsize); ahash_done()
658 if (req->result) ahash_done()
660 DUMP_PREFIX_ADDRESS, 16, 4, req->result, ahash_done()
664 req->base.complete(&req->base, err); ahash_done()
670 struct ahash_request *req = context; ahash_done_bi() local
672 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_done_bi()
675 struct caam_hash_state *state = ahash_request_ctx(req); ahash_done_bi()
686 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); ahash_done_bi()
693 if (req->result) ahash_done_bi()
695 DUMP_PREFIX_ADDRESS, 16, 4, req->result, ahash_done_bi()
699 req->base.complete(&req->base, err); ahash_done_bi()
705 struct ahash_request *req = context; ahash_done_ctx_src() local
707 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_done_ctx_src()
711 struct caam_hash_state *state = ahash_request_ctx(req); ahash_done_ctx_src()
721 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_TO_DEVICE); ahash_done_ctx_src()
728 if (req->result) ahash_done_ctx_src()
730 DUMP_PREFIX_ADDRESS, 16, 4, req->result, ahash_done_ctx_src()
734 req->base.complete(&req->base, err); ahash_done_ctx_src()
740 struct ahash_request *req = context; ahash_done_ctx_dst() local
742 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_done_ctx_dst()
745 struct caam_hash_state *state = ahash_request_ctx(req); ahash_done_ctx_dst()
756 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); ahash_done_ctx_dst()
763 if (req->result) ahash_done_ctx_dst()
765 DUMP_PREFIX_ADDRESS, 16, 4, req->result, ahash_done_ctx_dst()
769 req->base.complete(&req->base, err); ahash_done_ctx_dst()
773 static int ahash_update_ctx(struct ahash_request *req) ahash_update_ctx() argument
775 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_update_ctx()
777 struct caam_hash_state *state = ahash_request_ctx(req); ahash_update_ctx()
779 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | ahash_update_ctx()
786 int in_len = *buflen + req->nbytes, to_hash; ahash_update_ctx()
800 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), ahash_update_ctx()
835 src_map_to_sec4_sg(jrdev, req->src, src_nents, ahash_update_ctx()
839 scatterwalk_map_and_copy(next_buf, req->src, ahash_update_ctx()
873 ret = caam_jr_enqueue(jrdev, desc, ahash_done_bi, req); ahash_update_ctx()
877 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, ahash_update_ctx()
882 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, ahash_update_ctx()
883 req->nbytes, 0); ahash_update_ctx()
898 static int ahash_final_ctx(struct ahash_request *req) ahash_final_ctx() argument
900 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_final_ctx()
902 struct caam_hash_state *state = ahash_request_ctx(req); ahash_final_ctx()
904 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | ahash_final_ctx()
958 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_final_ctx()
970 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); ahash_final_ctx()
974 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); ahash_final_ctx()
981 static int ahash_finup_ctx(struct ahash_request *req) ahash_finup_ctx() argument
983 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_finup_ctx()
985 struct caam_hash_state *state = ahash_request_ctx(req); ahash_finup_ctx()
987 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | ahash_finup_ctx()
1003 src_nents = __sg_count(req->src, req->nbytes, &chained); ahash_finup_ctx()
1035 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + ahash_finup_ctx()
1046 buflen + req->nbytes, LDST_SGF); ahash_finup_ctx()
1048 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_finup_ctx()
1060 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_src, req); ahash_finup_ctx()
1064 ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_FROM_DEVICE); ahash_finup_ctx()
1071 static int ahash_digest(struct ahash_request *req) ahash_digest() argument
1073 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_digest()
1076 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | ahash_digest()
1089 src_nents = sg_count(req->src, req->nbytes, &chained); ahash_digest()
1090 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, DMA_TO_DEVICE, ahash_digest()
1112 sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0); ahash_digest()
1122 src_dma = sg_dma_address(req->src); ahash_digest()
1125 append_seq_in_ptr(desc, src_dma, req->nbytes, options); ahash_digest()
1127 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_digest()
1139 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); ahash_digest()
1143 ahash_unmap(jrdev, edesc, req, digestsize); ahash_digest()
1151 static int ahash_final_no_ctx(struct ahash_request *req) ahash_final_no_ctx() argument
1153 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_final_no_ctx()
1155 struct caam_hash_state *state = ahash_request_ctx(req); ahash_final_no_ctx()
1157 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | ahash_final_no_ctx()
1189 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_final_no_ctx()
1202 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); ahash_final_no_ctx()
1206 ahash_unmap(jrdev, edesc, req, digestsize); ahash_final_no_ctx()
1214 static int ahash_update_no_ctx(struct ahash_request *req) ahash_update_no_ctx() argument
1216 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_update_no_ctx()
1218 struct caam_hash_state *state = ahash_request_ctx(req); ahash_update_no_ctx()
1220 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | ahash_update_no_ctx()
1227 int in_len = *buflen + req->nbytes, to_hash; ahash_update_no_ctx()
1240 src_nents = __sg_count(req->src, req->nbytes - (*next_buflen), ahash_update_no_ctx()
1266 src_map_to_sec4_sg(jrdev, req->src, src_nents, ahash_update_no_ctx()
1269 scatterwalk_map_and_copy(next_buf, req->src, ahash_update_no_ctx()
1300 ret = caam_jr_enqueue(jrdev, desc, ahash_done_ctx_dst, req); ahash_update_no_ctx()
1307 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, ahash_update_no_ctx()
1312 scatterwalk_map_and_copy(buf + *buflen, req->src, 0, ahash_update_no_ctx()
1313 req->nbytes, 0); ahash_update_no_ctx()
1329 static int ahash_finup_no_ctx(struct ahash_request *req) ahash_finup_no_ctx() argument
1331 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_finup_no_ctx()
1333 struct caam_hash_state *state = ahash_request_ctx(req); ahash_finup_no_ctx()
1335 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | ahash_finup_no_ctx()
1350 src_nents = __sg_count(req->src, req->nbytes, &chained); ahash_finup_no_ctx()
1377 src_map_to_sec4_sg(jrdev, req->src, src_nents, edesc->sec4_sg + 1, ahash_finup_no_ctx()
1388 req->nbytes, LDST_SGF); ahash_finup_no_ctx()
1390 edesc->dst_dma = map_seq_out_ptr_result(desc, jrdev, req->result, ahash_finup_no_ctx()
1402 ret = caam_jr_enqueue(jrdev, desc, ahash_done, req); ahash_finup_no_ctx()
1406 ahash_unmap(jrdev, edesc, req, digestsize); ahash_finup_no_ctx()
1414 static int ahash_update_first(struct ahash_request *req) ahash_update_first() argument
1416 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_update_first()
1418 struct caam_hash_state *state = ahash_request_ctx(req); ahash_update_first()
1420 gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG | ahash_update_first()
1436 *next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) - ahash_update_first()
1438 to_hash = req->nbytes - *next_buflen; ahash_update_first()
1441 src_nents = sg_count(req->src, req->nbytes - (*next_buflen), ahash_update_first()
1443 dma_map_sg_chained(jrdev, req->src, src_nents ? : 1, ahash_update_first()
1467 sg_to_sec4_sg_last(req->src, src_nents, ahash_update_first()
1480 src_dma = sg_dma_address(req->src); ahash_update_first()
1485 scatterwalk_map_and_copy(next_buf, req->src, to_hash, ahash_update_first()
1506 req); ahash_update_first()
1513 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, ahash_update_first()
1521 scatterwalk_map_and_copy(next_buf, req->src, 0, ahash_update_first()
1522 req->nbytes, 0); ahash_update_first()
1533 static int ahash_finup_first(struct ahash_request *req) ahash_finup_first() argument
1535 return ahash_digest(req); ahash_finup_first()
1538 static int ahash_init(struct ahash_request *req) ahash_init() argument
1540 struct caam_hash_state *state = ahash_request_ctx(req); ahash_init()
1554 static int ahash_update(struct ahash_request *req) ahash_update() argument
1556 struct caam_hash_state *state = ahash_request_ctx(req); ahash_update()
1558 return state->update(req); ahash_update()
1561 static int ahash_finup(struct ahash_request *req) ahash_finup() argument
1563 struct caam_hash_state *state = ahash_request_ctx(req); ahash_finup()
1565 return state->finup(req); ahash_finup()
1568 static int ahash_final(struct ahash_request *req) ahash_final() argument
1570 struct caam_hash_state *state = ahash_request_ctx(req); ahash_final()
1572 return state->final(req); ahash_final()
1575 static int ahash_export(struct ahash_request *req, void *out) ahash_export() argument
1577 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_export()
1579 struct caam_hash_state *state = ahash_request_ctx(req); ahash_export()
1587 static int ahash_import(struct ahash_request *req, const void *in) ahash_import() argument
1589 struct crypto_ahash *ahash = crypto_ahash_reqtfm(req); ahash_import()
1591 struct caam_hash_state *state = ahash_request_ctx(req); ahash_import()
604 ahash_unmap(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len) ahash_unmap() argument
619 ahash_unmap_ctx(struct device *dev, struct ahash_edesc *edesc, struct ahash_request *req, int dst_len, u32 flag) ahash_unmap_ctx() argument
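
The caamhash entry points at the bottom of the listing (ahash_update/finup/final) are thin trampolines: the per-request state carries update/final/finup function pointers, each stage dispatches through them, and ahash_update_first switches the hooks over to the with-context variants once a context exists. The sketch below keeps only that function-pointer dispatch; the stage bodies are toy placeholders, not the driver's descriptors.

/* Illustrative only: per-request function-pointer dispatch in the style
 * of caamhash's state->update()/state->final() hooks. */
#include <stdio.h>

struct hash_req;

struct hash_state {
    int (*update)(struct hash_req *);
    int (*final)(struct hash_req *);
};

struct hash_req {
    struct hash_state state;
    const char       *data;
};

static int update_ctx(struct hash_req *req)
{
    printf("update with context: \"%s\"\n", req->data);
    return 0;
}

static int final_ctx(struct hash_req *req)
{
    printf("finalize digest\n");
    return 0;
}

/* First update: set up the context, then switch the hooks so every
 * later call goes straight to the with-context variants. */
static int update_first(struct hash_req *req)
{
    req->state.update = update_ctx;
    req->state.final  = final_ctx;
    printf("first update: \"%s\"\n", req->data);
    return 0;
}

static int hash_init(struct hash_req *req)
{
    req->state.update = update_first;
    req->state.final  = final_ctx;
    return 0;
}

/* The exported entry points only dispatch, exactly like ahash_update(). */
static int hash_update(struct hash_req *req) { return req->state.update(req); }
static int hash_final(struct hash_req *req)  { return req->state.final(req); }

int main(void)
{
    struct hash_req req = { .data = "abc" };

    hash_init(&req);
    hash_update(&req);
    req.data = "def";
    hash_update(&req);
    hash_final(&req);
    return 0;
}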
/linux-4.1.27/drivers/usb/chipidea/
H A Dudc.c364 u32 mul = hwreq->req.length / hwep->ep.maxpacket; add_td_to_list()
366 if (hwreq->req.length == 0 add_td_to_list()
367 || hwreq->req.length % hwep->ep.maxpacket) add_td_to_list()
372 temp = (u32) (hwreq->req.dma + hwreq->req.actual); add_td_to_list()
382 hwreq->req.actual += length; add_td_to_list()
417 unsigned rest = hwreq->req.length; _hardware_enqueue()
422 if (hwreq->req.status == -EALREADY) _hardware_enqueue()
425 hwreq->req.status = -EALREADY; _hardware_enqueue()
427 ret = usb_gadget_map_request(&ci->gadget, &hwreq->req, hwep->dir); _hardware_enqueue()
435 if (hwreq->req.dma % PAGE_SIZE) _hardware_enqueue()
442 unsigned count = min(hwreq->req.length - hwreq->req.actual, _hardware_enqueue()
448 if (hwreq->req.zero && hwreq->req.length _hardware_enqueue()
449 && (hwreq->req.length % hwep->ep.maxpacket == 0)) _hardware_enqueue()
458 if (!hwreq->req.no_interrupt) _hardware_enqueue()
462 hwreq->req.actual = 0; _hardware_enqueue()
494 u32 mul = hwreq->req.length / hwep->ep.maxpacket; _hardware_enqueue()
496 if (hwreq->req.length == 0 _hardware_enqueue()
497 || hwreq->req.length % hwep->ep.maxpacket) _hardware_enqueue()
549 unsigned actual = hwreq->req.length; _hardware_dequeue()
552 if (hwreq->req.status != -EALREADY) _hardware_dequeue()
555 hwreq->req.status = 0; _hardware_dequeue()
565 hwreq->req.status = -EALREADY; _hardware_dequeue()
573 hwreq->req.status = tmptoken & TD_STATUS; _hardware_dequeue()
574 if ((TD_STATUS_HALTED & hwreq->req.status)) { _hardware_dequeue()
575 hwreq->req.status = -EPIPE; _hardware_dequeue()
577 } else if ((TD_STATUS_DT_ERR & hwreq->req.status)) { _hardware_dequeue()
578 hwreq->req.status = -EPROTO; _hardware_dequeue()
580 } else if ((TD_STATUS_TR_ERR & hwreq->req.status)) { _hardware_dequeue()
581 hwreq->req.status = -EILSEQ; _hardware_dequeue()
587 hwreq->req.status = -EPROTO; _hardware_dequeue()
603 usb_gadget_unmap_request(&hwep->ci->gadget, &hwreq->req, hwep->dir); _hardware_dequeue()
605 hwreq->req.actual += actual; _hardware_dequeue()
607 if (hwreq->req.status) _hardware_dequeue()
608 return hwreq->req.status; _hardware_dequeue()
610 return hwreq->req.actual; _hardware_dequeue()
644 hwreq->req.status = -ESHUTDOWN;
646 if (hwreq->req.complete != NULL) {
648 usb_gadget_giveback_request(&hwep->ep, &hwreq->req);
776 * @req: request handled
780 static void isr_get_status_complete(struct usb_ep *ep, struct usb_request *req) isr_get_status_complete() argument
782 if (ep == NULL || req == NULL) isr_get_status_complete()
785 kfree(req->buf); isr_get_status_complete()
786 usb_ep_free_request(ep, req); isr_get_status_complete()
794 static int _ep_queue(struct usb_ep *ep, struct usb_request *req, _ep_queue() argument
798 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); _ep_queue()
802 if (ep == NULL || req == NULL || hwep->ep.desc == NULL) _ep_queue()
806 if (req->length) _ep_queue()
818 hwreq->req.length > (1 + hwep->ep.mult) * hwep->ep.maxpacket) { _ep_queue()
830 hwreq->req.status = -EINPROGRESS; _ep_queue()
831 hwreq->req.actual = 0; _ep_queue()
856 struct usb_request *req = NULL; variable in typeref:struct:usb_request
864 req = usb_ep_alloc_request(&hwep->ep, gfp_flags);
866 if (req == NULL)
869 req->complete = isr_get_status_complete;
870 req->length = 2;
871 req->buf = kzalloc(req->length, gfp_flags);
872 if (req->buf == NULL) {
878 *(u16 *)req->buf = (ci->remote_wakeup << 1) |
885 *(u16 *)req->buf = hw_ep_get_halt(ci, num, dir);
889 retval = _ep_queue(&hwep->ep, req, gfp_flags);
896 kfree(req->buf);
899 usb_ep_free_request(&hwep->ep, req);
907 * @req: request handled
913 isr_setup_status_complete(struct usb_ep *ep, struct usb_request *req) isr_setup_status_complete() argument
915 struct ci_hdrc *ci = req->context; isr_setup_status_complete()
972 if (hwreq->req.complete != NULL) {
975 hwreq->req.length)
977 usb_gadget_giveback_request(&hweptemp->ep, &hwreq->req);
1006 struct usb_ctrlrequest req; variable in typeref:struct:usb_ctrlrequest
1020 memcpy(&req, &hwep->qh.ptr->setup, sizeof(req));
1023 type = req.bRequestType;
1027 switch (req.bRequest) {
1030 le16_to_cpu(req.wValue) ==
1032 if (req.wLength != 0)
1034 num = le16_to_cpu(req.wIndex);
1049 le16_to_cpu(req.wValue) ==
1051 if (req.wLength != 0)
1064 if (le16_to_cpu(req.wLength) != 2 ||
1065 le16_to_cpu(req.wValue) != 0)
1067 err = isr_get_status_response(ci, &req);
1072 if (le16_to_cpu(req.wLength) != 0 ||
1073 le16_to_cpu(req.wIndex) != 0)
1075 ci->address = (u8)le16_to_cpu(req.wValue);
1081 le16_to_cpu(req.wValue) ==
1083 if (req.wLength != 0)
1085 num = le16_to_cpu(req.wIndex);
1097 if (req.wLength != 0)
1099 switch (le16_to_cpu(req.wValue)) {
1105 tmode = le16_to_cpu(req.wIndex) >> 8;
1140 if (req.wLength == 0) /* no data phase */
1144 err = ci->driver->setup(&ci->gadget, &req);
1323 return (hwreq == NULL) ? NULL : &hwreq->req; ep_alloc_request()
1331 static void ep_free_request(struct usb_ep *ep, struct usb_request *req) ep_free_request() argument
1334 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); ep_free_request()
1338 if (ep == NULL || req == NULL) { ep_free_request()
1364 static int ep_queue(struct usb_ep *ep, struct usb_request *req, ep_queue() argument
1371 if (ep == NULL || req == NULL || hwep->ep.desc == NULL) ep_queue()
1375 retval = _ep_queue(ep, req, gfp_flags); ep_queue()
1385 static int ep_dequeue(struct usb_ep *ep, struct usb_request *req) ep_dequeue() argument
1388 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); ep_dequeue()
1392 if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY || ep_dequeue()
1410 usb_gadget_unmap_request(&hwep->ci->gadget, req, hwep->dir); ep_dequeue()
1412 req->status = -ECONNRESET; ep_dequeue()
1414 if (hwreq->req.complete != NULL) { ep_dequeue()
1416 usb_gadget_giveback_request(&hwep->ep, &hwreq->req); ep_dequeue()
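
Almost every udc.c hit recovers the controller-private ci_hw_req from the generic usb_request it embeds via container_of(req, struct ci_hw_req, req). The pointer arithmetic behind that idiom is easy to show in isolation; below is a self-contained version with made-up generic/private request types and a plain offsetof-based container_of.

/* Illustrative only: embed a generic request inside a driver-private one
 * and recover the wrapper with container_of(), as ep_queue() and
 * ep_dequeue() do above. */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct generic_req {            /* what the upper layer hands around */
    unsigned length;
    int      status;
};

struct hw_req {                 /* what the controller driver tracks */
    int                tag;
    struct generic_req req;     /* embedded generic part */
};

/* The upper layer only ever sees &hwreq->req ... */
static void complete_req(struct generic_req *req)
{
    /* ... and the driver gets its own bookkeeping back like this. */
    struct hw_req *hwreq = container_of(req, struct hw_req, req);

    printf("completing tag %d, length %u\n", hwreq->tag, req->length);
}

int main(void)
{
    struct hw_req hwreq = { .tag = 7, .req = { .length = 512, .status = 0 } };

    complete_req(&hwreq.req);
    return 0;
}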
/linux-4.1.27/drivers/infiniband/hw/cxgb3/
H A Diwch_cm.c173 struct cpl_tid_release *req; release_tid() local
175 skb = get_skb(skb, sizeof *req, GFP_KERNEL); release_tid()
178 req = (struct cpl_tid_release *) skb_put(skb, sizeof(*req)); release_tid()
179 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); release_tid()
180 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, hwtid)); release_tid()
188 struct cpl_set_tcb_field *req; iwch_quiesce_tid() local
189 struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); iwch_quiesce_tid()
193 req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req)); iwch_quiesce_tid()
194 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); iwch_quiesce_tid()
195 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); iwch_quiesce_tid()
196 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid)); iwch_quiesce_tid()
197 req->reply = 0; iwch_quiesce_tid()
198 req->cpu_idx = 0; iwch_quiesce_tid()
199 req->word = htons(W_TCB_RX_QUIESCE); iwch_quiesce_tid()
200 req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE); iwch_quiesce_tid()
201 req->val = cpu_to_be64(1 << S_TCB_RX_QUIESCE); iwch_quiesce_tid()
209 struct cpl_set_tcb_field *req; iwch_resume_tid() local
210 struct sk_buff *skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); iwch_resume_tid()
214 req = (struct cpl_set_tcb_field *) skb_put(skb, sizeof(*req)); iwch_resume_tid()
215 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); iwch_resume_tid()
216 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); iwch_resume_tid()
217 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, ep->hwtid)); iwch_resume_tid()
218 req->reply = 0; iwch_resume_tid()
219 req->cpu_idx = 0; iwch_resume_tid()
220 req->word = htons(W_TCB_RX_QUIESCE); iwch_resume_tid()
221 req->mask = cpu_to_be64(1ULL << S_TCB_RX_QUIESCE); iwch_resume_tid()
222 req->val = 0; iwch_resume_tid()
380 struct cpl_abort_req *req = cplhdr(skb); abort_arp_failure() local
383 req->cmd = CPL_ABORT_NO_RST; abort_arp_failure()
389 struct cpl_close_con_req *req; send_halfclose() local
393 skb = get_skb(NULL, sizeof(*req), gfp); send_halfclose()
400 req = (struct cpl_close_con_req *) skb_put(skb, sizeof(*req)); send_halfclose()
401 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_CLOSE_CON)); send_halfclose()
402 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); send_halfclose()
403 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_CON_REQ, ep->hwtid)); send_halfclose()
409 struct cpl_abort_req *req; send_abort() local
412 skb = get_skb(skb, sizeof(*req), gfp); send_abort()
420 req = (struct cpl_abort_req *) skb_put(skb, sizeof(*req)); send_abort()
421 memset(req, 0, sizeof(*req)); send_abort()
422 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_REQ)); send_abort()
423 req->wr.wr_lo = htonl(V_WR_TID(ep->hwtid)); send_abort()
424 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ABORT_REQ, ep->hwtid)); send_abort()
425 req->cmd = CPL_ABORT_SEND_RST; send_abort()
431 struct cpl_act_open_req *req; send_connect() local
439 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); send_connect()
460 req = (struct cpl_act_open_req *) skb_put(skb, sizeof(*req)); send_connect()
461 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); send_connect()
462 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_ACT_OPEN_REQ, ep->atid)); send_connect()
463 req->local_port = ep->com.local_addr.sin_port; send_connect()
464 req->peer_port = ep->com.remote_addr.sin_port; send_connect()
465 req->local_ip = ep->com.local_addr.sin_addr.s_addr; send_connect()
466 req->peer_ip = ep->com.remote_addr.sin_addr.s_addr; send_connect()
467 req->opt0h = htonl(opt0h); send_connect()
468 req->opt0l = htonl(opt0l); send_connect()
469 req->params = 0; send_connect()
470 req->opt2 = htonl(opt2); send_connect()
477 struct tx_data_wr *req; send_mpa_req() local
486 if (skb->data + mpalen + sizeof(*req) > skb_end_pointer(skb)) { send_mpa_req()
488 skb=alloc_skb(mpalen + sizeof(*req), GFP_KERNEL); send_mpa_req()
495 skb_reserve(skb, sizeof(*req)); send_mpa_req()
518 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); send_mpa_req()
519 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL); send_mpa_req()
520 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); send_mpa_req()
521 req->len = htonl(len); send_mpa_req()
522 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | send_mpa_req()
524 req->flags = htonl(F_TX_INIT); send_mpa_req()
525 req->sndseq = htonl(ep->snd_seq); send_mpa_req()
537 struct tx_data_wr *req; send_mpa_reject() local
545 skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL); send_mpa_reject()
550 skb_reserve(skb, sizeof(*req)); send_mpa_reject()
569 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); send_mpa_reject()
570 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL); send_mpa_reject()
571 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); send_mpa_reject()
572 req->len = htonl(mpalen); send_mpa_reject()
573 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | send_mpa_reject()
575 req->flags = htonl(F_TX_INIT); send_mpa_reject()
576 req->sndseq = htonl(ep->snd_seq); send_mpa_reject()
585 struct tx_data_wr *req; send_mpa_reply() local
594 skb = get_skb(NULL, mpalen + sizeof(*req), GFP_KERNEL); send_mpa_reply()
600 skb_reserve(skb, sizeof(*req)); send_mpa_reply()
620 req = (struct tx_data_wr *) skb_push(skb, sizeof(*req)); send_mpa_reply()
621 req->wr_hi = htonl(V_WR_OP(FW_WROPCODE_OFLD_TX_DATA)|F_WR_COMPL); send_mpa_reply()
622 req->wr_lo = htonl(V_WR_TID(ep->hwtid)); send_mpa_reply()
623 req->len = htonl(len); send_mpa_reply()
624 req->param = htonl(V_TX_PORT(ep->l2t->smt_idx) | send_mpa_reply()
626 req->flags = htonl(F_TX_INIT); send_mpa_reply()
627 req->sndseq = htonl(ep->snd_seq); send_mpa_reply()
636 struct cpl_act_establish *req = cplhdr(skb); act_establish() local
637 unsigned int tid = GET_TID(req); act_establish()
647 ep->snd_seq = ntohl(req->snd_isn); act_establish()
648 ep->rcv_seq = ntohl(req->rcv_isn); act_establish()
650 set_emss(ep, ntohs(req->tcp_opt)); act_establish()
795 struct cpl_rx_data_ack *req; update_rx_credits() local
799 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); update_rx_credits()
805 req = (struct cpl_rx_data_ack *) skb_put(skb, sizeof(*req)); update_rx_credits()
806 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); update_rx_credits()
807 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_RX_DATA_ACK, ep->hwtid)); update_rx_credits()
808 req->credit_dack = htonl(V_RX_CREDITS(credits) | V_RX_FORCE_ACK(1)); update_rx_credits()
1203 struct cpl_pass_open_req *req; listen_start() local
1206 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); listen_start()
1212 req = (struct cpl_pass_open_req *) skb_put(skb, sizeof(*req)); listen_start()
1213 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); listen_start()
1214 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, ep->stid)); listen_start()
1215 req->local_port = ep->com.local_addr.sin_port; listen_start()
1216 req->local_ip = ep->com.local_addr.sin_addr.s_addr; listen_start()
1217 req->peer_port = 0; listen_start()
1218 req->peer_ip = 0; listen_start()
1219 req->peer_netmask = 0; listen_start()
1220 req->opt0h = htonl(F_DELACK | F_TCAM_BYPASS); listen_start()
1221 req->opt0l = htonl(V_RCV_BUFSIZ(rcv_win>>10)); listen_start()
1222 req->opt1 = htonl(V_CONN_POLICY(CPL_CONN_POLICY_ASK)); listen_start()
1245 struct cpl_close_listserv_req *req; listen_stop() local
1248 skb = get_skb(NULL, sizeof(*req), GFP_KERNEL); listen_stop()
1253 req = (struct cpl_close_listserv_req *) skb_put(skb, sizeof(*req)); listen_stop()
1254 req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD)); listen_stop()
1255 req->cpu_idx = 0; listen_stop()
1256 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_CLOSE_LISTSRV_REQ, ep->stid)); listen_stop()
1343 struct cpl_pass_accept_req *req = cplhdr(skb); pass_accept_req() local
1344 unsigned int hwtid = GET_TID(req); pass_accept_req()
1361 tim.mac_addr = req->dst_mac; pass_accept_req()
1362 tim.vlan_tag = ntohs(req->vlan_tag); pass_accept_req()
1365 __func__, req->dst_mac); pass_accept_req()
1371 req->local_ip, pass_accept_req()
1372 req->peer_ip, pass_accept_req()
1373 req->local_port, pass_accept_req()
1374 req->peer_port, G_PASS_OPEN_TOS(ntohl(req->tos_tid))); pass_accept_req()
1381 l2t = t3_l2t_get(tdev, dst, NULL, &req->peer_ip); pass_accept_req()
1400 child_ep->com.local_addr.sin_port = req->local_port; pass_accept_req()
1401 child_ep->com.local_addr.sin_addr.s_addr = req->local_ip; pass_accept_req()
1403 child_ep->com.remote_addr.sin_port = req->peer_port; pass_accept_req()
1404 child_ep->com.remote_addr.sin_addr.s_addr = req->peer_ip; pass_accept_req()
1407 child_ep->tos = G_PASS_OPEN_TOS(ntohl(req->tos_tid)); pass_accept_req()
1413 accept_cr(child_ep, req->peer_ip, skb); pass_accept_req()
1416 reject_cr(tdev, hwtid, req->peer_ip, skb); pass_accept_req()
1424 struct cpl_pass_establish *req = cplhdr(skb); pass_establish() local
1427 ep->snd_seq = ntohl(req->snd_isn); pass_establish()
1428 ep->rcv_seq = ntohl(req->rcv_isn); pass_establish()
1430 set_emss(ep, ntohs(req->tcp_opt)); pass_establish()
1532 struct cpl_abort_req_rss *req = cplhdr(skb); peer_abort() local
1541 if (is_neg_adv_abort(req->status)) { peer_abort()
/linux-4.1.27/drivers/usb/dwc3/
H A Dtrace.h112 TP_PROTO(struct dwc3_request *req),
113 TP_ARGS(req),
116 __field(struct dwc3_request *, req)
122 snprintf(__get_str(name), DWC3_MSG_MAX, "%s", req->dep->name);
123 __entry->req = req;
124 __entry->actual = req->request.actual;
125 __entry->length = req->request.length;
126 __entry->status = req->request.status;
128 TP_printk("%s: req %p length %u/%u ==> %d",
129 __get_str(name), __entry->req, __entry->actual, __entry->length,
135 TP_PROTO(struct dwc3_request *req),
136 TP_ARGS(req)
140 TP_PROTO(struct dwc3_request *req),
141 TP_ARGS(req)
145 TP_PROTO(struct dwc3_request *req),
146 TP_ARGS(req)
150 TP_PROTO(struct dwc3_request *req),
151 TP_ARGS(req)
155 TP_PROTO(struct dwc3_request *req),
156 TP_ARGS(req)
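
The dwc3 trace.h hits show one event class capturing the request pointer, actual/length and status, followed by repeated TP_PROTO/TP_ARGS pairs that stamp out individual events sharing that body. A userspace analogy of the shared-body idea is sketched below; the stub struct and DEFINE_REQ_EVENT macro are illustrative only and are not the kernel TRACE_EVENT machinery.

/*
 * Userspace analogy only: dwc3_request_stub and DEFINE_REQ_EVENT are
 * hypothetical; they mimic how several named events can reuse one
 * format body, as the dwc3 trace.h excerpt above does.
 */
#include <stdio.h>

struct dwc3_request_stub {
	const char *ep_name;
	unsigned int actual;
	unsigned int length;
	int status;
};

#define DEFINE_REQ_EVENT(name)						\
static void trace_##name(const struct dwc3_request_stub *req)		\
{									\
	printf(#name ": %s: req %p length %u/%u ==> %d\n",		\
	       req->ep_name, (const void *)req, req->actual,		\
	       req->length, req->status);				\
}

DEFINE_REQ_EVENT(request_alloc)
DEFINE_REQ_EVENT(request_free)

int main(void)
{
	struct dwc3_request_stub r = { "ep1in", 0, 512, -115 };

	trace_request_alloc(&r);
	trace_request_free(&r);
	return 0;
}
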
/linux-4.1.27/net/ipv6/
H A Dsyncookies.c45 struct request_sock *req, get_cookie_sock()
51 child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst); get_cookie_sock()
53 atomic_set(&req->rsk_refcnt, 1); get_cookie_sock()
54 inet_csk_reqsk_queue_add(sk, req, child); get_cookie_sock()
56 reqsk_free(req); get_cookie_sock()
166 struct request_sock *req; cookie_v6_check() local
193 req = inet_reqsk_alloc(&tcp6_request_sock_ops, sk); cookie_v6_check()
194 if (!req) cookie_v6_check()
197 ireq = inet_rsk(req); cookie_v6_check()
198 treq = tcp_rsk(req); cookie_v6_check()
201 if (security_inet_conn_request(sk, skb, req)) cookie_v6_check()
204 req->mss = mss; cookie_v6_check()
224 req->num_retrans = 0; cookie_v6_check()
229 req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; cookie_v6_check()
251 security_req_classify_flow(req, flowi6_to_flowi(&fl6)); cookie_v6_check()
258 req->window_clamp = tp->window_clamp ? :dst_metric(dst, RTAX_WINDOW); cookie_v6_check()
259 tcp_select_initial_window(tcp_full_space(sk), req->mss, cookie_v6_check()
260 &req->rcv_wnd, &req->window_clamp, cookie_v6_check()
267 ret = get_cookie_sock(sk, skb, req, dst); cookie_v6_check()
271 reqsk_free(req); cookie_v6_check()
44 get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) get_cookie_sock() argument
/linux-4.1.27/fs/ocfs2/
H A Dioctl.c42 struct ocfs2_info_request __user *req) o2info_set_request_error()
45 (void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags)); o2info_set_request_error()
48 static inline void o2info_set_request_filled(struct ocfs2_info_request *req) o2info_set_request_filled() argument
50 req->ir_flags |= OCFS2_INFO_FL_FILLED; o2info_set_request_filled()
53 static inline void o2info_clear_request_filled(struct ocfs2_info_request *req) o2info_clear_request_filled() argument
55 req->ir_flags &= ~OCFS2_INFO_FL_FILLED; o2info_clear_request_filled()
58 static inline int o2info_coherent(struct ocfs2_info_request *req) o2info_coherent() argument
60 return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT)); o2info_coherent()
146 struct ocfs2_info_request __user *req) ocfs2_info_handle_blocksize()
150 if (o2info_from_user(oib, req)) ocfs2_info_handle_blocksize()
157 if (o2info_to_user(oib, req)) ocfs2_info_handle_blocksize()
164 struct ocfs2_info_request __user *req) ocfs2_info_handle_clustersize()
169 if (o2info_from_user(oic, req)) ocfs2_info_handle_clustersize()
176 if (o2info_to_user(oic, req)) ocfs2_info_handle_clustersize()
183 struct ocfs2_info_request __user *req) ocfs2_info_handle_maxslots()
188 if (o2info_from_user(oim, req)) ocfs2_info_handle_maxslots()
195 if (o2info_to_user(oim, req)) ocfs2_info_handle_maxslots()
202 struct ocfs2_info_request __user *req) ocfs2_info_handle_label()
207 if (o2info_from_user(oil, req)) ocfs2_info_handle_label()
214 if (o2info_to_user(oil, req)) ocfs2_info_handle_label()
221 struct ocfs2_info_request __user *req) ocfs2_info_handle_uuid()
226 if (o2info_from_user(oiu, req)) ocfs2_info_handle_uuid()
233 if (o2info_to_user(oiu, req)) ocfs2_info_handle_uuid()
240 struct ocfs2_info_request __user *req) ocfs2_info_handle_fs_features()
245 if (o2info_from_user(oif, req)) ocfs2_info_handle_fs_features()
254 if (o2info_to_user(oif, req)) ocfs2_info_handle_fs_features()
261 struct ocfs2_info_request __user *req) ocfs2_info_handle_journal_size()
266 if (o2info_from_user(oij, req)) ocfs2_info_handle_journal_size()
273 if (o2info_to_user(oij, req)) ocfs2_info_handle_journal_size()
328 struct ocfs2_info_request __user *req) ocfs2_info_handle_freeinode()
345 if (o2info_from_user(*oifi, req)) { ocfs2_info_handle_freeinode()
386 if (o2info_to_user(*oifi, req)) { ocfs2_info_handle_freeinode()
394 o2info_set_request_error(&oifi->ifi_req, req); ocfs2_info_handle_freeinode()
618 struct ocfs2_info_request __user *req) ocfs2_info_handle_freefrag()
635 if (o2info_from_user(*oiff, req)) { ocfs2_info_handle_freefrag()
675 if (o2info_to_user(*oiff, req)) { ocfs2_info_handle_freefrag()
683 o2info_set_request_error(&oiff->iff_req, req); ocfs2_info_handle_freefrag()
691 struct ocfs2_info_request __user *req) ocfs2_info_handle_unknown()
695 if (o2info_from_user(oir, req)) ocfs2_info_handle_unknown()
700 if (o2info_to_user(oir, req)) ocfs2_info_handle_unknown()
714 struct ocfs2_info_request __user *req) ocfs2_info_handle_request()
719 if (o2info_from_user(oir, req)) ocfs2_info_handle_request()
729 status = ocfs2_info_handle_blocksize(inode, req); ocfs2_info_handle_request()
733 status = ocfs2_info_handle_clustersize(inode, req); ocfs2_info_handle_request()
737 status = ocfs2_info_handle_maxslots(inode, req); ocfs2_info_handle_request()
741 status = ocfs2_info_handle_label(inode, req); ocfs2_info_handle_request()
745 status = ocfs2_info_handle_uuid(inode, req); ocfs2_info_handle_request()
749 status = ocfs2_info_handle_fs_features(inode, req); ocfs2_info_handle_request()
753 status = ocfs2_info_handle_journal_size(inode, req); ocfs2_info_handle_request()
757 status = ocfs2_info_handle_freeinode(inode, req); ocfs2_info_handle_request()
761 status = ocfs2_info_handle_freefrag(inode, req); ocfs2_info_handle_request()
764 status = ocfs2_info_handle_unknown(inode, req); ocfs2_info_handle_request()
41 o2info_set_request_error(struct ocfs2_info_request *kreq, struct ocfs2_info_request __user *req) o2info_set_request_error() argument
145 ocfs2_info_handle_blocksize(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_blocksize() argument
163 ocfs2_info_handle_clustersize(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_clustersize() argument
182 ocfs2_info_handle_maxslots(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_maxslots() argument
201 ocfs2_info_handle_label(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_label() argument
220 ocfs2_info_handle_uuid(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_uuid() argument
239 ocfs2_info_handle_fs_features(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_fs_features() argument
260 ocfs2_info_handle_journal_size(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_journal_size() argument
327 ocfs2_info_handle_freeinode(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_freeinode() argument
617 ocfs2_info_handle_freefrag(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_freefrag() argument
690 ocfs2_info_handle_unknown(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_unknown() argument
713 ocfs2_info_handle_request(struct inode *inode, struct ocfs2_info_request __user *req) ocfs2_info_handle_request() argument
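
The ocfs2 ioctl handlers above all follow one shape: copy the ocfs2_info_request header in from user space, fill in the answer, mark it with the FILLED flag, and copy it back, clearing the flag via o2info_set_request_error() on failure. A small userspace sketch of just the flag handling follows; the bit values are illustrative, not the real ocfs2 ABI constants.

/*
 * Sketch only: the flag values below are illustrative; the helpers
 * mirror o2info_set_request_filled(), o2info_clear_request_filled()
 * and o2info_coherent() from the excerpt above.
 */
#include <stdio.h>
#include <stdint.h>

#define INFO_FL_NON_COHERENT	(1u << 0)	/* illustrative bit positions */
#define INFO_FL_FILLED		(1u << 1)

struct info_request {
	uint32_t ir_flags;
};

static void set_request_filled(struct info_request *req)
{
	req->ir_flags |= INFO_FL_FILLED;
}

static void clear_request_filled(struct info_request *req)
{
	req->ir_flags &= ~INFO_FL_FILLED;
}

static int request_coherent(const struct info_request *req)
{
	return !(req->ir_flags & INFO_FL_NON_COHERENT);
}

int main(void)
{
	struct info_request req = { .ir_flags = INFO_FL_NON_COHERENT };

	set_request_filled(&req);
	printf("filled=%d coherent=%d\n",
	       !!(req.ir_flags & INFO_FL_FILLED), request_coherent(&req));
	clear_request_filled(&req);
	printf("filled=%d\n", !!(req.ir_flags & INFO_FL_FILLED));
	return 0;
}
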
/linux-4.1.27/net/netlink/
H A Ddiag.c58 struct netlink_diag_req *req, sk_diag_fill()
82 if ((req->ndiag_show & NDIAG_SHOW_GROUPS) && sk_diag_fill()
86 if ((req->ndiag_show & NDIAG_SHOW_MEMINFO) && sk_diag_fill()
90 if ((req->ndiag_show & NDIAG_SHOW_RING_CFG) && sk_diag_fill()
109 struct netlink_diag_req *req; __netlink_diag_dump() local
114 req = nlmsg_data(cb->nlh); __netlink_diag_dump()
129 if (sk_diag_fill(sk, skb, req, rht_for_each_entry_rcu()
152 if (sk_diag_fill(sk, skb, req,
171 struct netlink_diag_req *req; netlink_diag_dump() local
174 req = nlmsg_data(cb->nlh); netlink_diag_dump()
179 if (req->sdiag_protocol == NDIAG_PROTO_ALL) { netlink_diag_dump()
188 if (req->sdiag_protocol >= MAX_LINKS) { netlink_diag_dump()
194 __netlink_diag_dump(skb, cb, req->sdiag_protocol, s_num); netlink_diag_dump()
57 sk_diag_fill(struct sock *sk, struct sk_buff *skb, struct netlink_diag_req *req, u32 portid, u32 seq, u32 flags, int sk_ino) sk_diag_fill() argument
/linux-4.1.27/drivers/mmc/host/
H A Dwmt-sdmmc.c204 struct mmc_request *req; member in struct:wmt_mci_priv
299 struct mmc_request *req; wmt_complete_data_request() local
300 req = priv->req; wmt_complete_data_request()
302 req->data->bytes_xfered = req->data->blksz * req->data->blocks; wmt_complete_data_request()
305 if (req->data->flags & MMC_DATA_WRITE) wmt_complete_data_request()
306 dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg, wmt_complete_data_request()
307 req->data->sg_len, DMA_TO_DEVICE); wmt_complete_data_request()
309 dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg, wmt_complete_data_request()
310 req->data->sg_len, DMA_FROM_DEVICE); wmt_complete_data_request()
313 if ((req->cmd->error) || (req->data->error)) wmt_complete_data_request()
314 mmc_request_done(priv->mmc, req); wmt_complete_data_request()
317 if (!req->data->stop) { wmt_complete_data_request()
319 mmc_request_done(priv->mmc, req); wmt_complete_data_request()
328 priv->cmd = req->data->stop; wmt_complete_data_request()
329 wmt_mci_send_command(priv->mmc, req->data->stop->opcode, wmt_complete_data_request()
330 7, req->data->stop->arg, 9); wmt_complete_data_request()
348 priv->req->data->error = -ETIMEDOUT; wmt_mci_dma_isr()
353 priv->req->data->error = 0; wmt_mci_dma_isr()
404 if ((!priv->req->data) || wmt_mci_regular_isr()
405 ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) { wmt_mci_regular_isr()
424 mmc_request_done(priv->mmc, priv->req); wmt_mci_regular_isr()
562 static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req) wmt_mci_request() argument
580 priv->req = req; wmt_mci_request()
587 priv->cmd = req->cmd; wmt_mci_request()
589 command = req->cmd->opcode; wmt_mci_request()
590 arg = req->cmd->arg; wmt_mci_request()
591 rsptype = mmc_resp_type(req->cmd); wmt_mci_request()
601 if (!req->data) { wmt_mci_request()
606 if (req->data) { wmt_mci_request()
614 writew((reg_tmp & 0xF800) | (req->data->blksz - 1), wmt_mci_request()
618 writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT); wmt_mci_request()
622 if (req->data->flags & MMC_DATA_WRITE) { wmt_mci_request()
623 sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg, wmt_mci_request()
624 req->data->sg_len, DMA_TO_DEVICE); wmt_mci_request()
626 if (req->data->blocks > 1) wmt_mci_request()
629 sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg, wmt_mci_request()
630 req->data->sg_len, DMA_FROM_DEVICE); wmt_mci_request()
632 if (req->data->blocks > 1) wmt_mci_request()
639 for_each_sg(req->data->sg, sg, sg_cnt, i) { wmt_mci_request()
642 wmt_dma_init_descriptor(desc, req->data->blksz, wmt_mci_request()
647 offset += req->data->blksz; wmt_mci_request()
649 if (desc_cnt == req->data->blocks) wmt_mci_request()
656 if (req->data->flags & MMC_DATA_WRITE) wmt_mci_request()
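
wmt_mci_request() and wmt_complete_data_request() above choose the scatterlist mapping direction from the request's data flags, and the unmap after completion must use the same direction as the original dma_map_sg() call. The sketch below isolates that selection; the enum and flag bit are stand-ins invented here, not the kernel definitions.

/*
 * Sketch only: mmc_dma_dir(), the direction enum and the WRITE flag bit
 * are hypothetical stand-ins; they mirror the flags-to-direction choice
 * made around dma_map_sg()/dma_unmap_sg() in the wmt-sdmmc code above.
 */
#include <stdio.h>

enum dma_dir { DIR_TO_DEVICE, DIR_FROM_DEVICE };

#define DATA_WRITE	(1u << 0)	/* illustrative flag bit */

static enum dma_dir mmc_dma_dir(unsigned int data_flags)
{
	/* Writes move data toward the card, reads come from it. */
	return (data_flags & DATA_WRITE) ? DIR_TO_DEVICE : DIR_FROM_DEVICE;
}

int main(void)
{
	printf("write request: %s\n",
	       mmc_dma_dir(DATA_WRITE) == DIR_TO_DEVICE ? "TO_DEVICE" : "FROM_DEVICE");
	printf("read request:  %s\n",
	       mmc_dma_dir(0) == DIR_TO_DEVICE ? "TO_DEVICE" : "FROM_DEVICE");
	return 0;
}
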
