Searched refs:reply (Results 1 - 200 of 724) sorted by relevance


/linux-4.4.14/drivers/staging/rdma/amso1100/
c2_mm.c
43 * Wait for the adapter's reply on the last one.
57 struct c2wr_nsmr_pbl_rep *reply; /* reply ptr */ send_pbl_messages() local
81 * Only the last PBL message will generate a reply from the verbs, send_pbl_messages()
83 * handler blocked awaiting this reply. send_pbl_messages()
96 * vq request struct cuz we're gonna wait for a reply. send_pbl_messages()
110 * wait for the reply. send_pbl_messages()
148 * Now wait for the reply... send_pbl_messages()
156 * Process reply send_pbl_messages()
158 reply = (struct c2wr_nsmr_pbl_rep *) (unsigned long) vq_req->reply_msg; send_pbl_messages()
159 if (!reply) { send_pbl_messages()
164 err = c2_errno(reply); send_pbl_messages()
166 vq_repbuf_free(c2dev, reply); send_pbl_messages()
181 struct c2wr_nsmr_register_rep *reply; c2_nsmr_register_phys_kern() local
260 * wait for reply from adapter c2_nsmr_register_phys_kern()
268 * process reply c2_nsmr_register_phys_kern()
270 reply = c2_nsmr_register_phys_kern()
272 if (!reply) { c2_nsmr_register_phys_kern()
276 if ((err = c2_errno(reply))) { c2_nsmr_register_phys_kern()
279 //*p_pb_entries = be32_to_cpu(reply->pbl_depth); c2_nsmr_register_phys_kern()
280 mr->ibmr.lkey = mr->ibmr.rkey = be32_to_cpu(reply->stag_index); c2_nsmr_register_phys_kern()
281 vq_repbuf_free(c2dev, reply); c2_nsmr_register_phys_kern()
285 * the adapter and wait for a reply on the final one. c2_nsmr_register_phys_kern()
308 vq_repbuf_free(c2dev, reply); c2_nsmr_register_phys_kern()
320 struct c2wr_stag_dealloc_rep *reply; /* WR reply */ c2_stag_dealloc() local
355 * Wait for reply from adapter c2_stag_dealloc()
363 * Process reply c2_stag_dealloc()
365 reply = (struct c2wr_stag_dealloc_rep *) (unsigned long) vq_req->reply_msg; c2_stag_dealloc()
366 if (!reply) { c2_stag_dealloc()
371 err = c2_errno(reply); c2_stag_dealloc()
373 vq_repbuf_free(c2dev, reply); c2_stag_dealloc()
c2_rnic.c
127 struct c2wr_rnic_query_rep *reply; c2_rnic_query() local
150 reply = c2_rnic_query()
152 if (!reply) c2_rnic_query()
155 err = c2_errno(reply); c2_rnic_query()
160 ((u64)be32_to_cpu(reply->fw_ver_major) << 32) | c2_rnic_query()
161 ((be32_to_cpu(reply->fw_ver_minor) & 0xFFFF) << 16) | c2_rnic_query()
162 (be32_to_cpu(reply->fw_ver_patch) & 0xFFFF); c2_rnic_query()
166 props->vendor_id = be32_to_cpu(reply->vendor_id); c2_rnic_query()
167 props->vendor_part_id = be32_to_cpu(reply->part_number); c2_rnic_query()
168 props->hw_ver = be32_to_cpu(reply->hw_version); c2_rnic_query()
169 props->max_qp = be32_to_cpu(reply->max_qps); c2_rnic_query()
170 props->max_qp_wr = be32_to_cpu(reply->max_qp_depth); c2_rnic_query()
174 props->max_cq = be32_to_cpu(reply->max_cqs); c2_rnic_query()
175 props->max_cqe = be32_to_cpu(reply->max_cq_depth); c2_rnic_query()
176 props->max_mr = be32_to_cpu(reply->max_mrs); c2_rnic_query()
177 props->max_pd = be32_to_cpu(reply->max_pds); c2_rnic_query()
178 props->max_qp_rd_atom = be32_to_cpu(reply->max_qp_ird); c2_rnic_query()
180 props->max_res_rd_atom = be32_to_cpu(reply->max_global_ird); c2_rnic_query()
181 props->max_qp_init_rd_atom = be32_to_cpu(reply->max_qp_ord); c2_rnic_query()
186 props->max_mw = be32_to_cpu(reply->max_mws); c2_rnic_query()
202 vq_repbuf_free(c2dev, reply); c2_rnic_query()
216 struct c2wr_rnic_setconfig_rep *reply; c2_add_addr() local
254 reply = c2_add_addr()
256 if (!reply) { c2_add_addr()
261 err = c2_errno(reply); c2_add_addr()
262 vq_repbuf_free(c2dev, reply); c2_add_addr()
278 struct c2wr_rnic_setconfig_rep *reply; c2_del_addr() local
316 reply = c2_del_addr()
318 if (!reply) { c2_del_addr()
323 err = c2_errno(reply); c2_del_addr()
324 vq_repbuf_free(c2dev, reply); c2_del_addr()
341 struct c2wr_rnic_open_rep *reply; c2_rnic_open() local
369 reply = (struct c2wr_rnic_open_rep *) (unsigned long) (vq_req->reply_msg); c2_rnic_open()
370 if (!reply) { c2_rnic_open()
375 if ((err = c2_errno(reply)) != 0) { c2_rnic_open()
379 c2dev->adapter_handle = reply->rnic_handle; c2_rnic_open()
382 vq_repbuf_free(c2dev, reply); c2_rnic_open()
395 struct c2wr_rnic_close_rep *reply; c2_rnic_close() local
421 reply = (struct c2wr_rnic_close_rep *) (unsigned long) (vq_req->reply_msg); c2_rnic_close()
422 if (!reply) { c2_rnic_close()
427 if ((err = c2_errno(reply)) != 0) { c2_rnic_close()
434 vq_repbuf_free(c2dev, reply); c2_rnic_close()
642 /* Free the verbs reply queue */ c2_rnic_term()
c2_cm.c
114 * Send WR to adapter. NOTE: There is no synch reply from c2_llp_connect()
139 struct c2wr_ep_listen_create_rep *reply; c2_llp_service_create() local
184 * Wait for reply from adapter c2_llp_service_create()
191 * Process reply c2_llp_service_create()
193 reply = c2_llp_service_create()
195 if (!reply) { c2_llp_service_create()
200 if ((err = c2_errno(reply)) != 0) c2_llp_service_create()
206 cm_id->provider_data = (void*)(unsigned long) reply->ep_handle; c2_llp_service_create()
211 vq_repbuf_free(c2dev, reply); c2_llp_service_create()
217 vq_repbuf_free(c2dev, reply); c2_llp_service_create()
229 struct c2wr_ep_listen_destroy_rep *reply; c2_llp_service_destroy() local
267 * Wait for reply from adapter c2_llp_service_destroy()
274 * Process reply c2_llp_service_destroy()
276 reply=(struct c2wr_ep_listen_destroy_rep *)(unsigned long)vq_req->reply_msg; c2_llp_service_destroy()
277 if (!reply) { c2_llp_service_destroy()
282 vq_repbuf_free(c2dev, reply); c2_llp_service_destroy()
295 struct c2wr_cr_accept_rep *reply; /* VQ Reply msg ptr. */ c2_llp_accept() local
361 /* Wait for reply from adapter */ c2_llp_accept()
366 /* Check that reply is present */ c2_llp_accept()
367 reply = (struct c2wr_cr_accept_rep *) (unsigned long) vq_req->reply_msg; c2_llp_accept()
368 if (!reply) { c2_llp_accept()
373 err = c2_errno(reply); c2_llp_accept()
374 vq_repbuf_free(c2dev, reply); c2_llp_accept()
399 struct c2wr_cr_reject_rep *reply; c2_llp_reject() local
434 * Wait for reply from adapter c2_llp_reject()
441 * Process reply c2_llp_reject()
443 reply = (struct c2wr_cr_reject_rep *) (unsigned long) c2_llp_reject()
445 if (!reply) { c2_llp_reject()
449 err = c2_errno(reply); c2_llp_reject()
453 vq_repbuf_free(c2dev, reply); c2_llp_reject()
c2_vq.c
44 * adapter has replied, and a copy of the verb reply work request.
47 * in the verbs reply message. The function handle_vq() in the interrupt
49 * 1) append a copy of the verbs reply message
50 * 2) mark that the reply is ready
51 * 3) wake up the kernel verbs handler blocked awaiting the reply.
57 * until the adapter's reply can be processed. The reason we need this is
59 * the verbs work request message, and reflected back in the reply message.
61 * kernel verb handler that is blocked awaiting the verb reply.
64 * getting the reply, then we don't need these refcnts.
69 * does not reply.
75 * outstanding Verb Request reply
145 * a verb reply message. If the associated
257 void vq_repbuf_free(struct c2_dev *c2dev, void *reply) vq_repbuf_free() argument
259 kmem_cache_free(c2dev->host_msg_cache, reply); vq_repbuf_free()
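The c2_vq.c comment above describes the verbs-queue handshake: the kernel verbs handler posts a work request and blocks, the interrupt-level handle_vq() copies the reply into the request struct, marks it ready, and wakes the sleeper, with reference counts keeping the struct alive in case the adapter never replies. Below is a minimal userspace sketch of that wait/wake pattern; it uses pthreads as a stand-in for the kernel wait queue and is an illustration of the idea only, not the driver's API.

/* Minimal sketch of the "post request, block until reply ready" pattern
 * described in the c2_vq.c comment above. Userspace analogy using pthreads;
 * the names (vq_req, handle_vq) mirror the driver but are illustrative. */
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct vq_req {
	pthread_mutex_t lock;
	pthread_cond_t  wait;          /* kernel code would use a wait queue */
	int             reply_ready;   /* set by the "interrupt" side */
	char            reply_msg[64]; /* copy of the reply work request */
};

/* Plays the role of handle_vq(): append the reply, mark it ready, wake sleeper. */
static void *handle_vq(void *arg)
{
	struct vq_req *req = arg;

	sleep(1);                               /* pretend the adapter is working */
	pthread_mutex_lock(&req->lock);
	strcpy(req->reply_msg, "reply: stag_index=42");
	req->reply_ready = 1;
	pthread_cond_signal(&req->wait);
	pthread_mutex_unlock(&req->lock);
	return NULL;
}

int main(void)
{
	struct vq_req req;
	pthread_t irq;

	memset(&req, 0, sizeof(req));
	pthread_mutex_init(&req.lock, NULL);
	pthread_cond_init(&req.wait, NULL);

	pthread_create(&irq, NULL, handle_vq, &req);

	/* Verbs-handler side: block awaiting the reply. */
	pthread_mutex_lock(&req.lock);
	while (!req.reply_ready)
		pthread_cond_wait(&req.wait, &req.lock);
	pthread_mutex_unlock(&req.lock);

	printf("got \"%s\"\n", req.reply_msg);  /* then free it, cf. vq_repbuf_free() */
	pthread_join(irq, NULL);
	return 0;
}
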
c2_cq.c
293 struct c2wr_cq_create_rep *reply; c2_init_cq() local
342 reply = (struct c2wr_cq_create_rep *) (unsigned long) (vq_req->reply_msg); c2_init_cq()
343 if (!reply) { c2_init_cq()
348 if ((err = c2_errno(reply)) != 0) c2_init_cq()
351 cq->adapter_handle = reply->cq_handle; c2_init_cq()
352 cq->mq.index = be32_to_cpu(reply->mq_index); c2_init_cq()
354 peer_pa = c2dev->pa + be32_to_cpu(reply->adapter_shared); c2_init_cq()
361 vq_repbuf_free(c2dev, reply); c2_init_cq()
378 vq_repbuf_free(c2dev, reply); c2_init_cq()
394 struct c2wr_cq_destroy_rep *reply; c2_free_cq() local
429 reply = (struct c2wr_cq_destroy_rep *) (unsigned long) (vq_req->reply_msg); c2_free_cq()
430 if (reply) c2_free_cq()
431 vq_repbuf_free(c2dev, reply); c2_free_cq()
c2_qp.c
138 struct c2wr_qp_modify_rep *reply; c2_qp_modify() local
217 reply = (struct c2wr_qp_modify_rep *) (unsigned long) vq_req->reply_msg; c2_qp_modify()
218 if (!reply) { c2_qp_modify()
223 err = c2_errno(reply); c2_qp_modify()
242 vq_repbuf_free(c2dev, reply); c2_qp_modify()
257 struct c2wr_qp_modify_rep *reply; c2_qp_set_read_limits() local
288 reply = (struct c2wr_qp_modify_rep *) (unsigned long) c2_qp_set_read_limits()
290 if (!reply) { c2_qp_set_read_limits()
295 err = c2_errno(reply); c2_qp_set_read_limits()
296 vq_repbuf_free(c2dev, reply); c2_qp_set_read_limits()
306 struct c2wr_qp_destroy_rep *reply; destroy_qp() local
352 * Wait for reply from adapter destroy_qp()
360 * Process reply destroy_qp()
362 reply = (struct c2wr_qp_destroy_rep *) (unsigned long) (vq_req->reply_msg); destroy_qp()
363 if (!reply) { destroy_qp()
375 vq_repbuf_free(c2dev, reply); destroy_qp()
420 struct c2wr_qp_create_rep *reply; c2_alloc_qp() local
488 /* Wait for the verb reply */ c2_alloc_qp()
494 /* Process the reply */ c2_alloc_qp()
495 reply = (struct c2wr_qp_create_rep *) (unsigned long) (vq_req->reply_msg); c2_alloc_qp()
496 if (!reply) { c2_alloc_qp()
501 if ((err = c2_wr_get_result(reply)) != 0) { c2_alloc_qp()
507 qp->adapter_handle = reply->qp_handle; c2_alloc_qp()
515 q_size = be32_to_cpu(reply->sq_depth); c2_alloc_qp()
516 msg_size = be32_to_cpu(reply->sq_msg_size); c2_alloc_qp()
517 peer_pa = c2dev->pa + be32_to_cpu(reply->sq_mq_start); c2_alloc_qp()
526 be32_to_cpu(reply->sq_mq_index), c2_alloc_qp()
534 q_size = be32_to_cpu(reply->rq_depth); c2_alloc_qp()
535 msg_size = be32_to_cpu(reply->rq_msg_size); c2_alloc_qp()
536 peer_pa = c2dev->pa + be32_to_cpu(reply->rq_mq_start); c2_alloc_qp()
545 be32_to_cpu(reply->rq_mq_index), c2_alloc_qp()
552 vq_repbuf_free(c2dev, reply); c2_alloc_qp()
562 vq_repbuf_free(c2dev, reply); c2_alloc_qp()
c2_vq.h
41 u64 reply_msg; /* ptr to reply msg */
43 atomic_t reply_ready; /* set when reply is ready */
60 extern void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
c2_wr.h
716 * No synchronous reply from adapter to this WR. The results of
733 /* no synchronous reply. */
1348 * adapter sends reply when private data is successfully submitted to
1386 * request and reply queues.
1392 * - the physaddr/len of host memory to be used for the reply.
1397 u64 reply_buf; /* pinned host buf for reply */
1398 u32 reply_buf_len; /* length of reply buffer */
1404 * flags used in the console reply.
1407 CONS_REPLY_TRUNCATED = 0x00000001 /* reply was truncated */
1411 * Console reply message.
1412 * hdr.result contains the c2_status_t error if the reply was _not_ generated,
1413 * or C2_OK if the reply was generated.
/linux-4.4.14/arch/um/drivers/
mconsole_user.c
40 static int mconsole_reply_v0(struct mc_request *req, char *reply) mconsole_reply_v0() argument
45 iov.iov_base = reply; mconsole_reply_v0()
46 iov.iov_len = strlen(reply); mconsole_reply_v0()
134 struct mconsole_reply reply; mconsole_reply_len() local
138 reply.err = err; mconsole_reply_len()
145 if (len == total) reply.more = more; mconsole_reply_len()
146 else reply.more = 1; mconsole_reply_len()
148 memcpy(reply.data, str, len); mconsole_reply_len()
149 reply.data[len] = '\0'; mconsole_reply_len()
152 reply.len = len + 1; mconsole_reply_len()
154 len = sizeof(reply) + reply.len - sizeof(reply.data); mconsole_reply_len()
156 n = sendto(req->originating_fd, &reply, len, 0, mconsole_reply_len()
mconsole.h
72 extern int mconsole_reply_len(struct mc_request *req, const char *reply,
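mconsole_reply_len() above fills a reply struct whose data[] buffer is a fixed array, then computes the on-wire length as sizeof(reply) + reply.len - sizeof(reply.data) so that only the used portion of the buffer is sent. Here is a small runnable sketch of that size calculation; the struct layout is illustrative, not UML's actual struct mconsole_reply.

/* Sketch of the "send only the used part of a fixed reply buffer" idea from
 * mconsole_reply_len() above. The struct layout is illustrative only. */
#include <stdio.h>
#include <string.h>

struct reply {
	int  err;
	int  more;
	int  len;
	char data[4096];   /* fixed buffer, usually mostly unused */
};

int main(void)
{
	struct reply r = { .err = 0, .more = 0 };
	const char *str = "OK";

	r.len = strlen(str) + 1;
	memcpy(r.data, str, r.len);

	/* Wire length: header plus used payload, not the whole data[] array. */
	size_t wire_len = sizeof(r) + r.len - sizeof(r.data);
	printf("sending %zu of %zu bytes\n", wire_len, sizeof(r));
	return 0;
}
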
/linux-4.4.14/drivers/uwb/i1480/dfu/
phy.c
56 struct i1480_evt_confirm *reply = i1480->evt_buf; i1480_mpi_write() local
64 reply->rceb.bEventType = i1480_CET_VS1; i1480_mpi_write()
65 reply->rceb.wEvent = i1480_CMD_MPI_WRITE; i1480_mpi_write()
66 result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply)); i1480_mpi_write()
69 if (reply->bResultCode != UWB_RC_RES_SUCCESS) { i1480_mpi_write()
71 reply->bResultCode); i1480_mpi_write()
94 * We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply.
96 * As the reply has to fit in 512 bytes (i1480->evt_buffer), the max amount
97 * of values we can read is (512 - sizeof(*reply)) / 3
104 struct i1480_evt_mpi_read *reply = i1480->evt_buf; i1480_mpi_read() local
110 BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3); i1480_mpi_read()
119 reply->rceb.bEventType = i1480_CET_VS1; i1480_mpi_read()
120 reply->rceb.wEvent = i1480_CMD_MPI_READ; i1480_mpi_read()
122 sizeof(*reply) + 3*size); i1480_mpi_read()
125 if (reply->bResultCode != UWB_RC_RES_SUCCESS) { i1480_mpi_read()
127 reply->bResultCode); i1480_mpi_read()
131 if (reply->data[cnt].page != (srcaddr + cnt) >> 8) i1480_mpi_read()
134 (srcaddr + cnt) >> 8, reply->data[cnt].page); i1480_mpi_read()
135 if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff)) i1480_mpi_read()
139 reply->data[cnt].offset); i1480_mpi_read()
140 data[cnt] = reply->data[cnt].value; i1480_mpi_read()
dfu.c
89 * @returns size of the reply data filled in i1480->evt_buf or < 0 errno
96 struct uwb_rceb *reply = i1480->evt_buf; i1480_cmd() local
98 u16 expected_event = reply->wEvent; i1480_cmd()
99 u8 expected_type = reply->bEventType; i1480_cmd()
122 dev_err(i1480->dev, "%s: command reply reception failed: %zd\n", i1480_cmd()
mac.c
381 * USB from the stack. The reply event is copied from an stage buffer,
395 } __attribute__((packed)) *reply = (void *) i1480->evt_buf; i1480_cmd_reset() local
400 reply->rceb.bEventType = UWB_RC_CET_GENERAL; i1480_cmd_reset()
401 reply->rceb.wEvent = UWB_RC_CMD_RESET; i1480_cmd_reset()
402 result = i1480_cmd(i1480, "RESET", sizeof(*cmd), sizeof(*reply)); i1480_cmd_reset()
405 if (reply->bResultCode != UWB_RC_RES_SUCCESS) { i1480_cmd_reset()
407 reply->bResultCode); i1480_cmd_reset()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/
memx.c
47 u32 reply[2]; nvkm_memx_init() local
50 ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO, nvkm_memx_init()
59 memx->base = reply[0]; nvkm_memx_init()
60 memx->size = reply[1]; nvkm_memx_init()
77 u32 finish, reply[2]; nvkm_memx_fini() local
86 /* call MEMX process to execute the script, and wait for reply */ nvkm_memx_fini()
88 nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC, nvkm_memx_fini()
93 reply[0], reply[1]); nvkm_memx_fini()
168 u32 reply[2], base, size, i; nvkm_memx_train_result() local
171 ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO, nvkm_memx_train_result()
176 base = reply[0]; nvkm_memx_train_result()
177 size = reply[1] >> 2; nvkm_memx_train_result()
base.c
36 nvkm_pmu_send(struct nvkm_pmu *pmu, u32 reply[2], nvkm_pmu_send() argument
53 * on a synchronous reply, take the PMU mutex and tell the nvkm_pmu_send()
56 if (reply) { nvkm_pmu_send()
79 /* wait for reply, if requested */ nvkm_pmu_send()
80 if (reply) { nvkm_pmu_send()
82 reply[0] = pmu->recv.data[0]; nvkm_pmu_send()
83 reply[1] = pmu->recv.data[1]; nvkm_pmu_send()
120 /* wake process if it's waiting on a synchronous reply */ nvkm_pmu_recv()
/linux-4.4.14/drivers/uwb/
reset.c
120 struct uwb_rceb *reply; member in struct:uwb_rc_cmd_done_params
125 struct uwb_rceb *reply, ssize_t reply_size) uwb_rc_cmd_done()
130 if (p->reply) uwb_rc_cmd_done()
133 p->reply = kmalloc(reply_size, GFP_ATOMIC); uwb_rc_cmd_done()
135 if (p->reply) uwb_rc_cmd_done()
136 memcpy(p->reply, reply, reply_size); uwb_rc_cmd_done()
154 * @reply: Pointer to where to store the reply
155 * @reply_size: @reply's size
166 * be allocated in *preply with the whole contents of the reply.
173 struct uwb_rceb *reply, size_t reply_size, __uwb_rc_cmd()
182 params.reply = reply; __uwb_rc_cmd()
194 *preply = params.reply; __uwb_rc_cmd()
214 * @reply: Pointer to the beginning of the confirmation event
216 * You need to fill out reply->bEventType and reply->wEvent (in
219 * @reply_size: Size of the reply buffer
221 * The function checks that the length returned in the reply is at
229 struct uwb_rceb *reply, size_t reply_size) uwb_rc_cmd()
235 cmd, cmd_size, reply, reply_size, uwb_rc_cmd()
236 reply->bEventType, reply->wEvent, NULL); uwb_rc_cmd()
239 dev_err(dev, "%s: not enough data returned for decoding reply " uwb_rc_cmd()
264 * The function checks that the length returned in the reply is at
288 * USB from the stack. The reply event is copied from an stage buffer,
294 struct uwb_rc_evt_confirm reply; uwb_rc_reset() local
304 reply.rceb.bEventType = UWB_RC_CET_GENERAL; uwb_rc_reset()
305 reply.rceb.wEvent = UWB_RC_CMD_RESET; uwb_rc_reset()
307 &reply.rceb, sizeof(reply)); uwb_rc_reset()
310 if (reply.bResultCode != UWB_RC_RES_SUCCESS) { uwb_rc_reset()
313 uwb_rc_strerror(reply.bResultCode), reply.bResultCode); uwb_rc_reset()
124 uwb_rc_cmd_done(struct uwb_rc *rc, void *arg, struct uwb_rceb *reply, ssize_t reply_size) uwb_rc_cmd_done() argument
171 __uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, struct uwb_rccb *cmd, size_t cmd_size, struct uwb_rceb *reply, size_t reply_size, u8 expected_type, u16 expected_event, struct uwb_rceb **preply) __uwb_rc_cmd() argument
227 uwb_rc_cmd(struct uwb_rc *rc, const char *cmd_name, struct uwb_rccb *cmd, size_t cmd_size, struct uwb_rceb *reply, size_t reply_size) uwb_rc_cmd() argument
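The reset.c comments spell out the uwb_rc_cmd() contract: the caller pre-fills the expected event type and event ID in the reply header (in CPU order) so the confirmation can be matched, and the returned length must cover at least the fixed reply structure before the result code is trusted. The scan.c, address.c and beacon.c hits below show that convention verbatim; the sketch here is a generic, self-contained restatement of the same checks, with illustrative types rather than the UWB stack's.

/* Generic sketch of the reply-validation contract described for uwb_rc_cmd():
 * the caller pre-fills the event type/ID it expects, and the returned length
 * must cover at least the fixed reply header before the result code is used.
 * Types and names here are illustrative, not the UWB stack's API. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>

struct evt_confirm {
	uint8_t  event_type;   /* caller fills the expected type */
	uint16_t event_id;     /* caller fills the expected event */
	uint8_t  result_code;  /* filled in by the "radio controller" */
};

/* Stand-in for the hardware: produce a confirmation event. */
static ssize_t fake_issue_cmd(struct evt_confirm *reply, size_t reply_size)
{
	reply->result_code = 0;            /* success */
	return reply_size;                 /* bytes "returned" by the device */
}

int main(void)
{
	struct evt_confirm reply;
	ssize_t ret;

	memset(&reply, 0, sizeof(reply));
	reply.event_type = 0;              /* "general" command set */
	reply.event_id = 15;               /* e.g. a RESET-style command */

	ret = fake_issue_cmd(&reply, sizeof(reply));
	if (ret < (ssize_t)sizeof(reply)) {
		fprintf(stderr, "short reply: %zd bytes\n", ret);
		return 1;
	}
	if (reply.result_code != 0) {
		fprintf(stderr, "command failed: %u\n", (unsigned)reply.result_code);
		return 1;
	}
	printf("command confirmed\n");
	return 0;
}
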
scan.c
54 * USB from the stack. The reply event is copied from an stage buffer,
63 struct uwb_rc_evt_confirm reply; uwb_rc_scan() local
75 reply.rceb.bEventType = UWB_RC_CET_GENERAL; uwb_rc_scan()
76 reply.rceb.wEvent = UWB_RC_CMD_SCAN; uwb_rc_scan()
78 &reply.rceb, sizeof(reply)); uwb_rc_scan()
81 if (reply.bResultCode != UWB_RC_RES_SUCCESS) { uwb_rc_scan()
84 uwb_rc_strerror(reply.bResultCode), reply.bResultCode); uwb_rc_scan()
address.c
52 * @reply: Pointer to reply buffer (can be stack allocated)
61 struct uwb_rc_evt_dev_addr_mgmt *reply) uwb_rc_dev_addr_mgmt()
82 reply->rceb.bEventType = UWB_RC_CET_GENERAL; uwb_rc_dev_addr_mgmt()
83 reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT; uwb_rc_dev_addr_mgmt()
86 &reply->rceb, sizeof(*reply)); uwb_rc_dev_addr_mgmt()
89 if (result < sizeof(*reply)) { uwb_rc_dev_addr_mgmt()
92 "%d vs %zu bytes needed\n", result, sizeof(*reply)); uwb_rc_dev_addr_mgmt()
94 } else if (reply->bResultCode != UWB_RC_RES_SUCCESS) { uwb_rc_dev_addr_mgmt()
97 uwb_rc_strerror(reply->bResultCode), uwb_rc_dev_addr_mgmt()
98 reply->bResultCode); uwb_rc_dev_addr_mgmt()
131 struct uwb_rc_evt_dev_addr_mgmt reply; uwb_rc_addr_set() local
146 return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply); uwb_rc_addr_set()
59 uwb_rc_dev_addr_mgmt(struct uwb_rc *rc, u8 bmOperationType, const u8 *baAddr, struct uwb_rc_evt_dev_addr_mgmt *reply) uwb_rc_dev_addr_mgmt() argument
ie.c
115 struct uwb_rceb *reply = NULL; uwb_rc_get_ie() local
126 &reply); uwb_rc_get_ie()
131 get_ie = container_of(reply, struct uwb_rc_evt_get_ie, rceb); uwb_rc_get_ie()
159 struct uwb_rc_evt_set_ie reply; uwb_rc_set_ie() local
161 reply.rceb.bEventType = UWB_RC_CET_GENERAL; uwb_rc_set_ie()
162 reply.rceb.wEvent = UWB_RC_CMD_SET_IE; uwb_rc_set_ie()
165 &reply.rceb, sizeof(reply)); uwb_rc_set_ie()
168 else if (result != sizeof(reply)) { uwb_rc_set_ie()
169 dev_err(dev, "SET-IE: not enough data to decode reply " uwb_rc_set_ie()
171 result, sizeof(reply)); uwb_rc_set_ie()
173 } else if (reply.bResultCode != UWB_RC_RES_SUCCESS) { uwb_rc_set_ie()
175 uwb_rc_strerror(reply.bResultCode), reply.bResultCode); uwb_rc_set_ie()
beacon.c
47 struct uwb_rc_evt_confirm reply; uwb_rc_start_beacon() local
56 reply.rceb.bEventType = UWB_RC_CET_GENERAL; uwb_rc_start_beacon()
57 reply.rceb.wEvent = UWB_RC_CMD_START_BEACON; uwb_rc_start_beacon()
59 &reply.rceb, sizeof(reply)); uwb_rc_start_beacon()
62 if (reply.bResultCode != UWB_RC_RES_SUCCESS) { uwb_rc_start_beacon()
65 uwb_rc_strerror(reply.bResultCode), reply.bResultCode); uwb_rc_start_beacon()
77 struct uwb_rc_evt_confirm reply; uwb_rc_stop_beacon() local
84 reply.rceb.bEventType = UWB_RC_CET_GENERAL; uwb_rc_stop_beacon()
85 reply.rceb.wEvent = UWB_RC_CMD_STOP_BEACON; uwb_rc_stop_beacon()
87 &reply.rceb, sizeof(reply)); uwb_rc_stop_beacon()
90 if (reply.bResultCode != UWB_RC_RES_SUCCESS) { uwb_rc_stop_beacon()
93 uwb_rc_strerror(reply.bResultCode), reply.bResultCode); uwb_rc_stop_beacon()
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvif/
event.h
6 __u8 reply; member in struct:nvif_notify_req_v0
19 __u8 data[]; /* reply data (below) */
notify.h
27 bool work, u8 type, void *data, u32 size, u32 reply,
/linux-4.4.14/net/netfilter/
nf_conntrack_sane.c
76 struct sane_reply_net_start *reply; help() local
110 /* We're interested in the next reply */ help()
115 /* Is it a reply to an uninteresting command? */ help()
119 /* It's a reply to SANE_NET_START. */ help()
123 pr_debug("nf_ct_sane: NET_START reply too short\n"); help()
127 reply = sb_ptr; help()
128 if (reply->status != htonl(SANE_STATUS_SUCCESS)) { help()
131 ntohl(reply->status)); help()
135 /* Invalid saned reply? Ignore it. */ help()
136 if (reply->zero != 0) help()
149 IPPROTO_TCP, NULL, &reply->port); help()
nf_nat_core.c
139 * so we invert the tuple and look for the incoming reply. nf_nat_used_tuple()
143 struct nf_conntrack_tuple reply; nf_nat_used_tuple() local
145 nf_ct_invert_tuplepr(&reply, tuple); nf_nat_used_tuple()
146 return nf_conntrack_tuple_taken(&reply, ignored_conntrack); nf_nat_used_tuple()
150 /* If we source map this tuple so reply looks like reply_tuple, will
203 /* Copy source part from reply tuple. */ find_appropriate_src()
397 /* What we've got will look like inverse of reply. Normally nf_nat_setup_info()
408 struct nf_conntrack_tuple reply; nf_nat_setup_info() local
411 nf_ct_invert_tuplepr(&reply, &new_tuple); nf_nat_setup_info()
412 nf_conntrack_alter_reply(ct, &reply); nf_nat_setup_info()
453 * Use reply in case it's already been mangled (eg local packet). __nf_nat_alloc_null_binding()
491 /* Invert if this is reply dir. */ nf_nat_packet()
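nf_nat_used_tuple() above relies on the fact that the reply direction of a connection is simply the original tuple with source and destination swapped (nf_ct_invert_tuplepr), and it then asks whether that inverted tuple is already in use. A toy sketch of the inversion; the tuple struct is illustrative, not nf_conntrack's.

/* Tiny sketch of "invert the tuple and look for the incoming reply": the
 * reply direction of a connection is the original tuple with source and
 * destination swapped. Struct is illustrative, not nf_conntrack's. */
#include <stdint.h>
#include <stdio.h>

struct tuple {
	uint32_t src_ip, dst_ip;
	uint16_t src_port, dst_port;
};

static struct tuple invert_tuple(struct tuple t)
{
	struct tuple r = {
		.src_ip = t.dst_ip,     .dst_ip = t.src_ip,
		.src_port = t.dst_port, .dst_port = t.src_port,
	};
	return r;
}

int main(void)
{
	struct tuple orig = { 0x0a000001, 0x0a000002, 12345, 80 };
	struct tuple reply = invert_tuple(orig);

	printf("reply dir: port %u -> port %u\n",
	       (unsigned)reply.src_port, (unsigned)reply.dst_port);
	return 0;
}
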
nf_conntrack_netbios_ns.c
14 * timing out) matching all reply connections from the
/linux-4.4.14/tools/usb/usbip/src/
usbip_attach.c
124 struct op_import_reply reply; query_import_device() local
128 memset(&reply, 0, sizeof(reply)); query_import_device()
147 /* receive a reply */ query_import_device()
154 rc = usbip_net_recv(sockfd, (void *) &reply, sizeof(reply)); query_import_device()
160 PACK_OP_IMPORT_REPLY(0, &reply); query_import_device()
162 /* check the reply */ query_import_device()
163 if (strncmp(reply.udev.busid, busid, SYSFS_BUS_ID_SIZE)) { query_import_device()
164 err("recv different busid %s", reply.udev.busid); query_import_device()
169 return import_device(sockfd, &reply.udev); query_import_device()
usbip_network.h
32 uint32_t status; /* op_code status (for reply) */
81 #define PACK_OP_IMPORT_REPLY(pack, reply) do {\
82 usbip_net_pack_usb_device(pack, &(reply)->udev);\
104 #define PACK_OP_EXPORT_REPLY(pack, reply) do {\
125 #define PACK_OP_UNEXPORT_REPLY(pack, reply) do {\
166 #define PACK_OP_DEVLIST_REPLY(pack, reply) do {\
167 usbip_net_pack_uint32_t(pack, &(reply)->ndev);\
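The PACK_OP_*_REPLY macros above convert a reply's multi-byte fields between host and network byte order, driven by a pack flag: the sender packs before usbip_net_send(), and the receiver unpacks after usbip_net_recv(), as query_import_device() does with PACK_OP_IMPORT_REPLY(0, &reply). A runnable sketch of that convention follows, with hypothetical struct and helper names.

/* Sketch of the usbip pack/unpack convention: one helper converts a reply's
 * integer fields, with a flag choosing host->network (pack) or network->host
 * (unpack). Struct and helper names here are hypothetical. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

struct op_reply {
	uint32_t status;
	uint32_t ndev;
};

static void pack_op_reply(int pack, struct op_reply *r)
{
	r->status = pack ? htonl(r->status) : ntohl(r->status);
	r->ndev   = pack ? htonl(r->ndev)   : ntohl(r->ndev);
}

int main(void)
{
	struct op_reply r = { .status = 0, .ndev = 3 };

	pack_op_reply(1, &r);   /* sender: convert to network order before send */
	pack_op_reply(0, &r);   /* receiver: convert back after recv */
	printf("ndev = %u\n", (unsigned)r.ndev);
	return 0;
}
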
usbip_list.c
52 struct op_devlist_reply reply; get_exported_devices() local
71 memset(&reply, 0, sizeof(reply)); get_exported_devices()
72 rc = usbip_net_recv(sockfd, &reply, sizeof(reply)); get_exported_devices()
77 PACK_OP_DEVLIST_REPLY(0, &reply); get_exported_devices()
78 dbg("exportable devices: %d\n", reply.ndev); get_exported_devices()
80 if (reply.ndev == 0) { get_exported_devices()
89 for (i = 0; i < reply.ndev; i++) { get_exported_devices()
usbipd.c
163 struct op_devlist_reply reply; send_reply_devlist() local
167 reply.ndev = 0; send_reply_devlist()
170 reply.ndev += 1; send_reply_devlist()
172 info("exportable devices: %d", reply.ndev); send_reply_devlist()
179 PACK_OP_DEVLIST_REPLY(1, &reply); send_reply_devlist()
181 rc = usbip_net_send(connfd, &reply, sizeof(reply)); send_reply_devlist()
/linux-4.4.14/drivers/s390/crypto/
zcrypt_msgtype6.c
46 #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
73 * The request (or reply) parameter block is organized thus:
98 unsigned short rpl_parml; /* reply parameter buffer */
101 unsigned char rpl_parmp[4]; /* reply parameter buffer *
105 unsigned char rpl_datal[4]; /* reply data buffer len ULELONG */
106 unsigned char rpl_datap[4]; /* reply data buffer */
487 * Copy results from a type 86 ICA reply message back to user space.
490 * @reply: reply AP message.
512 struct ap_message *reply, convert_type86_ica()
551 struct type86x_reply *msg = reply->message; convert_type86_ica()
612 * Copy results from a type 86 XCRB reply message back to user space.
615 * @reply: reply AP message.
621 struct ap_message *reply, convert_type86_xcrb()
624 struct type86_fmt2_msg *msg = reply->message; convert_type86_xcrb()
625 char *data = reply->message; convert_type86_xcrb()
643 * Copy results from a type 86 EP11 XCRB reply message back to user space.
646 * @reply: reply AP message.
652 struct ap_message *reply, convert_type86_ep11_xcrb()
655 struct type86_fmt2_msg *msg = reply->message; convert_type86_ep11_xcrb()
656 char *data = reply->message; convert_type86_ep11_xcrb()
670 struct ap_message *reply, convert_type86_rng()
677 } __packed * msg = reply->message; convert_type86_rng()
678 char *data = reply->message; convert_type86_rng()
687 struct ap_message *reply, convert_response_ica()
691 struct type86x_reply *msg = reply->message; convert_response_ica()
694 switch (((unsigned char *) reply->message)[1]) { convert_response_ica()
697 return convert_error(zdev, reply); convert_response_ica()
709 return convert_error(zdev, reply); convert_response_ica()
711 return convert_type86_ica(zdev, reply, convert_response_ica()
726 struct ap_message *reply, convert_response_xcrb()
729 struct type86x_reply *msg = reply->message; convert_response_xcrb()
732 switch (((unsigned char *) reply->message)[1]) { convert_response_xcrb()
736 return convert_error(zdev, reply); convert_response_xcrb()
740 return convert_error(zdev, reply); convert_response_xcrb()
743 return convert_type86_xcrb(zdev, reply, xcRB); convert_response_xcrb()
758 struct ap_message *reply, struct ep11_urb *xcRB) convert_response_ep11_xcrb()
760 struct type86_ep11_reply *msg = reply->message; convert_response_ep11_xcrb()
763 switch (((unsigned char *)reply->message)[1]) { convert_response_ep11_xcrb()
766 return convert_error(zdev, reply); convert_response_ep11_xcrb()
769 return convert_error(zdev, reply); convert_response_ep11_xcrb()
771 return convert_type86_ep11_xcrb(zdev, reply, xcRB); convert_response_ep11_xcrb()
784 struct ap_message *reply, convert_response_rng()
787 struct type86x_reply *msg = reply->message; convert_response_rng()
797 return convert_type86_rng(zdev, reply, data); convert_response_rng()
812 * "msg" has finished with the reply message "reply".
816 * @reply: pointer to the AP reply message
820 struct ap_message *reply) zcrypt_msgtype6_receive()
831 /* Copy the reply message to the request message buffer. */ zcrypt_msgtype6_receive()
832 if (!reply) zcrypt_msgtype6_receive()
834 t86r = reply->message; zcrypt_msgtype6_receive()
842 memcpy(msg->message, reply->message, length); zcrypt_msgtype6_receive()
847 memcpy(msg->message, reply->message, length); zcrypt_msgtype6_receive()
854 memcpy(msg->message, reply->message, sizeof(error_reply)); zcrypt_msgtype6_receive()
861 * "msg" has finished with the reply message "reply".
865 * @reply: pointer to the AP reply message
869 struct ap_message *reply) zcrypt_msgtype6_receive_ep11()
880 /* Copy the reply message to the request message buffer. */ zcrypt_msgtype6_receive_ep11()
881 if (!reply) zcrypt_msgtype6_receive_ep11()
883 t86r = reply->message; zcrypt_msgtype6_receive_ep11()
890 memcpy(msg->message, reply->message, length); zcrypt_msgtype6_receive_ep11()
896 memcpy(msg->message, reply->message, sizeof(error_reply)); zcrypt_msgtype6_receive_ep11()
511 convert_type86_ica(struct zcrypt_device *zdev, struct ap_message *reply, char __user *outputdata, unsigned int outputdatalength) convert_type86_ica() argument
620 convert_type86_xcrb(struct zcrypt_device *zdev, struct ap_message *reply, struct ica_xcRB *xcRB) convert_type86_xcrb() argument
651 convert_type86_ep11_xcrb(struct zcrypt_device *zdev, struct ap_message *reply, struct ep11_urb *xcRB) convert_type86_ep11_xcrb() argument
669 convert_type86_rng(struct zcrypt_device *zdev, struct ap_message *reply, char *buffer) convert_type86_rng() argument
686 convert_response_ica(struct zcrypt_device *zdev, struct ap_message *reply, char __user *outputdata, unsigned int outputdatalength) convert_response_ica() argument
725 convert_response_xcrb(struct zcrypt_device *zdev, struct ap_message *reply, struct ica_xcRB *xcRB) convert_response_xcrb() argument
757 convert_response_ep11_xcrb(struct zcrypt_device *zdev, struct ap_message *reply, struct ep11_urb *xcRB) convert_response_ep11_xcrb() argument
783 convert_response_rng(struct zcrypt_device *zdev, struct ap_message *reply, char *data) convert_response_rng() argument
818 zcrypt_msgtype6_receive(struct ap_device *ap_dev, struct ap_message *msg, struct ap_message *reply) zcrypt_msgtype6_receive() argument
867 zcrypt_msgtype6_receive_ep11(struct ap_device *ap_dev, struct ap_message *msg, struct ap_message *reply) zcrypt_msgtype6_receive_ep11() argument
zcrypt_msgtype50.c
318 * Copy results from a type 80 reply message back to user space.
321 * @reply: reply AP message.
328 struct ap_message *reply, convert_type80()
332 struct type80_hdr *t80h = reply->message; convert_type80()
349 data = reply->message + t80h->len - outputdatalength; convert_type80()
356 struct ap_message *reply, convert_response()
361 switch (((unsigned char *) reply->message)[1]) { convert_response()
364 return convert_error(zdev, reply); convert_response()
366 return convert_type80(zdev, reply, convert_response()
380 * "msg" has finished with the reply message "reply".
384 * @reply: pointer to the AP reply message
388 struct ap_message *reply) zcrypt_cex2a_receive()
397 /* Copy the reply message to the request message buffer. */ zcrypt_cex2a_receive()
398 if (!reply) zcrypt_cex2a_receive()
400 t80h = reply->message; zcrypt_cex2a_receive()
408 memcpy(msg->message, reply->message, length); zcrypt_cex2a_receive()
410 memcpy(msg->message, reply->message, sizeof(error_reply)); zcrypt_cex2a_receive()
327 convert_type80(struct zcrypt_device *zdev, struct ap_message *reply, char __user *outputdata, unsigned int outputdatalength) convert_type80() argument
355 convert_response(struct zcrypt_device *zdev, struct ap_message *reply, char __user *outputdata, unsigned int outputdatalength) convert_response() argument
386 zcrypt_cex2a_receive(struct ap_device *ap_dev, struct ap_message *msg, struct ap_message *reply) zcrypt_cex2a_receive() argument
zcrypt_error.h
36 * Error reply messages are of two types:
41 * Request reply messages are of three known types:
51 unsigned char reply_code; /* reply code */
91 struct ap_message *reply) convert_error()
93 struct error_hdr *ehdr = reply->message; convert_error()
90 convert_error(struct zcrypt_device *zdev, struct ap_message *reply) convert_error() argument
zcrypt_pcixcc.c
55 #define PCIXCC_MAX_ICA_RESPONSE_SIZE 0x77c /* max size type86 v2 reply */
172 char *reply; zcrypt_pcixcc_mcl() local
175 reply = (void *) get_zeroed_page(GFP_KERNEL); zcrypt_pcixcc_mcl()
176 if (!reply) zcrypt_pcixcc_mcl()
186 rc = ap_recv(ap_dev->qid, &psmid, reply, 4096); zcrypt_pcixcc_mcl()
197 cprbx = (struct CPRBX *) (reply + 48); zcrypt_pcixcc_mcl()
203 free_page((unsigned long) reply); zcrypt_pcixcc_mcl()
222 } __attribute__((packed)) *reply; zcrypt_pcixcc_rng_supported() local
250 reply = ap_msg.message; zcrypt_pcixcc_rng_supported()
251 if (reply->cprbx.ccp_rtcode == 0 && reply->cprbx.ccp_rscode == 0) zcrypt_pcixcc_rng_supported()
330 ap_dev->reply = &zdev->reply; zcrypt_pcixcc_probe()
zcrypt_api.h
119 struct ap_message reply; /* Per-device reply structure. */ member in struct:zcrypt_device
zcrypt_cex2a.c
129 ap_dev->reply = &zdev->reply; zcrypt_cex2a_probe()
zcrypt_cex4.c
150 ap_dev->reply = &zdev->reply; zcrypt_cex4_probe()
/linux-4.4.14/include/linux/
bsg-lib.h
43 /* Transport/driver specific request/reply structs */
45 void *reply; member in struct:bsg_job
51 * the reply.
54 * to indicates the size of the reply to be returned to the
adb.h
13 unsigned char reply[32]; member in struct:adb_request
41 #define ADBREQ_REPLY 1 /* expect reply */
drbd_genl_api.h
21 * @ret_code: kernel->userland unicast cfg reply return code (union with flags);
/linux-4.4.14/drivers/net/wireless/brcm80211/brcmfmac/
vendor.c
37 struct sk_buff *reply; brcmf_cfg80211_vndr_cmds_dcmd_handler() local
91 reply = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, payload); brcmf_cfg80211_vndr_cmds_dcmd_handler()
92 if (NULL == reply) { brcmf_cfg80211_vndr_cmds_dcmd_handler()
97 if (nla_put(reply, BRCMF_NLATTR_DATA, msglen, wr_pointer) || brcmf_cfg80211_vndr_cmds_dcmd_handler()
98 nla_put_u16(reply, BRCMF_NLATTR_LEN, msglen)) { brcmf_cfg80211_vndr_cmds_dcmd_handler()
99 kfree_skb(reply); brcmf_cfg80211_vndr_cmds_dcmd_handler()
104 ret = cfg80211_vendor_cmd_reply(reply); brcmf_cfg80211_vndr_cmds_dcmd_handler()
/linux-4.4.14/drivers/staging/lustre/lnet/selftest/
ping_test.c
126 srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply; ping_client_done_rpc() local
141 __swab32s(&reply->pnr_seq); ping_client_done_rpc()
142 __swab32s(&reply->pnr_magic); ping_client_done_rpc()
143 __swab32s(&reply->pnr_status); ping_client_done_rpc()
146 if (reply->pnr_magic != LST_PING_TEST_MAGIC) { ping_client_done_rpc()
150 reply->pnr_magic, libcfs_id2str(rpc->crpc_dest), ping_client_done_rpc()
155 if (reply->pnr_seq != reqst->pnr_seq) { ping_client_done_rpc()
159 reply->pnr_seq, libcfs_id2str(rpc->crpc_dest), ping_client_done_rpc()
165 CDEBUG(D_NET, "%d reply in %u usec\n", reply->pnr_seq, ping_client_done_rpc()
rpc.h
69 * All srpc_*_reqst_t's 1st field must be matchbits of reply buffer,
76 __u64 rpyid; /* reply buffer matchbits */
87 __u64 mksn_rpyid; /* reply buffer matchbits */
98 } WIRE_ATTR srpc_mksn_reply_t; /* make session reply */
101 __u64 rmsn_rpyid; /* reply buffer matchbits */
108 } WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */
111 __u64 join_rpyid; /* reply buffer matchbits */
125 __u64 dbg_rpyid; /* reply buffer matchbits */
143 __u64 bar_rpyid; /* reply buffer matchbits */
159 __u64 str_rpyid; /* reply buffer matchbits */
191 __u64 tsr_rpyid; /* reply buffer matchbits */
231 __u64 brw_rpyid; /* reply buffer matchbits */
240 } WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */
254 srpc_generic_reply_t reply; member in union:srpc_msg::__anon10476
framework.c
368 sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply) sfw_get_stats() argument
371 sfw_counters_t *cnt = &reply->str_fw; sfw_get_stats()
374 reply->str_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; sfw_get_stats()
377 reply->str_status = EINVAL; sfw_get_stats()
382 reply->str_status = ESRCH; sfw_get_stats()
386 lnet_counters_get(&reply->str_lnet); sfw_get_stats()
387 srpc_get_counters(&reply->str_rpc); sfw_get_stats()
402 reply->str_status = 0; sfw_get_stats()
407 sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply) sfw_make_session() argument
415 reply->mksn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; sfw_make_session()
416 reply->mksn_status = EINVAL; sfw_make_session()
421 reply->mksn_status = 0; sfw_make_session()
422 reply->mksn_sid = sn->sn_id; sfw_make_session()
423 reply->mksn_timeout = sn->sn_timeout; sfw_make_session()
431 reply->mksn_status = EBUSY; sfw_make_session()
432 cplen = strlcpy(&reply->mksn_name[0], &sn->sn_name[0], sfw_make_session()
433 sizeof(reply->mksn_name)); sfw_make_session()
434 if (cplen >= sizeof(reply->mksn_name)) sfw_make_session()
447 reply->mksn_status = EPROTO; sfw_make_session()
469 reply->mksn_status = 0; sfw_make_session()
470 reply->mksn_sid = sn->sn_id; sfw_make_session()
471 reply->mksn_timeout = sn->sn_timeout; sfw_make_session()
476 sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply) sfw_remove_session() argument
480 reply->rmsn_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; sfw_remove_session()
483 reply->rmsn_status = EINVAL; sfw_remove_session()
488 reply->rmsn_status = (sn == NULL) ? ESRCH : EBUSY; sfw_remove_session()
493 reply->rmsn_status = 0; sfw_remove_session()
501 reply->rmsn_status = 0; sfw_remove_session()
502 reply->rmsn_sid = LST_INVALID_SID; sfw_remove_session()
508 sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply) sfw_debug_session() argument
513 reply->dbg_status = ESRCH; sfw_debug_session()
514 reply->dbg_sid = LST_INVALID_SID; sfw_debug_session()
518 reply->dbg_status = 0; sfw_debug_session()
519 reply->dbg_sid = sn->sn_id; sfw_debug_session()
520 reply->dbg_timeout = sn->sn_timeout; sfw_debug_session()
521 if (strlcpy(reply->dbg_name, &sn->sn_name[0], sizeof(reply->dbg_name)) sfw_debug_session()
522 >= sizeof(reply->dbg_name)) sfw_debug_session()
1070 sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply) sfw_query_batch() argument
1078 reply->bar_active = atomic_read(&tsb->bat_nactive); sfw_query_batch()
1086 reply->bar_active = atomic_read(&tsi->tsi_nactive); sfw_query_batch()
1118 srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply; sfw_add_test() local
1124 reply->tsr_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; sfw_add_test()
1134 reply->tsr_status = EINVAL; sfw_add_test()
1140 reply->tsr_status = ENOENT; sfw_add_test()
1153 reply->tsr_status = EBUSY; sfw_add_test()
1180 reply->tsr_status = (rc < 0) ? -rc : rc; sfw_add_test()
1185 sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply) sfw_control_batch() argument
1191 reply->bar_sid = (sn == NULL) ? LST_INVALID_SID : sn->sn_id; sfw_control_batch()
1194 reply->bar_status = ESRCH; sfw_control_batch()
1200 reply->bar_status = ENOENT; sfw_control_batch()
1214 rc = sfw_query_batch(bat, request->bar_testidx, reply); sfw_control_batch()
1221 reply->bar_status = (rc < 0) ? -rc : rc; sfw_control_batch()
1229 srpc_msg_t *reply = &rpc->srpc_replymsg; sfw_handle_server_rpc() local
1269 reply->msg_body.reply.status = EPROTO; sfw_handle_server_rpc()
1270 reply->msg_body.reply.sid = sn->sn_id; sfw_handle_server_rpc()
1278 reply->msg_body.reply.status = EPROTO; sfw_handle_server_rpc()
1291 &reply->msg_body.bat_reply); sfw_handle_server_rpc()
1296 &reply->msg_body.stat_reply); sfw_handle_server_rpc()
1301 &reply->msg_body.dbg_reply); sfw_handle_server_rpc()
1306 &reply->msg_body.mksn_reply); sfw_handle_server_rpc()
1311 &reply->msg_body.rmsn_reply); sfw_handle_server_rpc()
1318 reply->msg_ses_feats = features; sfw_handle_server_rpc()
brw_test.c
318 srpc_brw_reply_t *reply = &msg->msg_body.brw_reply; brw_client_done_rpc() local
333 __swab32s(&reply->brw_status); brw_client_done_rpc()
336 CDEBUG(reply->brw_status ? D_WARNING : D_NET, brw_client_done_rpc()
338 libcfs_id2str(rpc->crpc_dest), reply->brw_status); brw_client_done_rpc()
340 if (reply->brw_status != 0) { brw_client_done_rpc()
342 rpc->crpc_status = -(int)reply->brw_status; brw_client_done_rpc()
384 srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply; brw_bulk_ready() local
410 reply->brw_status = EBADMSG; brw_bulk_ready()
422 srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply; brw_server_handle() local
440 reply->brw_status = 0; brw_server_handle()
447 reply->brw_status = EINVAL; brw_server_handle()
453 reply->brw_status = EPROTO; brw_server_handle()
460 reply->brw_status = EINVAL; brw_server_handle()
472 reply->brw_status = EINVAL; brw_server_handle()
rpc.c
993 srpc_generic_reply_t *reply; srpc_handle_rpc() local
996 reply = &rpc->srpc_replymsg.msg_body.reply; srpc_handle_rpc()
1009 reply->status = EPROTO; srpc_handle_rpc()
1010 /* drop through and send reply */ srpc_handle_rpc()
1012 reply->status = 0; srpc_handle_rpc()
1014 LASSERT(reply->status == 0 || !rpc->srpc_bulk); srpc_handle_rpc()
1050 return 0; /* wait for reply */ srpc_handle_rpc()
1177 srpc_msg_t *reply; srpc_send_rpc() local
1187 reply = &rpc->crpc_replymsg; srpc_send_rpc()
1242 srpc_unpack_msg_hdr(reply); srpc_send_rpc()
1243 if (reply->msg_type != type || srpc_send_rpc()
1244 (reply->msg_magic != SRPC_MSG_MAGIC && srpc_send_rpc()
1245 reply->msg_magic != __swab32(SRPC_MSG_MAGIC))) { srpc_send_rpc()
1248 reply->msg_type, type, srpc_send_rpc()
1249 reply->msg_magic, SRPC_MSG_MAGIC); srpc_send_rpc()
1254 if (do_bulk && reply->msg_body.reply.status != 0) { srpc_send_rpc()
1256 reply->msg_body.reply.status, srpc_send_rpc()
1270 * since reply buffer still contains valid data. srpc_send_rpc()
1274 rpc->crpc_status == 0 && reply->msg_body.reply.status != 0) srpc_send_rpc()
1375 * might send me another RPC once it gets the reply */ srpc_send_reply()
selftest.h
93 /* all reply/bulk RDMAs go to this portal */
142 SRPC_REPLY_RCVD = 4, /* incoming reply received */
143 SRPC_REPLY_SENT = 5, /* outgoing reply sent */
190 srpc_event_t srpc_ev; /* bulk/reply event */
209 int crpc_timeout; /* # seconds to wait for reply */
226 srpc_event_t crpc_replyev; /* reply event */
228 /* bulk, request(reqst), and reply exchanged on wire */
430 int sfw_make_session (srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
/linux-4.4.14/drivers/media/firewire/
firedtv-ci.c
79 struct ca_msg *reply = arg; fdtv_ca_app_info() local
81 return avc_ca_app_info(fdtv, reply->msg, &reply->length); fdtv_ca_app_info()
86 struct ca_msg *reply = arg; fdtv_ca_info() local
88 return avc_ca_info(fdtv, reply->msg, &reply->length); fdtv_ca_info()
93 struct ca_msg *reply = arg; fdtv_ca_get_mmi() local
95 return avc_ca_get_mmi(fdtv, reply->msg, &reply->length); fdtv_ca_get_mmi()
/linux-4.4.14/drivers/nfc/
mei_phy.c
108 struct mei_nfc_reply *reply = NULL; mei_nfc_if_version() local
131 reply = kzalloc(if_version_length, GFP_KERNEL); mei_nfc_if_version()
132 if (!reply) mei_nfc_if_version()
135 bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length); mei_nfc_if_version()
142 version = (struct mei_nfc_if_version *)reply->data; mei_nfc_if_version()
149 kfree(reply); mei_nfc_if_version()
155 struct mei_nfc_cmd *cmd, *reply; mei_nfc_connect() local
174 reply = kzalloc(connect_resp_length, GFP_KERNEL); mei_nfc_connect()
175 if (!reply) { mei_nfc_connect()
180 connect_resp = (struct mei_nfc_connect_resp *)reply->data; mei_nfc_connect()
195 bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, mei_nfc_connect()
203 MEI_DUMP_NFC_HDR("connect reply", &reply->hdr); mei_nfc_connect()
215 kfree(reply); mei_nfc_connect()
/linux-4.4.14/include/trace/events/
hswadsp.h
200 __entry->stream_id = stream->reply.stream_hw_id;
201 __entry->mixer_id = stream->reply.mixer_hw_id;
202 __entry->peak0 = stream->reply.peak_meter_register_address[0];
203 __entry->peak1 = stream->reply.peak_meter_register_address[1];
204 __entry->vol0 = stream->reply.volume_register_address[0];
205 __entry->vol1 = stream->reply.volume_register_address[1];
216 TP_PROTO(struct sst_hsw_ipc_stream_info_reply *reply),
218 TP_ARGS(reply),
229 __entry->mixer_id = reply->mixer_hw_id;
230 __entry->peak0 = reply->peak_meter_register_address[0];
231 __entry->peak1 = reply->peak_meter_register_address[1];
232 __entry->vol0 = reply->volume_register_address[0];
233 __entry->vol1 = reply->volume_register_address[1];
/linux-4.4.14/fs/afs/
cmservice.c
157 /* be sure to send the reply *before* attempting to spam the AFS server SRXAFSCB_CallBack()
436 } reply; SRXAFSCB_ProbeUuid() local
442 reply.match = htonl(0); SRXAFSCB_ProbeUuid()
444 reply.match = htonl(1); SRXAFSCB_ProbeUuid()
446 afs_send_simple_reply(call, &reply, sizeof(reply)); SRXAFSCB_ProbeUuid()
543 } reply; SRXAFSCB_TellMeAboutYourself() local
558 memset(&reply, 0, sizeof(reply)); SRXAFSCB_TellMeAboutYourself()
559 reply.ia.nifs = htonl(nifs); SRXAFSCB_TellMeAboutYourself()
561 reply.ia.uuid[0] = htonl(afs_uuid.time_low); SRXAFSCB_TellMeAboutYourself()
562 reply.ia.uuid[1] = htonl(afs_uuid.time_mid); SRXAFSCB_TellMeAboutYourself()
563 reply.ia.uuid[2] = htonl(afs_uuid.time_hi_and_version); SRXAFSCB_TellMeAboutYourself()
564 reply.ia.uuid[3] = htonl((s8) afs_uuid.clock_seq_hi_and_reserved); SRXAFSCB_TellMeAboutYourself()
565 reply.ia.uuid[4] = htonl((s8) afs_uuid.clock_seq_low); SRXAFSCB_TellMeAboutYourself()
567 reply.ia.uuid[loop + 5] = htonl((s8) afs_uuid.node[loop]); SRXAFSCB_TellMeAboutYourself()
571 reply.ia.ifaddr[loop] = ifs[loop].address.s_addr; SRXAFSCB_TellMeAboutYourself()
572 reply.ia.netmask[loop] = ifs[loop].netmask.s_addr; SRXAFSCB_TellMeAboutYourself()
573 reply.ia.mtu[loop] = htonl(ifs[loop].mtu); SRXAFSCB_TellMeAboutYourself()
578 reply.cap.capcount = htonl(1); SRXAFSCB_TellMeAboutYourself()
579 reply.cap.caps[0] = htonl(AFS_CAP_ERROR_TRANSLATION); SRXAFSCB_TellMeAboutYourself()
580 afs_send_simple_reply(call, &reply, sizeof(reply)); SRXAFSCB_TellMeAboutYourself()
fsclient.c
236 * deliver reply data to an FS.FetchStatus
241 struct afs_vnode *vnode = call->reply; afs_deliver_fs_fetch_status()
253 /* unmarshall the reply once we've received all of it */ afs_deliver_fs_fetch_status()
294 call->reply = vnode; afs_fs_fetch_file_status()
310 * deliver reply data to an FS.FetchData
315 struct afs_vnode *vnode = call->reply; afs_deliver_fs_fetch_data()
468 call->reply = vnode; afs_fs_fetch_data64()
513 call->reply = vnode; afs_fs_fetch_data()
533 * deliver reply data to an FS.GiveUpCallBacks
541 return -EBADMSG; /* shouldn't be any reply data */ afs_deliver_fs_give_up_callbacks()
618 * deliver reply data to an FS.CreateFile or an FS.MakeDir
623 struct afs_vnode *vnode = call->reply; afs_deliver_fs_create_vnode()
635 /* unmarshall the reply once we've received all of it */ afs_deliver_fs_create_vnode()
686 call->reply = vnode; afs_fs_create()
717 * deliver reply data to an FS.RemoveFile or FS.RemoveDir
722 struct afs_vnode *vnode = call->reply; afs_deliver_fs_remove()
734 /* unmarshall the reply once we've received all of it */ afs_deliver_fs_remove()
778 call->reply = vnode; afs_fs_remove()
800 * deliver reply data to an FS.Link
805 struct afs_vnode *dvnode = call->reply, *vnode = call->reply2; afs_deliver_fs_link()
817 /* unmarshall the reply once we've received all of it */ afs_deliver_fs_link()
862 call->reply = dvnode; afs_fs_link()
888 * deliver reply data to an FS.Symlink
893 struct afs_vnode *vnode = call->reply; afs_deliver_fs_symlink()
905 /* unmarshall the reply once we've received all of it */ afs_deliver_fs_symlink()
958 call->reply = vnode; afs_fs_symlink()
995 * deliver reply data to an FS.Rename
1000 struct afs_vnode *orig_dvnode = call->reply, *new_dvnode = call->reply2; afs_deliver_fs_rename()
1012 /* unmarshall the reply once we've received all of it */ afs_deliver_fs_rename()
1067 call->reply = orig_dvnode; afs_fs_rename()
1101 * deliver reply data to an FS.StoreData
1106 struct afs_vnode *vnode = call->reply; afs_deliver_fs_store_data()
1123 /* unmarshall the reply once we've received all of it */ afs_deliver_fs_store_data()
1177 call->reply = vnode; afs_fs_store_data64()
1254 call->reply = vnode; afs_fs_store_data()
1287 * deliver reply data to an FS.StoreStatus
1293 struct afs_vnode *vnode = call->reply; afs_deliver_fs_store_status()
1310 /* unmarshall the reply once we've received all of it */ afs_deliver_fs_store_status()
1370 call->reply = vnode; afs_fs_setattr_size64()
1421 call->reply = vnode; afs_fs_setattr_size()
1468 call->reply = vnode; afs_fs_setattr()
1486 * deliver reply data to an FS.GetVolumeStatus
1746 call->reply = vnode; afs_fs_get_volume_status()
1761 * deliver reply data to an FS.SetLock, FS.ExtendLock or FS.ReleaseLock
1777 /* unmarshall the reply once we've received all of it */ afs_deliver_fs_xxxx_lock()
1834 call->reply = vnode; afs_fs_set_lock()
1867 call->reply = vnode; afs_fs_extend_lock()
1899 call->reply = vnode; afs_fs_release_lock()
vlclient.c
59 * deliver reply data to a VL.GetEntryByXXX call
78 /* unmarshall the reply once we've received all of it */ afs_deliver_vl_get_entry_by_xxx()
79 entry = call->reply; afs_deliver_vl_get_entry_by_xxx()
171 call->reply = entry; afs_vl_get_entry_by_name()
207 call->reply = entry; afs_vl_get_entry_by_id()
rxrpc.c
216 * allocate a call with flat request and reply buffers
313 * packet as RxRPC might give us the reply before it afs_send_pages()
394 * might give us the reply before it returns from sending the afs_make_call()
649 call->wait_mode->async_complete(call->reply, afs_process_async_call()
651 call->reply = NULL; afs_process_async_call()
666 * empty a socket buffer into a flat reply buffer
768 * send an empty reply
800 * send a simple reply
internal.h
65 void (*async_complete)(void *reply, int error);
88 void *buffer; /* reply receive buffer */
89 void *reply; /* reply buffer (first part) */ member in struct:afs_call
90 void *reply2; /* reply buffer (second part) */
91 void *reply3; /* reply buffer (third part) */
92 void *reply4; /* reply buffer (fourth part) */
97 AFS_CALL_AWAIT_REPLY, /* awaiting reply to outgoing call */
109 unsigned reply_max; /* maximum size of reply */
110 unsigned reply_size; /* current size of reply */
128 /* deliver request or reply data to an call
/linux-4.4.14/net/ceph/
auth_x_protocol.h
17 /* common request/reply headers */
30 /* initial hello (no reply header) */
mon_client.c
476 if (req->reply) release_generic_request()
477 ceph_msg_put(req->reply); release_generic_request()
510 dout("get_generic_reply %lld got %p\n", tid, req->reply); get_generic_reply()
512 m = ceph_msg_get(req->reply); get_generic_reply()
515 * this reply because we only have one open connection get_generic_reply()
566 struct ceph_mon_statfs_reply *reply = msg->front.iov_base; handle_statfs_reply() local
569 if (msg->front.iov_len != sizeof(*reply)) handle_statfs_reply()
576 *(struct ceph_statfs *)req->buf = reply->st; handle_statfs_reply()
588 pr_err("corrupt statfs reply, tid %llu\n", tid); handle_statfs_reply()
614 req->reply = ceph_msg_new(CEPH_MSG_STATFS_REPLY, 1024, GFP_NOFS, ceph_monc_do_statfs()
616 if (!req->reply) ceph_monc_do_statfs()
665 pr_err("corrupt mon_get_version reply, tid %llu\n", tid); handle_get_version_reply()
670 * Send MMonGetVersion and wait for the reply.
698 req->reply = ceph_msg_new(CEPH_MSG_MON_GET_VERSION_REPLY, 1024, ceph_monc_do_get_version()
700 if (!req->reply) { ceph_monc_do_get_version()
734 ceph_msg_revoke_incoming(req->reply); __resend_generic_request()
1067 * Older OSDs don't set reply tid even if the orignal mon_alloc_msg()
auth_x.c
611 struct ceph_x_authorize_reply reply; ceph_x_verify_authorizer_reply() local
612 void *preply = &reply; ceph_x_verify_authorizer_reply()
616 ret = ceph_x_decrypt(&au->session_key, &p, end, &preply, sizeof(reply)); ceph_x_verify_authorizer_reply()
619 if (ret != sizeof(reply)) ceph_x_verify_authorizer_reply()
622 if (au->nonce + 1 != le64_to_cpu(reply.nonce_plus_one)) ceph_x_verify_authorizer_reply()
627 au->nonce, le64_to_cpu(reply.nonce_plus_one), ret); ceph_x_verify_authorizer_reply()
/linux-4.4.14/include/uapi/linux/
atm_tcp.h
31 #define ATMTCP_CTRL_OPEN 1 /* request/reply */
32 #define ATMTCP_CTRL_CLOSE 2 /* request/reply */
nbd.h
49 /* These are sent over the network in the request/reply magic fields */
68 * This is the reply packet that nbd-server sends back to the client after
if_arp.h
104 #define ARPOP_REPLY 2 /* ARP reply */
106 #define ARPOP_RREPLY 4 /* RARP reply */
108 #define ARPOP_InREPLY 9 /* InARP reply */
watchdog.h
50 #define WDIOF_KEEPALIVEPING 0x8000 /* Keep alive ping reply */
atmsvc.h
26 int reply; /* for okay and close: */ member in struct:atmsvc_msg
fuse.h
120 * INIT request and reply respectively.
126 * reply with the major version it supports, ignore the rest of the
132 * communication and reply with that major version (and an arbitrary
214 * INIT request/reply flags
255 * CUSE INIT request/reply flags
319 FUSE_FORGET = 2, /* no reply */
tipc_config.h
53 * to the TIPC configuration service on a node, which sends a reply message
56 * Both request and reply messages consist of a transport header and payload.
61 * For many operations, the request and reply messages have a fixed number
62 * of TLVs (usually zero or one); however, some reply messages may return
64 * of an "error string" TLV in the reply message instead of the TLV(s) the
65 * reply should contain if the request succeeds.
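The tipc_config.h comment describes a request/reply exchange in which both messages carry a transport header plus a payload of TLVs, and a failed request gets back an "error string" TLV instead of the TLVs a successful reply would contain. The sketch below just walks such a TLV payload; the (type, length) record layout is hypothetical, chosen only to illustrate the idea, not to match TIPC's actual wire format.

/* Hypothetical TLV walk illustrating the reply layout described above: a
 * sequence of (type, length, value) records, where length covers the header
 * too, processed until the payload is exhausted. Not TIPC's real format. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint16_t type;
	uint16_t len;    /* header + value, in bytes */
};

static void walk_reply(const unsigned char *payload, size_t size)
{
	size_t off = 0;

	while (off + sizeof(struct tlv) <= size) {
		struct tlv hdr;

		memcpy(&hdr, payload + off, sizeof(hdr));
		if (hdr.len < sizeof(hdr) || off + hdr.len > size)
			break;                        /* malformed reply */
		printf("TLV type %u, %u byte(s) of value\n",
		       (unsigned)hdr.type, (unsigned)(hdr.len - sizeof(hdr)));
		off += hdr.len;
	}
}

int main(void)
{
	unsigned char buf[64];
	struct tlv err = { .type = 1 /* "error string" */, .len = sizeof(err) + 6 };

	memcpy(buf, &err, sizeof(err));
	memcpy(buf + sizeof(err), "denied", 6);
	walk_reply(buf, sizeof(err) + 6);
	return 0;
}
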
/linux-4.4.14/arch/um/os-Linux/
aio.c
103 struct aio_thread_reply reply; aio_thread() local
117 reply = ((struct aio_thread_reply) aio_thread()
120 reply_fd = ((struct aio_context *) reply.data)->reply_fd; aio_thread()
121 err = write(reply_fd, &reply, sizeof(reply)); aio_thread()
122 if (err != sizeof(reply)) aio_thread()
172 struct aio_thread_reply reply; not_aio_thread() local
192 reply = ((struct aio_thread_reply) { .data = req.aio, not_aio_thread()
194 err = write(req.aio->reply_fd, &reply, sizeof(reply)); not_aio_thread()
195 if (err != sizeof(reply)) not_aio_thread()
268 struct aio_thread_reply reply; submit_aio_26() local
273 reply = ((struct aio_thread_reply) { .data = aio, submit_aio_26()
275 err = write(aio->reply_fd, &reply, sizeof(reply)); submit_aio_26()
276 if (err != sizeof(reply)) { submit_aio_26()
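The aio.c hits show the UML helper-thread pattern: the worker fills a small struct aio_thread_reply and writes it, as one fixed-size record, to a reply_fd; the submitter reads the same struct back to learn which request finished and with what error. Below is a runnable POSIX sketch of that reply-over-a-pipe idea; the struct and names are illustrative.

/* Sketch of the "write a small reply struct to a reply fd" pattern used by
 * the UML aio helpers above. Plain POSIX; names are illustrative. */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

struct thread_reply {
	void *data;   /* request cookie handed back to the submitter */
	int   err;    /* 0 or -errno style result */
};

int main(void)
{
	int fds[2];
	struct thread_reply out = { .data = (void *)0x1234, .err = 0 }, in;

	if (pipe(fds) < 0)
		return 1;

	/* Worker side: report completion as one fixed-size record. */
	if (write(fds[1], &out, sizeof(out)) != sizeof(out))
		return 1;

	/* Submitter side: read the reply and match it to the request. */
	if (read(fds[0], &in, sizeof(in)) != sizeof(in))
		return 1;
	printf("request %p finished, err=%d\n", in.data, in.err);

	close(fds[0]);
	close(fds[1]);
	return 0;
}
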
/linux-4.4.14/net/atm/
signaling.c
45 msg->reply = -EOPNOTSUPP; modify_qos()
48 msg->reply = vcc->dev->ops->change_qos(vcc, &msg->qos, modify_qos()
49 msg->reply); modify_qos()
50 if (!msg->reply) modify_qos()
77 sk->sk_err = -msg->reply; sigd_send()
98 sk->sk_err = -msg->reply; sigd_send()
120 vcc_release_async(vcc, msg->reply); sigd_send()
127 sk->sk_err_soft = msg->reply; sigd_send()
144 int reply) sigd_enq2()
158 msg->reply = reply; sigd_enq2()
184 /* other ISP applications may use "reply" */ sigd_enq()
141 sigd_enq2(struct atm_vcc *vcc, enum atmsvc_msg_type type, struct atm_vcc *listen_vcc, const struct sockaddr_atmpvc *pvc, const struct sockaddr_atmsvc *svc, const struct atm_qos *qos, int reply) sigd_enq2() argument
signaling.h
24 const struct sockaddr_atmsvc *svc,const struct atm_qos *qos,int reply);
/linux-4.4.14/arch/powerpc/platforms/powermac/
time.c
115 printk(KERN_ERR "cuda_get_time: got %d byte reply\n", cuda_get_time()
117 now = (req.reply[3] << 24) + (req.reply[4] << 16) cuda_get_time()
118 + (req.reply[5] << 8) + req.reply[6]; cuda_get_time()
137 printk(KERN_ERR "cuda_set_rtc_time: got %d byte reply\n", cuda_set_rtc_time()
158 printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n", pmu_get_time()
160 now = (req.reply[0] << 24) + (req.reply[1] << 16) pmu_get_time()
161 + (req.reply[2] << 8) + req.reply[3]; pmu_get_time()
178 printk(KERN_ERR "pmu_set_rtc_time: %d byte reply from PMU\n", pmu_set_rtc_time()
pfunc_base.c
70 /* Check if we have room for reply */ macio_do_gpio_read()
159 /* Check if we have room for reply */ macio_do_read_reg32()
182 /* Check if we have room for reply */ macio_do_read_reg8()
195 /* Check if we have room for reply */ macio_do_read_reg32_msrx()
208 /* Check if we have room for reply */ macio_do_read_reg8_msrx()
/linux-4.4.14/net/openvswitch/
datapath.c
82 /* Check if need to build a reply message.
83 * OVS userspace sets the NLM_F_ECHO flag if it needs the reply. */ ovs_must_notify()
914 struct sk_buff *reply; ovs_flow_cmd_new() local
966 reply = ovs_flow_cmd_alloc_info(acts, &new_flow->id, info, false, ovs_flow_cmd_new()
968 if (IS_ERR(reply)) { ovs_flow_cmd_new()
969 error = PTR_ERR(reply); ovs_flow_cmd_new()
995 if (unlikely(reply)) { ovs_flow_cmd_new()
998 reply, info->snd_portid, ovs_flow_cmd_new()
1037 if (unlikely(reply)) { ovs_flow_cmd_new()
1040 reply, info->snd_portid, ovs_flow_cmd_new()
1052 if (reply) ovs_flow_cmd_new()
1053 ovs_notify(&dp_flow_genl_family, reply, info); ovs_flow_cmd_new()
1058 kfree_skb(reply); ovs_flow_cmd_new()
1097 struct sk_buff *reply = NULL; ovs_flow_cmd_set() local
1131 reply = ovs_flow_cmd_alloc_info(acts, &sfid, info, false, ovs_flow_cmd_set()
1133 if (IS_ERR(reply)) { ovs_flow_cmd_set()
1134 error = PTR_ERR(reply); ovs_flow_cmd_set()
1160 if (unlikely(reply)) { ovs_flow_cmd_set()
1163 reply, info->snd_portid, ovs_flow_cmd_set()
1171 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, ovs_flow_cmd_set()
1175 if (IS_ERR(reply)) { ovs_flow_cmd_set()
1176 error = PTR_ERR(reply); ovs_flow_cmd_set()
1186 if (reply) ovs_flow_cmd_set()
1187 ovs_notify(&dp_flow_genl_family, reply, info); ovs_flow_cmd_set()
1195 kfree_skb(reply); ovs_flow_cmd_set()
1208 struct sk_buff *reply; ovs_flow_cmd_get() local
1247 reply = ovs_flow_cmd_build_info(flow, ovs_header->dp_ifindex, info, ovs_flow_cmd_get()
1249 if (IS_ERR(reply)) { ovs_flow_cmd_get()
1250 err = PTR_ERR(reply); ovs_flow_cmd_get()
1255 return genlmsg_reply(reply, info); ovs_flow_cmd_get()
1267 struct sk_buff *reply; ovs_flow_cmd_del() local
1310 reply = ovs_flow_cmd_alloc_info((const struct sw_flow_actions __force *) flow->sf_acts, ovs_flow_cmd_del()
1312 if (likely(reply)) { ovs_flow_cmd_del()
1313 if (likely(!IS_ERR(reply))) { ovs_flow_cmd_del()
1316 reply, info->snd_portid, ovs_flow_cmd_del()
1323 ovs_notify(&dp_flow_genl_family, reply, info); ovs_flow_cmd_del()
1325 netlink_set_err(sock_net(skb->sk)->genl_sock, 0, 0, PTR_ERR(reply)); ovs_flow_cmd_del()
1529 struct sk_buff *reply; ovs_dp_cmd_new() local
1539 reply = ovs_dp_cmd_alloc_info(info); ovs_dp_cmd_new()
1540 if (!reply) ovs_dp_cmd_new()
1602 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, ovs_dp_cmd_new()
1611 ovs_notify(&dp_datapath_genl_family, reply, info); ovs_dp_cmd_new()
1624 kfree_skb(reply); ovs_dp_cmd_new()
1656 struct sk_buff *reply; ovs_dp_cmd_del() local
1660 reply = ovs_dp_cmd_alloc_info(info); ovs_dp_cmd_del()
1661 if (!reply) ovs_dp_cmd_del()
1670 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, ovs_dp_cmd_del()
1677 ovs_notify(&dp_datapath_genl_family, reply, info); ovs_dp_cmd_del()
1683 kfree_skb(reply); ovs_dp_cmd_del()
1689 struct sk_buff *reply; ovs_dp_cmd_set() local
1693 reply = ovs_dp_cmd_alloc_info(info); ovs_dp_cmd_set()
1694 if (!reply) ovs_dp_cmd_set()
1705 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, ovs_dp_cmd_set()
1710 ovs_notify(&dp_datapath_genl_family, reply, info); ovs_dp_cmd_set()
1716 kfree_skb(reply); ovs_dp_cmd_set()
1722 struct sk_buff *reply; ovs_dp_cmd_get() local
1726 reply = ovs_dp_cmd_alloc_info(info); ovs_dp_cmd_get()
1727 if (!reply) ovs_dp_cmd_get()
1736 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, ovs_dp_cmd_get()
1741 return genlmsg_reply(reply, info); ovs_dp_cmd_get()
1745 kfree_skb(reply); ovs_dp_cmd_get()
1920 struct sk_buff *reply; ovs_vport_cmd_new() local
1935 reply = ovs_vport_cmd_alloc_info(); ovs_vport_cmd_new()
1936 if (!reply) ovs_vport_cmd_new()
1978 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, ovs_vport_cmd_new()
1983 ovs_notify(&dp_vport_genl_family, reply, info); ovs_vport_cmd_new()
1988 kfree_skb(reply); ovs_vport_cmd_new()
1995 struct sk_buff *reply; ovs_vport_cmd_set() local
1999 reply = ovs_vport_cmd_alloc_info(); ovs_vport_cmd_set()
2000 if (!reply) ovs_vport_cmd_set()
2030 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, ovs_vport_cmd_set()
2035 ovs_notify(&dp_vport_genl_family, reply, info); ovs_vport_cmd_set()
2040 kfree_skb(reply); ovs_vport_cmd_set()
2047 struct sk_buff *reply; ovs_vport_cmd_del() local
2051 reply = ovs_vport_cmd_alloc_info(); ovs_vport_cmd_del()
2052 if (!reply) ovs_vport_cmd_del()
2066 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, ovs_vport_cmd_del()
2072 ovs_notify(&dp_vport_genl_family, reply, info); ovs_vport_cmd_del()
2077 kfree_skb(reply); ovs_vport_cmd_del()
2085 struct sk_buff *reply; ovs_vport_cmd_get() local
2089 reply = ovs_vport_cmd_alloc_info(); ovs_vport_cmd_get()
2090 if (!reply) ovs_vport_cmd_get()
2098 err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, ovs_vport_cmd_get()
2103 return genlmsg_reply(reply, info); ovs_vport_cmd_get()
2107 kfree_skb(reply); ovs_vport_cmd_get()
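The ovs_dp_cmd_* and ovs_vport_cmd_* hits above all follow one reply lifecycle: allocate the reply skb up front, fill it only after the operation succeeds, then either unicast it back with genlmsg_reply() (GET) or broadcast it with ovs_notify() (NEW/SET/DEL), and free it with kfree_skb() on every error path. A condensed sketch of the GET shape; example_lookup_dp() and example_fill_reply() are hypothetical stand-ins for the real lookup and fill helpers, whose full argument lists are not shown in the hits:

    struct datapath;                                        /* opaque here */
    struct datapath *example_lookup_dp(struct genl_info *info);        /* hypothetical */
    int example_fill_reply(struct datapath *dp, struct sk_buff *reply,
                           u32 portid);                                 /* hypothetical */

    static int example_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
    {
            struct sk_buff *reply;
            struct datapath *dp;
            int err;

            reply = ovs_dp_cmd_alloc_info(info);    /* reserve the reply skb first */
            if (!reply)
                    return -ENOMEM;

            dp = example_lookup_dp(info);
            if (!dp) {
                    err = -ENODEV;
                    goto err_free;
            }

            err = example_fill_reply(dp, reply, info->snd_portid);
            if (err)
                    goto err_free;

            return genlmsg_reply(reply, info);      /* unicast the reply to the requester */

    err_free:
            kfree_skb(reply);                       /* never leak the preallocated reply */
            return err;
    }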
/linux-4.4.14/drivers/net/ethernet/brocade/bna/
H A Dbfi_cna.h42 u32 msgtag; /*!< msgtag for reply */
51 u32 msgtag; /*!< msgtag for reply */
83 /* @brief Mailbox reply and AEN messages from DCBX/LLDP firmware to host */
118 * @brief reply message from firmware
137 * @brief reply message from firmware
/linux-4.4.14/fs/ncpfs/
H A Dsock.c391 struct ncp_reply_header reply; ncpdgram_rcv_proc() local
394 result = _recv(sock, &reply, sizeof(reply), MSG_PEEK | MSG_DONTWAIT); ncpdgram_rcv_proc()
398 if (result >= sizeof(reply)) { ncpdgram_rcv_proc()
401 if (reply.type == NCP_WATCHDOG) { ncpdgram_rcv_proc()
404 if (server->connection != get_conn_number(&reply)) { ncpdgram_rcv_proc()
424 if (reply.type != NCP_POSITIVE_ACK && reply.type != NCP_REPLY) { ncpdgram_rcv_proc()
434 if (req && (req->tx_type == NCP_ALLOC_SLOT_REQUEST || (server->sequence == reply.sequence && ncpdgram_rcv_proc()
435 server->connection == get_conn_number(&reply)))) { ncpdgram_rcv_proc()
436 if (reply.type == NCP_POSITIVE_ACK) { ncpdgram_rcv_proc()
440 } else if (reply.type == NCP_REPLY) { ncpdgram_rcv_proc()
469 _recv(sock, &reply, sizeof(reply), MSG_DONTWAIT); ncpdgram_rcv_proc()
570 pr_err("tcp: Unexpected reply type %08X\n", ntohl(server->rcv.buf.magic)); __ncptcp_rcv_proc()
576 pr_err("tcp: Unexpected reply len %d\n", datalen); __ncptcp_rcv_proc()
583 pr_err("tcp: Unexpected reply len %d\n", datalen); __ncptcp_rcv_proc()
622 pr_err("tcp: Unexpected reply len %d (expected at most %Zd)\n", datalen, req->datalen + 8); __ncptcp_rcv_proc()
741 void* reply, int max_reply_size) ncp_do_request()
777 result = do_ncp_rpc_call(server, size, reply, max_reply_size); ncp_do_request()
790 /* ncp_do_request assures that at least a complete reply header is
798 struct ncp_reply_header* reply = rpl; ncp_request2() local
813 result = ncp_do_request(server, server->current_size, reply, size); ncp_request2()
818 server->completion = reply->completion_code; ncp_request2()
819 server->conn_status = reply->connection_state; ncp_request2()
823 result = reply->completion_code; ncp_request2()
740 ncp_do_request(struct ncp_server *server, int size, void* reply, int max_reply_size) ncp_do_request() argument
H A Dncp_fs_sb.h62 int reply_size; /* Size of last reply */
68 unsigned char *rxbuf; /* Storage for reply to current request */
117 struct ncp_request_reply* creq; /* STREAM/DGRAM: awaiting reply from this request */
H A Dncp_fs.h74 void* reply, int max_reply_size); ncp_request()
/linux-4.4.14/drivers/net/wireless/ath/wil6210/
H A Dwmi.c810 u16 reply_id, void *reply, u8 reply_size, int to_msec) wmi_call()
822 wil->reply_buf = reply; wmi_call()
882 } __packed reply; wmi_pcp_start() local
900 WMI_PCP_STARTED_EVENTID, &reply, sizeof(reply), 5000); wmi_pcp_start()
904 if (reply.evt.status != WMI_FW_STATUS_SUCCESS) wmi_pcp_start()
936 } __packed reply; wmi_get_ssid() local
937 int len; /* reply.cmd.ssid_len in CPU order */ wmi_get_ssid()
940 &reply, sizeof(reply), 20); wmi_get_ssid()
944 len = le32_to_cpu(reply.cmd.ssid_len); wmi_get_ssid()
945 if (len > sizeof(reply.cmd.ssid)) wmi_get_ssid()
949 memcpy(ssid, reply.cmd.ssid, len); wmi_get_ssid()
969 } __packed reply; wmi_get_channel() local
972 WMI_GET_PCP_CHANNEL_EVENTID, &reply, sizeof(reply), 20); wmi_get_channel()
976 if (reply.cmd.channel > 3) wmi_get_channel()
979 *channel = reply.cmd.channel + 1; wmi_get_channel()
1077 } __packed reply; wmi_rxon() local
1084 &reply, sizeof(reply), 100); wmi_rxon()
1085 if ((rc == 0) && (reply.evt.status != WMI_FW_STATUS_SUCCESS)) wmi_rxon()
1168 } __packed reply; wmi_get_temperature() local
1171 WMI_TEMP_SENSE_DONE_EVENTID, &reply, sizeof(reply), 100); wmi_get_temperature()
1176 *t_bb = le32_to_cpu(reply.evt.baseband_t1000); wmi_get_temperature()
1178 *t_rf = le32_to_cpu(reply.evt.rf_t1000); wmi_get_temperature()
1193 } __packed reply; wmi_disconnect_sta() local
1200 WMI_DISCONNECT_EVENTID, &reply, sizeof(reply), 1000); wmi_disconnect_sta()
1211 reason_code = le16_to_cpu(reply.evt.protocol_reason_status); wmi_disconnect_sta()
1214 reply.evt.bssid, reason_code, wmi_disconnect_sta()
1215 reply.evt.disconnect_reason); wmi_disconnect_sta()
1218 wil6210_disconnect(wil, reply.evt.bssid, reason_code, true); wmi_disconnect_sta()
1284 } __packed reply; wmi_addba_rx_resp() local
1291 WMI_RCP_ADDBA_RESP_SENT_EVENTID, &reply, sizeof(reply), wmi_addba_rx_resp()
1296 if (reply.evt.status) { wmi_addba_rx_resp()
1298 le16_to_cpu(reply.evt.status)); wmi_addba_rx_resp()
809 wmi_call(struct wil6210_priv *wil, u16 cmdid, void *buf, u16 len, u16 reply_id, void *reply, u8 reply_size, int to_msec) wmi_call() argument
H A Ddebugfs.c974 /* if reply is all-0, ignore this CID */ is_all_zeros()
996 } __packed reply; wil_bf_debugfs_show() local
1003 WMI_NOTIFY_REQ_DONE_EVENTID, &reply, wil_bf_debugfs_show()
1004 sizeof(reply), 20); wil_bf_debugfs_show()
1005 /* if reply is all-0, ignore this CID */ wil_bf_debugfs_show()
1006 if (rc || is_all_zeros(&reply.evt, sizeof(reply.evt))) wil_bf_debugfs_show()
1009 status = le32_to_cpu(reply.evt.status); wil_bf_debugfs_show()
1019 le64_to_cpu(reply.evt.tsf), wil_bf_debugfs_show()
1020 le16_to_cpu(reply.evt.bf_mcs), wil_bf_debugfs_show()
1021 le32_to_cpu(reply.evt.tx_tpt), wil_bf_debugfs_show()
1022 reply.evt.sqi, wil_bf_debugfs_show()
1024 le16_to_cpu(reply.evt.my_rx_sector), wil_bf_debugfs_show()
1025 le16_to_cpu(reply.evt.my_tx_sector), wil_bf_debugfs_show()
1026 le16_to_cpu(reply.evt.other_rx_sector), wil_bf_debugfs_show()
1027 le16_to_cpu(reply.evt.other_tx_sector), wil_bf_debugfs_show()
1028 le32_to_cpu(reply.evt.rx_goodput), wil_bf_debugfs_show()
1029 le32_to_cpu(reply.evt.tx_goodput)); wil_bf_debugfs_show()
H A Dcfg80211.c118 } __packed reply; wil_cid_fill_sinfo() local
123 WMI_NOTIFY_REQ_DONE_EVENTID, &reply, sizeof(reply), 20); wil_cid_fill_sinfo()
132 cid, le16_to_cpu(reply.evt.bf_mcs), wil_cid_fill_sinfo()
133 le64_to_cpu(reply.evt.tsf), reply.evt.status, wil_cid_fill_sinfo()
134 le32_to_cpu(reply.evt.snr_val), wil_cid_fill_sinfo()
135 reply.evt.sqi, wil_cid_fill_sinfo()
136 le32_to_cpu(reply.evt.tx_tpt), wil_cid_fill_sinfo()
137 le32_to_cpu(reply.evt.tx_goodput), wil_cid_fill_sinfo()
138 le32_to_cpu(reply.evt.rx_goodput), wil_cid_fill_sinfo()
139 le16_to_cpu(reply.evt.my_rx_sector), wil_cid_fill_sinfo()
140 le16_to_cpu(reply.evt.my_tx_sector), wil_cid_fill_sinfo()
141 le16_to_cpu(reply.evt.other_rx_sector), wil_cid_fill_sinfo()
142 le16_to_cpu(reply.evt.other_tx_sector)); wil_cid_fill_sinfo()
156 sinfo->txrate.mcs = le16_to_cpu(reply.evt.bf_mcs); wil_cid_fill_sinfo()
168 sinfo->signal = reply.evt.sqi; wil_cid_fill_sinfo()
H A Dtxrx.c746 } __packed reply; wil_vring_init_tx() local
774 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); wil_vring_init_tx()
778 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) { wil_vring_init_tx()
780 reply.cmd.status); wil_vring_init_tx()
784 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); wil_vring_init_tx()
818 } __packed reply; wil_vring_init_bcast() local
846 WMI_VRING_CFG_DONE_EVENTID, &reply, sizeof(reply), 100); wil_vring_init_bcast()
850 if (reply.cmd.status != WMI_FW_STATUS_SUCCESS) { wil_vring_init_bcast()
852 reply.cmd.status); wil_vring_init_bcast()
856 vring->hwtail = le32_to_cpu(reply.cmd.tx_vring_tail_ptr); wil_vring_init_bcast()
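The wil6210 hits above share one request/reply shape: build a little-endian command struct, declare a packed on-stack reply struct sized for the expected event, and hand both to wmi_call() together with the command id, the event id to wait for, and a timeout; on success the event payload is read back with le*_to_cpu(). A sketch under invented names (EXAMPLE_CMDID, EXAMPLE_DONE_EVENTID and the payload layouts are hypothetical; only the wmi_call() signature comes from the listing above):

    /* hypothetical command/event ids and payloads, for illustration only */
    #define EXAMPLE_CMDID           0x0900
    #define EXAMPLE_DONE_EVENTID    0x1900

    static int example_wmi_query(struct wil6210_priv *wil, u32 *value_out)
    {
            struct {
                    __le32 param;
            } __packed cmd = {
                    .param = cpu_to_le32(1),
            };
            struct {
                    u8 hdr[16];     /* placeholder for the event header the driver prepends */
                    __le32 status;
                    __le32 value;
            } __packed reply;
            int rc;

            /* send the command and block (here up to 100 ms) for the matching event */
            rc = wmi_call(wil, EXAMPLE_CMDID, &cmd, sizeof(cmd),
                          EXAMPLE_DONE_EVENTID, &reply, sizeof(reply), 100);
            if (rc)
                    return rc;

            if (le32_to_cpu(reply.status) != 0)     /* firmware reported failure */
                    return -EINVAL;

            *value_out = le32_to_cpu(reply.value);
            return 0;
    }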
/linux-4.4.14/drivers/message/fusion/lsi/
H A Dmpi_raid.h27 * 07-12-02 01.02.07 Added structures for Mailbox request and reply.
130 /* RAID Action reply message */
152 /* RAID Volume reply ActionStatus values */
160 /* RAID Volume reply RAID Volume Indicator structure */
195 /* SCSI IO RAID Passthrough reply structure */
239 /* Mailbox reply structure */
H A Dmpi_tool.h43 /* Toolbox reply */
297 /* Diagnostic Buffer Post reply */
334 /* Diagnostic Release reply */
/linux-4.4.14/include/linux/sunrpc/
H A Drpc_rdma.h64 * write chunk, and reply chunk.
96 __be32 rm_chunks[0]; /* read, write and reply chunks */
117 RDMA_MSG = 0, /* An RPC call or reply msg */
118 RDMA_NOMSG = 1, /* An RPC call or reply msg - separate body */
119 RDMA_MSGP = 2, /* An RPC call or reply msg with padding */
120 RDMA_DONE = 3, /* Client signals reply completion */
H A Dxprtsock.h30 * State of TCP reply receive
H A Dsvc.h156 * pages are available to write the reply into.
162 * of a reply.
171 * This assumes that the non-page part of an rpc reply will fit
174 * Each request/reply pair can have at most one "payload", plus two pages,
175 * one for the request, and one for the reply.
235 * - reply from here */
251 struct page * *rq_next_page; /* next reply page to use */
265 #define RQ_DROPME (3) /* drop current reply */
367 struct sockaddr_storage addr; /* where reply must go */
369 struct sockaddr_storage daddr; /* where reply must come from */
427 unsigned int pc_xdrressize; /* maximum size of XDR reply */
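The svc.h comments above spell out a page budget: enough pages for one maximum-size payload, plus one page for the non-payload part of the request and one for the non-payload part of the reply, plus slack. Restated as arithmetic with illustrative names (the real macro and payload limit in svc.h may differ):

    #define EXAMPLE_MAXPAYLOAD      (1024 * 1024)   /* illustrative payload cap, not the real limit */
    #define EXAMPLE_MAXPAGES        (((EXAMPLE_MAXPAYLOAD) + PAGE_SIZE - 1) / PAGE_SIZE \
                                     + 2    /* one page for the request, one for the reply */ \
                                     + 1)   /* slack for a payload that is not page-aligned */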
H A Dsvc_rdma.h92 * and the writing of the reply to the client, the memory in the
98 * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the
100 * mapping of the reply.
/linux-4.4.14/drivers/scsi/qla2xxx/
H A Dqla_bsg.c21 bsg_job->reply->result = res; qla2x00_bsg_job_done()
148 bsg_job->reply->result = DID_OK; qla24xx_proc_fcp_prio_cfg_cmd()
151 bsg_job->reply->result = (DID_ERROR << 16); qla24xx_proc_fcp_prio_cfg_cmd()
163 bsg_job->reply->result = DID_OK; qla24xx_proc_fcp_prio_cfg_cmd()
166 bsg_job->reply->result = (DID_ERROR << 16); qla24xx_proc_fcp_prio_cfg_cmd()
176 bsg_job->reply->result = (DID_ERROR << 16); qla24xx_proc_fcp_prio_cfg_cmd()
180 bsg_job->reply->result = DID_OK; qla24xx_proc_fcp_prio_cfg_cmd()
181 bsg_job->reply->reply_payload_rcv_len = qla24xx_proc_fcp_prio_cfg_cmd()
192 bsg_job->reply->result = (DID_ERROR << 16); qla24xx_proc_fcp_prio_cfg_cmd()
203 bsg_job->reply->result = (DID_ERROR << 16); qla24xx_proc_fcp_prio_cfg_cmd()
218 bsg_job->reply->result = (DID_ERROR << 16); qla24xx_proc_fcp_prio_cfg_cmd()
232 bsg_job->reply->result = DID_OK; qla24xx_proc_fcp_prio_cfg_cmd()
449 "dma_map_sg return %d for reply\n", rsp_sg_cnt); qla2x00_process_ct()
740 "dma_map_sg returned %d for reply.\n", elreq.rsp_sg_cnt); qla2x00_process_loopback()
899 bsg_job->reply->result = (DID_ERROR << 16); qla2x00_process_loopback()
900 bsg_job->reply->reply_payload_rcv_len = 0; qla2x00_process_loopback()
904 bsg_job->reply->result = (DID_OK << 16); qla2x00_process_loopback()
963 bsg_job->reply->result = DID_OK; qla84xx_reset()
1062 bsg_job->reply->result = DID_OK; qla84xx_updatefw()
1119 "dma_map_sg returned %d for reply.\n", sg_cnt); qla84xx_mgmt_cmd()
1242 bsg_job->reply->result = DID_OK; qla84xx_mgmt_cmd()
1246 bsg_job->reply->reply_payload_rcv_len = qla84xx_mgmt_cmd()
1346 rsp_ptr = ((uint8_t *)bsg_job->reply) + qla24xx_iidma()
1353 bsg_job->reply->result = DID_OK; qla24xx_iidma()
1454 bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size; qla2x00_read_optrom()
1455 bsg_job->reply->result = DID_OK; qla2x00_read_optrom()
1489 bsg_job->reply->result = DID_OK; qla2x00_update_optrom()
1512 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla2x00_update_fru_versions()
1528 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla2x00_update_fru_versions()
1535 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; qla2x00_update_fru_versions()
1542 bsg_job->reply->result = DID_OK << 16; qla2x00_update_fru_versions()
1560 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla2x00_read_fru_status()
1574 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla2x00_read_fru_status()
1582 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; qla2x00_read_fru_status()
1589 bsg_job->reply->reply_payload_rcv_len = sizeof(*sr); qla2x00_read_fru_status()
1590 bsg_job->reply->result = DID_OK << 16; qla2x00_read_fru_status()
1608 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla2x00_write_fru_status()
1622 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla2x00_write_fru_status()
1627 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; qla2x00_write_fru_status()
1634 bsg_job->reply->result = DID_OK << 16; qla2x00_write_fru_status()
1652 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla2x00_write_i2c()
1665 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla2x00_write_i2c()
1670 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; qla2x00_write_i2c()
1677 bsg_job->reply->result = DID_OK << 16; qla2x00_write_i2c()
1695 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla2x00_read_i2c()
1707 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla2x00_read_i2c()
1716 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = 0; qla2x00_read_i2c()
1723 bsg_job->reply->reply_payload_rcv_len = sizeof(*i2c); qla2x00_read_i2c()
1724 bsg_job->reply->result = DID_OK << 16; qla2x00_read_i2c()
1898 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval; qla24xx_process_bidir_cmd()
1900 bsg_job->reply->reply_payload_rcv_len = 0; qla24xx_process_bidir_cmd()
1901 bsg_job->reply->result = (DID_OK) << 16; qla24xx_process_bidir_cmd()
1953 "dma_map_sg return %d for reply\n", rsp_sg_cnt); qlafx00_mgmt_cmd()
2045 bsg_job->reply->reply_payload_rcv_len = 0; qla26xx_serdes_op()
2051 bsg_job->reply->reply_payload_rcv_len = sizeof(sr); qla26xx_serdes_op()
2060 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla26xx_serdes_op()
2064 bsg_job->reply->result = DID_OK << 16; qla26xx_serdes_op()
2085 bsg_job->reply->reply_payload_rcv_len = 0; qla8044_serdes_op()
2091 bsg_job->reply->reply_payload_rcv_len = sizeof(sr); qla8044_serdes_op()
2100 bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = qla8044_serdes_op()
2104 bsg_job->reply->result = DID_OK << 16; qla8044_serdes_op()
2178 bsg_job->reply->reply_payload_rcv_len = 0; qla24xx_bsg_request()
2251 bsg_job->reply->result = -EIO; qla24xx_bsg_timeout()
2257 bsg_job->reply->result = 0; qla24xx_bsg_timeout()
2267 bsg_job->req->errors = bsg_job->reply->result = -ENXIO; qla24xx_bsg_timeout()
/linux-4.4.14/drivers/media/usb/dvb-usb/
H A Daf9005.c111 err("generic read/write, wrong reply code."); af9005_generic_read_write()
115 err("generic read/write, wrong length in reply."); af9005_generic_read_write()
119 err("generic read/write, wrong sequence in reply."); af9005_generic_read_write()
124 the register in the reply is different that what has been sent af9005_generic_read_write()
127 err("generic read/write, wrong register in reply."); af9005_generic_read_write()
131 err("generic read/write wrong command in reply."); af9005_generic_read_write()
136 err("generic read/write wrong status code in reply."); af9005_generic_read_write()
497 err("send command, wrong reply code."); af9005_send_command()
501 err("send command, wrong sequence in reply."); af9005_send_command()
505 err("send command, wrong status code in reply."); af9005_send_command()
509 err("send command, invalid data length in reply."); af9005_send_command()
544 err("Read eeprom, invalid reply code"); af9005_read_eeprom()
548 err("Read eeprom, invalid reply length"); af9005_read_eeprom()
552 err("Read eeprom, wrong sequence in reply "); af9005_read_eeprom()
556 err("Read eeprom, wrong status in reply "); af9005_read_eeprom()
565 static int af9005_boot_packet(struct usb_device *udev, int type, u8 * reply) af9005_boot_packet() argument
658 *reply = buf[6]; af9005_boot_packet()
683 *reply = buf[6]; af9005_boot_packet()
724 u8 reply; af9005_download_firmware() local
726 ret = af9005_boot_packet(udev, FW_CONFIG, &reply); af9005_download_firmware()
729 if (reply != 0x01) { af9005_download_firmware()
730 err("before downloading firmware, FW_CONFIG expected 0x01, received 0x%x", reply); af9005_download_firmware()
749 ret = af9005_boot_packet(udev, FW_CONFIRM, &reply); af9005_download_firmware()
752 if (reply != (u8) (packets & 0xff)) { af9005_download_firmware()
753 err("after downloading firmware, FW_CONFIRM expected 0x%x, received 0x%x", packets & 0xff, reply); af9005_download_firmware()
756 ret = af9005_boot_packet(udev, FW_BOOT, &reply); af9005_download_firmware()
759 ret = af9005_boot_packet(udev, FW_CONFIG, &reply); af9005_download_firmware()
762 if (reply != 0x02) { af9005_download_firmware()
763 err("after downloading firmware, FW_CONFIG expected 0x02, received 0x%x", reply); af9005_download_firmware()
956 u8 reply; af9005_identify_state() local
957 ret = af9005_boot_packet(udev, FW_CONFIG, &reply); af9005_identify_state()
960 deb_info("result of FW_CONFIG in identify state %d\n", reply); af9005_identify_state()
961 if (reply == 0x01) af9005_identify_state()
963 else if (reply == 0x02) af9005_identify_state()
/linux-4.4.14/drivers/xen/xenbus/
H A Dxenbus_dev_frontend.c70 * still waiting a reply.
94 * reply_mutex protects the reply being built up to return to
296 /* success: pass reply list onto watcher */ watch_fired()
309 void *reply; xenbus_write_transaction() local
321 reply = xenbus_dev_request_and_reply(&u->u.msg); xenbus_write_transaction()
322 if (IS_ERR(reply)) { xenbus_write_transaction()
324 rc = PTR_ERR(reply); xenbus_write_transaction()
332 trans->handle.id = simple_strtoul(reply, NULL, 0); xenbus_write_transaction()
348 rc = queue_reply(&staging_q, reply, u->u.msg.len); xenbus_write_transaction()
357 kfree(reply); xenbus_write_transaction()
411 /* Success. Synthesize a reply to say all is OK. */ xenbus_write_watch()
416 } __packed reply = { xenbus_write_watch() local
419 .len = sizeof(reply.body) xenbus_write_watch()
425 rc = queue_reply(&u->read_buffers, &reply, sizeof(reply)); xenbus_write_watch()
H A Dxenbus_xs.c64 } reply; member in union:xs_stored_msg::__anon11312
201 body = msg->u.reply.body; read_reply()
265 /* Send message to xs, get kmalloc'ed reply. ERR_PTR() on error. */ xs_talkv()
336 static int xs_error(char *reply) xs_error() argument
338 if (IS_ERR(reply)) xs_error()
339 return PTR_ERR(reply); xs_error()
340 kfree(reply); xs_error()
920 msg->u.reply.body = body; process_msg()
/linux-4.4.14/fs/nfsd/
H A Dcache.h2 * Request reply cache. This was heavily inspired by the
14 * Representation of a reply cache entry.
H A Dnfscache.c2 * Request reply cache. This is currently a global cache, but this may
46 * Stats and other tracking of the duplicate reply cache. All of these and
187 printk(KERN_ERR "nfsd: failed to allocate reply cache\n"); nfsd_reply_cache_init()
472 /* Compose RPC reply header */ nfsd_cache_lookup()
495 * the procedure has been executed and the complete reply is in
500 * of memory for a cache with a max reply size of 100 bytes (diropokres).
506 * nfsd failed to encode a reply that otherwise would have been cached.
537 printk("nfsd: RC_REPLSTAT/reply len %d!\n",len); nfsd_cache_update()
566 * Copy cached reply to current reply buffer. Should always fit.
567 * FIXME as reply is in a page, we should just attach the page, and
576 printk(KERN_WARNING "nfsd: cached reply too large (%Zd).\n", nfsd_cache_append()
H A Dstats.c8 * Statistics for the reply cache
/linux-4.4.14/drivers/usb/gadget/function/
H A Df_mass_storage.c80 * release Information used as a reply to INQUIRY
679 return -EIO; /* No default reply */ do_read()
765 return -EIO; /* No default reply */ do_read()
955 return -EIO; /* No default reply */ do_write()
1019 return -EIO; /* No default reply */ do_verify()
1707 * We can't reply at all until we know the correct data direction check_command()
1831 int reply = -EINVAL; do_scsi_command() local
1853 reply = check_command(common, 6, DATA_DIR_TO_HOST, do_scsi_command()
1856 if (reply == 0) do_scsi_command()
1857 reply = do_inquiry(common, bh); do_scsi_command()
1862 reply = check_command(common, 6, DATA_DIR_FROM_HOST, do_scsi_command()
1865 if (reply == 0) do_scsi_command()
1866 reply = do_mode_select(common, bh); do_scsi_command()
1872 reply = check_command(common, 10, DATA_DIR_FROM_HOST, do_scsi_command()
1875 if (reply == 0) do_scsi_command()
1876 reply = do_mode_select(common, bh); do_scsi_command()
1881 reply = check_command(common, 6, DATA_DIR_TO_HOST, do_scsi_command()
1884 if (reply == 0) do_scsi_command()
1885 reply = do_mode_sense(common, bh); do_scsi_command()
1891 reply = check_command(common, 10, DATA_DIR_TO_HOST, do_scsi_command()
1894 if (reply == 0) do_scsi_command()
1895 reply = do_mode_sense(common, bh); do_scsi_command()
1900 reply = check_command(common, 6, DATA_DIR_NONE, do_scsi_command()
1903 if (reply == 0) do_scsi_command()
1904 reply = do_prevent_allow(common); do_scsi_command()
1910 reply = check_command_size_in_blocks(common, 6, do_scsi_command()
1914 if (reply == 0) do_scsi_command()
1915 reply = do_read(common); do_scsi_command()
1921 reply = check_command_size_in_blocks(common, 10, do_scsi_command()
1925 if (reply == 0) do_scsi_command()
1926 reply = do_read(common); do_scsi_command()
1932 reply = check_command_size_in_blocks(common, 12, do_scsi_command()
1936 if (reply == 0) do_scsi_command()
1937 reply = do_read(common); do_scsi_command()
1942 reply = check_command(common, 10, DATA_DIR_TO_HOST, do_scsi_command()
1945 if (reply == 0) do_scsi_command()
1946 reply = do_read_capacity(common, bh); do_scsi_command()
1954 reply = check_command(common, 10, DATA_DIR_TO_HOST, do_scsi_command()
1957 if (reply == 0) do_scsi_command()
1958 reply = do_read_header(common, bh); do_scsi_command()
1966 reply = check_command(common, 10, DATA_DIR_TO_HOST, do_scsi_command()
1969 if (reply == 0) do_scsi_command()
1970 reply = do_read_toc(common, bh); do_scsi_command()
1976 reply = check_command(common, 10, DATA_DIR_TO_HOST, do_scsi_command()
1979 if (reply == 0) do_scsi_command()
1980 reply = do_read_format_capacities(common, bh); do_scsi_command()
1985 reply = check_command(common, 6, DATA_DIR_TO_HOST, do_scsi_command()
1988 if (reply == 0) do_scsi_command()
1989 reply = do_request_sense(common, bh); do_scsi_command()
1994 reply = check_command(common, 6, DATA_DIR_NONE, do_scsi_command()
1997 if (reply == 0) do_scsi_command()
1998 reply = do_start_stop(common); do_scsi_command()
2003 reply = check_command(common, 10, DATA_DIR_NONE, do_scsi_command()
2006 if (reply == 0) do_scsi_command()
2007 reply = do_synchronize_cache(common); do_scsi_command()
2012 reply = check_command(common, 6, DATA_DIR_NONE, do_scsi_command()
2023 reply = check_command(common, 10, DATA_DIR_NONE, do_scsi_command()
2026 if (reply == 0) do_scsi_command()
2027 reply = do_verify(common); do_scsi_command()
2033 reply = check_command_size_in_blocks(common, 6, do_scsi_command()
2037 if (reply == 0) do_scsi_command()
2038 reply = do_write(common); do_scsi_command()
2044 reply = check_command_size_in_blocks(common, 10, do_scsi_command()
2048 if (reply == 0) do_scsi_command()
2049 reply = do_write(common); do_scsi_command()
2055 reply = check_command_size_in_blocks(common, 12, do_scsi_command()
2059 if (reply == 0) do_scsi_command()
2060 reply = do_write(common); do_scsi_command()
2079 reply = check_command(common, common->cmnd_size, do_scsi_command()
2081 if (reply == 0) { do_scsi_command()
2083 reply = -EINVAL; do_scsi_command()
2089 if (reply == -EINTR || signal_pending(current)) do_scsi_command()
2092 /* Set up the single reply buffer for finish_reply() */ do_scsi_command()
2093 if (reply == -EINVAL) do_scsi_command()
2094 reply = 0; /* Error reply length */ do_scsi_command()
2095 if (reply >= 0 && common->data_dir == DATA_DIR_TO_HOST) { do_scsi_command()
2096 reply = min((u32)reply, common->data_size_from_cmnd); do_scsi_command()
2097 bh->inreq->length = reply; do_scsi_command()
2099 common->residue -= reply; do_scsi_command()
/linux-4.4.14/drivers/macintosh/
H A Dvia-pmu68k.c290 req->reply[0] = ADB_RET_OK; pmu_send_request()
304 req->reply[0] = CUDA_PACKET; pmu_send_request()
305 req->reply[1] = 0; pmu_send_request()
306 req->reply[2] = CUDA_GET_TIME; pmu_send_request()
317 req->reply[0] = CUDA_PACKET; pmu_send_request()
318 req->reply[1] = 0; pmu_send_request()
319 req->reply[2] = CUDA_SET_TIME; pmu_send_request()
330 req->reply[0] = CUDA_PACKET; pmu_send_request()
331 req->reply[1] = 0; pmu_send_request()
332 req->reply[2] = CUDA_GET_PRAM; pmu_send_request()
344 req->reply[0] = CUDA_PACKET; pmu_send_request()
345 req->reply[1] = 0; pmu_send_request()
346 req->reply[2] = CUDA_SET_PRAM; pmu_send_request()
465 req->reply[0] = ADB_RET_OK; pmu_request()
624 reply_ptr = req->reply + req->reply_len; pmu_interrupt()
642 printk(KERN_ERR "PMU: bad reply len %d\n", pmu_interrupt()
710 printk(KERN_ERR "PMU: extra ADB reply\n"); pmu_handle_data()
717 memcpy(req->reply, data + 1, len - 1); pmu_handle_data()
759 printk(KERN_DEBUG "pmu: nvram returned bright: %d\n", (int)req.reply[1]); pmu_enable_backlight()
760 backlight_level = req.reply[1]; pmu_enable_backlight()
H A Dvia-maciisi.c227 /* Send a request, possibly waiting for a reply */
482 reply_ptr = current_req->reply; maciisi_interrupt()
514 /* Shift in, we are about to read a reply (hopefully) */ maciisi_interrupt()
553 printk(KERN_ERR "maciisi_interrupt: reply too long, aborting read\n"); maciisi_interrupt()
585 req->reply_len = reply_ptr - req->reply; maciisi_interrupt()
587 /* Have to adjust the reply from ADB commands */ maciisi_interrupt()
588 if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) { maciisi_interrupt()
592 /* leave just the command and result bytes in the reply */ maciisi_interrupt()
594 memmove(req->reply, req->reply + 2, req->reply_len); maciisi_interrupt()
600 printk(KERN_DEBUG "maciisi_interrupt: reply is "); maciisi_interrupt()
602 printk(" %.2x", req->reply[i]); maciisi_interrupt()
H A Dvia-pmu.c16 * - Cleanup atomically disabling reply to PMU events after
572 pmu_version = req.reply[0]; init_pmu()
580 if (req.reply[1] & PMU_PWR_WAKEUP_AC_INSERT) init_pmu()
610 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); pmu_set_server_mode()
614 req.reply[0], PMU_PWR_WAKEUP_AC_INSERT); pmu_set_server_mode()
649 if (req->reply[0] & 0x01) done_battery_state_ohare()
666 if (req->reply[0] & 0x04) { done_battery_state_ohare()
668 if (req->reply[0] & 0x02) done_battery_state_ohare()
670 vb = (req->reply[1] << 8) | req->reply[2]; done_battery_state_ohare()
672 amperage = req->reply[5]; done_battery_state_ohare()
673 if ((req->reply[0] & 0x01) == 0) { done_battery_state_ohare()
676 } else if (req->reply[0] & 0x02) { done_battery_state_ohare()
681 if (req->reply[0] & 0x40) { done_battery_state_ohare()
682 pcharge = (req->reply[6] << 8) + req->reply[7]; done_battery_state_ohare()
735 if (req->reply[1] & 0x01) done_battery_state_smart()
743 if (req->reply[1] & 0x04) { done_battery_state_smart()
745 switch(req->reply[0]) { done_battery_state_smart()
747 case 4: capa = req->reply[2]; done_battery_state_smart()
748 max = req->reply[3]; done_battery_state_smart()
749 amperage = *((signed char *)&req->reply[4]); done_battery_state_smart()
750 voltage = req->reply[5]; done_battery_state_smart()
752 case 5: capa = (req->reply[2] << 8) | req->reply[3]; done_battery_state_smart()
753 max = (req->reply[4] << 8) | req->reply[5]; done_battery_state_smart()
754 amperage = *((signed short *)&req->reply[6]); done_battery_state_smart()
755 voltage = (req->reply[8] << 8) | req->reply[9]; done_battery_state_smart()
760 req->reply); done_battery_state_smart()
765 if ((req->reply[1] & 0x01) && (amperage > 0)) done_battery_state_smart()
774 if ((req->reply[1] & 0x01) && (amperage > 0)) done_battery_state_smart()
979 req->reply[0] = ADB_RET_OK; pmu_send_request()
993 req->reply[0] = CUDA_PACKET; pmu_send_request()
994 req->reply[1] = 0; pmu_send_request()
995 req->reply[2] = CUDA_GET_TIME; pmu_send_request()
1006 req->reply[0] = CUDA_PACKET; pmu_send_request()
1007 req->reply[1] = 0; pmu_send_request()
1008 req->reply[2] = CUDA_SET_TIME; pmu_send_request()
1384 printk(KERN_ERR "PMU: extra ADB reply\n"); pmu_handle_data()
1391 memcpy(req->reply, data + 1, len - 1); pmu_handle_data()
1497 reply_ptr = req->reply + req->reply_len; pmu_sr_intr()
1519 printk(KERN_ERR "PMU: bad reply len %d\n", bite); pmu_sr_intr()
2488 req->reply[i + req->reply_len] = polled_recv_byte(v); pmu_polled_request()
2511 req.reply[0] = ADB_RET_OK; pmu_blink()
2522 req.reply[0] = ADB_RET_OK; pmu_blink()
H A Dadb-iop.c124 /* we've gotten a valid reply to a TALK, so I'm assuming that */ adb_iop_listen()
130 msg->reply[0] = ADB_IOP_TIMEOUT | ADB_IOP_AUTOPOLL; adb_iop_listen()
131 msg->reply[1] = 0; adb_iop_listen()
132 msg->reply[2] = 0; adb_iop_listen()
143 memcpy(req->reply, &amsg->cmd, req->reply_len); adb_iop_listen()
148 memcpy(msg->reply, msg->message, IOP_MSG_LEN); adb_iop_listen()
H A Dvia-cuda.c491 /* CUDA has sent us the first byte of data of a reply */ cuda_interrupt()
497 reply_ptr = current_req->reply; cuda_interrupt()
559 req->reply_len = reply_ptr - req->reply; cuda_interrupt()
561 /* Have to adjust the reply from ADB commands */ cuda_interrupt()
562 if (req->reply_len <= 2 || (req->reply[1] & 2) != 0) { cuda_interrupt()
566 /* leave just the command and result bytes in the reply */ cuda_interrupt()
568 memmove(req->reply, req->reply + 2, req->reply_len); cuda_interrupt()
H A Dadbhid.c1054 (req.reply[1] == 0x9a) && ((req.reply[2] == 0x21) adbhid_probe()
1055 || (req.reply[2] == 0x20))) { adbhid_probe()
1060 (req.reply[1] == 0x74) && (req.reply[2] == 0x70) && adbhid_probe()
1061 (req.reply[3] == 0x61) && (req.reply[4] == 0x64)) { adbhid_probe()
1066 (req.reply[1] == 0x4b) && (req.reply[2] == 0x4d) && adbhid_probe()
1067 (req.reply[3] == 0x4c) && (req.reply[4] == 0x31)) { adbhid_probe()
1072 (req.reply[1] == 0x4b) && (req.reply[2] == 0x4f) && adbhid_probe()
1073 (req.reply[3] == 0x49) && (req.reply[4] == 0x54)) { adbhid_probe()
1103 memcpy(r1_buffer, &req.reply[1], 8); init_trackpad()
H A Dadb.c120 printk("adb reply (%d)", req->reply_len);
122 printk(" %x", req->reply[i]);
215 adb_handler[i].handler_id = req.reply[2]; adb_scan_bus()
572 if (req.reply[2] != new_id) try_handler_change()
574 adb_handler[address].handler_id = req.reply[2]; try_handler_change()
656 req->reply[0] = adb_handler[req->data[2]].original_address; do_adb_query()
657 req->reply[1] = adb_handler[req->data[2]].handler_id; do_adb_query()
728 if (count > sizeof(req->reply)) adb_read()
729 count = sizeof(req->reply); adb_read()
770 if (ret > 0 && copy_to_user(buf, req->reply, ret)) adb_read()
H A Dvia-macii.c112 static unsigned char *reply_ptr; /* next byte in reply_buf or req->reply */
113 static int reading_reply; /* store reply in reply_buf else req->reply */
115 static int reply_len; /* number of bytes received in reply_buf or req->reply */
233 /* Send an ADB request; if sync, poll out the reply 'till it's done */ macii_send_request()
408 reply_ptr = current_req->reply; macii_interrupt()
/linux-4.4.14/fs/nfs/
H A Dnfs3proc.c73 dprintk("%s: reply fsinfo: %d\n", __func__, status); do_proc_get_root()
78 dprintk("%s: reply getattr: %d\n", __func__, status); do_proc_get_root()
115 dprintk("NFS reply getattr: %d\n", status); nfs3_proc_getattr()
142 dprintk("NFS reply setattr: %d\n", status); nfs3_proc_setattr()
182 dprintk("NFS reply lookup: %d\n", status); nfs3_proc_lookup()
234 dprintk("NFS reply access: %d\n", status); nfs3_proc_access()
264 dprintk("NFS reply readlink: %d\n", status); nfs3_proc_readlink()
390 dprintk("NFS reply setattr (post-create): %d\n", status); nfs3_proc_create()
402 dprintk("NFS reply create: %d\n", status); nfs3_proc_create()
430 dprintk("NFS reply remove: %d\n", status); nfs3_proc_remove()
511 dprintk("NFS reply link: %d\n", status); nfs3_proc_link()
542 dprintk("NFS reply symlink: %d\n", status); nfs3_proc_symlink()
580 dprintk("NFS reply mkdir: %d\n", status); nfs3_proc_mkdir()
609 dprintk("NFS reply rmdir: %d\n", status); nfs3_proc_rmdir()
617 * sure the reply is syntactically correct.
665 dprintk("NFS reply readdir%s: %d\n", nfs3_proc_readdir()
725 dprintk("NFS reply mknod: %d\n", status); nfs3_proc_mknod()
743 dprintk("NFS reply fsstat: %d\n", status); nfs3_proc_statfs()
761 dprintk("NFS reply fsinfo: %d\n", status); do_proc_fsinfo()
795 dprintk("NFS reply pathconf: %d\n", status); nfs3_proc_pathconf()
H A Dproc.c71 dprintk("%s: reply getattr: %d\n", __func__, status); nfs_proc_get_root()
81 dprintk("%s: reply statfs: %d\n", __func__, status); nfs_proc_get_root()
113 dprintk("NFS reply getattr: %d\n", status); nfs_proc_getattr()
143 dprintk("NFS reply setattr: %d\n", status); nfs_proc_setattr()
171 dprintk("NFS reply lookup: %d\n", status); nfs_proc_lookup()
192 dprintk("NFS reply readlink: %d\n", status); nfs_proc_readlink()
250 dprintk("NFS reply create: %d\n", status); nfs_proc_create()
297 dprintk("NFS reply mknod: %d\n", status); nfs_proc_mknod()
318 dprintk("NFS reply remove: %d\n", status); nfs_proc_remove()
378 dprintk("NFS reply link: %d\n", status); nfs_proc_link()
428 dprintk("NFS reply symlink: %d\n", status); nfs_proc_symlink()
454 dprintk("NFS reply mkdir: %d\n", status); nfs_proc_mkdir()
475 dprintk("NFS reply rmdir: %d\n", status); nfs_proc_rmdir()
482 * the receive iovec. The decode function just parses the reply to make
509 dprintk("NFS reply readdir: %d\n", status); nfs_proc_readdir()
528 dprintk("NFS reply statfs: %d\n", status); nfs_proc_statfs()
556 dprintk("NFS reply fsinfo: %d\n", status); nfs_proc_fsinfo()
/linux-4.4.14/arch/m68k/include/asm/
H A Dmac_iop.h53 #define IOP_MSGSTATUS_SENT 2 /* message sent, awaiting reply */
54 #define IOP_MSGSTATUS_COMPLETE 3 /* message complete and reply rcvd */
145 __u8 reply[IOP_MSG_LEN]; /* the reply to the message */ member in struct:iop_msg
147 /* function to call when reply recvd */
/linux-4.4.14/drivers/scsi/mpt3sas/
H A Dmpt3sas_base.c350 * @mpi_reply: reply mf payload returned from firmware
567 * @mpi_reply: reply mf payload returned from firmware
720 * @reply: reply message frame(lower 32bit addr)
726 u32 reply) _base_display_reply_info()
732 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); _base_display_reply_info()
762 * @reply: reply message frame(lower 32bit addr)
769 u32 reply) mpt3sas_base_done()
773 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); mpt3sas_base_done()
783 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); mpt3sas_base_done()
795 * @reply: reply message frame(lower 32bit addr)
801 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) _base_async_event() argument
807 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); _base_async_event()
836 mpt3sas_scsih_event_callback(ioc, msix_index, reply); _base_async_event()
839 mpt3sas_ctl_event_callback(ioc, msix_index, reply); _base_async_event()
935 u32 reply; _base_interrupt() local
961 reply = 0; _base_interrupt()
977 reply = le32_to_cpu( _base_interrupt()
979 if (reply > ioc->reply_dma_max_address || _base_interrupt()
980 reply < ioc->reply_dma_min_address) _base_interrupt()
981 reply = 0; _base_interrupt()
987 msix_index, reply); _base_interrupt()
988 if (reply) _base_interrupt()
990 smid, msix_index, reply); _base_interrupt()
996 _base_async_event(ioc, msix_index, reply); _base_interrupt()
999 /* reply free queue handling */ _base_interrupt()
1000 if (reply) { _base_interrupt()
1006 cpu_to_le32(reply); _base_interrupt()
1046 * For those HBA's which support combined reply queue feature _base_interrupt()
1050 * 2. Then update this register with new reply host index value _base_interrupt()
1057 * new reply host index value in ReplyPostIndex Field and msix_index _base_interrupt()
1073 * _base_is_controller_msix_enabled - is controller support muli-reply queues
1085 * mpt3sas_base_flush_reply_queues - flushing the MSIX reply queues
1090 * to flush the other reply queues so all the outstanding IO has been
2128 /* Use the Combined reply queue feature only for SAS3 C0 & higher mpt3sas_base_map_resources()
2129 * revision HBAs and also only when reply queue count is greater than 8 mpt3sas_base_map_resources()
2143 "allocation for reply Post Register Index failed!!!\n", mpt3sas_base_map_resources()
2218 * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
2220 * @phys_addr: lower 32 physical addr of the reply
2229 return ioc->reply + (phys_addr - (u32)ioc->reply_dma); mpt3sas_base_get_reply_virt_addr()
3067 if (ioc->reply) { _base_release_memory_pools()
3068 pci_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma); _base_release_memory_pools()
3073 ioc->name, ioc->reply)); _base_release_memory_pools()
3074 ioc->reply = NULL; _base_release_memory_pools()
3204 /* reply frame size */ _base_allocate_memory_pools()
3235 /* reply free queue sizing - taking into account for 64 FW events */ _base_allocate_memory_pools()
3238 /* calculate reply descriptor post queue depth */ _base_allocate_memory_pools()
3241 /* align the reply post queue on the next 16 count boundary */ _base_allocate_memory_pools()
3263 /* reply post queue, 16 byte align */ _base_allocate_memory_pools()
3302 "reply post free pool (0x%p): depth(%d)," _base_allocate_memory_pools()
3485 /* reply pool, 4 byte align */ _base_allocate_memory_pools()
3487 ioc->reply_dma_pool = pci_pool_create("reply pool", ioc->pdev, sz, 4, _base_allocate_memory_pools()
3490 pr_err(MPT3SAS_FMT "reply pool: pci_pool_create failed\n", _base_allocate_memory_pools()
3494 ioc->reply = pci_pool_alloc(ioc->reply_dma_pool , GFP_KERNEL, _base_allocate_memory_pools()
3496 if (!ioc->reply) { _base_allocate_memory_pools()
3497 pr_err(MPT3SAS_FMT "reply pool: pci_pool_alloc failed\n", _base_allocate_memory_pools()
3504 "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n", _base_allocate_memory_pools()
3505 ioc->name, ioc->reply, _base_allocate_memory_pools()
3511 /* reply free queue, 16 byte align */ _base_allocate_memory_pools()
3805 * @reply_bytes: reply length
3806 * @reply: pointer to reply payload
3814 u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag) _base_handshake_req_reply_wait()
3816 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply; _base_handshake_req_reply_wait()
3869 /* now wait for the reply */ _base_handshake_req_reply_wait()
3877 /* read the first two 16-bits, it gives the total length of the reply */ _base_handshake_req_reply_wait()
3878 reply[0] = le16_to_cpu(readl(&ioc->chip->Doorbell) _base_handshake_req_reply_wait()
3887 reply[1] = le16_to_cpu(readl(&ioc->chip->Doorbell) _base_handshake_req_reply_wait()
3901 reply[i] = le16_to_cpu(readl(&ioc->chip->Doorbell) _base_handshake_req_reply_wait()
3914 mfp = (__le32 *)reply; _base_handshake_req_reply_wait()
3926 * @mpi_reply: the reply payload from FW
4013 memcpy(mpi_reply, ioc->base_cmds.reply, mpt3sas_base_sas_iounit_control()
4034 * @mpi_reply: the reply payload from FW
4111 memcpy(mpi_reply, ioc->base_cmds.reply, mpt3sas_base_scsi_enclosure_processor()
4130 * _base_get_port_facts - obtain port facts reply and save in ioc
4231 * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
4308 "request frame size(%d), reply frame size(%d)\n", ioc->name, _base_get_ioc_facts()
4434 * @reply: reply message frame(lower 32bit addr)
4441 u32 reply) mpt3sas_port_enable_done()
4449 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); mpt3sas_port_enable_done()
4459 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); mpt3sas_port_enable_done()
4532 mpi_reply = ioc->port_enable_cmds.reply; _base_send_port_enable()
4549 * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
5044 /* initialize reply queues */ _base_make_ioc_operational()
5077 /* initialize reply free host index */ _base_make_ioc_operational()
5081 /* initialize reply post host index */ _base_make_ioc_operational()
5293 ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); mpt3sas_base_attach()
5297 ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); mpt3sas_base_attach()
5301 ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); mpt3sas_base_attach()
5306 ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); mpt3sas_base_attach()
5311 ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); mpt3sas_base_attach()
5316 ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); mpt3sas_base_attach()
5321 ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL); mpt3sas_base_attach()
5326 if (!ioc->base_cmds.reply || !ioc->transport_cmds.reply || mpt3sas_base_attach()
5327 !ioc->scsih_cmds.reply || !ioc->tm_cmds.reply || mpt3sas_base_attach()
5328 !ioc->config_cmds.reply || !ioc->ctl_cmds.reply || mpt3sas_base_attach()
5369 kfree(ioc->tm_cmds.reply); mpt3sas_base_attach()
5370 kfree(ioc->transport_cmds.reply); mpt3sas_base_attach()
5371 kfree(ioc->scsih_cmds.reply); mpt3sas_base_attach()
5372 kfree(ioc->config_cmds.reply); mpt3sas_base_attach()
5373 kfree(ioc->base_cmds.reply); mpt3sas_base_attach()
5374 kfree(ioc->port_enable_cmds.reply); mpt3sas_base_attach()
5375 kfree(ioc->ctl_cmds.reply); mpt3sas_base_attach()
5378 ioc->ctl_cmds.reply = NULL; mpt3sas_base_attach()
5379 ioc->base_cmds.reply = NULL; mpt3sas_base_attach()
5380 ioc->tm_cmds.reply = NULL; mpt3sas_base_attach()
5381 ioc->scsih_cmds.reply = NULL; mpt3sas_base_attach()
5382 ioc->transport_cmds.reply = NULL; mpt3sas_base_attach()
5383 ioc->config_cmds.reply = NULL; mpt3sas_base_attach()
5411 kfree(ioc->ctl_cmds.reply); mpt3sas_base_detach()
5413 kfree(ioc->base_cmds.reply); mpt3sas_base_detach()
5414 kfree(ioc->port_enable_cmds.reply); mpt3sas_base_detach()
5415 kfree(ioc->tm_cmds.reply); mpt3sas_base_detach()
5416 kfree(ioc->transport_cmds.reply); mpt3sas_base_detach()
5417 kfree(ioc->scsih_cmds.reply); mpt3sas_base_detach()
5418 kfree(ioc->config_cmds.reply); mpt3sas_base_detach()
725 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) _base_display_reply_info() argument
768 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) mpt3sas_base_done() argument
3813 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes, u32 *request, int reply_bytes, u16 *reply, int timeout, int sleep_flag) _base_handshake_req_reply_wait() argument
4440 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) mpt3sas_port_enable_done() argument
H A Dmpt3sas_config.c91 * @mpi_reply: reply message frame
230 * @reply: reply message frame(lower 32bit addr)
240 u32 reply) mpt3sas_config_done()
249 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); mpt3sas_config_done()
252 memcpy(ioc->config_cmds.reply, mpi_reply, mpt3sas_config_done()
266 * @mpi_reply: reply mf payload returned from firmware
408 memcpy(mpi_reply, ioc->config_cmds.reply, _config_request()
502 * @mpi_reply: reply mf payload returned from firmware
538 * @mpi_reply: reply mf payload returned from firmware
576 * @mpi_reply: reply mf payload returned from firmware
613 * @mpi_reply: reply mf payload returned from firmware
650 * @mpi_reply: reply mf payload returned from firmware
691 * @mpi_reply: reply mf payload returned from firmware
727 * @mpi_reply: reply mf payload returned from firmware
763 * @mpi_reply: reply mf payload returned from firmware
799 * @mpi_reply: reply mf payload returned from firmware
835 * @mpi_reply: reply mf payload returned from firmware
871 * @mpi_reply: reply mf payload returned from firmware
907 * @mpi_reply: reply mf payload returned from firmware
943 * @mpi_reply: reply mf payload returned from firmware
979 * @mpi_reply: reply mf payload returned from firmware
1020 * @mpi_reply: reply mf payload returned from firmware
1106 * @mpi_reply: reply mf payload returned from firmware
1147 * @mpi_reply: reply mf payload returned from firmware
1188 * @mpi_reply: reply mf payload returned from firmware
1232 * @mpi_reply: reply mf payload returned from firmware
1272 * @mpi_reply: reply mf payload returned from firmware
1315 * @mpi_reply: reply mf payload returned from firmware
1355 * @mpi_reply: reply mf payload returned from firmware
1395 * @mpi_reply: reply mf payload returned from firmware
1435 * @mpi_reply: reply mf payload returned from firmware
1524 * @mpi_reply: reply mf payload returned from firmware
1564 * @mpi_reply: reply mf payload returned from firmware
239 mpt3sas_config_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) mpt3sas_config_done() argument
H A Dmpt3sas_base.h406 #define MPT3_CMD_REPLY_VALID 0x0004 /* reply is valid */
413 * @reply: reply message pointer
421 void *reply; member in struct:_internal_cmd
681 * struct adapter_reply_queue - the reply queue struct
686 * @reply_post_free: reply post base virt address
902 * @reply_depth: hba reply queue depth:
903 * @reply_sz: per reply frame size:
904 * @reply: pool of replies:
907 * @reply_free_queue_depth: reply free depth
908 * @reply_free: pool for reply free queue (32 bit addr)
912 * @reply_post_queue_depth: reply post queue depth
914 * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init
918 * @reply_queue_count: number of reply queue's
919 * @reply_queue_list: link list contaning the reply queue info
1114 /* reply */
1116 u8 *reply; member in struct:MPT3SAS_ADAPTER
1122 /* reply free queue */
1129 /* reply post queue */
1140 /* reply post register index */
1171 u32 reply);
1223 u32 reply);
1225 u8 msix_index, u32 reply);
1251 u32 reply);
1278 u32 reply);
1363 u32 reply);
1366 u8 msix_index, u32 reply);
1378 u32 reply);
H A Dmpt3sas_ctl.c112 * @mpi_reply: reply message frame
261 * @reply: reply message frame(lower 32bit addr)
271 u32 reply) mpt3sas_ctl_done()
283 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); mpt3sas_ctl_done()
285 memcpy(ioc->ctl_cmds.reply, mpi_reply, mpi_reply->MsgLength*4); mpt3sas_ctl_done()
337 * @mpi_reply: reply message frame
388 * @reply: reply message frame(lower 32bit addr)
399 u32 reply) mpt3sas_ctl_event_callback()
403 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); mpt3sas_ctl_event_callback()
544 * during failure, the reply frame is filled.
594 tm_reply = ioc->ctl_cmds.reply; _ctl_set_task_mid()
602 if (copy_to_user(karg->reply_frame_buf_ptr, ioc->ctl_cmds.reply, _ctl_set_task_mid()
719 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); _ctl_do_mpt_command()
925 mpi_reply = ioc->ctl_cmds.reply; _ctl_do_mpt_command()
952 /* copy out reply message frame to user */ _ctl_do_mpt_command()
955 if (copy_to_user(karg.reply_frame_buf_ptr, ioc->ctl_cmds.reply, _ctl_do_mpt_command()
1427 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); _ctl_diag_register_2()
1501 pr_err(MPT3SAS_FMT "%s: no reply message\n", _ctl_diag_register_2()
1507 mpi_reply = ioc->ctl_cmds.reply; _ctl_diag_register_2()
1824 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); mpt3sas_send_diag_release()
1851 pr_err(MPT3SAS_FMT "%s: no reply message\n", mpt3sas_send_diag_release()
1857 mpi_reply = ioc->ctl_cmds.reply; mpt3sas_send_diag_release()
2085 memset(ioc->ctl_cmds.reply, 0, ioc->reply_sz); _ctl_diag_read_buffer()
2118 pr_err(MPT3SAS_FMT "%s: no reply message\n", _ctl_diag_read_buffer()
2124 mpi_reply = ioc->ctl_cmds.reply; _ctl_diag_read_buffer()
2759 * _ctl_ioc_reply_queue_count_show - number of reply queues
2763 * This is number of reply queues
2791 * This is number of reply queues
270 mpt3sas_ctl_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) mpt3sas_ctl_done() argument
398 mpt3sas_ctl_event_callback(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply) mpt3sas_ctl_event_callback() argument
H A Dmpt3sas_transport.c224 * @reply: reply message frame(lower 32bit addr)
234 u32 reply) mpt3sas_transport_done()
238 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply); mpt3sas_transport_done()
245 memcpy(ioc->transport_cmds.reply, mpi_reply, mpt3sas_transport_done()
262 /* report manufacture reply structure */
416 mpi_reply = ioc->transport_cmds.reply; _transport_expander_report_manufacture()
419 "report_manufacture - reply data transfer size(%d)\n", _transport_expander_report_manufacture()
445 "report_manufacture - no reply\n", ioc->name)); _transport_expander_report_manufacture()
1070 /* report phy error log reply structure */
1219 mpi_reply = ioc->transport_cmds.reply; _transport_get_expander_phy_error_log()
1222 "phy_error_log - reply data transfer size(%d)\n", _transport_get_expander_phy_error_log()
1247 "phy_error_log - no reply\n", ioc->name)); _transport_get_expander_phy_error_log()
1388 /* phy control reply structure */
1553 mpi_reply = ioc->transport_cmds.reply; _transport_expander_phy_control()
1556 "phy_control - reply data transfer size(%d)\n", _transport_expander_phy_control()
1573 "phy_control - no reply\n", ioc->name)); _transport_expander_phy_control()
2074 mpi_reply = ioc->transport_cmds.reply; _transport_smp_handler()
2077 "%s - reply data transfer size(%d)\n", _transport_smp_handler()
2110 "%s - no reply\n", ioc->name, __func__)); _transport_smp_handler()
233 mpt3sas_transport_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply) mpt3sas_transport_done() argument
/linux-4.4.14/drivers/s390/net/
H A Dlcs.c775 lcs_get_reply(struct lcs_reply *reply) lcs_get_reply() argument
777 WARN_ON(atomic_read(&reply->refcnt) <= 0); lcs_get_reply()
778 atomic_inc(&reply->refcnt); lcs_get_reply()
782 lcs_put_reply(struct lcs_reply *reply) lcs_put_reply() argument
784 WARN_ON(atomic_read(&reply->refcnt) <= 0); lcs_put_reply()
785 if (atomic_dec_and_test(&reply->refcnt)) { lcs_put_reply()
786 kfree(reply); lcs_put_reply()
794 struct lcs_reply *reply; lcs_alloc_reply() local
798 reply = kzalloc(sizeof(struct lcs_reply), GFP_ATOMIC); lcs_alloc_reply()
799 if (!reply) lcs_alloc_reply()
801 atomic_set(&reply->refcnt,1); lcs_alloc_reply()
802 reply->sequence_no = cmd->sequence_no; lcs_alloc_reply()
803 reply->received = 0; lcs_alloc_reply()
804 reply->rc = 0; lcs_alloc_reply()
805 init_waitqueue_head(&reply->wait_q); lcs_alloc_reply()
807 return reply; lcs_alloc_reply()
817 struct lcs_reply *reply; lcs_notify_lancmd_waiters() local
822 reply = list_entry(l, struct lcs_reply, list); lcs_notify_lancmd_waiters()
823 if (reply->sequence_no == cmd->sequence_no) { lcs_notify_lancmd_waiters()
824 lcs_get_reply(reply); lcs_notify_lancmd_waiters()
825 list_del_init(&reply->list); lcs_notify_lancmd_waiters()
826 if (reply->callback != NULL) lcs_notify_lancmd_waiters()
827 reply->callback(card, cmd); lcs_notify_lancmd_waiters()
828 reply->received = 1; lcs_notify_lancmd_waiters()
829 reply->rc = cmd->return_code; lcs_notify_lancmd_waiters()
830 wake_up(&reply->wait_q); lcs_notify_lancmd_waiters()
831 lcs_put_reply(reply); lcs_notify_lancmd_waiters()
844 struct lcs_reply *reply, *list_reply, *r; lcs_lancmd_timeout() local
848 reply = (struct lcs_reply *) data; lcs_lancmd_timeout()
849 spin_lock_irqsave(&reply->card->lock, flags); lcs_lancmd_timeout()
851 &reply->card->lancmd_waiters,list) { lcs_lancmd_timeout()
852 if (reply == list_reply) { lcs_lancmd_timeout()
853 lcs_get_reply(reply); lcs_lancmd_timeout()
854 list_del_init(&reply->list); lcs_lancmd_timeout()
855 spin_unlock_irqrestore(&reply->card->lock, flags); lcs_lancmd_timeout()
856 reply->received = 1; lcs_lancmd_timeout()
857 reply->rc = -ETIME; lcs_lancmd_timeout()
858 wake_up(&reply->wait_q); lcs_lancmd_timeout()
859 lcs_put_reply(reply); lcs_lancmd_timeout()
863 spin_unlock_irqrestore(&reply->card->lock, flags); lcs_lancmd_timeout()
870 struct lcs_reply *reply; lcs_send_lancmd() local
880 reply = lcs_alloc_reply(cmd); lcs_send_lancmd()
881 if (!reply) lcs_send_lancmd()
883 reply->callback = reply_callback; lcs_send_lancmd()
884 reply->card = card; lcs_send_lancmd()
886 list_add_tail(&reply->list, &card->lancmd_waiters); lcs_send_lancmd()
895 timer.data = (unsigned long) reply; lcs_send_lancmd()
898 wait_event(reply->wait_q, reply->received); lcs_send_lancmd()
901 LCS_DBF_TEXT_(4, trace, "rc:%d",reply->rc); lcs_send_lancmd()
902 rc = reply->rc; lcs_send_lancmd()
903 lcs_put_reply(reply); lcs_send_lancmd()
H A Dqeth_core_main.c554 struct qeth_reply *reply; qeth_alloc_reply() local
556 reply = kzalloc(sizeof(struct qeth_reply), GFP_ATOMIC); qeth_alloc_reply()
557 if (reply) { qeth_alloc_reply()
558 atomic_set(&reply->refcnt, 1); qeth_alloc_reply()
559 atomic_set(&reply->received, 0); qeth_alloc_reply()
560 reply->card = card; qeth_alloc_reply()
562 return reply; qeth_alloc_reply()
565 static void qeth_get_reply(struct qeth_reply *reply) qeth_get_reply() argument
567 WARN_ON(atomic_read(&reply->refcnt) <= 0); qeth_get_reply()
568 atomic_inc(&reply->refcnt); qeth_get_reply()
571 static void qeth_put_reply(struct qeth_reply *reply) qeth_put_reply() argument
573 WARN_ON(atomic_read(&reply->refcnt) <= 0); qeth_put_reply()
574 if (atomic_dec_and_test(&reply->refcnt)) qeth_put_reply()
575 kfree(reply); qeth_put_reply()
666 "but not a reply!\n"); qeth_check_ipa_data()
676 struct qeth_reply *reply, *r; qeth_clear_ipacmd_list() local
682 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { qeth_clear_ipacmd_list()
683 qeth_get_reply(reply); qeth_clear_ipacmd_list()
684 reply->rc = -EIO; qeth_clear_ipacmd_list()
685 atomic_inc(&reply->received); qeth_clear_ipacmd_list()
686 list_del_init(&reply->list); qeth_clear_ipacmd_list()
687 wake_up(&reply->wait_q); qeth_clear_ipacmd_list()
688 qeth_put_reply(reply); qeth_clear_ipacmd_list()
815 struct qeth_reply *reply, *r; qeth_send_control_data_cb() local
848 list_for_each_entry_safe(reply, r, &card->cmd_waiter_list, list) { qeth_send_control_data_cb()
849 if ((reply->seqno == QETH_IDX_COMMAND_SEQNO) || qeth_send_control_data_cb()
850 ((cmd) && (reply->seqno == cmd->hdr.seqno))) { qeth_send_control_data_cb()
851 qeth_get_reply(reply); qeth_send_control_data_cb()
852 list_del_init(&reply->list); qeth_send_control_data_cb()
855 if (reply->callback != NULL) { qeth_send_control_data_cb()
857 reply->offset = (__u16)((char *)cmd - qeth_send_control_data_cb()
859 keep_reply = reply->callback(card, qeth_send_control_data_cb()
860 reply, qeth_send_control_data_cb()
863 keep_reply = reply->callback(card, qeth_send_control_data_cb()
864 reply, qeth_send_control_data_cb()
868 reply->rc = (u16) cmd->hdr.return_code; qeth_send_control_data_cb()
870 reply->rc = iob->rc; qeth_send_control_data_cb()
873 list_add_tail(&reply->list, qeth_send_control_data_cb()
877 atomic_inc(&reply->received); qeth_send_control_data_cb()
878 wake_up(&reply->wait_q); qeth_send_control_data_cb()
880 qeth_put_reply(reply); qeth_send_control_data_cb()
1929 " negative reply\n", qeth_idx_write_cb()
1977 " negative reply\n", qeth_idx_read_cb()
2040 * function must return non-zero if more reply blocks are expected,
2041 * and zero if the last or only reply block is received. Callback
2055 struct qeth_reply *reply = NULL; qeth_send_control_data() local
2065 reply = qeth_alloc_reply(card); qeth_send_control_data()
2066 if (!reply) { qeth_send_control_data()
2069 reply->callback = reply_cb; qeth_send_control_data()
2070 reply->param = reply_param; qeth_send_control_data()
2072 reply->seqno = QETH_IDX_COMMAND_SEQNO; qeth_send_control_data()
2074 reply->seqno = card->seqno.ipa++; qeth_send_control_data()
2075 init_waitqueue_head(&reply->wait_q); qeth_send_control_data()
2077 list_add_tail(&reply->list, &card->cmd_waiter_list); qeth_send_control_data()
2101 list_del_init(&reply->list); qeth_send_control_data()
2102 qeth_put_reply(reply); qeth_send_control_data()
2115 if (!wait_event_timeout(reply->wait_q, qeth_send_control_data()
2116 atomic_read(&reply->received), event_timeout)) qeth_send_control_data()
2119 while (!atomic_read(&reply->received)) { qeth_send_control_data()
2126 if (reply->rc == -EIO) qeth_send_control_data()
2128 rc = reply->rc; qeth_send_control_data()
2129 qeth_put_reply(reply); qeth_send_control_data()
2133 reply->rc = -ETIME; qeth_send_control_data()
2134 spin_lock_irqsave(&reply->card->lock, flags); qeth_send_control_data()
2135 list_del_init(&reply->list); qeth_send_control_data()
2136 spin_unlock_irqrestore(&reply->card->lock, flags); qeth_send_control_data()
2137 atomic_inc(&reply->received); qeth_send_control_data()
2142 rc = reply->rc; qeth_send_control_data()
2143 qeth_put_reply(reply); qeth_send_control_data()
2148 static int qeth_cm_enable_cb(struct qeth_card *card, struct qeth_reply *reply, qeth_cm_enable_cb() argument
2182 static int qeth_cm_setup_cb(struct qeth_card *card, struct qeth_reply *reply, qeth_cm_setup_cb() argument
2274 static int qeth_ulp_enable_cb(struct qeth_card *card, struct qeth_reply *reply, qeth_ulp_enable_cb() argument
2361 static int qeth_ulp_setup_cb(struct qeth_card *card, struct qeth_reply *reply, qeth_ulp_setup_cb() argument
2974 struct qeth_reply *reply, unsigned long data) qeth_default_setadapterparms_cb()
2988 struct qeth_reply *reply, unsigned long data) qeth_query_setadapterparms_cb()
3002 return qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); qeth_query_setadapterparms_cb()
3040 struct qeth_reply *reply, unsigned long data) qeth_query_ipassists_cb()
3092 struct qeth_reply *reply, unsigned long data) qeth_query_switch_attributes_cb()
3100 sw_info = (struct qeth_switch_info *)reply->param; qeth_query_switch_attributes_cb()
3108 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); qeth_query_switch_attributes_cb()
3133 struct qeth_reply *reply, unsigned long data) qeth_query_setdiagass_cb()
3188 struct qeth_reply *reply, unsigned long data) qeth_hw_trap_cb()
4134 struct qeth_reply *reply, unsigned long data) qeth_setadp_promisc_mode_cb()
4144 qeth_default_setadapterparms_cb(card, reply, (unsigned long)cmd); qeth_setadp_promisc_mode_cb()
4218 struct qeth_reply *reply, unsigned long data) qeth_setadpparms_change_macaddr_cb()
4232 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); qeth_setadpparms_change_macaddr_cb()
4261 struct qeth_reply *reply, unsigned long data) qeth_setadpparms_set_access_ctrl_cb()
4265 int fallback = *(int *)reply->param; qeth_setadpparms_set_access_ctrl_cb()
4342 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); qeth_setadpparms_set_access_ctrl_cb()
4504 struct qeth_reply *reply, unsigned long sdata) qeth_snmp_command_cb()
4515 data = (unsigned char *)((char *)cmd - reply->offset); qeth_snmp_command_cb()
4516 qinfo = (struct qeth_arp_query_info *) reply->param; qeth_snmp_command_cb()
4630 struct qeth_reply *reply, unsigned long data) qeth_setadpparms_query_oat_cb()
4640 priv = (struct qeth_qoat_priv *)reply->param; qeth_setadpparms_query_oat_cb()
4732 struct qeth_reply *reply, unsigned long data) qeth_query_card_info_cb()
4739 carrier_info = (struct carrier_info *)reply->param; qeth_query_card_info_cb()
4748 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd); qeth_query_card_info_cb()
5271 struct qeth_reply *reply, unsigned long data) qeth_setassparms_cb()
2973 qeth_default_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_default_setadapterparms_cb() argument
2987 qeth_query_setadapterparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_query_setadapterparms_cb() argument
3039 qeth_query_ipassists_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_query_ipassists_cb() argument
3091 qeth_query_switch_attributes_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_query_switch_attributes_cb() argument
3132 qeth_query_setdiagass_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_query_setdiagass_cb() argument
3187 qeth_hw_trap_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_hw_trap_cb() argument
4133 qeth_setadp_promisc_mode_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_setadp_promisc_mode_cb() argument
4217 qeth_setadpparms_change_macaddr_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_setadpparms_change_macaddr_cb() argument
4260 qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_setadpparms_set_access_ctrl_cb() argument
4503 qeth_snmp_command_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long sdata) qeth_snmp_command_cb() argument
4629 qeth_setadpparms_query_oat_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_setadpparms_query_oat_cb() argument
4731 qeth_query_card_info_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_query_card_info_cb() argument
5270 qeth_setassparms_cb(struct qeth_card *card, struct qeth_reply *reply, unsigned long data) qeth_setassparms_cb() argument
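
qeth_alloc_reply()/qeth_get_reply()/qeth_put_reply() above put a plain atomic refcount on the reply object, and the comment before qeth_send_control_data() spells out the callback contract: return non-zero while more reply blocks are expected, zero once the last (or only) block has arrived. Below is a hedged sketch of such a callback; my_reply_cb and my_result are hypothetical, and only the signature and the reply->param convention are taken from the hits above.

#include "qeth_core.h"	/* assumed location of struct qeth_card / struct qeth_reply */

struct my_result {		/* hypothetical per-command bookkeeping */
	int blocks_seen;
	int blocks_expected;
};

/* Multi-block reply callback for qeth_send_control_data().  Returning
 * non-zero keeps the reply on the waiter list so further blocks are
 * delivered; returning zero marks the reply complete and wakes the sender.
 */
static int my_reply_cb(struct qeth_card *card, struct qeth_reply *reply,
		       unsigned long data)
{
	struct my_result *res = (struct my_result *)reply->param;

	/* 'data' points at the command in the reply buffer; a real callback
	 * would decode it here and record per-block results. */
	res->blocks_seen++;

	if (res->blocks_seen < res->blocks_expected)
		return 1;	/* more reply blocks to come */
	return 0;		/* last block received */
}
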
/linux-4.4.14/drivers/android/
H A Dbinder_trace.h107 TP_PROTO(bool reply, struct binder_transaction *t,
109 TP_ARGS(reply, t, target_node),
115 __field(int, reply)
124 __entry->reply = reply;
128 TP_printk("transaction=%d dest_node=%d dest_proc=%d dest_thread=%d reply=%d flags=0x%x code=0x%x",
131 __entry->reply, __entry->flags, __entry->code)
/linux-4.4.14/net/sunrpc/xprtrdma/
H A Drpc_rdma.c70 "reply chunk" /* entire reply via rdma write */
86 /* The client can't know how large the actual reply will be. Thus it
87 * plans for the largest possible reply for that particular ULP
88 * operation. If the maximum combined reply message size exceeds that
89 * limit, the client must provide a write list or a reply chunk for
208 * Create read/write chunk lists, and reply chunks, for RDMA
216 * When used for a single reply chunk (which is a special write
217 * chunk used for the entire reply, rather than just the data), it
221 * of type RDMA_NOMSG carrying the xid and length. As a result, reply
261 /* a write or reply chunk - server will RDMA Write our memory */ rpcrdma_create_chunks()
300 } else { /* write/reply */ rpcrdma_create_chunks()
310 (type == rpcrdma_replych) ? "reply" : "write", rpcrdma_create_chunks()
335 *iptr++ = xdr_zero; /* encode a NULL reply chunk */ rpcrdma_create_chunks()
342 *iptr++ = xdr_zero; /* encode a NULL reply chunk */ rpcrdma_create_chunks()
469 * o Large non-read ops return as a single reply chunk. rpcrdma_marshal_req()
571 * Chase down a received write or reply chunklist to get length
807 dprintk("RPC: %s: reply 0x%p completes request 0x%p\n" rpcrdma_reply_handler()
812 /* from here on, the reply is no longer an orphan */ rpcrdma_reply_handler()
821 /* never expect reply chunks (two ways to check) */ rpcrdma_reply_handler()
830 /* count any expected write chunks in read reply */ rpcrdma_reply_handler()
835 /* check for validity, and no reply chunk after */ rpcrdma_reply_handler()
860 /* never expect read or write chunks, always reply chunks */ rpcrdma_reply_handler()
872 /* Reply chunk buffer already is the reply vector - no fixup. */ rpcrdma_reply_handler()
878 dprintk("%s: invalid rpcrdma reply header (type %d):" rpcrdma_reply_handler()
923 dprintk("RPC: %s: short/invalid reply\n", __func__); rpcrdma_reply_handler()
941 "duplicate reply %p to RPC request %p: xid 0x%08x\n", rpcrdma_reply_handler()
H A Dbackchannel.c95 pr_err("RPC: %s: reply buffer alloc failed\n", rpcrdma_bc_setup_reps()
124 /* The backchannel reply path returns each rpc_rqst to the xprt_rdma_bc_setup()
125 * bc_pa_list _after_ the reply is sent. If the server is xprt_rdma_bc_setup()
131 * always an rpc_rqst available as soon as a reply is sent. xprt_rdma_bc_setup()
198 * rpcrdma_bc_marshal_reply - Send backwards direction reply
199 * @rqst: buffer containing RPC reply data
287 * Called in the RPC reply handler, which runs in a tasklet.
355 * direction reply. rpcrdma_bc_receive_call()
H A Dsvc_rdma_sendto.c165 /* Returns the address of the first reply array element or <nul> if no
166 * reply array is present
175 /* XXX: Need to fix when reply chunk may occur with read list svc_rdma_get_reply_array()
187 /* The reply chunk follows an empty write array located svc_rdma_get_reply_array()
188 * at 'rc_position' here. The reply array is at rc_target. svc_rdma_get_reply_array()
390 /* XXX: need to fix when reply lists occur with read-list and/or send_reply_chunks()
404 /* Prepare the reply chunk given the length actually send_reply_chunks()
621 /* Send any reply-list data and update resp reply-list */ svc_rdma_sendto()
625 printk(KERN_ERR "svcrdma: failed to send reply chunks, rc=%d\n", svc_rdma_sendto()
H A Dsvc_rdma_marshal.c125 /* Check for no reply-array */ decode_reply_array()
186 * chunk list and a reply chunk list. svc_rdma_xdr_decode_req()
231 /* There is no read-list in a reply */ svc_rdma_xdr_get_reply_hdr_len()
244 /* skip reply array */ svc_rdma_xdr_get_reply_hdr_len()
271 /* reply-array discriminator */ svc_rdma_xdr_encode_write_list()
/linux-4.4.14/drivers/staging/lustre/lustre/ptlrpc/
H A Dniobuf.c250 /* Let's setup deadline for reply unlink. */ ptlrpc_unregister_bulk()
327 * (to be ignored by client) if it's a error reply during recovery. ptlrpc_at_set_reply()
337 CDEBUG(D_ADAPTTO, "No early reply support: flags=%#x req_flags=%#x magic=%x/%x len=%d\n", ptlrpc_at_set_reply()
345 * Send request reply from request \a req reply buffer.
346 * \a flags defines reply types
355 /* We must already have a reply buffer (only ptlrpc_error() may be ptlrpc_send_reply()
356 * called without one). The reply generated by sptlrpc layer (e.g. ptlrpc_send_reply()
384 * ptlrpc_body in reply buffer to ptlrpc_body_v2, otherwise, the ptlrpc_send_reply()
385 * reply buffer on client will be overflow. ptlrpc_send_reply()
446 * For request \a req send an error reply back. Create empty
447 * reply buffers if necessary.
480 * if \a noreply is set, don't expect any reply back and don't set up
481 * reply buffers.
580 /* We are responsible for unlinking the reply buffer */ ptl_send_rpc()
619 CDEBUG(D_NET, "Setup reply buffer: %u bytes, xid %llu, portal %u\n", ptl_send_rpc()
659 * access the reply buffer. */ ptl_send_rpc()
H A Devents.c73 /* Failed send: make it seem like the reply timed out, just request_out_callback()
85 * Client's incoming reply callback
98 for adaptive timeouts' early reply. */ reply_in_callback()
129 /* Early reply */ reply_in_callback()
131 "Early reply received: mlen=%u offset=%d replen=%d replied=%d unlinked=%d", reply_in_callback()
137 if (req->rq_replied) /* already got the real reply */ reply_in_callback()
146 /* Real reply */ reply_in_callback()
149 /* Got reply, no resend required */ reply_in_callback()
156 "reply in flags=%x mlen=%u offset=%d replen=%d", reply_in_callback()
379 * Server's outgoing reply callback
H A Dsec.c447 * we don't need to alloc reply buffer here, leave it to the sptlrpc_req_ctx_switch()
649 * 4. old reply from server received, we accept and verify the reply. sptlrpc_req_refresh_ctx()
652 * 5. new reply from server received, dropped by LNet. sptlrpc_req_refresh_ctx()
656 * it for reply reconstruction. sptlrpc_req_refresh_ctx()
952 CERROR("failed unpack reply: x%llu\n", req->rq_xid); do_cli_unwrap_reply()
964 CERROR("reply policy %u doesn't match request policy %u\n", do_cli_unwrap_reply()
993 * Used by ptlrpc client, to perform security transformation upon the reply
995 * the reply message in clear text.
997 * \pre the reply buffer should have been un-posted from LNet, so nothing is
1009 CERROR("real reply with offset 0\n"); sptlrpc_cli_unwrap_reply()
1014 CERROR("reply at odd offset %u\n", req->rq_reply_off); sptlrpc_cli_unwrap_reply()
1027 * reply message of \a req. We expect the rq_reply_off is 0, and
1028 * rq_nob_received is the early reply size.
1030 * Because the receive buffer might be still posted, the reply data might be
1032 * we allocate a separate ptlrpc_request and reply buffer for early reply
1074 CERROR("early reply with offset %u\n", req->rq_reply_off); sptlrpc_cli_unwrap_early_reply()
1090 CERROR("early reply length %d too small\n", sptlrpc_cli_unwrap_early_reply()
1113 "error %d unwrap early reply", rc); sptlrpc_cli_unwrap_early_reply()
1131 * Used by ptlrpc client, to release a processed early reply \a early_req.
1601 * Used by ptlrpc client to allocate reply buffer of \a req.
1622 * Used by ptlrpc client to free reply buffer of \a req. After this
1908 * reply message has been prepared.
1966 * Used by ptlrpc server, to allocate reply buffer for \a req. If succeed,
2015 * Used by ptlrpc server, to perform transformation upon reply message.
2017 * \post req->rq_reply_off is set to appropriate server-controlled reply offset.
2107 * This is called after unwrap the reply message.
2133 * This is called after unwrap the reply message.
H A Dsec_plain.c223 CERROR("unexpected reply buf count %u\n", msg->lm_bufcount); plain_ctx_verify()
240 /* expect no user desc in reply */ plain_ctx_verify()
242 CERROR("Unexpected udesc flag in reply\n"); plain_ctx_verify()
247 CERROR("reply bulk flavor %u != %u\n", phdr->ph_bulk_hash_alg, plain_ctx_verify()
261 "early reply checksum mismatch: %08x != %08x\n", plain_ctx_verify()
267 * in reply, except for early reply */ plain_ctx_verify()
271 CERROR("%s bulk checksum in reply\n", plain_ctx_verify()
611 /* add space for early reply */ plain_alloc_repbuf()
H A Dclient.c219 * for reply before timing out this request.
235 * to wait too long for the reply, otherwise the other client ptlrpc_at_set_req_timeout()
302 * bz16408, however, this can also happen if early reply ptlrpc_at_adj_net_latency()
303 * is lost and client RPC is expired and resent, early reply ptlrpc_at_adj_net_latency()
304 * or reply of original RPC can still be fit in reply buffer ptlrpc_at_adj_net_latency()
352 * Handle an early reply message, called with the rq_lock held.
393 * early reply, so client should give it at least that long. ptlrpc_at_recv_early_reply()
399 "Early reply #%d, new deadline in %lds (%lds)", ptlrpc_at_recv_early_reply()
1137 * Versions are obtained from server reply.
1156 * Callback function called when client receives RPC reply for \a req.
1176 DEBUG_REQ(D_ERROR, req, "reply buffer overflow, expected: %d, actual size: %d", after_reply()
1183 * Pass the required reply buffer size (include space for early after_reply()
1184 * reply). NB: no need to round up because alloc_repbuf will after_reply()
1201 DEBUG_REQ(D_ERROR, req, "unwrap reply failed (%d):", rc); after_reply()
1224 /* allocate new xid to avoid reply reconstruction */ after_reply()
1524 * Skip processing until reply is unlinked. We ptlrpc_check_set()
1547 /* Move to next phase if reply was successfully ptlrpc_check_set()
1561 /* Note that this also will start async reply unlink. */ ptlrpc_check_set()
1721 /* Still waiting for a reply? */ ptlrpc_check_set()
1727 /* Did we actually receive a reply? */ ptlrpc_check_set()
1737 * swab in-place of reply buffer ptlrpc_check_set()
1750 * process the reply. Similarly if the RPC returned ptlrpc_check_set()
1767 * The RPC reply arrived OK, but the bulk screwed ptlrpc_check_set()
1783 * reply unlink. ptlrpc_check_set()
1882 "timed out for sent delay" : "timed out for slow reply"), ptlrpc_expire_one_request()
2068 * requests in the set complete (either get a reply, timeout, get an
2312 * Disengage the client's reply buffer from the network
2327 /* Let's setup deadline for reply unlink. */ ptlrpc_unregister_reply()
2342 /* Move to "Unregistering" phase as reply was not unlinked yet. */ ptlrpc_unregister_reply()
2507 * Request got reply but linked to the import list still. ptlrpc_resend_req()
2512 DEBUG_REQ(D_HA, req, "it has reply, so skip it"); ptlrpc_resend_req()
2642 * Callback used for replayed requests reply processing.
2643 * In case of successful reply calls registered request replay callback.
3044 /* don't want reply */ ptlrpcd_alloc_work()
H A Dlayout.c73 * client request and server reply, respectively.
842 DEFINE_MSGF("mgs_config_read reply ", 0,
1828 * Returns the PTLRPC request or reply (\a loc) buffer offset of a \a pill
1922 * Returns the pointer to a PTLRPC request or reply (\a loc) buffer of a \a pill
2039 * Trivial wrapper around __req_capsule_get(), that returns the PTLRPC reply
2089 * Set the size of the PTLRPC request/reply (\a loc) buffer for the given \a
2093 * request or reply.
2123 * Return the actual PTLRPC buffer length of a request or reply (\a loc)
2143 * given \a pill's request or reply (\a loc) given the field size recorded in
2156 * While req_capsule_msg_size() computes the size of a PTLRPC request or reply
2158 * PTLRPC request or reply given only an RQF (\a fmt).
2191 * old format. Specifically: the new format must have as many request and reply
2245 * the format (\a rc_fmt) of \a pill's PTLRPC request or reply (\a loc), else it
2260 * pill's PTLRPC request or reply (\a loc), else it returns 0.
2277 * request or reply (\a loc).
H A Dservice.c57 MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
59 MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");
220 /** reply handling service. */
249 * Put reply state into a queue for processing because we received
473 /* reply states */ ptlrpc_service_part_init()
509 * timeout is less than this, we'll be sending an early reply. */ ptlrpc_service_part_init()
620 /* Increase max reply size to next power of two */ ptlrpc_register_service()
698 /* DEBUG_REQ() assumes the reply state of a request with a valid ptlrpc_server_free_request()
922 /* Add rpc to early reply check list */ ptlrpc_at_add_timed()
1005 /* deadline is when the client expects us to reply, margin is the ptlrpc_at_send_early_reply()
1008 "%ssending early reply (deadline %+lds, margin %+lds) for %d+%d", ptlrpc_at_send_early_reply()
1017 DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), not sending early reply. Consider increasing at_early_margin (%d)?", ptlrpc_at_send_early_reply()
1038 DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%ld/%lld), not sending early reply\n", ptlrpc_at_send_early_reply()
1067 /** if it is last refcount then early reply isn't needed */ ptlrpc_at_send_early_reply()
1069 DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, abort sending early reply\n"); ptlrpc_at_send_early_reply()
1101 DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc); ptlrpc_at_send_early_reply()
1104 /* Free the (early) reply state from lustre_pack_reply. ptlrpc_at_send_early_reply()
1439 * Handle freshly incoming reqs, add to timed early reply list,
1655 The deadline is increased if we send an early reply. */ ptlrpc_server_handle_request()
1747 * An internal function to process a single reply state object.
1780 * handled this reply yet and we race with it to grab ptlrpc_handle_rs()
1781 * exp_uncommitted_replies_lock before removing the reply from ptlrpc_handle_rs()
1783 * reply has already been removed, list_del_init() is a noop. ptlrpc_handle_rs()
1786 * or has handled this reply since store reordering might allow us to ptlrpc_handle_rs()
2042 /* Alloc reply state structure for this one */ ptlrpc_main()
2179 * Main body of "handle reply" function.
2180 * It processes acked reply states
/linux-4.4.14/include/net/iucv/
H A Diucv.h52 * IUCV_IPANSLST Indicates that an address list is used for the reply data.
73 * Use a pointer to an iucv_array as the buffer, reply or answer
109 * length: 32 bit length of the message / reply
110 * reply_size: 32 bit maximum allowed length of the reply
182 * iucv_message_send2way has been replied to. The reply can be
386 * @flags: how the reply is sent (IUCV_IPRMDATA, IUCV_IPPRTY, IUCV_IPBUFLST)
387 * @reply: address of data buffer or address of struct iucv_array
388 * @size: length of reply data buffer
391 * must completely identify the message to which you wish to reply, i.e.,
398 u8 flags, void *reply, size_t size);
411 * receiver will not reply to the message.
431 * receiver will not reply to the message.
444 * @flags: how the message is sent and the reply is received
450 * @asize: size of reply buffer
454 * reply to the message and a buffer is provided into which IUCV moves
455 * the reply to this message.
470 u8 flags, void *reply, size_t size);
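
The iucv.h excerpts document both iucv_message_reply() (answer a pending message) and the two-way send, where the sender provides a buffer into which IUCV places the partner's reply. A minimal sketch of the reply side is below; it assumes 'path' and 'msg' come from the path's message-pending callback and shows only the plain-buffer case (no IUCV_IPBUFLST array).

#include <net/iucv/iucv.h>

/* Answer a pending two-way message with a small payload.  Error handling
 * is minimal; flags = 0 means the reply is a plain buffer, not an
 * iucv_array list.
 */
static int demo_answer(struct iucv_path *path, struct iucv_message *msg)
{
	static const char answer[] = "OK";

	return iucv_message_reply(path, msg, 0, (void *)answer, sizeof(answer));
}
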
/linux-4.4.14/drivers/usb/storage/
H A Ddatafab.c235 unsigned char *reply = us->iobuf; datafab_write_data() local
301 result = datafab_bulk_read(us, reply, 2); datafab_write_data()
305 if (reply[0] != 0x50 && reply[1] != 0) { datafab_write_data()
307 reply[0], reply[1]); datafab_write_data()
401 unsigned char *reply; datafab_id_device() local
414 reply = kmalloc(512, GFP_NOIO); datafab_id_device()
415 if (!reply) datafab_id_device()
428 rc = datafab_bulk_read(us, reply, 512); datafab_id_device()
432 info->sectors = ((u32)(reply[117]) << 24) | datafab_id_device()
433 ((u32)(reply[116]) << 16) | datafab_id_device()
434 ((u32)(reply[115]) << 8) | datafab_id_device()
435 ((u32)(reply[114]) ); datafab_id_device()
443 kfree(reply); datafab_id_device()
596 // build the reply datafab_transport()
H A Djumpshot.c336 unsigned char *reply; jumpshot_id_device() local
344 reply = kmalloc(512, GFP_NOIO); jumpshot_id_device()
345 if (!reply) jumpshot_id_device()
358 // read the reply jumpshot_id_device()
359 rc = jumpshot_bulk_read(us, reply, 512); jumpshot_id_device()
365 info->sectors = ((u32)(reply[117]) << 24) | jumpshot_id_device()
366 ((u32)(reply[116]) << 16) | jumpshot_id_device()
367 ((u32)(reply[115]) << 8) | jumpshot_id_device()
368 ((u32)(reply[114]) ); jumpshot_id_device()
373 kfree(reply); jumpshot_id_device()
526 // build the reply jumpshot_transport()
H A Dshuttle_usbat.c327 unsigned char *reply = us->iobuf; usbat_check_status() local
330 rc = usbat_get_status(us, reply); usbat_check_status()
335 if (*reply & 0x01 && *reply != 0x51) usbat_check_status()
339 if (*reply & 0x20) usbat_check_status()
1070 unsigned char *reply; usbat_flash_get_sector_count() local
1077 reply = kmalloc(512, GFP_NOIO); usbat_flash_get_sector_count()
1078 if (!reply) usbat_flash_get_sector_count()
1098 rc = usbat_read_block(us, reply, 512, 0); usbat_flash_get_sector_count()
1102 info->sectors = ((u32)(reply[117]) << 24) | usbat_flash_get_sector_count()
1103 ((u32)(reply[116]) << 16) | usbat_flash_get_sector_count()
1104 ((u32)(reply[115]) << 8) | usbat_flash_get_sector_count()
1105 ((u32)(reply[114]) ); usbat_flash_get_sector_count()
1110 kfree(reply); usbat_flash_get_sector_count()
1721 * build the reply usbat_flash_transport()
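
datafab_id_device(), jumpshot_id_device() and usbat_flash_get_sector_count() above all read a 512-byte ATA IDENTIFY reply and assemble the sector count from bytes 114-117 (words 57-58, the current capacity in sectors). The sketch below isolates just that parsing step on a buffer assumed to already hold the reply; the bulk-transfer plumbing is omitted.

#include <linux/types.h>

/* Extract the "current capacity in sectors" field (IDENTIFY words 57-58,
 * i.e. bytes 114-117) from a 512-byte reply buffer, as the id routines
 * above do by hand.
 */
static u32 demo_sectors_from_identify(const unsigned char *reply)
{
	return ((u32)reply[117] << 24) |
	       ((u32)reply[116] << 16) |
	       ((u32)reply[115] <<  8) |
	       ((u32)reply[114]);
}
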
/linux-4.4.14/drivers/staging/rdma/hfi1/
H A Dmad.c63 static int reply(struct ib_mad_hdr *smp) reply() function
301 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_nodedesc()
311 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_nodedesc()
327 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_nodeinfo()
350 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_nodeinfo()
383 return reply((struct ib_mad_hdr *)smp); subn_get_nodeinfo()
532 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_portinfo()
543 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_portinfo()
712 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_portinfo()
748 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_pkeytable()
762 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_pkeytable()
777 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_pkeytable()
999 * Don't send a reply if the response would be sent set_port_states()
1061 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_portinfo()
1208 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_portinfo()
1414 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_pkeytable()
1425 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_pkeytable()
1433 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_pkeytable()
1497 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_sl_to_sc()
1506 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_sl_to_sc()
1519 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_sl_to_sc()
1539 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_sc_to_sl()
1548 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_sc_to_sl()
1561 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_sc_to_sl()
1581 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_sc_to_vlt()
1589 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_sc_to_vlt()
1605 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_sc_to_vlt()
1616 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_sc_to_vlt()
1636 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_sc_to_vlnt()
1646 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_sc_to_vlnt()
1661 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_sc_to_vlnt()
1669 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_sc_to_vlnt()
1693 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_psi()
1726 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_psi()
1744 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_psi()
1793 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_cable_info()
1800 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_cable_info()
1810 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_cable_info()
1816 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_cable_info()
1830 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_bct()
1839 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_bct()
1852 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_bct()
1858 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_bct()
1876 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_vl_arb()
1902 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_vl_arb()
1916 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_vl_arb()
2266 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_classportinfo()
2338 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_portstatus()
2344 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_portstatus()
2474 return reply((struct ib_mad_hdr *)pmp);
2591 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_datacounters()
2600 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_datacounters()
2613 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_datacounters()
2718 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_datacounters()
2749 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_porterrors()
2757 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_porterrors()
2769 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_porterrors()
2834 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_porterrors()
2860 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_errorinfo()
2868 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_errorinfo()
2881 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_errorinfo()
2929 return reply((struct ib_mad_hdr *)pmp); pma_get_opa_errorinfo()
2948 return reply((struct ib_mad_hdr *)pmp); pma_set_opa_portstatus()
3076 return reply((struct ib_mad_hdr *)pmp);
3101 return reply((struct ib_mad_hdr *)pmp); pma_set_opa_errorinfo()
3114 return reply((struct ib_mad_hdr *)pmp); pma_set_opa_errorinfo()
3150 return reply((struct ib_mad_hdr *)pmp); pma_set_opa_errorinfo()
3175 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_cong_info()
3197 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_cong_setting()
3216 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_cong_setting()
3257 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_hfi1_cong_log()
3308 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_hfi1_cong_log()
3330 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_cc_table()
3339 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_cc_table()
3359 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_cc_table()
3388 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_cc_table()
3399 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_cc_table()
3413 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_cc_table()
3463 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_led_info()
3474 return reply((struct ib_mad_hdr *)smp); __subn_get_opa_led_info()
3488 return reply((struct ib_mad_hdr *)smp); __subn_set_opa_led_info()
3580 ret = reply((struct ib_mad_hdr *)smp); subn_get_opa_sma()
3650 ret = reply((struct ib_mad_hdr *)smp); subn_set_opa_sma()
3671 return reply((struct ib_mad_hdr *)smp); subn_get_opa_aggregate()
3689 return reply((struct ib_mad_hdr *)smp); subn_get_opa_aggregate()
3699 return reply((struct ib_mad_hdr *)smp); subn_get_opa_aggregate()
3705 return reply((struct ib_mad_hdr *)smp); subn_get_opa_aggregate()
3718 return reply((struct ib_mad_hdr *)smp); subn_set_opa_aggregate()
3736 return reply((struct ib_mad_hdr *)smp); subn_set_opa_aggregate()
3743 return reply((struct ib_mad_hdr *)smp); subn_set_opa_aggregate()
3749 return reply((struct ib_mad_hdr *)smp); subn_set_opa_aggregate()
3863 ret = reply((struct ib_mad_hdr *)smp); process_subn_opa()
3932 ret = reply((struct ib_mad_hdr *)smp); process_subn_opa()
3950 ret = reply((struct ib_mad_hdr *)smp); process_subn()
3989 ret = reply((struct ib_mad_hdr *)smp); process_subn()
4009 return reply((struct ib_mad_hdr *)pmp); process_perf_opa()
4038 ret = reply((struct ib_mad_hdr *)pmp); process_perf_opa()
4054 ret = reply((struct ib_mad_hdr *)pmp); process_perf_opa()
4070 ret = reply((struct ib_mad_hdr *)pmp); process_perf_opa()
4155 * @out_mad: any outgoing MAD reply
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/core/
H A Dnotify.h32 void *data, u32 size, u32 reply,
/linux-4.4.14/Documentation/connector/
H A Ducon.c116 struct nlmsghdr *reply; main() local
228 reply = (struct nlmsghdr *)buf; main()
230 switch (reply->nlmsg_type) { main()
236 data = (struct cn_msg *)NLMSG_DATA(reply); main()
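
ucon.c above is the userspace side: it reads from a netlink socket, treats the buffer as a struct nlmsghdr, switches on nlmsg_type and pulls the connector payload out with NLMSG_DATA(). A condensed sketch of that receive handling is below; socket setup is assumed to be as in the original sample.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>
#include <linux/connector.h>

/* Parse one netlink reply buffer the way ucon.c does: netlink header
 * first, then the connector message carried as the payload.
 */
static void demo_handle_reply(char *buf, int len)
{
	struct nlmsghdr *reply = (struct nlmsghdr *)buf;

	if (len <= 0 || !NLMSG_OK(reply, (unsigned int)len))
		return;

	switch (reply->nlmsg_type) {
	case NLMSG_ERROR:
		fprintf(stderr, "error message received\n");
		break;
	case NLMSG_DONE: {
		struct cn_msg *data = NLMSG_DATA(reply);

		printf("idx=%x val=%x seq=%u len=%u\n",
		       data->id.idx, data->id.val, data->seq,
		       (unsigned int)data->len);
		break;
	}
	default:
		break;
	}
}
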
/linux-4.4.14/drivers/net/usb/
H A Dcx82310_eth.c58 * - optionally wait for the reply
59 * - optionally read some data from the reply
61 static int cx82310_cmd(struct usbnet *dev, enum cx82310_cmd cmd, bool reply, cx82310_cmd() argument
86 if (reply) { cx82310_cmd()
87 /* wait for reply, retry if it's empty */ cx82310_cmd()
95 "reply receive error %d\n", cx82310_cmd()
103 dev_err(&dev->udev->dev, "no reply to command %#x\n", cx82310_cmd()
110 "got reply to command %#x, expected: %#x\n", cx82310_cmd()
/linux-4.4.14/net/netfilter/ipvs/
H A Dip_vs_nfct.c41 * - alter reply for NAT when forwarding packet in original direction:
96 /* Alter reply only in original direction */ ip_vs_update_conntrack()
108 * IP_CT_DIR_ORIGINAL untouched. When the reply comes back from the ip_vs_update_conntrack()
127 "ctinfo=%d, old reply=" FMT_TUPLE ip_vs_update_conntrack()
128 ", new reply=" FMT_TUPLE ", cp=" FMT_CONN "\n", ip_vs_update_conntrack()
169 /* Change reply CLIENT->RS to CLIENT->VS */ ip_vs_nfct_expect_callback()
189 /* Change reply VS->CLIENT to RS->CLIENT */ ip_vs_nfct_expect_callback()
221 * Then the default callback function will alter the reply and will confirm
/linux-4.4.14/sound/soc/intel/haswell/
H A Dsst-haswell-ipc.c221 struct sst_hsw_ipc_stream_alloc_reply reply; member in struct:sst_hsw_stream
432 if (stream->reply.stream_hw_id == stream_id) get_stream_by_id()
495 stream->reply.stream_hw_id); hsw_notification_work()
505 stream->reply.stream_hw_id); hsw_notification_work()
542 trace_ipc_notification("stream reset", stream->reply.stream_hw_id); hsw_stream_update()
547 stream->reply.stream_hw_id); hsw_stream_update()
552 stream->reply.stream_hw_id); hsw_stream_update()
560 u32 reply = msg_get_global_reply(header); hsw_process_reply() local
571 switch (reply) { hsw_process_reply()
599 trace_ipc_error("error: reply busy", header); hsw_process_reply()
603 trace_ipc_error("error: reply failure", header); hsw_process_reply()
611 trace_ipc_error("error: reply not found", header); hsw_process_reply()
627 trace_ipc_error("error: unknown reply", header); hsw_process_reply()
789 /* reply message from DSP */ hsw_irq_thread()
792 /* Handle Immediate reply from DSP Core */ hsw_irq_thread()
809 /* Handle Notification and Delayed reply from DSP Core */ hsw_irq_thread()
853 stream->reply.volume_register_address[channel], sst_hsw_stream_get_volume()
867 trace_ipc_request("set stream volume", stream->reply.stream_hw_id); sst_hsw_stream_set_volume()
874 header |= (stream->reply.stream_hw_id << IPC_STR_ID_SHIFT); sst_hsw_stream_set_volume()
991 stream->reply.stream_hw_id = INVALID_STREAM_HW_ID; sst_hsw_stream_new()
1023 stream->free_req.stream_id = stream->reply.stream_hw_id; sst_hsw_stream_free()
1200 struct sst_hsw_ipc_stream_alloc_reply *reply = &stream->reply; sst_hsw_stream_commit() local
1219 sizeof(*str_req), reply, sizeof(*reply)); sst_hsw_stream_commit()
1259 struct sst_hsw_ipc_stream_info_reply *reply; sst_hsw_mixer_get_info() local
1263 reply = &hsw->mixer_info; sst_hsw_mixer_get_info()
1269 reply, sizeof(*reply)); sst_hsw_mixer_get_info()
1275 trace_hsw_mixer_info_reply(reply); sst_hsw_mixer_get_info()
1307 trace_ipc_request("stream pause", stream->reply.stream_hw_id); sst_hsw_stream_pause()
1310 stream->reply.stream_hw_id, wait); sst_hsw_stream_pause()
1313 stream->reply.stream_hw_id); sst_hsw_stream_pause()
1328 trace_ipc_request("stream resume", stream->reply.stream_hw_id); sst_hsw_stream_resume()
1331 stream->reply.stream_hw_id, wait); sst_hsw_stream_resume()
1334 stream->reply.stream_hw_id); sst_hsw_stream_resume()
1357 stream->reply.stream_hw_id); sst_hsw_stream_reset()
1361 trace_ipc_request("stream reset", stream->reply.stream_hw_id); sst_hsw_stream_reset()
1364 stream->reply.stream_hw_id, 1); sst_hsw_stream_reset()
1367 stream->reply.stream_hw_id); sst_hsw_stream_reset()
1378 stream->reply.read_position_register_address, sizeof(rpos)); sst_hsw_get_dsp_position()
1390 stream->reply.presentation_position_register_address, sst_hsw_get_dsp_presentation_position()
2098 /* clear reply bits & status bits */ hsw_reply_msg_match()
/linux-4.4.14/drivers/scsi/libfc/
H A Dfc_libfc.c159 * @fp: reply frame containing header to be filled in
160 * @in_fp: request frame containing header to use in filling in reply
216 * fc_fill_reply_hdr() - fill FC reply header fields based on request
217 * @fp: reply frame containing header to be filled in
218 * @in_fp: request frame containing header to use in filling in reply
219 * @r_ctl: R_CTL value for reply
/linux-4.4.14/drivers/media/usb/dvb-usb-v2/
H A Danysee.h102 General reply packet(s) are used whenever a command does not define its own reply.
107 | 00 | reply data (if any) from the previous transaction
108 | | The same reply packet as was returned during the previous transaction.
109 | | Needed only if the reply was missed in the previous transaction.
122 | 00 | reply data (if any)
H A Dec168.c190 u8 reply; ec168_identify_state() local
191 struct ec168_req req = {GET_CONFIG, 0, 1, sizeof(reply), &reply}; ec168_identify_state()
198 dev_dbg(&d->udev->dev, "%s: reply=%02x\n", __func__, reply); ec168_identify_state()
200 if (reply == 0x01) ec168_identify_state()
/linux-4.4.14/drivers/misc/mei/
H A Dbus-fixup.c130 struct mei_nfc_reply *reply = NULL; mei_nfc_if_version() local
148 reply = kzalloc(if_version_length, GFP_KERNEL); mei_nfc_if_version()
149 if (!reply) mei_nfc_if_version()
153 bytes_recv = __mei_cl_recv(cl, (u8 *)reply, if_version_length); mei_nfc_if_version()
160 memcpy(ver, reply->data, sizeof(struct mei_nfc_if_version)); mei_nfc_if_version()
166 kfree(reply); mei_nfc_if_version()
/linux-4.4.14/drivers/gpu/drm/nouveau/nvkm/core/
H A Dclient.c98 u8 index, reply; nvkm_client_notify_new() local
115 nvif_ioctl(object, "notify new vers %d reply %d route %02x " nvkm_client_notify_new()
117 req->v0.reply, req->v0.route, req->v0.token); nvkm_client_notify_new()
123 reply = req->v0.reply; nvkm_client_notify_new()
128 false, data, size, reply, &notify->n); nvkm_client_notify_new()
H A Dnotify.c134 void *data, u32 size, u32 reply, nvkm_notify_init()
141 if (ret == 0 && (ret = -EINVAL, notify->size == reply)) { nvkm_notify_init()
149 notify->data = kmalloc(reply, GFP_KERNEL); nvkm_notify_init()
132 nvkm_notify_init(struct nvkm_object *object, struct nvkm_event *event, int (*func)(struct nvkm_notify *), bool work, void *data, u32 size, u32 reply, struct nvkm_notify *notify) nvkm_notify_init() argument
/linux-4.4.14/drivers/thunderbolt/
H A Dctl.c593 * reply (even though the switch will reset). The caller should check for
601 struct tb_cfg_header reply; tb_cfg_reset() local
607 return tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route, tb_cfg_reset()
630 struct cfg_write_pkg reply; tb_cfg_read_raw() local
636 res = tb_ctl_rx(ctl, &reply, 12 + 4 * length, timeout_msec, route, tb_cfg_read_raw()
641 res.response_port = reply.addr.port; tb_cfg_read_raw()
642 res.err = check_config_address(reply.addr, space, offset, length); tb_cfg_read_raw()
644 memcpy(buffer, &reply.data, 4 * length); tb_cfg_read_raw()
667 struct cfg_read_pkg reply; tb_cfg_write_raw() local
675 res = tb_ctl_rx(ctl, &reply, sizeof(reply), timeout_msec, route, tb_cfg_write_raw()
680 res.response_port = reply.addr.port; tb_cfg_write_raw()
681 res.err = check_config_address(reply.addr, space, offset, length); tb_cfg_write_raw()
715 * returns the port number from which the reply originated.
H A Dctl.h37 TB_CFG_ERROR_ACK_PLUG_EVENT = 7, /* send as reply to TB_CFG_PKG_EVENT */
/linux-4.4.14/arch/m68k/mac/
H A Dmisc.c47 time = (req.reply[3] << 24) | (req.reply[4] << 16) cuda_read_time()
48 | (req.reply[5] << 8) | req.reply[6]; cuda_read_time()
72 return req.reply[3]; cuda_read_pram()
102 time = (req.reply[1] << 24) | (req.reply[2] << 16) pmu_read_time()
103 | (req.reply[3] << 8) | req.reply[4]; pmu_read_time()
127 return req.reply[3]; pmu_read_pram()
158 time = (req.reply[3] << 24) | (req.reply[4] << 16)
159 | (req.reply[5] << 8) | req.reply[6];
178 return req.reply[3];
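
cuda_read_time()/pmu_read_time() above rebuild a 32-bit RTC value from individual reply bytes (reply[3..6] for Cuda, reply[1..4] for the PMU). The sketch below shows the same big-endian byte assembly, plus the equivalent using the unaligned-access helper; the offsets are taken from the hits above.

#include <linux/types.h>
#include <asm/unaligned.h>

/* Cuda packs the RTC seconds counter big-endian in reply bytes 3..6. */
static u32 demo_cuda_time(const u8 *reply)
{
	return ((u32)reply[3] << 24) | ((u32)reply[4] << 16) |
	       ((u32)reply[5] <<  8) |  (u32)reply[6];
}

/* Equivalent, letting the helper do the big-endian load. */
static u32 demo_cuda_time_be(const u8 *reply)
{
	return get_unaligned_be32(reply + 3);
}
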
/linux-4.4.14/drivers/infiniband/hw/qib/
H A Dqib_mad.c40 static int reply(struct ib_smp *smp) reply() function
274 return reply(smp); subn_get_nodedescription()
310 return reply(smp); subn_get_nodeinfo()
343 return reply(smp); subn_get_guidinfo()
469 ret = reply(smp); subn_get_portinfo()
567 ret = reply(smp); subn_get_portinfo()
615 return reply(smp); subn_get_pkeytable()
847 * Don't send a reply if the response would be sent subn_set_portinfo()
1071 return reply(smp); subn_get_sl_to_vl()
1083 return reply(smp); subn_set_sl_to_vl()
1115 return reply(smp); subn_get_vl_arb()
1176 return reply((struct ib_smp *) pmp); pma_get_classportinfo()
1214 return reply((struct ib_smp *) pmp); pma_get_portsamplescontrol()
1232 ret = reply((struct ib_smp *) pmp); pma_set_portsamplescontrol()
1382 return reply((struct ib_smp *) pmp); pma_get_portsamplesresult()
1424 return reply((struct ib_smp *) pmp); pma_get_portsamplesresult_ext()
1522 return reply((struct ib_smp *) pmp); pma_get_portcounters()
1635 return reply((struct ib_smp *)pmp); pma_get_portcounters_cong()
1699 return reply((struct ib_smp *) pmp); pma_get_portcounters_ext()
1869 ret = reply(smp); process_subn()
1931 ret = reply(smp); process_subn()
1965 ret = reply(smp); process_subn()
1974 ret = reply(smp); process_subn()
2002 ret = reply(smp); process_subn()
2019 ret = reply((struct ib_smp *) pmp); process_perf()
2049 ret = reply((struct ib_smp *) pmp); process_perf()
2069 ret = reply((struct ib_smp *) pmp); process_perf()
2085 ret = reply((struct ib_smp *) pmp); process_perf()
2109 return reply((struct ib_smp *) ccp); cc_get_classportinfo()
2125 return reply((struct ib_smp *) ccp); cc_get_congestion_info()
2156 return reply((struct ib_smp *) ccp); cc_get_congestion_setting()
2206 return reply((struct ib_smp *) ccp); cc_get_congestion_control_table()
2237 return reply((struct ib_smp *) ccp); cc_set_congestion_setting()
2290 return reply((struct ib_smp *) ccp); cc_set_congestion_control_table()
2314 ret = reply((struct ib_smp *)ccp); process_cc()
2344 ret = reply((struct ib_smp *) ccp); process_cc()
2361 ret = reply((struct ib_smp *) ccp); process_cc()
2377 ret = reply((struct ib_smp *) ccp); process_cc()
2392 * @out_mad: any outgoing MAD reply
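
Both the qib and hfi1 MAD handlers above funnel every "answer this MAD in place" case through a small reply() helper and return its value from process_subn()/process_perf(). The helper's body is not shown in the hits, so the sketch below is only a plausible reconstruction of what such a helper typically does for IB SMPs: flip the method to GetResp, set the direction bit for directed-route packets, and tell the MAD core to transmit the modified buffer.

#include <rdma/ib_mad.h>
#include <rdma/ib_smi.h>

/* Plausible sketch of the reply() helper: convert the request into its
 * response in place and ask the MAD core to send it back.  Reconstructed
 * from context, not copied from the driver.
 */
static int demo_reply(struct ib_smp *smp)
{
	smp->method = IB_MGMT_METHOD_GET_RESP;
	if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE)
		smp->status |= IB_SMP_DIRECTION;
	return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
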
/linux-4.4.14/arch/s390/include/uapi/asm/
H A Dzcrypt.h89 * The request (or reply) parameter block is organized thus:
102 unsigned int rpl_msgbl; /* reply message block length */
104 unsigned int rpl_datal; /* reply data block len */
114 unsigned char * rpl_parmb; /* reply parm block 'address' */
116 unsigned char * rpl_datab; /* reply data block 'address' */
120 unsigned char * rpl_extb; /* reply extension block 'address'*/
276 * devices awaiting the reply.
/linux-4.4.14/drivers/gpu/drm/
H A Ddrm_dp_helper.c208 switch (msg.reply & DP_AUX_NATIVE_REPLY_MASK) { drm_dp_dpcd_access()
435 * In case of i2c defer or short i2c ack reply to a write, drm_dp_i2c_msg_write_status_update()
454 * Calculate the duration of the AUX request/reply in usec. Gives the
533 * reply field.
561 switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) { drm_dp_i2c_do_msg()
565 * need to check for the I2C ACK reply. drm_dp_i2c_do_msg()
588 DRM_ERROR("invalid native reply %#04x\n", msg->reply); drm_dp_i2c_do_msg()
592 switch (msg->reply & DP_AUX_I2C_REPLY_MASK) { drm_dp_i2c_do_msg()
622 DRM_ERROR("invalid I2C reply %#04x\n", msg->reply); drm_dp_i2c_do_msg()
655 DRM_DEBUG_KMS("Partial I2C reply: requested %zu bytes got %d bytes\n", drm_dp_i2c_drain_msg()
713 * short reply. drm_dp_i2c_xfer()
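
drm_dp_i2c_do_msg() above checks the native AUX reply field (ACK/NACK/DEFER) first and only then, for I2C-over-AUX transfers, the I2C reply bits. A condensed sketch of that two-level classification of msg->reply is below, using the reply masks and codes from <drm/drm_dp_helper.h>; the retry/backoff policy of the real helper is left out.

#include <linux/errno.h>
#include <drm/drm_dp_helper.h>

/* Classify a completed AUX transaction: native reply first, then the
 * I2C-over-AUX reply.  Returns 0 on ACK, -EAGAIN when the sink asked us
 * to defer, and -EIO/-EREMOTEIO on NACK.
 */
static int demo_classify_reply(const struct drm_dp_aux_msg *msg)
{
	switch (msg->reply & DP_AUX_NATIVE_REPLY_MASK) {
	case DP_AUX_NATIVE_REPLY_ACK:
		break;				/* check the I2C bits below */
	case DP_AUX_NATIVE_REPLY_NACK:
		return -EIO;
	case DP_AUX_NATIVE_REPLY_DEFER:
		return -EAGAIN;			/* caller should retry later */
	default:
		return -EIO;
	}

	switch (msg->reply & DP_AUX_I2C_REPLY_MASK) {
	case DP_AUX_I2C_REPLY_ACK:
		return 0;
	case DP_AUX_I2C_REPLY_NACK:
		return -EREMOTEIO;
	case DP_AUX_I2C_REPLY_DEFER:
		return -EAGAIN;
	default:
		return -EIO;
	}
}
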
H A Ddrm_dp_mst_topology.c414 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); drm_dp_sideband_parse_link_address()
433 DRM_DEBUG_KMS("link address reply parse length fail %d %d\n", idx, raw->curlen); drm_dp_sideband_parse_remote_dpcd_read()
466 DRM_DEBUG_KMS("remote i2c reply parse length fail %d %d\n", idx, raw->curlen); drm_dp_sideband_parse_remote_i2c_read_ack()
562 DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type); drm_dp_sideband_parse_reply()
590 DRM_DEBUG_KMS("connection status reply parse length fail %d %d\n", idx, raw->curlen); drm_dp_sideband_parse_connection_status_notify()
613 DRM_DEBUG_KMS("resource status reply parse length fail %d %d\n", idx, raw->curlen); drm_dp_sideband_parse_resource_status_notify()
1562 if (txmsg->reply.reply_type == 1) drm_dp_send_link_address()
1565 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); drm_dp_send_link_address()
1566 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { drm_dp_send_link_address()
1568 txmsg->reply.u.link_addr.ports[i].input_port, drm_dp_send_link_address()
1569 txmsg->reply.u.link_addr.ports[i].peer_device_type, drm_dp_send_link_address()
1570 txmsg->reply.u.link_addr.ports[i].port_number, drm_dp_send_link_address()
1571 txmsg->reply.u.link_addr.ports[i].dpcd_revision, drm_dp_send_link_address()
1572 txmsg->reply.u.link_addr.ports[i].mcs, drm_dp_send_link_address()
1573 txmsg->reply.u.link_addr.ports[i].ddps, drm_dp_send_link_address()
1574 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, drm_dp_send_link_address()
1575 txmsg->reply.u.link_addr.ports[i].num_sdp_streams, drm_dp_send_link_address()
1576 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); drm_dp_send_link_address()
1579 drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); drm_dp_send_link_address()
1581 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { drm_dp_send_link_address()
1582 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); drm_dp_send_link_address()
1613 if (txmsg->reply.reply_type == 1) drm_dp_send_enum_path_resources()
1616 if (port->port_num != txmsg->reply.u.path_resources.port_number) drm_dp_send_enum_path_resources()
1618 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number, drm_dp_send_enum_path_resources()
1619 txmsg->reply.u.path_resources.avail_payload_bw_number); drm_dp_send_enum_path_resources()
1620 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; drm_dp_send_enum_path_resources()
1698 if (txmsg->reply.reply_type == 1) { drm_dp_payload_send_msg()
1933 if (txmsg->reply.reply_type == 1) { drm_dp_send_dpcd_write()
1946 struct drm_dp_sideband_msg_reply_body reply; drm_dp_encode_up_ack_reply() local
1948 reply.reply_type = 1; drm_dp_encode_up_ack_reply()
1949 reply.req_type = req_type; drm_dp_encode_up_ack_reply()
1950 drm_dp_encode_sideband_reply(&reply, msg); drm_dp_encode_up_ack_reply()
2220 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct); drm_dp_mst_handle_down_rep()
2233 DRM_DEBUG_KMS("Got MST reply with no msg %p %d %d %02x %02x\n", drm_dp_mst_handle_down_rep()
2244 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); drm_dp_mst_handle_down_rep()
2245 if (txmsg->reply.reply_type == 1) { drm_dp_mst_handle_down_rep()
2246 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data); drm_dp_mst_handle_down_rep()
2277 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); drm_dp_mst_handle_up_req()
2293 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); drm_dp_mst_handle_up_req()
2309 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); drm_dp_mst_handle_up_req()
3008 if (txmsg->reply.reply_type == 1) { /* got a NAK back */ drm_dp_mst_i2c_xfer()
3012 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { drm_dp_mst_i2c_xfer()
3016 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); drm_dp_mst_i2c_xfer()
/linux-4.4.14/sound/soc/intel/skylake/
H A Dskl-sst-ipc.c334 u32 reply = header.primary & IPC_GLB_REPLY_STATUS_MASK; skl_ipc_process_reply() local
344 switch (reply) { skl_ipc_process_reply()
346 dev_info(ipc->dev, "ipc FW reply %x: success\n", header.primary); skl_ipc_process_reply()
350 dev_err(ipc->dev, "ipc fw reply: %x: no memory\n", header.primary); skl_ipc_process_reply()
355 dev_err(ipc->dev, "ipc fw reply: %x: Busy\n", header.primary); skl_ipc_process_reply()
360 dev_err(ipc->dev, "Unknown ipc reply: 0x%x", reply); skl_ipc_process_reply()
365 if (reply != IPC_GLB_REPLY_SUCCESS) { skl_ipc_process_reply()
366 dev_err(ipc->dev, "ipc FW reply: reply=%d", reply); skl_ipc_process_reply()
394 /* reply message from DSP */ skl_dsp_irq_thread_handler()
421 /* Handle Immediate reply from DSP Core */ skl_dsp_irq_thread_handler()
/linux-4.4.14/kernel/
H A Dtaskstats.c83 void *reply; prepare_reply() local
95 reply = genlmsg_put(skb, 0, seq, &family, 0, cmd); prepare_reply()
97 reply = genlmsg_put_reply(skb, info, &family, 0, cmd); prepare_reply()
98 if (reply == NULL) { prepare_reply()
113 void *reply = genlmsg_data(genlhdr); send_reply() local
115 genlmsg_end(skb, reply); send_reply()
129 void *reply = genlmsg_data(genlhdr); send_cpu_listeners() local
132 genlmsg_end(skb, reply); send_cpu_listeners()
375 * boundaries but the layout of the aggregate reply, with mk_reply()
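
prepare_reply()/send_reply() above show the standard generic-netlink reply shape: reserve a message with genlmsg_put() (or genlmsg_put_reply() when answering a request), fill attributes, close it with genlmsg_end(), and send it back. A minimal sketch follows; the DEMO_* command/attribute numbers and demo_family are placeholders, not taskstats' real values.

#include <net/genetlink.h>

#define DEMO_CMD_REPLY	1	/* hypothetical command number */
#define DEMO_ATTR_VALUE	1	/* hypothetical attribute number */

static struct genl_family demo_family;	/* assumed registered elsewhere */

/* Build and send a reply to 'info' carrying one u32 attribute. */
static int demo_send_reply(struct genl_info *info, u32 value)
{
	struct sk_buff *skb;
	void *hdr;

	skb = genlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = genlmsg_put_reply(skb, info, &demo_family, 0, DEMO_CMD_REPLY);
	if (!hdr)
		goto nomem;
	if (nla_put_u32(skb, DEMO_ATTR_VALUE, value))
		goto nomem;

	genlmsg_end(skb, hdr);
	return genlmsg_reply(skb, info);	/* routes back to the requester */

nomem:
	nlmsg_free(skb);
	return -EMSGSIZE;
}
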
/linux-4.4.14/fs/nfs/blocklayout/
H A Drpc_pipefs.c58 struct bl_dev_msg *reply = &nn->bl_mount_reply; bl_resolve_deviceid() local
98 if (reply->status != BL_DEVICE_REQUEST_PROC) { bl_resolve_deviceid()
100 __func__, reply->status); bl_resolve_deviceid()
104 dev = MKDEV(reply->major, reply->minor); bl_resolve_deviceid()
/linux-4.4.14/drivers/staging/lustre/lustre/include/
H A Dlustre_sec.h341 * Only used by pre-allocated request/reply pool.
389 * Verify the reply message using \a ctx.
391 * \pre req->rq_repdata point to reply message with signature.
392 * \pre req->rq_repdata_len is the total reply message length.
393 * \post req->rq_repmsg point to reply message without signature.
394 * \post req->rq_replen is the reply message length.
415 * Decrypt the reply message using \a ctx.
417 * \pre req->rq_repdata point to encrypted reply message.
419 * \post req->rq_repmsg point to reply message in clear text.
420 * \post req->rq_replen is the reply message length in clear text.
452 * Unwrap bulk reply data. This is called after wrapping RPC
453 * reply message.
626 * To allocate reply buffer for \a req.
631 * reply which be transformed from \a lustre_msg_size of clear text.
640 * To free reply buffer for \a req.
687 * processed, and reply message has been prepared; req->rq_sp_from is
696 * Perform security transformation upon reply message.
698 * \pre reply message is pointed by req->rq_reply_state->rs_msg, size
718 * \param msgsize size of the reply message in clear text.
H A Dlustre_net.h299 #define PTL_RPC_FL_INTR (1 << 0) /* reply wait was interrupted by user */
300 #define PTL_RPC_FL_TIMEOUT (1 << 7) /* request timed out waiting for reply */
396 /** Maximum number of locks to fit into reply state */
401 * Structure to define reply state on the server
402 * Reply state holds various reply message information. Also for "difficult"
403 * replies (rep-ack case) we store the state after sending reply and wait
410 /** Linkage for list of all reply states in a system */
412 /** Linkage for list of all reply states on same export */
414 /** Linkage for list of all reply states for same obd */
419 /** A spinlock to protect the reply state flags */
442 /** Lnet metadata handle for the reply */
450 /** Size of the reply buffer */
452 /** Size of the reply message */
455 * Actual reply message. Its content is encrypted (if needed) to
456 * produce reply buffer for actual sending. In simple case
459 struct lustre_msg *rs_msg; /* reply message */
463 /** Handles of locks awaiting client reply ACK */
1313 rq_packed_final:1, /* packed final reply */
1336 /** Portal where to wait for reply and where reply would be sent */
1341 * !rq_truncate : # reply bytes actually received,
1389 /* doesn't expect reply FIXME */
1416 /** Fields that help to see if request and reply were swabbed or not */
1438 /** separated reply state */
1443 /** client-only incoming reply */
1472 * when request/reply sent (secs), or time when request should be sent
1479 * so that servers' early reply updates to the deadline aren't
1482 /** when req reply unlink must finish. */
1499 /** Async completion handler, called when reply is received */
1561 * Returns 1 if request reply buffer at offset \a index was already swabbed
1578 * Returns 1 if request reply needs to be swabbed into local cpu byteorder
1596 * Mark request reply buffer at offset \a index that it was already swabbed
1975 /** biggest reply to send */
2028 * serialize operations on RS list (reply states)
2073 * incoming request arrives and when difficult reply has to be handled.
2118 /** early reply timer */
2444 /* maximum reply size this service can ever send */
2676 * Returns true if request \a req got early reply and hard deadline is not met
2688 * Returns true if we got real reply from server for this request
2699 /** Returns true if request \a req is in process of receiving server reply */
/linux-4.4.14/drivers/gpu/drm/nouveau/nvif/
H A Dnotify.c168 bool work, u8 event, void *data, u32 size, u32 reply, nvif_notify_init()
183 notify->size = reply; nvif_notify_init()
199 args->req.reply = notify->size; nvif_notify_init()
167 nvif_notify_init(struct nvif_object *object, int (*func)(struct nvif_notify *), bool work, u8 event, void *data, u32 size, u32 reply, struct nvif_notify *notify) nvif_notify_init() argument
/linux-4.4.14/drivers/scsi/mpt3sas/mpi/
H A Dmpi2_tool.h22 * and reply messages.
59 * Toolbox reply
215 /*Toolbox ISTWI Read Write Tool reply message */
317 /*Toolbox Diagnostic CLI Tool reply message */
422 * Diagnostic Buffer Post reply
462 * Diagnostic Buffer Post reply
/linux-4.4.14/drivers/gpu/drm/nouveau/
H A Dnouveau_usif.c47 u16 reply; member in struct:usif_notify
91 if (WARN_ON(!ntfy->p || ntfy->reply != (length + size))) usif_notify()
147 ntfy->reply = sizeof(struct nvif_notify_rep_v0) + req->v0.reply; usif_notify_new()
208 ntfy->p = kmalloc(sizeof(*ntfy->p) + ntfy->reply, GFP_KERNEL); usif_notify_get()
216 ntfy->p->e.base.length = sizeof(ntfy->p->e.base) + ntfy->reply; usif_notify_get()
/linux-4.4.14/include/scsi/
H A Dscsi_bsg_iscsi.h94 * be no further reply information supplied.
96 * msg and status fields. The per-msgcode reply structure
/linux-4.4.14/include/linux/ceph/
H A Dmon_client.h55 struct ceph_msg *reply; /* and reply */ member in struct:ceph_mon_generic_request
/linux-4.4.14/include/uapi/linux/netfilter/
H A Dnf_conntrack_common.h18 /* >= this indicates reply direction */
24 /* Number of distinct IP_CT types (no NEW in reply dirn). */
/linux-4.4.14/drivers/staging/rdma/ipath/
H A Dipath_mad.c46 static int reply(struct ib_smp *smp) reply() function
66 return reply(smp); recv_subn_get_nodedescription()
119 return reply(smp); recv_subn_get_nodeinfo()
147 return reply(smp); recv_subn_get_guidinfo()
248 ret = reply(smp); recv_subn_get_portinfo()
339 ret = reply(smp); recv_subn_get_portinfo()
381 return reply(smp); recv_subn_get_pkeytable()
819 return reply((struct ib_smp *) pmp); recv_pma_get_classportinfo()
880 return reply((struct ib_smp *) pmp); recv_pma_get_portsamplescontrol()
897 ret = reply((struct ib_smp *) pmp); recv_pma_set_portsamplescontrol()
992 return reply((struct ib_smp *) pmp); recv_pma_get_portsamplesresult()
1019 return reply((struct ib_smp *) pmp); recv_pma_get_portsamplesresult_ext()
1118 return reply((struct ib_smp *) pmp); recv_pma_get_portcounters()
1155 return reply((struct ib_smp *) pmp); recv_pma_get_portcounters_ext()
1270 ret = reply(smp); process_subn()
1338 ret = reply(smp); process_subn()
1366 ret = reply(smp); process_subn()
1384 ret = reply(smp); process_subn()
1401 ret = reply((struct ib_smp *) pmp); process_perf()
1432 ret = reply((struct ib_smp *) pmp); process_perf()
1452 ret = reply((struct ib_smp *) pmp); process_perf()
1466 ret = reply((struct ib_smp *) pmp); process_perf()
1481 * @out_mad: any outgoing MAD reply
/linux-4.4.14/drivers/message/fusion/
H A Dmptlan.c130 MPT_FRAME_HDR *reply);
162 * @reply: Pointer to MPT reply frame
168 lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply) lan_reply() argument
173 dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n", lan_reply()
176 // dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n", lan_reply()
177 // mf, reply)); lan_reply()
180 u32 tmsg = CAST_PTR_TO_U32(reply); lan_reply()
194 // "MessageContext turbo reply received\n")); lan_reply()
226 printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply " lan_reply()
237 // msg = (u32 *) reply; lan_reply()
242 // reply->u.hdr.Function)); lan_reply()
244 switch (reply->u.hdr.Function) { lan_reply()
250 pSendRep = (LANSendReply_t *) reply; lan_reply()
259 pRecvRep = (LANReceivePostReply_t *) reply; lan_reply()
271 /* Just a default reply. Might want to check it to lan_reply()
287 "reply that I don't know what to do with\n"); lan_reply()
H A Dmptctl.c201 mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) mptctl_reply() argument
211 "(0x%02X), req=%p, reply=%p\n", ioc->name, req->u.hdr.Function, mptctl_reply()
212 req, reply)); mptctl_reply()
215 * Handling continuation of the same reply. Processing the first mptctl_reply()
216 * reply, and eating the other replies that come later. mptctl_reply()
223 if (!reply) mptctl_reply()
227 sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength); mptctl_reply()
228 memcpy(ioc->ioctl_cmds.reply, reply, sz); mptctl_reply()
230 if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo) mptctl_reply()
233 le16_to_cpu(reply->u.reply.IOCStatus), mptctl_reply()
234 le32_to_cpu(reply->u.reply.IOCLogInfo))); mptctl_reply()
240 if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState) mptctl_reply()
244 reply->u.sreply.SCSIStatus, mptctl_reply()
245 reply->u.sreply.SCSIState, mptctl_reply()
246 le16_to_cpu(reply->u.sreply.TaskTag), mptctl_reply()
247 le32_to_cpu(reply->u.sreply.TransferCount))); mptctl_reply()
249 if (reply->u.sreply.SCSIState & mptctl_reply()
278 if (reply && (reply->u.reply.MsgFlags & mptctl_reply()
301 memcpy(ioc->taskmgmt_cmds.reply, mr, mptctl_taskmgmt_reply()
302 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength)); mptctl_taskmgmt_reply()
427 pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply; mptctl_do_taskmgmt()
978 ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply; mptctl_do_fw_download()
2327 /* If a valid reply frame, copy to the user. mptctl_do_mpt_command()
2328 * Offset 2: reply length in U32's mptctl_do_mpt_command()
2333 4*ioc->ioctl_cmds.reply[2]); mptctl_do_mpt_command()
2335 sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]); mptctl_do_mpt_command()
2339 ioc->ioctl_cmds.reply, sz)){ mptctl_do_mpt_command()
2342 "Unable to write out reply frame %p\n", mptctl_do_mpt_command()
H A Dmptbase.c161 MPT_FRAME_HDR *reply);
440 * Process turbo (context) reply...
450 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got TURBO reply req_idx=%08x\n", mpt_turbo_reply()
515 /* non-TURBO reply! Hmmm, something may be up... mpt_reply()
516 * Newest turbo reply mechanism; get address mpt_reply()
520 /* Map DMA address of reply header to cpu address. mpt_reply()
533 dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got non-TURBO reply=%p req_idx=%x cb_idx=%x Function=%x\n", mpt_reply()
539 ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus); mpt_reply()
541 u32 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo); mpt_reply()
565 /* Flush (non-TURBO) reply with a WRITE! */ mpt_reply()
583 * so by reading the reply FIFO. Multiple replies may be processed
600 * Drain the reply FIFO! mpt_interrupt()
618 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
621 * "internal" request/reply processing is routed here.
628 mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) mptbase_reply() argument
635 switch (reply->u.hdr.Function) { mptbase_reply()
637 pEventReply = (EventNotificationReply_t *)reply; mptbase_reply()
649 memcpy(ioc->mptbase_cmds.reply, reply, mptbase_reply()
651 4 * reply->u.reply.MsgLength)); mptbase_reply()
662 "EventAck reply received\n", ioc->name)); mptbase_reply()
666 "Unexpected msg function (=%02Xh) reply received!\n", mptbase_reply()
667 ioc->name, reply->u.hdr.Function); mptbase_reply()
686 * LAN, SCSI target) to register its reply callback routine. Each
1760 * and reply memory pools.
2281 /* Disable reply interrupts (also blocks FreeQ) */ mpt_do_ioc_recovery()
2289 /* Disable alt-IOC's reply interrupts mpt_do_ioc_recovery()
2308 /* (re)Enable alt-IOC! (reply interrupt, FreeQ) */ mpt_do_ioc_recovery()
2310 "alt_ioc reply irq re-enabled\n", ioc->alt_ioc->name)); mpt_do_ioc_recovery()
2415 /* Prime reply & request queues! mpt_do_ioc_recovery()
2497 /* Enable! (reply interrupt) */ mpt_do_ioc_recovery()
2503 /* (re)Enable alt-IOC! (reply interrupt) */ mpt_do_ioc_recovery()
2505 "reply irq re-enabled\n", mpt_do_ioc_recovery()
3071 /* Destination (reply area)... */ GetIocFacts()
3096 * inspection of reply contents. GetIocFacts()
3101 /* Did we get a valid reply? */ GetIocFacts()
3187 * Set values for this IOC's request & reply frame sizes, GetIocFacts()
3188 * and request & reply queue depths... GetIocFacts()
3206 "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n", GetIocFacts()
3243 /* Destination (reply area)... */ GetPortFacts()
3266 /* Did we get a valid reply? */ GetPortFacts()
3353 * (reply) and sense buffers. SendIocInit()
3378 /* No need to byte swap the multibyte fields in the reply SendIocInit()
3550 u8 reply[sizeof(FWUploadReply_t)]; mpt_do_upload() local
3578 preply = (FWUploadReply_t *)&reply; mpt_do_upload()
3580 reply_sz = sizeof(reply); mpt_do_upload()
4396 * PrimeIocFifos - Initialize IOC request and reply FIFOs.
4399 * This routine allocates memory for the MPT reply and request frame
4400 * pools (if necessary), and primes the IOC reply FIFO with
4401 * reply frames.
4417 /* Prime reply FIFO... */ PrimeIocFifos()
4612 * mpt_handshake_req_reply_wait - Send MPT request to and receive reply
4617 * @replyBytes: Expected size of the reply in bytes
4618 * @u16reply: Pointer to area where reply should be written
4619 * @maxwait: Max wait time for a reply (in seconds)
4638 * Get ready to cache a handshake reply mpt_handshake_req_reply_wait()
4702 * Wait for completion of doorbell handshake reply from the IOC mpt_handshake_req_reply_wait()
4707 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HandShake reply count=%d%s\n", mpt_handshake_req_reply_wait()
4711 * Copy out the cached reply... mpt_handshake_req_reply_wait()
4824 * WaitForDoorbellReply - Wait for and capture an IOC handshake reply.
4829 * This routine polls the IOC for a handshake reply, 16 bits at a time.
4831 * of 128 bytes of reply data.
4833 * Returns a negative value on failure, else size of reply in WORDS.
4848 * Get first two u16's so we can look at IOC's intended reply MsgLength WaitForDoorbellReply()
4864 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "WaitCnt=%d First handshake reply word=%08x%s\n", WaitForDoorbellReply()
4870 * reply 16 bits at a time. WaitForDoorbellReply()
4887 printk(MYIOC_s_ERR_FMT "Handshake reply failure!\n", WaitForDoorbellReply()
4900 dhsprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Got Handshake reply:\n", ioc->name)); WaitForDoorbellReply()
4917 * -EFAULT for non-successful reply or no reply (timeout)
5048 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE); mptbase_sas_persist_operation()
5104 (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply; mptbase_sas_persist_operation()
5261 * -EFAULT for non-successful reply or no reply (timeout)
6311 * -EFAULT for non-successful reply or no reply (timeout)
6364 memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE); mpt_config()
6466 pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply; mpt_config()
6974 * IOC doesn't reply to any outstanding request. This will transfer IOC
7030 /* Disable reply interrupts (also blocks FreeQ) */ mpt_SoftResetHandler()
7696 * @pEventReply: Pointer to EventNotification reply frame
7809 * @log_info: U32 LogInfo reply word from the IOC
8064 * @log_info: U32 LogInfo reply word from the IOC
H A Dmptscsih.c106 MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
426 * mptscsih_info_scsiio - debug print info on reply frame
429 * @pScsiReply: Pointer to MPT reply frame
578 * @r: Pointer to MPT reply frame (NULL if TurboReply)
643 sc->result = DID_OK << 16; /* Set default reply as OK */ mptscsih_io_done()
658 /* special context reply handling */ mptscsih_io_done()
1007 } /* end of address reply case */ mptscsih_io_done()
1021 * Scsi_Host instance taskQ and reply to OS.
1638 (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply); mptscsih_IssueTaskMgmt()
2049 * @mr: Pointer to SCSI task mgmt reply frame
2071 memcpy(ioc->taskmgmt_cmds.reply, mr, mptscsih_taskmgmt_complete()
2072 min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength)); mptscsih_taskmgmt_complete()
2614 * @mr: Pointer to MPT reply frame (NULL if TurboReply)
2629 MPT_FRAME_HDR *reply) mptscsih_scandv_complete()
2640 if (!reply) mptscsih_scandv_complete()
2643 pReply = (SCSIIOReply_t *) reply; mptscsih_scandv_complete()
2646 mptscsih_get_completion_code(ioc, req, reply); mptscsih_scandv_complete()
2648 memcpy(ioc->internal_cmds.reply, reply, mptscsih_scandv_complete()
2649 min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength)); mptscsih_scandv_complete()
2650 cmd = reply->u.hdr.Function; mptscsih_scandv_complete()
2674 * @reply: Pointer to MPT reply frame (NULL if TurboReply)
2679 MPT_FRAME_HDR *reply) mptscsih_get_completion_code()
2687 pReply = (SCSIIOReply_t *)reply; mptscsih_get_completion_code()
2720 pr = (MpiRaidActionReply_t *)reply; mptscsih_get_completion_code()
2628 mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) mptscsih_scandv_complete() argument
2678 mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply) mptscsih_get_completion_code() argument
/linux-4.4.14/drivers/staging/lustre/lustre/ldlm/
H A Dldlm_request.c139 It would be nice to have some kind of "early reply" mechanism for ldlm_get_enq_timeout()
196 * - when a reply to an ENQUEUE RPC is received from the server
298 * bl_ast and -EINVAL reply is sent to server anyways. failed_lock_cleanup()
331 * Called after receiving reply from server.
341 struct ldlm_reply *reply; ldlm_cli_enqueue_fini() local
364 /* Before we return, swab the reply */ ldlm_cli_enqueue_fini()
365 reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); ldlm_cli_enqueue_fini()
366 if (reply == NULL) { ldlm_cli_enqueue_fini()
408 &reply->lock_handle, ldlm_cli_enqueue_fini()
411 lock->l_remote_handle = reply->lock_handle; ldlm_cli_enqueue_fini()
414 *flags = ldlm_flags_from_wire(reply->lock_flags); ldlm_cli_enqueue_fini()
415 lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & ldlm_cli_enqueue_fini()
419 lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags & ldlm_cli_enqueue_fini()
424 lock, reply->lock_handle.cookie, *flags); ldlm_cli_enqueue_fini()
430 int newmode = reply->lock_desc.l_req_mode; ldlm_cli_enqueue_fini()
439 if (!ldlm_res_eq(&reply->lock_desc.l_resource.lr_name, ldlm_cli_enqueue_fini()
443 PLDLMRES(&reply->lock_desc.l_resource), ldlm_cli_enqueue_fini()
447 &reply->lock_desc.l_resource.lr_name); ldlm_cli_enqueue_fini()
460 &reply->lock_desc.l_policy_data, ldlm_cli_enqueue_fini()
475 LDLM_DEBUG(lock, "enqueue reply includes blocking AST"); ldlm_cli_enqueue_fini()
510 * AST (if any) can override what we got in the reply */ ldlm_cli_enqueue_fini()
1791 struct ldlm_reply *reply; replay_lock_interpret() local
1798 reply = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP); replay_lock_interpret()
1799 if (reply == NULL) { replay_lock_interpret()
1807 aa->lock_handle.cookie, reply->lock_handle.cookie, replay_lock_interpret()
1822 &reply->lock_handle, replay_lock_interpret()
1825 lock->l_remote_handle = reply->lock_handle; replay_lock_interpret()
1851 /* If this is reply-less callback lock, we cannot replay it, since replay_one_lock()
1855 LDLM_DEBUG(lock, "Not replaying reply-less lock:"); replay_one_lock()
1870 * then we haven't got a reply yet and don't have a known disposition. replay_one_lock()
H A Dldlm_lib.c661 DEBUG_REQ(D_ERROR, req, "dropping reply"); target_send_reply_msg()
671 DEBUG_REQ(D_NET, req, "sending reply"); target_send_reply_msg()
695 /* req/reply consistent */ target_send_reply()
698 /* "fresh" reply */ target_send_reply()
709 /* disable reply scheduling while I'm setting up */ target_send_reply()
738 /* error sending: reply is off the net. Also we need +1 target_send_reply()
739 * reply ref until ptlrpc_handle_rs() is done target_send_reply()
740 * with the reply state (if the send was successful, there target_send_reply()
752 CDEBUG(D_HA, "Schedule reply immediately\n"); target_send_reply()
H A Dl_lock.c48 * enqueue reply. We rely on lock->l_resource = new_res
/linux-4.4.14/drivers/scsi/
H A Ddpt_i2o.c631 * Turn a pointer to ioctl reply data into an u32 'context'
633 static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply) adpt_ioctl_to_context() argument
636 return (u32)(unsigned long)reply; adpt_ioctl_to_context()
645 pHba->ioctl_reply_context[i] = reply; adpt_ioctl_to_context()
651 kfree (reply); adpt_ioctl_to_context()
662 * Go from an u32 'context' to a pointer to ioctl reply data.
1708 u32* reply = NULL; adpt_i2o_passthru() local
1746 reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL); adpt_i2o_passthru()
1747 if(reply == NULL) { adpt_i2o_passthru()
1748 printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name); adpt_i2o_passthru()
1753 msg[3] = adpt_ioctl_to_context(pHba, reply); adpt_i2o_passthru()
1764 kfree (reply); adpt_i2o_passthru()
1812 rcode, reply); adpt_i2o_passthru()
1866 /* Copy back the reply to user space */ adpt_i2o_passthru()
1869 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) { adpt_i2o_passthru()
1873 if(copy_to_user(user_reply, reply, reply_size)) { adpt_i2o_passthru()
1874 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name); adpt_i2o_passthru()
1884 kfree (reply); adpt_i2o_passthru()
2125 void __iomem *reply; adpt_isr() local
2146 printk(KERN_ERR"dpti: Could not get reply frame\n"); adpt_isr()
2153 reply = (u8 *)pHba->reply_pool + adpt_isr()
2157 printk(KERN_ERR "dpti: reply frame not from pool\n"); adpt_isr()
2158 reply = (u8 *)bus_to_virt(m); adpt_isr()
2161 if (readl(reply) & MSG_FAIL) { adpt_isr()
2162 u32 old_m = readl(reply+28); adpt_isr()
2171 // Transaction context is 0 in failed reply frame adpt_isr()
2174 writel(old_context, reply+12); adpt_isr()
2177 context = readl(reply+8); adpt_isr()
2179 void *p = adpt_ioctl_from_context(pHba, readl(reply+12)); adpt_isr()
2181 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4); adpt_isr()
2186 status = readl(reply+16); adpt_isr()
2194 readl(reply+12)); adpt_isr()
2201 cmd = adpt_cmd_from_context (pHba, readl(reply+12)); adpt_isr()
2205 adpt_i2o_to_scsi(reply, cmd); adpt_isr()
2373 static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd) adpt_i2o_to_scsi() argument
2378 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits adpt_i2o_to_scsi()
2382 u16 detailed_status = readl(reply+16) &0xffff; adpt_i2o_to_scsi()
2387 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20)); adpt_i2o_to_scsi()
2398 if (readl(reply+20) < cmd->underflow) { adpt_i2o_to_scsi()
2464 memcpy_fromio(cmd->sense_buffer, (reply+28) , len); adpt_i2o_to_scsi()
2856 // Wait for the reply status to come back adpt_i2o_init_outbound_q()
2875 // If the command was successful, fill the fifo with our reply adpt_i2o_init_outbound_q()
2893 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name); adpt_i2o_init_outbound_q()
/linux-4.4.14/drivers/hv/
H A Dhv_snapshot.c84 * Timeout waiting for userspace component to reply happened. vss_timeout_func()
86 pr_warn("VSS: timeout waiting for daemon to reply\n"); vss_timeout_func()
103 /* Daemon doesn't expect us to reply */ vss_handle_handshake()
107 /* Daemon expects us to reply with our own version*/ vss_handle_handshake()
/linux-4.4.14/drivers/gpu/drm/nouveau/include/nvkm/subdev/
H A Dpmu.h26 int nvkm_pmu_send(struct nvkm_pmu *, u32 reply[2], u32 process,
/linux-4.4.14/block/
H A Dbsg-lib.c47 * @result: job reply result
61 /* we're only returning the result field in the reply */ bsg_job_done()
71 /* set reply (bidi) residual */ bsg_job_done()
129 job->reply = req->sense; bsg_create_job()
/linux-4.4.14/net/bridge/netfilter/
H A Debt_arpreply.c97 MODULE_DESCRIPTION("Ebtables: ARP reply target");
/linux-4.4.14/include/scsi/fc/
H A Dfc_fip.h76 FIP_OP_LS = 2, /* Link Service request or reply */
97 FIP_SC_REP = 2, /* reply */
113 FIP_SC_VL_REP = 2, /* reply */
121 FIP_SC_VN_PROBE_REP = 2, /* probe reply */
/linux-4.4.14/include/linux/netfilter/
H A Dnf_conntrack_proto_gre.h77 /* structure for original <-> reply keymap */
/linux-4.4.14/arch/arm64/include/asm/
H A Dkgdb.h78 * "'g' packet reply is too long"
/linux-4.4.14/fs/dlm/
H A Drcom.c257 log_debug(ls, "reject reply %d from %d seq %llx expect %llx", receive_sync_reply()
551 int stop, reply = 0, names = 0, lookup = 0, lock = 0; dlm_receive_rcom() local
557 reply = 1; dlm_receive_rcom()
564 reply = 1; dlm_receive_rcom()
571 reply = 1; dlm_receive_rcom()
578 reply = 1; dlm_receive_rcom()
591 if (reply && (rc->rc_seq_reply != seq)) dlm_receive_rcom()
H A Ddlm_internal.h183 * reply is needed. Only set when the lkb is on the lockspace waiters
184 * list awaiting a reply from a remote node.
243 int8_t lkb_wait_type; /* type of reply waiting for */
249 struct list_head lkb_wait_reply; /* waiting for remote reply */
445 uint64_t rc_id; /* match reply with request */
535 struct list_head ls_waiters; /* lkbs needing a reply */
567 struct dlm_message ls_stub_ms; /* for faking a reply */
H A Drecover.c28 * Recovery waiting routines: these functions wait for a particular reply from
234 * rsb so we can match an rcom reply with the rsb it was sent for.
434 * waiting for a lookup reply are kept on the recover_list.
486 * with an outstanding request in waiters list and a request reply saved in the
487 * requestqueue, cannot know whether it should ignore the reply and resend the
488 * request, or accept the reply and complete the request. It must do the
491 * which case, the request reply would always be ignored and the request
/linux-4.4.14/drivers/scsi/qla4xxx/
H A Dql4_bsg.c17 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; qla4xxx_read_flash()
82 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; qla4xxx_update_flash()
148 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; qla4xxx_get_acb_state()
205 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; qla4xxx_read_nvram()
277 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; qla4xxx_update_nvram()
346 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; qla4xxx_restore_defaults()
390 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; qla4xxx_bsg_get_acb()
454 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; ql4xxx_execute_diag_cmd()
658 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; qla4xxx_execute_diag_loopback_cmd()
812 struct iscsi_bsg_reply *bsg_reply = bsg_job->reply; qla4xxx_process_vendor_specific()
/linux-4.4.14/arch/powerpc/include/asm/
H A Dsmu.h28 * The reply is 6 bytes:
144 * byte 0 of the reply:
146 * - on read, 0x00 or 0x01 : reply is in buffer (after the byte 0)
180 * reply at data offset 6, 7 and 8.
396 int reply_len; /* reply len */
398 void *reply_buf; /* reply buffer */
672 * a reply. The reader will be blocked or not depending on how the device
673 * file is opened. poll() isn't implemented yet. The reply will consist
674 * of a header as well, followed by the reply data if any. You should
675 * always provide a buffer large enough for the maximum reply data, I
/linux-4.4.14/drivers/media/usb/gspca/
H A Dkinect.c168 PDEBUG(D_USBO, "Control reply: %d", actual_len); send_cmd()
213 uint16_t reply[2]; write_register() local
221 res = send_cmd(gspca_dev, 0x03, cmd, 4, reply, 4); write_register()
226 res, reply[0], reply[1]); write_register()
H A Dtouptek.c195 static int val_reply(struct gspca_dev *gspca_dev, const char *reply, int rc) val_reply() argument
198 PERR("reply has error %d", rc); val_reply()
202 PERR("Bad reply size %d", rc); val_reply()
205 if (reply[0] != 0x08) { val_reply()
206 PERR("Bad reply 0x%02X", reply[0]); val_reply()
230 PERR("Bad reply to reg_w(0x0B, 0xC0, 0x%04X, 0x%04X\n", reg_w()
/linux-4.4.14/include/uapi/scsi/fc/
H A Dfc_fs.h70 FC_RCTL_DD_SOL_CTL = 0x03, /* solicited control or reply */
77 #define FC_RCTL_ILS_REP FC_RCTL_DD_SOL_CTL /* ILS reply */
83 FC_RCTL_ELS_REP = 0x23, /* extended link services reply */
85 FC_RCTL_ELS4_REP = 0x33, /* FC-4 ELS reply */
124 [FC_RCTL_DD_SOL_CTL] = "sol ctl/reply", \
/linux-4.4.14/include/uapi/scsi/
H A Dscsi_bsg_fc.h206 * The reply structure is an fc_bsg_ctels_reply structure
255 * The reply structure is an fc_bsg_ctels_reply structure
274 * The reply structure is an fc_bsg_ctels_reply structure
301 * be no further reply information supplied.
303 * msg and status fields. The per-msgcode reply structure
/linux-4.4.14/drivers/scsi/lpfc/
H A Dlpfc_bsg.c353 job->reply->reply_payload_rcv_len = lpfc_bsg_send_mgmt_cmd_cmp()
370 job->reply->result = rc; lpfc_bsg_send_mgmt_cmd_cmp()
401 job->reply->reply_payload_rcv_len = 0; lpfc_bsg_send_mgmt_cmd()
545 job->reply->result = rc; lpfc_bsg_send_mgmt_cmd()
612 job->reply->reply_payload_rcv_len = lpfc_bsg_rport_els_cmp()
618 job->reply->reply_payload_rcv_len = lpfc_bsg_rport_els_cmp()
622 els_reply = &job->reply->reply_data.ctels_reply; lpfc_bsg_rport_els_cmp()
640 job->reply->result = rc; lpfc_bsg_rport_els_cmp()
667 job->reply->reply_payload_rcv_len = 0; lpfc_bsg_rport_els()
774 job->reply->result = rc; lpfc_bsg_rport_els()
1123 job->reply->reply_payload_rcv_len = size; lpfc_bsg_ct_unsol_event()
1125 job->reply->result = 0; lpfc_bsg_ct_unsol_event()
1298 job->reply->reply_data.vendor_reply.vendor_rsp; lpfc_bsg_hba_get_event()
1318 job->reply->reply_payload_rcv_len = 0; lpfc_bsg_hba_get_event()
1334 job->reply->reply_payload_rcv_len = lpfc_bsg_hba_get_event()
1339 job->reply->reply_payload_rcv_len = 0; lpfc_bsg_hba_get_event()
1350 job->reply->result = 0; lpfc_bsg_hba_get_event()
1356 job->reply->result = rc; lpfc_bsg_hba_get_event()
1431 job->reply->reply_payload_rcv_len = 0; lpfc_issue_ct_rsp_cmp()
1445 job->reply->result = rc; lpfc_issue_ct_rsp_cmp()
1621 job->reply->reply_payload_rcv_len = 0; lpfc_bsg_send_mgmt_rsp()
1667 job->reply->result = rc; lpfc_bsg_send_mgmt_rsp()
1774 job->reply->reply_payload_rcv_len = 0; lpfc_sli3_bsg_diag_loopback_mode()
1867 job->reply->result = rc; lpfc_sli3_bsg_diag_loopback_mode()
2025 job->reply->reply_payload_rcv_len = 0; lpfc_sli4_bsg_diag_loopback_mode()
2154 job->reply->result = rc; lpfc_sli4_bsg_diag_loopback_mode()
2266 job->reply->result = rc; lpfc_sli4_bsg_diag_mode_end()
2388 job->reply->reply_data.vendor_reply.vendor_rsp; lpfc_sli4_bsg_link_diag_test()
2393 "3012 Received Run link diag test reply " lpfc_sli4_bsg_link_diag_test()
2416 job->reply->result = rc; lpfc_sli4_bsg_link_diag_test()
3015 job->reply->reply_payload_rcv_len = 0; lpfc_bsg_diag_loopback_run()
3240 job->reply->reply_payload_rcv_len = lpfc_bsg_diag_loopback_run()
3244 job->reply->reply_payload_rcv_len = size; lpfc_bsg_diag_loopback_run()
3274 job->reply->result = rc; lpfc_bsg_diag_loopback_run()
3304 job->reply->reply_data.vendor_reply.vendor_rsp; lpfc_bsg_get_dfc_rev()
3309 "2741 Received GET_DFC_REV reply below " lpfc_bsg_get_dfc_rev()
3318 job->reply->result = rc; lpfc_bsg_get_dfc_rev()
3368 job->reply->reply_payload_rcv_len = lpfc_bsg_issue_mbox_cmpl()
3382 job->reply->result = 0; lpfc_bsg_issue_mbox_cmpl()
3562 job->reply->reply_payload_rcv_len = lpfc_bsg_issue_mbox_ext_handle_job()
3568 job->reply->result = 0; lpfc_bsg_issue_mbox_ext_handle_job()
4099 job->reply->result = 0; lpfc_bsg_sli_cfg_write_cmd_ext()
4310 job->reply->reply_payload_rcv_len = lpfc_bsg_read_ebuf_get()
4324 job->reply->result = 0; lpfc_bsg_read_ebuf_get()
4439 job->reply->result = 0; lpfc_bsg_write_ebuf_set()
4603 job->reply->reply_payload_rcv_len = 0; lpfc_bsg_issue_mbox()
4844 job->reply->reply_payload_rcv_len = lpfc_bsg_issue_mbox()
4881 job->reply->reply_payload_rcv_len = 0; lpfc_bsg_mbox_cmd()
4901 job->reply->result = 0; lpfc_bsg_mbox_cmd()
4909 job->reply->result = rc; lpfc_bsg_mbox_cmd()
4973 job->reply->reply_data.vendor_reply.vendor_rsp; lpfc_bsg_menlo_cmd_cmp()
4993 job->reply->reply_payload_rcv_len = lpfc_bsg_menlo_cmd_cmp()
5010 job->reply->result = rc; lpfc_bsg_menlo_cmd_cmp()
5042 job->reply->reply_payload_rcv_len = 0; lpfc_menlo_cmd()
5057 "2785 Received MENLO_CMD reply below " lpfc_menlo_cmd()
5183 job->reply->result = rc; lpfc_menlo_cmd()
5232 job->reply->reply_payload_rcv_len = 0; lpfc_bsg_hst_vendor()
5234 job->reply->result = rc; lpfc_bsg_hst_vendor()
5264 job->reply->reply_payload_rcv_len = 0; lpfc_bsg_request()
5266 job->reply->result = rc; lpfc_bsg_request()
/linux-4.4.14/drivers/net/
H A Dvxlan.c1390 struct sk_buff *reply; arp_reduce() local
1404 reply = arp_create(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, arp_reduce()
1409 if (reply == NULL) arp_reduce()
1412 skb_reset_mac_header(reply); arp_reduce()
1413 __skb_pull(reply, skb_network_offset(reply)); arp_reduce()
1414 reply->ip_summed = CHECKSUM_UNNECESSARY; arp_reduce()
1415 reply->pkt_type = PACKET_HOST; arp_reduce()
1417 if (netif_rx_ni(reply) == NET_RX_DROP) arp_reduce()
1437 struct sk_buff *reply; vxlan_na_create() local
1450 reply = alloc_skb(len, GFP_ATOMIC); vxlan_na_create()
1451 if (reply == NULL) vxlan_na_create()
1454 reply->protocol = htons(ETH_P_IPV6); vxlan_na_create()
1455 reply->dev = dev; vxlan_na_create()
1456 skb_reserve(reply, LL_RESERVED_SPACE(request->dev)); vxlan_na_create()
1457 skb_push(reply, sizeof(struct ethhdr)); vxlan_na_create()
1458 skb_set_mac_header(reply, 0); vxlan_na_create()
1472 ether_addr_copy(eth_hdr(reply)->h_dest, daddr); vxlan_na_create()
1473 ether_addr_copy(eth_hdr(reply)->h_source, n->ha); vxlan_na_create()
1474 eth_hdr(reply)->h_proto = htons(ETH_P_IPV6); vxlan_na_create()
1475 reply->protocol = htons(ETH_P_IPV6); vxlan_na_create()
1477 skb_pull(reply, sizeof(struct ethhdr)); vxlan_na_create()
1478 skb_set_network_header(reply, 0); vxlan_na_create()
1479 skb_put(reply, sizeof(struct ipv6hdr)); vxlan_na_create()
1483 pip6 = ipv6_hdr(reply); vxlan_na_create()
1492 skb_pull(reply, sizeof(struct ipv6hdr)); vxlan_na_create()
1493 skb_set_transport_header(reply, 0); vxlan_na_create()
1495 na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen); vxlan_na_create()
1514 skb_push(reply, sizeof(struct ipv6hdr)); vxlan_na_create()
1516 reply->ip_summed = CHECKSUM_UNNECESSARY; vxlan_na_create()
1518 return reply; vxlan_na_create()
1551 struct sk_buff *reply; neigh_reduce() local
1565 reply = vxlan_na_create(skb, n, neigh_reduce()
1570 if (reply == NULL) neigh_reduce()
1573 if (netif_rx_ni(reply) == NET_RX_DROP) neigh_reduce()
/linux-4.4.14/drivers/gpu/drm/tegra/
H A Ddpaux.c204 msg->reply = DP_AUX_NATIVE_REPLY_ACK; tegra_dpaux_transfer()
208 msg->reply = DP_AUX_NATIVE_REPLY_NACK; tegra_dpaux_transfer()
212 msg->reply = DP_AUX_NATIVE_REPLY_DEFER; tegra_dpaux_transfer()
216 msg->reply = DP_AUX_I2C_REPLY_NACK; tegra_dpaux_transfer()
220 msg->reply = DP_AUX_I2C_REPLY_DEFER; tegra_dpaux_transfer()
224 if ((msg->size > 0) && (msg->reply == DP_AUX_NATIVE_REPLY_ACK)) { tegra_dpaux_transfer()
/linux-4.4.14/include/rdma/
H A Dib_cm.h410 * ib_send_cm_rep - Sends a connection reply in response to a connection
414 * @param: Connection reply information needed to establish the
422 * to a connection reply message.
446 * ib_send_cm_drep - Sends a disconnection reply to a disconnection request.
450 * disconnection reply message.
502 * the sender to reply to the connection message. The upper 3-bits
597 * ib_send_cm_sidr_rep - Sends a service ID resolution reply to the
601 * @param: Service ID resolution reply information.
/linux-4.4.14/include/uapi/linux/can/
H A Dbcm.h84 TX_STATUS, /* reply to TX_READ request */
86 RX_STATUS, /* reply to RX_READ request */
/linux-4.4.14/net/irda/irlan/
H A Dirlan_provider_event.c95 * INFO, We have issued a GetInfo command and is awaiting a reply.
159 * reply
/linux-4.4.14/sound/soc/intel/baytrail/
H A Dsst-baytrail-ipc.c54 /* mask for differentiating between notification and reply message */
155 struct sst_byt_alloc_response reply; member in struct:sst_byt_stream
327 /* reply from ADSP */ sst_byt_irq_thread()
427 struct sst_byt_alloc_response *reply = &stream->reply; sst_byt_stream_commit() local
436 reply, sizeof(*reply)); sst_byt_stream_commit()
674 /* match reply to message sent based on msg and stream IDs */ byt_reply_msg_match()
/linux-4.4.14/drivers/media/pci/bt8xx/
H A Ddst.c190 u8 reply; dst_wait_dst_ready() local
194 if (dst_gpio_inb(state, &reply) < 0) { dst_wait_dst_ready()
198 if ((reply & RDC_8820_PIO_0_ENABLE) == 0) { dst_wait_dst_ready()
315 dprintk(verbose, DST_DEBUG, 1, "reply is 0x%x", ret[0]); read_dst()
1085 u8 reply; dst_get_device_id() local
1103 if (read_dst(state, &reply, GET_ACK)) dst_get_device_id()
1105 if (reply != ACK) { dst_get_device_id()
1106 dprintk(verbose, DST_INFO, 1, "Write not Acknowledged! [Reply=0x%02x]", reply); dst_get_device_id()
1225 u8 reply; dst_command() local
1246 if (read_dst(state, &reply, GET_ACK)) { dst_command()
1254 if (reply != ACK) { dst_command()
1255 dprintk(verbose, DST_INFO, 1, "write not acknowledged 0x%02x ", reply); dst_command()
1388 u8 reply; dst_write_tuna() local
1422 if ((read_dst(state, &reply, GET_ACK) < 0)) { dst_write_tuna()
1426 if (reply != ACK) { dst_write_tuna()
1427 dprintk(verbose, DST_DEBUG, 1, "write not acknowledged 0x%02x ", reply); dst_write_tuna()
H A Ddst_ca.c92 u8 reply; dst_ci_command() local
107 if (read_dst(state, &reply, GET_ACK) < 0) { dst_ci_command()
377 static int write_to_8820(struct dst_state *state, struct ca_msg *hw_buffer, u8 length, u8 reply) write_to_8820() argument
379 if ((dst_put_ci(state, hw_buffer->msg, length, hw_buffer->msg, reply)) < 0) { write_to_8820()
424 static int ca_set_pmt(struct dst_state *state, struct ca_msg *p_ca_message, struct ca_msg *hw_buffer, u8 reply, u8 query) ca_set_pmt() argument
438 write_to_8820(state, hw_buffer, (length + tag_length), reply); ca_set_pmt()
444 /* Board supports CA PMT reply ? */ dst_check_ca_pmt()
/linux-4.4.14/include/video/
H A Duvesafb.h91 /* How long to wait for a reply from userspace [ms] */
/linux-4.4.14/include/net/netfilter/
H A Dnf_conntrack_l3proto.h34 * Invert the per-proto part of the tuple: ie. turn xmit into reply.
/linux-4.4.14/drivers/md/
H A Ddm-log-userspace-transfer.c76 * both. This function fills in the reply for a waiting request.
77 * If just msg is given, then the reply is simply an ACK from userspace
90 * Each process that is waiting for a reply from the user fill_pkg()
/linux-4.4.14/drivers/gpu/drm/msm/edp/
H A Dedp_aux.c129 msg->reply = native ? edp_aux_transfer()
174 msg->reply = native ? edp_aux_transfer()
178 msg->reply = native ? edp_aux_transfer()
/linux-4.4.14/drivers/block/
H A Dnbd.c336 struct nbd_reply reply; nbd_read_stat() local
339 reply.magic = 0; nbd_read_stat()
340 result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL); nbd_read_stat()
347 if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { nbd_read_stat()
349 (unsigned long)ntohl(reply.magic)); nbd_read_stat()
353 req = nbd_find_request(nbd, *(struct request **)reply.handle); nbd_read_stat()
359 dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n", nbd_read_stat()
360 reply.handle); nbd_read_stat()
364 if (ntohl(reply.error)) { nbd_read_stat()
366 ntohl(reply.error)); nbd_read_stat()
371 dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); nbd_read_stat()
/linux-4.4.14/include/net/nfc/
H A Ddigital.h107 * through callback cb. If an io error occurs or the peer didn't reply
116 * through callback cb. If an io error occurs or the peer didn't reply
152 * would not be able to send new commands, waiting for the reply of the
/linux-4.4.14/net/sunrpc/
H A Dsvc.c555 pages = size / PAGE_SIZE + 1; /* extra page as we hold both request and reply. svc_init_buffer()
1093 /* Setup reply header */ svc_process_common()
1100 /* First words of reply: */ svc_process_common()
1120 * Decode auth data, and add verifier to reply buffer. svc_process_common()
1164 /* Build the reply header. */ svc_process_common()
1176 * better idea of reply size svc_process_common()
1190 /* Encode reply */ svc_process_common()
1199 dprintk("svc: failed to encode reply\n"); svc_process_common()
1206 /* Release reply info */ svc_process_common()
1217 /* Release reply info */ svc_process_common()
1400 /* Finally, send the reply synchronously */ bc_svc_process()
/linux-4.4.14/net/9p/
H A Dtrans_rdma.c309 /* Check that we have not yet received a reply for this request. handle_recv()
312 pr_err("Duplicate reply for request %d", tag); handle_recv()
457 /* Allocate an fcall for the reply */ rdma_request()
467 * there is a reply buffer available for every outstanding rdma_request()
468 * request. A flushed request can result in no reply for an rdma_request()
521 * status in case of a very fast reply. rdma_request()
604 /* A request has been fully flushed without a reply.
/linux-4.4.14/drivers/media/dvb-frontends/
H A Dtda10071.c253 struct dvb_diseqc_slave_reply *reply) tda10071_diseqc_recv_slave_reply()
284 /* reply len */ tda10071_diseqc_recv_slave_reply()
289 reply->msg_len = uitmp & 0x1f; /* [4:0] */ tda10071_diseqc_recv_slave_reply()
290 if (reply->msg_len > sizeof(reply->msg)) tda10071_diseqc_recv_slave_reply()
291 reply->msg_len = sizeof(reply->msg); /* truncate API max */ tda10071_diseqc_recv_slave_reply()
293 /* read reply */ tda10071_diseqc_recv_slave_reply()
301 ret = regmap_bulk_read(dev->regmap, cmd.len, reply->msg, tda10071_diseqc_recv_slave_reply()
302 reply->msg_len); tda10071_diseqc_recv_slave_reply()
252 tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe, struct dvb_diseqc_slave_reply *reply) tda10071_diseqc_recv_slave_reply() argument
/linux-4.4.14/drivers/gpu/drm/gma500/
H A Dcdv_intel_dp.c716 uint8_t reply[20]; cdv_intel_dp_aux_native_read() local
731 reply, reply_bytes); cdv_intel_dp_aux_native_read()
736 ack = reply[0] >> 4; cdv_intel_dp_aux_native_read()
738 memcpy(recv, reply + 1, ret - 1); cdv_intel_dp_aux_native_read()
759 uint8_t reply[2]; cdv_intel_dp_i2c_aux_ch() local
798 reply, reply_bytes); cdv_intel_dp_i2c_aux_ch()
804 switch ((reply[0] >> 4) & DP_AUX_NATIVE_REPLY_MASK) { cdv_intel_dp_i2c_aux_ch()
817 DRM_ERROR("aux_ch invalid native reply 0x%02x\n", cdv_intel_dp_i2c_aux_ch()
818 reply[0]); cdv_intel_dp_i2c_aux_ch()
822 switch ((reply[0] >> 4) & DP_AUX_I2C_REPLY_MASK) { cdv_intel_dp_i2c_aux_ch()
825 *read_byte = reply[1]; cdv_intel_dp_i2c_aux_ch()
836 DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]); cdv_intel_dp_i2c_aux_ch()

