Searched refs:tx_msg (Results 1 - 25 of 25) sorted by relevance

/linux-4.4.14/drivers/acpi/
acpi_ipmi.c
225 static void ipmi_msg_release(struct acpi_ipmi_msg *tx_msg) ipmi_msg_release() argument
227 acpi_ipmi_dev_put(tx_msg->device); ipmi_msg_release()
228 kfree(tx_msg); ipmi_msg_release()
233 struct acpi_ipmi_msg *tx_msg = ipmi_msg_release_kref() local
236 ipmi_msg_release(tx_msg); ipmi_msg_release_kref()
239 static struct acpi_ipmi_msg *acpi_ipmi_msg_get(struct acpi_ipmi_msg *tx_msg) acpi_ipmi_msg_get() argument
241 kref_get(&tx_msg->kref); acpi_ipmi_msg_get()
243 return tx_msg; acpi_ipmi_msg_get()
246 static void acpi_ipmi_msg_put(struct acpi_ipmi_msg *tx_msg) acpi_ipmi_msg_put() argument
248 kref_put(&tx_msg->kref, ipmi_msg_release_kref); acpi_ipmi_msg_put()
253 static int acpi_format_ipmi_request(struct acpi_ipmi_msg *tx_msg, acpi_format_ipmi_request() argument
262 msg = &tx_msg->tx_message; acpi_format_ipmi_request()
270 msg->data = tx_msg->data; acpi_format_ipmi_request()
280 dev_WARN_ONCE(tx_msg->device->dev, true, acpi_format_ipmi_request()
286 memcpy(tx_msg->data, buffer->data, msg->data_len); acpi_format_ipmi_request()
295 tx_msg->addr.addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE; acpi_format_ipmi_request()
296 tx_msg->addr.channel = IPMI_BMC_CHANNEL; acpi_format_ipmi_request()
297 tx_msg->addr.data[0] = 0; acpi_format_ipmi_request()
300 device = tx_msg->device; acpi_format_ipmi_request()
304 tx_msg->tx_msgid = device->curr_msgid; acpi_format_ipmi_request()
339 struct acpi_ipmi_msg *tx_msg; ipmi_flush_tx_msg() local
352 tx_msg = list_first_entry(&ipmi->tx_msg_list, ipmi_flush_tx_msg()
355 list_del(&tx_msg->head); ipmi_flush_tx_msg()
359 complete(&tx_msg->tx_complete); ipmi_flush_tx_msg()
360 acpi_ipmi_msg_put(tx_msg); ipmi_flush_tx_msg()
369 struct acpi_ipmi_msg *tx_msg, *temp; ipmi_cancel_tx_msg() local
374 list_for_each_entry_safe(tx_msg, temp, &ipmi->tx_msg_list, head) { ipmi_cancel_tx_msg()
375 if (msg == tx_msg) { ipmi_cancel_tx_msg()
377 list_del(&tx_msg->head); ipmi_cancel_tx_msg()
384 acpi_ipmi_msg_put(tx_msg); ipmi_cancel_tx_msg()
391 struct acpi_ipmi_msg *tx_msg, *temp; ipmi_msg_handler() local
403 list_for_each_entry_safe(tx_msg, temp, &ipmi_device->tx_msg_list, head) { ipmi_msg_handler()
404 if (msg->msgid == tx_msg->tx_msgid) { ipmi_msg_handler()
406 list_del(&tx_msg->head); ipmi_msg_handler()
434 tx_msg->msg_done = ACPI_IPMI_TIMEOUT; ipmi_msg_handler()
439 tx_msg->rx_len = msg->msg.data_len; ipmi_msg_handler()
440 memcpy(tx_msg->data, msg->msg.data, tx_msg->rx_len); ipmi_msg_handler()
441 tx_msg->msg_done = ACPI_IPMI_OK; ipmi_msg_handler()
444 complete(&tx_msg->tx_complete); ipmi_msg_handler()
445 acpi_ipmi_msg_put(tx_msg); ipmi_msg_handler()
541 struct acpi_ipmi_msg *tx_msg; acpi_ipmi_space_handler() local
556 tx_msg = ipmi_msg_alloc(); acpi_ipmi_space_handler()
557 if (!tx_msg) acpi_ipmi_space_handler()
559 ipmi_device = tx_msg->device; acpi_ipmi_space_handler()
561 if (acpi_format_ipmi_request(tx_msg, address, value) != 0) { acpi_ipmi_space_handler()
562 ipmi_msg_release(tx_msg); acpi_ipmi_space_handler()
566 acpi_ipmi_msg_get(tx_msg); acpi_ipmi_space_handler()
568 /* Do not add a tx_msg that can not be flushed. */ acpi_ipmi_space_handler()
571 ipmi_msg_release(tx_msg); acpi_ipmi_space_handler()
575 list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); acpi_ipmi_space_handler()
580 &tx_msg->addr, acpi_ipmi_space_handler()
581 tx_msg->tx_msgid, acpi_ipmi_space_handler()
582 &tx_msg->tx_message, acpi_ipmi_space_handler()
588 wait_for_completion(&tx_msg->tx_complete); acpi_ipmi_space_handler()
590 acpi_format_ipmi_response(tx_msg, value); acpi_ipmi_space_handler()
594 ipmi_cancel_tx_msg(ipmi_device, tx_msg); acpi_ipmi_space_handler()
595 acpi_ipmi_msg_put(tx_msg); acpi_ipmi_space_handler()
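The acpi_ipmi.c hits above outline the request lifecycle: acpi_ipmi_space_handler() allocates a tx_msg, takes a kref for the device's tx_msg_list, queues it, and waits; ipmi_msg_handler() (or the flush/cancel paths) unlinks it, completes it, and drops the list's reference. A condensed sketch of that refcount-plus-completion pattern follows; the demo_* names and fields are simplified stand-ins, and the spinlock that guards the real list is omitted.

/*
 * Sketch of the tx_msg lifecycle seen in acpi_ipmi.c (illustrative only).
 */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct demo_tx_msg {
    struct kref kref;           /* last put frees the message */
    struct list_head head;      /* linked on the device's TX list */
    struct completion done;     /* signalled by the response handler */
};

static void demo_msg_release(struct kref *kref)
{
    kfree(container_of(kref, struct demo_tx_msg, kref));
}

/* Sender: one reference for the caller, one for the TX list. */
static int demo_send(struct list_head *tx_list)
{
    struct demo_tx_msg *msg = kzalloc(sizeof(*msg), GFP_KERNEL);

    if (!msg)
        return -ENOMEM;
    kref_init(&msg->kref);
    init_completion(&msg->done);

    kref_get(&msg->kref);               /* reference held by the list */
    list_add_tail(&msg->head, tx_list);

    wait_for_completion(&msg->done);    /* the handler completes it below */
    kref_put(&msg->kref, demo_msg_release);
    return 0;
}

/* Response/flush path: unlink, wake the sender, drop the list's reference. */
static void demo_finish(struct demo_tx_msg *msg)
{
    list_del(&msg->head);
    complete(&msg->done);
    kref_put(&msg->kref, demo_msg_release);
}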
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/socklnd/
socklnd_proto.c
45 ksocknal_queue_tx_msg_v1(ksock_conn_t *conn, ksock_tx_t *tx_msg) ksocknal_queue_tx_msg_v1() argument
48 list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); ksocknal_queue_tx_msg_v1()
68 LASSERT(conn->ksnc_tx_carrier->tx_msg.ksm_type == tx->tx_msg.ksm_type); ksocknal_next_tx_carrier()
79 tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); ksocknal_queue_tx_zcack_v2()
97 if (tx->tx_msg.ksm_type == KSOCK_MSG_NOOP) { ksocknal_queue_tx_zcack_v2()
105 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_LNET); ksocknal_queue_tx_zcack_v2()
106 LASSERT(tx->tx_msg.ksm_zc_cookies[1] == 0); ksocknal_queue_tx_zcack_v2()
109 cookie = tx_ack->tx_msg.ksm_zc_cookies[1]; ksocknal_queue_tx_zcack_v2()
112 tx->tx_msg.ksm_zc_cookies[1] = cookie; ksocknal_queue_tx_zcack_v2()
120 ksocknal_queue_tx_msg_v2(ksock_conn_t *conn, ksock_tx_t *tx_msg) ksocknal_queue_tx_msg_v2() argument
125 * Enqueue tx_msg: ksocknal_queue_tx_msg_v2()
127 * tx_msg and return NULL ksocknal_queue_tx_msg_v2()
132 list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); ksocknal_queue_tx_msg_v2()
133 conn->ksnc_tx_carrier = tx_msg; ksocknal_queue_tx_msg_v2()
137 if (tx->tx_msg.ksm_type == KSOCK_MSG_LNET) { /* nothing to carry */ ksocknal_queue_tx_msg_v2()
138 list_add_tail(&tx_msg->tx_list, &conn->ksnc_tx_queue); ksocknal_queue_tx_msg_v2()
142 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); ksocknal_queue_tx_msg_v2()
145 tx_msg->tx_msg.ksm_zc_cookies[1] = tx->tx_msg.ksm_zc_cookies[1]; ksocknal_queue_tx_msg_v2()
149 list_add(&tx_msg->tx_list, &tx->tx_list); ksocknal_queue_tx_msg_v2()
166 tx_ack->tx_msg.ksm_type == KSOCK_MSG_NOOP); ksocknal_queue_tx_zcack_v3()
181 cookie = tx_ack->tx_msg.ksm_zc_cookies[1]; ksocknal_queue_tx_zcack_v3()
186 if (tx->tx_msg.ksm_zc_cookies[1] == SOCKNAL_KEEPALIVE_PING) { ksocknal_queue_tx_zcack_v3()
188 LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0); ksocknal_queue_tx_zcack_v3()
189 tx->tx_msg.ksm_zc_cookies[1] = cookie; ksocknal_queue_tx_zcack_v3()
193 if (cookie == tx->tx_msg.ksm_zc_cookies[0] || ksocknal_queue_tx_zcack_v3()
194 cookie == tx->tx_msg.ksm_zc_cookies[1]) { ksocknal_queue_tx_zcack_v3()
200 if (tx->tx_msg.ksm_zc_cookies[0] == 0) { ksocknal_queue_tx_zcack_v3()
202 if (tx->tx_msg.ksm_zc_cookies[1] > cookie) { ksocknal_queue_tx_zcack_v3()
203 tx->tx_msg.ksm_zc_cookies[0] = tx->tx_msg.ksm_zc_cookies[1]; ksocknal_queue_tx_zcack_v3()
204 tx->tx_msg.ksm_zc_cookies[1] = cookie; ksocknal_queue_tx_zcack_v3()
206 tx->tx_msg.ksm_zc_cookies[0] = cookie; ksocknal_queue_tx_zcack_v3()
209 if (tx->tx_msg.ksm_zc_cookies[0] - tx->tx_msg.ksm_zc_cookies[1] > 2) { ksocknal_queue_tx_zcack_v3()
219 if (tx->tx_msg.ksm_zc_cookies[0] > tx->tx_msg.ksm_zc_cookies[1]) { ksocknal_queue_tx_zcack_v3()
223 LASSERT(tx->tx_msg.ksm_zc_cookies[0] - ksocknal_queue_tx_zcack_v3()
224 tx->tx_msg.ksm_zc_cookies[1] <= 2); ksocknal_queue_tx_zcack_v3()
226 if (tx->tx_msg.ksm_zc_cookies[0] - ksocknal_queue_tx_zcack_v3()
227 tx->tx_msg.ksm_zc_cookies[1] == 2) { ksocknal_queue_tx_zcack_v3()
228 if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) ksocknal_queue_tx_zcack_v3()
230 } else if (cookie == tx->tx_msg.ksm_zc_cookies[1] - 1) { ksocknal_queue_tx_zcack_v3()
231 tmp = tx->tx_msg.ksm_zc_cookies[1]; ksocknal_queue_tx_zcack_v3()
232 } else if (cookie == tx->tx_msg.ksm_zc_cookies[0] + 1) { ksocknal_queue_tx_zcack_v3()
233 tmp = tx->tx_msg.ksm_zc_cookies[0]; ksocknal_queue_tx_zcack_v3()
238 tx->tx_msg.ksm_zc_cookies[0] = tmp - 1; ksocknal_queue_tx_zcack_v3()
239 tx->tx_msg.ksm_zc_cookies[1] = tmp + 1; ksocknal_queue_tx_zcack_v3()
245 if (cookie >= tx->tx_msg.ksm_zc_cookies[0] && ksocknal_queue_tx_zcack_v3()
246 cookie <= tx->tx_msg.ksm_zc_cookies[1]) { ksocknal_queue_tx_zcack_v3()
252 if (cookie == tx->tx_msg.ksm_zc_cookies[1] + 1) { ksocknal_queue_tx_zcack_v3()
253 tx->tx_msg.ksm_zc_cookies[1] = cookie; ksocknal_queue_tx_zcack_v3()
257 if (cookie == tx->tx_msg.ksm_zc_cookies[0] - 1) { ksocknal_queue_tx_zcack_v3()
258 tx->tx_msg.ksm_zc_cookies[0] = cookie; ksocknal_queue_tx_zcack_v3()
429 __u64 c = tx->tx_msg.ksm_zc_cookies[0]; ksocknal_handle_zcack()
432 tx->tx_msg.ksm_zc_cookies[0] = 0; ksocknal_handle_zcack()
714 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); ksocknal_pack_msg_v1()
726 tx->tx_iov[0].iov_base = &tx->tx_msg; ksocknal_pack_msg_v2()
729 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); ksocknal_pack_msg_v2()
731 tx->tx_msg.ksm_u.lnetmsg.ksnm_hdr = tx->tx_lnetmsg->msg_hdr; ksocknal_pack_msg_v2()
735 LASSERT(tx->tx_msg.ksm_type == KSOCK_MSG_NOOP); ksocknal_pack_msg_v2()
socklnd_lib.c
85 tx->tx_msg.ksm_csum == 0) /* not checksummed */ ksocknal_lib_send_iov()
130 if (tx->tx_msg.ksm_zc_cookies[0] != 0) { ksocknal_lib_send_kiov()
390 LASSERT(tx->tx_iov[0].iov_base == &tx->tx_msg); ksocknal_lib_csum_tx()
394 tx->tx_msg.ksm_csum = 0; ksocknal_lib_csum_tx()
419 tx->tx_msg.ksm_csum = csum; ksocknal_lib_csum_tx()
socklnd_cb.c
86 socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_NOOP); ksocknal_alloc_tx_noop()
87 tx->tx_msg.ksm_zc_cookies[1] = cookie; ksocknal_alloc_tx_noop()
438 /* Set tx_msg.ksm_zc_cookies[0] to a unique non-zero cookie and add tx ksocknal_check_zc_req()
442 * tx_msg.ksm_zc_cookies[0] remains non-zero while tx is on ksocknal_check_zc_req()
444 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); ksocknal_check_zc_req()
464 LASSERT(tx->tx_msg.ksm_zc_cookies[0] == 0); ksocknal_check_zc_req()
466 tx->tx_msg.ksm_zc_cookies[0] = peer->ksnp_zc_next_cookie++; ksocknal_check_zc_req()
481 LASSERT(tx->tx_msg.ksm_type != KSOCK_MSG_NOOP); ksocknal_uncheck_zc_req()
488 if (tx->tx_msg.ksm_zc_cookies[0] == 0) { ksocknal_uncheck_zc_req()
494 tx->tx_msg.ksm_zc_cookies[0] = 0; ksocknal_uncheck_zc_req()
687 ksock_msg_t *msg = &tx->tx_msg; ksocknal_queue_tx_locked()
988 socklnd_init_msg(&tx->tx_msg, KSOCK_MSG_LNET); ksocknal_send()
socklnd.h
274 ksock_msg_t tx_msg; /* socklnd message buffer */ member in struct:__anon10438
socklnd.c
1524 LASSERT(tx->tx_msg.ksm_zc_cookies[0] != 0); ksocknal_finalize_zcreq()
1526 tx->tx_msg.ksm_zc_cookies[0] = 0; ksocknal_finalize_zcreq()
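Most of the socklnd hits revolve around one idea: a zero-copy ACK cookie can ride in ksm_zc_cookies[1] of a message that is already queued, so a separate KSOCK_MSG_NOOP only needs to be sent when nothing suitable is waiting (see ksocknal_queue_tx_zcack_v2 above). A rough sketch of that piggyback rule, with simplified demo_* types standing in for ksock_tx_t/ksock_msg_t:

#include <linux/types.h>

struct demo_sock_msg {
    int type;            /* DEMO_MSG_LNET or DEMO_MSG_NOOP */
    u64 zc_cookies[2];   /* [0] = ZC request, [1] = ZC ACK being carried */
};

#define DEMO_MSG_LNET 1
#define DEMO_MSG_NOOP 2

/* Returns 1 if the queued carrier absorbed the ACK cookie, 0 otherwise. */
static int demo_piggyback_zcack(struct demo_sock_msg *carrier, u64 ack_cookie)
{
    if (!carrier)
        return 0;    /* nothing queued: a NOOP has to carry the ACK */
    if (carrier->type == DEMO_MSG_LNET && carrier->zc_cookies[1] == 0) {
        carrier->zc_cookies[1] = ack_cookie;    /* ride along */
        return 1;
    }
    return 0;
}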
/linux-4.4.14/drivers/net/wimax/i2400m/
tx.c
84 * Open: it is marked as active (i2400m->tx_msg is valid) and we
530 * Assumes a TX message is active (i2400m->tx_msg).
537 struct i2400m_msg_hdr *msg_hdr = i2400m->tx_msg; i2400m_tx_fits()
564 struct i2400m_msg_hdr *tx_msg; i2400m_tx_new() local
566 BUG_ON(i2400m->tx_msg != NULL); i2400m_tx_new()
576 tx_msg = i2400m_tx_fifo_push(i2400m, I2400M_TX_PLD_SIZE, i2400m_tx_new()
578 if (tx_msg == NULL) i2400m_tx_new()
580 else if (tx_msg == TAIL_FULL) { i2400m_tx_new()
586 memset(tx_msg, 0, I2400M_TX_PLD_SIZE); i2400m_tx_new()
587 tx_msg->size = I2400M_TX_PLD_SIZE; i2400m_tx_new()
589 i2400m->tx_msg = tx_msg; i2400m_tx_new()
591 tx_msg, (void *) tx_msg - i2400m->tx_buf); i2400m_tx_new()
616 struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg; i2400m_tx_close() local
622 if (tx_msg->size & I2400M_TX_SKIP) /* a skipper? nothing to do */ i2400m_tx_close()
624 num_pls = le16_to_cpu(tx_msg->num_pls); i2400m_tx_close()
629 tx_msg->size |= I2400M_TX_SKIP; i2400m_tx_close()
643 hdr_size = sizeof(*tx_msg) i2400m_tx_close()
644 + le16_to_cpu(tx_msg->num_pls) * sizeof(tx_msg->pld[0]); i2400m_tx_close()
646 tx_msg->offset = I2400M_TX_PLD_SIZE - hdr_size; i2400m_tx_close()
647 tx_msg_moved = (void *) tx_msg + tx_msg->offset; i2400m_tx_close()
648 memmove(tx_msg_moved, tx_msg, hdr_size); i2400m_tx_close()
649 tx_msg_moved->size -= tx_msg->offset; i2400m_tx_close()
676 if (tx_msg != tx_msg_moved) i2400m_tx_close()
677 tx_msg->size += padding; i2400m_tx_close()
679 i2400m->tx_msg = NULL; i2400m_tx_close()
741 if (unlikely(i2400m->tx_msg == NULL)) i2400m_tx()
744 || (is_singleton && i2400m->tx_msg->num_pls != 0))) { i2400m_tx()
747 is_singleton, i2400m->tx_msg->num_pls); i2400m_tx()
751 if (i2400m->tx_msg == NULL) i2400m_tx()
759 if (i2400m->tx_msg->size + padded_len > I2400M_TX_MSG_SIZE) { i2400m_tx()
764 if (i2400m->tx_msg == NULL) i2400m_tx()
780 struct i2400m_msg_hdr *tx_msg = i2400m->tx_msg; i2400m_tx() local
781 unsigned num_pls = le16_to_cpu(tx_msg->num_pls); i2400m_tx()
784 i2400m_pld_set(&tx_msg->pld[num_pls], buf_len, pl_type); i2400m_tx()
786 le32_to_cpu(tx_msg->pld[num_pls].val), i2400m_tx()
788 tx_msg->num_pls = le16_to_cpu(num_pls+1); i2400m_tx()
789 tx_msg->size += padded_len; i2400m_tx()
791 padded_len, tx_msg->size, num_pls+1); i2400m_tx()
794 (void *)tx_msg - i2400m->tx_buf, (size_t)tx_msg->size, i2400m_tx()
843 struct i2400m_msg_hdr *tx_msg, *tx_msg_moved; i2400m_tx_msg_get() local
859 tx_msg = i2400m->tx_buf + i2400m->tx_out % I2400M_TX_BUF_SIZE; i2400m_tx_msg_get()
860 if (tx_msg->size & I2400M_TX_SKIP) { /* skip? */ i2400m_tx_msg_get()
863 (size_t) tx_msg->size & ~I2400M_TX_SKIP); i2400m_tx_msg_get()
864 i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP; i2400m_tx_msg_get()
868 if (tx_msg->num_pls == 0) { /* No payloads? */ i2400m_tx_msg_get()
869 if (tx_msg == i2400m->tx_msg) { /* open, we are done */ i2400m_tx_msg_get()
872 (void *) tx_msg - i2400m->tx_buf); i2400m_tx_msg_get()
873 tx_msg = NULL; i2400m_tx_msg_get()
878 (void *) tx_msg - i2400m->tx_buf, i2400m_tx_msg_get()
879 (size_t) tx_msg->size); i2400m_tx_msg_get()
880 i2400m->tx_out += tx_msg->size & ~I2400M_TX_SKIP; i2400m_tx_msg_get()
884 if (tx_msg == i2400m->tx_msg) /* open msg? */ i2400m_tx_msg_get()
888 tx_msg_moved = (void *) tx_msg + tx_msg->offset; i2400m_tx_msg_get()
889 i2400m->tx_msg_size = tx_msg->size; i2400m_tx_msg_get()
893 current->pid, (void *) tx_msg - i2400m->tx_buf, i2400m_tx_msg_get()
894 (size_t) tx_msg->offset, (size_t) tx_msg->size, i2400m_tx_msg_get()
962 * for tx_in, tx_out, tx_msg_size and tx_msg. We reset them since
992 i2400m->tx_msg = NULL; i2400m_tx_setup()
usb-tx.c
87 int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg, i2400mu_tx() argument
108 tx_msg, tx_msg_size, &sent_size, 200); i2400mu_tx()
155 "tx_msg @%zu %zu B [%d sent]: %d\n", i2400mu_tx()
156 (void *) tx_msg - i2400m->tx_buf, i2400mu_tx()
183 struct i2400m_msg_hdr *tx_msg; i2400mu_txd() local
196 tx_msg = NULL; i2400mu_txd()
200 || (tx_msg = i2400m_tx_msg_get(i2400m, &tx_msg_size))) i2400mu_txd()
204 WARN_ON(tx_msg == NULL); /* should not happen...*/ i2400mu_txd()
206 d_dump(5, dev, tx_msg, tx_msg_size); i2400mu_txd()
208 i2400mu_tx(i2400mu, tx_msg, tx_msg_size); i2400mu_txd()
i2400m.h
394 * @tx_msg: current TX message that is active in the FIFO for
602 struct i2400m_msg_hdr *tx_msg; member in struct:i2400m
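In the i2400m driver, tx_msg is the header of the TX message currently being built inside the circular tx_buf FIFO: i2400m_tx_new() opens it, i2400m_tx() appends payloads and bumps num_pls/size, and i2400m_tx_close() seals it and clears i2400m->tx_msg. A simplified sketch of the append step; the demo_* names and the plain num_pls/size fields are stand-ins (the real header stores num_pls little-endian):

#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>

struct demo_tx_hdr {
    u16 num_pls;     /* payloads described by this header */
    size_t size;     /* bytes used so far, header included */
};

static int demo_tx_append(struct demo_tx_hdr *open_msg, void *fifo_tail,
                          const void *payload, size_t padded_len,
                          size_t msg_size_limit)
{
    if (!open_msg)
        return -EAGAIN;     /* no message open: open a new one first */
    if (open_msg->size + padded_len > msg_size_limit)
        return -ENOSPC;     /* message full: close it, then retry */

    memcpy(fifo_tail, payload, padded_len);  /* payload goes into the FIFO */
    open_msg->num_pls++;
    open_msg->size += padded_len;
    return 0;
}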
/linux-4.4.14/drivers/i2c/busses/
i2c-xiic.c
59 * @tx_msg: Messages from above to be sent
62 * @nmsgs: Number of messages in tx_msg
72 struct i2c_msg *tx_msg; member in struct:xiic_i2c
174 #define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
342 u16 data = i2c->tx_msg->buf[i2c->tx_pos++]; xiic_fill_tx_fifo()
354 i2c->tx_msg = NULL; xiic_wakeup()
381 i2c->tx_msg, i2c->nmsgs); xiic_process()
404 if (i2c->tx_msg) xiic_process()
436 i2c->tx_msg++; xiic_process()
451 if (!i2c->tx_msg) xiic_process()
466 if (!i2c->tx_msg) { xiic_process()
481 i2c->tx_msg++; xiic_process()
516 if (i2c->tx_msg) xiic_busy()
535 struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg; xiic_start_recv()
571 struct i2c_msg *msg = i2c->tx_msg; xiic_start_send()
624 __func__, i2c->tx_msg, fifo_space); __xiic_start_xfer()
626 if (!i2c->tx_msg) __xiic_start_xfer()
635 i2c->tx_msg++; __xiic_start_xfer()
640 if (i2c->tx_msg->flags & I2C_M_RD) { __xiic_start_xfer()
683 i2c->tx_msg = msgs; xiic_xfer()
692 i2c->tx_msg = NULL; xiic_xfer()
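The i2c-xiic hits show a simple cursor: tx_msg points at the struct i2c_msg currently going out, tx_pos is the byte offset into its buffer (xiic_tx_space() is just len - tx_pos), and finishing a message advances tx_msg and decrements nmsgs. A stand-alone sketch of that cursor logic, with a demo_* stand-in for struct xiic_i2c (read messages and the I2C_M_RD handling are ignored here):

#include <linux/i2c.h>
#include <linux/errno.h>

struct demo_xfer {
    struct i2c_msg *tx_msg;  /* message currently being transmitted */
    unsigned int tx_pos;     /* bytes of tx_msg->buf already consumed */
    unsigned int nmsgs;      /* messages still to go, tx_msg included */
};

/* Same computation as the xiic_tx_space() macro. */
static inline unsigned int demo_tx_space(const struct demo_xfer *x)
{
    return x->tx_msg->len - x->tx_pos;
}

/* Fetch the next byte for the TX FIFO, moving to the next i2c_msg as needed. */
static int demo_next_byte(struct demo_xfer *x, u8 *byte)
{
    if (!x->tx_msg || x->nmsgs == 0)
        return -ENODATA;

    while (demo_tx_space(x) == 0) {
        if (--x->nmsgs == 0)
            return -ENODATA;   /* all messages drained */
        x->tx_msg++;           /* next entry in the caller's msgs array */
        x->tx_pos = 0;
    }

    *byte = x->tx_msg->buf[x->tx_pos++];
    return 0;
}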
/linux-4.4.14/sound/soc/intel/common/
sst-ipc.h
50 void (*tx_msg)(struct sst_generic_ipc *, struct ipc_message *); member in struct:sst_plat_ipc_ops
sst-ipc.c
184 if (ipc->ops.tx_msg != NULL) ipc_tx_msgs()
185 ipc->ops.tx_msg(ipc, msg); ipc_tx_msgs()
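sst-ipc keeps the transport behind a function pointer: ops.tx_msg is installed by the platform driver (byt_tx_msg, skl_ipc_tx_msg and hsw_tx_msg in the results further down), and the common queue worker only calls it when it is set. A minimal sketch of that hook, with demo_* stand-ins for the real structures:

struct demo_ipc_msg;    /* opaque here; holds the message payload */
struct demo_ipc;

struct demo_ipc_ops {
    /* platform-specific "push this message to the DSP" routine */
    void (*tx_msg)(struct demo_ipc *ipc, struct demo_ipc_msg *msg);
};

struct demo_ipc {
    struct demo_ipc_ops ops;
};

/* Common queue worker: dispatch only if the platform installed a hook. */
static void demo_tx_dispatch(struct demo_ipc *ipc, struct demo_ipc_msg *msg)
{
    if (ipc->ops.tx_msg)
        ipc->ops.tx_msg(ipc, msg);
}

A platform init routine then just assigns its own routine, mirroring the ipc->ops.tx_msg = ... lines seen in skl_ipc_init(), sst_byt_dsp_init() and sst_hsw_dsp_init() below.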
/linux-4.4.14/drivers/net/can/usb/peak_usb/
pcan_usb_fd.c
723 struct pucan_tx_msg *tx_msg = (struct pucan_tx_msg *)obuf; pcan_usb_fd_encode_msg() local
729 tx_msg->size = cpu_to_le16(tx_msg_size); pcan_usb_fd_encode_msg()
730 tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); pcan_usb_fd_encode_msg()
735 tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_EFF_MASK); pcan_usb_fd_encode_msg()
737 tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_SFF_MASK); pcan_usb_fd_encode_msg()
759 tx_msg->flags = cpu_to_le16(tx_msg_flags); pcan_usb_fd_encode_msg()
760 tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, can_dlc); pcan_usb_fd_encode_msg()
761 memcpy(tx_msg->d, cfd->data, cfd->len); pcan_usb_fd_encode_msg()
765 tx_msg = (struct pucan_tx_msg *)(obuf + tx_msg_size); pcan_usb_fd_encode_msg()
767 tx_msg->size = 0; pcan_usb_fd_encode_msg()
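pcan_usb_fd_encode_msg() fills a little-endian pucan_tx_msg record in the output buffer and then writes a zero-sized record to terminate it. A loose sketch of that encode-and-terminate shape; the demo_* struct and the DEMO_MSG_CAN_TX constant are stand-ins and only a few fields are shown:

#include <linux/can.h>
#include <linux/types.h>
#include <linux/string.h>
#include <asm/byteorder.h>

#define DEMO_MSG_CAN_TX 2    /* stand-in for PUCAN_MSG_CAN_TX */

struct demo_tx_rec {
    __le16 size;             /* record length; 0 terminates the buffer */
    __le16 type;
    __le32 can_id;
    u8 d[CANFD_MAX_DLEN];
};

static void demo_encode(void *obuf, const struct canfd_frame *cfd)
{
    struct demo_tx_rec *rec = obuf;

    rec->size = cpu_to_le16(sizeof(*rec));
    rec->type = cpu_to_le16(DEMO_MSG_CAN_TX);
    if (cfd->can_id & CAN_EFF_FLAG)
        rec->can_id = cpu_to_le32(cfd->can_id & CAN_EFF_MASK);
    else
        rec->can_id = cpu_to_le32(cfd->can_id & CAN_SFF_MASK);
    memcpy(rec->d, cfd->data, cfd->len);

    /* zero-size record marks the end of the outgoing buffer */
    rec = (struct demo_tx_rec *)((u8 *)obuf + sizeof(*rec));
    rec->size = 0;
}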
pcan_usb_pro.h
184 struct pcan_usb_pro_txmsg tx_msg; member in union:pcan_usb_pro_rec
/linux-4.4.14/drivers/net/ieee802154/
mrf24j40.c
209 struct spi_message tx_msg; member in struct:mrf24j40
585 ret = spi_async(devrec->spi, &devrec->tx_msg); write_tx_buf()
1176 spi_message_init(&devrec->tx_msg); mrf24j40_setup_tx_spi_messages()
1177 devrec->tx_msg.context = devrec; mrf24j40_setup_tx_spi_messages()
1178 devrec->tx_msg.complete = write_tx_buf_complete; mrf24j40_setup_tx_spi_messages()
1181 spi_message_add_tail(&devrec->tx_hdr_trx, &devrec->tx_msg); mrf24j40_setup_tx_spi_messages()
1184 spi_message_add_tail(&devrec->tx_len_trx, &devrec->tx_msg); mrf24j40_setup_tx_spi_messages()
1185 spi_message_add_tail(&devrec->tx_buf_trx, &devrec->tx_msg); mrf24j40_setup_tx_spi_messages()
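mrf24j40.c builds its TX spi_message once, at setup time, by chaining the header, length and payload transfers onto tx_msg; every frame transmit then just calls spi_async() on the prepared message. A short sketch of that reusable-message pattern, with demo_* names standing in for the driver's structures:

#include <linux/spi/spi.h>

struct demo_radio {
    struct spi_device *spi;
    struct spi_message tx_msg;       /* reused for every frame */
    struct spi_transfer tx_hdr_trx;  /* command/header bytes */
    struct spi_transfer tx_buf_trx;  /* frame payload */
};

static void demo_tx_complete(void *context)
{
    /* struct demo_radio *radio = context; handle TX completion here */
}

static void demo_setup_tx_msg(struct demo_radio *radio)
{
    spi_message_init(&radio->tx_msg);
    radio->tx_msg.context = radio;
    radio->tx_msg.complete = demo_tx_complete;
    spi_message_add_tail(&radio->tx_hdr_trx, &radio->tx_msg);
    spi_message_add_tail(&radio->tx_buf_trx, &radio->tx_msg);
}

static int demo_write_tx_buf(struct demo_radio *radio)
{
    /* tx_hdr_trx/tx_buf_trx .tx_buf and .len are pointed at the
     * command and payload buffers before each submit */
    return spi_async(radio->spi, &radio->tx_msg);   /* non-blocking submit */
}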
/linux-4.4.14/drivers/staging/lustre/lnet/klnds/o2iblnd/
o2iblnd_cb.c
234 tx->tx_msg->ibm_type == txtype) kiblnd_find_waiting_tx_locked()
239 tx->tx_msg->ibm_type, txtype); kiblnd_find_waiting_tx_locked()
294 tx->tx_msg->ibm_u.completion.ibcm_status = status; kiblnd_send_completion()
295 tx->tx_msg->ibm_u.completion.ibcm_cookie = cookie; kiblnd_send_completion()
413 * (a) I can overwrite tx_msg since my peer has received it! kiblnd_handle_rx()
756 kib_msg_t *msg = tx->tx_msg;
984 if (tx->tx_msg->ibm_type == IBLND_MSG_NOOP) kiblnd_tx_complete()
1023 kiblnd_init_msg(tx->tx_msg, type, body_nob); kiblnd_init_tx_msg()
1048 kib_msg_t *ibmsg = tx->tx_msg; kiblnd_init_rdma()
1145 LASSERT(tx->tx_msg->ibm_type != IBLND_MSG_PUT_DONE); kiblnd_queue_tx_locked()
1149 LASSERT(tx->tx_msg->ibm_type == IBLND_MSG_PUT_DONE); kiblnd_queue_tx_locked()
1152 switch (tx->tx_msg->ibm_type) { kiblnd_queue_tx_locked()
1473 ibmsg = tx->tx_msg; kiblnd_send()
1541 ibmsg = tx->tx_msg; kiblnd_send()
1564 ibmsg = tx->tx_msg; kiblnd_send()
1713 txmsg = tx->tx_msg; kiblnd_recv()
o2iblnd.c
1271 tx->tx_msg = (kib_msg_t *)(((char *)page_address(page)) + kiblnd_map_tx_pool()
1275 tpo->tpo_hdev->ibh_ibdev, tx->tx_msg, kiblnd_map_tx_pool()
o2iblnd.h
505 kib_msg_t *tx_msg; /* message buffer (host vaddr) */ member in struct:kib_tx
/linux-4.4.14/sound/soc/intel/baytrail/
sst-baytrail-ipc.c
494 void *tx_msg; sst_byt_stream_start() local
503 tx_msg = &start_stream; sst_byt_stream_start()
506 ret = sst_ipc_tx_message_nowait(&byt->ipc, header, tx_msg, size); sst_byt_stream_start()
708 ipc->ops.tx_msg = byt_tx_msg; sst_byt_dsp_init()
/linux-4.4.14/drivers/net/wireless/
rayctl.h
255 #define TX_BUF_SIZE (2048 - sizeof(struct tx_msg))
575 struct tx_msg { struct
ray_cs.c
84 static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx,
86 static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx,
884 struct tx_msg __iomem *ptx; /* Address of xmit buffer in PC space */ ray_hw_xmit()
951 static int translate_frame(ray_dev_t *local, struct tx_msg __iomem *ptx, translate_frame()
985 static void ray_build_header(ray_dev_t *local, struct tx_msg __iomem *ptx, ray_build_header()
2688 struct tx_msg __iomem *ptx; build_auth_frame()
/linux-4.4.14/drivers/net/can/usb/
esd_usb2.c
137 struct tx_msg { struct
179 struct tx_msg tx;
/linux-4.4.14/sound/soc/intel/skylake/
skl-sst-ipc.c
501 ipc->ops.tx_msg = skl_ipc_tx_msg; skl_ipc_init()
/linux-4.4.14/drivers/gpu/drm/
drm_dp_mst_topology.c
1490 /* construct a chunk from the first msg in the tx_msg queue */ process_single_down_tx_qlock()
1522 /* construct a chunk from the first msg in the tx_msg queue */ process_single_up_tx_qlock()
/linux-4.4.14/sound/soc/intel/haswell/
sst-haswell-ipc.c
2130 ipc->ops.tx_msg = hsw_tx_msg; sst_hsw_dsp_init()

Completed in 727 milliseconds