Searched refs:txmsg (Results 1 - 6 of 6) sorted by relevance

/linux-4.1.27/drivers/gpu/drm/
drm_dp_mst_topology.c
733 struct drm_dp_sideband_msg_tx *txmsg) check_txmsg_state()
738 * All updates to txmsg->state are protected by mgr->qlock, and the two check_txmsg_state()
742 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || check_txmsg_state()
743 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); check_txmsg_state()
748 struct drm_dp_sideband_msg_tx *txmsg) drm_dp_mst_wait_tx_reply()
754 check_txmsg_state(mgr, txmsg), drm_dp_mst_wait_tx_reply()
758 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { drm_dp_mst_wait_tx_reply()
763 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); drm_dp_mst_wait_tx_reply()
769 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || drm_dp_mst_wait_tx_reply()
770 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) { drm_dp_mst_wait_tx_reply()
771 list_del(&txmsg->next); drm_dp_mst_wait_tx_reply()
774 if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || drm_dp_mst_wait_tx_reply()
775 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { drm_dp_mst_wait_tx_reply()
776 mstb->tx_slots[txmsg->seqno] = NULL; drm_dp_mst_wait_tx_reply()
1389 struct drm_dp_sideband_msg_tx *txmsg) set_hdr_from_dst_qlock()
1391 struct drm_dp_mst_branch *mstb = txmsg->dst; set_hdr_from_dst_qlock()
1395 if (txmsg->seqno == -1) { set_hdr_from_dst_qlock()
1401 txmsg->seqno = mstb->last_seqno; set_hdr_from_dst_qlock()
1404 txmsg->seqno = 0; set_hdr_from_dst_qlock()
1406 txmsg->seqno = 1; set_hdr_from_dst_qlock()
1407 mstb->tx_slots[txmsg->seqno] = txmsg; set_hdr_from_dst_qlock()
1410 req_type = txmsg->msg[0] & 0x7f; set_hdr_from_dst_qlock()
1416 hdr->path_msg = txmsg->path_msg; set_hdr_from_dst_qlock()
1421 hdr->seqno = txmsg->seqno; set_hdr_from_dst_qlock()
1428 struct drm_dp_sideband_msg_tx *txmsg, process_single_tx_qlock()
1438 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) { process_single_tx_qlock()
1439 txmsg->seqno = -1; process_single_tx_qlock()
1440 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; process_single_tx_qlock()
1445 ret = set_hdr_from_dst_qlock(&hdr, txmsg); process_single_tx_qlock()
1450 len = txmsg->cur_len - txmsg->cur_offset; process_single_tx_qlock()
1456 if (len == txmsg->cur_len) process_single_tx_qlock()
1464 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); process_single_tx_qlock()
1475 txmsg->cur_offset += tosend; process_single_tx_qlock()
1476 if (txmsg->cur_offset == txmsg->cur_len) { process_single_tx_qlock()
1477 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; process_single_tx_qlock()
1485 struct drm_dp_sideband_msg_tx *txmsg; process_single_down_tx_qlock() local
1497 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); process_single_down_tx_qlock()
1498 ret = process_single_tx_qlock(mgr, txmsg, false); process_single_down_tx_qlock()
1500 /* txmsg is sent; it should be in the slots now */ process_single_down_tx_qlock()
1501 list_del(&txmsg->next); process_single_down_tx_qlock()
1504 list_del(&txmsg->next); process_single_down_tx_qlock()
1505 if (txmsg->seqno != -1) process_single_down_tx_qlock()
1506 txmsg->dst->tx_slots[txmsg->seqno] = NULL; process_single_down_tx_qlock()
1507 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; process_single_down_tx_qlock()
1518 struct drm_dp_sideband_msg_tx *txmsg) process_single_up_tx_qlock()
1523 ret = process_single_tx_qlock(mgr, txmsg, true); process_single_up_tx_qlock()
1528 txmsg->dst->tx_slots[txmsg->seqno] = NULL; process_single_up_tx_qlock()
1532 struct drm_dp_sideband_msg_tx *txmsg) drm_dp_queue_down_tx()
1535 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); drm_dp_queue_down_tx()
1545 struct drm_dp_sideband_msg_tx *txmsg; drm_dp_send_link_address() local
1548 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); drm_dp_send_link_address()
1549 if (!txmsg) drm_dp_send_link_address()
1552 txmsg->dst = mstb; drm_dp_send_link_address()
1553 len = build_link_address(txmsg); drm_dp_send_link_address()
1555 drm_dp_queue_down_tx(mgr, txmsg); drm_dp_send_link_address()
1557 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); drm_dp_send_link_address()
1561 if (txmsg->reply.reply_type == 1) drm_dp_send_link_address()
1564 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); drm_dp_send_link_address()
1565 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { drm_dp_send_link_address()
1567 txmsg->reply.u.link_addr.ports[i].input_port, drm_dp_send_link_address()
1568 txmsg->reply.u.link_addr.ports[i].peer_device_type, drm_dp_send_link_address()
1569 txmsg->reply.u.link_addr.ports[i].port_number, drm_dp_send_link_address()
1570 txmsg->reply.u.link_addr.ports[i].dpcd_revision, drm_dp_send_link_address()
1571 txmsg->reply.u.link_addr.ports[i].mcs, drm_dp_send_link_address()
1572 txmsg->reply.u.link_addr.ports[i].ddps, drm_dp_send_link_address()
1573 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, drm_dp_send_link_address()
1574 txmsg->reply.u.link_addr.ports[i].num_sdp_streams, drm_dp_send_link_address()
1575 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); drm_dp_send_link_address()
1578 drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); drm_dp_send_link_address()
1580 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { drm_dp_send_link_address()
1581 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); drm_dp_send_link_address()
1588 kfree(txmsg); drm_dp_send_link_address()
1597 struct drm_dp_sideband_msg_tx *txmsg; drm_dp_send_enum_path_resources() local
1600 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); drm_dp_send_enum_path_resources()
1601 if (!txmsg) drm_dp_send_enum_path_resources()
1604 txmsg->dst = mstb; drm_dp_send_enum_path_resources()
1605 len = build_enum_path_resources(txmsg, port->port_num); drm_dp_send_enum_path_resources()
1607 drm_dp_queue_down_tx(mgr, txmsg); drm_dp_send_enum_path_resources()
1609 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); drm_dp_send_enum_path_resources()
1611 if (txmsg->reply.reply_type == 1) drm_dp_send_enum_path_resources()
1614 if (port->port_num != txmsg->reply.u.path_resources.port_number) drm_dp_send_enum_path_resources()
1616 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number, drm_dp_send_enum_path_resources()
1617 txmsg->reply.u.path_resources.avail_payload_bw_number); drm_dp_send_enum_path_resources()
1618 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; drm_dp_send_enum_path_resources()
1622 kfree(txmsg); drm_dp_send_enum_path_resources()
1662 struct drm_dp_sideband_msg_tx *txmsg; drm_dp_payload_send_msg() local
1681 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); drm_dp_payload_send_msg()
1682 if (!txmsg) { drm_dp_payload_send_msg()
1687 txmsg->dst = mstb; drm_dp_payload_send_msg()
1688 len = build_allocate_payload(txmsg, port_num, drm_dp_payload_send_msg()
1692 drm_dp_queue_down_tx(mgr, txmsg); drm_dp_payload_send_msg()
1694 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); drm_dp_payload_send_msg()
1696 if (txmsg->reply.reply_type == 1) { drm_dp_payload_send_msg()
1701 kfree(txmsg); drm_dp_payload_send_msg()
1890 struct drm_dp_sideband_msg_tx *txmsg;
1892 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1893 if (!txmsg)
1896 len = build_dpcd_read(txmsg, port->port_num, 0, 8);
1897 txmsg->dst = port->parent;
1899 drm_dp_queue_down_tx(mgr, txmsg);
1911 struct drm_dp_sideband_msg_tx *txmsg; drm_dp_send_dpcd_write() local
1918 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); drm_dp_send_dpcd_write()
1919 if (!txmsg) { drm_dp_send_dpcd_write()
1924 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); drm_dp_send_dpcd_write()
1925 txmsg->dst = mstb; drm_dp_send_dpcd_write()
1927 drm_dp_queue_down_tx(mgr, txmsg); drm_dp_send_dpcd_write()
1929 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); drm_dp_send_dpcd_write()
1931 if (txmsg->reply.reply_type == 1) { drm_dp_send_dpcd_write()
1936 kfree(txmsg); drm_dp_send_dpcd_write()
1956 struct drm_dp_sideband_msg_tx *txmsg; drm_dp_send_up_ack_reply() local
1958 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); drm_dp_send_up_ack_reply()
1959 if (!txmsg) drm_dp_send_up_ack_reply()
1962 txmsg->dst = mstb; drm_dp_send_up_ack_reply()
1963 txmsg->seqno = seqno; drm_dp_send_up_ack_reply()
1964 drm_dp_encode_up_ack_reply(txmsg, req_type); drm_dp_send_up_ack_reply()
1968 process_single_up_tx_qlock(mgr, txmsg); drm_dp_send_up_ack_reply()
1972 kfree(txmsg); drm_dp_send_up_ack_reply()
2210 struct drm_dp_sideband_msg_tx *txmsg; drm_dp_mst_handle_down_rep() local
2226 txmsg = mstb->tx_slots[slot]; drm_dp_mst_handle_down_rep()
2230 if (!txmsg) { drm_dp_mst_handle_down_rep()
2242 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); drm_dp_mst_handle_down_rep()
2243 if (txmsg->reply.reply_type == 1) { drm_dp_mst_handle_down_rep()
2244 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data); drm_dp_mst_handle_down_rep()
2251 txmsg->state = DRM_DP_SIDEBAND_TX_RX; drm_dp_mst_handle_down_rep()
2952 struct drm_dp_sideband_msg_tx *txmsg = NULL; drm_dp_mst_i2c_xfer() local
2982 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); drm_dp_mst_i2c_xfer()
2983 if (!txmsg) { drm_dp_mst_i2c_xfer()
2988 txmsg->dst = mstb; drm_dp_mst_i2c_xfer()
2989 drm_dp_encode_sideband_req(&msg, txmsg); drm_dp_mst_i2c_xfer()
2991 drm_dp_queue_down_tx(mgr, txmsg); drm_dp_mst_i2c_xfer()
2993 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); drm_dp_mst_i2c_xfer()
2996 if (txmsg->reply.reply_type == 1) { /* got a NAK back */ drm_dp_mst_i2c_xfer()
3000 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { drm_dp_mst_i2c_xfer()
3004 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); drm_dp_mst_i2c_xfer()
3008 kfree(txmsg); drm_dp_mst_i2c_xfer()
732 check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg) check_txmsg_state() argument
747 drm_dp_mst_wait_tx_reply(struct drm_dp_mst_branch *mstb, struct drm_dp_sideband_msg_tx *txmsg) drm_dp_mst_wait_tx_reply() argument
1388 set_hdr_from_dst_qlock(struct drm_dp_sideband_msg_hdr *hdr, struct drm_dp_sideband_msg_tx *txmsg) set_hdr_from_dst_qlock() argument
1427 process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg, bool up) process_single_tx_qlock() argument
1517 process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg) process_single_up_tx_qlock() argument
1531 drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_sideband_msg_tx *txmsg) drm_dp_queue_down_tx() argument
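The hits above repeat one lifecycle: allocate a txmsg, build the request body, queue it on mgr->tx_msg_downq, block for the reply, check for a sideband NAK (reply_type == 1), then free. Below is a condensed sketch of that pattern, written as if inside drm_dp_mst_topology.c (drm_dp_queue_down_tx() and drm_dp_mst_wait_tx_reply() are static helpers there); build_query() is a hypothetical stand-in for builders such as build_link_address() or build_enum_path_resources().

#include <linux/slab.h>
#include <drm/drm_dp_mst_helper.h>

static int send_down_request(struct drm_dp_mst_topology_mgr *mgr,
			     struct drm_dp_mst_branch *mstb)
{
	struct drm_dp_sideband_msg_tx *txmsg;
	int ret;

	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
	if (!txmsg)
		return -ENOMEM;

	txmsg->dst = mstb;		/* reply is matched back via mstb->tx_slots */
	build_query(txmsg);		/* hypothetical: fills txmsg->msg */

	drm_dp_queue_down_tx(mgr, txmsg);	/* tail of mgr->tx_msg_downq */

	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);	/* blocks until TX_RX or TX_TIMEOUT */
	if (ret > 0) {
		if (txmsg->reply.reply_type == 1)	/* sideband NAK */
			ret = -EINVAL;
		else
			ret = 0;	/* txmsg->reply.u.* is now valid */
	}

	kfree(txmsg);
	return ret;
}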
/linux-4.1.27/drivers/hsi/clients/
cmt_speech.c
333 struct hsi_msg *txmsg, *rxmsg; cs_hsi_alloc_data() local
345 txmsg = hsi_alloc_msg(1, GFP_KERNEL); cs_hsi_alloc_data()
346 if (!txmsg) { cs_hsi_alloc_data()
350 txmsg->channel = cs_char_data.channel_id_data; cs_hsi_alloc_data()
351 txmsg->destructor = cs_hsi_data_destructor; cs_hsi_alloc_data()
352 txmsg->context = hi; cs_hsi_alloc_data()
355 hi->data_tx_msg = txmsg; cs_hsi_alloc_data()
702 struct hsi_msg *txmsg; cs_hsi_write_on_data() local
726 txmsg = hi->data_tx_msg; cs_hsi_write_on_data()
727 sg_init_one(txmsg->sgt.sgl, address, hi->buf_size); cs_hsi_write_on_data()
728 txmsg->complete = cs_hsi_write_on_data_complete; cs_hsi_write_on_data()
729 ret = hsi_async_write(hi->cl, txmsg); cs_hsi_write_on_data()
731 cs_hsi_data_write_error(hi, txmsg); cs_hsi_write_on_data()
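Here txmsg is a reusable HSI transfer descriptor: allocated once with one scatterlist entry, then re-aimed at the payload buffer for each write. A minimal sketch of that split, assuming the v4.1 HSI client API (hsi_alloc_msg()/hsi_async_write() are the real calls; the stub callbacks stand in for cs_hsi_data_destructor and cs_hsi_write_on_data_complete):

#include <linux/hsi/hsi.h>
#include <linux/scatterlist.h>

static void tx_destructor(struct hsi_msg *msg) { }	/* placeholder */
static void tx_complete(struct hsi_msg *msg) { }	/* placeholder */

static struct hsi_msg *tx_setup(unsigned int channel, void *context)
{
	struct hsi_msg *txmsg = hsi_alloc_msg(1, GFP_KERNEL); /* 1 sg entry */

	if (!txmsg)
		return NULL;
	txmsg->channel = channel;		/* data channel id */
	txmsg->destructor = tx_destructor;
	txmsg->context = context;		/* back-pointer for callbacks */
	return txmsg;
}

static int tx_write(struct hsi_client *cl, struct hsi_msg *txmsg,
		    void *buf, size_t len)
{
	sg_init_one(txmsg->sgt.sgl, buf, len);	/* aim the msg at the buffer */
	txmsg->complete = tx_complete;		/* completion callback */
	return hsi_async_write(cl, txmsg);	/* negative return => TX error */
}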
/linux-4.1.27/net/iucv/
af_iucv.c
1038 struct iucv_message txmsg; iucv_sock_sendmsg() local
1073 txmsg.class = 0; iucv_sock_sendmsg()
1099 memcpy(&txmsg.class, for_each_cmsghdr()
1142 txmsg.tag = iucv->send_tag++;
1143 IUCV_SKB_CB(skb)->tag = txmsg.tag;
1147 err = afiucv_hs_send(&txmsg, sk, skb, 0);
1158 err = iucv_send_iprm(iucv->path, &txmsg, skb);
1176 err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0,
1499 struct iucv_message txmsg; iucv_sock_shutdown() local
1521 txmsg.class = 0; iucv_sock_shutdown()
1522 txmsg.tag = 0; iucv_sock_shutdown()
1523 err = pr_iucv->message_send(iucv->path, &txmsg, iucv_sock_shutdown()
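Unlike the DRM and HSI cases, txmsg here is a small on-stack descriptor: set the class, stamp a tag so the transmit-done callback can match the skb, and hand the buffer to the IUCV layer. The netiucv hits further down follow the same class/tag-then-send pattern. A minimal sketch, assuming the real s390 iucv_message_send() API (path/buf/len come from the caller):

#include <net/iucv/iucv.h>

static u32 send_tag;	/* per-connection counter in the real code */

static int iucv_tx(struct iucv_path *path, void *buf, size_t len)
{
	struct iucv_message txmsg;

	txmsg.class = 0;		/* no special message class */
	txmsg.tag = send_tag++;		/* lets the TX-done callback find us */
	return iucv_message_send(path, &txmsg, 0, 0, buf, len);
}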
/linux-4.1.27/drivers/staging/lustre/lnet/klnds/o2iblnd/
o2iblnd_cb.c
1698 kib_msg_t *txmsg; kiblnd_recv() local
1752 txmsg = tx->tx_msg; kiblnd_recv()
1755 &txmsg->ibm_u.putack.ibpam_rd, kiblnd_recv()
1759 &txmsg->ibm_u.putack.ibpam_rd, kiblnd_recv()
1772 txmsg->ibm_u.putack.ibpam_src_cookie = rxmsg->ibm_u.putreq.ibprm_cookie; kiblnd_recv()
1773 txmsg->ibm_u.putack.ibpam_dst_cookie = tx->tx_cookie; kiblnd_recv()
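These hits show the o2iblnd PUT handshake: the receiver of a PUT_REQ builds a PUT_ACK that echoes the sender's cookie and adds its own, so both peers can match the eventual RDMA completion. The struct layouts below are simplified stand-ins for illustration, not the real kib_msg_t:

#include <linux/types.h>

struct putreq { __u64 ibprm_cookie; };			/* from the PUT_REQ */
struct putack { __u64 ibpam_src_cookie, ibpam_dst_cookie; };

static void fill_put_ack(struct putack *ack, const struct putreq *req,
			 __u64 my_cookie)
{
	ack->ibpam_src_cookie = req->ibprm_cookie;	/* echo sender's cookie */
	ack->ibpam_dst_cookie = my_cookie;		/* our matching tx cookie */
}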
/linux-4.1.27/drivers/s390/net/
netiucv.c
726 struct iucv_message txmsg; conn_action_txdone() local
787 txmsg.class = 0; conn_action_txdone()
788 txmsg.tag = 0; conn_action_txdone()
789 rc = iucv_message_send(conn->path, &txmsg, 0, 0, conn_action_txdone()
/linux-4.1.27/include/drm/
drm_dp_mst_helper.h
444 the mstb tx_slots and txmsg->state once they are queued */

Completed in 211 milliseconds