Lines matching references to txmsg (struct drm_dp_sideband_msg_tx *)
733 struct drm_dp_sideband_msg_tx *txmsg) in check_txmsg_state() argument
742 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || in check_txmsg_state()
743 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); in check_txmsg_state()
748 struct drm_dp_sideband_msg_tx *txmsg) in drm_dp_mst_wait_tx_reply() argument
754 check_txmsg_state(mgr, txmsg), in drm_dp_mst_wait_tx_reply()
758 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { in drm_dp_mst_wait_tx_reply()
763 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); in drm_dp_mst_wait_tx_reply()
769 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || in drm_dp_mst_wait_tx_reply()
770 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) { in drm_dp_mst_wait_tx_reply()
771 list_del(&txmsg->next); in drm_dp_mst_wait_tx_reply()
774 if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || in drm_dp_mst_wait_tx_reply()
775 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { in drm_dp_mst_wait_tx_reply()
776 mstb->tx_slots[txmsg->seqno] = NULL; in drm_dp_mst_wait_tx_reply()
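
A note on how these fragments fit together: check_txmsg_state() (733-743) is the wake-up condition for the sleep in drm_dp_mst_wait_tx_reply() (748-776), and the cleanup at 769-776 unlinks a timed-out message from whichever stage it reached. Below is a minimal sketch of that flow, assuming a wait_event_timeout() on a manager-level wait queue; the mgr->tx_waitq name, the qlock mutex and the 4-second timeout are assumptions, while the state checks and the cleanup mirror the lines above.

static int example_wait_tx_reply(struct drm_dp_mst_branch *mstb,
                                 struct drm_dp_sideband_msg_tx *txmsg)
{
        struct drm_dp_mst_topology_mgr *mgr = mstb->mgr;
        int ret;

        /* Sleep until the down-reply handler marks the message RX or the
         * transmit path marks it TIMEOUT (the check_txmsg_state() condition). */
        ret = wait_event_timeout(mgr->tx_waitq,
                                 check_txmsg_state(mgr, txmsg),
                                 msecs_to_jiffies(4000));    /* assumed timeout */

        mutex_lock(&mgr->qlock);
        if (ret > 0) {
                /* Woken, but the transmit path already flagged a failure. */
                if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT)
                        ret = -EIO;
        } else {
                DRM_DEBUG_KMS("timedout msg send %p %d %d\n",
                              txmsg, txmsg->state, txmsg->seqno);
                ret = -EIO;

                /* Still linked on the down queue: unlink it. */
                if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED ||
                    txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND)
                        list_del(&txmsg->next);

                /* Already claimed a sequence-number slot: release it. */
                if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND ||
                    txmsg->state == DRM_DP_SIDEBAND_TX_SENT)
                        mstb->tx_slots[txmsg->seqno] = NULL;
        }
        mutex_unlock(&mgr->qlock);

        return ret;
}
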
1389 struct drm_dp_sideband_msg_tx *txmsg) in set_hdr_from_dst_qlock() argument
1391 struct drm_dp_mst_branch *mstb = txmsg->dst; in set_hdr_from_dst_qlock()
1395 if (txmsg->seqno == -1) { in set_hdr_from_dst_qlock()
1401 txmsg->seqno = mstb->last_seqno; in set_hdr_from_dst_qlock()
1404 txmsg->seqno = 0; in set_hdr_from_dst_qlock()
1406 txmsg->seqno = 1; in set_hdr_from_dst_qlock()
1407 mstb->tx_slots[txmsg->seqno] = txmsg; in set_hdr_from_dst_qlock()
1410 req_type = txmsg->msg[0] & 0x7f; in set_hdr_from_dst_qlock()
1416 hdr->path_msg = txmsg->path_msg; in set_hdr_from_dst_qlock()
1421 hdr->seqno = txmsg->seqno; in set_hdr_from_dst_qlock()
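
The seqno logic at 1395-1407 is the slot-allocation half of the transmit path: each branch device can have two sideband transactions outstanding, tracked in tx_slots[0..1]. The sketch below restates that selection plus the header fields visible above, in the context of set_hdr_from_dst_qlock(); the slot-full check, the -EAGAIN value and the last_seqno toggle are assumptions, the rest mirrors the fragments.

        /* A message that does not yet own a sequence number picks a free slot;
         * when both slots are free, last_seqno alternates between 0 and 1. */
        if (txmsg->seqno == -1) {
                if (mstb->tx_slots[0] && mstb->tx_slots[1])
                        return -EAGAIN;                 /* both slots busy (assumed error value) */

                if (!mstb->tx_slots[0] && !mstb->tx_slots[1]) {
                        txmsg->seqno = mstb->last_seqno;
                        mstb->last_seqno ^= 1;          /* assumed toggle */
                } else if (!mstb->tx_slots[0]) {
                        txmsg->seqno = 0;
                } else {
                        txmsg->seqno = 1;
                }
                mstb->tx_slots[txmsg->seqno] = txmsg;   /* claim the slot until the reply lands */
        }

        /* The low 7 bits of the first encoded byte carry the request type; the
         * sideband header then mirrors the message's path_msg flag and seqno. */
        req_type = txmsg->msg[0] & 0x7f;
        hdr->path_msg = txmsg->path_msg;
        hdr->seqno = txmsg->seqno;

The slot is released again either when the reply arrives (2210-2251) or by the timeout paths at 776 and 1506.
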
1428 struct drm_dp_sideband_msg_tx *txmsg, in process_single_tx_qlock() argument
1438 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) { in process_single_tx_qlock()
1439 txmsg->seqno = -1; in process_single_tx_qlock()
1440 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; in process_single_tx_qlock()
1445 ret = set_hdr_from_dst_qlock(&hdr, txmsg); in process_single_tx_qlock()
1450 len = txmsg->cur_len - txmsg->cur_offset; in process_single_tx_qlock()
1456 if (len == txmsg->cur_len) in process_single_tx_qlock()
1464 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); in process_single_tx_qlock()
1475 txmsg->cur_offset += tosend; in process_single_tx_qlock()
1476 if (txmsg->cur_offset == txmsg->cur_len) { in process_single_tx_qlock()
1477 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; in process_single_tx_qlock()
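
process_single_tx_qlock() (1428-1477) is the chunking loop body: a message longer than one sideband chunk goes out in pieces, and the header's start/end-of-message-transaction bits bracket the first and last chunk. A paraphrased sketch follows; the chunk buffer, the per-chunk payload space and the header/CRC encoding and DPCD write are only hinted at in comments, while the state and offset bookkeeping follows the lines above.

        if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) {
                txmsg->seqno = -1;      /* slot assigned later by set_hdr_from_dst_qlock() */
                txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND;
        }

        ret = set_hdr_from_dst_qlock(&hdr, txmsg);
        if (ret < 0)
                return ret;

        len = txmsg->cur_len - txmsg->cur_offset;       /* bytes still to transmit */
        tosend = min(len, space);                       /* 'space': payload room per chunk (assumed) */

        if (len == txmsg->cur_len)
                hdr.somt = 1;           /* first chunk of the message (assumed flag name) */
        if (tosend == len)
                hdr.eomt = 1;           /* last chunk of the message (assumed flag name) */

        /* encode the header, append the payload chunk and a CRC, then write
         * the result to the sideband DPCD window -- elided here */
        memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend);

        txmsg->cur_offset += tosend;
        if (txmsg->cur_offset == txmsg->cur_len) {
                txmsg->state = DRM_DP_SIDEBAND_TX_SENT; /* whole message on the wire */
                return 1;
        }
        return 0;
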
1485 struct drm_dp_sideband_msg_tx *txmsg; in process_single_down_tx_qlock() local
1497 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); in process_single_down_tx_qlock()
1498 ret = process_single_tx_qlock(mgr, txmsg, false); in process_single_down_tx_qlock()
1501 list_del(&txmsg->next); in process_single_down_tx_qlock()
1504 list_del(&txmsg->next); in process_single_down_tx_qlock()
1505 if (txmsg->seqno != -1) in process_single_down_tx_qlock()
1506 txmsg->dst->tx_slots[txmsg->seqno] = NULL; in process_single_down_tx_qlock()
1507 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; in process_single_down_tx_qlock()
1518 struct drm_dp_sideband_msg_tx *txmsg) in process_single_up_tx_qlock() argument
1523 ret = process_single_tx_qlock(mgr, txmsg, true); in process_single_up_tx_qlock()
1528 txmsg->dst->tx_slots[txmsg->seqno] = NULL; in process_single_up_tx_qlock()
1532 struct drm_dp_sideband_msg_tx *txmsg) in drm_dp_queue_down_tx() argument
1535 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); in drm_dp_queue_down_tx()
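
drm_dp_queue_down_tx() (1532-1535) is the producer side of the down queue that process_single_down_tx_qlock() (1485-1507) drains. Only the list_add_tail() line is visible above; the sketch below assumes the qlock mutex and an "only kick the sender if the queue was empty" check.

static void example_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr,
                                  struct drm_dp_sideband_msg_tx *txmsg)
{
        mutex_lock(&mgr->qlock);
        list_add_tail(&txmsg->next, &mgr->tx_msg_downq);

        /* If nothing else was pending, start transmitting right away;
         * otherwise this message waits its turn behind the current one. */
        if (list_is_singular(&mgr->tx_msg_downq))
                process_single_down_tx_qlock(mgr);

        mutex_unlock(&mgr->qlock);
}

When a send fails, the error path at 1504-1507 unlinks the message, releases its sequence-number slot if it took one, and sets DRM_DP_SIDEBAND_TX_TIMEOUT so the sleeper in drm_dp_mst_wait_tx_reply() reports the failure.
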
1545 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_send_link_address() local
1548 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_send_link_address()
1549 if (!txmsg) in drm_dp_send_link_address()
1552 txmsg->dst = mstb; in drm_dp_send_link_address()
1553 len = build_link_address(txmsg); in drm_dp_send_link_address()
1555 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_send_link_address()
1557 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); in drm_dp_send_link_address()
1561 if (txmsg->reply.reply_type == 1) in drm_dp_send_link_address()
1564 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); in drm_dp_send_link_address()
1565 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { in drm_dp_send_link_address()
1567 txmsg->reply.u.link_addr.ports[i].input_port, in drm_dp_send_link_address()
1568 txmsg->reply.u.link_addr.ports[i].peer_device_type, in drm_dp_send_link_address()
1569 txmsg->reply.u.link_addr.ports[i].port_number, in drm_dp_send_link_address()
1570 txmsg->reply.u.link_addr.ports[i].dpcd_revision, in drm_dp_send_link_address()
1571 txmsg->reply.u.link_addr.ports[i].mcs, in drm_dp_send_link_address()
1572 txmsg->reply.u.link_addr.ports[i].ddps, in drm_dp_send_link_address()
1573 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, in drm_dp_send_link_address()
1574 txmsg->reply.u.link_addr.ports[i].num_sdp_streams, in drm_dp_send_link_address()
1575 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); in drm_dp_send_link_address()
1578 drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); in drm_dp_send_link_address()
1580 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { in drm_dp_send_link_address()
1581 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); in drm_dp_send_link_address()
1588 kfree(txmsg); in drm_dp_send_link_address()
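
drm_dp_send_link_address() above is the template every down-request in this listing follows: allocate a txmsg, aim it at the destination branch, build the request body, queue it, wait, inspect reply_type (1 is a NAK, as the i2c path at 2996 notes), and free it. A condensed sketch of that pattern; build_example_request() is a hypothetical stand-in for builders such as build_link_address() or build_enum_path_resources(), and the "positive return means a reply arrived" convention is assumed.

static int example_send_request(struct drm_dp_mst_topology_mgr *mgr,
                                struct drm_dp_mst_branch *mstb)
{
        struct drm_dp_sideband_msg_tx *txmsg;
        int ret;

        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg)
                return -ENOMEM;

        txmsg->dst = mstb;                      /* branch device that should answer */
        build_example_request(txmsg);           /* hypothetical: fills txmsg->msg / txmsg->cur_len */

        drm_dp_queue_down_tx(mgr, txmsg);       /* queued; sent chunk by chunk under the queue lock */

        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
        if (ret > 0) {                          /* assumed: > 0 means a reply was received */
                if (txmsg->reply.reply_type == 1)       /* branch NAKed the request */
                        ret = -EINVAL;
                /* otherwise consume the request-specific ACK in txmsg->reply.u.* */
        }

        kfree(txmsg);
        return ret;
}

drm_dp_send_enum_path_resources() (1597-1622), drm_dp_payload_send_msg() (1662-1701), drm_dp_send_dpcd_write() (1911-1936) and drm_dp_mst_i2c_xfer() (2952-3008) below all repeat this shape and differ only in the builder used and in which reply union member they read back.
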
1597 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_send_enum_path_resources() local
1600 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_send_enum_path_resources()
1601 if (!txmsg) in drm_dp_send_enum_path_resources()
1604 txmsg->dst = mstb; in drm_dp_send_enum_path_resources()
1605 len = build_enum_path_resources(txmsg, port->port_num); in drm_dp_send_enum_path_resources()
1607 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_send_enum_path_resources()
1609 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); in drm_dp_send_enum_path_resources()
1611 if (txmsg->reply.reply_type == 1) in drm_dp_send_enum_path_resources()
1614 if (port->port_num != txmsg->reply.u.path_resources.port_number) in drm_dp_send_enum_path_resources()
1616 DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg->reply.u.path_resources.full_payload_bw_number, in drm_dp_send_enum_path_resources()
1617 txmsg->reply.u.path_resources.avail_payload_bw_number); in drm_dp_send_enum_path_resources()
1618 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; in drm_dp_send_enum_path_resources()
1622 kfree(txmsg); in drm_dp_send_enum_path_resources()
1662 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_payload_send_msg() local
1681 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_payload_send_msg()
1682 if (!txmsg) { in drm_dp_payload_send_msg()
1687 txmsg->dst = mstb; in drm_dp_payload_send_msg()
1688 len = build_allocate_payload(txmsg, port_num, in drm_dp_payload_send_msg()
1692 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_payload_send_msg()
1694 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); in drm_dp_payload_send_msg()
1696 if (txmsg->reply.reply_type == 1) { in drm_dp_payload_send_msg()
1701 kfree(txmsg); in drm_dp_payload_send_msg()
1890 struct drm_dp_sideband_msg_tx *txmsg;
1892 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1893 if (!txmsg)
1896 len = build_dpcd_read(txmsg, port->port_num, 0, 8);
1897 txmsg->dst = port->parent;
1899 drm_dp_queue_down_tx(mgr, txmsg);
1911 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_send_dpcd_write() local
1918 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_send_dpcd_write()
1919 if (!txmsg) { in drm_dp_send_dpcd_write()
1924 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); in drm_dp_send_dpcd_write()
1925 txmsg->dst = mstb; in drm_dp_send_dpcd_write()
1927 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_send_dpcd_write()
1929 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); in drm_dp_send_dpcd_write()
1931 if (txmsg->reply.reply_type == 1) { in drm_dp_send_dpcd_write()
1936 kfree(txmsg); in drm_dp_send_dpcd_write()
1956 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_send_up_ack_reply() local
1958 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_send_up_ack_reply()
1959 if (!txmsg) in drm_dp_send_up_ack_reply()
1962 txmsg->dst = mstb; in drm_dp_send_up_ack_reply()
1963 txmsg->seqno = seqno; in drm_dp_send_up_ack_reply()
1964 drm_dp_encode_up_ack_reply(txmsg, req_type); in drm_dp_send_up_ack_reply()
1968 process_single_up_tx_qlock(mgr, txmsg); in drm_dp_send_up_ack_reply()
1972 kfree(txmsg); in drm_dp_send_up_ack_reply()
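
The up-reply path (1956-1972) is the one exception to the queue-and-wait pattern: the ACK for an up request reuses the request's sequence number, bypasses tx_msg_downq, is pushed out synchronously, and, since no reply is expected, the message is freed immediately. A sketch of that tail, with the qlock usage assumed.

        txmsg->dst = mstb;
        txmsg->seqno = seqno;                   /* echo the up-request's sequence number */
        drm_dp_encode_up_ack_reply(txmsg, req_type);

        mutex_lock(&mgr->qlock);                /* assumed lock around the raw send */
        process_single_up_tx_qlock(mgr, txmsg); /* single chunk, sent immediately, no wait */
        mutex_unlock(&mgr->qlock);

        kfree(txmsg);                           /* nothing will ever complete this txmsg */
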
2210 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_mst_handle_down_rep() local
2226 txmsg = mstb->tx_slots[slot]; in drm_dp_mst_handle_down_rep()
2230 if (!txmsg) { in drm_dp_mst_handle_down_rep()
2242 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); in drm_dp_mst_handle_down_rep()
2243 if (txmsg->reply.reply_type == 1) { in drm_dp_mst_handle_down_rep()
2244 DRM_DEBUG_KMS("Got NAK reply: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason, txmsg->reply.u.nak.nak_data); in drm_dp_mst_handle_down_rep()
2251 txmsg->state = DRM_DP_SIDEBAND_TX_RX; in drm_dp_mst_handle_down_rep()
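
The fragments at 2210-2251 are the completion half of the handshake: the reply's sequence number indexes the branch's tx_slots[] to find the outstanding txmsg, the parsed reply is stored in it, and its state flips to DRM_DP_SIDEBAND_TX_RX so the waiter returns. The sketch below wraps that in a hypothetical helper; the slot clearing, the locking and the wake_up() on mgr->tx_waitq are assumptions, while the lookup, parse and RX transition are taken from the lines above.

static void example_complete_down_rep(struct drm_dp_mst_topology_mgr *mgr,
                                      struct drm_dp_mst_branch *mstb, int slot)
{
        struct drm_dp_sideband_msg_tx *txmsg = mstb->tx_slots[slot];

        if (!txmsg)             /* no request outstanding for this seqno: drop the reply */
                return;

        /* Decode the raw sideband reply straight into the waiting message. */
        drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply);
        if (txmsg->reply.reply_type == 1)       /* the branch NAKed the request */
                DRM_DEBUG_KMS("NAK: req 0x%02x reason 0x%02x\n",
                              txmsg->reply.req_type, txmsg->reply.u.nak.reason);

        mutex_lock(&mgr->qlock);
        txmsg->state = DRM_DP_SIDEBAND_TX_RX;   /* reply delivered */
        mstb->tx_slots[slot] = NULL;            /* free the seqno slot for the next request */
        mutex_unlock(&mgr->qlock);

        wake_up(&mgr->tx_waitq);                /* unblock drm_dp_mst_wait_tx_reply() */
}
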
2952 struct drm_dp_sideband_msg_tx *txmsg = NULL; in drm_dp_mst_i2c_xfer() local
2982 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_mst_i2c_xfer()
2983 if (!txmsg) { in drm_dp_mst_i2c_xfer()
2988 txmsg->dst = mstb; in drm_dp_mst_i2c_xfer()
2989 drm_dp_encode_sideband_req(&msg, txmsg); in drm_dp_mst_i2c_xfer()
2991 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_mst_i2c_xfer()
2993 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); in drm_dp_mst_i2c_xfer()
2996 if (txmsg->reply.reply_type == 1) { /* got a NAK back */ in drm_dp_mst_i2c_xfer()
3000 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { in drm_dp_mst_i2c_xfer()
3004 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); in drm_dp_mst_i2c_xfer()
3008 kfree(txmsg); in drm_dp_mst_i2c_xfer()