Lines matching refs: txmsg
733 struct drm_dp_sideband_msg_tx *txmsg) in check_txmsg_state() argument
742 ret = (txmsg->state == DRM_DP_SIDEBAND_TX_RX || in check_txmsg_state()
743 txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT); in check_txmsg_state()
748 struct drm_dp_sideband_msg_tx *txmsg) in drm_dp_mst_wait_tx_reply() argument
754 check_txmsg_state(mgr, txmsg), in drm_dp_mst_wait_tx_reply()
758 if (txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT) { in drm_dp_mst_wait_tx_reply()
763 DRM_DEBUG_KMS("timedout msg send %p %d %d\n", txmsg, txmsg->state, txmsg->seqno); in drm_dp_mst_wait_tx_reply()
769 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED || in drm_dp_mst_wait_tx_reply()
770 txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND) { in drm_dp_mst_wait_tx_reply()
771 list_del(&txmsg->next); in drm_dp_mst_wait_tx_reply()
774 if (txmsg->state == DRM_DP_SIDEBAND_TX_START_SEND || in drm_dp_mst_wait_tx_reply()
775 txmsg->state == DRM_DP_SIDEBAND_TX_SENT) { in drm_dp_mst_wait_tx_reply()
776 mstb->tx_slots[txmsg->seqno] = NULL; in drm_dp_mst_wait_tx_reply()
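
The helpers above implement the blocking wait for a down reply: check_txmsg_state() is the wake-up predicate, and drm_dp_mst_wait_tx_reply() sleeps until the message reaches the received or timed-out state, pulling a still-queued message off the down queue and clearing its tx slot if the wait expires. A minimal stand-alone model of the lifecycle and the predicate (the state names mirror DRM_DP_SIDEBAND_TX_*, but this is an illustrative sketch, not the driver code):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the DRM_DP_SIDEBAND_TX_* lifecycle of a txmsg. */
enum tx_state {
	TX_QUEUED,	/* sitting on mgr->tx_msg_downq */
	TX_START_SEND,	/* first chunk going out */
	TX_SENT,	/* fully written, awaiting the down reply */
	TX_RX,		/* reply parsed into txmsg->reply */
	TX_TIMEOUT,	/* wait expired, message abandoned */
};

/* check_txmsg_state(): the sleeper wakes once a reply arrived or the
 * message timed out. */
static bool tx_done(enum tx_state state)
{
	return state == TX_RX || state == TX_TIMEOUT;
}

int main(void)
{
	printf("%d\n", tx_done(TX_SENT));	/* 0: keep waiting */
	printf("%d\n", tx_done(TX_RX));		/* 1: reply received */
	return 0;
}
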
1389 struct drm_dp_sideband_msg_tx *txmsg) in set_hdr_from_dst_qlock() argument
1391 struct drm_dp_mst_branch *mstb = txmsg->dst; in set_hdr_from_dst_qlock()
1395 if (txmsg->seqno == -1) { in set_hdr_from_dst_qlock()
1401 txmsg->seqno = mstb->last_seqno; in set_hdr_from_dst_qlock()
1404 txmsg->seqno = 0; in set_hdr_from_dst_qlock()
1406 txmsg->seqno = 1; in set_hdr_from_dst_qlock()
1407 mstb->tx_slots[txmsg->seqno] = txmsg; in set_hdr_from_dst_qlock()
1410 req_type = txmsg->msg[0] & 0x7f; in set_hdr_from_dst_qlock()
1416 hdr->path_msg = txmsg->path_msg; in set_hdr_from_dst_qlock()
1421 hdr->seqno = txmsg->seqno; in set_hdr_from_dst_qlock()
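
set_hdr_from_dst_qlock() fills the sideband header for the destination branch and, when the message has no sequence number yet, claims one of the branch's two outstanding-transaction slots. A self-contained sketch of that slot/seqno selection (the field names follow the fragments above, but this is a model rather than the kernel code):

#include <stdio.h>
#include <stddef.h>

struct branch {
	void *tx_slots[2];	/* at most two messages in flight per branch */
	int last_seqno;
};

/* Pick seqno 0 or 1: alternate when both slots are free, take the free
 * one otherwise, and fail when both are occupied. */
static int pick_seqno(struct branch *b, void *msg)
{
	int seqno;

	if (b->tx_slots[0] && b->tx_slots[1])
		return -1;		/* no slot free, try again later */
	if (!b->tx_slots[0] && !b->tx_slots[1]) {
		seqno = b->last_seqno;
		b->last_seqno ^= 1;
	} else if (!b->tx_slots[0]) {
		seqno = 0;
	} else {
		seqno = 1;
	}
	b->tx_slots[seqno] = msg;
	return seqno;
}

int main(void)
{
	struct branch b = { { NULL, NULL }, 0 };
	int m1, m2, m3;

	printf("%d\n", pick_seqno(&b, &m1));	/* 0 */
	printf("%d\n", pick_seqno(&b, &m2));	/* 1 */
	printf("%d\n", pick_seqno(&b, &m3));	/* -1, both slots busy */
	return 0;
}
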
1428 struct drm_dp_sideband_msg_tx *txmsg, in process_single_tx_qlock() argument
1438 if (txmsg->state == DRM_DP_SIDEBAND_TX_QUEUED) { in process_single_tx_qlock()
1439 txmsg->seqno = -1; in process_single_tx_qlock()
1440 txmsg->state = DRM_DP_SIDEBAND_TX_START_SEND; in process_single_tx_qlock()
1445 ret = set_hdr_from_dst_qlock(&hdr, txmsg); in process_single_tx_qlock()
1450 len = txmsg->cur_len - txmsg->cur_offset; in process_single_tx_qlock()
1456 if (len == txmsg->cur_len) in process_single_tx_qlock()
1464 memcpy(&chunk[idx], &txmsg->msg[txmsg->cur_offset], tosend); in process_single_tx_qlock()
1475 txmsg->cur_offset += tosend; in process_single_tx_qlock()
1476 if (txmsg->cur_offset == txmsg->cur_len) { in process_single_tx_qlock()
1477 txmsg->state = DRM_DP_SIDEBAND_TX_SENT; in process_single_tx_qlock()
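
process_single_tx_qlock() transmits one queued message, splitting the encoded body into chunks that fit the 48-byte sideband message box once the header and a trailing CRC byte are accounted for; the chunk carrying the last bytes is flagged end-of-message, and when cur_offset reaches cur_len the message moves to the sent state. A rough stand-alone model of that chunking loop (the 48-byte box size comes from the DP MST DPCD layout; the header and CRC sizes used here are illustrative):

#include <stdio.h>

#define SIDEBAND_MSG_BOX	48	/* down-request mailbox size in DPCD */

/* Walk a message body in pieces that fit the mailbox after a hdr_len-byte
 * sideband header and one CRC byte; the final piece is flagged eomt. */
static void send_in_chunks(int body_len, int hdr_len)
{
	int offset = 0;

	while (offset < body_len) {
		int space = SIDEBAND_MSG_BOX - hdr_len - 1;
		int remaining = body_len - offset;
		int tosend = remaining < space ? remaining : space;

		printf("chunk at %d, %d bytes%s\n", offset, tosend,
		       tosend == remaining ? " (eomt)" : "");
		offset += tosend;
	}
}

int main(void)
{
	send_in_chunks(70, 3);	/* a body larger than one mailbox write */
	return 0;
}
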
1485 struct drm_dp_sideband_msg_tx *txmsg; in process_single_down_tx_qlock() local
1497 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); in process_single_down_tx_qlock()
1498 ret = process_single_tx_qlock(mgr, txmsg, false); in process_single_down_tx_qlock()
1501 list_del(&txmsg->next); in process_single_down_tx_qlock()
1504 list_del(&txmsg->next); in process_single_down_tx_qlock()
1505 if (txmsg->seqno != -1) in process_single_down_tx_qlock()
1506 txmsg->dst->tx_slots[txmsg->seqno] = NULL; in process_single_down_tx_qlock()
1507 txmsg->state = DRM_DP_SIDEBAND_TX_TIMEOUT; in process_single_down_tx_qlock()
1518 struct drm_dp_sideband_msg_tx *txmsg) in process_single_up_tx_qlock() argument
1523 ret = process_single_tx_qlock(mgr, txmsg, true); in process_single_up_tx_qlock()
1528 txmsg->dst->tx_slots[txmsg->seqno] = NULL; in process_single_up_tx_qlock()
1532 struct drm_dp_sideband_msg_tx *txmsg) in drm_dp_queue_down_tx() argument
1535 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); in drm_dp_queue_down_tx()
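
drm_dp_queue_down_tx() appends the message to the manager's down queue under the queue lock; only the message that makes the queue non-empty needs to kick the transmit worker, since a worker that is already running drains whatever is queued behind it. A simplified stand-alone model of that handshake (plain pointers stand in for the kernel list and work queue, and the "kick only when first" behaviour is my reading of the surrounding code, not shown in the fragments above):

#include <stdio.h>
#include <stddef.h>

struct txmsg { struct txmsg *next; };

struct mgr { struct txmsg *downq_head, *downq_tail; };

static void kick_tx_worker(void)
{
	printf("tx worker scheduled\n");	/* stands in for the work queue */
}

/* Append to the down queue; wake the worker only when the queue was
 * previously empty. */
static void queue_down_tx(struct mgr *m, struct txmsg *t)
{
	int was_empty = (m->downq_head == NULL);

	t->next = NULL;
	if (was_empty)
		m->downq_head = t;
	else
		m->downq_tail->next = t;
	m->downq_tail = t;
	if (was_empty)
		kick_tx_worker();
}

int main(void)
{
	struct mgr m = { NULL, NULL };
	struct txmsg a = { NULL }, b = { NULL };

	queue_down_tx(&m, &a);	/* wakes the worker */
	queue_down_tx(&m, &b);	/* just appended behind it */
	return 0;
}
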
1545 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_send_link_address() local
1548 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_send_link_address()
1549 if (!txmsg) in drm_dp_send_link_address()
1552 txmsg->dst = mstb; in drm_dp_send_link_address()
1553 len = build_link_address(txmsg); in drm_dp_send_link_address()
1556 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_send_link_address()
1558 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); in drm_dp_send_link_address()
1562 if (txmsg->reply.reply_type == 1) in drm_dp_send_link_address()
1565 DRM_DEBUG_KMS("link address reply: %d\n", txmsg->reply.u.link_addr.nports); in drm_dp_send_link_address()
1566 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { in drm_dp_send_link_address()
1568 txmsg->reply.u.link_addr.ports[i].input_port, in drm_dp_send_link_address()
1569 txmsg->reply.u.link_addr.ports[i].peer_device_type, in drm_dp_send_link_address()
1570 txmsg->reply.u.link_addr.ports[i].port_number, in drm_dp_send_link_address()
1571 txmsg->reply.u.link_addr.ports[i].dpcd_revision, in drm_dp_send_link_address()
1572 txmsg->reply.u.link_addr.ports[i].mcs, in drm_dp_send_link_address()
1573 txmsg->reply.u.link_addr.ports[i].ddps, in drm_dp_send_link_address()
1574 txmsg->reply.u.link_addr.ports[i].legacy_device_plug_status, in drm_dp_send_link_address()
1575 txmsg->reply.u.link_addr.ports[i].num_sdp_streams, in drm_dp_send_link_address()
1576 txmsg->reply.u.link_addr.ports[i].num_sdp_stream_sinks); in drm_dp_send_link_address()
1579 drm_dp_check_mstb_guid(mstb, txmsg->reply.u.link_addr.guid); in drm_dp_send_link_address()
1581 for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) { in drm_dp_send_link_address()
1582 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); in drm_dp_send_link_address()
1591 kfree(txmsg); in drm_dp_send_link_address()
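
drm_dp_send_link_address() shows the round trip that every down request in this listing follows (ENUM_PATH_RESOURCES, ALLOCATE_PAYLOAD, REMOTE_DPCD_WRITE and the i2c transfer repeat the same shape): allocate a txmsg with kzalloc, build the request body, queue it, block in drm_dp_mst_wait_tx_reply(), treat reply_type == 1 as a NAK, and free the txmsg whatever the outcome. A condensed stand-alone model of that control flow (build_request() and queue_and_wait() are stand-ins, not the kernel API):

#include <stdio.h>
#include <stdlib.h>

struct txmsg {
	unsigned char msg[256];
	int cur_len;
	struct { int reply_type; } reply;	/* 1 == NAK in the protocol */
};

/* Stand-in for build_link_address(): only the control flow matters here,
 * not the sideband encoding. */
static int build_request(struct txmsg *t)
{
	t->msg[0] = 0x01;	/* request identifier byte */
	t->cur_len = 1;
	return t->cur_len;
}

/* Stand-in for drm_dp_queue_down_tx() + drm_dp_mst_wait_tx_reply(). */
static int queue_and_wait(struct txmsg *t)
{
	t->reply.reply_type = 0;	/* pretend the branch device ACKed */
	return 1;			/* > 0: a reply arrived before timeout */
}

int main(void)
{
	struct txmsg *t = calloc(1, sizeof(*t));
	int ret;

	if (!t)
		return 1;
	build_request(t);
	ret = queue_and_wait(t);
	if (ret > 0 && t->reply.reply_type == 1)
		printf("request NAKed\n");
	else if (ret > 0)
		printf("reply ok\n");
	free(t);	/* always freed: ACK, NAK or timeout */
	return 0;
}
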
1599 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_send_enum_path_resources() local
1602 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_send_enum_path_resources()
1603 if (!txmsg) in drm_dp_send_enum_path_resources()
1606 txmsg->dst = mstb; in drm_dp_send_enum_path_resources()
1607 len = build_enum_path_resources(txmsg, port->port_num); in drm_dp_send_enum_path_resources()
1609 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_send_enum_path_resources()
1611 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); in drm_dp_send_enum_path_resources()
1613 if (txmsg->reply.reply_type == 1) in drm_dp_send_enum_path_resources()
1616 if (port->port_num != txmsg->reply.u.path_resources.port_number) in drm_dp_send_enum_path_resources()
1618 …DRM_DEBUG_KMS("enum path resources %d: %d %d\n", txmsg->reply.u.path_resources.port_number, txmsg-… in drm_dp_send_enum_path_resources()
1619 txmsg->reply.u.path_resources.avail_payload_bw_number); in drm_dp_send_enum_path_resources()
1620 port->available_pbn = txmsg->reply.u.path_resources.avail_payload_bw_number; in drm_dp_send_enum_path_resources()
1624 kfree(txmsg); in drm_dp_send_enum_path_resources()
1664 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_payload_send_msg() local
1683 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_payload_send_msg()
1684 if (!txmsg) { in drm_dp_payload_send_msg()
1689 txmsg->dst = mstb; in drm_dp_payload_send_msg()
1690 len = build_allocate_payload(txmsg, port_num, in drm_dp_payload_send_msg()
1694 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_payload_send_msg()
1696 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); in drm_dp_payload_send_msg()
1698 if (txmsg->reply.reply_type == 1) { in drm_dp_payload_send_msg()
1703 kfree(txmsg); in drm_dp_payload_send_msg()
1892 struct drm_dp_sideband_msg_tx *txmsg;
1894 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
1895 if (!txmsg)
1898 len = build_dpcd_read(txmsg, port->port_num, 0, 8);
1899 txmsg->dst = port->parent;
1901 drm_dp_queue_down_tx(mgr, txmsg);
1913 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_send_dpcd_write() local
1920 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_send_dpcd_write()
1921 if (!txmsg) { in drm_dp_send_dpcd_write()
1926 len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes); in drm_dp_send_dpcd_write()
1927 txmsg->dst = mstb; in drm_dp_send_dpcd_write()
1929 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_send_dpcd_write()
1931 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); in drm_dp_send_dpcd_write()
1933 if (txmsg->reply.reply_type == 1) { in drm_dp_send_dpcd_write()
1938 kfree(txmsg); in drm_dp_send_dpcd_write()
1958 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_send_up_ack_reply() local
1960 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_send_up_ack_reply()
1961 if (!txmsg) in drm_dp_send_up_ack_reply()
1964 txmsg->dst = mstb; in drm_dp_send_up_ack_reply()
1965 txmsg->seqno = seqno; in drm_dp_send_up_ack_reply()
1966 drm_dp_encode_up_ack_reply(txmsg, req_type); in drm_dp_send_up_ack_reply()
1970 process_single_up_tx_qlock(mgr, txmsg); in drm_dp_send_up_ack_reply()
1974 kfree(txmsg); in drm_dp_send_up_ack_reply()
2212 struct drm_dp_sideband_msg_tx *txmsg; in drm_dp_mst_handle_down_rep() local
2228 txmsg = mstb->tx_slots[slot]; in drm_dp_mst_handle_down_rep()
2232 if (!txmsg) { in drm_dp_mst_handle_down_rep()
2244 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); in drm_dp_mst_handle_down_rep()
2245 if (txmsg->reply.reply_type == 1) { in drm_dp_mst_handle_down_rep()
2246 …: req 0x%02x, reason 0x%02x, nak data 0x%02x\n", txmsg->reply.req_type, txmsg->reply.u.nak.reason,… in drm_dp_mst_handle_down_rep()
2253 txmsg->state = DRM_DP_SIDEBAND_TX_RX; in drm_dp_mst_handle_down_rep()
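
drm_dp_mst_handle_down_rep() completes the round trip: the sequence number in the reply header indexes the branch's tx_slots[] to find the waiting txmsg, the reply is parsed into txmsg->reply (reply_type 1 is logged as a NAK), and the state flips to the received state so the sleeper in drm_dp_mst_wait_tx_reply() wakes up. A small stand-alone model of that dispatch (a printf stands in for waking the waiter):

#include <stdio.h>
#include <stddef.h>

#define TX_RX	3	/* "reply received" state, as in the earlier sketch */

struct txmsg { int state; int reply_type; };
struct branch { struct txmsg *tx_slots[2]; };

/* Look up the waiter by the seqno carried in the reply header, record the
 * parsed reply and flip the state so the sleeping sender can wake. */
static void handle_down_rep(struct branch *b, int seqno, int reply_type)
{
	struct txmsg *t = b->tx_slots[seqno];

	if (!t) {
		printf("no waiter in slot %d, dropping reply\n", seqno);
		return;
	}
	t->reply_type = reply_type;	/* 1 == NAK */
	t->state = TX_RX;		/* wakes drm_dp_mst_wait_tx_reply() */
	printf("slot %d completed, reply_type=%d\n", seqno, reply_type);
}

int main(void)
{
	struct txmsg waiting = { 0, 0 };
	struct branch b = { { &waiting, NULL } };

	handle_down_rep(&b, 0, 0);	/* ACK for the message in slot 0 */
	handle_down_rep(&b, 1, 0);	/* nothing waiting on seqno 1 */
	return 0;
}
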
2964 struct drm_dp_sideband_msg_tx *txmsg = NULL; in drm_dp_mst_i2c_xfer() local
2994 txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL); in drm_dp_mst_i2c_xfer()
2995 if (!txmsg) { in drm_dp_mst_i2c_xfer()
3000 txmsg->dst = mstb; in drm_dp_mst_i2c_xfer()
3001 drm_dp_encode_sideband_req(&msg, txmsg); in drm_dp_mst_i2c_xfer()
3003 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_mst_i2c_xfer()
3005 ret = drm_dp_mst_wait_tx_reply(mstb, txmsg); in drm_dp_mst_i2c_xfer()
3008 if (txmsg->reply.reply_type == 1) { /* got a NAK back */ in drm_dp_mst_i2c_xfer()
3012 if (txmsg->reply.u.remote_i2c_read_ack.num_bytes != msgs[num - 1].len) { in drm_dp_mst_i2c_xfer()
3016 memcpy(msgs[num - 1].buf, txmsg->reply.u.remote_i2c_read_ack.bytes, msgs[num - 1].len); in drm_dp_mst_i2c_xfer()
3020 kfree(txmsg); in drm_dp_mst_i2c_xfer()