Lines matching refs: mgr in drivers/gpu/drm/drm_dp_mst_topology.c (identifier cross-reference)
42 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
48 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
52 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
56 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
58 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
61 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
66 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr);
682 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_mst_assign_payload_id() argument
687 mutex_lock(&mgr->payload_lock); in drm_dp_mst_assign_payload_id()
688 ret = find_first_zero_bit(&mgr->payload_mask, mgr->max_payloads + 1); in drm_dp_mst_assign_payload_id()
689 if (ret > mgr->max_payloads) { in drm_dp_mst_assign_payload_id()
695 vcpi_ret = find_first_zero_bit(&mgr->vcpi_mask, mgr->max_payloads + 1); in drm_dp_mst_assign_payload_id()
696 if (vcpi_ret > mgr->max_payloads) { in drm_dp_mst_assign_payload_id()
702 set_bit(ret, &mgr->payload_mask); in drm_dp_mst_assign_payload_id()
703 set_bit(vcpi_ret, &mgr->vcpi_mask); in drm_dp_mst_assign_payload_id()
705 mgr->proposed_vcpis[ret - 1] = vcpi; in drm_dp_mst_assign_payload_id()
707 mutex_unlock(&mgr->payload_lock); in drm_dp_mst_assign_payload_id()
711 static void drm_dp_mst_put_payload_id(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_mst_put_payload_id() argument
718 mutex_lock(&mgr->payload_lock); in drm_dp_mst_put_payload_id()
720 clear_bit(vcpi - 1, &mgr->vcpi_mask); in drm_dp_mst_put_payload_id()
722 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_mst_put_payload_id()
723 if (mgr->proposed_vcpis[i]) in drm_dp_mst_put_payload_id()
724 if (mgr->proposed_vcpis[i]->vcpi == vcpi) { in drm_dp_mst_put_payload_id()
725 mgr->proposed_vcpis[i] = NULL; in drm_dp_mst_put_payload_id()
726 clear_bit(i + 1, &mgr->payload_mask); in drm_dp_mst_put_payload_id()
729 mutex_unlock(&mgr->payload_lock); in drm_dp_mst_put_payload_id()
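The pair above is a small two-bitmask allocator: payload_mask tracks payload-table slots (bit 0 is pre-set at init, line 2916, so real slots start at 1) and vcpi_mask tracks VCPI numbers; proposed_vcpis[] is indexed by slot minus one, and the VCPI number is the vcpi_mask bit plus one, which is why release does clear_bit(vcpi - 1, ...). A minimal sketch of the same pattern outside the DRM structures; struct id_pool and id_pool_assign() are hypothetical names:

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/mutex.h>

/* Hypothetical miniature of the payload/VCPI id scheme above. */
struct id_pool {
        struct mutex lock;
        unsigned long payload_mask;   /* bit 0 pre-set: slot 0 reserved */
        unsigned long vcpi_mask;      /* VCPI n owns bit n - 1 */
        int max_payloads;
};

static int id_pool_assign(struct id_pool *p, int *slot, int *vcpi)
{
        int s, v;

        mutex_lock(&p->lock);
        s = find_first_zero_bit(&p->payload_mask, p->max_payloads + 1);
        v = find_first_zero_bit(&p->vcpi_mask, p->max_payloads + 1);
        if (s > p->max_payloads || v > p->max_payloads) {
                mutex_unlock(&p->lock);
                return -ENOSPC;
        }
        set_bit(s, &p->payload_mask);
        set_bit(v, &p->vcpi_mask);
        *slot = s;        /* proposed_vcpis[] would use index s - 1 */
        *vcpi = v + 1;    /* released later via clear_bit(*vcpi - 1, ...) */
        mutex_unlock(&p->lock);
        return 0;
}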
732 static bool check_txmsg_state(struct drm_dp_mst_topology_mgr *mgr, in check_txmsg_state() argument
750 struct drm_dp_mst_topology_mgr *mgr = mstb->mgr; in drm_dp_mst_wait_tx_reply() local
753 ret = wait_event_timeout(mgr->tx_waitq, in drm_dp_mst_wait_tx_reply()
754 check_txmsg_state(mgr, txmsg), in drm_dp_mst_wait_tx_reply()
756 mutex_lock(&mstb->mgr->qlock); in drm_dp_mst_wait_tx_reply()
780 mutex_unlock(&mgr->qlock); in drm_dp_mst_wait_tx_reply()
839 mutex_lock(&mstb->mgr->qlock); in drm_dp_destroy_mst_branch_device()
850 mutex_unlock(&mstb->mgr->qlock); in drm_dp_destroy_mst_branch_device()
853 wake_up(&mstb->mgr->tx_waitq); in drm_dp_destroy_mst_branch_device()
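Senders blocked in drm_dp_mst_wait_tx_reply() are woken from two places: when a reply is parsed (wake_up at line 2255) and when the target branch device is torn down (line 853). A condensed sketch of the waiter half, reusing this file's static check_txmsg_state() helper and a 4 * HZ timeout (the value this kernel vintage uses); error handling is collapsed into one assumption-marked branch:

/* Sketch: waiter half of the tx handshake (simplified). */
static int wait_for_reply(struct drm_dp_mst_topology_mgr *mgr,
                          struct drm_dp_sideband_msg_tx *txmsg)
{
        int ret;

        ret = wait_event_timeout(mgr->tx_waitq,
                                 check_txmsg_state(mgr, txmsg), 4 * HZ);
        mutex_lock(&mgr->qlock);
        if (ret == 0 || txmsg->state == DRM_DP_SIDEBAND_TX_TIMEOUT)
                ret = -EIO;   /* assumption: collapse all timeout paths */
        else
                ret = 0;      /* reply is in txmsg->reply */
        mutex_unlock(&mgr->qlock);
        return ret;
}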
885 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_destroy_port() local
902 mutex_lock(&mgr->destroy_connector_lock); in drm_dp_destroy_port()
904 list_add(&port->next, &mgr->destroy_connector_list); in drm_dp_destroy_port()
905 mutex_unlock(&mgr->destroy_connector_lock); in drm_dp_destroy_port()
906 schedule_work(&mgr->destroy_connector_work); in drm_dp_destroy_port()
939 static struct drm_dp_mst_branch *drm_dp_get_validated_mstb_ref(struct drm_dp_mst_topology_mgr *mgr,… in drm_dp_get_validated_mstb_ref() argument
942 mutex_lock(&mgr->lock); in drm_dp_get_validated_mstb_ref()
943 if (mgr->mst_primary) in drm_dp_get_validated_mstb_ref()
944 rmstb = drm_dp_mst_get_validated_mstb_ref_locked(mgr->mst_primary, mstb); in drm_dp_get_validated_mstb_ref()
945 mutex_unlock(&mgr->lock); in drm_dp_get_validated_mstb_ref()
967 static struct drm_dp_mst_port *drm_dp_get_validated_port_ref(struct drm_dp_mst_topology_mgr *mgr, s… in drm_dp_get_validated_port_ref() argument
970 mutex_lock(&mgr->lock); in drm_dp_get_validated_port_ref()
971 if (mgr->mst_primary) in drm_dp_get_validated_port_ref()
972 rport = drm_dp_mst_get_port_ref_locked(mgr->mst_primary, port); in drm_dp_get_validated_port_ref()
973 mutex_unlock(&mgr->lock); in drm_dp_get_validated_port_ref()
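Both *_validated_*_ref() helpers take mgr->lock and re-walk the tree from mgr->mst_primary before handing out a reference, so a cached pointer is only dereferenced after it is proven to still be in the live topology. A hedged reduction of the downward walk the locked helpers perform (the real ones also kref_get() the match before returning):

#include <linux/list.h>

/* Hypothetical reduction of drm_dp_mst_get_port_ref_locked():
 * depth-first search from a branch for a still-attached port. */
static struct drm_dp_mst_port *
find_port_locked(struct drm_dp_mst_branch *mstb, struct drm_dp_mst_port *port)
{
        struct drm_dp_mst_port *p, *found;

        list_for_each_entry(p, &mstb->ports, next) {
                if (p == port)
                        return p;      /* caller takes the kref */
                if (p->mstb) {
                        found = find_port_locked(p->mstb, port);
                        if (found)
                                return found;
                }
        }
        return NULL;
}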
1030 port->mstb->mgr = port->mgr; in drm_dp_port_setup_pdt()
1045 if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) { in drm_dp_check_mstb_guid()
1048 mstb->mgr, in drm_dp_check_mstb_guid()
1056 mstb->mgr->aux, in drm_dp_check_mstb_guid()
1071 snprintf(proppath, proppath_size, "mst:%d", mstb->mgr->conn_base_id); in build_mst_prop_path()
1099 port->mgr = mstb->mgr; in drm_dp_add_port()
1120 mutex_lock(&mstb->mgr->lock); in drm_dp_add_port()
1123 mutex_unlock(&mstb->mgr->lock); in drm_dp_add_port()
1129 drm_dp_send_enum_path_resources(mstb->mgr, mstb, port); in drm_dp_add_port()
1140 drm_dp_send_link_address(mstb->mgr, port->mstb); in drm_dp_add_port()
1148 port->connector = (*mstb->mgr->cbs->add_connector)(mstb->mgr, port, proppath); in drm_dp_add_port()
1151 mutex_lock(&mstb->mgr->lock); in drm_dp_add_port()
1153 mutex_unlock(&mstb->mgr->lock); in drm_dp_add_port()
1202 queue_work(system_long_wq, &mstb->mgr->work); in drm_dp_update_port()
1206 static struct drm_dp_mst_branch *drm_dp_get_mst_branch_device(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_get_mst_branch_device() argument
1214 mutex_lock(&mgr->lock); in drm_dp_get_mst_branch_device()
1215 mstb = mgr->mst_primary; in drm_dp_get_mst_branch_device()
1235 mutex_unlock(&mgr->lock); in drm_dp_get_mst_branch_device()
1264 struct drm_dp_mst_topology_mgr *mgr, in drm_dp_get_mst_branch_device_by_guid() argument
1270 mutex_lock(&mgr->lock); in drm_dp_get_mst_branch_device_by_guid()
1272 mstb = get_mst_branch_device_by_guid_helper(mgr->mst_primary, guid); in drm_dp_get_mst_branch_device_by_guid()
1277 mutex_unlock(&mgr->lock); in drm_dp_get_mst_branch_device_by_guid()
1281 static void drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_check_and_send_link_address() argument
1287 drm_dp_send_link_address(mgr, mstb); in drm_dp_check_and_send_link_address()
1298 drm_dp_send_enum_path_resources(mgr, mstb, port); in drm_dp_check_and_send_link_address()
1301 mstb_child = drm_dp_get_validated_mstb_ref(mgr, port->mstb); in drm_dp_check_and_send_link_address()
1303 drm_dp_check_and_send_link_address(mgr, mstb_child); in drm_dp_check_and_send_link_address()
1312 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, work); in drm_dp_mst_link_probe_work() local
1315 mutex_lock(&mgr->lock); in drm_dp_mst_link_probe_work()
1316 mstb = mgr->mst_primary; in drm_dp_mst_link_probe_work()
1320 mutex_unlock(&mgr->lock); in drm_dp_mst_link_probe_work()
1322 drm_dp_check_and_send_link_address(mgr, mstb); in drm_dp_mst_link_probe_work()
1327 static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_validate_guid() argument
1356 static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_send_sideband_msg() argument
1368 tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); in drm_dp_send_sideband_msg()
1370 ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, in drm_dp_send_sideband_msg()
1427 static int process_single_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, in process_single_tx_qlock() argument
1469 ret = drm_dp_send_sideband_msg(mgr, up, chunk, idx); in process_single_tx_qlock()
1483 static void process_single_down_tx_qlock(struct drm_dp_mst_topology_mgr *mgr) in process_single_down_tx_qlock() argument
1488 WARN_ON(!mutex_is_locked(&mgr->qlock)); in process_single_down_tx_qlock()
1491 if (list_empty(&mgr->tx_msg_downq)) { in process_single_down_tx_qlock()
1492 mgr->tx_down_in_progress = false; in process_single_down_tx_qlock()
1495 mgr->tx_down_in_progress = true; in process_single_down_tx_qlock()
1497 txmsg = list_first_entry(&mgr->tx_msg_downq, struct drm_dp_sideband_msg_tx, next); in process_single_down_tx_qlock()
1498 ret = process_single_tx_qlock(mgr, txmsg, false); in process_single_down_tx_qlock()
1508 wake_up(&mgr->tx_waitq); in process_single_down_tx_qlock()
1510 if (list_empty(&mgr->tx_msg_downq)) { in process_single_down_tx_qlock()
1511 mgr->tx_down_in_progress = false; in process_single_down_tx_qlock()
1517 static void process_single_up_tx_qlock(struct drm_dp_mst_topology_mgr *mgr, in process_single_up_tx_qlock() argument
1523 ret = process_single_tx_qlock(mgr, txmsg, true); in process_single_up_tx_qlock()
1531 static void drm_dp_queue_down_tx(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_queue_down_tx() argument
1534 mutex_lock(&mgr->qlock); in drm_dp_queue_down_tx()
1535 list_add_tail(&txmsg->next, &mgr->tx_msg_downq); in drm_dp_queue_down_tx()
1536 if (!mgr->tx_down_in_progress) in drm_dp_queue_down_tx()
1537 process_single_down_tx_qlock(mgr); in drm_dp_queue_down_tx()
1538 mutex_unlock(&mgr->qlock); in drm_dp_queue_down_tx()
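drm_dp_queue_down_tx() appends under qlock and only starts transmission when nothing is in flight; the completion paths (lines 1508-1511 and 2250-2255) pop the queue and wake tx_waitq, so the down channel behaves as a single-writer FIFO over AUX. The send functions below all share one shape, sketched here with the message building reduced to a hypothetical build_some_request():

#include <linux/slab.h>

/* Sketch of the sender pattern used by the drm_dp_send_*() functions.
 * Return-value handling is simplified. */
static int send_one_request(struct drm_dp_mst_topology_mgr *mgr,
                            struct drm_dp_mst_branch *mstb)
{
        struct drm_dp_sideband_msg_tx *txmsg;
        int ret;

        txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
        if (!txmsg)
                return -ENOMEM;

        txmsg->dst = mstb;                 /* target branch device */
        build_some_request(txmsg);         /* hypothetical: fill txmsg->msg */
        drm_dp_queue_down_tx(mgr, txmsg);  /* FIFO append, kick if idle */

        ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
        kfree(txmsg);
        return ret < 0 ? ret : 0;
}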
1541 static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_send_link_address() argument
1555 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_send_link_address()
1581 drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]); in drm_dp_send_link_address()
1583 (*mgr->cbs->hotplug)(mgr); in drm_dp_send_link_address()
1592 static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_send_enum_path_resources() argument
1607 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_send_enum_path_resources()
1637 …uct drm_dp_mst_branch *drm_dp_get_last_connected_port_and_mstb(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_get_last_connected_port_and_mstb() argument
1643 mutex_lock(&mgr->lock); in drm_dp_get_last_connected_port_and_mstb()
1644 if (mgr->mst_primary) { in drm_dp_get_last_connected_port_and_mstb()
1653 mutex_unlock(&mgr->lock); in drm_dp_get_last_connected_port_and_mstb()
1657 static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_payload_send_msg() argument
1666 port = drm_dp_get_validated_port_ref(mgr, port); in drm_dp_payload_send_msg()
1671 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); in drm_dp_payload_send_msg()
1673 mstb = drm_dp_get_last_connected_port_and_mstb(mgr, port->parent, &port_num); in drm_dp_payload_send_msg()
1692 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_payload_send_msg()
1708 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_create_payload_step1() argument
1714 ret = drm_dp_dpcd_write_payload(mgr, id, payload); in drm_dp_create_payload_step1()
1723 static int drm_dp_create_payload_step2(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_create_payload_step2() argument
1729 ret = drm_dp_payload_send_msg(mgr, port, id, port->vcpi.pbn); in drm_dp_create_payload_step2()
1736 static int drm_dp_destroy_payload_step1(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_destroy_payload_step1() argument
1744 drm_dp_payload_send_msg(mgr, port, id, 0); in drm_dp_destroy_payload_step1()
1747 drm_dp_dpcd_write_payload(mgr, id, payload); in drm_dp_destroy_payload_step1()
1752 static int drm_dp_destroy_payload_step2(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_destroy_payload_step2() argument
1773 int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr) in drm_dp_update_payload_part1() argument
1780 mutex_lock(&mgr->payload_lock); in drm_dp_update_payload_part1()
1781 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_update_payload_part1()
1785 if (mgr->proposed_vcpis[i]) { in drm_dp_update_payload_part1()
1786 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); in drm_dp_update_payload_part1()
1787 port = drm_dp_get_validated_port_ref(mgr, port); in drm_dp_update_payload_part1()
1789 mutex_unlock(&mgr->payload_lock); in drm_dp_update_payload_part1()
1792 req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots; in drm_dp_update_payload_part1()
1798 if (mgr->payloads[i].start_slot != req_payload.start_slot) { in drm_dp_update_payload_part1()
1799 mgr->payloads[i].start_slot = req_payload.start_slot; in drm_dp_update_payload_part1()
1802 if (mgr->payloads[i].num_slots != req_payload.num_slots) { in drm_dp_update_payload_part1()
1806 drm_dp_create_payload_step1(mgr, mgr->proposed_vcpis[i]->vcpi, &req_payload); in drm_dp_update_payload_part1()
1807 mgr->payloads[i].num_slots = req_payload.num_slots; in drm_dp_update_payload_part1()
1808 } else if (mgr->payloads[i].num_slots) { in drm_dp_update_payload_part1()
1809 mgr->payloads[i].num_slots = 0; in drm_dp_update_payload_part1()
1810 drm_dp_destroy_payload_step1(mgr, port, port->vcpi.vcpi, &mgr->payloads[i]); in drm_dp_update_payload_part1()
1811 req_payload.payload_state = mgr->payloads[i].payload_state; in drm_dp_update_payload_part1()
1812 mgr->payloads[i].start_slot = 0; in drm_dp_update_payload_part1()
1814 mgr->payloads[i].payload_state = req_payload.payload_state; in drm_dp_update_payload_part1()
1822 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_update_payload_part1()
1823 if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { in drm_dp_update_payload_part1()
1825 for (j = i; j < mgr->max_payloads - 1; j++) { in drm_dp_update_payload_part1()
1826 memcpy(&mgr->payloads[j], &mgr->payloads[j + 1], sizeof(struct drm_dp_payload)); in drm_dp_update_payload_part1()
1827 mgr->proposed_vcpis[j] = mgr->proposed_vcpis[j + 1]; in drm_dp_update_payload_part1()
1828 if (mgr->proposed_vcpis[j] && mgr->proposed_vcpis[j]->num_slots) { in drm_dp_update_payload_part1()
1829 set_bit(j + 1, &mgr->payload_mask); in drm_dp_update_payload_part1()
1831 clear_bit(j + 1, &mgr->payload_mask); in drm_dp_update_payload_part1()
1834 memset(&mgr->payloads[mgr->max_payloads - 1], 0, sizeof(struct drm_dp_payload)); in drm_dp_update_payload_part1()
1835 mgr->proposed_vcpis[mgr->max_payloads - 1] = NULL; in drm_dp_update_payload_part1()
1836 clear_bit(mgr->max_payloads, &mgr->payload_mask); in drm_dp_update_payload_part1()
1840 mutex_unlock(&mgr->payload_lock); in drm_dp_update_payload_part1()
1855 int drm_dp_update_payload_part2(struct drm_dp_mst_topology_mgr *mgr) in drm_dp_update_payload_part2() argument
1860 mutex_lock(&mgr->payload_lock); in drm_dp_update_payload_part2()
1861 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_update_payload_part2()
1863 if (!mgr->proposed_vcpis[i]) in drm_dp_update_payload_part2()
1866 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); in drm_dp_update_payload_part2()
1868 DRM_DEBUG_KMS("payload %d %d\n", i, mgr->payloads[i].payload_state); in drm_dp_update_payload_part2()
1869 if (mgr->payloads[i].payload_state == DP_PAYLOAD_LOCAL) { in drm_dp_update_payload_part2()
1870 ret = drm_dp_create_payload_step2(mgr, port, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); in drm_dp_update_payload_part2()
1871 } else if (mgr->payloads[i].payload_state == DP_PAYLOAD_DELETE_LOCAL) { in drm_dp_update_payload_part2()
1872 ret = drm_dp_destroy_payload_step2(mgr, mgr->proposed_vcpis[i]->vcpi, &mgr->payloads[i]); in drm_dp_update_payload_part2()
1875 mutex_unlock(&mgr->payload_lock); in drm_dp_update_payload_part2()
1879 mutex_unlock(&mgr->payload_lock); in drm_dp_update_payload_part2()
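The two-part update mirrors the MST spec's payload sequence: part 1 programs the proposed table into the source and the immediate branch (the create/destroy step-1 helpers above), the driver then waits for the allocation change trigger (ACT) to complete, and part 2 sends ALLOCATE_PAYLOAD down the topology (the step-2 helpers). A hedged sketch of the commit-side call order, as radeon/i915-era drivers used it; my_mst_commit() is hypothetical:

/* Hypothetical modeset-commit path: part1 -> ACT -> part2. */
static int my_mst_commit(struct drm_dp_mst_topology_mgr *mgr)
{
        int ret;

        ret = drm_dp_update_payload_part1(mgr);  /* program payload table */
        if (ret < 0)
                return ret;

        ret = drm_dp_check_act_status(mgr);      /* poll ACT-handled bit */
        if (ret < 0)
                return ret;

        return drm_dp_update_payload_part2(mgr); /* send ALLOCATE_PAYLOAD */
}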
1885 static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
1899 drm_dp_queue_down_tx(mgr, txmsg);
1905 static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_send_dpcd_write() argument
1914 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); in drm_dp_send_dpcd_write()
1927 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_send_dpcd_write()
1952 static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_send_up_ack_reply() argument
1966 mutex_lock(&mgr->qlock); in drm_dp_send_up_ack_reply()
1968 process_single_up_tx_qlock(mgr, txmsg); in drm_dp_send_up_ack_reply()
1970 mutex_unlock(&mgr->qlock); in drm_dp_send_up_ack_reply()
2007 int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state) in drm_dp_mst_topology_mgr_set_mst() argument
2012 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_set_mst()
2013 if (mst_state == mgr->mst_state) in drm_dp_mst_topology_mgr_set_mst()
2016 mgr->mst_state = mst_state; in drm_dp_mst_topology_mgr_set_mst()
2019 WARN_ON(mgr->mst_primary); in drm_dp_mst_topology_mgr_set_mst()
2022 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); in drm_dp_mst_topology_mgr_set_mst()
2028 if (!drm_dp_get_vc_payload_bw(mgr->dpcd[1], in drm_dp_mst_topology_mgr_set_mst()
2029 mgr->dpcd[2] & DP_MAX_LANE_COUNT_MASK, in drm_dp_mst_topology_mgr_set_mst()
2030 &mgr->pbn_div)) { in drm_dp_mst_topology_mgr_set_mst()
2035 mgr->total_pbn = 2560; in drm_dp_mst_topology_mgr_set_mst()
2036 mgr->total_slots = DIV_ROUND_UP(mgr->total_pbn, mgr->pbn_div); in drm_dp_mst_topology_mgr_set_mst()
2037 mgr->avail_slots = mgr->total_slots; in drm_dp_mst_topology_mgr_set_mst()
2045 mstb->mgr = mgr; in drm_dp_mst_topology_mgr_set_mst()
2048 mgr->mst_primary = mstb; in drm_dp_mst_topology_mgr_set_mst()
2049 kref_get(&mgr->mst_primary->kref); in drm_dp_mst_topology_mgr_set_mst()
2051 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_set_mst()
2061 drm_dp_dpcd_write_payload(mgr, 0, &reset_pay); in drm_dp_mst_topology_mgr_set_mst()
2064 queue_work(system_long_wq, &mgr->work); in drm_dp_mst_topology_mgr_set_mst()
2069 mstb = mgr->mst_primary; in drm_dp_mst_topology_mgr_set_mst()
2070 mgr->mst_primary = NULL; in drm_dp_mst_topology_mgr_set_mst()
2072 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); in drm_dp_mst_topology_mgr_set_mst()
2074 memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload)); in drm_dp_mst_topology_mgr_set_mst()
2075 mgr->payload_mask = 0; in drm_dp_mst_topology_mgr_set_mst()
2076 set_bit(0, &mgr->payload_mask); in drm_dp_mst_topology_mgr_set_mst()
2077 mgr->vcpi_mask = 0; in drm_dp_mst_topology_mgr_set_mst()
2081 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_set_mst()
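drm_dp_mst_topology_mgr_set_mst() is the single on/off switch: enabling caches the receiver caps, derives pbn_div, writes DP_MSTM_CTRL and queues the link-probe work; disabling drops mst_primary and resets the payload bookkeeping. A sketch of a driver flipping MST on after probing the sink; the DPCD capability check is an assumption about the caller, not part of this file:

/* Sketch: enable MST once the sink advertises it (error paths trimmed). */
static int my_enable_mst(struct drm_dp_mst_topology_mgr *mgr,
                         struct drm_dp_aux *aux)
{
        u8 cap;

        if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &cap) != 1)
                return -EIO;
        if (!(cap & DP_MST_CAP))
                return -ENODEV;   /* SST-only sink */

        return drm_dp_mst_topology_mgr_set_mst(mgr, true);
}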
2096 void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) in drm_dp_mst_topology_mgr_suspend() argument
2098 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
2099 drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_suspend()
2101 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_suspend()
2102 flush_work(&mgr->work); in drm_dp_mst_topology_mgr_suspend()
2103 flush_work(&mgr->destroy_connector_work); in drm_dp_mst_topology_mgr_suspend()
2117 int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr) in drm_dp_mst_topology_mgr_resume() argument
2121 mutex_lock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
2123 if (mgr->mst_primary) { in drm_dp_mst_topology_mgr_resume()
2127 sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE); in drm_dp_mst_topology_mgr_resume()
2134 ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, in drm_dp_mst_topology_mgr_resume()
2143 sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16); in drm_dp_mst_topology_mgr_resume()
2149 drm_dp_check_mstb_guid(mgr->mst_primary, guid); in drm_dp_mst_topology_mgr_resume()
2156 mutex_unlock(&mgr->lock); in drm_dp_mst_topology_mgr_resume()
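Suspend clears DP_MSTM_CTRL and flushes both work items; resume re-reads the receiver caps, rewrites DP_MSTM_CTRL and revalidates the primary branch GUID, failing if the same topology cannot be assumed. A sketch of the driver contract around system sleep; the my_driver_* wrappers are hypothetical:

/* Sketch: PM hooks bracketing the MST manager. On resume failure the
 * driver is expected to fall back to a full re-detect (assumption). */
static void my_driver_suspend(struct drm_dp_mst_topology_mgr *mgr)
{
        drm_dp_mst_topology_mgr_suspend(mgr);
}

static void my_driver_resume(struct drm_dp_mst_topology_mgr *mgr)
{
        if (drm_dp_mst_topology_mgr_resume(mgr) < 0) {
                /* topology changed or sink vanished while asleep */
                drm_dp_mst_topology_mgr_set_mst(mgr, false);
                /* ...then trigger a normal hotplug re-probe... */
        }
}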
2161 static void drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up) in drm_dp_get_one_sb_msg() argument
2169 msg = up ? &mgr->up_req_recv : &mgr->down_rep_recv; in drm_dp_get_one_sb_msg()
2171 len = min(mgr->max_dpcd_transaction_bytes, 16); in drm_dp_get_one_sb_msg()
2172 ret = drm_dp_dpcd_read(mgr->aux, basereg, in drm_dp_get_one_sb_msg()
2189 len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); in drm_dp_get_one_sb_msg()
2190 ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, in drm_dp_get_one_sb_msg()
2203 static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr) in drm_dp_mst_handle_down_rep() argument
2207 drm_dp_get_one_sb_msg(mgr, false); in drm_dp_mst_handle_down_rep()
2209 if (mgr->down_rep_recv.have_eomt) { in drm_dp_mst_handle_down_rep()
2213 mstb = drm_dp_get_mst_branch_device(mgr, in drm_dp_mst_handle_down_rep()
2214 mgr->down_rep_recv.initial_hdr.lct, in drm_dp_mst_handle_down_rep()
2215 mgr->down_rep_recv.initial_hdr.rad); in drm_dp_mst_handle_down_rep()
2218 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->down_rep_recv.initial_hdr.lct); in drm_dp_mst_handle_down_rep()
2219 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); in drm_dp_mst_handle_down_rep()
2224 slot = mgr->down_rep_recv.initial_hdr.seqno; in drm_dp_mst_handle_down_rep()
2225 mutex_lock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
2228 mutex_unlock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
2233 mgr->down_rep_recv.initial_hdr.seqno, in drm_dp_mst_handle_down_rep()
2234 mgr->down_rep_recv.initial_hdr.lct, in drm_dp_mst_handle_down_rep()
2235 mgr->down_rep_recv.initial_hdr.rad[0], in drm_dp_mst_handle_down_rep()
2236 mgr->down_rep_recv.msg[0]); in drm_dp_mst_handle_down_rep()
2238 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); in drm_dp_mst_handle_down_rep()
2242 drm_dp_sideband_parse_reply(&mgr->down_rep_recv, &txmsg->reply); in drm_dp_mst_handle_down_rep()
2247 memset(&mgr->down_rep_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); in drm_dp_mst_handle_down_rep()
2250 mutex_lock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
2253 mutex_unlock(&mgr->qlock); in drm_dp_mst_handle_down_rep()
2255 wake_up(&mgr->tx_waitq); in drm_dp_mst_handle_down_rep()
2260 static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr) in drm_dp_mst_handle_up_req() argument
2263 drm_dp_get_one_sb_msg(mgr, true); in drm_dp_mst_handle_up_req()
2265 if (mgr->up_req_recv.have_eomt) { in drm_dp_mst_handle_up_req()
2270 if (!mgr->up_req_recv.initial_hdr.broadcast) { in drm_dp_mst_handle_up_req()
2271 mstb = drm_dp_get_mst_branch_device(mgr, in drm_dp_mst_handle_up_req()
2272 mgr->up_req_recv.initial_hdr.lct, in drm_dp_mst_handle_up_req()
2273 mgr->up_req_recv.initial_hdr.rad); in drm_dp_mst_handle_up_req()
2275 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); in drm_dp_mst_handle_up_req()
2276 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); in drm_dp_mst_handle_up_req()
2281 seqno = mgr->up_req_recv.initial_hdr.seqno; in drm_dp_mst_handle_up_req()
2282 drm_dp_sideband_parse_req(&mgr->up_req_recv, &msg); in drm_dp_mst_handle_up_req()
2285 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); in drm_dp_mst_handle_up_req()
2288 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.conn_stat.guid); in drm_dp_mst_handle_up_req()
2291 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); in drm_dp_mst_handle_up_req()
2292 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); in drm_dp_mst_handle_up_req()
2299 (*mgr->cbs->hotplug)(mgr); in drm_dp_mst_handle_up_req()
2302 drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false); in drm_dp_mst_handle_up_req()
2304 mstb = drm_dp_get_mst_branch_device_by_guid(mgr, msg.u.resource_stat.guid); in drm_dp_mst_handle_up_req()
2307 DRM_DEBUG_KMS("Got MST reply from unknown device %d\n", mgr->up_req_recv.initial_hdr.lct); in drm_dp_mst_handle_up_req()
2308 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); in drm_dp_mst_handle_up_req()
2316 memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx)); in drm_dp_mst_handle_up_req()
2332 int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled) in drm_dp_mst_hpd_irq() argument
2339 if (sc != mgr->sink_count) { in drm_dp_mst_hpd_irq()
2340 mgr->sink_count = sc; in drm_dp_mst_hpd_irq()
2345 ret = drm_dp_mst_handle_down_rep(mgr); in drm_dp_mst_hpd_irq()
2350 ret |= drm_dp_mst_handle_up_req(mgr); in drm_dp_mst_hpd_irq()
2354 drm_dp_mst_kick_tx(mgr); in drm_dp_mst_hpd_irq()
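drm_dp_mst_hpd_irq() consumes the sink's ESI (event status indicator) bytes: it tracks sink-count changes, services DOWN_REP and UP_REQ traffic, and sets *handled when the interrupt belonged to MST. Drivers loop, acking the serviced bits back into DPCD until the sink goes quiet; a sketch of that loop, with the retry bound an arbitrary assumption:

/* Sketch of the short-HPD service loop wrapped around
 * drm_dp_mst_hpd_irq(), in the style of the radeon/i915 handlers. */
static void my_mst_irq(struct drm_dp_mst_topology_mgr *mgr,
                       struct drm_dp_aux *aux)
{
        u8 esi[16];
        bool handled;
        int retries = 0;

        do {
                if (drm_dp_dpcd_read(aux, DP_SINK_COUNT_ESI, esi, 14) != 14)
                        return;

                handled = false;
                drm_dp_mst_hpd_irq(mgr, esi, &handled);
                if (!handled)
                        return;   /* not an MST event */

                /* ack the serviced event bits back to the sink */
                drm_dp_dpcd_write(aux, DP_SINK_COUNT_ESI + 1, &esi[1], 3);
        } while (++retries < 5);
}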
2368 struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) in drm_dp_mst_detect_port() argument
2373 port = drm_dp_get_validated_port_ref(mgr, port); in drm_dp_mst_detect_port()
2413 …_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_… in drm_dp_mst_get_edid() argument
2418 port = drm_dp_get_validated_port_ref(mgr, port); in drm_dp_mst_get_edid()
2438 int drm_dp_find_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_find_vcpi_slots() argument
2443 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); in drm_dp_find_vcpi_slots()
2445 if (num_slots > mgr->avail_slots) in drm_dp_find_vcpi_slots()
2451 static int drm_dp_init_vcpi(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_init_vcpi() argument
2457 num_slots = DIV_ROUND_UP(pbn, mgr->pbn_div); in drm_dp_init_vcpi()
2459 if (num_slots > mgr->avail_slots) in drm_dp_init_vcpi()
2463 vcpi->aligned_pbn = num_slots * mgr->pbn_div; in drm_dp_init_vcpi()
2466 ret = drm_dp_mst_assign_payload_id(mgr, vcpi); in drm_dp_init_vcpi()
2479 bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, in… in drm_dp_mst_allocate_vcpi() argument
2483 port = drm_dp_get_validated_port_ref(mgr, port); in drm_dp_mst_allocate_vcpi()
2496 ret = drm_dp_init_vcpi(mgr, &port->vcpi, pbn); in drm_dp_mst_allocate_vcpi()
2498 …DRM_DEBUG_KMS("failed to init vcpi %d %d %d\n", DIV_ROUND_UP(pbn, mgr->pbn_div), mgr->avail_slots,… in drm_dp_mst_allocate_vcpi()
2511 int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) in drm_dp_mst_get_vcpi_slots() argument
2514 port = drm_dp_get_validated_port_ref(mgr, port); in drm_dp_mst_get_vcpi_slots()
2531 void drm_dp_mst_reset_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) in drm_dp_mst_reset_vcpi_slots() argument
2533 port = drm_dp_get_validated_port_ref(mgr, port); in drm_dp_mst_reset_vcpi_slots()
2546 void drm_dp_mst_deallocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port) in drm_dp_mst_deallocate_vcpi() argument
2548 port = drm_dp_get_validated_port_ref(mgr, port); in drm_dp_mst_deallocate_vcpi()
2552 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); in drm_dp_mst_deallocate_vcpi()
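Lines 2438 through 2552 form the VCPI lifecycle: drm_dp_find_vcpi_slots() is a dry-run bandwidth check, drm_dp_mst_allocate_vcpi() reserves slots plus a payload ID for a port, and reset/deallocate undo it in two stages so the payload table can shrink before the ID is released. A sketch of the two halves; the clock/bpp values are made up, and drm_dp_calc_pbn_mode() is the PBN helper from this same file:

/* Sketch: per-stream VCPI management around a mode set. */
static int my_stream_enable(struct drm_dp_mst_topology_mgr *mgr,
                            struct drm_dp_mst_port *port)
{
        int pbn = drm_dp_calc_pbn_mode(148500 /* kHz */, 24 /* bpp */);
        int slots;

        if (!drm_dp_mst_allocate_vcpi(mgr, port, pbn, &slots))
                return -ENOSPC;   /* no bandwidth or no payload id left */
        /* ...program the source's VC table with 'slots', then run the
         * part1 / ACT / part2 sequence sketched earlier... */
        return 0;
}

static void my_stream_disable(struct drm_dp_mst_topology_mgr *mgr,
                              struct drm_dp_mst_port *port)
{
        drm_dp_mst_reset_vcpi_slots(mgr, port);  /* mark payload for delete */
        /* ...part1 / ACT / part2 again to shrink the table... */
        drm_dp_mst_deallocate_vcpi(mgr, port);   /* release slots and id */
}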
2561 static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_dpcd_write_payload() argument
2568 drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, in drm_dp_dpcd_write_payload()
2575 ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); in drm_dp_dpcd_write_payload()
2582 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); in drm_dp_dpcd_write_payload()
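drm_dp_dpcd_write_payload() drives the sink's payload-table registers directly: clear the update status, write the three allocate bytes, then poll for the table-updated bit. The DPCD layout involved, reconstructed from drm_dp_helper.h as a comment-only sketch:

/*
 * Bytes written at DP_PAYLOAD_ALLOCATE_SET (0x1c0..0x1c2), as the call
 * at line 2575 assembles them:
 *
 *   payload_alloc[0] = id;                  .. DP_PAYLOAD_ALLOCATE_SET
 *   payload_alloc[1] = payload->start_slot; .. DP_PAYLOAD_ALLOCATE_START_TIME_SLOT
 *   payload_alloc[2] = payload->num_slots;  .. DP_PAYLOAD_ALLOCATE_TIME_SLOT_COUNT
 *
 * Completion is reported at DP_PAYLOAD_TABLE_UPDATE_STATUS (0x2c0):
 * DP_PAYLOAD_TABLE_UPDATED is bit 0; DP_PAYLOAD_ACT_HANDLED (bit 1) is
 * what drm_dp_check_act_status() below polls for.
 */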
2610 int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr) in drm_dp_check_act_status() argument
2617 ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); in drm_dp_check_act_status()
2704 static void drm_dp_mst_kick_tx(struct drm_dp_mst_topology_mgr *mgr) in drm_dp_mst_kick_tx() argument
2706 queue_work(system_long_wq, &mgr->tx_work); in drm_dp_mst_kick_tx()
2729 static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, in dump_dp_payload_table() argument
2735 ret = drm_dp_dpcd_read(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS + (i * 16), &buf[i * 16], 16); in dump_dp_payload_table()
2752 struct drm_dp_mst_topology_mgr *mgr) in drm_dp_mst_dump_topology() argument
2756 mutex_lock(&mgr->lock); in drm_dp_mst_dump_topology()
2757 if (mgr->mst_primary) in drm_dp_mst_dump_topology()
2758 drm_dp_mst_dump_mstb(m, mgr->mst_primary); in drm_dp_mst_dump_topology()
2761 mutex_unlock(&mgr->lock); in drm_dp_mst_dump_topology()
2763 mutex_lock(&mgr->payload_lock); in drm_dp_mst_dump_topology()
2764 seq_printf(m, "vcpi: %lx %lx\n", mgr->payload_mask, mgr->vcpi_mask); in drm_dp_mst_dump_topology()
2766 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_mst_dump_topology()
2767 if (mgr->proposed_vcpis[i]) { in drm_dp_mst_dump_topology()
2768 port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi); in drm_dp_mst_dump_topology()
2773 for (i = 0; i < mgr->max_payloads; i++) { in drm_dp_mst_dump_topology()
2776 mgr->payloads[i].payload_state, in drm_dp_mst_dump_topology()
2777 mgr->payloads[i].start_slot, in drm_dp_mst_dump_topology()
2778 mgr->payloads[i].num_slots); in drm_dp_mst_dump_topology()
2782 mutex_unlock(&mgr->payload_lock); in drm_dp_mst_dump_topology()
2784 mutex_lock(&mgr->lock); in drm_dp_mst_dump_topology()
2785 if (mgr->mst_primary) { in drm_dp_mst_dump_topology()
2789 ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE); in drm_dp_mst_dump_topology()
2794 ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); in drm_dp_mst_dump_topology()
2799 ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); in drm_dp_mst_dump_topology()
2805 bret = dump_dp_payload_table(mgr, buf); in drm_dp_mst_dump_topology()
2815 mutex_unlock(&mgr->lock); in drm_dp_mst_dump_topology()
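drm_dp_mst_dump_topology() is seq_file based, so exposing it costs one debugfs file per manager. A minimal sketch, assuming the usual single_open() plumbing; everything except drm_dp_mst_dump_topology() is hypothetical:

#include <linux/seq_file.h>

/* Sketch: debugfs show() callback wrapping the topology dump. */
static int my_mst_info_show(struct seq_file *m, void *unused)
{
        struct drm_dp_mst_topology_mgr *mgr = m->private;

        drm_dp_mst_dump_topology(m, mgr);
        return 0;
}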
2822 struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, tx_work); in drm_dp_tx_work() local
2824 mutex_lock(&mgr->qlock); in drm_dp_tx_work()
2825 if (mgr->tx_down_in_progress) in drm_dp_tx_work()
2826 process_single_down_tx_qlock(mgr); in drm_dp_tx_work()
2827 mutex_unlock(&mgr->qlock); in drm_dp_tx_work()
2839 …struct drm_dp_mst_topology_mgr *mgr = container_of(work, struct drm_dp_mst_topology_mgr, destroy_c… in drm_dp_destroy_connector_work() local
2848 mutex_lock(&mgr->destroy_connector_lock); in drm_dp_destroy_connector_work()
2849 port = list_first_entry_or_null(&mgr->destroy_connector_list, struct drm_dp_mst_port, next); in drm_dp_destroy_connector_work()
2851 mutex_unlock(&mgr->destroy_connector_lock); in drm_dp_destroy_connector_work()
2855 mutex_unlock(&mgr->destroy_connector_lock); in drm_dp_destroy_connector_work()
2860 mgr->cbs->destroy_connector(mgr, port->connector); in drm_dp_destroy_connector_work()
2865 if (mgr->mst_state) { in drm_dp_destroy_connector_work()
2866 drm_dp_mst_reset_vcpi_slots(mgr, port); in drm_dp_destroy_connector_work()
2867 drm_dp_update_payload_part1(mgr); in drm_dp_destroy_connector_work()
2868 drm_dp_mst_put_payload_id(mgr, port->vcpi.vcpi); in drm_dp_destroy_connector_work()
2876 (*mgr->cbs->hotplug)(mgr); in drm_dp_destroy_connector_work()
2890 int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr, in drm_dp_mst_topology_mgr_init() argument
2895 mutex_init(&mgr->lock); in drm_dp_mst_topology_mgr_init()
2896 mutex_init(&mgr->qlock); in drm_dp_mst_topology_mgr_init()
2897 mutex_init(&mgr->payload_lock); in drm_dp_mst_topology_mgr_init()
2898 mutex_init(&mgr->destroy_connector_lock); in drm_dp_mst_topology_mgr_init()
2899 INIT_LIST_HEAD(&mgr->tx_msg_downq); in drm_dp_mst_topology_mgr_init()
2900 INIT_LIST_HEAD(&mgr->destroy_connector_list); in drm_dp_mst_topology_mgr_init()
2901 INIT_WORK(&mgr->work, drm_dp_mst_link_probe_work); in drm_dp_mst_topology_mgr_init()
2902 INIT_WORK(&mgr->tx_work, drm_dp_tx_work); in drm_dp_mst_topology_mgr_init()
2903 INIT_WORK(&mgr->destroy_connector_work, drm_dp_destroy_connector_work); in drm_dp_mst_topology_mgr_init()
2904 init_waitqueue_head(&mgr->tx_waitq); in drm_dp_mst_topology_mgr_init()
2905 mgr->dev = dev; in drm_dp_mst_topology_mgr_init()
2906 mgr->aux = aux; in drm_dp_mst_topology_mgr_init()
2907 mgr->max_dpcd_transaction_bytes = max_dpcd_transaction_bytes; in drm_dp_mst_topology_mgr_init()
2908 mgr->max_payloads = max_payloads; in drm_dp_mst_topology_mgr_init()
2909 mgr->conn_base_id = conn_base_id; in drm_dp_mst_topology_mgr_init()
2910 mgr->payloads = kcalloc(max_payloads, sizeof(struct drm_dp_payload), GFP_KERNEL); in drm_dp_mst_topology_mgr_init()
2911 if (!mgr->payloads) in drm_dp_mst_topology_mgr_init()
2913 mgr->proposed_vcpis = kcalloc(max_payloads, sizeof(struct drm_dp_vcpi *), GFP_KERNEL); in drm_dp_mst_topology_mgr_init()
2914 if (!mgr->proposed_vcpis) in drm_dp_mst_topology_mgr_init()
2916 set_bit(0, &mgr->payload_mask); in drm_dp_mst_topology_mgr_init()
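Initialization wires the manager to the driver through mgr->cbs; add_connector, hotplug and destroy_connector are all invoked later from the work items armed at lines 2901-2903. A sketch of driver-side bring-up, with callback signatures inferred from the call sites above; struct my_encoder and the my_* callbacks are hypothetical, and 16/6 are typical rather than required values:

/* Sketch: driver-side setup of the topology manager. */
static struct drm_dp_mst_topology_cbs my_mst_cbs = {
        .add_connector = my_add_connector,        /* (mgr, port, proppath) */
        .hotplug = my_hotplug,                    /* re-run output probing */
        .destroy_connector = my_destroy_connector,
};

static int my_mst_setup(struct my_encoder *enc)
{
        enc->mgr.cbs = &my_mst_cbs;
        return drm_dp_mst_topology_mgr_init(&enc->mgr, enc->dev, &enc->aux,
                                            16 /* max dpcd transaction */,
                                            6 /* max payloads */,
                                            enc->connector_id);
}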
2926 void drm_dp_mst_topology_mgr_destroy(struct drm_dp_mst_topology_mgr *mgr) in drm_dp_mst_topology_mgr_destroy() argument
2928 flush_work(&mgr->work); in drm_dp_mst_topology_mgr_destroy()
2929 flush_work(&mgr->destroy_connector_work); in drm_dp_mst_topology_mgr_destroy()
2930 mutex_lock(&mgr->payload_lock); in drm_dp_mst_topology_mgr_destroy()
2931 kfree(mgr->payloads); in drm_dp_mst_topology_mgr_destroy()
2932 mgr->payloads = NULL; in drm_dp_mst_topology_mgr_destroy()
2933 kfree(mgr->proposed_vcpis); in drm_dp_mst_topology_mgr_destroy()
2934 mgr->proposed_vcpis = NULL; in drm_dp_mst_topology_mgr_destroy()
2935 mutex_unlock(&mgr->payload_lock); in drm_dp_mst_topology_mgr_destroy()
2936 mgr->dev = NULL; in drm_dp_mst_topology_mgr_destroy()
2937 mgr->aux = NULL; in drm_dp_mst_topology_mgr_destroy()
2948 struct drm_dp_mst_topology_mgr *mgr = port->mgr; in drm_dp_mst_i2c_xfer() local
2955 mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent); in drm_dp_mst_i2c_xfer()
2991 drm_dp_queue_down_tx(mgr, txmsg); in drm_dp_mst_i2c_xfer()