/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * DOC: atomic modeset support
 *
 * The functions here implement the state management and hardware programming
 * dispatch required by the atomic modeset infrastructure.
 * See intel_atomic_plane.c for the plane-specific atomic functionality.
 */

#include <drm/drmP.h>
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_plane_helper.h>
#include "intel_drv.h"

/**
 * intel_connector_atomic_get_property - fetch connector property value
 * @connector: connector to fetch property for
 * @state: state containing the property value
 * @property: property to look up
 * @val: pointer to write property value into
 *
 * The DRM core does not store shadow copies of properties for
 * atomic-capable drivers. This entrypoint is used to fetch
 * the current value of a driver-specific connector property.
 *
 * Returns:
 * Zero on success, -EINVAL if @property is not known to the driver.
 */
int
intel_connector_atomic_get_property(struct drm_connector *connector,
				    const struct drm_connector_state *state,
				    struct drm_property *property,
				    uint64_t *val)
{
	int i;

	/*
	 * TODO: We only have atomic modeset for planes at the moment, so the
	 * crtc/connector code isn't quite ready yet. Until it's ready,
	 * continue to look up all property values in the DRM's shadow copy
	 * in obj->properties->values[].
	 *
	 * When the crtc/connector state work matures, this function should
	 * be updated to read the values out of the state structure instead.
	 */
	for (i = 0; i < connector->base.properties->count; i++) {
		if (connector->base.properties->properties[i] == property) {
			*val = connector->base.properties->values[i];
			return 0;
		}
	}

	return -EINVAL;
}
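
/*
 * Illustrative sketch only (not part of this file): the helper above is
 * meant to be installed as the .atomic_get_property hook of a connector's
 * &drm_connector_funcs table, next to the generic atomic state helpers.
 * The table name "example_connector_funcs" is hypothetical; the real
 * tables live in the individual connector/encoder files.
 *
 *	static const struct drm_connector_funcs example_connector_funcs = {
 *		...
 *		.atomic_get_property = intel_connector_atomic_get_property,
 *		.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
 *		.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
 *	};
 */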
/**
 * intel_crtc_duplicate_state - duplicate crtc state
 * @crtc: drm crtc
 *
 * Allocates and returns a copy of the crtc state (both common and
 * Intel-specific) for the specified crtc.
 *
 * Returns: The newly allocated crtc state, or NULL on failure.
 */
struct drm_crtc_state *
intel_crtc_duplicate_state(struct drm_crtc *crtc)
{
	struct intel_crtc_state *crtc_state;

	crtc_state = kmemdup(crtc->state, sizeof(*crtc_state), GFP_KERNEL);
	if (!crtc_state)
		return NULL;

	__drm_atomic_helper_crtc_duplicate_state(crtc, &crtc_state->base);

	crtc_state->update_pipe = false;

	return &crtc_state->base;
}

/**
 * intel_crtc_destroy_state - destroy crtc state
 * @crtc: drm crtc
 * @state: the crtc state to destroy
 *
 * Destroys the crtc state (both common and Intel-specific) for the
 * specified crtc.
 */
void
intel_crtc_destroy_state(struct drm_crtc *crtc,
			 struct drm_crtc_state *state)
{
	drm_atomic_helper_crtc_destroy_state(crtc, state);
}
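
/*
 * Illustrative sketch only: the two helpers above are intended to back the
 * .atomic_duplicate_state and .atomic_destroy_state hooks of the crtc's
 * &drm_crtc_funcs table (in this driver the actual table lives in
 * intel_display.c). The name "example_crtc_funcs" is hypothetical:
 *
 *	static const struct drm_crtc_funcs example_crtc_funcs = {
 *		...
 *		.atomic_duplicate_state = intel_crtc_duplicate_state,
 *		.atomic_destroy_state = intel_crtc_destroy_state,
 *	};
 */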
/**
 * intel_atomic_setup_scalers() - setup scalers for crtc per staged requests
 * @dev: DRM device
 * @intel_crtc: intel crtc
 * @crtc_state: incoming crtc_state to validate and setup scalers
 *
 * This function sets up scalers based on staged scaling requests for
 * @intel_crtc and its planes. It is called from the crtc-level check path.
 * If the request is supportable, it attaches scalers to the requested
 * planes and crtc.
 *
 * This function takes into account the current scaler(s) in use by any
 * planes that are not part of this atomic state.
 *
 * Returns:
 * 0 - scalers were set up successfully
 * error code - otherwise
 */
int intel_atomic_setup_scalers(struct drm_device *dev,
			       struct intel_crtc *intel_crtc,
			       struct intel_crtc_state *crtc_state)
{
	struct drm_plane *plane = NULL;
	struct intel_plane *intel_plane;
	struct intel_plane_state *plane_state = NULL;
	struct intel_crtc_scaler_state *scaler_state =
		&crtc_state->scaler_state;
	struct drm_atomic_state *drm_state = crtc_state->base.state;
	int num_scalers_need;
	int i, j;

	num_scalers_need = hweight32(scaler_state->scaler_users);

	/*
	 * High level flow:
	 * - staged scaler requests are already in scaler_state->scaler_users
	 * - check whether staged scaling requests can be supported
	 * - add planes using scalers that aren't in current transaction
	 * - assign scalers to requested users
	 * - as part of plane commit, scalers will be committed
	 *   (i.e., either attached or detached) to respective planes in hw
	 * - as part of crtc_commit, scaler will be either attached or detached
	 *   to crtc in hw
	 */

	/* fail if required scalers > available scalers */
	if (num_scalers_need > intel_crtc->num_scalers) {
		DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
			      num_scalers_need, intel_crtc->num_scalers);
		return -EINVAL;
	}

	/* walk through scaler_users bits and start assigning scalers */
	for (i = 0; i < sizeof(scaler_state->scaler_users) * 8; i++) {
		int *scaler_id;
		const char *name;
		int idx;

		/* skip if scaler not required */
		if (!(scaler_state->scaler_users & (1 << i)))
			continue;

		if (i == SKL_CRTC_INDEX) {
			name = "CRTC";
			idx = intel_crtc->base.base.id;

			/* panel fitter case: assign as a crtc scaler */
			scaler_id = &scaler_state->scaler_id;
		} else {
			name = "PLANE";

			/* plane scaler case: assign as a plane scaler */
			/* find the plane that set the bit as scaler_user */
			plane = drm_state->planes[i];

			/*
			 * to enable/disable hq mode, add planes that are using
			 * a scaler into this transaction
			 */
			if (!plane) {
				struct drm_plane_state *state;
				plane = drm_plane_from_index(dev, i);
				state = drm_atomic_get_plane_state(drm_state, plane);
				if (IS_ERR(state)) {
					DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
						      plane->base.id);
					return PTR_ERR(state);
				}

				/*
				 * the plane is added after plane checks are
				 * run, but since this plane is unchanged just
				 * do the minimum required validation.
				 */
				if (plane->type == DRM_PLANE_TYPE_PRIMARY)
					intel_crtc->atomic.wait_for_flips = true;
				crtc_state->base.planes_changed = true;
			}

			intel_plane = to_intel_plane(plane);
			idx = plane->base.id;

			/* plane on different crtc cannot be a scaler user of this crtc */
			if (WARN_ON(intel_plane->pipe != intel_crtc->pipe)) {
				continue;
			}

			plane_state = to_intel_plane_state(drm_state->plane_states[i]);
			scaler_id = &plane_state->scaler_id;
		}

		if (*scaler_id < 0) {
			/* find a free scaler */
			for (j = 0; j < intel_crtc->num_scalers; j++) {
				if (!scaler_state->scalers[j].in_use) {
					scaler_state->scalers[j].in_use = 1;
					*scaler_id = j;
					DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
						      intel_crtc->pipe, *scaler_id,
						      name, idx);
					break;
				}
			}
		}

		if (WARN_ON(*scaler_id < 0)) {
			DRM_DEBUG_KMS("Cannot find scaler for %s:%d\n", name, idx);
			continue;
		}

		/* set scaler mode */
		if (num_scalers_need == 1 && intel_crtc->pipe != PIPE_C) {
			/*
			 * when only 1 scaler is in use on either pipe A or B,
			 * scaler 0 operates in high quality (HQ) mode.
			 * In this case use scaler 0 to take advantage of HQ mode
			 */
			*scaler_id = 0;
			scaler_state->scalers[0].in_use = 1;
			scaler_state->scalers[0].mode = PS_SCALER_MODE_HQ;
			scaler_state->scalers[1].in_use = 0;
		} else {
			scaler_state->scalers[*scaler_id].mode = PS_SCALER_MODE_DYN;
		}
	}

	return 0;
}

static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
				  struct intel_shared_dpll_config *shared_dpll)
{
	enum intel_dpll_id i;

	/* Copy shared dpll state */
	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
		struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];

		shared_dpll[i] = pll->config;
	}
}

struct intel_shared_dpll_config *
intel_atomic_get_shared_dpll_state(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);

	WARN_ON(!drm_modeset_is_locked(&s->dev->mode_config.connection_mutex));

	if (!state->dpll_set) {
		state->dpll_set = true;

		intel_atomic_duplicate_dpll_state(to_i915(s->dev),
						  state->shared_dpll);
	}

	return state->shared_dpll;
}

struct drm_atomic_state *
intel_atomic_state_alloc(struct drm_device *dev)
{
	struct intel_atomic_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

	if (!state || drm_atomic_state_init(dev, &state->base) < 0) {
		kfree(state);
		return NULL;
	}

	return &state->base;
}

void intel_atomic_state_clear(struct drm_atomic_state *s)
{
	struct intel_atomic_state *state = to_intel_atomic_state(s);
	drm_atomic_state_default_clear(&state->base);
	state->dpll_set = false;
}
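
/*
 * Illustrative sketch only: intel_atomic_state_alloc() and
 * intel_atomic_state_clear() are meant to be installed as the
 * .atomic_state_alloc and .atomic_state_clear hooks of the device's
 * &drm_mode_config_funcs, so every atomic transaction carries a
 * driver-private &intel_atomic_state (and hence the shared DPLL copy
 * handed out by intel_atomic_get_shared_dpll_state()). The table name
 * "example_mode_funcs" is hypothetical; the real table lives in
 * intel_display.c:
 *
 *	static const struct drm_mode_config_funcs example_mode_funcs = {
 *		...
 *		.atomic_state_alloc = intel_atomic_state_alloc,
 *		.atomic_state_clear = intel_atomic_state_clear,
 *	};
 */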