Lines Matching refs: smp
81 struct mdp5_kms *get_kms(struct mdp5_smp *smp) in get_kms() argument
83 struct msm_drm_private *priv = smp->dev->dev_private; in get_kms()
107 return mdp5_cfg->smp.clients[pipe] + plane; in pipe2client()
111 static int smp_request_block(struct mdp5_smp *smp, in smp_request_block() argument
114 struct mdp5_kms *mdp5_kms = get_kms(smp); in smp_request_block()
116 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; in smp_request_block()
117 int i, ret, avail, cur_nblks, cnt = smp->blk_cnt; in smp_request_block()
122 reserved = hw_cfg->smp.reserved[cid]; in smp_request_block()
124 spin_lock_irqsave(&smp->state_lock, flags); in smp_request_block()
131 avail = cnt - bitmap_weight(smp->state, cnt); in smp_request_block()
143 int blk = find_first_zero_bit(smp->state, cnt); in smp_request_block()
145 set_bit(blk, smp->state); in smp_request_block()
157 spin_unlock_irqrestore(&smp->state_lock, flags); in smp_request_block()
161 static void set_fifo_thresholds(struct mdp5_smp *smp, in set_fifo_thresholds() argument
164 struct mdp5_kms *mdp5_kms = get_kms(smp); in set_fifo_thresholds()
165 u32 smp_entries_per_blk = smp->blk_size / (128 / BITS_PER_BYTE); in set_fifo_thresholds()
182 int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 width) in mdp5_smp_request() argument
184 struct mdp5_kms *mdp5_kms = get_kms(smp); in mdp5_smp_request()
201 n = DIV_ROUND_UP(fetch_stride * nlines, smp->blk_size); in mdp5_smp_request()
208 ret = smp_request_block(smp, pipe2client(pipe, i), n); in mdp5_smp_request()
218 set_fifo_thresholds(smp, pipe, nblks); in mdp5_smp_request()
224 void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) in mdp5_smp_release() argument
229 smp_request_block(smp, pipe2client(pipe, i), 0); in mdp5_smp_release()
230 set_fifo_thresholds(smp, pipe, 0); in mdp5_smp_release()
233 static void update_smp_state(struct mdp5_smp *smp, in update_smp_state() argument
236 struct mdp5_kms *mdp5_kms = get_kms(smp); in update_smp_state()
237 int cnt = smp->blk_cnt; in update_smp_state()
267 void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe) in mdp5_smp_configure() argument
269 int cnt = smp->blk_cnt; in mdp5_smp_configure()
275 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; in mdp5_smp_configure()
278 update_smp_state(smp, cid, &assigned); in mdp5_smp_configure()
283 void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) in mdp5_smp_commit() argument
285 int cnt = smp->blk_cnt; in mdp5_smp_commit()
291 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; in mdp5_smp_commit()
301 spin_lock_irqsave(&smp->state_lock, flags); in mdp5_smp_commit()
303 bitmap_andnot(smp->state, smp->state, released, cnt); in mdp5_smp_commit()
304 spin_unlock_irqrestore(&smp->state_lock, flags); in mdp5_smp_commit()
306 update_smp_state(smp, CID_UNUSED, &released); in mdp5_smp_commit()
313 void mdp5_smp_destroy(struct mdp5_smp *smp) in mdp5_smp_destroy() argument
315 kfree(smp); in mdp5_smp_destroy()
320 struct mdp5_smp *smp = NULL; in mdp5_smp_init() local
323 smp = kzalloc(sizeof(*smp), GFP_KERNEL); in mdp5_smp_init()
324 if (unlikely(!smp)) { in mdp5_smp_init()
329 smp->dev = dev; in mdp5_smp_init()
330 smp->blk_cnt = cfg->mmb_count; in mdp5_smp_init()
331 smp->blk_size = cfg->mmb_size; in mdp5_smp_init()
334 bitmap_copy(smp->state, cfg->reserved_state, smp->blk_cnt); in mdp5_smp_init()
335 spin_lock_init(&smp->state_lock); in mdp5_smp_init()
337 return smp; in mdp5_smp_init()
339 if (smp) in mdp5_smp_init()
340 mdp5_smp_destroy(smp); in mdp5_smp_init()
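Taken together, the smp-> accesses in the hits above (smp->dev, smp->blk_cnt, smp->blk_size, smp->state, smp->state_lock, smp->client_state[cid]) imply the shape of the allocator's state object. The following is a minimal reconstruction sketch, not copied from the driver headers: the field names come from the listing, while MAX_SMP_BLOCKS, MAX_CLIENTS, and the three per-client bitmaps are assumptions about what mdp5_smp_configure()/mdp5_smp_commit() would need to track.

    /*
     * Sketch of the allocator state implied by the references above.
     * MAX_SMP_BLOCKS / MAX_CLIENTS values and the per-client bitmap
     * names are placeholders, not taken from the real driver.
     */
    #include <linux/spinlock.h>
    #include <linux/bitmap.h>

    struct drm_device;                          /* only referenced via smp->dev */

    #define MAX_SMP_BLOCKS  64                  /* assumed upper bound on MMBs */
    #define MAX_CLIENTS     32                  /* assumed upper bound on SMP clients */

    struct mdp5_client_smp_state {
        /* assumed per-client bitmaps staged across request/configure/commit */
        DECLARE_BITMAP(inuse, MAX_SMP_BLOCKS);
        DECLARE_BITMAP(configured, MAX_SMP_BLOCKS);
        DECLARE_BITMAP(pending, MAX_SMP_BLOCKS);
    };

    struct mdp5_smp {
        struct drm_device *dev;                 /* smp->dev, used by get_kms() */

        int blk_cnt;                            /* cfg->mmb_count, see mdp5_smp_init() */
        int blk_size;                           /* cfg->mmb_size, bytes per MMB */

        spinlock_t state_lock;                  /* protects the global bitmap below */
        DECLARE_BITMAP(state, MAX_SMP_BLOCKS);  /* MMBs currently handed out */

        struct mdp5_client_smp_state client_state[MAX_CLIENTS];
    };

Under this reading, smp_request_block() takes state_lock, counts free blocks with bitmap_weight(), and claims them via find_first_zero_bit()/set_bit() on smp->state, while set_fifo_thresholds() converts allocated blocks into FIFO entries using smp->blk_size / (128 / BITS_PER_BYTE), i.e. the number of 128-bit SMP entries per memory macro block.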