Searched refs:ctx (Results 1 - 200 of 960) sorted by relevance

/linux-4.1.27/arch/powerpc/platforms/cell/spufs/
context.c
38 struct spu_context *ctx; alloc_spu_context() local
40 ctx = kzalloc(sizeof *ctx, GFP_KERNEL); alloc_spu_context()
41 if (!ctx) alloc_spu_context()
46 if (spu_init_csa(&ctx->csa)) alloc_spu_context()
48 spin_lock_init(&ctx->mmio_lock); alloc_spu_context()
49 mutex_init(&ctx->mapping_lock); alloc_spu_context()
50 kref_init(&ctx->kref); alloc_spu_context()
51 mutex_init(&ctx->state_mutex); alloc_spu_context()
52 mutex_init(&ctx->run_mutex); alloc_spu_context()
53 init_waitqueue_head(&ctx->ibox_wq); alloc_spu_context()
54 init_waitqueue_head(&ctx->wbox_wq); alloc_spu_context()
55 init_waitqueue_head(&ctx->stop_wq); alloc_spu_context()
56 init_waitqueue_head(&ctx->mfc_wq); alloc_spu_context()
57 init_waitqueue_head(&ctx->run_wq); alloc_spu_context()
58 ctx->state = SPU_STATE_SAVED; alloc_spu_context()
59 ctx->ops = &spu_backing_ops; alloc_spu_context()
60 ctx->owner = get_task_mm(current); alloc_spu_context()
61 INIT_LIST_HEAD(&ctx->rq); alloc_spu_context()
62 INIT_LIST_HEAD(&ctx->aff_list); alloc_spu_context()
64 spu_gang_add_ctx(gang, ctx); alloc_spu_context()
66 __spu_update_sched_info(ctx); alloc_spu_context()
67 spu_set_timeslice(ctx); alloc_spu_context()
68 ctx->stats.util_state = SPU_UTIL_IDLE_LOADED; alloc_spu_context()
69 ctx->stats.tstamp = ktime_get_ns(); alloc_spu_context()
74 kfree(ctx); alloc_spu_context()
75 ctx = NULL; alloc_spu_context()
77 return ctx; alloc_spu_context()
82 struct spu_context *ctx; destroy_spu_context() local
83 ctx = container_of(kref, struct spu_context, kref); destroy_spu_context()
84 spu_context_nospu_trace(destroy_spu_context__enter, ctx); destroy_spu_context()
85 mutex_lock(&ctx->state_mutex); destroy_spu_context()
86 spu_deactivate(ctx); destroy_spu_context()
87 mutex_unlock(&ctx->state_mutex); destroy_spu_context()
88 spu_fini_csa(&ctx->csa); destroy_spu_context()
89 if (ctx->gang) destroy_spu_context()
90 spu_gang_remove_ctx(ctx->gang, ctx); destroy_spu_context()
91 if (ctx->prof_priv_kref) destroy_spu_context()
92 kref_put(ctx->prof_priv_kref, ctx->prof_priv_release); destroy_spu_context()
93 BUG_ON(!list_empty(&ctx->rq)); destroy_spu_context()
95 kfree(ctx->switch_log); destroy_spu_context()
96 kfree(ctx); destroy_spu_context()
99 struct spu_context * get_spu_context(struct spu_context *ctx) get_spu_context() argument
101 kref_get(&ctx->kref); get_spu_context()
102 return ctx; get_spu_context()
105 int put_spu_context(struct spu_context *ctx) put_spu_context() argument
107 return kref_put(&ctx->kref, &destroy_spu_context); put_spu_context()
111 void spu_forget(struct spu_context *ctx) spu_forget() argument
120 mutex_lock(&ctx->state_mutex); spu_forget()
121 if (ctx->state != SPU_STATE_SAVED) spu_forget()
122 spu_deactivate(ctx); spu_forget()
124 mm = ctx->owner; spu_forget()
125 ctx->owner = NULL; spu_forget()
127 spu_release(ctx); spu_forget()
130 void spu_unmap_mappings(struct spu_context *ctx) spu_unmap_mappings() argument
132 mutex_lock(&ctx->mapping_lock); spu_unmap_mappings()
133 if (ctx->local_store) spu_unmap_mappings()
134 unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1); spu_unmap_mappings()
135 if (ctx->mfc) spu_unmap_mappings()
136 unmap_mapping_range(ctx->mfc, 0, SPUFS_MFC_MAP_SIZE, 1); spu_unmap_mappings()
137 if (ctx->cntl) spu_unmap_mappings()
138 unmap_mapping_range(ctx->cntl, 0, SPUFS_CNTL_MAP_SIZE, 1); spu_unmap_mappings()
139 if (ctx->signal1) spu_unmap_mappings()
140 unmap_mapping_range(ctx->signal1, 0, SPUFS_SIGNAL_MAP_SIZE, 1); spu_unmap_mappings()
141 if (ctx->signal2) spu_unmap_mappings()
142 unmap_mapping_range(ctx->signal2, 0, SPUFS_SIGNAL_MAP_SIZE, 1); spu_unmap_mappings()
143 if (ctx->mss) spu_unmap_mappings()
144 unmap_mapping_range(ctx->mss, 0, SPUFS_MSS_MAP_SIZE, 1); spu_unmap_mappings()
145 if (ctx->psmap) spu_unmap_mappings()
146 unmap_mapping_range(ctx->psmap, 0, SPUFS_PS_MAP_SIZE, 1); spu_unmap_mappings()
147 mutex_unlock(&ctx->mapping_lock); spu_unmap_mappings()
152 * @ctx: spu context to lock
154 int spu_acquire_saved(struct spu_context *ctx) spu_acquire_saved() argument
158 spu_context_nospu_trace(spu_acquire_saved__enter, ctx); spu_acquire_saved()
160 ret = spu_acquire(ctx); spu_acquire_saved()
164 if (ctx->state != SPU_STATE_SAVED) { spu_acquire_saved()
165 set_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags); spu_acquire_saved()
166 spu_deactivate(ctx); spu_acquire_saved()
174 * @ctx: context to unlock
176 void spu_release_saved(struct spu_context *ctx) spu_release_saved() argument
178 BUG_ON(ctx->state != SPU_STATE_SAVED); spu_release_saved()
180 if (test_and_clear_bit(SPU_SCHED_WAS_ACTIVE, &ctx->sched_flags) && spu_release_saved()
181 test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) spu_release_saved()
182 spu_activate(ctx, 0); spu_release_saved()
184 spu_release(ctx); spu_release_saved()
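
The four context.c functions at the top of these results form the classic kref lifecycle: alloc_spu_context() starts the count at one with kref_init(), get_spu_context() wraps kref_get(), and put_spu_context() hands destroy_spu_context() to kref_put() so teardown runs exactly once, when the last reference drops. A minimal userspace sketch of that pattern, with hypothetical names and C11 atomics standing in for the kernel's kref:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct ctx {
    atomic_int refcount;                /* plays the role of ctx->kref */
};

static struct ctx *ctx_alloc(void)
{
    struct ctx *c = calloc(1, sizeof(*c));
    if (!c)
        return NULL;
    atomic_init(&c->refcount, 1);       /* kref_init(): creator owns one ref */
    return c;
}

static struct ctx *ctx_get(struct ctx *c)
{
    atomic_fetch_add(&c->refcount, 1);  /* kref_get() */
    return c;
}

static int ctx_put(struct ctx *c)
{
    /* kref_put(): whoever drops the last reference frees the object */
    if (atomic_fetch_sub(&c->refcount, 1) == 1) {
        free(c);                        /* destroy_spu_context() analogue */
        return 1;
    }
    return 0;
}

int main(void)
{
    struct ctx *c = ctx_alloc();
    ctx_get(c);                           /* a second user takes a reference */
    ctx_put(c);                           /* 2 -> 1, object survives */
    printf("released: %d\n", ctx_put(c)); /* 1 -> 0, freed; prints 1 */
    return 0;
}
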
backing_ops.c
47 static void gen_spu_event(struct spu_context *ctx, u32 event) gen_spu_event() argument
53 ch0_cnt = ctx->csa.spu_chnlcnt_RW[0]; gen_spu_event()
54 ch0_data = ctx->csa.spu_chnldata_RW[0]; gen_spu_event()
55 ch1_data = ctx->csa.spu_chnldata_RW[1]; gen_spu_event()
56 ctx->csa.spu_chnldata_RW[0] |= event; gen_spu_event()
58 ctx->csa.spu_chnlcnt_RW[0] = 1; gen_spu_event()
62 static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data) spu_backing_mbox_read() argument
67 spin_lock(&ctx->csa.register_lock); spu_backing_mbox_read()
68 mbox_stat = ctx->csa.prob.mb_stat_R; spu_backing_mbox_read()
74 *data = ctx->csa.prob.pu_mb_R; spu_backing_mbox_read()
75 ctx->csa.prob.mb_stat_R &= ~(0x0000ff); spu_backing_mbox_read()
76 ctx->csa.spu_chnlcnt_RW[28] = 1; spu_backing_mbox_read()
77 gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT); spu_backing_mbox_read()
80 spin_unlock(&ctx->csa.register_lock); spu_backing_mbox_read()
84 static u32 spu_backing_mbox_stat_read(struct spu_context *ctx) spu_backing_mbox_stat_read() argument
86 return ctx->csa.prob.mb_stat_R; spu_backing_mbox_stat_read()
89 static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx, spu_backing_mbox_stat_poll() argument
96 spin_lock_irq(&ctx->csa.register_lock); spu_backing_mbox_stat_poll()
97 stat = ctx->csa.prob.mb_stat_R; spu_backing_mbox_stat_poll()
108 ctx->csa.priv1.int_stat_class2_RW &= spu_backing_mbox_stat_poll()
110 ctx->csa.priv1.int_mask_class2_RW |= spu_backing_mbox_stat_poll()
118 ctx->csa.priv1.int_stat_class2_RW &= spu_backing_mbox_stat_poll()
120 ctx->csa.priv1.int_mask_class2_RW |= spu_backing_mbox_stat_poll()
124 spin_unlock_irq(&ctx->csa.register_lock); spu_backing_mbox_stat_poll()
128 static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data) spu_backing_ibox_read() argument
132 spin_lock(&ctx->csa.register_lock); spu_backing_ibox_read()
133 if (ctx->csa.prob.mb_stat_R & 0xff0000) { spu_backing_ibox_read()
138 *data = ctx->csa.priv2.puint_mb_R; spu_backing_ibox_read()
139 ctx->csa.prob.mb_stat_R &= ~(0xff0000); spu_backing_ibox_read()
140 ctx->csa.spu_chnlcnt_RW[30] = 1; spu_backing_ibox_read()
141 gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT); spu_backing_ibox_read()
145 ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR; spu_backing_ibox_read()
148 spin_unlock(&ctx->csa.register_lock); spu_backing_ibox_read()
152 static int spu_backing_wbox_write(struct spu_context *ctx, u32 data) spu_backing_wbox_write() argument
156 spin_lock(&ctx->csa.register_lock); spu_backing_wbox_write()
157 if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) { spu_backing_wbox_write()
158 int slot = ctx->csa.spu_chnlcnt_RW[29]; spu_backing_wbox_write()
159 int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8; spu_backing_wbox_write()
166 ctx->csa.spu_mailbox_data[slot] = data; spu_backing_wbox_write()
167 ctx->csa.spu_chnlcnt_RW[29] = ++slot; spu_backing_wbox_write()
168 ctx->csa.prob.mb_stat_R &= ~(0x00ff00); spu_backing_wbox_write()
169 ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8); spu_backing_wbox_write()
170 gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT); spu_backing_wbox_write()
175 ctx->csa.priv1.int_mask_class2_RW |= spu_backing_wbox_write()
179 spin_unlock(&ctx->csa.register_lock); spu_backing_wbox_write()
183 static u32 spu_backing_signal1_read(struct spu_context *ctx) spu_backing_signal1_read() argument
185 return ctx->csa.spu_chnldata_RW[3]; spu_backing_signal1_read()
188 static void spu_backing_signal1_write(struct spu_context *ctx, u32 data) spu_backing_signal1_write() argument
190 spin_lock(&ctx->csa.register_lock); spu_backing_signal1_write()
191 if (ctx->csa.priv2.spu_cfg_RW & 0x1) spu_backing_signal1_write()
192 ctx->csa.spu_chnldata_RW[3] |= data; spu_backing_signal1_write()
194 ctx->csa.spu_chnldata_RW[3] = data; spu_backing_signal1_write()
195 ctx->csa.spu_chnlcnt_RW[3] = 1; spu_backing_signal1_write()
196 gen_spu_event(ctx, MFC_SIGNAL_1_EVENT); spu_backing_signal1_write()
197 spin_unlock(&ctx->csa.register_lock); spu_backing_signal1_write()
200 static u32 spu_backing_signal2_read(struct spu_context *ctx) spu_backing_signal2_read() argument
202 return ctx->csa.spu_chnldata_RW[4]; spu_backing_signal2_read()
205 static void spu_backing_signal2_write(struct spu_context *ctx, u32 data) spu_backing_signal2_write() argument
207 spin_lock(&ctx->csa.register_lock); spu_backing_signal2_write()
208 if (ctx->csa.priv2.spu_cfg_RW & 0x2) spu_backing_signal2_write()
209 ctx->csa.spu_chnldata_RW[4] |= data; spu_backing_signal2_write()
211 ctx->csa.spu_chnldata_RW[4] = data; spu_backing_signal2_write()
212 ctx->csa.spu_chnlcnt_RW[4] = 1; spu_backing_signal2_write()
213 gen_spu_event(ctx, MFC_SIGNAL_2_EVENT); spu_backing_signal2_write()
214 spin_unlock(&ctx->csa.register_lock); spu_backing_signal2_write()
217 static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val) spu_backing_signal1_type_set() argument
221 spin_lock(&ctx->csa.register_lock); spu_backing_signal1_type_set()
222 tmp = ctx->csa.priv2.spu_cfg_RW; spu_backing_signal1_type_set()
227 ctx->csa.priv2.spu_cfg_RW = tmp; spu_backing_signal1_type_set()
228 spin_unlock(&ctx->csa.register_lock); spu_backing_signal1_type_set()
231 static u64 spu_backing_signal1_type_get(struct spu_context *ctx) spu_backing_signal1_type_get() argument
233 return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0); spu_backing_signal1_type_get()
236 static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val) spu_backing_signal2_type_set() argument
240 spin_lock(&ctx->csa.register_lock); spu_backing_signal2_type_set()
241 tmp = ctx->csa.priv2.spu_cfg_RW; spu_backing_signal2_type_set()
246 ctx->csa.priv2.spu_cfg_RW = tmp; spu_backing_signal2_type_set()
247 spin_unlock(&ctx->csa.register_lock); spu_backing_signal2_type_set()
250 static u64 spu_backing_signal2_type_get(struct spu_context *ctx) spu_backing_signal2_type_get() argument
252 return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0); spu_backing_signal2_type_get()
255 static u32 spu_backing_npc_read(struct spu_context *ctx) spu_backing_npc_read() argument
257 return ctx->csa.prob.spu_npc_RW; spu_backing_npc_read()
260 static void spu_backing_npc_write(struct spu_context *ctx, u32 val) spu_backing_npc_write() argument
262 ctx->csa.prob.spu_npc_RW = val; spu_backing_npc_write()
265 static u32 spu_backing_status_read(struct spu_context *ctx) spu_backing_status_read() argument
267 return ctx->csa.prob.spu_status_R; spu_backing_status_read()
270 static char *spu_backing_get_ls(struct spu_context *ctx) spu_backing_get_ls() argument
272 return ctx->csa.lscsa->ls; spu_backing_get_ls()
275 static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val) spu_backing_privcntl_write() argument
277 ctx->csa.priv2.spu_privcntl_RW = val; spu_backing_privcntl_write()
280 static u32 spu_backing_runcntl_read(struct spu_context *ctx) spu_backing_runcntl_read() argument
282 return ctx->csa.prob.spu_runcntl_RW; spu_backing_runcntl_read()
285 static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val) spu_backing_runcntl_write() argument
287 spin_lock(&ctx->csa.register_lock); spu_backing_runcntl_write()
288 ctx->csa.prob.spu_runcntl_RW = val; spu_backing_runcntl_write()
290 ctx->csa.prob.spu_status_R &= spu_backing_runcntl_write()
296 ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING; spu_backing_runcntl_write()
298 ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING; spu_backing_runcntl_write()
300 spin_unlock(&ctx->csa.register_lock); spu_backing_runcntl_write()
303 static void spu_backing_runcntl_stop(struct spu_context *ctx) spu_backing_runcntl_stop() argument
305 spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP); spu_backing_runcntl_stop()
308 static void spu_backing_master_start(struct spu_context *ctx) spu_backing_master_start() argument
310 struct spu_state *csa = &ctx->csa; spu_backing_master_start()
319 static void spu_backing_master_stop(struct spu_context *ctx) spu_backing_master_stop() argument
321 struct spu_state *csa = &ctx->csa; spu_backing_master_stop()
330 static int spu_backing_set_mfc_query(struct spu_context * ctx, u32 mask, spu_backing_set_mfc_query() argument
333 struct spu_problem_collapsed *prob = &ctx->csa.prob; spu_backing_set_mfc_query()
336 spin_lock(&ctx->csa.register_lock); spu_backing_set_mfc_query()
349 ctx->csa.prob.dma_tagstatus_R &= mask; spu_backing_set_mfc_query()
351 spin_unlock(&ctx->csa.register_lock); spu_backing_set_mfc_query()
356 static u32 spu_backing_read_mfc_tagstatus(struct spu_context * ctx) spu_backing_read_mfc_tagstatus() argument
358 return ctx->csa.prob.dma_tagstatus_R; spu_backing_read_mfc_tagstatus()
361 static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx) spu_backing_get_mfc_free_elements() argument
363 return ctx->csa.prob.dma_qstatus_R; spu_backing_get_mfc_free_elements()
366 static int spu_backing_send_mfc_command(struct spu_context *ctx, spu_backing_send_mfc_command() argument
371 spin_lock(&ctx->csa.register_lock); spu_backing_send_mfc_command()
374 spin_unlock(&ctx->csa.register_lock); spu_backing_send_mfc_command()
379 static void spu_backing_restart_dma(struct spu_context *ctx) spu_backing_restart_dma() argument
381 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND; spu_backing_restart_dma()
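
Everything in backing_ops.c follows one recipe: take ctx->csa.register_lock, then emulate a hardware register access against the saved context image (ctx->csa), so a descheduled context answers mailbox, signal, and run-control operations the way live silicon would. The functions are collected into an ops table that alloc_spu_context() installs as ctx->ops = &spu_backing_ops. A condensed sketch of that function-pointer dispatch, with hypothetical names and only three of the many real entries:

#include <stdint.h>

struct spu_context;                     /* forward declaration */

/* trimmed-down ops vtable; the kernel's has many more entries */
struct spu_context_ops {
    uint32_t (*mbox_stat_read)(struct spu_context *ctx);
    uint32_t (*status_read)(struct spu_context *ctx);
    void     (*runcntl_write)(struct spu_context *ctx, uint32_t val);
};

struct spu_context {
    const struct spu_context_ops *ops;  /* backing or hw, swapped at (un)bind */
    struct { uint32_t mb_stat_R, spu_status_R, spu_runcntl_RW; } saved;
};

/* backing implementations work on the in-memory save area */
static uint32_t backing_mbox_stat_read(struct spu_context *ctx)
{
    return ctx->saved.mb_stat_R;
}
static uint32_t backing_status_read(struct spu_context *ctx)
{
    return ctx->saved.spu_status_R;
}
static void backing_runcntl_write(struct spu_context *ctx, uint32_t val)
{
    ctx->saved.spu_runcntl_RW = val;
}

static const struct spu_context_ops backing_ops = {
    .mbox_stat_read = backing_mbox_stat_read,
    .status_read    = backing_status_read,
    .runcntl_write  = backing_runcntl_write,
};

/* callers never care which implementation is active */
static uint32_t read_status(struct spu_context *ctx)
{
    return ctx->ops->status_read(ctx);
}

int main(void)
{
    struct spu_context ctx = { .ops = &backing_ops };
    ctx.ops->runcntl_write(&ctx, 1);    /* lands in the save area */
    return read_status(&ctx) == 0 ? 0 : 1;
}
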
run.c
16 struct spu_context *ctx = spu->ctx; spufs_stop_callback() local
25 if (ctx) { spufs_stop_callback()
29 ctx->csa.class_0_pending = spu->class_0_pending; spufs_stop_callback()
30 ctx->csa.class_0_dar = spu->class_0_dar; spufs_stop_callback()
33 ctx->csa.class_1_dsisr = spu->class_1_dsisr; spufs_stop_callback()
34 ctx->csa.class_1_dar = spu->class_1_dar; spufs_stop_callback()
44 wake_up_all(&ctx->stop_wq); spufs_stop_callback()
48 int spu_stopped(struct spu_context *ctx, u32 *stat) spu_stopped() argument
57 *stat = ctx->ops->status_read(ctx); spu_stopped()
68 if (test_bit(SPU_SCHED_NOTIFY_ACTIVE, &ctx->sched_flags)) spu_stopped()
71 dsisr = ctx->csa.class_1_dsisr; spu_stopped()
75 if (ctx->csa.class_0_pending) spu_stopped()
81 static int spu_setup_isolated(struct spu_context *ctx) spu_setup_isolated() argument
101 spu_unmap_mappings(ctx); spu_setup_isolated()
103 mfc_cntl = &ctx->spu->priv2->mfc_control_RW; spu_setup_isolated()
124 sr1 = spu_mfc_sr1_get(ctx->spu); spu_setup_isolated()
126 spu_mfc_sr1_set(ctx->spu, sr1); spu_setup_isolated()
129 ctx->ops->signal1_write(ctx, (unsigned long)isolated_loader >> 32); spu_setup_isolated()
130 ctx->ops->signal2_write(ctx, spu_setup_isolated()
133 ctx->ops->runcntl_write(ctx, spu_setup_isolated()
138 while (((status = ctx->ops->status_read(ctx)) & status_loading) == spu_setup_isolated()
153 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); spu_setup_isolated()
161 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_STOP); spu_setup_isolated()
169 spu_mfc_sr1_set(ctx->spu, sr1); spu_setup_isolated()
175 static int spu_run_init(struct spu_context *ctx, u32 *npc) spu_run_init() argument
180 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); spu_run_init()
186 if (ctx->flags & SPU_CREATE_NOSCHED) { spu_run_init()
187 if (ctx->state == SPU_STATE_SAVED) { spu_run_init()
188 ret = spu_activate(ctx, 0); spu_run_init()
197 if (ctx->flags & SPU_CREATE_ISOLATE) { spu_run_init()
198 if (!(ctx->ops->status_read(ctx) & SPU_STATUS_ISOLATED_STATE)) { spu_run_init()
199 ret = spu_setup_isolated(ctx); spu_run_init()
208 runcntl = ctx->ops->runcntl_read(ctx) & spu_run_init()
220 ctx->ops->privcntl_write(ctx, privcntl); spu_run_init()
221 ctx->ops->npc_write(ctx, *npc); spu_run_init()
224 ctx->ops->runcntl_write(ctx, runcntl); spu_run_init()
226 if (ctx->flags & SPU_CREATE_NOSCHED) { spu_run_init()
227 spuctx_switch_state(ctx, SPU_UTIL_USER); spu_run_init()
230 if (ctx->state == SPU_STATE_SAVED) { spu_run_init()
231 ret = spu_activate(ctx, 0); spu_run_init()
235 spuctx_switch_state(ctx, SPU_UTIL_USER); spu_run_init()
239 set_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); spu_run_init()
243 static int spu_run_fini(struct spu_context *ctx, u32 *npc, spu_run_fini() argument
248 spu_del_from_rq(ctx); spu_run_fini()
250 *status = ctx->ops->status_read(ctx); spu_run_fini()
251 *npc = ctx->ops->npc_read(ctx); spu_run_fini()
253 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); spu_run_fini()
254 clear_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags); spu_run_fini()
255 spu_switch_log_notify(NULL, ctx, SWITCH_LOG_EXIT, *status); spu_run_fini()
256 spu_release(ctx); spu_run_fini()
272 static int spu_handle_restartsys(struct spu_context *ctx, long *spu_ret, spu_handle_restartsys() argument
308 static int spu_process_callback(struct spu_context *ctx) spu_process_callback() argument
317 npc = ctx->ops->npc_read(ctx) & ~3; spu_process_callback()
318 ls = (void __iomem *)ctx->ops->get_ls(ctx); spu_process_callback()
330 spu_release(ctx); spu_process_callback()
334 ret = spu_handle_restartsys(ctx, &spu_ret, &npc); spu_process_callback()
336 mutex_lock(&ctx->state_mutex); spu_process_callback()
343 ls = (void __iomem *)ctx->ops->get_ls(ctx); spu_process_callback()
347 ctx->ops->npc_write(ctx, npc); spu_process_callback()
348 ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); spu_process_callback()
352 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *event) spufs_run_spu() argument
358 if (mutex_lock_interruptible(&ctx->run_mutex)) spufs_run_spu()
361 ctx->event_return = 0; spufs_run_spu()
363 ret = spu_acquire(ctx); spufs_run_spu()
367 spu_enable_spu(ctx); spufs_run_spu()
369 spu_update_sched_info(ctx); spufs_run_spu()
371 ret = spu_run_init(ctx, npc); spufs_run_spu()
373 spu_release(ctx); spufs_run_spu()
378 ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status)); spufs_run_spu()
385 mutex_lock(&ctx->state_mutex); spufs_run_spu()
388 spu = ctx->spu; spufs_run_spu()
390 &ctx->sched_flags))) { spufs_run_spu()
392 spu_switch_notify(spu, ctx); spufs_run_spu()
397 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); spufs_run_spu()
401 ret = spu_process_callback(ctx); spufs_run_spu()
406 ret = spufs_handle_class1(ctx); spufs_run_spu()
410 ret = spufs_handle_class0(ctx); spufs_run_spu()
420 spu_disable_spu(ctx); spufs_run_spu()
421 ret = spu_run_fini(ctx, npc, &status); spufs_run_spu()
422 spu_yield(ctx); spufs_run_spu()
426 ctx->stats.libassist++; spufs_run_spu()
450 *event = ctx->event_return; spufs_run_spu()
452 mutex_unlock(&ctx->run_mutex); spufs_run_spu()
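
spufs_run_spu() is the heart of run.c: it serializes runs on ctx->run_mutex, programs NPC and run control in spu_run_init(), then loops on spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status)), servicing syscall callbacks and class 0/1 faults each time the SPU stops, until the status says the program is done. The wait-until-predicate shape reduces to a condition-variable loop; a userspace sketch under that assumption, with hypothetical names (the kernel macro additionally handles signals and re-takes state_mutex):

#include <pthread.h>
#include <stdbool.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t stop_wq = PTHREAD_COND_INITIALIZER;
static unsigned int spu_status;         /* written by the "stop callback" side */

static bool spu_stopped(unsigned int *stat)
{
    *stat = spu_status;
    return (spu_status & 1) != 0;       /* e.g. a SPU_STATUS_STOPPED bit */
}

/* waiter side: the spufs_wait(ctx->stop_wq, spu_stopped(...)) shape */
static unsigned int run_until_stopped(void)
{
    unsigned int stat;

    pthread_mutex_lock(&lock);
    while (!spu_stopped(&stat))
        pthread_cond_wait(&stop_wq, &lock);
    pthread_mutex_unlock(&lock);
    return stat;
}

/* waker side: what spufs_stop_callback() does from the interrupt path */
static void stop_callback(unsigned int new_status)
{
    pthread_mutex_lock(&lock);
    spu_status = new_status;
    pthread_mutex_unlock(&lock);
    pthread_cond_broadcast(&stop_wq);   /* wake_up_all(&ctx->stop_wq) */
}

static void *spu_side(void *arg)
{
    (void)arg;
    usleep(1000);                       /* "program runs", then stops */
    stop_callback(0x1);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, spu_side, NULL);
    unsigned int stat = run_until_stopped();
    pthread_join(t, NULL);
    return (stat & 1) ? 0 : 1;
}
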
file.c
164 struct spu_context *ctx = i->i_ctx; spufs_mem_open() local
166 mutex_lock(&ctx->mapping_lock); spufs_mem_open()
167 file->private_data = ctx; spufs_mem_open()
169 ctx->local_store = inode->i_mapping; spufs_mem_open()
170 mutex_unlock(&ctx->mapping_lock); spufs_mem_open()
178 struct spu_context *ctx = i->i_ctx; spufs_mem_release() local
180 mutex_lock(&ctx->mapping_lock); spufs_mem_release()
182 ctx->local_store = NULL; spufs_mem_release()
183 mutex_unlock(&ctx->mapping_lock); spufs_mem_release()
188 __spufs_mem_read(struct spu_context *ctx, char __user *buffer, __spufs_mem_read() argument
191 char *local_store = ctx->ops->get_ls(ctx); __spufs_mem_read()
200 struct spu_context *ctx = file->private_data; spufs_mem_read() local
203 ret = spu_acquire(ctx); spufs_mem_read()
206 ret = __spufs_mem_read(ctx, buffer, size, pos); spufs_mem_read()
207 spu_release(ctx); spufs_mem_read()
216 struct spu_context *ctx = file->private_data; spufs_mem_write() local
224 ret = spu_acquire(ctx); spufs_mem_write()
228 local_store = ctx->ops->get_ls(ctx); spufs_mem_write()
230 spu_release(ctx); spufs_mem_write()
238 struct spu_context *ctx = vma->vm_file->private_data; spufs_mem_mmap_fault() local
243 struct spu_state *csa = &ctx->csa; spufs_mem_mmap_fault()
266 if (spu_acquire(ctx)) spufs_mem_mmap_fault()
269 if (ctx->state == SPU_STATE_SAVED) { spufs_mem_mmap_fault()
271 pfn = vmalloc_to_pfn(ctx->csa.lscsa->ls + offset); spufs_mem_mmap_fault()
274 pfn = (ctx->spu->local_store_phys + offset) >> PAGE_SHIFT; spufs_mem_mmap_fault()
278 spu_release(ctx); spufs_mem_mmap_fault()
287 struct spu_context *ctx = vma->vm_file->private_data; spufs_mem_mmap_access() local
293 if (spu_acquire(ctx)) spufs_mem_mmap_access()
297 local_store = ctx->ops->get_ls(ctx); spufs_mem_mmap_access()
302 spu_release(ctx); spufs_mem_mmap_access()
314 struct spu_context *ctx = file->private_data; spufs_mem_mmap() local
315 struct spu_state *csa = &ctx->csa; spufs_mem_mmap()
344 struct spu_context *ctx = file->private_data; spufs_get_unmapped_area() local
345 struct spu_state *csa = &ctx->csa; spufs_get_unmapped_area()
375 struct spu_context *ctx = vma->vm_file->private_data; spufs_ps_fault() local
379 spu_context_nospu_trace(spufs_ps_fault__enter, ctx); spufs_ps_fault()
392 get_spu_context(ctx); spufs_ps_fault()
402 if (spu_acquire(ctx)) spufs_ps_fault()
405 if (ctx->state == SPU_STATE_SAVED) { spufs_ps_fault()
407 spu_context_nospu_trace(spufs_ps_fault__sleep, ctx); spufs_ps_fault()
408 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); spufs_ps_fault()
409 spu_context_trace(spufs_ps_fault__wake, ctx, ctx->spu); spufs_ps_fault()
412 area = ctx->spu->problem_phys + ps_offs; spufs_ps_fault()
415 spu_context_trace(spufs_ps_fault__insert, ctx, ctx->spu); spufs_ps_fault()
419 spu_release(ctx); spufs_ps_fault()
422 put_spu_context(ctx); spufs_ps_fault()
457 struct spu_context *ctx = data; spufs_cntl_get() local
460 ret = spu_acquire(ctx); spufs_cntl_get()
463 *val = ctx->ops->status_read(ctx); spufs_cntl_get()
464 spu_release(ctx); spufs_cntl_get()
471 struct spu_context *ctx = data; spufs_cntl_set() local
474 ret = spu_acquire(ctx); spufs_cntl_set()
477 ctx->ops->runcntl_write(ctx, val); spufs_cntl_set()
478 spu_release(ctx); spufs_cntl_set()
486 struct spu_context *ctx = i->i_ctx; spufs_cntl_open() local
488 mutex_lock(&ctx->mapping_lock); spufs_cntl_open()
489 file->private_data = ctx; spufs_cntl_open()
491 ctx->cntl = inode->i_mapping; spufs_cntl_open()
492 mutex_unlock(&ctx->mapping_lock); spufs_cntl_open()
501 struct spu_context *ctx = i->i_ctx; spufs_cntl_release() local
505 mutex_lock(&ctx->mapping_lock); spufs_cntl_release()
507 ctx->cntl = NULL; spufs_cntl_release()
508 mutex_unlock(&ctx->mapping_lock); spufs_cntl_release()
530 __spufs_regs_read(struct spu_context *ctx, char __user *buffer, __spufs_regs_read() argument
533 struct spu_lscsa *lscsa = ctx->csa.lscsa; __spufs_regs_read()
543 struct spu_context *ctx = file->private_data; spufs_regs_read() local
547 if (*pos >= sizeof(ctx->csa.lscsa->gprs)) spufs_regs_read()
550 ret = spu_acquire_saved(ctx); spufs_regs_read()
553 ret = __spufs_regs_read(ctx, buffer, size, pos); spufs_regs_read()
554 spu_release_saved(ctx); spufs_regs_read()
562 struct spu_context *ctx = file->private_data; spufs_regs_write() local
563 struct spu_lscsa *lscsa = ctx->csa.lscsa; spufs_regs_write()
569 ret = spu_acquire_saved(ctx); spufs_regs_write()
576 spu_release_saved(ctx); spufs_regs_write()
588 __spufs_fpcr_read(struct spu_context *ctx, char __user * buffer, __spufs_fpcr_read() argument
591 struct spu_lscsa *lscsa = ctx->csa.lscsa; __spufs_fpcr_read()
601 struct spu_context *ctx = file->private_data; spufs_fpcr_read() local
603 ret = spu_acquire_saved(ctx); spufs_fpcr_read()
606 ret = __spufs_fpcr_read(ctx, buffer, size, pos); spufs_fpcr_read()
607 spu_release_saved(ctx); spufs_fpcr_read()
615 struct spu_context *ctx = file->private_data; spufs_fpcr_write() local
616 struct spu_lscsa *lscsa = ctx->csa.lscsa; spufs_fpcr_write()
622 ret = spu_acquire_saved(ctx); spufs_fpcr_write()
629 spu_release_saved(ctx); spufs_fpcr_write()
660 struct spu_context *ctx = file->private_data; spufs_mbox_read() local
672 count = spu_acquire(ctx); spufs_mbox_read()
678 ret = ctx->ops->mbox_read(ctx, &mbox_data); spufs_mbox_read()
694 spu_release(ctx); spufs_mbox_read()
711 struct spu_context *ctx = file->private_data; spufs_mbox_stat_read() local
718 ret = spu_acquire(ctx); spufs_mbox_stat_read()
722 mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff; spufs_mbox_stat_read()
724 spu_release(ctx); spufs_mbox_stat_read()
739 size_t spu_ibox_read(struct spu_context *ctx, u32 *data) spu_ibox_read() argument
741 return ctx->ops->ibox_read(ctx, data); spu_ibox_read()
746 struct spu_context *ctx = file->private_data; spufs_ibox_fasync() local
748 return fasync_helper(fd, file, on, &ctx->ibox_fasync); spufs_ibox_fasync()
754 struct spu_context *ctx = spu->ctx; spufs_ibox_callback() local
756 if (!ctx) spufs_ibox_callback()
759 wake_up_all(&ctx->ibox_wq); spufs_ibox_callback()
760 kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN); spufs_ibox_callback()
778 struct spu_context *ctx = file->private_data; spufs_ibox_read() local
790 count = spu_acquire(ctx); spufs_ibox_read()
797 if (!spu_ibox_read(ctx, &ibox_data)) { spufs_ibox_read()
802 count = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data)); spufs_ibox_read()
814 ret = ctx->ops->ibox_read(ctx, &ibox_data); spufs_ibox_read()
828 spu_release(ctx); spufs_ibox_read()
835 struct spu_context *ctx = file->private_data; spufs_ibox_poll() local
838 poll_wait(file, &ctx->ibox_wq, wait); spufs_ibox_poll()
844 mutex_lock(&ctx->state_mutex); spufs_ibox_poll()
845 mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM); spufs_ibox_poll()
846 spu_release(ctx); spufs_ibox_poll()
862 struct spu_context *ctx = file->private_data; spufs_ibox_stat_read() local
869 ret = spu_acquire(ctx); spufs_ibox_stat_read()
872 ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff; spufs_ibox_stat_read()
873 spu_release(ctx); spufs_ibox_stat_read()
888 size_t spu_wbox_write(struct spu_context *ctx, u32 data) spu_wbox_write() argument
890 return ctx->ops->wbox_write(ctx, data); spu_wbox_write()
895 struct spu_context *ctx = file->private_data; spufs_wbox_fasync() local
898 ret = fasync_helper(fd, file, on, &ctx->wbox_fasync); spufs_wbox_fasync()
906 struct spu_context *ctx = spu->ctx; spufs_wbox_callback() local
908 if (!ctx) spufs_wbox_callback()
911 wake_up_all(&ctx->wbox_wq); spufs_wbox_callback()
912 kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT); spufs_wbox_callback()
930 struct spu_context *ctx = file->private_data; spufs_wbox_write() local
944 count = spu_acquire(ctx); spufs_wbox_write()
954 if (!spu_wbox_write(ctx, wbox_data)) { spufs_wbox_write()
959 count = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data)); spufs_wbox_write()
972 ret = spu_wbox_write(ctx, wbox_data); spufs_wbox_write()
978 spu_release(ctx); spufs_wbox_write()
985 struct spu_context *ctx = file->private_data; spufs_wbox_poll() local
988 poll_wait(file, &ctx->wbox_wq, wait); spufs_wbox_poll()
994 mutex_lock(&ctx->state_mutex); spufs_wbox_poll()
995 mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM); spufs_wbox_poll()
996 spu_release(ctx); spufs_wbox_poll()
1012 struct spu_context *ctx = file->private_data; spufs_wbox_stat_read() local
1019 ret = spu_acquire(ctx); spufs_wbox_stat_read()
1022 wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff; spufs_wbox_stat_read()
1023 spu_release(ctx); spufs_wbox_stat_read()
1040 struct spu_context *ctx = i->i_ctx; spufs_signal1_open() local
1042 mutex_lock(&ctx->mapping_lock); spufs_signal1_open()
1043 file->private_data = ctx; spufs_signal1_open()
1045 ctx->signal1 = inode->i_mapping; spufs_signal1_open()
1046 mutex_unlock(&ctx->mapping_lock); spufs_signal1_open()
1054 struct spu_context *ctx = i->i_ctx; spufs_signal1_release() local
1056 mutex_lock(&ctx->mapping_lock); spufs_signal1_release()
1058 ctx->signal1 = NULL; spufs_signal1_release()
1059 mutex_unlock(&ctx->mapping_lock); spufs_signal1_release()
1063 static ssize_t __spufs_signal1_read(struct spu_context *ctx, char __user *buf, __spufs_signal1_read() argument
1072 if (ctx->csa.spu_chnlcnt_RW[3]) { __spufs_signal1_read()
1073 data = ctx->csa.spu_chnldata_RW[3]; __spufs_signal1_read()
1091 struct spu_context *ctx = file->private_data; spufs_signal1_read() local
1093 ret = spu_acquire_saved(ctx); spufs_signal1_read()
1096 ret = __spufs_signal1_read(ctx, buf, len, pos); spufs_signal1_read()
1097 spu_release_saved(ctx); spufs_signal1_read()
1105 struct spu_context *ctx; spufs_signal1_write() local
1109 ctx = file->private_data; spufs_signal1_write()
1117 ret = spu_acquire(ctx); spufs_signal1_write()
1120 ctx->ops->signal1_write(ctx, data); spufs_signal1_write()
1121 spu_release(ctx); spufs_signal1_write()
1177 struct spu_context *ctx = i->i_ctx; spufs_signal2_open() local
1179 mutex_lock(&ctx->mapping_lock); spufs_signal2_open()
1180 file->private_data = ctx; spufs_signal2_open()
1182 ctx->signal2 = inode->i_mapping; spufs_signal2_open()
1183 mutex_unlock(&ctx->mapping_lock); spufs_signal2_open()
1191 struct spu_context *ctx = i->i_ctx; spufs_signal2_release() local
1193 mutex_lock(&ctx->mapping_lock); spufs_signal2_release()
1195 ctx->signal2 = NULL; spufs_signal2_release()
1196 mutex_unlock(&ctx->mapping_lock); spufs_signal2_release()
1200 static ssize_t __spufs_signal2_read(struct spu_context *ctx, char __user *buf, __spufs_signal2_read() argument
1209 if (ctx->csa.spu_chnlcnt_RW[4]) { __spufs_signal2_read()
1210 data = ctx->csa.spu_chnldata_RW[4]; __spufs_signal2_read()
1227 struct spu_context *ctx = file->private_data; spufs_signal2_read() local
1230 ret = spu_acquire_saved(ctx); spufs_signal2_read()
1233 ret = __spufs_signal2_read(ctx, buf, len, pos); spufs_signal2_read()
1234 spu_release_saved(ctx); spufs_signal2_read()
1242 struct spu_context *ctx; spufs_signal2_write() local
1246 ctx = file->private_data; spufs_signal2_write()
1254 ret = spu_acquire(ctx); spufs_signal2_write()
1257 ctx->ops->signal2_write(ctx, data); spufs_signal2_write()
1258 spu_release(ctx); spufs_signal2_write()
1327 struct spu_context *ctx = data; \
1331 ret = spu_acquire(ctx); \
1334 *val = __get(ctx); \
1335 spu_release(ctx); \
1337 ret = spu_acquire_saved(ctx); \
1340 *val = __get(ctx); \
1341 spu_release_saved(ctx); \
1343 *val = __get(ctx); \
1351 struct spu_context *ctx = data; spufs_signal1_type_set() local
1354 ret = spu_acquire(ctx); spufs_signal1_type_set()
1357 ctx->ops->signal1_type_set(ctx, val); spufs_signal1_type_set()
1358 spu_release(ctx); spufs_signal1_type_set()
1363 static u64 spufs_signal1_type_get(struct spu_context *ctx) spufs_signal1_type_get() argument
1365 return ctx->ops->signal1_type_get(ctx); spufs_signal1_type_get()
1373 struct spu_context *ctx = data; spufs_signal2_type_set() local
1376 ret = spu_acquire(ctx); spufs_signal2_type_set()
1379 ctx->ops->signal2_type_set(ctx, val); spufs_signal2_type_set()
1380 spu_release(ctx); spufs_signal2_type_set()
1385 static u64 spufs_signal2_type_get(struct spu_context *ctx) spufs_signal2_type_get() argument
1387 return ctx->ops->signal2_type_get(ctx); spufs_signal2_type_get()
1424 struct spu_context *ctx = i->i_ctx; spufs_mss_open() local
1428 mutex_lock(&ctx->mapping_lock); spufs_mss_open()
1430 ctx->mss = inode->i_mapping; spufs_mss_open()
1431 mutex_unlock(&ctx->mapping_lock); spufs_mss_open()
1439 struct spu_context *ctx = i->i_ctx; spufs_mss_release() local
1441 mutex_lock(&ctx->mapping_lock); spufs_mss_release()
1443 ctx->mss = NULL; spufs_mss_release()
1444 mutex_unlock(&ctx->mapping_lock); spufs_mss_release()
1483 struct spu_context *ctx = i->i_ctx; spufs_psmap_open() local
1485 mutex_lock(&ctx->mapping_lock); spufs_psmap_open()
1488 ctx->psmap = inode->i_mapping; spufs_psmap_open()
1489 mutex_unlock(&ctx->mapping_lock); spufs_psmap_open()
1497 struct spu_context *ctx = i->i_ctx; spufs_psmap_release() local
1499 mutex_lock(&ctx->mapping_lock); spufs_psmap_release()
1501 ctx->psmap = NULL; spufs_psmap_release()
1502 mutex_unlock(&ctx->mapping_lock); spufs_psmap_release()
1546 struct spu_context *ctx = i->i_ctx; spufs_mfc_open() local
1549 if (ctx->owner != current->mm) spufs_mfc_open()
1555 mutex_lock(&ctx->mapping_lock); spufs_mfc_open()
1556 file->private_data = ctx; spufs_mfc_open()
1558 ctx->mfc = inode->i_mapping; spufs_mfc_open()
1559 mutex_unlock(&ctx->mapping_lock); spufs_mfc_open()
1567 struct spu_context *ctx = i->i_ctx; spufs_mfc_release() local
1569 mutex_lock(&ctx->mapping_lock); spufs_mfc_release()
1571 ctx->mfc = NULL; spufs_mfc_release()
1572 mutex_unlock(&ctx->mapping_lock); spufs_mfc_release()
1579 struct spu_context *ctx = spu->ctx; spufs_mfc_callback() local
1581 if (!ctx) spufs_mfc_callback()
1584 wake_up_all(&ctx->mfc_wq); spufs_mfc_callback()
1587 if (ctx->mfc_fasync) { spufs_mfc_callback()
1592 free_elements = ctx->ops->get_mfc_free_elements(ctx); spufs_mfc_callback()
1593 tagstatus = ctx->ops->read_mfc_tagstatus(ctx); spufs_mfc_callback()
1598 if (tagstatus & ctx->tagwait) spufs_mfc_callback()
1601 kill_fasync(&ctx->mfc_fasync, SIGIO, mask); spufs_mfc_callback()
1605 static int spufs_read_mfc_tagstatus(struct spu_context *ctx, u32 *status) spufs_read_mfc_tagstatus() argument
1609 *status = ctx->ops->read_mfc_tagstatus(ctx) & ctx->tagwait; spufs_read_mfc_tagstatus()
1610 ctx->tagwait &= ~*status; spufs_read_mfc_tagstatus()
1616 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); spufs_read_mfc_tagstatus()
1623 struct spu_context *ctx = file->private_data; spufs_mfc_read() local
1630 ret = spu_acquire(ctx); spufs_mfc_read()
1636 status = ctx->ops->read_mfc_tagstatus(ctx); spufs_mfc_read()
1637 if (!(status & ctx->tagwait)) spufs_mfc_read()
1641 ctx->tagwait &= ~status; spufs_mfc_read()
1643 ret = spufs_wait(ctx->mfc_wq, spufs_mfc_read()
1644 spufs_read_mfc_tagstatus(ctx, &status)); spufs_mfc_read()
1648 spu_release(ctx); spufs_mfc_read()
1728 static int spu_send_mfc_command(struct spu_context *ctx, spu_send_mfc_command() argument
1732 *error = ctx->ops->send_mfc_command(ctx, &cmd); spu_send_mfc_command()
1736 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 1); spu_send_mfc_command()
1739 *error = ctx->ops->send_mfc_command(ctx, &cmd); spu_send_mfc_command()
1749 struct spu_context *ctx = file->private_data; spufs_mfc_write() local
1764 ret = spu_acquire(ctx); spufs_mfc_write()
1768 ret = spufs_wait(ctx->run_wq, ctx->state == SPU_STATE_RUNNABLE); spufs_mfc_write()
1773 ret = ctx->ops->send_mfc_command(ctx, &cmd); spufs_mfc_write()
1776 ret = spufs_wait(ctx->mfc_wq, spufs_mfc_write()
1777 spu_send_mfc_command(ctx, cmd, &status)); spufs_mfc_write()
1787 ctx->tagwait |= 1 << cmd.tag; spufs_mfc_write()
1791 spu_release(ctx); spufs_mfc_write()
1798 struct spu_context *ctx = file->private_data; spufs_mfc_poll() local
1802 poll_wait(file, &ctx->mfc_wq, wait); spufs_mfc_poll()
1808 mutex_lock(&ctx->state_mutex); spufs_mfc_poll()
1809 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2); spufs_mfc_poll()
1810 free_elements = ctx->ops->get_mfc_free_elements(ctx); spufs_mfc_poll()
1811 tagstatus = ctx->ops->read_mfc_tagstatus(ctx); spufs_mfc_poll()
1812 spu_release(ctx); spufs_mfc_poll()
1817 if (tagstatus & ctx->tagwait) spufs_mfc_poll()
1821 free_elements, tagstatus, ctx->tagwait); spufs_mfc_poll()
1828 struct spu_context *ctx = file->private_data; spufs_mfc_flush() local
1831 ret = spu_acquire(ctx); spufs_mfc_flush()
1836 ret = spufs_wait(ctx->mfc_wq, spufs_mfc_flush()
1837 ctx->ops->set_mfc_query(ctx, ctx->tagwait, 2)); spufs_mfc_flush()
1840 ret = spufs_wait(ctx->mfc_wq, spufs_mfc_flush()
1841 ctx->ops->read_mfc_tagstatus(ctx) == ctx->tagwait); spufs_mfc_flush()
1847 spu_release(ctx); spufs_mfc_flush()
1866 struct spu_context *ctx = file->private_data; spufs_mfc_fasync() local
1868 return fasync_helper(fd, file, on, &ctx->mfc_fasync); spufs_mfc_fasync()
1886 struct spu_context *ctx = data; spufs_npc_set() local
1889 ret = spu_acquire(ctx); spufs_npc_set()
1892 ctx->ops->npc_write(ctx, val); spufs_npc_set()
1893 spu_release(ctx); spufs_npc_set()
1898 static u64 spufs_npc_get(struct spu_context *ctx) spufs_npc_get() argument
1900 return ctx->ops->npc_read(ctx); spufs_npc_get()
1907 struct spu_context *ctx = data; spufs_decr_set() local
1908 struct spu_lscsa *lscsa = ctx->csa.lscsa; spufs_decr_set()
1911 ret = spu_acquire_saved(ctx); spufs_decr_set()
1915 spu_release_saved(ctx); spufs_decr_set()
1920 static u64 spufs_decr_get(struct spu_context *ctx) spufs_decr_get() argument
1922 struct spu_lscsa *lscsa = ctx->csa.lscsa; spufs_decr_get()
1930 struct spu_context *ctx = data; spufs_decr_status_set() local
1933 ret = spu_acquire_saved(ctx); spufs_decr_status_set()
1937 ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING; spufs_decr_status_set()
1939 ctx->csa.priv2.mfc_control_RW &= ~MFC_CNTL_DECREMENTER_RUNNING; spufs_decr_status_set()
1940 spu_release_saved(ctx); spufs_decr_status_set()
1945 static u64 spufs_decr_status_get(struct spu_context *ctx) spufs_decr_status_get() argument
1947 if (ctx->csa.priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) spufs_decr_status_get()
1958 struct spu_context *ctx = data; spufs_event_mask_set() local
1959 struct spu_lscsa *lscsa = ctx->csa.lscsa; spufs_event_mask_set()
1962 ret = spu_acquire_saved(ctx); spufs_event_mask_set()
1966 spu_release_saved(ctx); spufs_event_mask_set()
1971 static u64 spufs_event_mask_get(struct spu_context *ctx) spufs_event_mask_get() argument
1973 struct spu_lscsa *lscsa = ctx->csa.lscsa; spufs_event_mask_get()
1981 static u64 spufs_event_status_get(struct spu_context *ctx) spufs_event_status_get() argument
1983 struct spu_state *state = &ctx->csa; spufs_event_status_get()
1995 struct spu_context *ctx = data; spufs_srr0_set() local
1996 struct spu_lscsa *lscsa = ctx->csa.lscsa; spufs_srr0_set()
1999 ret = spu_acquire_saved(ctx); spufs_srr0_set()
2003 spu_release_saved(ctx); spufs_srr0_set()
2008 static u64 spufs_srr0_get(struct spu_context *ctx) spufs_srr0_get() argument
2010 struct spu_lscsa *lscsa = ctx->csa.lscsa; spufs_srr0_get()
2016 static u64 spufs_id_get(struct spu_context *ctx) spufs_id_get() argument
2020 if (ctx->state == SPU_STATE_RUNNABLE) spufs_id_get()
2021 num = ctx->spu->number; spufs_id_get()
2030 static u64 spufs_object_id_get(struct spu_context *ctx) spufs_object_id_get() argument
2033 return ctx->object_id; spufs_object_id_get()
2038 struct spu_context *ctx = data; spufs_object_id_set() local
2039 ctx->object_id = id; spufs_object_id_set()
2047 static u64 spufs_lslr_get(struct spu_context *ctx) spufs_lslr_get() argument
2049 return ctx->csa.priv2.spu_lslr_RW; spufs_lslr_get()
2057 struct spu_context *ctx = i->i_ctx; spufs_info_open() local
2058 file->private_data = ctx; spufs_info_open()
2064 struct spu_context *ctx = s->private; spufs_caps_show() local
2066 if (!(ctx->flags & SPU_CREATE_NOSCHED)) spufs_caps_show()
2068 if (!(ctx->flags & SPU_CREATE_ISOLATE)) spufs_caps_show()
2085 static ssize_t __spufs_mbox_info_read(struct spu_context *ctx, __spufs_mbox_info_read() argument
2091 if (!(ctx->csa.prob.mb_stat_R & 0x0000ff)) __spufs_mbox_info_read()
2094 data = ctx->csa.prob.pu_mb_R; __spufs_mbox_info_read()
2103 struct spu_context *ctx = file->private_data; spufs_mbox_info_read() local
2108 ret = spu_acquire_saved(ctx); spufs_mbox_info_read()
2111 spin_lock(&ctx->csa.register_lock); spufs_mbox_info_read()
2112 ret = __spufs_mbox_info_read(ctx, buf, len, pos); spufs_mbox_info_read()
2113 spin_unlock(&ctx->csa.register_lock); spufs_mbox_info_read()
2114 spu_release_saved(ctx); spufs_mbox_info_read()
2125 static ssize_t __spufs_ibox_info_read(struct spu_context *ctx, __spufs_ibox_info_read() argument
2131 if (!(ctx->csa.prob.mb_stat_R & 0xff0000)) __spufs_ibox_info_read()
2134 data = ctx->csa.priv2.puint_mb_R; __spufs_ibox_info_read()
2142 struct spu_context *ctx = file->private_data; spufs_ibox_info_read() local
2148 ret = spu_acquire_saved(ctx); spufs_ibox_info_read()
2151 spin_lock(&ctx->csa.register_lock); spufs_ibox_info_read()
2152 ret = __spufs_ibox_info_read(ctx, buf, len, pos); spufs_ibox_info_read()
2153 spin_unlock(&ctx->csa.register_lock); spufs_ibox_info_read()
2154 spu_release_saved(ctx); spufs_ibox_info_read()
2165 static ssize_t __spufs_wbox_info_read(struct spu_context *ctx, __spufs_wbox_info_read() argument
2172 wbox_stat = ctx->csa.prob.mb_stat_R; __spufs_wbox_info_read()
2175 data[i] = ctx->csa.spu_mailbox_data[i]; __spufs_wbox_info_read()
2185 struct spu_context *ctx = file->private_data; spufs_wbox_info_read() local
2191 ret = spu_acquire_saved(ctx); spufs_wbox_info_read()
2194 spin_lock(&ctx->csa.register_lock); spufs_wbox_info_read()
2195 ret = __spufs_wbox_info_read(ctx, buf, len, pos); spufs_wbox_info_read()
2196 spin_unlock(&ctx->csa.register_lock); spufs_wbox_info_read()
2197 spu_release_saved(ctx); spufs_wbox_info_read()
2208 static ssize_t __spufs_dma_info_read(struct spu_context *ctx, __spufs_dma_info_read() argument
2215 info.dma_info_type = ctx->csa.priv2.spu_tag_status_query_RW; __spufs_dma_info_read()
2216 info.dma_info_mask = ctx->csa.lscsa->tag_mask.slot[0]; __spufs_dma_info_read()
2217 info.dma_info_status = ctx->csa.spu_chnldata_RW[24]; __spufs_dma_info_read()
2218 info.dma_info_stall_and_notify = ctx->csa.spu_chnldata_RW[25]; __spufs_dma_info_read()
2219 info.dma_info_atomic_command_status = ctx->csa.spu_chnldata_RW[27]; __spufs_dma_info_read()
2222 spuqp = &ctx->csa.priv2.spuq[i]; __spufs_dma_info_read()
2237 struct spu_context *ctx = file->private_data; spufs_dma_info_read() local
2243 ret = spu_acquire_saved(ctx); spufs_dma_info_read()
2246 spin_lock(&ctx->csa.register_lock); spufs_dma_info_read()
2247 ret = __spufs_dma_info_read(ctx, buf, len, pos); spufs_dma_info_read()
2248 spin_unlock(&ctx->csa.register_lock); spufs_dma_info_read()
2249 spu_release_saved(ctx); spufs_dma_info_read()
2260 static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, __spufs_proxydma_info_read() argument
2274 info.proxydma_info_type = ctx->csa.prob.dma_querytype_RW; __spufs_proxydma_info_read()
2275 info.proxydma_info_mask = ctx->csa.prob.dma_querymask_RW; __spufs_proxydma_info_read()
2276 info.proxydma_info_status = ctx->csa.prob.dma_tagstatus_R; __spufs_proxydma_info_read()
2279 puqp = &ctx->csa.priv2.puq[i]; __spufs_proxydma_info_read()
2294 struct spu_context *ctx = file->private_data; spufs_proxydma_info_read() local
2297 ret = spu_acquire_saved(ctx); spufs_proxydma_info_read()
2300 spin_lock(&ctx->csa.register_lock); spufs_proxydma_info_read()
2301 ret = __spufs_proxydma_info_read(ctx, buf, len, pos); spufs_proxydma_info_read()
2302 spin_unlock(&ctx->csa.register_lock); spufs_proxydma_info_read()
2303 spu_release_saved(ctx); spufs_proxydma_info_read()
2316 struct spu_context *ctx = s->private; spufs_show_tid() local
2318 seq_printf(s, "%d\n", ctx->tid); spufs_show_tid()
2338 static unsigned long long spufs_acct_time(struct spu_context *ctx, spufs_acct_time() argument
2341 unsigned long long time = ctx->stats.times[state]; spufs_acct_time()
2352 if (ctx->spu && ctx->stats.util_state == state) { spufs_acct_time()
2353 time += ktime_get_ns() - ctx->stats.tstamp; spufs_acct_time()
2359 static unsigned long long spufs_slb_flts(struct spu_context *ctx) spufs_slb_flts() argument
2361 unsigned long long slb_flts = ctx->stats.slb_flt; spufs_slb_flts()
2363 if (ctx->state == SPU_STATE_RUNNABLE) { spufs_slb_flts()
2364 slb_flts += (ctx->spu->stats.slb_flt - spufs_slb_flts()
2365 ctx->stats.slb_flt_base); spufs_slb_flts()
2371 static unsigned long long spufs_class2_intrs(struct spu_context *ctx) spufs_class2_intrs() argument
2373 unsigned long long class2_intrs = ctx->stats.class2_intr; spufs_class2_intrs()
2375 if (ctx->state == SPU_STATE_RUNNABLE) { spufs_class2_intrs()
2376 class2_intrs += (ctx->spu->stats.class2_intr - spufs_class2_intrs()
2377 ctx->stats.class2_intr_base); spufs_class2_intrs()
2386 struct spu_context *ctx = s->private; spufs_show_stat() local
2389 ret = spu_acquire(ctx); spufs_show_stat()
2395 ctx_state_names[ctx->stats.util_state], spufs_show_stat()
2396 spufs_acct_time(ctx, SPU_UTIL_USER), spufs_show_stat()
2397 spufs_acct_time(ctx, SPU_UTIL_SYSTEM), spufs_show_stat()
2398 spufs_acct_time(ctx, SPU_UTIL_IOWAIT), spufs_show_stat()
2399 spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED), spufs_show_stat()
2400 ctx->stats.vol_ctx_switch, spufs_show_stat()
2401 ctx->stats.invol_ctx_switch, spufs_show_stat()
2402 spufs_slb_flts(ctx), spufs_show_stat()
2403 ctx->stats.hash_flt, spufs_show_stat()
2404 ctx->stats.min_flt, spufs_show_stat()
2405 ctx->stats.maj_flt, spufs_show_stat()
2406 spufs_class2_intrs(ctx), spufs_show_stat()
2407 ctx->stats.libassist); spufs_show_stat()
2408 spu_release(ctx); spufs_show_stat()
2424 static inline int spufs_switch_log_used(struct spu_context *ctx) spufs_switch_log_used() argument
2426 return (ctx->switch_log->head - ctx->switch_log->tail) % spufs_switch_log_used()
2430 static inline int spufs_switch_log_avail(struct spu_context *ctx) spufs_switch_log_avail() argument
2432 return SWITCH_LOG_BUFSIZE - spufs_switch_log_used(ctx); spufs_switch_log_avail()
2437 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; spufs_switch_log_open() local
2440 rc = spu_acquire(ctx); spufs_switch_log_open()
2444 if (ctx->switch_log) { spufs_switch_log_open()
2449 ctx->switch_log = kmalloc(sizeof(struct switch_log) + spufs_switch_log_open()
2453 if (!ctx->switch_log) { spufs_switch_log_open()
2458 ctx->switch_log->head = ctx->switch_log->tail = 0; spufs_switch_log_open()
2459 init_waitqueue_head(&ctx->switch_log->wait); spufs_switch_log_open()
2463 spu_release(ctx); spufs_switch_log_open()
2469 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; spufs_switch_log_release() local
2472 rc = spu_acquire(ctx); spufs_switch_log_release()
2476 kfree(ctx->switch_log); spufs_switch_log_release()
2477 ctx->switch_log = NULL; spufs_switch_log_release()
2478 spu_release(ctx); spufs_switch_log_release()
2483 static int switch_log_sprint(struct spu_context *ctx, char *tbuf, int n) switch_log_sprint() argument
2487 p = ctx->switch_log->log + ctx->switch_log->tail % SWITCH_LOG_BUFSIZE; switch_log_sprint()
2502 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; spufs_switch_log_read() local
2508 error = spu_acquire(ctx); spufs_switch_log_read()
2516 if (spufs_switch_log_used(ctx) == 0) { spufs_switch_log_read()
2530 * ctx->switch_log is stable). spufs_switch_log_read()
2532 error = spufs_wait(ctx->switch_log->wait, spufs_switch_log_read()
2533 spufs_switch_log_used(ctx) > 0); spufs_switch_log_read()
2543 if (spufs_switch_log_used(ctx) == 0) spufs_switch_log_read()
2548 width = switch_log_sprint(ctx, tbuf, sizeof(tbuf)); spufs_switch_log_read()
2550 ctx->switch_log->tail = spufs_switch_log_read()
2551 (ctx->switch_log->tail + 1) % spufs_switch_log_read()
2564 spu_release(ctx); spufs_switch_log_read()
2572 struct spu_context *ctx = SPUFS_I(inode)->i_ctx; spufs_switch_log_poll() local
2576 poll_wait(file, &ctx->switch_log->wait, wait); spufs_switch_log_poll()
2578 rc = spu_acquire(ctx); spufs_switch_log_poll()
2582 if (spufs_switch_log_used(ctx) > 0) spufs_switch_log_poll()
2585 spu_release(ctx); spufs_switch_log_poll()
2601 * Must be called with ctx->state_mutex held.
2603 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx, spu_switch_log_notify() argument
2606 if (!ctx->switch_log) spu_switch_log_notify()
2609 if (spufs_switch_log_avail(ctx) > 1) { spu_switch_log_notify()
2612 p = ctx->switch_log->log + ctx->switch_log->head; spu_switch_log_notify()
2619 ctx->switch_log->head = spu_switch_log_notify()
2620 (ctx->switch_log->head + 1) % SWITCH_LOG_BUFSIZE; spu_switch_log_notify()
2623 wake_up(&ctx->switch_log->wait); spu_switch_log_notify()
2628 struct spu_context *ctx = s->private; spufs_show_ctx() local
2631 mutex_lock(&ctx->state_mutex); spufs_show_ctx()
2632 if (ctx->spu) { spufs_show_ctx()
2633 struct spu *spu = ctx->spu; spufs_show_ctx()
2640 struct spu_state *csa = &ctx->csa; spufs_show_ctx()
2647 ctx->state == SPU_STATE_SAVED ? 'S' : 'R', spufs_show_ctx()
2648 ctx->flags, spufs_show_ctx()
2649 ctx->sched_flags, spufs_show_ctx()
2650 ctx->prio, spufs_show_ctx()
2651 ctx->time_slice, spufs_show_ctx()
2652 ctx->spu ? ctx->spu->number : -1, spufs_show_ctx()
2653 !list_empty(&ctx->rq) ? 'q' : ' ', spufs_show_ctx()
2654 ctx->csa.class_0_pending, spufs_show_ctx()
2655 ctx->csa.class_0_dar, spufs_show_ctx()
2656 ctx->csa.class_1_dsisr, spufs_show_ctx()
2658 ctx->ops->runcntl_read(ctx), spufs_show_ctx()
2659 ctx->ops->status_read(ctx)); spufs_show_ctx()
2661 mutex_unlock(&ctx->state_mutex); spufs_show_ctx()
2745 { ".ctx", &spufs_ctx_fops, 0444, },
fault.c
36 static void spufs_handle_event(struct spu_context *ctx, spufs_handle_event() argument
41 if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) { spufs_handle_event()
42 ctx->event_return |= type; spufs_handle_event()
43 wake_up_all(&ctx->stop_wq); spufs_handle_event()
58 ctx->ops->restart_dma(ctx); spufs_handle_event()
68 ctx->ops->npc_read(ctx) - 4; spufs_handle_event()
77 int spufs_handle_class0(struct spu_context *ctx) spufs_handle_class0() argument
79 unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK; spufs_handle_class0()
85 spufs_handle_event(ctx, ctx->csa.class_0_dar, spufs_handle_class0()
89 spufs_handle_event(ctx, ctx->csa.class_0_dar, spufs_handle_class0()
93 spufs_handle_event(ctx, ctx->csa.class_0_dar, spufs_handle_class0()
96 ctx->csa.class_0_pending = 0; spufs_handle_class0()
110 int spufs_handle_class1(struct spu_context *ctx) spufs_handle_class1() argument
126 ea = ctx->csa.class_1_dar; spufs_handle_class1()
127 dsisr = ctx->csa.class_1_dsisr; spufs_handle_class1()
132 spuctx_switch_state(ctx, SPU_UTIL_IOWAIT); spufs_handle_class1()
134 pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea, spufs_handle_class1()
135 dsisr, ctx->state); spufs_handle_class1()
137 ctx->stats.hash_flt++; spufs_handle_class1()
138 if (ctx->state == SPU_STATE_RUNNABLE) spufs_handle_class1()
139 ctx->spu->stats.hash_flt++; spufs_handle_class1()
142 spu_release(ctx); spufs_handle_class1()
158 mutex_lock(&ctx->state_mutex); spufs_handle_class1()
165 ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0; spufs_handle_class1()
174 ctx->stats.maj_flt++; spufs_handle_class1()
176 ctx->stats.min_flt++; spufs_handle_class1()
177 if (ctx->state == SPU_STATE_RUNNABLE) { spufs_handle_class1()
179 ctx->spu->stats.maj_flt++; spufs_handle_class1()
181 ctx->spu->stats.min_flt++; spufs_handle_class1()
184 if (ctx->spu) spufs_handle_class1()
185 ctx->ops->restart_dma(ctx); spufs_handle_class1()
187 spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE); spufs_handle_class1()
189 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); spufs_handle_class1()
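
fault.c splits fault handling by interrupt class: spufs_handle_class0() fans the pending class 0 bits out to user-visible events and clears class_0_pending, while spufs_handle_class1() resolves the SPE-side page fault (dropping the context lock around the fault, bumping min_flt/maj_flt) and restarts the stalled DMA. The bit-test dispatch at its core reduces to the following shape; the constants below are illustrative stand-ins for the real CLASS0_*_INTR and SPE_EVENT_* values:

#include <stdio.h>

/* hypothetical stand-ins for the CLASS0_*_INTR / SPE_EVENT_* constants */
#define INTR_DMA_ALIGNMENT   0x1
#define INTR_INVALID_DMA     0x2
#define INTR_SPU_ERROR       0x4
#define INTR_MASK            0x7

static void handle_event(unsigned long ea, int type)
{
    printf("event %d at 0x%lx\n", type, ea);
}

/* the spufs_handle_class0() shape: test each pending bit, raise an event */
static int handle_class0(unsigned long pending, unsigned long dar)
{
    unsigned long stat = pending & INTR_MASK;

    if (stat & INTR_DMA_ALIGNMENT)
        handle_event(dar, 1 /* "DMA alignment" */);
    if (stat & INTR_INVALID_DMA)
        handle_event(dar, 2 /* "invalid DMA" */);
    if (stat & INTR_SPU_ERROR)
        handle_event(dar, 3 /* "SPE error" */);

    return 0;       /* class 0 faults are always consumed */
}

int main(void)
{
    return handle_class0(INTR_SPU_ERROR, 0xdeadbeef);
}
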
hw_ops.c
37 static int spu_hw_mbox_read(struct spu_context *ctx, u32 * data) spu_hw_mbox_read() argument
39 struct spu *spu = ctx->spu; spu_hw_mbox_read()
54 static u32 spu_hw_mbox_stat_read(struct spu_context *ctx) spu_hw_mbox_stat_read() argument
56 return in_be32(&ctx->spu->problem->mb_stat_R); spu_hw_mbox_stat_read()
59 static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx, spu_hw_mbox_stat_poll() argument
62 struct spu *spu = ctx->spu; spu_hw_mbox_stat_poll()
96 static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data) spu_hw_ibox_read() argument
98 struct spu *spu = ctx->spu; spu_hw_ibox_read()
117 static int spu_hw_wbox_write(struct spu_context *ctx, u32 data) spu_hw_wbox_write() argument
119 struct spu *spu = ctx->spu; spu_hw_wbox_write()
138 static void spu_hw_signal1_write(struct spu_context *ctx, u32 data) spu_hw_signal1_write() argument
140 out_be32(&ctx->spu->problem->signal_notify1, data); spu_hw_signal1_write()
143 static void spu_hw_signal2_write(struct spu_context *ctx, u32 data) spu_hw_signal2_write() argument
145 out_be32(&ctx->spu->problem->signal_notify2, data); spu_hw_signal2_write()
148 static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val) spu_hw_signal1_type_set() argument
150 struct spu *spu = ctx->spu; spu_hw_signal1_type_set()
164 static u64 spu_hw_signal1_type_get(struct spu_context *ctx) spu_hw_signal1_type_get() argument
166 return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0); spu_hw_signal1_type_get()
169 static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val) spu_hw_signal2_type_set() argument
171 struct spu *spu = ctx->spu; spu_hw_signal2_type_set()
185 static u64 spu_hw_signal2_type_get(struct spu_context *ctx) spu_hw_signal2_type_get() argument
187 return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0); spu_hw_signal2_type_get()
190 static u32 spu_hw_npc_read(struct spu_context *ctx) spu_hw_npc_read() argument
192 return in_be32(&ctx->spu->problem->spu_npc_RW); spu_hw_npc_read()
195 static void spu_hw_npc_write(struct spu_context *ctx, u32 val) spu_hw_npc_write() argument
197 out_be32(&ctx->spu->problem->spu_npc_RW, val); spu_hw_npc_write()
200 static u32 spu_hw_status_read(struct spu_context *ctx) spu_hw_status_read() argument
202 return in_be32(&ctx->spu->problem->spu_status_R); spu_hw_status_read()
205 static char *spu_hw_get_ls(struct spu_context *ctx) spu_hw_get_ls() argument
207 return ctx->spu->local_store; spu_hw_get_ls()
210 static void spu_hw_privcntl_write(struct spu_context *ctx, u64 val) spu_hw_privcntl_write() argument
212 out_be64(&ctx->spu->priv2->spu_privcntl_RW, val); spu_hw_privcntl_write()
215 static u32 spu_hw_runcntl_read(struct spu_context *ctx) spu_hw_runcntl_read() argument
217 return in_be32(&ctx->spu->problem->spu_runcntl_RW); spu_hw_runcntl_read()
220 static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val) spu_hw_runcntl_write() argument
222 spin_lock_irq(&ctx->spu->register_lock); spu_hw_runcntl_write()
224 spu_hw_privcntl_write(ctx, spu_hw_runcntl_write()
226 out_be32(&ctx->spu->problem->spu_runcntl_RW, val); spu_hw_runcntl_write()
227 spin_unlock_irq(&ctx->spu->register_lock); spu_hw_runcntl_write()
230 static void spu_hw_runcntl_stop(struct spu_context *ctx) spu_hw_runcntl_stop() argument
232 spin_lock_irq(&ctx->spu->register_lock); spu_hw_runcntl_stop()
233 out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP); spu_hw_runcntl_stop()
234 while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING) spu_hw_runcntl_stop()
236 spin_unlock_irq(&ctx->spu->register_lock); spu_hw_runcntl_stop()
239 static void spu_hw_master_start(struct spu_context *ctx) spu_hw_master_start() argument
241 struct spu *spu = ctx->spu; spu_hw_master_start()
250 static void spu_hw_master_stop(struct spu_context *ctx) spu_hw_master_stop() argument
252 struct spu *spu = ctx->spu; spu_hw_master_stop()
261 static int spu_hw_set_mfc_query(struct spu_context * ctx, u32 mask, u32 mode) spu_hw_set_mfc_query() argument
263 struct spu_problem __iomem *prob = ctx->spu->problem; spu_hw_set_mfc_query()
266 spin_lock_irq(&ctx->spu->register_lock); spu_hw_set_mfc_query()
274 spin_unlock_irq(&ctx->spu->register_lock); spu_hw_set_mfc_query()
278 static u32 spu_hw_read_mfc_tagstatus(struct spu_context * ctx) spu_hw_read_mfc_tagstatus() argument
280 return in_be32(&ctx->spu->problem->dma_tagstatus_R); spu_hw_read_mfc_tagstatus()
283 static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx) spu_hw_get_mfc_free_elements() argument
285 return in_be32(&ctx->spu->problem->dma_qstatus_R); spu_hw_get_mfc_free_elements()
288 static int spu_hw_send_mfc_command(struct spu_context *ctx, spu_hw_send_mfc_command() argument
292 struct spu_problem __iomem *prob = ctx->spu->problem; spu_hw_send_mfc_command()
294 spin_lock_irq(&ctx->spu->register_lock); spu_hw_send_mfc_command()
302 spin_unlock_irq(&ctx->spu->register_lock); spu_hw_send_mfc_command()
314 static void spu_hw_restart_dma(struct spu_context *ctx) spu_hw_restart_dma() argument
316 struct spu_priv2 __iomem *priv2 = ctx->spu->priv2; spu_hw_restart_dma()
318 if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags)) spu_hw_restart_dma()
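
hw_ops.c is the live-hardware twin of backing_ops.c: the same ops-table entries, but issued as MMIO accesses (in_be32()/out_be32()) on the bound SPU's problem-state and priv2 registers, usually under the SPU's register_lock; the scheduler flips ctx->ops between the two tables at bind/unbind time (see spu_bind_context() in the sched.c results below). A userspace sketch of what those big-endian MMIO helpers amount to, assuming a little-endian host, with a compiler barrier standing in for the real eieio/sync ordering and a plain variable for the register:

#include <stdint.h>

static inline uint32_t in_be32(const volatile uint32_t *addr)
{
    uint32_t val = *addr;                       /* the MMIO load */
    __asm__ __volatile__("" ::: "memory");      /* keep accesses ordered */
    return __builtin_bswap32(val);              /* big-endian -> host order */
}

static inline void out_be32(volatile uint32_t *addr, uint32_t val)
{
    __asm__ __volatile__("" ::: "memory");
    *addr = __builtin_bswap32(val);             /* host order -> big-endian */
}

int main(void)
{
    volatile uint32_t fake_reg = 0;             /* stands in for spu_npc_RW */

    out_be32(&fake_reg, 0x1234);
    return in_be32(&fake_reg) == 0x1234 ? 0 : 1;    /* round-trips anywhere */
}
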
sputrace.h
10 TP_PROTO(struct spu_context *ctx, struct spu *spu, const char *name),
11 TP_ARGS(ctx, spu, name),
21 __entry->owner_tid = ctx->tid;
29 #define spu_context_trace(name, ctx, spu) \
30 trace_spufs_context(ctx, spu, __stringify(name))
31 #define spu_context_nospu_trace(name, ctx) \
32 trace_spufs_context(ctx, NULL, __stringify(name))
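
Both trace macros funnel into a single tracepoint, turning the call-site's symbolic event name into the logged string via __stringify. The two-level stringification trick in isolation, with a printf standing in for the real tracepoint:

#include <stdio.h>

/* two-level expansion, as in the kernel's __stringify() */
#define __stringify_1(x)  #x
#define __stringify(x)    __stringify_1(x)

static void trace_ctx(void *ctx, void *spu, const char *name)
{
    printf("ctx=%p spu=%p event=%s\n", ctx, spu, name);
}

/* spu_context_trace()/spu_context_nospu_trace() analogues */
#define ctx_trace(name, ctx, spu)   trace_ctx(ctx, spu, __stringify(name))
#define ctx_nospu_trace(name, ctx)  trace_ctx(ctx, NULL, __stringify(name))

int main(void)
{
    int dummy;

    ctx_nospu_trace(destroy_spu_context__enter, &dummy);
    return 0;
}
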
sched.c
97 void spu_set_timeslice(struct spu_context *ctx) spu_set_timeslice() argument
99 if (ctx->prio < NORMAL_PRIO) spu_set_timeslice()
100 ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio); spu_set_timeslice()
102 ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio); spu_set_timeslice()
108 void __spu_update_sched_info(struct spu_context *ctx) __spu_update_sched_info() argument
114 BUG_ON(!list_empty(&ctx->rq)); __spu_update_sched_info()
121 ctx->tid = current->pid; __spu_update_sched_info()
130 ctx->prio = current->prio; __spu_update_sched_info()
132 ctx->prio = current->static_prio; __spu_update_sched_info()
133 ctx->policy = current->policy; __spu_update_sched_info()
143 cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current)); __spu_update_sched_info()
146 ctx->last_ran = raw_smp_processor_id(); __spu_update_sched_info()
149 void spu_update_sched_info(struct spu_context *ctx) spu_update_sched_info() argument
153 if (ctx->state == SPU_STATE_RUNNABLE) { spu_update_sched_info()
154 node = ctx->spu->node; spu_update_sched_info()
160 __spu_update_sched_info(ctx); spu_update_sched_info()
163 __spu_update_sched_info(ctx); spu_update_sched_info()
167 static int __node_allowed(struct spu_context *ctx, int node) __node_allowed() argument
172 if (cpumask_intersects(mask, &ctx->cpus_allowed)) __node_allowed()
179 static int node_allowed(struct spu_context *ctx, int node) node_allowed() argument
184 rval = __node_allowed(ctx, node); node_allowed()
206 struct spu_context *ctx = spu->ctx; for_each_online_node() local
208 &ctx->sched_flags); for_each_online_node()
210 wake_up_all(&ctx->stop_wq); for_each_online_node()
220 * @ctx: context to bind
222 static void spu_bind_context(struct spu *spu, struct spu_context *ctx) spu_bind_context() argument
224 spu_context_trace(spu_bind_context__enter, ctx, spu); spu_bind_context()
226 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); spu_bind_context()
228 if (ctx->flags & SPU_CREATE_NOSCHED) spu_bind_context()
231 ctx->stats.slb_flt_base = spu->stats.slb_flt; spu_bind_context()
232 ctx->stats.class2_intr_base = spu->stats.class2_intr; spu_bind_context()
234 spu_associate_mm(spu, ctx->owner); spu_bind_context()
237 spu->ctx = ctx; spu_bind_context()
239 ctx->spu = spu; spu_bind_context()
240 ctx->ops = &spu_hw_ops; spu_bind_context()
249 spu_unmap_mappings(ctx); spu_bind_context()
251 spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0); spu_bind_context()
252 spu_restore(&ctx->csa, spu); spu_bind_context()
254 spu_switch_notify(spu, ctx); spu_bind_context()
255 ctx->state = SPU_STATE_RUNNABLE; spu_bind_context()
257 spuctx_switch_state(ctx, SPU_UTIL_USER); spu_bind_context()
267 return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED)); sched_spu()
272 struct spu_context *ctx; aff_merge_remaining_ctxs() local
274 list_for_each_entry(ctx, &gang->aff_list_head, aff_list) { aff_merge_remaining_ctxs()
275 if (list_empty(&ctx->aff_list)) aff_merge_remaining_ctxs()
276 list_add(&ctx->aff_list, &gang->aff_list_head); aff_merge_remaining_ctxs()
283 struct spu_context *ctx; aff_set_offsets() local
287 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, aff_set_offsets()
289 if (&ctx->aff_list == &gang->aff_list_head) aff_set_offsets()
291 ctx->aff_offset = offset--; aff_set_offsets()
295 list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) { aff_set_offsets()
296 if (&ctx->aff_list == &gang->aff_list_head) aff_set_offsets()
298 ctx->aff_offset = offset++; aff_set_offsets()
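The two walks number gang members by signed distance from the affinity reference: the reverse walk hands out -1, -2, ... to contexts before aff_ref_ctx, and the forward walk (which starts at the reference itself, since it begins at aff_ref_ctx->aff_list.prev) hands out 0, +1, +2, ... This assumes the elided initialisations set offset to -1 and 0 respectively. A toy illustration of the resulting aff_offset values:

	#include <stdio.h>

	int main(void)
	{
		const char *gang[] = { "A", "B", "R", "C", "D" };  /* R = aff_ref_ctx */
		int ref = 2;

		for (int i = 0; i < 5; i++)
			printf("%s: aff_offset = %+d\n", gang[i], i - ref);
		return 0;
	}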
304 static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff, aff_ref_location() argument
328 if (!node_allowed(ctx, node)) aff_ref_location()
334 if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset aff_ref_location()
335 && spu->ctx->gang->aff_ref_spu) aff_ref_location()
336 available_spus -= spu->ctx->gang->contexts; aff_ref_location()
339 if (available_spus < ctx->gang->contexts) { aff_ref_location()
359 struct spu_context *ctx; aff_set_ref_point_location() local
369 list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list, aff_set_ref_point_location()
371 if (&ctx->aff_list == &gang->aff_list_head) aff_set_ref_point_location()
373 lowest_offset = ctx->aff_offset; aff_set_ref_point_location()
410 static int has_affinity(struct spu_context *ctx) has_affinity() argument
412 struct spu_gang *gang = ctx->gang; has_affinity()
414 if (list_empty(&ctx->aff_list)) has_affinity()
417 if (atomic_read(&ctx->gang->aff_sched_count) == 0) has_affinity()
418 ctx->gang->aff_ref_spu = NULL; has_affinity()
434 * @ctx: context to unbind
436 static void spu_unbind_context(struct spu *spu, struct spu_context *ctx) spu_unbind_context() argument
440 spu_context_trace(spu_unbind_context__enter, ctx, spu); spu_unbind_context()
442 spuctx_switch_state(ctx, SPU_UTIL_SYSTEM); spu_unbind_context()
444 if (spu->ctx->flags & SPU_CREATE_NOSCHED) spu_unbind_context()
447 if (ctx->gang) spu_unbind_context()
449 * If ctx->gang->aff_sched_count is positive, SPU affinity is spu_unbind_context()
453 atomic_dec_if_positive(&ctx->gang->aff_sched_count); spu_unbind_context()
456 spu_unmap_mappings(ctx); spu_unbind_context()
457 spu_save(&ctx->csa, spu); spu_unbind_context()
458 spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0); spu_unbind_context()
462 ctx->state = SPU_STATE_SAVED; spu_unbind_context()
469 ctx->ops = &spu_backing_ops; spu_unbind_context()
471 spu->ctx = NULL; spu_unbind_context()
476 ctx->stats.slb_flt += spu_unbind_context()
477 (spu->stats.slb_flt - ctx->stats.slb_flt_base); spu_unbind_context()
478 ctx->stats.class2_intr += spu_unbind_context()
479 (spu->stats.class2_intr - ctx->stats.class2_intr_base); spu_unbind_context()
482 spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED); spu_unbind_context()
483 ctx->spu = NULL; spu_unbind_context()
485 if (spu_stopped(ctx, &status)) spu_unbind_context()
486 wake_up_all(&ctx->stop_wq); spu_unbind_context()
491 * @ctx: context to add
493 static void __spu_add_to_rq(struct spu_context *ctx) __spu_add_to_rq() argument
508 if (list_empty(&ctx->rq)) { __spu_add_to_rq()
509 list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]); __spu_add_to_rq()
510 set_bit(ctx->prio, spu_prio->bitmap); __spu_add_to_rq()
516 static void spu_add_to_rq(struct spu_context *ctx) spu_add_to_rq() argument
519 __spu_add_to_rq(ctx); spu_add_to_rq()
523 static void __spu_del_from_rq(struct spu_context *ctx) __spu_del_from_rq() argument
525 int prio = ctx->prio; __spu_del_from_rq()
527 if (!list_empty(&ctx->rq)) { __spu_del_from_rq()
530 list_del_init(&ctx->rq); __spu_del_from_rq()
537 void spu_del_from_rq(struct spu_context *ctx) spu_del_from_rq() argument
540 __spu_del_from_rq(ctx); spu_del_from_rq()
544 static void spu_prio_wait(struct spu_context *ctx) spu_prio_wait() argument
553 BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED)); spu_prio_wait()
556 prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE); spu_prio_wait()
558 __spu_add_to_rq(ctx); spu_prio_wait()
560 mutex_unlock(&ctx->state_mutex); spu_prio_wait()
562 mutex_lock(&ctx->state_mutex); spu_prio_wait()
564 __spu_del_from_rq(ctx); spu_prio_wait()
568 remove_wait_queue(&ctx->stop_wq, &wait); spu_prio_wait()
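spu_prio_wait() is the standard drop-the-lock-while-sleeping pattern: queue on stop_wq, release state_mutex, schedule(), retake the mutex on wakeup. A userspace analogue of the same shape, assuming POSIX threads:

	#include <pthread.h>

	static pthread_mutex_t state_mutex = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  stop_wq     = PTHREAD_COND_INITIALIZER;
	static int runnable;

	static void prio_wait(void)
	{
		pthread_mutex_lock(&state_mutex);
		while (!runnable)	/* cond_wait unlocks, sleeps, relocks */
			pthread_cond_wait(&stop_wq, &state_mutex);
		pthread_mutex_unlock(&state_mutex);
	}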
571 static struct spu *spu_get_idle(struct spu_context *ctx) spu_get_idle() argument
576 spu_context_nospu_trace(spu_get_idle__enter, ctx); spu_get_idle()
578 if (ctx->gang) { spu_get_idle()
579 mutex_lock(&ctx->gang->aff_mutex); spu_get_idle()
580 if (has_affinity(ctx)) { spu_get_idle()
581 aff_ref_spu = ctx->gang->aff_ref_spu; spu_get_idle()
582 atomic_inc(&ctx->gang->aff_sched_count); spu_get_idle()
583 mutex_unlock(&ctx->gang->aff_mutex); spu_get_idle()
587 spu = ctx_location(aff_ref_spu, ctx->aff_offset, node); spu_get_idle()
592 atomic_dec(&ctx->gang->aff_sched_count); spu_get_idle()
595 mutex_unlock(&ctx->gang->aff_mutex); spu_get_idle()
600 if (!node_allowed(ctx, node)) spu_get_idle()
612 spu_context_nospu_trace(spu_get_idle__not_found, ctx); spu_get_idle()
618 spu_context_trace(spu_get_idle__found, ctx, spu); spu_get_idle()
625 * @ctx: candidate context for running find_victim()
629 static struct spu *find_victim(struct spu_context *ctx) find_victim() argument
635 spu_context_nospu_trace(spu_find_victim__enter, ctx); find_victim()
648 if (!node_allowed(ctx, node)) find_victim()
653 struct spu_context *tmp = spu->ctx; find_victim()
655 if (tmp && tmp->prio > ctx->prio && find_victim()
658 victim = spu->ctx; find_victim()
667 * This nests ctx->state_mutex, but we always lock find_victim()
683 if (!spu || victim->prio <= ctx->prio) { find_victim()
695 spu_context_trace(__spu_deactivate__unload, ctx, spu); find_victim()
717 static void __spu_schedule(struct spu *spu, struct spu_context *ctx) __spu_schedule() argument
722 spu_set_timeslice(ctx); __spu_schedule()
725 if (spu->ctx == NULL) { __spu_schedule()
726 spu_bind_context(spu, ctx); __spu_schedule()
734 wake_up_all(&ctx->run_wq); __spu_schedule()
736 spu_add_to_rq(ctx); __spu_schedule()
739 static void spu_schedule(struct spu *spu, struct spu_context *ctx) spu_schedule() argument
743 mutex_lock(&ctx->state_mutex); spu_schedule()
744 if (ctx->state == SPU_STATE_SAVED) spu_schedule()
745 __spu_schedule(spu, ctx); spu_schedule()
746 spu_release(ctx); spu_schedule()
752 * @ctx: The context currently scheduled on the SPU
755 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
760 * Should be called with ctx->state_mutex held.
762 static void spu_unschedule(struct spu *spu, struct spu_context *ctx, spu_unschedule() argument
771 spu_unbind_context(spu, ctx); spu_unschedule()
772 ctx->stats.invol_ctx_switch++; spu_unschedule()
779 * @ctx: spu context to schedule
782 * Tries to find a free spu to run @ctx. If no free spu is available
786 int spu_activate(struct spu_context *ctx, unsigned long flags) spu_activate() argument
796 if (ctx->spu) spu_activate()
803 spu = spu_get_idle(ctx); spu_activate()
808 if (!spu && rt_prio(ctx->prio)) spu_activate()
809 spu = find_victim(ctx); spu_activate()
813 runcntl = ctx->ops->runcntl_read(ctx); spu_activate()
814 __spu_schedule(spu, ctx); spu_activate()
816 spuctx_switch_state(ctx, SPU_UTIL_USER); spu_activate()
821 if (ctx->flags & SPU_CREATE_NOSCHED) { spu_activate()
822 spu_prio_wait(ctx); spu_activate()
826 spu_add_to_rq(ctx); spu_activate()
839 struct spu_context *ctx; grab_runnable_context() local
847 list_for_each_entry(ctx, rq, rq) { list_for_each_entry()
849 if (__node_allowed(ctx, node)) { list_for_each_entry()
850 __spu_del_from_rq(ctx); list_for_each_entry()
856 ctx = NULL;
859 return ctx;
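grab_runnable_context() scans spu_prio->bitmap for the best (lowest-numbered) priority below the caller's limit and pops a node-allowed context from that list; the set_bit() in __spu_add_to_rq() is visible above, and the matching clear on the last delete is among the elided lines. A minimal sketch of such a bitmap-indexed runqueue (illustrative names, one singly-linked list per slot):

	#define NPRIO 140

	struct node { struct node *next; };

	struct runq {
		struct node *head[NPRIO];
		unsigned long bitmap[(NPRIO + 63) / 64];
	};

	static int first_set(const struct runq *q, int limit)
	{
		for (int p = 0; p < limit; p++)		/* kernel: find_first_bit() */
			if (q->bitmap[p / 64] & (1UL << (p % 64)))
				return p;
		return limit;
	}

	static struct node *grab(struct runq *q, int prio_limit)
	{
		int best = first_set(q, prio_limit);

		if (best == prio_limit)
			return NULL;			/* nothing beats the caller */
		struct node *n = q->head[best];
		if (!(q->head[best] = n->next))		/* list drained: clear bit */
			q->bitmap[best / 64] &= ~(1UL << (best % 64));
		return n;
	}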
862 static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio) __spu_deactivate() argument
864 struct spu *spu = ctx->spu; __spu_deactivate()
870 spu_unschedule(spu, ctx, new == NULL); __spu_deactivate()
875 spu_release(ctx); __spu_deactivate()
879 mutex_lock(&ctx->state_mutex); __spu_deactivate()
890 * @ctx: spu context to unbind
892 * Unbind @ctx from the physical spu it is running on and schedule
895 void spu_deactivate(struct spu_context *ctx) spu_deactivate() argument
897 spu_context_nospu_trace(spu_deactivate__enter, ctx); spu_deactivate()
898 __spu_deactivate(ctx, 1, MAX_PRIO); spu_deactivate()
903 * @ctx: spu context to yield
906 * unbind @ctx from the physical spu and schedule the highest
909 void spu_yield(struct spu_context *ctx) spu_yield() argument
911 spu_context_nospu_trace(spu_yield__enter, ctx); spu_yield()
912 if (!(ctx->flags & SPU_CREATE_NOSCHED)) { spu_yield()
913 mutex_lock(&ctx->state_mutex); spu_yield()
914 __spu_deactivate(ctx, 0, MAX_PRIO); spu_yield()
915 mutex_unlock(&ctx->state_mutex); spu_yield()
919 static noinline void spusched_tick(struct spu_context *ctx) spusched_tick() argument
924 if (spu_acquire(ctx)) spusched_tick()
927 if (ctx->state != SPU_STATE_RUNNABLE) spusched_tick()
929 if (ctx->flags & SPU_CREATE_NOSCHED) spusched_tick()
931 if (ctx->policy == SCHED_FIFO) spusched_tick()
934 if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) spusched_tick()
937 spu = ctx->spu; spusched_tick()
939 spu_context_trace(spusched_tick__preempt, ctx, spu); spusched_tick()
941 new = grab_runnable_context(ctx->prio + 1, spu->node); spusched_tick()
943 spu_unschedule(spu, ctx, 0); spusched_tick()
944 if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags)) spusched_tick()
945 spu_add_to_rq(ctx); spusched_tick()
947 spu_context_nospu_trace(spusched_tick__newslice, ctx); spusched_tick()
948 if (!ctx->time_slice) spusched_tick()
949 ctx->time_slice++; spusched_tick()
952 spu_release(ctx); spusched_tick()
1020 struct spu_context *ctx = spu->ctx; spusched_thread() local
1022 if (ctx) { spusched_thread()
1023 get_spu_context(ctx); spusched_thread()
1025 spusched_tick(ctx); spusched_thread()
1027 put_spu_context(ctx); spusched_thread()
1037 void spuctx_switch_state(struct spu_context *ctx, spuctx_switch_state() argument
1047 delta = curtime - ctx->stats.tstamp; spuctx_switch_state()
1049 WARN_ON(!mutex_is_locked(&ctx->state_mutex)); spuctx_switch_state()
1052 spu = ctx->spu; spuctx_switch_state()
1053 old_state = ctx->stats.util_state; spuctx_switch_state()
1054 ctx->stats.util_state = new_state; spuctx_switch_state()
1055 ctx->stats.tstamp = curtime; spuctx_switch_state()
1061 ctx->stats.times[old_state] += delta; spuctx_switch_state()
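The accounting above charges the time elapsed since the last transition to the state being left, then restamps. A self-contained sketch of that bookkeeping (illustrative types):

	#include <stdint.h>

	enum util_state { UTIL_USER, UTIL_SYSTEM, UTIL_IOWAIT,
			  UTIL_IDLE_LOADED, UTIL_MAX };

	struct stats {
		enum util_state util_state;
		uint64_t tstamp;		/* ns of last transition */
		uint64_t times[UTIL_MAX];	/* ns accumulated per state */
	};

	static void switch_state(struct stats *s, enum util_state new_state,
				 uint64_t now)
	{
		s->times[s->util_state] += now - s->tstamp;  /* charge old state */
		s->util_state = new_state;
		s->tstamp = now;
	}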
H A Dspufs.h49 /* ctx->sched_flags */
133 /* updates protected by ctx->state_mutex */
143 unsigned long long slb_flt_base; /* # at last ctx switch */
145 unsigned long long class2_intr_base; /* # at last ctx switch */
188 int (*mbox_read) (struct spu_context * ctx, u32 * data);
189 u32(*mbox_stat_read) (struct spu_context * ctx);
190 unsigned int (*mbox_stat_poll)(struct spu_context *ctx,
192 int (*ibox_read) (struct spu_context * ctx, u32 * data);
193 int (*wbox_write) (struct spu_context * ctx, u32 data);
194 u32(*signal1_read) (struct spu_context * ctx);
195 void (*signal1_write) (struct spu_context * ctx, u32 data);
196 u32(*signal2_read) (struct spu_context * ctx);
197 void (*signal2_write) (struct spu_context * ctx, u32 data);
198 void (*signal1_type_set) (struct spu_context * ctx, u64 val);
199 u64(*signal1_type_get) (struct spu_context * ctx);
200 void (*signal2_type_set) (struct spu_context * ctx, u64 val);
201 u64(*signal2_type_get) (struct spu_context * ctx);
202 u32(*npc_read) (struct spu_context * ctx);
203 void (*npc_write) (struct spu_context * ctx, u32 data);
204 u32(*status_read) (struct spu_context * ctx);
205 char*(*get_ls) (struct spu_context * ctx);
206 void (*privcntl_write) (struct spu_context *ctx, u64 data);
207 u32 (*runcntl_read) (struct spu_context * ctx);
208 void (*runcntl_write) (struct spu_context * ctx, u32 data);
209 void (*runcntl_stop) (struct spu_context * ctx);
210 void (*master_start) (struct spu_context * ctx);
211 void (*master_stop) (struct spu_context * ctx);
212 int (*set_mfc_query)(struct spu_context * ctx, u32 mask, u32 mode);
213 u32 (*read_mfc_tagstatus)(struct spu_context * ctx);
214 u32 (*get_mfc_free_elements)(struct spu_context *ctx);
215 int (*send_mfc_command)(struct spu_context * ctx,
217 void (*dma_info_read) (struct spu_context * ctx,
219 void (*proxydma_info_read) (struct spu_context * ctx,
221 void (*restart_dma)(struct spu_context *ctx);
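This ops table is what lets sched.c swap register backends at bind/unbind time (ctx->ops = &spu_hw_ops while bound, &spu_backing_ops while saved, as seen above); callers simply dispatch through ctx->ops. A condensed sketch of that dispatch with stand-in types:

	struct spu_context;

	struct ctx_ops {
		unsigned int (*runcntl_read)(struct spu_context *ctx);
	};

	struct spu_context {
		const struct ctx_ops *ops;	/* hw ops bound, backing ops saved */
		unsigned int saved_runcntl;	/* stands in for the csa image */
	};

	static unsigned int backing_runcntl_read(struct spu_context *ctx)
	{
		return ctx->saved_runcntl;	/* saved-state copy, no MMIO */
	}

	static const struct ctx_ops backing_ops = { backing_runcntl_read };

	static unsigned int runcntl_read(struct spu_context *ctx)
	{
		return ctx->ops->runcntl_read(ctx);	/* backend-agnostic */
	}

	/* usage: struct spu_context c = { &backing_ops, 0x1234 };
	 *        runcntl_read(&c) returns 0x1234 */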
250 long spufs_run_spu(struct spu_context *ctx, u32 *npc, u32 *status);
263 void spu_gang_remove_ctx(struct spu_gang *gang, struct spu_context *ctx);
264 void spu_gang_add_ctx(struct spu_gang *gang, struct spu_context *ctx);
267 int spufs_handle_class1(struct spu_context *ctx);
268 int spufs_handle_class0(struct spu_context *ctx);
271 struct spu *affinity_check(struct spu_context *ctx);
275 static inline int __must_check spu_acquire(struct spu_context *ctx) spu_acquire() argument
277 return mutex_lock_interruptible(&ctx->state_mutex); spu_acquire()
280 static inline void spu_release(struct spu_context *ctx) spu_release() argument
282 mutex_unlock(&ctx->state_mutex); spu_release()
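A typical caller shape around these helpers; mutex_lock_interruptible() can fail (-EINTR on a signal), which is why spu_acquire() is __must_check:

	static int with_ctx_locked(struct spu_context *ctx)
	{
		int ret = spu_acquire(ctx);	/* may return -EINTR */

		if (ret)
			return ret;
		/* ... inspect or change ctx->state under state_mutex ... */
		spu_release(ctx);
		return 0;
	}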
287 struct spu_context * get_spu_context(struct spu_context *ctx);
288 int put_spu_context(struct spu_context *ctx);
289 void spu_unmap_mappings(struct spu_context *ctx);
291 void spu_forget(struct spu_context *ctx);
292 int __must_check spu_acquire_saved(struct spu_context *ctx);
293 void spu_release_saved(struct spu_context *ctx);
295 int spu_stopped(struct spu_context *ctx, u32 * stat);
296 void spu_del_from_rq(struct spu_context *ctx);
297 int spu_activate(struct spu_context *ctx, unsigned long flags);
298 void spu_deactivate(struct spu_context *ctx);
299 void spu_yield(struct spu_context *ctx);
300 void spu_switch_notify(struct spu *spu, struct spu_context *ctx);
301 void spu_switch_log_notify(struct spu *spu, struct spu_context *ctx,
303 void spu_set_timeslice(struct spu_context *ctx);
304 void spu_update_sched_info(struct spu_context *ctx);
305 void __spu_update_sched_info(struct spu_context *ctx);
314 * we need to call spu_release(ctx) before sleeping, and
315 * then spu_acquire(ctx) when awoken.
329 spu_release(ctx); \
335 __ret = spu_acquire(ctx); \
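Most of the surrounding macro lacks the string "ctx" and is elided by this search; as an assumption about the elided body, the loop the comment describes has roughly this shape:

	for (;;) {
		prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE);
		if (condition)
			break;
		spu_release(ctx);		/* drop state_mutex before sleeping */
		schedule();
		__ret = spu_acquire(ctx);	/* retake it when awoken */
		if (__ret)
			break;
	}
	finish_wait(&wq, &__wait);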
343 size_t spu_wbox_write(struct spu_context *ctx, u32 data);
344 size_t spu_ibox_read(struct spu_context *ctx, u32 *data);
356 ssize_t (*read)(struct spu_context *ctx,
358 u64 (*get)(struct spu_context *ctx);
373 extern void spuctx_switch_state(struct spu_context *ctx,
H A Dcoredump.c37 static ssize_t do_coredump_read(int num, struct spu_context *ctx, void *buffer, do_coredump_read() argument
44 return spufs_coredump_read[num].read(ctx, buffer, size, off); do_coredump_read()
46 data = spufs_coredump_read[num].get(ctx); do_coredump_read()
53 static int spufs_ctx_note_size(struct spu_context *ctx, int dfd) spufs_ctx_note_size() argument
75 struct spu_context *ctx; match_context() local
78 ctx = SPUFS_I(file_inode(file))->i_ctx; match_context()
79 if (ctx->flags & SPU_CREATE_NOSCHED) match_context()
109 struct spu_context *ctx; spufs_coredump_extra_notes_size() local
113 while ((ctx = coredump_next_context(&fd)) != NULL) { spufs_coredump_extra_notes_size()
114 rc = spu_acquire_saved(ctx); spufs_coredump_extra_notes_size()
117 rc = spufs_ctx_note_size(ctx, fd); spufs_coredump_extra_notes_size()
118 spu_release_saved(ctx); spufs_coredump_extra_notes_size()
131 static int spufs_arch_write_note(struct spu_context *ctx, int i, spufs_arch_write_note() argument
163 rc = do_coredump_read(i, ctx, buf, bufsz, &pos); spufs_arch_write_note()
187 struct spu_context *ctx; spufs_coredump_extra_notes_write() local
191 while ((ctx = coredump_next_context(&fd)) != NULL) { spufs_coredump_extra_notes_write()
192 rc = spu_acquire_saved(ctx); spufs_coredump_extra_notes_write()
197 rc = spufs_arch_write_note(ctx, j, cprm, fd); spufs_coredump_extra_notes_write()
199 spu_release_saved(ctx); spufs_coredump_extra_notes_write()
204 spu_release_saved(ctx); spufs_coredump_extra_notes_write()
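Both coredump passes share one control shape: walk the dumping task's open spufs files, lock each context in saved state, size or emit its notes, unlock, and resume the fd scan past the one just handled. A sketch with extern stubs standing in for the helpers named above (emit_notes_for is hypothetical):

	struct spu_context;
	extern struct spu_context *coredump_next_context(int *fd);
	extern int spu_acquire_saved(struct spu_context *ctx);
	extern void spu_release_saved(struct spu_context *ctx);
	extern int emit_notes_for(struct spu_context *ctx, int fd);  /* hypothetical */

	static int write_all_notes(void)
	{
		struct spu_context *ctx;
		int fd = 0, rc = 0;

		while ((ctx = coredump_next_context(&fd)) != NULL) {
			rc = spu_acquire_saved(ctx);
			if (rc)
				break;
			rc = emit_notes_for(ctx, fd);
			spu_release_saved(ctx);
			if (rc)
				break;
			fd++;		/* resume the scan after this fd */
		}
		return rc;
	}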
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/engine/gr/
H A Dctxnv50.c170 static void nv50_gr_construct_mmio(struct nvkm_grctx *ctx);
171 static void nv50_gr_construct_xfer1(struct nvkm_grctx *ctx);
172 static void nv50_gr_construct_xfer2(struct nvkm_grctx *ctx);
177 nv50_grctx_generate(struct nvkm_grctx *ctx) nv50_grctx_generate() argument
179 cp_set (ctx, STATE, RUNNING); nv50_grctx_generate()
180 cp_set (ctx, XFER_SWITCH, ENABLE); nv50_grctx_generate()
182 cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); nv50_grctx_generate()
183 cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); nv50_grctx_generate()
185 cp_name(ctx, cp_check_load); nv50_grctx_generate()
186 cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load); nv50_grctx_generate()
187 cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load); nv50_grctx_generate()
188 cp_bra (ctx, ALWAYS, TRUE, cp_prepare_exit); nv50_grctx_generate()
191 cp_name(ctx, cp_setup_auto_load); nv50_grctx_generate()
192 cp_out (ctx, CP_DISABLE1); nv50_grctx_generate()
193 cp_out (ctx, CP_DISABLE2); nv50_grctx_generate()
194 cp_out (ctx, CP_ENABLE); nv50_grctx_generate()
195 cp_out (ctx, CP_NEXT_TO_SWAP); nv50_grctx_generate()
196 cp_set (ctx, UNK01, SET); nv50_grctx_generate()
197 cp_name(ctx, cp_setup_load); nv50_grctx_generate()
198 cp_out (ctx, CP_NEWCTX); nv50_grctx_generate()
199 cp_wait(ctx, NEWCTX, BUSY); nv50_grctx_generate()
200 cp_set (ctx, UNK1D, CLEAR); nv50_grctx_generate()
201 cp_set (ctx, SWAP_DIRECTION, LOAD); nv50_grctx_generate()
202 cp_bra (ctx, UNK0B, SET, cp_prepare_exit); nv50_grctx_generate()
203 cp_bra (ctx, ALWAYS, TRUE, cp_swap_state); nv50_grctx_generate()
206 cp_name(ctx, cp_setup_save); nv50_grctx_generate()
207 cp_set (ctx, UNK1D, SET); nv50_grctx_generate()
208 cp_wait(ctx, STATUS, BUSY); nv50_grctx_generate()
209 cp_wait(ctx, INTR, PENDING); nv50_grctx_generate()
210 cp_bra (ctx, STATUS, BUSY, cp_setup_save); nv50_grctx_generate()
211 cp_set (ctx, UNK01, SET); nv50_grctx_generate()
212 cp_set (ctx, SWAP_DIRECTION, SAVE); nv50_grctx_generate()
215 cp_name(ctx, cp_swap_state); nv50_grctx_generate()
216 cp_set (ctx, UNK03, SET); nv50_grctx_generate()
217 cp_pos (ctx, 0x00004/4); nv50_grctx_generate()
218 cp_ctx (ctx, 0x400828, 1); /* needed; otherwise, flickering happens. */ nv50_grctx_generate()
219 cp_pos (ctx, 0x00100/4); nv50_grctx_generate()
220 nv50_gr_construct_mmio(ctx); nv50_grctx_generate()
221 nv50_gr_construct_xfer1(ctx); nv50_grctx_generate()
222 nv50_gr_construct_xfer2(ctx); nv50_grctx_generate()
224 cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load); nv50_grctx_generate()
226 cp_set (ctx, UNK20, SET); nv50_grctx_generate()
227 cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. */ nv50_grctx_generate()
228 cp_lsr (ctx, ctx->ctxvals_base); nv50_grctx_generate()
229 cp_out (ctx, CP_SET_XFER_POINTER); nv50_grctx_generate()
230 cp_lsr (ctx, 4); nv50_grctx_generate()
231 cp_out (ctx, CP_SEEK_1); nv50_grctx_generate()
232 cp_out (ctx, CP_XFER_1); nv50_grctx_generate()
233 cp_wait(ctx, XFER, BUSY); nv50_grctx_generate()
236 cp_name(ctx, cp_prepare_exit); nv50_grctx_generate()
237 cp_set (ctx, UNK01, CLEAR); nv50_grctx_generate()
238 cp_set (ctx, UNK03, CLEAR); nv50_grctx_generate()
239 cp_set (ctx, UNK1D, CLEAR); nv50_grctx_generate()
241 cp_bra (ctx, USER_SAVE, PENDING, cp_exit); nv50_grctx_generate()
242 cp_out (ctx, CP_NEXT_TO_CURRENT); nv50_grctx_generate()
244 cp_name(ctx, cp_exit); nv50_grctx_generate()
245 cp_set (ctx, USER_SAVE, NOT_PENDING); nv50_grctx_generate()
246 cp_set (ctx, USER_LOAD, NOT_PENDING); nv50_grctx_generate()
247 cp_set (ctx, XFER_SWITCH, DISABLE); nv50_grctx_generate()
248 cp_set (ctx, STATE, STOPPED); nv50_grctx_generate()
249 cp_out (ctx, CP_END); nv50_grctx_generate()
250 ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */ nv50_grctx_generate()
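nv50_grctx_generate() is not run on the CPU at context-switch time; it emits a small "ctxprog" microcode program (labels via cp_name, conditional branches via cp_bra, flag writes via cp_set, register-range transfers via cp_ctx) that PGRAPH's context-switching engine executes. A minimal sketch of the append-an-opcode style, with made-up encodings (only the mechanism is the point):

	#include <stdint.h>

	struct prog {
		uint32_t code[512];
		int len;
	};

	enum { OP_SET = 1u << 28, OP_BRA = 2u << 28, OP_OUT = 3u << 28 };

	static void emit(struct prog *p, uint32_t op)
	{
		p->code[p->len++] = op;		/* append one microcode word */
	}

	static void cp_set_(struct prog *p, unsigned flag, unsigned val)
	{
		emit(p, OP_SET | flag << 8 | val);
	}

	static void cp_bra_(struct prog *p, unsigned cond, unsigned target)
	{
		emit(p, OP_BRA | cond << 16 | target);
	}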
269 struct nvkm_grctx ctx = { nv50_grctx_init() local
278 nv50_grctx_generate(&ctx); nv50_grctx_init()
281 for (i = 0; i < ctx.ctxprog_len; i++) nv50_grctx_init()
283 *size = ctx.ctxvals_pos * 4; nv50_grctx_init()
294 nv50_gr_construct_mmio_ddata(struct nvkm_grctx *ctx);
297 nv50_gr_construct_mmio(struct nvkm_grctx *ctx) nv50_gr_construct_mmio() argument
299 struct nvkm_device *device = ctx->device; nv50_gr_construct_mmio()
302 u32 units = nv_rd32 (ctx->device, 0x1540); nv50_gr_construct_mmio()
305 cp_ctx(ctx, 0x400808, 7); nv50_gr_construct_mmio()
306 gr_def(ctx, 0x400814, 0x00000030); nv50_gr_construct_mmio()
307 cp_ctx(ctx, 0x400834, 0x32); nv50_gr_construct_mmio()
309 gr_def(ctx, 0x400834, 0xff400040); nv50_gr_construct_mmio()
310 gr_def(ctx, 0x400838, 0xfff00080); nv50_gr_construct_mmio()
311 gr_def(ctx, 0x40083c, 0xfff70090); nv50_gr_construct_mmio()
312 gr_def(ctx, 0x400840, 0xffe806a8); nv50_gr_construct_mmio()
314 gr_def(ctx, 0x400844, 0x00000002); nv50_gr_construct_mmio()
316 gr_def(ctx, 0x400894, 0x00001000); nv50_gr_construct_mmio()
317 gr_def(ctx, 0x4008e8, 0x00000003); nv50_gr_construct_mmio()
318 gr_def(ctx, 0x4008ec, 0x00001000); nv50_gr_construct_mmio()
320 cp_ctx(ctx, 0x400908, 0xb); nv50_gr_construct_mmio()
322 cp_ctx(ctx, 0x400908, 0xc); nv50_gr_construct_mmio()
324 cp_ctx(ctx, 0x400908, 0xe); nv50_gr_construct_mmio()
327 cp_ctx(ctx, 0x400b00, 0x1); nv50_gr_construct_mmio()
329 cp_ctx(ctx, 0x400b10, 0x1); nv50_gr_construct_mmio()
330 gr_def(ctx, 0x400b10, 0x0001629d); nv50_gr_construct_mmio()
331 cp_ctx(ctx, 0x400b20, 0x1); nv50_gr_construct_mmio()
332 gr_def(ctx, 0x400b20, 0x0001629d); nv50_gr_construct_mmio()
335 nv50_gr_construct_mmio_ddata(ctx); nv50_gr_construct_mmio()
338 cp_ctx(ctx, 0x400c08, 0x2); nv50_gr_construct_mmio()
339 gr_def(ctx, 0x400c08, 0x0000fe0c); nv50_gr_construct_mmio()
343 cp_ctx(ctx, 0x401008, 0x4); nv50_gr_construct_mmio()
344 gr_def(ctx, 0x401014, 0x00001000); nv50_gr_construct_mmio()
346 cp_ctx(ctx, 0x401008, 0x5); nv50_gr_construct_mmio()
347 gr_def(ctx, 0x401018, 0x00001000); nv50_gr_construct_mmio()
349 cp_ctx(ctx, 0x401008, 0x5); nv50_gr_construct_mmio()
350 gr_def(ctx, 0x401018, 0x00004000); nv50_gr_construct_mmio()
354 cp_ctx(ctx, 0x401400, 0x8); nv50_gr_construct_mmio()
355 cp_ctx(ctx, 0x401424, 0x3); nv50_gr_construct_mmio()
357 gr_def(ctx, 0x40142c, 0x0001fd87); nv50_gr_construct_mmio()
359 gr_def(ctx, 0x40142c, 0x00000187); nv50_gr_construct_mmio()
360 cp_ctx(ctx, 0x401540, 0x5); nv50_gr_construct_mmio()
361 gr_def(ctx, 0x401550, 0x00001018); nv50_gr_construct_mmio()
364 cp_ctx(ctx, 0x401814, 0x1); nv50_gr_construct_mmio()
365 gr_def(ctx, 0x401814, 0x000000ff); nv50_gr_construct_mmio()
367 cp_ctx(ctx, 0x40181c, 0xe); nv50_gr_construct_mmio()
368 gr_def(ctx, 0x401850, 0x00000004); nv50_gr_construct_mmio()
370 cp_ctx(ctx, 0x40181c, 0xf); nv50_gr_construct_mmio()
371 gr_def(ctx, 0x401854, 0x00000004); nv50_gr_construct_mmio()
373 cp_ctx(ctx, 0x40181c, 0x13); nv50_gr_construct_mmio()
374 gr_def(ctx, 0x401864, 0x00000004); nv50_gr_construct_mmio()
378 cp_ctx(ctx, 0x401c00, 0x1); nv50_gr_construct_mmio()
381 gr_def(ctx, 0x401c00, 0x0001005f); nv50_gr_construct_mmio()
386 gr_def(ctx, 0x401c00, 0x044d00df); nv50_gr_construct_mmio()
394 gr_def(ctx, 0x401c00, 0x042500df); nv50_gr_construct_mmio()
400 gr_def(ctx, 0x401c00, 0x142500df); nv50_gr_construct_mmio()
407 cp_ctx(ctx, 0x402400, 0x1); nv50_gr_construct_mmio()
409 cp_ctx(ctx, 0x402408, 0x1); nv50_gr_construct_mmio()
411 cp_ctx(ctx, 0x402408, 0x2); nv50_gr_construct_mmio()
412 gr_def(ctx, 0x402408, 0x00000600); nv50_gr_construct_mmio()
415 cp_ctx(ctx, 0x402800, 0x1); nv50_gr_construct_mmio()
417 gr_def(ctx, 0x402800, 0x00000006); nv50_gr_construct_mmio()
420 cp_ctx(ctx, 0x402c08, 0x6); nv50_gr_construct_mmio()
422 gr_def(ctx, 0x402c14, 0x01000000); nv50_gr_construct_mmio()
423 gr_def(ctx, 0x402c18, 0x000000ff); nv50_gr_construct_mmio()
425 cp_ctx(ctx, 0x402ca0, 0x1); nv50_gr_construct_mmio()
427 cp_ctx(ctx, 0x402ca0, 0x2); nv50_gr_construct_mmio()
429 gr_def(ctx, 0x402ca0, 0x00000400); nv50_gr_construct_mmio()
431 gr_def(ctx, 0x402ca0, 0x00000800); nv50_gr_construct_mmio()
433 gr_def(ctx, 0x402ca0, 0x00000400); nv50_gr_construct_mmio()
434 cp_ctx(ctx, 0x402cac, 0x4); nv50_gr_construct_mmio()
437 cp_ctx(ctx, 0x403004, 0x1); nv50_gr_construct_mmio()
438 gr_def(ctx, 0x403004, 0x00000001); nv50_gr_construct_mmio()
442 cp_ctx(ctx, 0x403404, 0x1); nv50_gr_construct_mmio()
443 gr_def(ctx, 0x403404, 0x00000001); nv50_gr_construct_mmio()
447 cp_ctx(ctx, 0x405000, 0x1); nv50_gr_construct_mmio()
450 gr_def(ctx, 0x405000, 0x00300080); nv50_gr_construct_mmio()
460 gr_def(ctx, 0x405000, 0x000e0080); nv50_gr_construct_mmio()
467 gr_def(ctx, 0x405000, 0x00000080); nv50_gr_construct_mmio()
470 cp_ctx(ctx, 0x405014, 0x1); nv50_gr_construct_mmio()
471 gr_def(ctx, 0x405014, 0x00000004); nv50_gr_construct_mmio()
472 cp_ctx(ctx, 0x40501c, 0x1); nv50_gr_construct_mmio()
473 cp_ctx(ctx, 0x405024, 0x1); nv50_gr_construct_mmio()
474 cp_ctx(ctx, 0x40502c, 0x1); nv50_gr_construct_mmio()
478 cp_ctx(ctx, 0x4063e0, 0x1); nv50_gr_construct_mmio()
482 cp_ctx(ctx, 0x406814, 0x2b); nv50_gr_construct_mmio()
483 gr_def(ctx, 0x406818, 0x00000f80); nv50_gr_construct_mmio()
484 gr_def(ctx, 0x406860, 0x007f0080); nv50_gr_construct_mmio()
485 gr_def(ctx, 0x40689c, 0x007f0080); nv50_gr_construct_mmio()
487 cp_ctx(ctx, 0x406814, 0x4); nv50_gr_construct_mmio()
489 gr_def(ctx, 0x406818, 0x00000f80); nv50_gr_construct_mmio()
491 gr_def(ctx, 0x406818, 0x00001f80); nv50_gr_construct_mmio()
493 gr_def(ctx, 0x40681c, 0x00000030); nv50_gr_construct_mmio()
494 cp_ctx(ctx, 0x406830, 0x3); nv50_gr_construct_mmio()
500 cp_ctx(ctx, 0x407000 + (i<<8), 3); nv50_gr_construct_mmio()
502 gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820); nv50_gr_construct_mmio()
504 gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821); nv50_gr_construct_mmio()
506 gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821); nv50_gr_construct_mmio()
507 gr_def(ctx, 0x407004 + (i<<8), 0x89058001); nv50_gr_construct_mmio()
510 cp_ctx(ctx, 0x407010 + (i<<8), 1); nv50_gr_construct_mmio()
512 cp_ctx(ctx, 0x407010 + (i<<8), 2); nv50_gr_construct_mmio()
513 gr_def(ctx, 0x407010 + (i<<8), 0x00001000); nv50_gr_construct_mmio()
514 gr_def(ctx, 0x407014 + (i<<8), 0x0000001f); nv50_gr_construct_mmio()
516 cp_ctx(ctx, 0x407010 + (i<<8), 3); nv50_gr_construct_mmio()
517 gr_def(ctx, 0x407010 + (i<<8), 0x00001000); nv50_gr_construct_mmio()
519 gr_def(ctx, 0x407014 + (i<<8), 0x000000ff); nv50_gr_construct_mmio()
521 gr_def(ctx, 0x407014 + (i<<8), 0x000001ff); nv50_gr_construct_mmio()
524 cp_ctx(ctx, 0x407080 + (i<<8), 4); nv50_gr_construct_mmio()
526 gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa); nv50_gr_construct_mmio()
528 gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa); nv50_gr_construct_mmio()
530 gr_def(ctx, 0x407084 + (i<<8), 0x000000c0); nv50_gr_construct_mmio()
532 gr_def(ctx, 0x407084 + (i<<8), 0x400000c0); nv50_gr_construct_mmio()
533 gr_def(ctx, 0x407088 + (i<<8), 0xb7892080); nv50_gr_construct_mmio()
536 cp_ctx(ctx, 0x407094 + (i<<8), 1); nv50_gr_construct_mmio()
538 cp_ctx(ctx, 0x407094 + (i<<8), 3); nv50_gr_construct_mmio()
540 cp_ctx(ctx, 0x407094 + (i<<8), 4); nv50_gr_construct_mmio()
541 gr_def(ctx, 0x4070a0 + (i<<8), 1); nv50_gr_construct_mmio()
546 cp_ctx(ctx, 0x407c00, 0x3); nv50_gr_construct_mmio()
548 gr_def(ctx, 0x407c00, 0x00010040); nv50_gr_construct_mmio()
550 gr_def(ctx, 0x407c00, 0x00390040); nv50_gr_construct_mmio()
552 gr_def(ctx, 0x407c00, 0x003d0040); nv50_gr_construct_mmio()
553 gr_def(ctx, 0x407c08, 0x00000022); nv50_gr_construct_mmio()
555 cp_ctx(ctx, 0x407c10, 0x3); nv50_gr_construct_mmio()
556 cp_ctx(ctx, 0x407c20, 0x1); nv50_gr_construct_mmio()
557 cp_ctx(ctx, 0x407c2c, 0x1); nv50_gr_construct_mmio()
561 cp_ctx(ctx, 0x407d00, 0x9); nv50_gr_construct_mmio()
563 cp_ctx(ctx, 0x407d00, 0x15); nv50_gr_construct_mmio()
566 gr_def(ctx, 0x407d08, 0x00380040); nv50_gr_construct_mmio()
569 gr_def(ctx, 0x407d08, 0x00010040); nv50_gr_construct_mmio()
571 gr_def(ctx, 0x407d08, 0x00390040); nv50_gr_construct_mmio()
574 gr_def(ctx, 0x407d08, 0x003d0040); nv50_gr_construct_mmio()
576 gr_def(ctx, 0x407d08, 0x003c0040); nv50_gr_construct_mmio()
578 gr_def(ctx, 0x407d0c, 0x00000022); nv50_gr_construct_mmio()
592 cp_ctx(ctx, offset + 0x00, 1); nv50_gr_construct_mmio()
593 gr_def(ctx, offset + 0x00, 0x0000ff0a); nv50_gr_construct_mmio()
594 cp_ctx(ctx, offset + 0x08, 1); nv50_gr_construct_mmio()
603 cp_ctx(ctx, offset, 0x20); nv50_gr_construct_mmio()
604 gr_def(ctx, offset + 0x00, 0x01800000); nv50_gr_construct_mmio()
605 gr_def(ctx, offset + 0x04, 0x00160000); nv50_gr_construct_mmio()
606 gr_def(ctx, offset + 0x08, 0x01800000); nv50_gr_construct_mmio()
607 gr_def(ctx, offset + 0x18, 0x0003ffff); nv50_gr_construct_mmio()
610 gr_def(ctx, offset + 0x1c, 0x00080000); nv50_gr_construct_mmio()
613 gr_def(ctx, offset + 0x1c, 0x00880000); nv50_gr_construct_mmio()
616 gr_def(ctx, offset + 0x1c, 0x018c0000); nv50_gr_construct_mmio()
621 gr_def(ctx, offset + 0x1c, 0x118c0000); nv50_gr_construct_mmio()
624 gr_def(ctx, offset + 0x1c, 0x10880000); nv50_gr_construct_mmio()
628 gr_def(ctx, offset + 0x1c, 0x310c0000); nv50_gr_construct_mmio()
635 gr_def(ctx, offset + 0x1c, 0x300c0000); nv50_gr_construct_mmio()
638 gr_def(ctx, offset + 0x40, 0x00010401); nv50_gr_construct_mmio()
640 gr_def(ctx, offset + 0x48, 0x00000040); nv50_gr_construct_mmio()
642 gr_def(ctx, offset + 0x48, 0x00000078); nv50_gr_construct_mmio()
643 gr_def(ctx, offset + 0x50, 0x000000bf); nv50_gr_construct_mmio()
644 gr_def(ctx, offset + 0x58, 0x00001210); nv50_gr_construct_mmio()
646 gr_def(ctx, offset + 0x5c, 0x00000080); nv50_gr_construct_mmio()
648 gr_def(ctx, offset + 0x5c, 0x08000080); nv50_gr_construct_mmio()
650 gr_def(ctx, offset + 0x68, 0x0000003e); nv50_gr_construct_mmio()
654 cp_ctx(ctx, base + 0x300, 0x4); nv50_gr_construct_mmio()
656 cp_ctx(ctx, base + 0x300, 0x5); nv50_gr_construct_mmio()
658 gr_def(ctx, base + 0x304, 0x00007070); nv50_gr_construct_mmio()
660 gr_def(ctx, base + 0x304, 0x00027070); nv50_gr_construct_mmio()
662 gr_def(ctx, base + 0x304, 0x01127070); nv50_gr_construct_mmio()
664 gr_def(ctx, base + 0x304, 0x05127070); nv50_gr_construct_mmio()
667 cp_ctx(ctx, base + 0x318, 1); nv50_gr_construct_mmio()
669 cp_ctx(ctx, base + 0x320, 1); nv50_gr_construct_mmio()
671 gr_def(ctx, base + 0x318, 0x0003ffff); nv50_gr_construct_mmio()
673 gr_def(ctx, base + 0x318, 0x03ffffff); nv50_gr_construct_mmio()
675 gr_def(ctx, base + 0x320, 0x07ffffff); nv50_gr_construct_mmio()
678 cp_ctx(ctx, base + 0x324, 5); nv50_gr_construct_mmio()
680 cp_ctx(ctx, base + 0x328, 4); nv50_gr_construct_mmio()
683 cp_ctx(ctx, base + 0x340, 9); nv50_gr_construct_mmio()
686 cp_ctx(ctx, base + 0x33c, 0xb); nv50_gr_construct_mmio()
689 cp_ctx(ctx, base + 0x33c, 0xd); nv50_gr_construct_mmio()
692 gr_def(ctx, offset + 0x0, 0x00120407); nv50_gr_construct_mmio()
693 gr_def(ctx, offset + 0x4, 0x05091507); nv50_gr_construct_mmio()
695 gr_def(ctx, offset + 0x8, 0x05100202); nv50_gr_construct_mmio()
697 gr_def(ctx, offset + 0x8, 0x05010202); nv50_gr_construct_mmio()
698 gr_def(ctx, offset + 0xc, 0x00030201); nv50_gr_construct_mmio()
700 cp_ctx(ctx, base + 0x36c, 1); nv50_gr_construct_mmio()
702 cp_ctx(ctx, base + 0x400, 2); nv50_gr_construct_mmio()
703 gr_def(ctx, base + 0x404, 0x00000040); nv50_gr_construct_mmio()
704 cp_ctx(ctx, base + 0x40c, 2); nv50_gr_construct_mmio()
705 gr_def(ctx, base + 0x40c, 0x0d0c0b0a); nv50_gr_construct_mmio()
706 gr_def(ctx, base + 0x410, 0x00141210); nv50_gr_construct_mmio()
712 cp_ctx(ctx, offset, 6); nv50_gr_construct_mmio()
713 gr_def(ctx, offset + 0x0, 0x000001f0); nv50_gr_construct_mmio()
714 gr_def(ctx, offset + 0x4, 0x00000001); nv50_gr_construct_mmio()
715 gr_def(ctx, offset + 0x8, 0x00000003); nv50_gr_construct_mmio()
717 gr_def(ctx, offset + 0xc, 0x00008000); nv50_gr_construct_mmio()
718 gr_def(ctx, offset + 0x14, 0x00039e00); nv50_gr_construct_mmio()
719 cp_ctx(ctx, offset + 0x1c, 2); nv50_gr_construct_mmio()
721 gr_def(ctx, offset + 0x1c, 0x00000040); nv50_gr_construct_mmio()
723 gr_def(ctx, offset + 0x1c, 0x00000100); nv50_gr_construct_mmio()
724 gr_def(ctx, offset + 0x20, 0x00003800); nv50_gr_construct_mmio()
727 cp_ctx(ctx, base + 0x54c, 2); nv50_gr_construct_mmio()
729 gr_def(ctx, base + 0x54c, 0x003fe006); nv50_gr_construct_mmio()
731 gr_def(ctx, base + 0x54c, 0x003fe007); nv50_gr_construct_mmio()
732 gr_def(ctx, base + 0x550, 0x003fe000); nv50_gr_construct_mmio()
739 cp_ctx(ctx, offset, 1); nv50_gr_construct_mmio()
740 gr_def(ctx, offset, 0x00404040); nv50_gr_construct_mmio()
746 cp_ctx(ctx, offset, 2); nv50_gr_construct_mmio()
748 gr_def(ctx, offset, 0x0077f005); nv50_gr_construct_mmio()
750 gr_def(ctx, offset, 0x6cf7f007); nv50_gr_construct_mmio()
752 gr_def(ctx, offset, 0x6cfff007); nv50_gr_construct_mmio()
754 gr_def(ctx, offset, 0x0cfff007); nv50_gr_construct_mmio()
756 gr_def(ctx, offset, 0x0cf7f007); nv50_gr_construct_mmio()
758 gr_def(ctx, offset + 0x4, 0x00007fff); nv50_gr_construct_mmio()
760 gr_def(ctx, offset + 0x4, 0x003f7fff); nv50_gr_construct_mmio()
762 gr_def(ctx, offset + 0x4, 0x02bf7fff); nv50_gr_construct_mmio()
763 cp_ctx(ctx, offset + 0x2c, 1); nv50_gr_construct_mmio()
765 cp_ctx(ctx, offset + 0x50, 9); nv50_gr_construct_mmio()
766 gr_def(ctx, offset + 0x54, 0x000003ff); nv50_gr_construct_mmio()
767 gr_def(ctx, offset + 0x58, 0x00000003); nv50_gr_construct_mmio()
768 gr_def(ctx, offset + 0x5c, 0x00000003); nv50_gr_construct_mmio()
769 gr_def(ctx, offset + 0x60, 0x000001ff); nv50_gr_construct_mmio()
770 gr_def(ctx, offset + 0x64, 0x0000001f); nv50_gr_construct_mmio()
771 gr_def(ctx, offset + 0x68, 0x0000000f); nv50_gr_construct_mmio()
772 gr_def(ctx, offset + 0x6c, 0x0000000f); nv50_gr_construct_mmio()
774 cp_ctx(ctx, offset + 0x50, 1); nv50_gr_construct_mmio()
775 cp_ctx(ctx, offset + 0x70, 1); nv50_gr_construct_mmio()
777 cp_ctx(ctx, offset + 0x50, 1); nv50_gr_construct_mmio()
778 cp_ctx(ctx, offset + 0x60, 5); nv50_gr_construct_mmio()
785 dd_emit(struct nvkm_grctx *ctx, int num, u32 val) { dd_emit() argument
787 if (val && ctx->mode == NVKM_GRCTX_VALS) dd_emit()
789 nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val); dd_emit()
790 ctx->ctxvals_pos += num; dd_emit()
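The same construct_* functions appear to run twice: once in PROG mode (emitting cp_ctx transfer ranges into the ctxprog) and once in NVKM_GRCTX_VALS mode (filling default values into the context image). dd_emit() writes only in VALS mode but always advances ctxvals_pos, so both passes agree on the layout, as in this stand-in sketch:

	#include <stdint.h>

	enum mode { MODE_PROG, MODE_VALS };

	struct genctx {
		enum mode mode;
		unsigned int pos;	/* ctxvals_pos: advances in both passes */
		uint32_t *image;	/* written only in MODE_VALS */
	};

	static void emit_vals(struct genctx *g, int num, uint32_t val)
	{
		if (val && g->mode == MODE_VALS)
			for (int i = 0; i < num; i++)
				g->image[g->pos + i] = val;
		g->pos += num;		/* keeps layout identical across passes */
	}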
794 nv50_gr_construct_mmio_ddata(struct nvkm_grctx *ctx) nv50_gr_construct_mmio_ddata() argument
796 struct nvkm_device *device = ctx->device; nv50_gr_construct_mmio_ddata()
798 base = ctx->ctxvals_pos; nv50_gr_construct_mmio_ddata()
801 dd_emit(ctx, 1, 0); /* 00000001 UNK0F90 */ nv50_gr_construct_mmio_ddata()
802 dd_emit(ctx, 1, 0); /* 00000001 UNK135C */ nv50_gr_construct_mmio_ddata()
805 dd_emit(ctx, 1, 0); /* 00000007 SRC_TILE_MODE_Z */ nv50_gr_construct_mmio_ddata()
806 dd_emit(ctx, 1, 2); /* 00000007 SRC_TILE_MODE_Y */ nv50_gr_construct_mmio_ddata()
807 dd_emit(ctx, 1, 1); /* 00000001 SRC_LINEAR #1 */ nv50_gr_construct_mmio_ddata()
808 dd_emit(ctx, 1, 0); /* 000000ff SRC_ADDRESS_HIGH */ nv50_gr_construct_mmio_ddata()
809 dd_emit(ctx, 1, 0); /* 00000001 SRC_SRGB */ nv50_gr_construct_mmio_ddata()
811 dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */ nv50_gr_construct_mmio_ddata()
812 dd_emit(ctx, 1, 1); /* 00000fff SRC_DEPTH */ nv50_gr_construct_mmio_ddata()
813 dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */ nv50_gr_construct_mmio_ddata()
816 dd_emit(ctx, 1, 0); /* 0000000f TEXTURES_LOG2 */ nv50_gr_construct_mmio_ddata()
817 dd_emit(ctx, 1, 0); /* 0000000f SAMPLERS_LOG2 */ nv50_gr_construct_mmio_ddata()
818 dd_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */ nv50_gr_construct_mmio_ddata()
819 dd_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */ nv50_gr_construct_mmio_ddata()
820 dd_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ nv50_gr_construct_mmio_ddata()
821 dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */ nv50_gr_construct_mmio_ddata()
822 dd_emit(ctx, 1, 1); /* 0000ffff BLOCK_ALLOC_THREADS */ nv50_gr_construct_mmio_ddata()
823 dd_emit(ctx, 1, 1); /* 00000001 LANES32 */ nv50_gr_construct_mmio_ddata()
824 dd_emit(ctx, 1, 0); /* 000000ff UNK370 */ nv50_gr_construct_mmio_ddata()
825 dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_UNK */ nv50_gr_construct_mmio_ddata()
826 dd_emit(ctx, 1, 0); /* 000000ff USER_PARAM_COUNT */ nv50_gr_construct_mmio_ddata()
827 dd_emit(ctx, 1, 1); /* 000000ff UNK384 bits 8-15 */ nv50_gr_construct_mmio_ddata()
828 dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ nv50_gr_construct_mmio_ddata()
829 dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ nv50_gr_construct_mmio_ddata()
830 dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */ nv50_gr_construct_mmio_ddata()
831 dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_X */ nv50_gr_construct_mmio_ddata()
832 dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_XMY */ nv50_gr_construct_mmio_ddata()
833 dd_emit(ctx, 1, 0); /* 00000001 BLOCKDIM_XMY_OVERFLOW */ nv50_gr_construct_mmio_ddata()
834 dd_emit(ctx, 1, 1); /* 0003ffff BLOCKDIM_XMYMZ */ nv50_gr_construct_mmio_ddata()
835 dd_emit(ctx, 1, 1); /* 000007ff BLOCKDIM_Y */ nv50_gr_construct_mmio_ddata()
836 dd_emit(ctx, 1, 1); /* 0000007f BLOCKDIM_Z */ nv50_gr_construct_mmio_ddata()
837 dd_emit(ctx, 1, 4); /* 000000ff CP_REG_ALLOC_TEMP */ nv50_gr_construct_mmio_ddata()
838 dd_emit(ctx, 1, 1); /* 00000001 BLOCKDIM_DIRTY */ nv50_gr_construct_mmio_ddata()
840 dd_emit(ctx, 1, 0); /* 00000003 UNK03E8 */ nv50_gr_construct_mmio_ddata()
841 dd_emit(ctx, 1, 1); /* 0000007f BLOCK_ALLOC_HALFWARPS */ nv50_gr_construct_mmio_ddata()
842 dd_emit(ctx, 1, 1); /* 00000007 LOCAL_WARPS_NO_CLAMP */ nv50_gr_construct_mmio_ddata()
843 dd_emit(ctx, 1, 7); /* 00000007 LOCAL_WARPS_LOG_ALLOC */ nv50_gr_construct_mmio_ddata()
844 dd_emit(ctx, 1, 1); /* 00000007 STACK_WARPS_NO_CLAMP */ nv50_gr_construct_mmio_ddata()
845 dd_emit(ctx, 1, 7); /* 00000007 STACK_WARPS_LOG_ALLOC */ nv50_gr_construct_mmio_ddata()
846 dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */ nv50_gr_construct_mmio_ddata()
847 dd_emit(ctx, 1, 1); /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */ nv50_gr_construct_mmio_ddata()
848 dd_emit(ctx, 1, 1); /* 000007ff BLOCK_ALLOC_THREADS */ nv50_gr_construct_mmio_ddata()
852 dd_emit(ctx, 4, 0); /* 0000ffff clip X, Y, W, H */ nv50_gr_construct_mmio_ddata()
854 dd_emit(ctx, 1, 1); /* ffffffff chroma COLOR_FORMAT */ nv50_gr_construct_mmio_ddata()
856 dd_emit(ctx, 1, 1); /* ffffffff pattern COLOR_FORMAT */ nv50_gr_construct_mmio_ddata()
857 dd_emit(ctx, 1, 0); /* ffffffff pattern SHAPE */ nv50_gr_construct_mmio_ddata()
858 dd_emit(ctx, 1, 1); /* ffffffff pattern PATTERN_SELECT */ nv50_gr_construct_mmio_ddata()
860 dd_emit(ctx, 1, 0xa); /* ffffffff surf2d SRC_FORMAT */ nv50_gr_construct_mmio_ddata()
861 dd_emit(ctx, 1, 0); /* ffffffff surf2d DMA_SRC */ nv50_gr_construct_mmio_ddata()
862 dd_emit(ctx, 1, 0); /* 000000ff surf2d SRC_ADDRESS_HIGH */ nv50_gr_construct_mmio_ddata()
863 dd_emit(ctx, 1, 0); /* ffffffff surf2d SRC_ADDRESS_LOW */ nv50_gr_construct_mmio_ddata()
864 dd_emit(ctx, 1, 0x40); /* 0000ffff surf2d SRC_PITCH */ nv50_gr_construct_mmio_ddata()
865 dd_emit(ctx, 1, 0); /* 0000000f surf2d SRC_TILE_MODE_Z */ nv50_gr_construct_mmio_ddata()
866 dd_emit(ctx, 1, 2); /* 0000000f surf2d SRC_TILE_MODE_Y */ nv50_gr_construct_mmio_ddata()
867 dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_HEIGHT */ nv50_gr_construct_mmio_ddata()
868 dd_emit(ctx, 1, 1); /* 00000001 surf2d SRC_LINEAR */ nv50_gr_construct_mmio_ddata()
869 dd_emit(ctx, 1, 0x100); /* ffffffff surf2d SRC_WIDTH */ nv50_gr_construct_mmio_ddata()
871 dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_X */ nv50_gr_construct_mmio_ddata()
872 dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_B_Y */ nv50_gr_construct_mmio_ddata()
873 dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_X */ nv50_gr_construct_mmio_ddata()
874 dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_C_Y */ nv50_gr_construct_mmio_ddata()
875 dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_X */ nv50_gr_construct_mmio_ddata()
876 dd_emit(ctx, 1, 0); /* 0000ffff gdirect CLIP_D_Y */ nv50_gr_construct_mmio_ddata()
877 dd_emit(ctx, 1, 1); /* ffffffff gdirect COLOR_FORMAT */ nv50_gr_construct_mmio_ddata()
878 dd_emit(ctx, 1, 0); /* ffffffff gdirect OPERATION */ nv50_gr_construct_mmio_ddata()
879 dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_X */ nv50_gr_construct_mmio_ddata()
880 dd_emit(ctx, 1, 0); /* 0000ffff gdirect POINT_Y */ nv50_gr_construct_mmio_ddata()
882 dd_emit(ctx, 1, 0); /* 0000ffff blit SRC_Y */ nv50_gr_construct_mmio_ddata()
883 dd_emit(ctx, 1, 0); /* ffffffff blit OPERATION */ nv50_gr_construct_mmio_ddata()
885 dd_emit(ctx, 1, 0); /* ffffffff ifc OPERATION */ nv50_gr_construct_mmio_ddata()
887 dd_emit(ctx, 1, 0); /* ffffffff iifc INDEX_FORMAT */ nv50_gr_construct_mmio_ddata()
888 dd_emit(ctx, 1, 0); /* ffffffff iifc LUT_OFFSET */ nv50_gr_construct_mmio_ddata()
889 dd_emit(ctx, 1, 4); /* ffffffff iifc COLOR_FORMAT */ nv50_gr_construct_mmio_ddata()
890 dd_emit(ctx, 1, 0); /* ffffffff iifc OPERATION */ nv50_gr_construct_mmio_ddata()
894 dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_COUNT */ nv50_gr_construct_mmio_ddata()
895 dd_emit(ctx, 1, 0); /* ffffffff m2mf LINE_LENGTH_IN */ nv50_gr_construct_mmio_ddata()
896 dd_emit(ctx, 2, 0); /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */ nv50_gr_construct_mmio_ddata()
897 dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_OUT */ nv50_gr_construct_mmio_ddata()
898 dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_OUT */ nv50_gr_construct_mmio_ddata()
899 dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_OUT_Z */ nv50_gr_construct_mmio_ddata()
900 dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_OUT */ nv50_gr_construct_mmio_ddata()
901 dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */ nv50_gr_construct_mmio_ddata()
902 dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_OUT */ nv50_gr_construct_mmio_ddata()
903 dd_emit(ctx, 1, 1); /* ffffffff m2mf TILING_DEPTH_IN */ nv50_gr_construct_mmio_ddata()
904 dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_HEIGHT_IN */ nv50_gr_construct_mmio_ddata()
905 dd_emit(ctx, 1, 0); /* ffffffff m2mf TILING_POSITION_IN_Z */ nv50_gr_construct_mmio_ddata()
906 dd_emit(ctx, 1, 1); /* 00000001 m2mf LINEAR_IN */ nv50_gr_construct_mmio_ddata()
907 dd_emit(ctx, 2, 0); /* 0000ffff m2mf TILING_POSITION_IN_X, Y */ nv50_gr_construct_mmio_ddata()
908 dd_emit(ctx, 1, 0x100); /* ffffffff m2mf TILING_PITCH_IN */ nv50_gr_construct_mmio_ddata()
912 dd_emit(ctx, 1, 1); /* ffffffff line COLOR_FORMAT */ nv50_gr_construct_mmio_ddata()
913 dd_emit(ctx, 1, 0); /* ffffffff line OPERATION */ nv50_gr_construct_mmio_ddata()
915 dd_emit(ctx, 1, 1); /* ffffffff triangle COLOR_FORMAT */ nv50_gr_construct_mmio_ddata()
916 dd_emit(ctx, 1, 0); /* ffffffff triangle OPERATION */ nv50_gr_construct_mmio_ddata()
918 dd_emit(ctx, 1, 0); /* 0000000f sifm TILE_MODE_Z */ nv50_gr_construct_mmio_ddata()
919 dd_emit(ctx, 1, 2); /* 0000000f sifm TILE_MODE_Y */ nv50_gr_construct_mmio_ddata()
920 dd_emit(ctx, 1, 0); /* 000000ff sifm FORMAT_FILTER */ nv50_gr_construct_mmio_ddata()
921 dd_emit(ctx, 1, 1); /* 000000ff sifm FORMAT_ORIGIN */ nv50_gr_construct_mmio_ddata()
922 dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_PITCH */ nv50_gr_construct_mmio_ddata()
923 dd_emit(ctx, 1, 1); /* 00000001 sifm SRC_LINEAR */ nv50_gr_construct_mmio_ddata()
924 dd_emit(ctx, 1, 0); /* 000000ff sifm SRC_OFFSET_HIGH */ nv50_gr_construct_mmio_ddata()
925 dd_emit(ctx, 1, 0); /* ffffffff sifm SRC_OFFSET */ nv50_gr_construct_mmio_ddata()
926 dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_HEIGHT */ nv50_gr_construct_mmio_ddata()
927 dd_emit(ctx, 1, 0); /* 0000ffff sifm SRC_WIDTH */ nv50_gr_construct_mmio_ddata()
928 dd_emit(ctx, 1, 3); /* ffffffff sifm COLOR_FORMAT */ nv50_gr_construct_mmio_ddata()
929 dd_emit(ctx, 1, 0); /* ffffffff sifm OPERATION */ nv50_gr_construct_mmio_ddata()
931 dd_emit(ctx, 1, 0); /* ffffffff sifc OPERATION */ nv50_gr_construct_mmio_ddata()
935 dd_emit(ctx, 1, 0); /* 0000000f GP_TEXTURES_LOG2 */ nv50_gr_construct_mmio_ddata()
936 dd_emit(ctx, 1, 0); /* 0000000f GP_SAMPLERS_LOG2 */ nv50_gr_construct_mmio_ddata()
937 dd_emit(ctx, 1, 0); /* 000000ff */ nv50_gr_construct_mmio_ddata()
938 dd_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_mmio_ddata()
939 dd_emit(ctx, 1, 4); /* 000000ff UNK12B0_0 */ nv50_gr_construct_mmio_ddata()
940 dd_emit(ctx, 1, 0x70); /* 000000ff UNK12B0_1 */ nv50_gr_construct_mmio_ddata()
941 dd_emit(ctx, 1, 0x80); /* 000000ff UNK12B0_3 */ nv50_gr_construct_mmio_ddata()
942 dd_emit(ctx, 1, 0); /* 000000ff UNK12B0_2 */ nv50_gr_construct_mmio_ddata()
943 dd_emit(ctx, 1, 0); /* 0000000f FP_TEXTURES_LOG2 */ nv50_gr_construct_mmio_ddata()
944 dd_emit(ctx, 1, 0); /* 0000000f FP_SAMPLERS_LOG2 */ nv50_gr_construct_mmio_ddata()
946 dd_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_mmio_ddata()
947 dd_emit(ctx, 1, 0); /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_mmio_ddata()
949 dd_emit(ctx, 1, 0); /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_mmio_ddata()
951 dd_emit(ctx, 1, 0xc); /* 000000ff SEMANTIC_COLOR.BFC0_ID */ nv50_gr_construct_mmio_ddata()
953 dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_COLOR.CLMP_EN */ nv50_gr_construct_mmio_ddata()
954 dd_emit(ctx, 1, 8); /* 000000ff SEMANTIC_COLOR.COLR_NR */ nv50_gr_construct_mmio_ddata()
955 dd_emit(ctx, 1, 0x14); /* 000000ff SEMANTIC_COLOR.FFC0_ID */ nv50_gr_construct_mmio_ddata()
957 dd_emit(ctx, 1, 0); /* 000000ff SEMANTIC_LAYER */ nv50_gr_construct_mmio_ddata()
958 dd_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_mmio_ddata()
960 dd_emit(ctx, 1, 0); /* 00000001 SEMANTIC_PTSZ.ENABLE */ nv50_gr_construct_mmio_ddata()
961 dd_emit(ctx, 1, 0x29); /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */ nv50_gr_construct_mmio_ddata()
962 dd_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM */ nv50_gr_construct_mmio_ddata()
963 dd_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ nv50_gr_construct_mmio_ddata()
964 dd_emit(ctx, 1, 8); /* 0000000f SEMANTIC_CLIP.CLIP_HIGH */ nv50_gr_construct_mmio_ddata()
965 dd_emit(ctx, 1, 4); /* 000000ff SEMANTIC_CLIP.CLIP_LO */ nv50_gr_construct_mmio_ddata()
966 dd_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */ nv50_gr_construct_mmio_ddata()
967 dd_emit(ctx, 1, 0); /* 00000001 UNK1900 */ nv50_gr_construct_mmio_ddata()
969 dd_emit(ctx, 1, 0); /* 00000007 RT_CONTROL_MAP0 */ nv50_gr_construct_mmio_ddata()
970 dd_emit(ctx, 1, 1); /* 00000007 RT_CONTROL_MAP1 */ nv50_gr_construct_mmio_ddata()
971 dd_emit(ctx, 1, 2); /* 00000007 RT_CONTROL_MAP2 */ nv50_gr_construct_mmio_ddata()
972 dd_emit(ctx, 1, 3); /* 00000007 RT_CONTROL_MAP3 */ nv50_gr_construct_mmio_ddata()
973 dd_emit(ctx, 1, 4); /* 00000007 RT_CONTROL_MAP4 */ nv50_gr_construct_mmio_ddata()
974 dd_emit(ctx, 1, 5); /* 00000007 RT_CONTROL_MAP5 */ nv50_gr_construct_mmio_ddata()
975 dd_emit(ctx, 1, 6); /* 00000007 RT_CONTROL_MAP6 */ nv50_gr_construct_mmio_ddata()
976 dd_emit(ctx, 1, 7); /* 00000007 RT_CONTROL_MAP7 */ nv50_gr_construct_mmio_ddata()
977 dd_emit(ctx, 1, 1); /* 0000000f RT_CONTROL_COUNT */ nv50_gr_construct_mmio_ddata()
978 dd_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_UNK */ nv50_gr_construct_mmio_ddata()
979 dd_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */ nv50_gr_construct_mmio_ddata()
980 dd_emit(ctx, 1, 0xcf); /* 000000ff RT_FORMAT */ nv50_gr_construct_mmio_ddata()
981 dd_emit(ctx, 7, 0); /* 000000ff RT_FORMAT */ nv50_gr_construct_mmio_ddata()
983 dd_emit(ctx, 3, 0); /* 1, 1, 1 */ nv50_gr_construct_mmio_ddata()
985 dd_emit(ctx, 2, 0); /* 1, 1 */ nv50_gr_construct_mmio_ddata()
986 dd_emit(ctx, 1, 0); /* ffffffff GP_ENABLE */ nv50_gr_construct_mmio_ddata()
987 dd_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/ nv50_gr_construct_mmio_ddata()
988 dd_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ nv50_gr_construct_mmio_ddata()
989 dd_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_mmio_ddata()
991 dd_emit(ctx, 1, 3); /* 00000003 */ nv50_gr_construct_mmio_ddata()
992 dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */ nv50_gr_construct_mmio_ddata()
995 dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */ nv50_gr_construct_mmio_ddata()
996 dd_emit(ctx, 1, 1); /* ffffffff RASTERIZE_ENABLE */ nv50_gr_construct_mmio_ddata()
997 dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.EXPORTS_Z */ nv50_gr_construct_mmio_ddata()
999 dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */ nv50_gr_construct_mmio_ddata()
1000 dd_emit(ctx, 1, 0x12); /* 000000ff FP_INTERPOLANT_CTRL.COUNT */ nv50_gr_construct_mmio_ddata()
1001 dd_emit(ctx, 1, 0x10); /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */ nv50_gr_construct_mmio_ddata()
1002 dd_emit(ctx, 1, 0xc); /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */ nv50_gr_construct_mmio_ddata()
1003 dd_emit(ctx, 1, 1); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */ nv50_gr_construct_mmio_ddata()
1004 dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */ nv50_gr_construct_mmio_ddata()
1005 dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */ nv50_gr_construct_mmio_ddata()
1006 dd_emit(ctx, 1, 0); /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */ nv50_gr_construct_mmio_ddata()
1007 dd_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ nv50_gr_construct_mmio_ddata()
1008 dd_emit(ctx, 1, 2); /* ffffffff REG_MODE */ nv50_gr_construct_mmio_ddata()
1009 dd_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ nv50_gr_construct_mmio_ddata()
1011 dd_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_mmio_ddata()
1012 dd_emit(ctx, 1, 0); /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */ nv50_gr_construct_mmio_ddata()
1013 dd_emit(ctx, 1, 0); /* ffffffff STRMOUT_ENABLE */ nv50_gr_construct_mmio_ddata()
1014 dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ nv50_gr_construct_mmio_ddata()
1015 dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ nv50_gr_construct_mmio_ddata()
1016 dd_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE*/ nv50_gr_construct_mmio_ddata()
1018 dd_emit(ctx, 8, 0); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1020 dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */ nv50_gr_construct_mmio_ddata()
1021 dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */ nv50_gr_construct_mmio_ddata()
1022 dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */ nv50_gr_construct_mmio_ddata()
1023 dd_emit(ctx, 1, 0); /* 000000ff VTX_ATTR_DEFINE.ATTR */ nv50_gr_construct_mmio_ddata()
1025 dd_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ nv50_gr_construct_mmio_ddata()
1026 dd_emit(ctx, 1, 0x14); /* 0000001f ZETA_FORMAT */ nv50_gr_construct_mmio_ddata()
1027 dd_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_mmio_ddata()
1028 dd_emit(ctx, 1, 0); /* 0000000f VP_TEXTURES_LOG2 */ nv50_gr_construct_mmio_ddata()
1029 dd_emit(ctx, 1, 0); /* 0000000f VP_SAMPLERS_LOG2 */ nv50_gr_construct_mmio_ddata()
1031 dd_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1032 dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_BACK */ nv50_gr_construct_mmio_ddata()
1034 dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */ nv50_gr_construct_mmio_ddata()
1035 dd_emit(ctx, 1, 0); /* 0000ffff CB_ADDR_INDEX */ nv50_gr_construct_mmio_ddata()
1037 dd_emit(ctx, 1, 0); /* 00000003 */ nv50_gr_construct_mmio_ddata()
1038 dd_emit(ctx, 1, 0); /* 00000001 CULL_FACE_ENABLE */ nv50_gr_construct_mmio_ddata()
1039 dd_emit(ctx, 1, 1); /* 00000003 CULL_FACE */ nv50_gr_construct_mmio_ddata()
1040 dd_emit(ctx, 1, 0); /* 00000001 FRONT_FACE */ nv50_gr_construct_mmio_ddata()
1041 dd_emit(ctx, 1, 2); /* 00000003 POLYGON_MODE_FRONT */ nv50_gr_construct_mmio_ddata()
1042 dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */ nv50_gr_construct_mmio_ddata()
1044 dd_emit(ctx, 1, 0xe00); /* 7fff */ nv50_gr_construct_mmio_ddata()
1045 dd_emit(ctx, 1, 0x1000); /* 7fff */ nv50_gr_construct_mmio_ddata()
1046 dd_emit(ctx, 1, 0x1e00); /* 7fff */ nv50_gr_construct_mmio_ddata()
1048 dd_emit(ctx, 1, 0); /* 00000001 BEGIN_END_ACTIVE */ nv50_gr_construct_mmio_ddata()
1049 dd_emit(ctx, 1, 1); /* 00000001 POLYGON_MODE_??? */ nv50_gr_construct_mmio_ddata()
1050 dd_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */ nv50_gr_construct_mmio_ddata()
1051 dd_emit(ctx, 1, 1); /* 000000ff FP_REG_ALLOC_TEMP... without /4? */ nv50_gr_construct_mmio_ddata()
1052 dd_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */ nv50_gr_construct_mmio_ddata()
1053 dd_emit(ctx, 1, 1); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1054 dd_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1055 dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */ nv50_gr_construct_mmio_ddata()
1056 dd_emit(ctx, 1, 0); /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */ nv50_gr_construct_mmio_ddata()
1057 dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */ nv50_gr_construct_mmio_ddata()
1059 dd_emit(ctx, 1, 0x200); nv50_gr_construct_mmio_ddata()
1060 dd_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1062 dd_emit(ctx, 1, 1); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1063 dd_emit(ctx, 1, 0x70); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1064 dd_emit(ctx, 1, 0x80); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1065 dd_emit(ctx, 1, 0); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1066 dd_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1067 dd_emit(ctx, 1, 1); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1068 dd_emit(ctx, 1, 0x70); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1069 dd_emit(ctx, 1, 0x80); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1070 dd_emit(ctx, 1, 0); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1072 dd_emit(ctx, 1, 1); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1073 dd_emit(ctx, 1, 0xf0); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1074 dd_emit(ctx, 1, 0xff); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1075 dd_emit(ctx, 1, 0); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1076 dd_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1077 dd_emit(ctx, 1, 1); /* 00000001 */ nv50_gr_construct_mmio_ddata()
1078 dd_emit(ctx, 1, 0xf0); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1079 dd_emit(ctx, 1, 0xff); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1080 dd_emit(ctx, 1, 0); /* 000000ff */ nv50_gr_construct_mmio_ddata()
1081 dd_emit(ctx, 1, 9); /* 0000003f UNK114C.COMP,SIZE */ nv50_gr_construct_mmio_ddata()
1085 dd_emit(ctx, 1, 0); /* 00000001 eng2d COLOR_KEY_ENABLE */ nv50_gr_construct_mmio_ddata()
1086 dd_emit(ctx, 1, 0); /* 00000007 eng2d COLOR_KEY_FORMAT */ nv50_gr_construct_mmio_ddata()
1087 dd_emit(ctx, 1, 1); /* ffffffff eng2d DST_DEPTH */ nv50_gr_construct_mmio_ddata()
1088 dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DST_FORMAT */ nv50_gr_construct_mmio_ddata()
1089 dd_emit(ctx, 1, 0); /* ffffffff eng2d DST_LAYER */ nv50_gr_construct_mmio_ddata()
1090 dd_emit(ctx, 1, 1); /* 00000001 eng2d DST_LINEAR */ nv50_gr_construct_mmio_ddata()
1091 dd_emit(ctx, 1, 0); /* 00000007 eng2d PATTERN_COLOR_FORMAT */ nv50_gr_construct_mmio_ddata()
1092 dd_emit(ctx, 1, 0); /* 00000007 eng2d OPERATION */ nv50_gr_construct_mmio_ddata()
1093 dd_emit(ctx, 1, 0); /* 00000003 eng2d PATTERN_SELECT */ nv50_gr_construct_mmio_ddata()
1094 dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SIFC_FORMAT */ nv50_gr_construct_mmio_ddata()
1095 dd_emit(ctx, 1, 0); /* 00000001 eng2d SIFC_BITMAP_ENABLE */ nv50_gr_construct_mmio_ddata()
1096 dd_emit(ctx, 1, 2); /* 00000003 eng2d SIFC_BITMAP_UNK808 */ nv50_gr_construct_mmio_ddata()
1097 dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DU_DX_FRACT */ nv50_gr_construct_mmio_ddata()
1098 dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DU_DX_INT */ nv50_gr_construct_mmio_ddata()
1099 dd_emit(ctx, 1, 0); /* ffffffff eng2d BLIT_DV_DY_FRACT */ nv50_gr_construct_mmio_ddata()
1100 dd_emit(ctx, 1, 1); /* ffffffff eng2d BLIT_DV_DY_INT */ nv50_gr_construct_mmio_ddata()
1101 dd_emit(ctx, 1, 0); /* 00000001 eng2d BLIT_CONTROL_FILTER */ nv50_gr_construct_mmio_ddata()
1102 dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d DRAW_COLOR_FORMAT */ nv50_gr_construct_mmio_ddata()
1103 dd_emit(ctx, 1, 0xcf); /* 000000ff eng2d SRC_FORMAT */ nv50_gr_construct_mmio_ddata()
1104 dd_emit(ctx, 1, 1); /* 00000001 eng2d SRC_LINEAR #2 */ nv50_gr_construct_mmio_ddata()
1106 num = ctx->ctxvals_pos - base; nv50_gr_construct_mmio_ddata()
1107 ctx->ctxvals_pos = base; nv50_gr_construct_mmio_ddata()
1109 cp_ctx(ctx, 0x404800, num); nv50_gr_construct_mmio_ddata()
1111 cp_ctx(ctx, 0x405400, num); nv50_gr_construct_mmio_ddata()
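The tail of nv50_gr_construct_mmio_ddata() above shows the sizing idiom used throughout this file: every default is emitted once while ctxvals_pos advances, then the cursor is rewound to base and the accumulated word count is registered for both MMIO windows (0x404800 and 0x405400). A minimal stand-alone sketch of that bookkeeping; every name here is a hypothetical mock, not the nvkm API:

#include <stdint.h>
#include <stdio.h>

struct mock_grctx { uint32_t ctxvals_pos; };

/* ddata values are packed with stride 1, so the cursor doubles as the count */
static void mock_dd_emit(struct mock_grctx *ctx, int num, uint32_t val)
{
    (void)val;                /* the value is irrelevant to the sizing */
    ctx->ctxvals_pos += num;
}

int main(void)
{
    struct mock_grctx ctx = { .ctxvals_pos = 0x80 };
    uint32_t base = ctx.ctxvals_pos;

    mock_dd_emit(&ctx, 1, 0);     /* FRONT_FACE */
    mock_dd_emit(&ctx, 1, 2);     /* POLYGON_MODE_FRONT */
    /* ... dozens more dd_emit calls ... */

    uint32_t num = ctx.ctxvals_pos - base;  /* words actually emitted */
    ctx.ctxvals_pos = base;                 /* rewind before mapping */
    printf("map %u words at 0x404800 and again at 0x405400\n",
           (unsigned)num);
    return 0;
}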
1157 xf_emit(struct nvkm_grctx *ctx, int num, u32 val) { xf_emit() argument
1159 if (val && ctx->mode == NVKM_GRCTX_VALS) xf_emit()
1161 nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val); xf_emit()
1162 ctx->ctxvals_pos += num << 3; xf_emit()
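xf_emit() above is the workhorse for the transfer areas (the loop header on the intervening source line does not contain "ctx", so it is absent from this match list). A hypothetical stand-alone re-implementation showing the two properties the gene builders below rely on: zero values merely reserve space, since the context image starts zeroed, and consecutive values land 8 words apart, so a pure sizing pass and a value-writing pass advance the cursor identically:

#include <stdint.h>
#include <stdio.h>

enum mock_mode { MOCK_GRCTX_PROG, MOCK_GRCTX_VALS };

struct mock_grctx {
    enum mock_mode mode;
    uint32_t ctxvals_pos;        /* cursor, in 32-bit words */
    uint32_t image[1 << 16];     /* stand-in for the context image */
};

static void mock_xf_emit(struct mock_grctx *ctx, int num, uint32_t val)
{
    if (val && ctx->mode == MOCK_GRCTX_VALS)
        for (int i = 0; i < num; i++)      /* 8-word strand interleave */
            ctx->image[ctx->ctxvals_pos + (i << 3)] = val;
    ctx->ctxvals_pos += num << 3;          /* advance even when val == 0 */
}

int main(void)
{
    static struct mock_grctx ctx = { .mode = MOCK_GRCTX_VALS };
    mock_xf_emit(&ctx, 2, 0x80c14);        /* two copies, 8 words apart */
    printf("pos=%u image[0]=%#x image[8]=%#x\n",
           (unsigned)ctx.ctxvals_pos,
           (unsigned)ctx.image[0], (unsigned)ctx.image[8]);
    return 0;
}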
1167 static void nv50_gr_construct_gene_dispatch(struct nvkm_grctx *ctx);
1168 static void nv50_gr_construct_gene_m2mf(struct nvkm_grctx *ctx);
1169 static void nv50_gr_construct_gene_ccache(struct nvkm_grctx *ctx);
1170 static void nv50_gr_construct_gene_unk10xx(struct nvkm_grctx *ctx);
1171 static void nv50_gr_construct_gene_unk14xx(struct nvkm_grctx *ctx);
1172 static void nv50_gr_construct_gene_zcull(struct nvkm_grctx *ctx);
1173 static void nv50_gr_construct_gene_clipid(struct nvkm_grctx *ctx);
1174 static void nv50_gr_construct_gene_unk24xx(struct nvkm_grctx *ctx);
1175 static void nv50_gr_construct_gene_vfetch(struct nvkm_grctx *ctx);
1176 static void nv50_gr_construct_gene_eng2d(struct nvkm_grctx *ctx);
1177 static void nv50_gr_construct_gene_csched(struct nvkm_grctx *ctx);
1178 static void nv50_gr_construct_gene_unk1cxx(struct nvkm_grctx *ctx);
1179 static void nv50_gr_construct_gene_strmout(struct nvkm_grctx *ctx);
1180 static void nv50_gr_construct_gene_unk34xx(struct nvkm_grctx *ctx);
1181 static void nv50_gr_construct_gene_ropm1(struct nvkm_grctx *ctx);
1182 static void nv50_gr_construct_gene_ropm2(struct nvkm_grctx *ctx);
1183 static void nv50_gr_construct_gene_ropc(struct nvkm_grctx *ctx);
1184 static void nv50_gr_construct_xfer_tp(struct nvkm_grctx *ctx);
1187 nv50_gr_construct_xfer1(struct nvkm_grctx *ctx) nv50_gr_construct_xfer1() argument
1189 struct nvkm_device *device = ctx->device; nv50_gr_construct_xfer1()
1193 u32 units = nv_rd32 (ctx->device, 0x1540); nv50_gr_construct_xfer1()
1195 offset = (ctx->ctxvals_pos+0x3f)&~0x3f; nv50_gr_construct_xfer1()
1196 ctx->ctxvals_base = offset; nv50_gr_construct_xfer1()
1200 ctx->ctxvals_pos = offset; nv50_gr_construct_xfer1()
1201 nv50_gr_construct_gene_dispatch(ctx); nv50_gr_construct_xfer1()
1202 nv50_gr_construct_gene_m2mf(ctx); nv50_gr_construct_xfer1()
1203 nv50_gr_construct_gene_unk24xx(ctx); nv50_gr_construct_xfer1()
1204 nv50_gr_construct_gene_clipid(ctx); nv50_gr_construct_xfer1()
1205 nv50_gr_construct_gene_zcull(ctx); nv50_gr_construct_xfer1()
1206 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1207 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1210 ctx->ctxvals_pos = offset + 0x1; nv50_gr_construct_xfer1()
1211 nv50_gr_construct_gene_vfetch(ctx); nv50_gr_construct_xfer1()
1212 nv50_gr_construct_gene_eng2d(ctx); nv50_gr_construct_xfer1()
1213 nv50_gr_construct_gene_csched(ctx); nv50_gr_construct_xfer1()
1214 nv50_gr_construct_gene_ropm1(ctx); nv50_gr_construct_xfer1()
1215 nv50_gr_construct_gene_ropm2(ctx); nv50_gr_construct_xfer1()
1216 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1217 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1220 ctx->ctxvals_pos = offset + 0x2; nv50_gr_construct_xfer1()
1221 nv50_gr_construct_gene_ccache(ctx); nv50_gr_construct_xfer1()
1222 nv50_gr_construct_gene_unk1cxx(ctx); nv50_gr_construct_xfer1()
1223 nv50_gr_construct_gene_strmout(ctx); nv50_gr_construct_xfer1()
1224 nv50_gr_construct_gene_unk14xx(ctx); nv50_gr_construct_xfer1()
1225 nv50_gr_construct_gene_unk10xx(ctx); nv50_gr_construct_xfer1()
1226 nv50_gr_construct_gene_unk34xx(ctx); nv50_gr_construct_xfer1()
1227 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1228 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1231 ctx->ctxvals_pos = offset + 3; nv50_gr_construct_xfer1()
1234 nv50_gr_construct_gene_ropc(ctx); nv50_gr_construct_xfer1()
1235 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1236 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1240 ctx->ctxvals_pos = offset + 4 + i; nv50_gr_construct_xfer1()
1242 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1244 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1245 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1246 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1250 ctx->ctxvals_pos = offset; nv50_gr_construct_xfer1()
1251 nv50_gr_construct_gene_dispatch(ctx); nv50_gr_construct_xfer1()
1252 nv50_gr_construct_gene_m2mf(ctx); nv50_gr_construct_xfer1()
1253 nv50_gr_construct_gene_unk34xx(ctx); nv50_gr_construct_xfer1()
1254 nv50_gr_construct_gene_csched(ctx); nv50_gr_construct_xfer1()
1255 nv50_gr_construct_gene_unk1cxx(ctx); nv50_gr_construct_xfer1()
1256 nv50_gr_construct_gene_strmout(ctx); nv50_gr_construct_xfer1()
1257 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1258 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1261 ctx->ctxvals_pos = offset + 1; nv50_gr_construct_xfer1()
1262 nv50_gr_construct_gene_unk10xx(ctx); nv50_gr_construct_xfer1()
1263 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1264 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1267 ctx->ctxvals_pos = offset + 2; nv50_gr_construct_xfer1()
1269 nv50_gr_construct_gene_unk14xx(ctx); nv50_gr_construct_xfer1()
1270 nv50_gr_construct_gene_unk24xx(ctx); nv50_gr_construct_xfer1()
1271 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1272 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1275 ctx->ctxvals_pos = offset + 3; nv50_gr_construct_xfer1()
1276 nv50_gr_construct_gene_vfetch(ctx); nv50_gr_construct_xfer1()
1277 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1278 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1281 ctx->ctxvals_pos = offset + 4; nv50_gr_construct_xfer1()
1282 nv50_gr_construct_gene_ccache(ctx); nv50_gr_construct_xfer1()
1283 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1284 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1287 ctx->ctxvals_pos = offset + 5; nv50_gr_construct_xfer1()
1288 nv50_gr_construct_gene_ropm2(ctx); nv50_gr_construct_xfer1()
1289 nv50_gr_construct_gene_ropm1(ctx); nv50_gr_construct_xfer1()
1293 nv50_gr_construct_gene_ropc(ctx); nv50_gr_construct_xfer1()
1294 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1295 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1298 ctx->ctxvals_pos = offset + 6; nv50_gr_construct_xfer1()
1299 nv50_gr_construct_gene_zcull(ctx); nv50_gr_construct_xfer1()
1300 nv50_gr_construct_gene_clipid(ctx); nv50_gr_construct_xfer1()
1301 nv50_gr_construct_gene_eng2d(ctx); nv50_gr_construct_xfer1()
1303 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1305 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1307 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1309 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1310 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1311 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1314 ctx->ctxvals_pos = offset + 7; nv50_gr_construct_xfer1()
1317 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1319 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1321 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1323 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1325 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1327 nv50_gr_construct_xfer_tp(ctx); nv50_gr_construct_xfer1()
1329 nv50_gr_construct_gene_unk14xx(ctx); nv50_gr_construct_xfer1()
1331 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer1()
1332 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer1()
1335 ctx->ctxvals_pos = offset + size * 8; nv50_gr_construct_xfer1()
1336 ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; nv50_gr_construct_xfer1()
1337 cp_lsr (ctx, offset); nv50_gr_construct_xfer1()
1338 cp_out (ctx, CP_SET_XFER_POINTER); nv50_gr_construct_xfer1()
1339 cp_lsr (ctx, size); nv50_gr_construct_xfer1()
1340 cp_out (ctx, CP_SEEK_1); nv50_gr_construct_xfer1()
1341 cp_out (ctx, CP_XFER_1); nv50_gr_construct_xfer1()
1342 cp_wait(ctx, XFER, BUSY); nv50_gr_construct_xfer1()
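nv50_gr_construct_xfer1() above builds eight interleaved strands: strand s is laid down starting at offset + s, every gene advances the cursor in 8-word steps, and the running maximum of (ctxvals_pos - offset)/8 becomes the transfer size fed to CP_SEEK_1/CP_XFER_1, with the final cursor realigned to a 0x40-word boundary. A hedged sketch of just that arithmetic, using made-up strand end positions:

#include <stdint.h>
#include <stdio.h>

/* widest strand, in 8-word units, over hypothetical end positions */
static uint32_t widest_strand(const uint32_t end_pos[8], uint32_t offset)
{
    uint32_t size = 0;
    for (int s = 0; s < 8; s++) {
        uint32_t words = (end_pos[s] - offset) / 8;
        if (words > size)
            size = words;
    }
    return size;
}

int main(void)
{
    uint32_t offset = (0x123 + 0x3f) & ~(uint32_t)0x3f;  /* same alignment */
    uint32_t end[8] = { 0x9f0, 0xa01, 0x8c2, 0x7a3,
                        0xb44, 0x985, 0x746, 0x667 };    /* invented */
    uint32_t size = widest_strand(end, offset);
    uint32_t pos  = (offset + size * 8 + 0x3f) & ~(uint32_t)0x3f;
    printf("offset=%#x size=%u next pos=%#x\n",
           (unsigned)offset, (unsigned)size, (unsigned)pos);
    return 0;
}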
1346 * non-trivial demagicked parts of ctx init go here nv50_gr_construct_xfer1()
1350 nv50_gr_construct_gene_dispatch(struct nvkm_grctx *ctx) nv50_gr_construct_gene_dispatch() argument
1353 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_dispatch()
1356 xf_emit(ctx, 5, 0); nv50_gr_construct_gene_dispatch()
1358 xf_emit(ctx, 6, 0); nv50_gr_construct_gene_dispatch()
1360 xf_emit(ctx, 4, 0); nv50_gr_construct_gene_dispatch()
1364 xf_emit(ctx, 8*3, 0); nv50_gr_construct_gene_dispatch()
1366 xf_emit(ctx, 0x100*3, 0); nv50_gr_construct_gene_dispatch()
1368 xf_emit(ctx, 3, 0); nv50_gr_construct_gene_dispatch()
1371 xf_emit(ctx, 3, 0); nv50_gr_construct_gene_dispatch()
1374 xf_emit(ctx, 9, 0); nv50_gr_construct_gene_dispatch()
1376 xf_emit(ctx, 9, 0); nv50_gr_construct_gene_dispatch()
1378 xf_emit(ctx, 9, 0); nv50_gr_construct_gene_dispatch()
1380 xf_emit(ctx, 9, 0); nv50_gr_construct_gene_dispatch()
1383 xf_emit(ctx, 4, 0); nv50_gr_construct_gene_dispatch()
1385 xf_emit(ctx, 2, 0); nv50_gr_construct_gene_dispatch()
1387 xf_emit(ctx, 6*2, 0); nv50_gr_construct_gene_dispatch()
1388 xf_emit(ctx, 2, 0); nv50_gr_construct_gene_dispatch()
1390 xf_emit(ctx, 2, 0); nv50_gr_construct_gene_dispatch()
1392 xf_emit(ctx, 6*2, 0); nv50_gr_construct_gene_dispatch()
1393 xf_emit(ctx, 2, 0); nv50_gr_construct_gene_dispatch()
1396 xf_emit(ctx, 0x1c, 0); nv50_gr_construct_gene_dispatch()
1398 xf_emit(ctx, 0x1e, 0); nv50_gr_construct_gene_dispatch()
1400 xf_emit(ctx, 0x22, 0); nv50_gr_construct_gene_dispatch()
1402 xf_emit(ctx, 0x15, 0); nv50_gr_construct_gene_dispatch()
1406 nv50_gr_construct_gene_m2mf(struct nvkm_grctx *ctx) nv50_gr_construct_gene_m2mf() argument
1409 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_m2mf()
1414 xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */ nv50_gr_construct_gene_m2mf()
1415 xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */ nv50_gr_construct_gene_m2mf()
1416 xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */ nv50_gr_construct_gene_m2mf()
1417 xf_emit (ctx, 1, 0); /* OFFSET_IN */ nv50_gr_construct_gene_m2mf()
1418 xf_emit (ctx, 1, 0); /* OFFSET_OUT */ nv50_gr_construct_gene_m2mf()
1419 xf_emit (ctx, 1, 0); /* PITCH_IN */ nv50_gr_construct_gene_m2mf()
1420 xf_emit (ctx, 1, 0); /* PITCH_OUT */ nv50_gr_construct_gene_m2mf()
1421 xf_emit (ctx, 1, 0); /* LINE_LENGTH */ nv50_gr_construct_gene_m2mf()
1422 xf_emit (ctx, 1, 0); /* LINE_COUNT */ nv50_gr_construct_gene_m2mf()
1423 xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */ nv50_gr_construct_gene_m2mf()
1424 xf_emit (ctx, 1, 1); /* LINEAR_IN */ nv50_gr_construct_gene_m2mf()
1425 xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */ nv50_gr_construct_gene_m2mf()
1426 xf_emit (ctx, 1, 0x100); /* TILING_PITCH_IN */ nv50_gr_construct_gene_m2mf()
1427 xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_IN */ nv50_gr_construct_gene_m2mf()
1428 xf_emit (ctx, 1, 1); /* TILING_DEPTH_IN */ nv50_gr_construct_gene_m2mf()
1429 xf_emit (ctx, 1, 0); /* TILING_POSITION_IN_Z */ nv50_gr_construct_gene_m2mf()
1430 xf_emit (ctx, 1, 0); /* TILING_POSITION_IN */ nv50_gr_construct_gene_m2mf()
1431 xf_emit (ctx, 1, 1); /* LINEAR_OUT */ nv50_gr_construct_gene_m2mf()
1432 xf_emit (ctx, 1, 0x2); /* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */ nv50_gr_construct_gene_m2mf()
1433 xf_emit (ctx, 1, 0x100); /* TILING_PITCH_OUT */ nv50_gr_construct_gene_m2mf()
1434 xf_emit (ctx, 1, 0x100); /* TILING_HEIGHT_OUT */ nv50_gr_construct_gene_m2mf()
1435 xf_emit (ctx, 1, 1); /* TILING_DEPTH_OUT */ nv50_gr_construct_gene_m2mf()
1436 xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT_Z */ nv50_gr_construct_gene_m2mf()
1437 xf_emit (ctx, 1, 0); /* TILING_POSITION_OUT */ nv50_gr_construct_gene_m2mf()
1438 xf_emit (ctx, 1, 0); /* OFFSET_IN_HIGH */ nv50_gr_construct_gene_m2mf()
1439 xf_emit (ctx, 1, 0); /* OFFSET_OUT_HIGH */ nv50_gr_construct_gene_m2mf()
1442 xf_emit(ctx, 0x40, 0); /* 20 * ffffffff, 3ffff */ nv50_gr_construct_gene_m2mf()
1444 xf_emit(ctx, 0x100, 0); /* 80 * ffffffff, 3ffff */ nv50_gr_construct_gene_m2mf()
1445 xf_emit(ctx, 4, 0); /* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */ nv50_gr_construct_gene_m2mf()
1448 xf_emit(ctx, 0x400, 0); /* ffffffff */ nv50_gr_construct_gene_m2mf()
1450 xf_emit(ctx, 0x800, 0); /* ffffffff */ nv50_gr_construct_gene_m2mf()
1451 xf_emit(ctx, 4, 0); /* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */ nv50_gr_construct_gene_m2mf()
1453 xf_emit(ctx, 0x40, 0); /* 20 * ffffffff, 3ffff */ nv50_gr_construct_gene_m2mf()
1454 xf_emit(ctx, 0x6, 0); /* 1f, 0, 1f, 0, 1f, 0 */ nv50_gr_construct_gene_m2mf()
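One decodable default in the m2mf gene above: FORMAT resets to 0x21 and, per the comment, bits 0-4 are INPUT_INC while bits 5-9 are OUTPUT_INC, so both increments default to 1. A tiny check (field names taken from the comment; the helper itself is illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t format = 0x21;
    uint32_t input_inc  = format & 0x1f;         /* bits 0-4 */
    uint32_t output_inc = (format >> 5) & 0x1f;  /* bits 5-9 */
    printf("INPUT_INC=%u OUTPUT_INC=%u\n",       /* both print 1 */
           (unsigned)input_inc, (unsigned)output_inc);
    return 0;
}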
1458 nv50_gr_construct_gene_ccache(struct nvkm_grctx *ctx) nv50_gr_construct_gene_ccache() argument
1460 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_ccache()
1461 xf_emit(ctx, 2, 0); /* RO */ nv50_gr_construct_gene_ccache()
1462 xf_emit(ctx, 0x800, 0); /* ffffffff */ nv50_gr_construct_gene_ccache()
1467 xf_emit(ctx, 0x2b, 0); nv50_gr_construct_gene_ccache()
1470 xf_emit(ctx, 0x29, 0); nv50_gr_construct_gene_ccache()
1475 xf_emit(ctx, 0x27, 0); nv50_gr_construct_gene_ccache()
1484 xf_emit(ctx, 0x25, 0); nv50_gr_construct_gene_ccache()
1489 xf_emit(ctx, 0x100, 0); /* ffffffff CB_DEF */ nv50_gr_construct_gene_ccache()
1490 xf_emit(ctx, 1, 0); /* 0000007f CB_ADDR_BUFFER */ nv50_gr_construct_gene_ccache()
1491 xf_emit(ctx, 1, 0); /* 0 */ nv50_gr_construct_gene_ccache()
1492 xf_emit(ctx, 0x30, 0); /* ff SET_PROGRAM_CB */ nv50_gr_construct_gene_ccache()
1493 xf_emit(ctx, 1, 0); /* 3f last SET_PROGRAM_CB */ nv50_gr_construct_gene_ccache()
1494 xf_emit(ctx, 4, 0); /* RO */ nv50_gr_construct_gene_ccache()
1495 xf_emit(ctx, 0x100, 0); /* ffffffff */ nv50_gr_construct_gene_ccache()
1496 xf_emit(ctx, 8, 0); /* 1f, 0, 0, ... */ nv50_gr_construct_gene_ccache()
1497 xf_emit(ctx, 8, 0); /* ffffffff */ nv50_gr_construct_gene_ccache()
1498 xf_emit(ctx, 4, 0); /* ffffffff */ nv50_gr_construct_gene_ccache()
1499 xf_emit(ctx, 1, 0); /* 3 */ nv50_gr_construct_gene_ccache()
1500 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_ccache()
1501 xf_emit(ctx, 1, 0); /* 0000ffff DMA_CODE_CB */ nv50_gr_construct_gene_ccache()
1502 xf_emit(ctx, 1, 0); /* 0000ffff DMA_TIC */ nv50_gr_construct_gene_ccache()
1503 xf_emit(ctx, 1, 0); /* 0000ffff DMA_TSC */ nv50_gr_construct_gene_ccache()
1504 xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ nv50_gr_construct_gene_ccache()
1505 xf_emit(ctx, 1, 0); /* 000000ff TIC_ADDRESS_HIGH */ nv50_gr_construct_gene_ccache()
1506 xf_emit(ctx, 1, 0); /* ffffffff TIC_ADDRESS_LOW */ nv50_gr_construct_gene_ccache()
1507 xf_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */ nv50_gr_construct_gene_ccache()
1508 xf_emit(ctx, 1, 0); /* 000000ff TSC_ADDRESS_HIGH */ nv50_gr_construct_gene_ccache()
1509 xf_emit(ctx, 1, 0); /* ffffffff TSC_ADDRESS_LOW */ nv50_gr_construct_gene_ccache()
1510 xf_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */ nv50_gr_construct_gene_ccache()
1511 xf_emit(ctx, 1, 0); /* 000000ff VP_ADDRESS_HIGH */ nv50_gr_construct_gene_ccache()
1512 xf_emit(ctx, 1, 0); /* ffffffff VP_ADDRESS_LOW */ nv50_gr_construct_gene_ccache()
1513 xf_emit(ctx, 1, 0); /* 00ffffff VP_START_ID */ nv50_gr_construct_gene_ccache()
1514 xf_emit(ctx, 1, 0); /* 000000ff CB_DEF_ADDRESS_HIGH */ nv50_gr_construct_gene_ccache()
1515 xf_emit(ctx, 1, 0); /* ffffffff CB_DEF_ADDRESS_LOW */ nv50_gr_construct_gene_ccache()
1516 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_ccache()
1517 xf_emit(ctx, 1, 0); /* 000000ff GP_ADDRESS_HIGH */ nv50_gr_construct_gene_ccache()
1518 xf_emit(ctx, 1, 0); /* ffffffff GP_ADDRESS_LOW */ nv50_gr_construct_gene_ccache()
1519 xf_emit(ctx, 1, 0); /* 00ffffff GP_START_ID */ nv50_gr_construct_gene_ccache()
1520 xf_emit(ctx, 1, 0); /* 000000ff FP_ADDRESS_HIGH */ nv50_gr_construct_gene_ccache()
1521 xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */ nv50_gr_construct_gene_ccache()
1522 xf_emit(ctx, 1, 0); /* 00ffffff FP_START_ID */ nv50_gr_construct_gene_ccache()
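The ccache gene above resets TIC_LIMIT to 0x3fffff and TSC_LIMIT to 0x1fff. Reading each LIMIT as the index of the last valid table entry (an assumption consistent with the register masks, not a documented fact) gives the default texture-image and sampler table sizes:

#include <stdio.h>

int main(void)
{
    unsigned tic_limit = 0x3fffff, tsc_limit = 0x1fff;
    /* assumed: LIMIT = last valid index, so entries = limit + 1 */
    printf("TIC entries: %#x\n", tic_limit + 1);  /* 0x400000 */
    printf("TSC entries: %#x\n", tsc_limit + 1);  /* 0x2000 */
    return 0;
}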
1526 nv50_gr_construct_gene_unk10xx(struct nvkm_grctx *ctx) nv50_gr_construct_gene_unk10xx() argument
1528 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_unk10xx()
1531 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk10xx()
1532 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk10xx()
1533 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_unk10xx()
1534 xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ nv50_gr_construct_gene_unk10xx()
1535 xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ nv50_gr_construct_gene_unk10xx()
1536 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ nv50_gr_construct_gene_unk10xx()
1537 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ nv50_gr_construct_gene_unk10xx()
1539 xf_emit(ctx, 1, 0x3ff); nv50_gr_construct_gene_unk10xx()
1541 xf_emit(ctx, 1, 0x7ff); /* 000007ff */ nv50_gr_construct_gene_unk10xx()
1542 xf_emit(ctx, 1, 0); /* 111/113 */ nv50_gr_construct_gene_unk10xx()
1543 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_unk10xx()
1551 xf_emit(ctx, 0xa0, 0); /* ffffffff */ nv50_gr_construct_gene_unk10xx()
1557 xf_emit(ctx, 0x120, 0); nv50_gr_construct_gene_unk10xx()
1561 xf_emit(ctx, 0x100, 0); /* ffffffff */ nv50_gr_construct_gene_unk10xx()
1566 xf_emit(ctx, 0x400, 0); /* ffffffff */ nv50_gr_construct_gene_unk10xx()
1569 xf_emit(ctx, 4, 0); /* 3f, 0, 0, 0 */ nv50_gr_construct_gene_unk10xx()
1570 xf_emit(ctx, 4, 0); /* ffffffff */ nv50_gr_construct_gene_unk10xx()
1572 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk10xx()
1573 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk10xx()
1574 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_unk10xx()
1575 xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ nv50_gr_construct_gene_unk10xx()
1576 xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_TEMP */ nv50_gr_construct_gene_unk10xx()
1577 xf_emit(ctx, 1, 1); /* 00000001 RASTERIZE_ENABLE */ nv50_gr_construct_gene_unk10xx()
1578 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ nv50_gr_construct_gene_unk10xx()
1579 xf_emit(ctx, 1, 0x27); /* 000000ff UNK0FD4 */ nv50_gr_construct_gene_unk10xx()
1580 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ nv50_gr_construct_gene_unk10xx()
1581 xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ nv50_gr_construct_gene_unk10xx()
1582 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_unk10xx()
1586 nv50_gr_construct_gene_unk34xx(struct nvkm_grctx *ctx) nv50_gr_construct_gene_unk34xx() argument
1588 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_unk34xx()
1590 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ nv50_gr_construct_gene_unk34xx()
1591 xf_emit(ctx, 1, 0); /* 00000003 VIEWPORT_CLIP_MODE */ nv50_gr_construct_gene_unk34xx()
1592 xf_emit(ctx, 0x10, 0x04000000); /* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */ nv50_gr_construct_gene_unk34xx()
1593 xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */ nv50_gr_construct_gene_unk34xx()
1594 xf_emit(ctx, 0x20, 0); /* ffffffff POLYGON_STIPPLE */ nv50_gr_construct_gene_unk34xx()
1595 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ nv50_gr_construct_gene_unk34xx()
1596 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_gene_unk34xx()
1597 xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */ nv50_gr_construct_gene_unk34xx()
1598 xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */ nv50_gr_construct_gene_unk34xx()
1599 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ nv50_gr_construct_gene_unk34xx()
1600 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_gene_unk34xx()
1601 xf_emit(ctx, 1, 0x1fe21); /* 0001ffff tesla UNK0FAC */ nv50_gr_construct_gene_unk34xx()
1603 xf_emit(ctx, 1, 0x0fac6881); nv50_gr_construct_gene_unk34xx()
1605 xf_emit(ctx, 1, 1); nv50_gr_construct_gene_unk34xx()
1606 xf_emit(ctx, 3, 0); nv50_gr_construct_gene_unk34xx()
1611 nv50_gr_construct_gene_unk14xx(struct nvkm_grctx *ctx) nv50_gr_construct_gene_unk14xx() argument
1613 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_unk14xx()
1616 xf_emit(ctx, 5, 0); /* ffffffff */ nv50_gr_construct_gene_unk14xx()
1617 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ nv50_gr_construct_gene_unk14xx()
1618 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_gene_unk14xx()
1619 xf_emit(ctx, 1, 0); /* 000003ff */ nv50_gr_construct_gene_unk14xx()
1620 xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */ nv50_gr_construct_gene_unk14xx()
1621 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_gene_unk14xx()
1622 xf_emit(ctx, 2, 4); /* 7f, ff */ nv50_gr_construct_gene_unk14xx()
1623 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_gene_unk14xx()
1625 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_unk14xx()
1626 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk14xx()
1627 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk14xx()
1628 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_unk14xx()
1629 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ nv50_gr_construct_gene_unk14xx()
1630 xf_emit(ctx, 1, 0); /* 000000ff VP_CLIP_DISTANCE_ENABLE */ nv50_gr_construct_gene_unk14xx()
1632 xf_emit(ctx, 1, 0); /* 3ff */ nv50_gr_construct_gene_unk14xx()
1633 xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1940 */ nv50_gr_construct_gene_unk14xx()
1634 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */ nv50_gr_construct_gene_unk14xx()
1635 xf_emit(ctx, 1, 0x804); /* 00000fff SEMANTIC_CLIP */ nv50_gr_construct_gene_unk14xx()
1636 xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ nv50_gr_construct_gene_unk14xx()
1637 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ nv50_gr_construct_gene_unk14xx()
1639 xf_emit(ctx, 1, 0x7f); /* 000000ff tesla UNK0FFC */ nv50_gr_construct_gene_unk14xx()
1640 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_unk14xx()
1641 xf_emit(ctx, 1, 1); /* 00000001 SHADE_MODEL */ nv50_gr_construct_gene_unk14xx()
1642 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ nv50_gr_construct_gene_unk14xx()
1643 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ nv50_gr_construct_gene_unk14xx()
1644 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_gene_unk14xx()
1645 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk14xx()
1646 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk14xx()
1647 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_unk14xx()
1648 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ nv50_gr_construct_gene_unk14xx()
1649 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0D7C */ nv50_gr_construct_gene_unk14xx()
1650 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0F8C */ nv50_gr_construct_gene_unk14xx()
1651 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_unk14xx()
1652 xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ nv50_gr_construct_gene_unk14xx()
1653 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_gene_unk14xx()
1654 xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */ nv50_gr_construct_gene_unk14xx()
1655 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ nv50_gr_construct_gene_unk14xx()
1656 xf_emit(ctx, 1, 0); /* 0000000f */ nv50_gr_construct_gene_unk14xx()
1658 xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ nv50_gr_construct_gene_unk14xx()
1660 xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ nv50_gr_construct_gene_unk14xx()
1661 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ nv50_gr_construct_gene_unk14xx()
1662 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ nv50_gr_construct_gene_unk14xx()
1663 xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */ nv50_gr_construct_gene_unk14xx()
1664 xf_emit(ctx, 3, 0); /* f, 0, 0 */ nv50_gr_construct_gene_unk14xx()
1665 xf_emit(ctx, 3, 0); /* ffffffff last VIEWPORT_SCALE? */ nv50_gr_construct_gene_unk14xx()
1666 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_unk14xx()
1667 xf_emit(ctx, 1, 1); /* 00000001 VIEWPORT_TRANSFORM_EN */ nv50_gr_construct_gene_unk14xx()
1668 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ nv50_gr_construct_gene_unk14xx()
1669 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ nv50_gr_construct_gene_unk14xx()
1670 xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ nv50_gr_construct_gene_unk14xx()
1671 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_gene_unk14xx()
1672 xf_emit(ctx, 0x30, 0); /* ffffffff VIEWPORT_TRANSLATE */ nv50_gr_construct_gene_unk14xx()
1673 xf_emit(ctx, 3, 0); /* f, 0, 0 */ nv50_gr_construct_gene_unk14xx()
1674 xf_emit(ctx, 3, 0); /* ffffffff */ nv50_gr_construct_gene_unk14xx()
1675 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_unk14xx()
1676 xf_emit(ctx, 2, 0x88); /* 000001ff tesla UNK19D8 */ nv50_gr_construct_gene_unk14xx()
1677 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ nv50_gr_construct_gene_unk14xx()
1678 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_unk14xx()
1679 xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ nv50_gr_construct_gene_unk14xx()
1680 xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ nv50_gr_construct_gene_unk14xx()
1681 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ nv50_gr_construct_gene_unk14xx()
1682 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ nv50_gr_construct_gene_unk14xx()
1683 xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ nv50_gr_construct_gene_unk14xx()
1684 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ nv50_gr_construct_gene_unk14xx()
1685 xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ nv50_gr_construct_gene_unk14xx()
1686 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ nv50_gr_construct_gene_unk14xx()
1687 xf_emit(ctx, 1, 0); /* 0000000f */ nv50_gr_construct_gene_unk14xx()
1688 xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */ nv50_gr_construct_gene_unk14xx()
1689 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ nv50_gr_construct_gene_unk14xx()
1690 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ nv50_gr_construct_gene_unk14xx()
1691 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_gene_unk14xx()
1693 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_gene_unk14xx()
1694 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ nv50_gr_construct_gene_unk14xx()
1695 xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ nv50_gr_construct_gene_unk14xx()
1697 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_unk14xx()
1698 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_gene_unk14xx()
1699 xf_emit(ctx, 1, 0); /* 000003ff */ nv50_gr_construct_gene_unk14xx()
1701 xf_emit(ctx, 0x20, 0); /* 10xbits ffffffff, 3fffff. SCISSOR_* */ nv50_gr_construct_gene_unk14xx()
1702 xf_emit(ctx, 1, 0); /* f */ nv50_gr_construct_gene_unk14xx()
1703 xf_emit(ctx, 1, 0); /* 0? */ nv50_gr_construct_gene_unk14xx()
1704 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_unk14xx()
1705 xf_emit(ctx, 1, 0); /* 003fffff */ nv50_gr_construct_gene_unk14xx()
1706 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_unk14xx()
1707 xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */ nv50_gr_construct_gene_unk14xx()
1708 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ nv50_gr_construct_gene_unk14xx()
1709 xf_emit(ctx, 1, 0x26); /* 000000ff SEMANTIC_LAYER */ nv50_gr_construct_gene_unk14xx()
1710 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ nv50_gr_construct_gene_unk14xx()
1711 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk14xx()
1712 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk14xx()
1713 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_unk14xx()
1714 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ nv50_gr_construct_gene_unk14xx()
1715 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ nv50_gr_construct_gene_unk14xx()
1716 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ nv50_gr_construct_gene_unk14xx()
1717 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ nv50_gr_construct_gene_unk14xx()
1718 xf_emit(ctx, 1, 0); /* 0000000f */ nv50_gr_construct_gene_unk14xx()
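The 0x30-word VIEWPORT_SCALE and VIEWPORT_TRANSLATE arrays above, with the layout comment "X0, Y0, Z0, X1, Y1, ...", work out to 16 viewports of three components each. A one-liner confirming the arithmetic (the indexing helper is an assumption based on that comment):

#include <stdio.h>

#define NUM_VIEWPORTS 16
#define COMPS_PER_VP  3   /* X, Y, Z */

int main(void)
{
    printf("array words = %#x\n", NUM_VIEWPORTS * COMPS_PER_VP); /* 0x30 */
    int v = 5, c = 2;     /* viewport 5, component Z */
    printf("VIEWPORT_SCALE[5].Z at word %d\n", v * COMPS_PER_VP + c);
    return 0;
}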
1722 nv50_gr_construct_gene_zcull(struct nvkm_grctx *ctx) nv50_gr_construct_gene_zcull() argument
1724 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_zcull()
1727 xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ nv50_gr_construct_gene_zcull()
1728 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ nv50_gr_construct_gene_zcull()
1729 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_gene_zcull()
1730 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_gene_zcull()
1731 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ nv50_gr_construct_gene_zcull()
1732 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ nv50_gr_construct_gene_zcull()
1733 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ nv50_gr_construct_gene_zcull()
1734 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ nv50_gr_construct_gene_zcull()
1735 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ nv50_gr_construct_gene_zcull()
1736 xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ nv50_gr_construct_gene_zcull()
1737 xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */ nv50_gr_construct_gene_zcull()
1738 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_gene_zcull()
1739 xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */ nv50_gr_construct_gene_zcull()
1740 xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ nv50_gr_construct_gene_zcull()
1741 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_gene_zcull()
1742 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ nv50_gr_construct_gene_zcull()
1743 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_gene_zcull()
1744 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ nv50_gr_construct_gene_zcull()
1745 xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ nv50_gr_construct_gene_zcull()
1746 xf_emit(ctx, 1, 0); /* 0000ffff */ nv50_gr_construct_gene_zcull()
1747 xf_emit(ctx, 1, 0); /* 00000001 UNK0FB0 */ nv50_gr_construct_gene_zcull()
1748 xf_emit(ctx, 1, 0); /* 00000001 POLYGON_STIPPLE_ENABLE */ nv50_gr_construct_gene_zcull()
1749 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ nv50_gr_construct_gene_zcull()
1750 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_zcull()
1751 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ nv50_gr_construct_gene_zcull()
1752 xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */ nv50_gr_construct_gene_zcull()
1753 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ nv50_gr_construct_gene_zcull()
1754 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ nv50_gr_construct_gene_zcull()
1755 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ nv50_gr_construct_gene_zcull()
1756 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ nv50_gr_construct_gene_zcull()
1757 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ nv50_gr_construct_gene_zcull()
1758 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_gene_zcull()
1759 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ nv50_gr_construct_gene_zcull()
1760 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ nv50_gr_construct_gene_zcull()
1761 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_gene_zcull()
1763 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1108 */ nv50_gr_construct_gene_zcull()
1764 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ nv50_gr_construct_gene_zcull()
1765 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_gene_zcull()
1766 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_gene_zcull()
1767 xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */ nv50_gr_construct_gene_zcull()
1769 xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ nv50_gr_construct_gene_zcull()
1770 xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ nv50_gr_construct_gene_zcull()
1771 xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ nv50_gr_construct_gene_zcull()
1772 xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ nv50_gr_construct_gene_zcull()
1773 xf_emit(ctx, 1, 0x10); /* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */ nv50_gr_construct_gene_zcull()
1774 xf_emit(ctx, 1, 0); /* 00000001 VIEWPORT_CLIP_RECTS_EN */ nv50_gr_construct_gene_zcull()
1775 xf_emit(ctx, 1, 3); /* 00000003 FP_CTRL_UNK196C */ nv50_gr_construct_gene_zcull()
1776 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1968 */ nv50_gr_construct_gene_zcull()
1778 xf_emit(ctx, 1, 0); /* 0fffffff tesla UNK1104 */ nv50_gr_construct_gene_zcull()
1779 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK151C */ nv50_gr_construct_gene_zcull()
1783 nv50_gr_construct_gene_clipid(struct nvkm_grctx *ctx) nv50_gr_construct_gene_clipid() argument
1787 xf_emit(ctx, 1, 0); /* 00000007 UNK0FB4 */ nv50_gr_construct_gene_clipid()
1789 xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_HORIZ */ nv50_gr_construct_gene_clipid()
1790 xf_emit(ctx, 4, 0); /* 07ffffff CLIPID_REGION_VERT */ nv50_gr_construct_gene_clipid()
1791 xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ nv50_gr_construct_gene_clipid()
1792 xf_emit(ctx, 2, 0x04000000); /* 07ffffff UNK1508 */ nv50_gr_construct_gene_clipid()
1793 xf_emit(ctx, 1, 0); /* 00000001 CLIPID_ENABLE */ nv50_gr_construct_gene_clipid()
1794 xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_WIDTH */ nv50_gr_construct_gene_clipid()
1795 xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ID */ nv50_gr_construct_gene_clipid()
1796 xf_emit(ctx, 1, 0); /* 000000ff CLIPID_ADDRESS_HIGH */ nv50_gr_construct_gene_clipid()
1797 xf_emit(ctx, 1, 0); /* ffffffff CLIPID_ADDRESS_LOW */ nv50_gr_construct_gene_clipid()
1798 xf_emit(ctx, 1, 0x80); /* 00003fff CLIPID_HEIGHT */ nv50_gr_construct_gene_clipid()
1799 xf_emit(ctx, 1, 0); /* 0000ffff DMA_CLIPID */ nv50_gr_construct_gene_clipid()
1803 nv50_gr_construct_gene_unk24xx(struct nvkm_grctx *ctx) nv50_gr_construct_gene_unk24xx() argument
1805 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_unk24xx()
1809 xf_emit(ctx, 0x33, 0); nv50_gr_construct_gene_unk24xx()
1811 xf_emit(ctx, 2, 0); nv50_gr_construct_gene_unk24xx()
1813 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_unk24xx()
1814 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk24xx()
1815 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk24xx()
1818 xf_emit(ctx, 4, 0); /* RO */ nv50_gr_construct_gene_unk24xx()
1819 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ nv50_gr_construct_gene_unk24xx()
1820 xf_emit(ctx, 1, 0); /* 1ff */ nv50_gr_construct_gene_unk24xx()
1821 xf_emit(ctx, 8, 0); /* 0? */ nv50_gr_construct_gene_unk24xx()
1822 xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ nv50_gr_construct_gene_unk24xx()
1824 xf_emit(ctx, 4, 0); /* RO */ nv50_gr_construct_gene_unk24xx()
1825 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ nv50_gr_construct_gene_unk24xx()
1826 xf_emit(ctx, 1, 0); /* 1ff */ nv50_gr_construct_gene_unk24xx()
1827 xf_emit(ctx, 8, 0); /* 0? */ nv50_gr_construct_gene_unk24xx()
1828 xf_emit(ctx, 9, 0); /* ffffffff, 7ff */ nv50_gr_construct_gene_unk24xx()
1830 xf_emit(ctx, 0xc, 0); /* RO */ nv50_gr_construct_gene_unk24xx()
1832 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ nv50_gr_construct_gene_unk24xx()
1833 xf_emit(ctx, 1, 0); /* 1ff */ nv50_gr_construct_gene_unk24xx()
1834 xf_emit(ctx, 8, 0); /* 0? */ nv50_gr_construct_gene_unk24xx()
1837 xf_emit(ctx, 0xc, 0); /* RO */ nv50_gr_construct_gene_unk24xx()
1839 xf_emit(ctx, 0xe10, 0); /* 190 * 9: 8*ffffffff, 7ff */ nv50_gr_construct_gene_unk24xx()
1840 xf_emit(ctx, 1, 0); /* 1ff */ nv50_gr_construct_gene_unk24xx()
1841 xf_emit(ctx, 8, 0); /* 0? */ nv50_gr_construct_gene_unk24xx()
1844 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_unk24xx()
1845 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk24xx()
1846 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_unk24xx()
1847 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_gene_unk24xx()
1849 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ nv50_gr_construct_gene_unk24xx()
1851 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_unk24xx()
1852 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_gene_unk24xx()
1853 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ nv50_gr_construct_gene_unk24xx()
1854 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ nv50_gr_construct_gene_unk24xx()
1855 xf_emit(ctx, 1, 1); /* 00000001 */ nv50_gr_construct_gene_unk24xx()
1858 xf_emit(ctx, 2, 4); /* 000000ff */ nv50_gr_construct_gene_unk24xx()
1859 xf_emit(ctx, 1, 0x80c14); /* 01ffffff SEMANTIC_COLOR */ nv50_gr_construct_gene_unk24xx()
1860 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ nv50_gr_construct_gene_unk24xx()
1861 xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ nv50_gr_construct_gene_unk24xx()
1862 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_gene_unk24xx()
1863 xf_emit(ctx, 1, 0x27); /* 000000ff SEMANTIC_PRIM_ID */ nv50_gr_construct_gene_unk24xx()
1864 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_unk24xx()
1865 xf_emit(ctx, 1, 0); /* 0000000f */ nv50_gr_construct_gene_unk24xx()
1866 xf_emit(ctx, 1, 1); /* 00000001 */ nv50_gr_construct_gene_unk24xx()
1869 xf_emit(ctx, 0x40, 0); /* ffffffff */ nv50_gr_construct_gene_unk24xx()
1870 xf_emit(ctx, 0x10, 0); /* 3, 0, 0.... */ nv50_gr_construct_gene_unk24xx()
1871 xf_emit(ctx, 0x10, 0); /* ffffffff */ nv50_gr_construct_gene_unk24xx()
1874 xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_CTRL */ nv50_gr_construct_gene_unk24xx()
1875 xf_emit(ctx, 1, 1); /* 00000001 */ nv50_gr_construct_gene_unk24xx()
1876 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_unk24xx()
1877 xf_emit(ctx, 4, 0); /* ffffffff NOPERSPECTIVE_BITMAP */ nv50_gr_construct_gene_unk24xx()
1878 xf_emit(ctx, 0x10, 0); /* 00ffffff POINT_COORD_REPLACE_MAP */ nv50_gr_construct_gene_unk24xx()
1879 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ nv50_gr_construct_gene_unk24xx()
1880 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_gene_unk24xx()
1882 xf_emit(ctx, 1, 0); /* 000003ff */ nv50_gr_construct_gene_unk24xx()
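The "190 * 9" in the comments above is hexadecimal: 0x190 rows of nine words (eight ffffffff entries plus one 7ff) account exactly for the 0xe10 words emitted. A one-line check:

#include <stdio.h>

int main(void)
{
    printf("0x190 * 9 = %#x\n", 0x190 * 9);  /* prints 0xe10 */
    return 0;
}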
1886 nv50_gr_construct_gene_vfetch(struct nvkm_grctx *ctx) nv50_gr_construct_gene_vfetch() argument
1888 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_vfetch()
1895 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK13A4 */ nv50_gr_construct_gene_vfetch()
1896 xf_emit(ctx, 1, 1); /* 00000fff tesla UNK1318 */ nv50_gr_construct_gene_vfetch()
1898 xf_emit(ctx, 1, 0); /* ffffffff VERTEX_BUFFER_FIRST */ nv50_gr_construct_gene_vfetch()
1899 xf_emit(ctx, 1, 0); /* 00000001 PRIMITIVE_RESTART_ENABLE */ nv50_gr_construct_gene_vfetch()
1900 xf_emit(ctx, 1, 0); /* 00000001 UNK0DE8 */ nv50_gr_construct_gene_vfetch()
1901 xf_emit(ctx, 1, 0); /* ffffffff PRIMITIVE_RESTART_INDEX */ nv50_gr_construct_gene_vfetch()
1902 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ nv50_gr_construct_gene_vfetch()
1903 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ nv50_gr_construct_gene_vfetch()
1904 xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */ nv50_gr_construct_gene_vfetch()
1905 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ nv50_gr_construct_gene_vfetch()
1906 xf_emit(ctx, 1, 0x20); /* 0000ffff tesla UNK129C */ nv50_gr_construct_gene_vfetch()
1907 xf_emit(ctx, 1, 0); /* 000000ff turing UNK370??? */ nv50_gr_construct_gene_vfetch()
1908 xf_emit(ctx, 1, 0); /* 0000ffff turing USER_PARAM_COUNT */ nv50_gr_construct_gene_vfetch()
1909 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_vfetch()
1912 xf_emit(ctx, 0xb, 0); /* RO */ nv50_gr_construct_gene_vfetch()
1914 xf_emit(ctx, 0x9, 0); /* RO */ nv50_gr_construct_gene_vfetch()
1916 xf_emit(ctx, 0x8, 0); /* RO */ nv50_gr_construct_gene_vfetch()
1918 xf_emit(ctx, 1, 0); /* 00000001 EDGE_FLAG */ nv50_gr_construct_gene_vfetch()
1919 xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ nv50_gr_construct_gene_vfetch()
1920 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_vfetch()
1921 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ nv50_gr_construct_gene_vfetch()
1923 xf_emit(ctx, 0xc, 0); /* RO */ nv50_gr_construct_gene_vfetch()
1925 xf_emit(ctx, 1, 0); /* 7f/ff */ nv50_gr_construct_gene_vfetch()
1926 xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ nv50_gr_construct_gene_vfetch()
1927 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_vfetch()
1928 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ nv50_gr_construct_gene_vfetch()
1929 xf_emit(ctx, 1, 4); /* 000001ff UNK1A28 */ nv50_gr_construct_gene_vfetch()
1930 xf_emit(ctx, 1, 8); /* 000001ff UNK0DF0 */ nv50_gr_construct_gene_vfetch()
1931 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_vfetch()
1933 xf_emit(ctx, 1, 0x3ff); /* 3ff tesla UNK0D68 */ nv50_gr_construct_gene_vfetch()
1935 xf_emit(ctx, 1, 0x7ff); /* 7ff tesla UNK0D68 */ nv50_gr_construct_gene_vfetch()
1937 xf_emit(ctx, 1, 0x1e00); /* 7fff */ nv50_gr_construct_gene_vfetch()
1939 xf_emit(ctx, 0xc, 0); /* RO or close */ nv50_gr_construct_gene_vfetch()
1941 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ nv50_gr_construct_gene_vfetch()
1942 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ nv50_gr_construct_gene_vfetch()
1943 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ nv50_gr_construct_gene_vfetch()
1945 xf_emit(ctx, 2, 0); /* ffffffff */ nv50_gr_construct_gene_vfetch()
1947 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_vfetch()
1948 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0FD8 */ nv50_gr_construct_gene_vfetch()
1951 xf_emit(ctx, 0x10, 0); /* 0? */ nv50_gr_construct_gene_vfetch()
1952 xf_emit(ctx, 2, 0); /* weird... */ nv50_gr_construct_gene_vfetch()
1953 xf_emit(ctx, 2, 0); /* RO */ nv50_gr_construct_gene_vfetch()
1955 xf_emit(ctx, 8, 0); /* 0? */ nv50_gr_construct_gene_vfetch()
1956 xf_emit(ctx, 1, 0); /* weird... */ nv50_gr_construct_gene_vfetch()
1957 xf_emit(ctx, 2, 0); /* RO */ nv50_gr_construct_gene_vfetch()
1960 xf_emit(ctx, 1, 0); /* ffffffff VB_ELEMENT_BASE */ nv50_gr_construct_gene_vfetch()
1961 xf_emit(ctx, 1, 0); /* ffffffff UNK1438 */ nv50_gr_construct_gene_vfetch()
1962 xf_emit(ctx, acnt, 0); /* 1 tesla UNK1000 */ nv50_gr_construct_gene_vfetch()
1964 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1118? */ nv50_gr_construct_gene_vfetch()
1966 xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ nv50_gr_construct_gene_vfetch()
1967 xf_emit(ctx, 1, 0); /* f/1f */ nv50_gr_construct_gene_vfetch()
1969 xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_UNK90C */ nv50_gr_construct_gene_vfetch()
1970 xf_emit(ctx, 1, 0); /* f/1f */ nv50_gr_construct_gene_vfetch()
1972 xf_emit(ctx, acnt, 0); /* RO */ nv50_gr_construct_gene_vfetch()
1973 xf_emit(ctx, 2, 0); /* RO */ nv50_gr_construct_gene_vfetch()
1975 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK111C? */ nv50_gr_construct_gene_vfetch()
1976 xf_emit(ctx, 1, 0); /* RO */ nv50_gr_construct_gene_vfetch()
1978 xf_emit(ctx, 1, 0); /* 000000ff UNK15F4_ADDRESS_HIGH */ nv50_gr_construct_gene_vfetch()
1979 xf_emit(ctx, 1, 0); /* ffffffff UNK15F4_ADDRESS_LOW */ nv50_gr_construct_gene_vfetch()
1980 xf_emit(ctx, 1, 0); /* 000000ff UNK0F84_ADDRESS_HIGH */ nv50_gr_construct_gene_vfetch()
1981 xf_emit(ctx, 1, 0); /* ffffffff UNK0F84_ADDRESS_LOW */ nv50_gr_construct_gene_vfetch()
1983 xf_emit(ctx, acnt, 0); /* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */ nv50_gr_construct_gene_vfetch()
1984 xf_emit(ctx, 3, 0); /* f/1f */ nv50_gr_construct_gene_vfetch()
1986 xf_emit(ctx, acnt, 0); /* 00000fff VERTEX_ARRAY_STRIDE */ nv50_gr_construct_gene_vfetch()
1987 xf_emit(ctx, 3, 0); /* f/1f */ nv50_gr_construct_gene_vfetch()
1989 xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_ARRAY_LOW */ nv50_gr_construct_gene_vfetch()
1990 xf_emit(ctx, 3, 0); /* f/1f */ nv50_gr_construct_gene_vfetch()
1992 xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_ARRAY_HIGH */ nv50_gr_construct_gene_vfetch()
1993 xf_emit(ctx, 3, 0); /* f/1f */ nv50_gr_construct_gene_vfetch()
1995 xf_emit(ctx, acnt, 0); /* ffffffff VERTEX_LIMIT_LOW */ nv50_gr_construct_gene_vfetch()
1996 xf_emit(ctx, 3, 0); /* f/1f */ nv50_gr_construct_gene_vfetch()
1998 xf_emit(ctx, acnt, 0); /* 000000ff VERTEX_LIMIT_HIGH */ nv50_gr_construct_gene_vfetch()
1999 xf_emit(ctx, 3, 0); /* f/1f */ nv50_gr_construct_gene_vfetch()
2002 xf_emit(ctx, acnt, 0); /* f */ nv50_gr_construct_gene_vfetch()
2003 xf_emit(ctx, 3, 0); /* f/1f */ nv50_gr_construct_gene_vfetch()
2007 xf_emit(ctx, 2, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2009 xf_emit(ctx, 5, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2011 xf_emit(ctx, 1, 0); /* ffff DMA_VTXBUF */ nv50_gr_construct_gene_vfetch()
2014 xf_emit(ctx, 0x41, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2016 xf_emit(ctx, 0x11, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2018 xf_emit(ctx, 0x50, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2020 xf_emit(ctx, 0x58, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2022 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ nv50_gr_construct_gene_vfetch()
2023 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ nv50_gr_construct_gene_vfetch()
2024 xf_emit(ctx, 1, 1); /* 1 UNK0DEC */ nv50_gr_construct_gene_vfetch()
2026 xf_emit(ctx, acnt*4, 0); /* ffffffff VTX_ATTR */ nv50_gr_construct_gene_vfetch()
2027 xf_emit(ctx, 4, 0); /* f/1f, 0, 0, 0 */ nv50_gr_construct_gene_vfetch()
2030 xf_emit(ctx, 0x1d, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2032 xf_emit(ctx, 0x16, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2034 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ nv50_gr_construct_gene_vfetch()
2035 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ nv50_gr_construct_gene_vfetch()
2038 xf_emit(ctx, 8, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2040 xf_emit(ctx, 0xc, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2042 xf_emit(ctx, 7, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2044 xf_emit(ctx, 0xa, 0); /* RO */ nv50_gr_construct_gene_vfetch()
2052 xf_emit(ctx, 0x20, 0); /* ffffffff */ nv50_gr_construct_gene_vfetch()
2053 xf_emit(ctx, 0x200, 0); /* ffffffff */ nv50_gr_construct_gene_vfetch()
2054 xf_emit(ctx, 4, 0); /* 7f/ff, 0, 0, 0 */ nv50_gr_construct_gene_vfetch()
2055 xf_emit(ctx, 4, 0); /* ffffffff */ nv50_gr_construct_gene_vfetch()
2058 xf_emit(ctx, 1, 0); /* 113/111 */ nv50_gr_construct_gene_vfetch()
2059 xf_emit(ctx, 1, 0xf); /* ffffffff VP_ATTR_EN */ nv50_gr_construct_gene_vfetch()
2060 xf_emit(ctx, (acnt/8)-1, 0); /* ffffffff VP_ATTR_EN */ nv50_gr_construct_gene_vfetch()
2061 xf_emit(ctx, acnt/8, 0); /* ffffffff VTX_ATTR_MASK_UNK0DD0 */ nv50_gr_construct_gene_vfetch()
2062 xf_emit(ctx, 1, 0); /* 0000000f VP_GP_BUILTIN_ATTR_EN */ nv50_gr_construct_gene_vfetch()
2063 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_vfetch()
2066 xf_emit(ctx, 7, 0); /* weird... */ nv50_gr_construct_gene_vfetch()
2068 xf_emit(ctx, 5, 0); /* weird... */ nv50_gr_construct_gene_vfetch()
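Nearly every array in the vfetch gene above is sized by acnt, the per-chipset vertex-attribute count: the VP_ATTR_EN mask is one word per eight attributes (the initial 0xf word plus (acnt/8)-1 zero words), per-attribute state arrays are acnt words, and the VTX_ATTR scratch is acnt*4. A hedged sketch with acnt = 16, a value assumed here purely for illustration (the driver picks it per chipset):

#include <stdio.h>

int main(void)
{
    int acnt = 16;   /* hypothetical attribute count */
    printf("VP_ATTR_EN words: 1 + %d\n", acnt / 8 - 1);
    printf("per-attribute arrays: %d words each\n", acnt);
    printf("VTX_ATTR scratch: %d words\n", acnt * 4);
    return 0;
}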
2072 nv50_gr_construct_gene_eng2d(struct nvkm_grctx *ctx) nv50_gr_construct_gene_eng2d() argument
2074 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_eng2d()
2077 xf_emit(ctx, 2, 0); /* 0001ffff CLIP_X, CLIP_Y */ nv50_gr_construct_gene_eng2d()
2078 xf_emit(ctx, 2, 0); /* 0000ffff CLIP_W, CLIP_H */ nv50_gr_construct_gene_eng2d()
2079 xf_emit(ctx, 1, 0); /* 00000001 CLIP_ENABLE */ nv50_gr_construct_gene_eng2d()
2083 xf_emit(ctx, 2, 0); /* 0000ffff IFC_CLIP_X, Y */ nv50_gr_construct_gene_eng2d()
2084 xf_emit(ctx, 2, 1); /* 0000ffff IFC_CLIP_W, H */ nv50_gr_construct_gene_eng2d()
2085 xf_emit(ctx, 1, 0); /* 00000001 IFC_CLIP_ENABLE */ nv50_gr_construct_gene_eng2d()
2087 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ nv50_gr_construct_gene_eng2d()
2088 xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */ nv50_gr_construct_gene_eng2d()
2089 xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */ nv50_gr_construct_gene_eng2d()
2090 xf_emit(ctx, 1, 0x11); /* 3f[NV50]/7f[NV84+] DST_FORMAT */ nv50_gr_construct_gene_eng2d()
2091 xf_emit(ctx, 1, 0); /* 0001ffff DRAW_POINT_X */ nv50_gr_construct_gene_eng2d()
2092 xf_emit(ctx, 1, 8); /* 0000000f DRAW_UNK58C */ nv50_gr_construct_gene_eng2d()
2093 xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_X_FRACT */ nv50_gr_construct_gene_eng2d()
2094 xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_X_INT */ nv50_gr_construct_gene_eng2d()
2095 xf_emit(ctx, 1, 0); /* 000fffff SIFC_DST_Y_FRACT */ nv50_gr_construct_gene_eng2d()
2096 xf_emit(ctx, 1, 0); /* 0001ffff SIFC_DST_Y_INT */ nv50_gr_construct_gene_eng2d()
2097 xf_emit(ctx, 1, 0); /* 000fffff SIFC_DX_DU_FRACT */ nv50_gr_construct_gene_eng2d()
2098 xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DX_DU_INT */ nv50_gr_construct_gene_eng2d()
2099 xf_emit(ctx, 1, 0); /* 000fffff SIFC_DY_DV_FRACT */ nv50_gr_construct_gene_eng2d()
2100 xf_emit(ctx, 1, 1); /* 0001ffff SIFC_DY_DV_INT */ nv50_gr_construct_gene_eng2d()
2101 xf_emit(ctx, 1, 1); /* 0000ffff SIFC_WIDTH */ nv50_gr_construct_gene_eng2d()
2102 xf_emit(ctx, 1, 1); /* 0000ffff SIFC_HEIGHT */ nv50_gr_construct_gene_eng2d()
2103 xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */ nv50_gr_construct_gene_eng2d()
2104 xf_emit(ctx, 1, 2); /* 00000003 SIFC_BITMAP_UNK808 */ nv50_gr_construct_gene_eng2d()
2105 xf_emit(ctx, 1, 0); /* 00000003 SIFC_BITMAP_LINE_PACK_MODE */ nv50_gr_construct_gene_eng2d()
2106 xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_LSB_FIRST */ nv50_gr_construct_gene_eng2d()
2107 xf_emit(ctx, 1, 0); /* 00000001 SIFC_BITMAP_ENABLE */ nv50_gr_construct_gene_eng2d()
2108 xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_X */ nv50_gr_construct_gene_eng2d()
2109 xf_emit(ctx, 1, 0); /* 0000ffff BLIT_DST_Y */ nv50_gr_construct_gene_eng2d()
2110 xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */ nv50_gr_construct_gene_eng2d()
2111 xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */ nv50_gr_construct_gene_eng2d()
2112 xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */ nv50_gr_construct_gene_eng2d()
2113 xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */ nv50_gr_construct_gene_eng2d()
2114 xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_W */ nv50_gr_construct_gene_eng2d()
2115 xf_emit(ctx, 1, 1); /* 0000ffff BLIT_DST_H */ nv50_gr_construct_gene_eng2d()
2116 xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_X_FRACT */ nv50_gr_construct_gene_eng2d()
2117 xf_emit(ctx, 1, 0); /* 0001ffff BLIT_SRC_X_INT */ nv50_gr_construct_gene_eng2d()
2118 xf_emit(ctx, 1, 0); /* 000fffff BLIT_SRC_Y_FRACT */ nv50_gr_construct_gene_eng2d()
2119 xf_emit(ctx, 1, 0); /* 00000001 UNK888 */ nv50_gr_construct_gene_eng2d()
2120 xf_emit(ctx, 1, 4); /* 0000003f UNK884 */ nv50_gr_construct_gene_eng2d()
2121 xf_emit(ctx, 1, 0); /* 00000007 UNK880 */ nv50_gr_construct_gene_eng2d()
2122 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK0FB8 */ nv50_gr_construct_gene_eng2d()
2123 xf_emit(ctx, 1, 0x15); /* 000000ff tesla UNK128C */ nv50_gr_construct_gene_eng2d()
2124 xf_emit(ctx, 2, 0); /* 00000007, ffff0ff3 */ nv50_gr_construct_gene_eng2d()
2125 xf_emit(ctx, 1, 0); /* 00000001 UNK260 */ nv50_gr_construct_gene_eng2d()
2126 xf_emit(ctx, 1, 0x4444480); /* 1fffffff UNK870 */ nv50_gr_construct_gene_eng2d()
2128 xf_emit(ctx, 0x10, 0); nv50_gr_construct_gene_eng2d()
2130 xf_emit(ctx, 0x27, 0); nv50_gr_construct_gene_eng2d()
2134 nv50_gr_construct_gene_csched(struct nvkm_grctx *ctx) nv50_gr_construct_gene_csched() argument
2136 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_csched()
2139 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */ nv50_gr_construct_gene_csched()
2140 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1924 */ nv50_gr_construct_gene_csched()
2141 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ nv50_gr_construct_gene_csched()
2142 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_gene_csched()
2143 xf_emit(ctx, 1, 0); /* 000003ff */ nv50_gr_construct_gene_csched()
2145 xf_emit(ctx, 1, 0); /* ffffffff turing UNK364 */ nv50_gr_construct_gene_csched()
2146 xf_emit(ctx, 1, 0); /* 0000000f turing UNK36C */ nv50_gr_construct_gene_csched()
2147 xf_emit(ctx, 1, 0); /* 0000ffff USER_PARAM_COUNT */ nv50_gr_construct_gene_csched()
2148 xf_emit(ctx, 1, 0x100); /* 00ffffff turing UNK384 */ nv50_gr_construct_gene_csched()
2149 xf_emit(ctx, 1, 0); /* 0000000f turing UNK2A0 */ nv50_gr_construct_gene_csched()
2150 xf_emit(ctx, 1, 0); /* 0000ffff GRIDID */ nv50_gr_construct_gene_csched()
2151 xf_emit(ctx, 1, 0x10001); /* ffffffff GRIDDIM_XY */ nv50_gr_construct_gene_csched()
2152 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_csched()
2153 xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */ nv50_gr_construct_gene_csched()
2154 xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */ nv50_gr_construct_gene_csched()
2155 xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */ nv50_gr_construct_gene_csched()
2156 xf_emit(ctx, 1, 1); /* 00000001 LANES32 */ nv50_gr_construct_gene_csched()
2157 xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ nv50_gr_construct_gene_csched()
2158 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ nv50_gr_construct_gene_csched()
2160 xf_emit(ctx, 0x40, 0); /* ffffffff USER_PARAM */ nv50_gr_construct_gene_csched()
2164 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ nv50_gr_construct_gene_csched()
2165 xf_emit(ctx, 0x80, 0); /* fff */ nv50_gr_construct_gene_csched()
2166 xf_emit(ctx, 2, 0); /* ff, fff */ nv50_gr_construct_gene_csched()
2167 xf_emit(ctx, 0x10*2, 0); /* ffffffff, 1f */ nv50_gr_construct_gene_csched()
2170 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ nv50_gr_construct_gene_csched()
2171 xf_emit(ctx, 0x60, 0); /* fff */ nv50_gr_construct_gene_csched()
2172 xf_emit(ctx, 2, 0); /* ff, fff */ nv50_gr_construct_gene_csched()
2173 xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */ nv50_gr_construct_gene_csched()
2177 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ nv50_gr_construct_gene_csched()
2178 xf_emit(ctx, 0x40, 0); /* fff */ nv50_gr_construct_gene_csched()
2179 xf_emit(ctx, 2, 0); /* ff, fff */ nv50_gr_construct_gene_csched()
2180 xf_emit(ctx, 8*2, 0); /* ffffffff, 1f */ nv50_gr_construct_gene_csched()
2184 xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */ nv50_gr_construct_gene_csched()
2185 xf_emit(ctx, 0x10, 0); /* fff */ nv50_gr_construct_gene_csched()
2186 xf_emit(ctx, 2, 0); /* ff, fff */ nv50_gr_construct_gene_csched()
2187 xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */ nv50_gr_construct_gene_csched()
2190 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ nv50_gr_construct_gene_csched()
2191 xf_emit(ctx, 0xf0, 0); /* fff */ nv50_gr_construct_gene_csched()
2192 xf_emit(ctx, 2, 0); /* ff, fff */ nv50_gr_construct_gene_csched()
2193 xf_emit(ctx, 0x1e*2, 0); /* ffffffff, 1f */ nv50_gr_construct_gene_csched()
2196 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ nv50_gr_construct_gene_csched()
2197 xf_emit(ctx, 0x60, 0); /* fff */ nv50_gr_construct_gene_csched()
2198 xf_emit(ctx, 2, 0); /* ff, fff */ nv50_gr_construct_gene_csched()
2199 xf_emit(ctx, 0xc*2, 0); /* ffffffff, 1f */ nv50_gr_construct_gene_csched()
2203 xf_emit(ctx, 8, 0); /* 7, 0, 0, 0, ... */ nv50_gr_construct_gene_csched()
2204 xf_emit(ctx, 0x30, 0); /* fff */ nv50_gr_construct_gene_csched()
2205 xf_emit(ctx, 2, 0); /* ff, fff */ nv50_gr_construct_gene_csched()
2206 xf_emit(ctx, 6*2, 0); /* ffffffff, 1f */ nv50_gr_construct_gene_csched()
2209 xf_emit(ctx, 0x12, 0); nv50_gr_construct_gene_csched()
2213 xf_emit(ctx, 4, 0); /* f, 0, 0, 0 */ nv50_gr_construct_gene_csched()
2214 xf_emit(ctx, 0x10, 0); /* fff */ nv50_gr_construct_gene_csched()
2215 xf_emit(ctx, 2, 0); /* ff, fff */ nv50_gr_construct_gene_csched()
2216 xf_emit(ctx, 2*2, 0); /* ffffffff, 1f */ nv50_gr_construct_gene_csched()
2219 xf_emit(ctx, 1, 0); /* 0000000f */ nv50_gr_construct_gene_csched()
2220 xf_emit(ctx, 1, 0); /* 00000000 */ nv50_gr_construct_gene_csched()
2221 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_csched()
2222 xf_emit(ctx, 1, 0); /* 0000001f */ nv50_gr_construct_gene_csched()
2223 xf_emit(ctx, 4, 0); /* ffffffff */ nv50_gr_construct_gene_csched()
2224 xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */ nv50_gr_construct_gene_csched()
2225 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_csched()
2226 xf_emit(ctx, 4, 0); /* ffffffff */ nv50_gr_construct_gene_csched()
2227 xf_emit(ctx, 1, 0); /* 00000003 turing UNK35C */ nv50_gr_construct_gene_csched()
2228 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_csched()
2229 xf_emit(ctx, 1, 0); /* 000000ff */ nv50_gr_construct_gene_csched()
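Roughly, each xf_emit(ctx, num, val) call above reserves num value slots in the graphics context image being constructed and fills them with val; the hex mask in each comment notes which bits of the corresponding method register are meaningful. Gaps in the source line numbering are lines the search elided because they do not contain "ctx" (braces, chipset conditionals, and the like). A minimal, self-contained sketch of this emission pattern, assuming a simplified context model (struct grctx_model, xf_emit_model, and the buffer size are illustrative, not the driver's internals):

/* Minimal model of the xf_emit() pattern; all names here are
 * illustrative, not the nouveau driver's actual internals. */
#include <stdint.h>
#include <stdio.h>

struct grctx_model {
	uint32_t *image;	/* context image being filled */
	size_t pos;		/* next free 32-bit slot */
};

static void
xf_emit_model(struct grctx_model *c, int num, uint32_t val)
{
	while (num--)
		c->image[c->pos++] = val;
}

int
main(void)
{
	uint32_t image[64] = { 0 };
	struct grctx_model c = { .image = image, .pos = 0 };

	/* mirror the first emissions of gene_csched above */
	xf_emit_model(&c, 2, 0);		/* WINDOW_OFFSET_XY */
	xf_emit_model(&c, 1, 0);		/* tesla UNK1924 */
	xf_emit_model(&c, 1, 0);		/* WINDOW_ORIGIN */
	xf_emit_model(&c, 1, 0x8100c12);	/* FP_INTERPOLANT_CTRL */

	printf("slots used: %zu\n", c.pos);
	return 0;
}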
2233 nv50_gr_construct_gene_unk1cxx(struct nvkm_grctx *ctx) nv50_gr_construct_gene_unk1cxx() argument
2235 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_unk1cxx()
2236 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ nv50_gr_construct_gene_unk1cxx()
2237 xf_emit(ctx, 1, 0x3f800000); /* ffffffff LINE_WIDTH */ nv50_gr_construct_gene_unk1cxx()
2238 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2239 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */ nv50_gr_construct_gene_unk1cxx()
2240 xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2241 xf_emit(ctx, 3, 0); /* 00000001 POLYGON_OFFSET_*_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2242 xf_emit(ctx, 1, 4); /* 0000000f CULL_MODE */ nv50_gr_construct_gene_unk1cxx()
2243 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ nv50_gr_construct_gene_unk1cxx()
2244 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_gene_unk1cxx()
2245 xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2246 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */ nv50_gr_construct_gene_unk1cxx()
2247 xf_emit(ctx, 0x10, 0); /* 00000001 SCISSOR_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2248 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_gene_unk1cxx()
2249 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2250 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ nv50_gr_construct_gene_unk1cxx()
2251 xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_UNITS */ nv50_gr_construct_gene_unk1cxx()
2252 xf_emit(ctx, 1, 0); /* ffffffff POLYGON_OFFSET_FACTOR */ nv50_gr_construct_gene_unk1cxx()
2253 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ nv50_gr_construct_gene_unk1cxx()
2254 xf_emit(ctx, 2, 0); /* 07ffffff SCREEN_SCISSOR */ nv50_gr_construct_gene_unk1cxx()
2255 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ nv50_gr_construct_gene_unk1cxx()
2256 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ nv50_gr_construct_gene_unk1cxx()
2257 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ nv50_gr_construct_gene_unk1cxx()
2258 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ nv50_gr_construct_gene_unk1cxx()
2259 xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */ nv50_gr_construct_gene_unk1cxx()
2260 xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */ nv50_gr_construct_gene_unk1cxx()
2261 xf_emit(ctx, 8, 0); /* 00000001 RT_HORIZ_LINEAR */ nv50_gr_construct_gene_unk1cxx()
2262 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ nv50_gr_construct_gene_unk1cxx()
2263 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2264 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ nv50_gr_construct_gene_unk1cxx()
2266 xf_emit(ctx, 1, 3); /* 00000003 UNK16B4 */ nv50_gr_construct_gene_unk1cxx()
2268 xf_emit(ctx, 1, 1); /* 00000001 UNK16B4 */ nv50_gr_construct_gene_unk1cxx()
2269 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ nv50_gr_construct_gene_unk1cxx()
2270 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ nv50_gr_construct_gene_unk1cxx()
2271 xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ nv50_gr_construct_gene_unk1cxx()
2272 xf_emit(ctx, 2, 0x04000000); /* 07ffffff tesla UNK0D6C */ nv50_gr_construct_gene_unk1cxx()
2273 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ nv50_gr_construct_gene_unk1cxx()
2274 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2275 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2276 xf_emit(ctx, 1, 5); /* 0000000f UNK1408 */ nv50_gr_construct_gene_unk1cxx()
2277 xf_emit(ctx, 1, 0x52); /* 000001ff SEMANTIC_PTSZ */ nv50_gr_construct_gene_unk1cxx()
2278 xf_emit(ctx, 1, 0); /* ffffffff POINT_SIZE */ nv50_gr_construct_gene_unk1cxx()
2279 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_gene_unk1cxx()
2280 xf_emit(ctx, 1, 0); /* 00000007 tesla UNK0FB4 */ nv50_gr_construct_gene_unk1cxx()
2282 xf_emit(ctx, 1, 0); /* 3ff */ nv50_gr_construct_gene_unk1cxx()
2283 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK1110 */ nv50_gr_construct_gene_unk1cxx()
2286 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */ nv50_gr_construct_gene_unk1cxx()
2287 xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ nv50_gr_construct_gene_unk1cxx()
2288 xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ nv50_gr_construct_gene_unk1cxx()
2289 xf_emit(ctx, 1, 0x10); /* 000000ff VIEW_VOLUME_CLIP_CTRL */ nv50_gr_construct_gene_unk1cxx()
2290 xf_emit(ctx, 0x20, 0); /* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */ nv50_gr_construct_gene_unk1cxx()
2291 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK187C */ nv50_gr_construct_gene_unk1cxx()
2292 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ nv50_gr_construct_gene_unk1cxx()
2293 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2294 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2295 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2296 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ nv50_gr_construct_gene_unk1cxx()
2297 xf_emit(ctx, 1, 0x8100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_gene_unk1cxx()
2298 xf_emit(ctx, 1, 5); /* 0000000f tesla UNK1220 */ nv50_gr_construct_gene_unk1cxx()
2299 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_gene_unk1cxx()
2300 xf_emit(ctx, 1, 0); /* 000000ff tesla UNK1A20 */ nv50_gr_construct_gene_unk1cxx()
2301 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2302 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2303 xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ nv50_gr_construct_gene_unk1cxx()
2305 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ nv50_gr_construct_gene_unk1cxx()
2307 xf_emit(ctx, 0x1c, 0); /* RO */ nv50_gr_construct_gene_unk1cxx()
2309 xf_emit(ctx, 0x9, 0); nv50_gr_construct_gene_unk1cxx()
2310 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ nv50_gr_construct_gene_unk1cxx()
2311 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2312 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ nv50_gr_construct_gene_unk1cxx()
2313 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ nv50_gr_construct_gene_unk1cxx()
2314 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ nv50_gr_construct_gene_unk1cxx()
2315 xf_emit(ctx, 1, 0); /* 00000003 WINDOW_ORIGIN */ nv50_gr_construct_gene_unk1cxx()
2317 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK1100 */ nv50_gr_construct_gene_unk1cxx()
2318 xf_emit(ctx, 1, 0); /* 3ff */ nv50_gr_construct_gene_unk1cxx()
2323 xf_emit(ctx, 0x25, 0); nv50_gr_construct_gene_unk1cxx()
2325 xf_emit(ctx, 0x3b, 0); nv50_gr_construct_gene_unk1cxx()
2329 nv50_gr_construct_gene_strmout(struct nvkm_grctx *ctx) nv50_gr_construct_gene_strmout() argument
2331 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_strmout()
2332 xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ nv50_gr_construct_gene_strmout()
2333 xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ nv50_gr_construct_gene_strmout()
2334 xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ nv50_gr_construct_gene_strmout()
2336 xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ nv50_gr_construct_gene_strmout()
2337 xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ nv50_gr_construct_gene_strmout()
2339 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_strmout()
2340 xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ nv50_gr_construct_gene_strmout()
2341 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_gene_strmout()
2343 xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ nv50_gr_construct_gene_strmout()
2345 xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ nv50_gr_construct_gene_strmout()
2346 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_strmout()
2348 xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ nv50_gr_construct_gene_strmout()
2349 xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ nv50_gr_construct_gene_strmout()
2350 xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */ nv50_gr_construct_gene_strmout()
2351 xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */ nv50_gr_construct_gene_strmout()
2352 xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ nv50_gr_construct_gene_strmout()
2354 xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ nv50_gr_construct_gene_strmout()
2355 xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ nv50_gr_construct_gene_strmout()
2357 xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */ nv50_gr_construct_gene_strmout()
2358 xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ nv50_gr_construct_gene_strmout()
2359 xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ nv50_gr_construct_gene_strmout()
2360 xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */ nv50_gr_construct_gene_strmout()
2361 xf_emit(ctx, 2, 0); /* ffffffff */ nv50_gr_construct_gene_strmout()
2362 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_gene_strmout()
2364 xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */ nv50_gr_construct_gene_strmout()
2365 xf_emit(ctx, 1, 0); /* 0000000f */ nv50_gr_construct_gene_strmout()
2366 xf_emit(ctx, 1, 0); /* 00000000? */ nv50_gr_construct_gene_strmout()
2367 xf_emit(ctx, 2, 0); /* ffffffff */ nv50_gr_construct_gene_strmout()
2371 nv50_gr_construct_gene_ropm1(struct nvkm_grctx *ctx) nv50_gr_construct_gene_ropm1() argument
2373 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_ropm1()
2374 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */ nv50_gr_construct_gene_ropm1()
2375 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */ nv50_gr_construct_gene_ropm1()
2376 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_gene_ropm1()
2377 xf_emit(ctx, 1, 0); /* 000003ff */ nv50_gr_construct_gene_ropm1()
2379 xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */ nv50_gr_construct_gene_ropm1()
2380 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_gene_ropm1()
2384 nv50_gr_construct_gene_ropm2(struct nvkm_grctx *ctx) nv50_gr_construct_gene_ropm2() argument
2386 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_ropm2()
2388 xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ nv50_gr_construct_gene_ropm2()
2389 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ nv50_gr_construct_gene_ropm2()
2390 xf_emit(ctx, 2, 0); /* ffffffff */ nv50_gr_construct_gene_ropm2()
2391 xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ nv50_gr_construct_gene_ropm2()
2392 xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */ nv50_gr_construct_gene_ropm2()
2393 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ nv50_gr_construct_gene_ropm2()
2394 xf_emit(ctx, 1, 0); /* 7 */ nv50_gr_construct_gene_ropm2()
2396 xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ nv50_gr_construct_gene_ropm2()
2397 xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ nv50_gr_construct_gene_ropm2()
2398 xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW, COUNTER */ nv50_gr_construct_gene_ropm2()
2399 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0D64 */ nv50_gr_construct_gene_ropm2()
2400 xf_emit(ctx, 1, 0x4e3bfdf); /* ffffffff UNK0DF4 */ nv50_gr_construct_gene_ropm2()
2401 xf_emit(ctx, 1, 0); /* 00000001 eng2d UNK260 */ nv50_gr_construct_gene_ropm2()
2402 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_gene_ropm2()
2403 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_gene_ropm2()
2405 xf_emit(ctx, 1, 0x11); /* 000000ff tesla UNK1968 */ nv50_gr_construct_gene_ropm2()
2406 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_gene_ropm2()
2410 nv50_gr_construct_gene_ropc(struct nvkm_grctx *ctx) nv50_gr_construct_gene_ropc() argument
2412 struct nvkm_device *device = ctx->device; nv50_gr_construct_gene_ropc()
2421 xf_emit(ctx, 1, 0); /* f/7 MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_gene_ropc()
2422 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_gene_ropc()
2423 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ nv50_gr_construct_gene_ropc()
2424 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ nv50_gr_construct_gene_ropc()
2425 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ nv50_gr_construct_gene_ropc()
2426 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ nv50_gr_construct_gene_ropc()
2427 xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ nv50_gr_construct_gene_ropc()
2428 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_gene_ropc()
2429 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ nv50_gr_construct_gene_ropc()
2430 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ nv50_gr_construct_gene_ropc()
2431 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ nv50_gr_construct_gene_ropc()
2432 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_gene_ropc()
2433 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ nv50_gr_construct_gene_ropc()
2435 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_gene_ropc()
2436 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ nv50_gr_construct_gene_ropc()
2437 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ nv50_gr_construct_gene_ropc()
2438 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ nv50_gr_construct_gene_ropc()
2439 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ nv50_gr_construct_gene_ropc()
2440 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_gene_ropc()
2442 xf_emit(ctx, 1, 0x15); /* 000000ff */ nv50_gr_construct_gene_ropc()
2443 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ nv50_gr_construct_gene_ropc()
2444 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ nv50_gr_construct_gene_ropc()
2445 xf_emit(ctx, 1, 0x10); /* 3ff/ff VIEW_VOLUME_CLIP_CTRL */ nv50_gr_construct_gene_ropc()
2446 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ nv50_gr_construct_gene_ropc()
2447 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_gene_ropc()
2448 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_gene_ropc()
2449 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_gene_ropc()
2451 xf_emit(ctx, 3, 0); /* ff, ffffffff, ffffffff */ nv50_gr_construct_gene_ropc()
2452 xf_emit(ctx, 1, 4); /* 7 */ nv50_gr_construct_gene_ropc()
2453 xf_emit(ctx, 1, 0x400); /* fffffff */ nv50_gr_construct_gene_ropc()
2454 xf_emit(ctx, 1, 0x300); /* ffff */ nv50_gr_construct_gene_ropc()
2455 xf_emit(ctx, 1, 0x1001); /* 1fff */ nv50_gr_construct_gene_ropc()
2458 xf_emit(ctx, 1, 0); /* 0000000f UNK15C8 */ nv50_gr_construct_gene_ropc()
2460 xf_emit(ctx, 1, 0x15); /* ff */ nv50_gr_construct_gene_ropc()
2463 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_gene_ropc()
2464 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_gene_ropc()
2465 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ nv50_gr_construct_gene_ropc()
2466 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ nv50_gr_construct_gene_ropc()
2467 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_gene_ropc()
2468 xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ nv50_gr_construct_gene_ropc()
2469 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ nv50_gr_construct_gene_ropc()
2470 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ nv50_gr_construct_gene_ropc()
2471 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_gene_ropc()
2472 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ nv50_gr_construct_gene_ropc()
2473 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ nv50_gr_construct_gene_ropc()
2474 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ nv50_gr_construct_gene_ropc()
2475 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_gene_ropc()
2476 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ nv50_gr_construct_gene_ropc()
2477 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ nv50_gr_construct_gene_ropc()
2478 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ nv50_gr_construct_gene_ropc()
2479 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_gene_ropc()
2480 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_gene_ropc()
2481 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_gene_ropc()
2482 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_gene_ropc()
2483 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1900 */ nv50_gr_construct_gene_ropc()
2484 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ nv50_gr_construct_gene_ropc()
2485 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ nv50_gr_construct_gene_ropc()
2486 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ nv50_gr_construct_gene_ropc()
2487 xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ nv50_gr_construct_gene_ropc()
2488 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ nv50_gr_construct_gene_ropc()
2489 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ nv50_gr_construct_gene_ropc()
2490 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_gene_ropc()
2491 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ nv50_gr_construct_gene_ropc()
2492 xf_emit(ctx, 1, 0); /* 0000000f */ nv50_gr_construct_gene_ropc()
2493 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ nv50_gr_construct_gene_ropc()
2494 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ nv50_gr_construct_gene_ropc()
2495 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ nv50_gr_construct_gene_ropc()
2496 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ nv50_gr_construct_gene_ropc()
2497 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_gene_ropc()
2498 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ nv50_gr_construct_gene_ropc()
2499 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ nv50_gr_construct_gene_ropc()
2500 xf_emit(ctx, 0x10, 0); /* ffffffff DEPTH_RANGE_NEAR */ nv50_gr_construct_gene_ropc()
2501 xf_emit(ctx, 0x10, 0x3f800000); /* ffffffff DEPTH_RANGE_FAR */ nv50_gr_construct_gene_ropc()
2502 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_gene_ropc()
2503 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_gene_ropc()
2504 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_BACK_FUNC_FUNC */ nv50_gr_construct_gene_ropc()
2505 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_MASK */ nv50_gr_construct_gene_ropc()
2506 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_FUNC_REF */ nv50_gr_construct_gene_ropc()
2507 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ nv50_gr_construct_gene_ropc()
2508 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ nv50_gr_construct_gene_ropc()
2509 xf_emit(ctx, 2, 0); /* ffffffff DEPTH_BOUNDS */ nv50_gr_construct_gene_ropc()
2510 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ nv50_gr_construct_gene_ropc()
2511 xf_emit(ctx, 1, 0); /* 00000007 DEPTH_TEST_FUNC */ nv50_gr_construct_gene_ropc()
2512 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_gene_ropc()
2513 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ nv50_gr_construct_gene_ropc()
2514 xf_emit(ctx, 1, 0); /* 000000ff CLEAR_STENCIL */ nv50_gr_construct_gene_ropc()
2515 xf_emit(ctx, 1, 0); /* 00000007 STENCIL_FRONT_FUNC_FUNC */ nv50_gr_construct_gene_ropc()
2516 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_MASK */ nv50_gr_construct_gene_ropc()
2517 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_FUNC_REF */ nv50_gr_construct_gene_ropc()
2518 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ nv50_gr_construct_gene_ropc()
2519 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ nv50_gr_construct_gene_ropc()
2520 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_gene_ropc()
2521 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ nv50_gr_construct_gene_ropc()
2522 xf_emit(ctx, 1, 0x10); /* 7f/ff VIEW_VOLUME_CLIP_CTRL */ nv50_gr_construct_gene_ropc()
2523 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_gene_ropc()
2524 xf_emit(ctx, 1, 0x3f); /* 0000003f UNK1590 */ nv50_gr_construct_gene_ropc()
2525 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_gene_ropc()
2526 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_gene_ropc()
2527 xf_emit(ctx, 2, 0); /* ffff0ff3, ffff */ nv50_gr_construct_gene_ropc()
2528 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ nv50_gr_construct_gene_ropc()
2529 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ nv50_gr_construct_gene_ropc()
2530 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ nv50_gr_construct_gene_ropc()
2531 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_gene_ropc()
2532 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_gene_ropc()
2533 xf_emit(ctx, 1, 0); /* ffffffff CLEAR_DEPTH */ nv50_gr_construct_gene_ropc()
2534 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ nv50_gr_construct_gene_ropc()
2536 xf_emit(ctx, 2, 0); nv50_gr_construct_gene_ropc()
2537 xf_emit(ctx, 1, 0x1001); nv50_gr_construct_gene_ropc()
2538 xf_emit(ctx, 0xb, 0); nv50_gr_construct_gene_ropc()
2540 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_gene_ropc()
2541 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_gene_ropc()
2542 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_gene_ropc()
2543 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ nv50_gr_construct_gene_ropc()
2544 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_gene_ropc()
2546 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ nv50_gr_construct_gene_ropc()
2547 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ nv50_gr_construct_gene_ropc()
2548 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ nv50_gr_construct_gene_ropc()
2549 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ nv50_gr_construct_gene_ropc()
2550 xf_emit(ctx, 1, 0x11); /* 3f/7f */ nv50_gr_construct_gene_ropc()
2551 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ nv50_gr_construct_gene_ropc()
2553 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ nv50_gr_construct_gene_ropc()
2554 xf_emit(ctx, 1, 0); /* 000000ff */ nv50_gr_construct_gene_ropc()
2556 xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ nv50_gr_construct_gene_ropc()
2557 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_gene_ropc()
2558 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ nv50_gr_construct_gene_ropc()
2559 xf_emit(ctx, 2, 1); /* 00000007 BLEND_EQUATION_RGB, ALPHA */ nv50_gr_construct_gene_ropc()
2560 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ nv50_gr_construct_gene_ropc()
2561 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ nv50_gr_construct_gene_ropc()
2562 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ nv50_gr_construct_gene_ropc()
2563 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ nv50_gr_construct_gene_ropc()
2564 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ nv50_gr_construct_gene_ropc()
2565 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_gene_ropc()
2566 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ nv50_gr_construct_gene_ropc()
2567 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_gene_ropc()
2568 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ nv50_gr_construct_gene_ropc()
2570 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK12E4 */ nv50_gr_construct_gene_ropc()
2571 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ nv50_gr_construct_gene_ropc()
2572 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ nv50_gr_construct_gene_ropc()
2573 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ nv50_gr_construct_gene_ropc()
2574 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ nv50_gr_construct_gene_ropc()
2575 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ nv50_gr_construct_gene_ropc()
2576 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ nv50_gr_construct_gene_ropc()
2577 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ nv50_gr_construct_gene_ropc()
2578 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */ nv50_gr_construct_gene_ropc()
2579 xf_emit(ctx, 2, 0); /* 00000001 */ nv50_gr_construct_gene_ropc()
2580 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_gene_ropc()
2581 xf_emit(ctx, 1, 0); /* 0000000f */ nv50_gr_construct_gene_ropc()
2582 xf_emit(ctx, 1, 0); /* 00000003 */ nv50_gr_construct_gene_ropc()
2583 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_ropc()
2584 xf_emit(ctx, 2, 0); /* 00000001 */ nv50_gr_construct_gene_ropc()
2585 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_gene_ropc()
2586 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_gene_ropc()
2587 xf_emit(ctx, 1, 0); /* 000003ff */ nv50_gr_construct_gene_ropc()
2589 xf_emit(ctx, 2, 0); /* 00000001 */ nv50_gr_construct_gene_ropc()
2590 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_gene_ropc()
2591 xf_emit(ctx, 1, 0); /* 00000003 */ nv50_gr_construct_gene_ropc()
2592 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_gene_ropc()
2593 xf_emit(ctx, 2, 0); /* 00000001 */ nv50_gr_construct_gene_ropc()
2595 xf_emit(ctx, 1, 0); /* 00000007 MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_gene_ropc()
2596 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1430 */ nv50_gr_construct_gene_ropc()
2597 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_gene_ropc()
2599 xf_emit(ctx, 4, 0); /* ffffffff CLEAR_COLOR */ nv50_gr_construct_gene_ropc()
2600 xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR A R G B */ nv50_gr_construct_gene_ropc()
2601 xf_emit(ctx, 1, 0); /* 00000fff eng2d UNK2B0 */ nv50_gr_construct_gene_ropc()
2603 xf_emit(ctx, 2, 0); /* 00000001 */ nv50_gr_construct_gene_ropc()
2604 xf_emit(ctx, 1, 0); /* 000003ff */ nv50_gr_construct_gene_ropc()
2605 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ nv50_gr_construct_gene_ropc()
2606 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ nv50_gr_construct_gene_ropc()
2607 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ nv50_gr_construct_gene_ropc()
2608 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ nv50_gr_construct_gene_ropc()
2609 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ nv50_gr_construct_gene_ropc()
2610 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ nv50_gr_construct_gene_ropc()
2611 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ nv50_gr_construct_gene_ropc()
2612 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ nv50_gr_construct_gene_ropc()
2613 xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ nv50_gr_construct_gene_ropc()
2614 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ nv50_gr_construct_gene_ropc()
2615 xf_emit(ctx, 1, 0); /* 0000000f LOGIC_OP */ nv50_gr_construct_gene_ropc()
2617 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4? NVA3+ only? */ nv50_gr_construct_gene_ropc()
2619 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ nv50_gr_construct_gene_ropc()
2620 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ nv50_gr_construct_gene_ropc()
2621 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ nv50_gr_construct_gene_ropc()
2622 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ nv50_gr_construct_gene_ropc()
2623 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ nv50_gr_construct_gene_ropc()
2624 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ nv50_gr_construct_gene_ropc()
2625 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ nv50_gr_construct_gene_ropc()
2626 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK15C4 */ nv50_gr_construct_gene_ropc()
2627 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_gene_ropc()
2628 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1140 */ nv50_gr_construct_gene_ropc()
2630 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ nv50_gr_construct_gene_ropc()
2631 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ nv50_gr_construct_gene_ropc()
2632 xf_emit(ctx, 1, 0); /* 00000007 PATTERN_COLOR_FORMAT */ nv50_gr_construct_gene_ropc()
2633 xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_COLOR */ nv50_gr_construct_gene_ropc()
2634 xf_emit(ctx, 1, 0); /* 00000001 PATTERN_MONO_FORMAT */ nv50_gr_construct_gene_ropc()
2635 xf_emit(ctx, 2, 0); /* ffffffff PATTERN_MONO_BITMAP */ nv50_gr_construct_gene_ropc()
2636 xf_emit(ctx, 1, 0); /* 00000003 PATTERN_SELECT */ nv50_gr_construct_gene_ropc()
2637 xf_emit(ctx, 1, 0); /* 000000ff ROP */ nv50_gr_construct_gene_ropc()
2638 xf_emit(ctx, 1, 0); /* ffffffff BETA1 */ nv50_gr_construct_gene_ropc()
2639 xf_emit(ctx, 1, 0); /* ffffffff BETA4 */ nv50_gr_construct_gene_ropc()
2640 xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ nv50_gr_construct_gene_ropc()
2641 xf_emit(ctx, 0x50, 0); /* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */ nv50_gr_construct_gene_ropc()
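The ropc generator above repeats similar stencil/depth/blend blocks with varying counts and values; in the source those variants are selected by checks on ctx->device->chipset and by precomputed values such as magic2, on condition lines the search elided because they do not mention "ctx". A hedged sketch of that selection shape, reusing the grctx_model and xf_emit_model helpers from the earlier sketch; the threshold and values below are placeholders, not the driver's real per-chipset tables:

/* Illustrative only: per-chipset value selection feeding the same
 * emission helper.  The 0xa0 threshold and magic values are
 * placeholders, not the driver's actual logic. */
static void
emit_ropc_model(struct grctx_model *c, int chipset)
{
	uint32_t magic2 = (chipset >= 0xa0) ? 0x00087e67 : 0x001ffe67; /* placeholder */

	xf_emit_model(c, 1, magic2);	/* tesla UNK0F78 */
	if (chipset >= 0xa0)		/* placeholder branch */
		xf_emit_model(c, 1, 0x11);
	xf_emit_model(c, 1, 0);		/* DEPTH_BOUNDS_EN */
}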
2645 nv50_gr_construct_xfer_unk84xx(struct nvkm_grctx *ctx) nv50_gr_construct_xfer_unk84xx() argument
2647 struct nvkm_device *device = ctx->device; nv50_gr_construct_xfer_unk84xx()
2664 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_xfer_unk84xx()
2665 xf_emit(ctx, 1, 4); /* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */ nv50_gr_construct_xfer_unk84xx()
2666 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_xfer_unk84xx()
2667 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_xfer_unk84xx()
2668 xf_emit(ctx, 1, 0); /* 111/113[NVA0+] */ nv50_gr_construct_xfer_unk84xx()
2670 xf_emit(ctx, 0x1f, 0); /* ffffffff */ nv50_gr_construct_xfer_unk84xx()
2672 xf_emit(ctx, 0x0f, 0); /* ffffffff */ nv50_gr_construct_xfer_unk84xx()
2674 xf_emit(ctx, 0x10, 0); /* fffffff VP_RESULT_MAP_1 up */ nv50_gr_construct_xfer_unk84xx()
2675 xf_emit(ctx, 2, 0); /* f/1f[NVA3], fffffff/ffffffff[NVA0+] */ nv50_gr_construct_xfer_unk84xx()
2676 xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ nv50_gr_construct_xfer_unk84xx()
2677 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ nv50_gr_construct_xfer_unk84xx()
2679 xf_emit(ctx, 1, 0x03020100); /* ffffffff */ nv50_gr_construct_xfer_unk84xx()
2681 xf_emit(ctx, 1, 0x00608080); /* fffffff VP_RESULT_MAP_0 */ nv50_gr_construct_xfer_unk84xx()
2682 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_xfer_unk84xx()
2683 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_xfer_unk84xx()
2684 xf_emit(ctx, 2, 0); /* 111/113, 7f/ff */ nv50_gr_construct_xfer_unk84xx()
2685 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ nv50_gr_construct_xfer_unk84xx()
2686 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_xfer_unk84xx()
2687 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_xfer_unk84xx()
2688 xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ nv50_gr_construct_xfer_unk84xx()
2689 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_xfer_unk84xx()
2690 xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ nv50_gr_construct_xfer_unk84xx()
2692 xf_emit(ctx, 1, magic3); /* 00007fff tesla UNK141C */ nv50_gr_construct_xfer_unk84xx()
2693 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ nv50_gr_construct_xfer_unk84xx()
2694 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_xfer_unk84xx()
2695 xf_emit(ctx, 1, 0); /* 111/113 */ nv50_gr_construct_xfer_unk84xx()
2696 xf_emit(ctx, 0x1f, 0); /* ffffffff GP_RESULT_MAP_1 up */ nv50_gr_construct_xfer_unk84xx()
2697 xf_emit(ctx, 1, 0); /* 0000001f */ nv50_gr_construct_xfer_unk84xx()
2698 xf_emit(ctx, 1, 0); /* ffffffff */ nv50_gr_construct_xfer_unk84xx()
2699 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_xfer_unk84xx()
2700 xf_emit(ctx, 1, 4); /* 000000ff GP_REG_ALLOC_RESULT */ nv50_gr_construct_xfer_unk84xx()
2701 xf_emit(ctx, 1, 0x80); /* 0000ffff GP_VERTEX_OUTPUT_COUNT */ nv50_gr_construct_xfer_unk84xx()
2702 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_xfer_unk84xx()
2703 xf_emit(ctx, 1, 0x03020100); /* ffffffff GP_RESULT_MAP_0 */ nv50_gr_construct_xfer_unk84xx()
2704 xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ nv50_gr_construct_xfer_unk84xx()
2706 xf_emit(ctx, 1, magic3); /* 7fff tesla UNK141C */ nv50_gr_construct_xfer_unk84xx()
2707 xf_emit(ctx, 1, 4); /* 7f/ff VP_RESULT_MAP_SIZE */ nv50_gr_construct_xfer_unk84xx()
2708 xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ nv50_gr_construct_xfer_unk84xx()
2709 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_xfer_unk84xx()
2710 xf_emit(ctx, 1, 0); /* 111/113 */ nv50_gr_construct_xfer_unk84xx()
2711 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_xfer_unk84xx()
2712 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_xfer_unk84xx()
2713 xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ nv50_gr_construct_xfer_unk84xx()
2714 xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ nv50_gr_construct_xfer_unk84xx()
2715 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_xfer_unk84xx()
2716 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK13A0 */ nv50_gr_construct_xfer_unk84xx()
2717 xf_emit(ctx, 1, 4); /* 7f/ff VP_REG_ALLOC_RESULT */ nv50_gr_construct_xfer_unk84xx()
2718 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_xfer_unk84xx()
2719 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_xfer_unk84xx()
2720 xf_emit(ctx, 1, 0); /* 111/113 */ nv50_gr_construct_xfer_unk84xx()
2722 xf_emit(ctx, 0x1020, 0); /* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ nv50_gr_construct_xfer_unk84xx()
2724 xf_emit(ctx, 0xa20, 0); /* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */ nv50_gr_construct_xfer_unk84xx()
2726 xf_emit(ctx, 0x210, 0); /* ffffffff */ nv50_gr_construct_xfer_unk84xx()
2728 xf_emit(ctx, 0x410, 0); /* ffffffff */ nv50_gr_construct_xfer_unk84xx()
2729 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_xfer_unk84xx()
2730 xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ nv50_gr_construct_xfer_unk84xx()
2731 xf_emit(ctx, 1, 3); /* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */ nv50_gr_construct_xfer_unk84xx()
2732 xf_emit(ctx, 1, 0); /* 00000001 PROVOKING_VERTEX_LAST */ nv50_gr_construct_xfer_unk84xx()
2733 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_xfer_unk84xx()
2737 nv50_gr_construct_xfer_tprop(struct nvkm_grctx *ctx) nv50_gr_construct_xfer_tprop() argument
2739 struct nvkm_device *device = ctx->device; nv50_gr_construct_xfer_tprop()
2751 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ nv50_gr_construct_xfer_tprop()
2752 xf_emit(ctx, 1, 0); /* ffffffff ALPHA_TEST_REF */ nv50_gr_construct_xfer_tprop()
2753 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ nv50_gr_construct_xfer_tprop()
2755 xf_emit(ctx, 1, 1); /* 0000000f UNK16A0 */ nv50_gr_construct_xfer_tprop()
2756 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_xfer_tprop()
2757 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_xfer_tprop()
2758 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_BACK_MASK */ nv50_gr_construct_xfer_tprop()
2759 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */ nv50_gr_construct_xfer_tprop()
2760 xf_emit(ctx, 4, 0); /* ffffffff BLEND_COLOR */ nv50_gr_construct_xfer_tprop()
2761 xf_emit(ctx, 1, 0); /* 00000001 UNK19C0 */ nv50_gr_construct_xfer_tprop()
2762 xf_emit(ctx, 1, 0); /* 00000001 UNK0FDC */ nv50_gr_construct_xfer_tprop()
2763 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ nv50_gr_construct_xfer_tprop()
2764 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ nv50_gr_construct_xfer_tprop()
2765 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_xfer_tprop()
2766 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ nv50_gr_construct_xfer_tprop()
2767 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ nv50_gr_construct_xfer_tprop()
2768 xf_emit(ctx, 1, 0); /* ff[NV50]/3ff[NV84+] */ nv50_gr_construct_xfer_tprop()
2769 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ nv50_gr_construct_xfer_tprop()
2770 xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ nv50_gr_construct_xfer_tprop()
2771 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ nv50_gr_construct_xfer_tprop()
2772 xf_emit(ctx, 3, 0); /* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */ nv50_gr_construct_xfer_tprop()
2773 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_xfer_tprop()
2774 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_BACK_ENABLE */ nv50_gr_construct_xfer_tprop()
2775 xf_emit(ctx, 2, 0); /* 00007fff WINDOW_OFFSET_XY */ nv50_gr_construct_xfer_tprop()
2776 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ nv50_gr_construct_xfer_tprop()
2777 xf_emit(ctx, 1, 0); /* 7 */ nv50_gr_construct_xfer_tprop()
2778 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ nv50_gr_construct_xfer_tprop()
2779 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_xfer_tprop()
2780 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_xfer_tprop()
2781 xf_emit(ctx, 1, 0); /* ffffffff COLOR_KEY */ nv50_gr_construct_xfer_tprop()
2782 xf_emit(ctx, 1, 0); /* 00000001 COLOR_KEY_ENABLE */ nv50_gr_construct_xfer_tprop()
2783 xf_emit(ctx, 1, 0); /* 00000007 COLOR_KEY_FORMAT */ nv50_gr_construct_xfer_tprop()
2784 xf_emit(ctx, 2, 0); /* ffffffff SIFC_BITMAP_COLOR */ nv50_gr_construct_xfer_tprop()
2785 xf_emit(ctx, 1, 1); /* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */ nv50_gr_construct_xfer_tprop()
2786 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ nv50_gr_construct_xfer_tprop()
2787 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ nv50_gr_construct_xfer_tprop()
2789 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ nv50_gr_construct_xfer_tprop()
2790 xf_emit(ctx, 1, 0); /* 00000003 */ nv50_gr_construct_xfer_tprop()
2791 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1298 */ nv50_gr_construct_xfer_tprop()
2793 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK16B4 */ nv50_gr_construct_xfer_tprop()
2794 xf_emit(ctx, 1, 0); /* 00000003 */ nv50_gr_construct_xfer_tprop()
2796 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ nv50_gr_construct_xfer_tprop()
2798 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_xfer_tprop()
2799 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ nv50_gr_construct_xfer_tprop()
2800 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ nv50_gr_construct_xfer_tprop()
2801 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ nv50_gr_construct_xfer_tprop()
2802 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ nv50_gr_construct_xfer_tprop()
2803 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ nv50_gr_construct_xfer_tprop()
2804 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ nv50_gr_construct_xfer_tprop()
2805 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ nv50_gr_construct_xfer_tprop()
2807 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ nv50_gr_construct_xfer_tprop()
2808 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ nv50_gr_construct_xfer_tprop()
2809 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ nv50_gr_construct_xfer_tprop()
2810 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ nv50_gr_construct_xfer_tprop()
2811 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ nv50_gr_construct_xfer_tprop()
2812 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ nv50_gr_construct_xfer_tprop()
2813 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ nv50_gr_construct_xfer_tprop()
2814 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ nv50_gr_construct_xfer_tprop()
2815 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ nv50_gr_construct_xfer_tprop()
2817 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ nv50_gr_construct_xfer_tprop()
2818 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_xfer_tprop()
2819 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ nv50_gr_construct_xfer_tprop()
2820 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ nv50_gr_construct_xfer_tprop()
2821 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ nv50_gr_construct_xfer_tprop()
2822 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ nv50_gr_construct_xfer_tprop()
2823 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_xfer_tprop()
2824 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ nv50_gr_construct_xfer_tprop()
2825 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ nv50_gr_construct_xfer_tprop()
2826 xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ nv50_gr_construct_xfer_tprop()
2827 xf_emit(ctx, 1, 0); /* 7 */ nv50_gr_construct_xfer_tprop()
2828 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ nv50_gr_construct_xfer_tprop()
2829 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ nv50_gr_construct_xfer_tprop()
2830 xf_emit(ctx, 1, 0); /* 00000007 OPERATION */ nv50_gr_construct_xfer_tprop()
2831 xf_emit(ctx, 1, 0xcf); /* 000000ff SIFC_FORMAT */ nv50_gr_construct_xfer_tprop()
2832 xf_emit(ctx, 1, 0xcf); /* 000000ff DRAW_COLOR_FORMAT */ nv50_gr_construct_xfer_tprop()
2833 xf_emit(ctx, 1, 0xcf); /* 000000ff SRC_FORMAT */ nv50_gr_construct_xfer_tprop()
2835 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_xfer_tprop()
2836 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_xfer_tprop()
2837 xf_emit(ctx, 1, 0); /* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_xfer_tprop()
2838 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ nv50_gr_construct_xfer_tprop()
2839 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ nv50_gr_construct_xfer_tprop()
2840 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ nv50_gr_construct_xfer_tprop()
2841 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ nv50_gr_construct_xfer_tprop()
2842 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ nv50_gr_construct_xfer_tprop()
2843 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ nv50_gr_construct_xfer_tprop()
2844 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ nv50_gr_construct_xfer_tprop()
2845 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ nv50_gr_construct_xfer_tprop()
2846 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_xfer_tprop()
2847 xf_emit(ctx, 8, 1); /* 00000001 UNK19E0 */ nv50_gr_construct_xfer_tprop()
2848 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ nv50_gr_construct_xfer_tprop()
2849 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ nv50_gr_construct_xfer_tprop()
2850 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ nv50_gr_construct_xfer_tprop()
2851 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ nv50_gr_construct_xfer_tprop()
2852 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ nv50_gr_construct_xfer_tprop()
2853 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ nv50_gr_construct_xfer_tprop()
2854 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ nv50_gr_construct_xfer_tprop()
2855 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_xfer_tprop()
2856 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ nv50_gr_construct_xfer_tprop()
2857 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ nv50_gr_construct_xfer_tprop()
2859 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_xfer_tprop()
2861 xf_emit(ctx, 1, 0); /* ff */ nv50_gr_construct_xfer_tprop()
2863 xf_emit(ctx, 3, 0); /* 1, 7, 3ff */ nv50_gr_construct_xfer_tprop()
2864 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ nv50_gr_construct_xfer_tprop()
2865 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ nv50_gr_construct_xfer_tprop()
2866 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_xfer_tprop()
2867 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_xfer_tprop()
2868 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ nv50_gr_construct_xfer_tprop()
2869 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_xfer_tprop()
2870 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_xfer_tprop()
2871 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_xfer_tprop()
2872 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_xfer_tprop()
2873 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_xfer_tprop()
2874 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_xfer_tprop()
2875 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ nv50_gr_construct_xfer_tprop()
2876 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ nv50_gr_construct_xfer_tprop()
2877 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ nv50_gr_construct_xfer_tprop()
2878 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ nv50_gr_construct_xfer_tprop()
2879 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_xfer_tprop()
2880 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ nv50_gr_construct_xfer_tprop()
2881 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ nv50_gr_construct_xfer_tprop()
2882 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ nv50_gr_construct_xfer_tprop()
2883 xf_emit(ctx, 1, 0); /* 000fffff BLIT_DU_DX_FRACT */ nv50_gr_construct_xfer_tprop()
2884 xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DU_DX_INT */ nv50_gr_construct_xfer_tprop()
2885 xf_emit(ctx, 1, 0); /* 000fffff BLIT_DV_DY_FRACT */ nv50_gr_construct_xfer_tprop()
2886 xf_emit(ctx, 1, 1); /* 0001ffff BLIT_DV_DY_INT */ nv50_gr_construct_xfer_tprop()
2887 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_xfer_tprop()
2888 xf_emit(ctx, 1, magic1); /* 3ff/7ff tesla UNK0D68 */ nv50_gr_construct_xfer_tprop()
2889 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_xfer_tprop()
2890 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ nv50_gr_construct_xfer_tprop()
2891 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_xfer_tprop()
2892 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_xfer_tprop()
2893 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_xfer_tprop()
2894 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_xfer_tprop()
2896 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_xfer_tprop()
2897 xf_emit(ctx, 8, 0); /* 0000ffff DMA_COLOR */ nv50_gr_construct_xfer_tprop()
2898 xf_emit(ctx, 1, 0); /* 0000ffff DMA_GLOBAL */ nv50_gr_construct_xfer_tprop()
2899 xf_emit(ctx, 1, 0); /* 0000ffff DMA_LOCAL */ nv50_gr_construct_xfer_tprop()
2900 xf_emit(ctx, 1, 0); /* 0000ffff DMA_STACK */ nv50_gr_construct_xfer_tprop()
2901 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_xfer_tprop()
2902 xf_emit(ctx, 1, 0); /* 0000ffff DMA_DST */ nv50_gr_construct_xfer_tprop()
2903 xf_emit(ctx, 1, 0); /* 7 */ nv50_gr_construct_xfer_tprop()
2904 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_xfer_tprop()
2905 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_xfer_tprop()
2906 xf_emit(ctx, 8, 0); /* 000000ff RT_ADDRESS_HIGH */ nv50_gr_construct_xfer_tprop()
2907 xf_emit(ctx, 8, 0); /* ffffffff RT_LAYER_STRIDE */ nv50_gr_construct_xfer_tprop()
2908 xf_emit(ctx, 8, 0); /* ffffffff RT_ADDRESS_LOW */ nv50_gr_construct_xfer_tprop()
2909 xf_emit(ctx, 8, 8); /* 0000007f RT_TILE_MODE */ nv50_gr_construct_xfer_tprop()
2910 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ nv50_gr_construct_xfer_tprop()
2911 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ nv50_gr_construct_xfer_tprop()
2912 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ nv50_gr_construct_xfer_tprop()
2913 xf_emit(ctx, 8, 0x400); /* 0fffffff RT_HORIZ */ nv50_gr_construct_xfer_tprop()
2914 xf_emit(ctx, 8, 0x300); /* 0000ffff RT_VERT */ nv50_gr_construct_xfer_tprop()
2915 xf_emit(ctx, 1, 1); /* 00001fff RT_ARRAY_MODE */ nv50_gr_construct_xfer_tprop()
2916 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ nv50_gr_construct_xfer_tprop()
2917 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ nv50_gr_construct_xfer_tprop()
2918 xf_emit(ctx, 1, 0x20); /* 00000fff DST_TILE_MODE */ nv50_gr_construct_xfer_tprop()
2919 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ nv50_gr_construct_xfer_tprop()
2920 xf_emit(ctx, 1, 0x100); /* 0001ffff DST_HEIGHT */ nv50_gr_construct_xfer_tprop()
2921 xf_emit(ctx, 1, 0); /* 000007ff DST_LAYER */ nv50_gr_construct_xfer_tprop()
2922 xf_emit(ctx, 1, 1); /* 00000001 DST_LINEAR */ nv50_gr_construct_xfer_tprop()
2923 xf_emit(ctx, 1, 0); /* ffffffff DST_ADDRESS_LOW */ nv50_gr_construct_xfer_tprop()
2924 xf_emit(ctx, 1, 0); /* 000000ff DST_ADDRESS_HIGH */ nv50_gr_construct_xfer_tprop()
2925 xf_emit(ctx, 1, 0x40); /* 0007ffff DST_PITCH */ nv50_gr_construct_xfer_tprop()
2926 xf_emit(ctx, 1, 0x100); /* 0001ffff DST_WIDTH */ nv50_gr_construct_xfer_tprop()
2927 xf_emit(ctx, 1, 0); /* 0000ffff */ nv50_gr_construct_xfer_tprop()
2928 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK15AC */ nv50_gr_construct_xfer_tprop()
2929 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_xfer_tprop()
2930 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ nv50_gr_construct_xfer_tprop()
2931 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ nv50_gr_construct_xfer_tprop()
2932 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_xfer_tprop()
2934 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_xfer_tprop()
2935 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ nv50_gr_construct_xfer_tprop()
2936 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_xfer_tprop()
2937 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_xfer_tprop()
2938 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_xfer_tprop()
2939 xf_emit(ctx, 1, 2); /* 00000003 tesla UNK143C */ nv50_gr_construct_xfer_tprop()
2940 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ nv50_gr_construct_xfer_tprop()
2941 xf_emit(ctx, 1, 0); /* 0000ffff DMA_ZETA */ nv50_gr_construct_xfer_tprop()
2942 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ nv50_gr_construct_xfer_tprop()
2943 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_xfer_tprop()
2944 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ nv50_gr_construct_xfer_tprop()
2945 xf_emit(ctx, 2, 0); /* ffff, ff/3ff */ nv50_gr_construct_xfer_tprop()
2946 xf_emit(ctx, 1, 0); /* 0001ffff GP_BUILTIN_RESULT_EN */ nv50_gr_construct_xfer_tprop()
2947 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_xfer_tprop()
2948 xf_emit(ctx, 1, 0); /* 000000ff STENCIL_FRONT_MASK */ nv50_gr_construct_xfer_tprop()
2949 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ nv50_gr_construct_xfer_tprop()
2950 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_xfer_tprop()
2951 xf_emit(ctx, 1, 0); /* ffffffff ZETA_LAYER_STRIDE */ nv50_gr_construct_xfer_tprop()
2952 xf_emit(ctx, 1, 0); /* 000000ff ZETA_ADDRESS_HIGH */ nv50_gr_construct_xfer_tprop()
2953 xf_emit(ctx, 1, 0); /* ffffffff ZETA_ADDRESS_LOW */ nv50_gr_construct_xfer_tprop()
2954 xf_emit(ctx, 1, 4); /* 00000007 ZETA_TILE_MODE */ nv50_gr_construct_xfer_tprop()
2955 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_xfer_tprop()
2956 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_xfer_tprop()
2957 xf_emit(ctx, 1, 0x400); /* 0fffffff ZETA_HORIZ */ nv50_gr_construct_xfer_tprop()
2958 xf_emit(ctx, 1, 0x300); /* 0000ffff ZETA_VERT */ nv50_gr_construct_xfer_tprop()
2959 xf_emit(ctx, 1, 0x1001); /* 00001fff ZETA_ARRAY_MODE */ nv50_gr_construct_xfer_tprop()
2960 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_xfer_tprop()
2961 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_xfer_tprop()
2963 xf_emit(ctx, 1, 0); /* 00000001 */ nv50_gr_construct_xfer_tprop()
2964 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_xfer_tprop()
2965 xf_emit(ctx, 1, 0x11); /* 3f/7f RT_FORMAT */ nv50_gr_construct_xfer_tprop()
2966 xf_emit(ctx, 7, 0); /* 3f/7f RT_FORMAT */ nv50_gr_construct_xfer_tprop()
2967 xf_emit(ctx, 1, 0x0fac6881); /* 0fffffff RT_CONTROL */ nv50_gr_construct_xfer_tprop()
2968 xf_emit(ctx, 1, 0xf); /* 0000000f COLOR_MASK */ nv50_gr_construct_xfer_tprop()
2969 xf_emit(ctx, 7, 0); /* 0000000f COLOR_MASK */ nv50_gr_construct_xfer_tprop()
2970 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_xfer_tprop()
2971 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ nv50_gr_construct_xfer_tprop()
2972 xf_emit(ctx, 1, 0); /* 00000003 UNK0F90 */ nv50_gr_construct_xfer_tprop()
2973 xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ nv50_gr_construct_xfer_tprop()
2974 xf_emit(ctx, 1, 0); /* 7 */ nv50_gr_construct_xfer_tprop()
2975 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ nv50_gr_construct_xfer_tprop()
2977 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ nv50_gr_construct_xfer_tprop()
2978 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_xfer_tprop()
2980 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_xfer_tprop()
2981 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ nv50_gr_construct_xfer_tprop()
2982 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_xfer_tprop()
2984 xf_emit(ctx, 1, 0x0fac6881); /* fffffff */ nv50_gr_construct_xfer_tprop()
2985 xf_emit(ctx, 1, magic2); /* 001fffff tesla UNK0F78 */ nv50_gr_construct_xfer_tprop()
2986 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_BOUNDS_EN */ nv50_gr_construct_xfer_tprop()
2987 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_xfer_tprop()
2988 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE_ENABLE */ nv50_gr_construct_xfer_tprop()
2989 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ nv50_gr_construct_xfer_tprop()
2990 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK0FB0 */ nv50_gr_construct_xfer_tprop()
2991 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_xfer_tprop()
2992 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ nv50_gr_construct_xfer_tprop()
2993 xf_emit(ctx, 1, 0); /* 00000001 STENCIL_FRONT_ENABLE */ nv50_gr_construct_xfer_tprop()
2994 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK15B4 */ nv50_gr_construct_xfer_tprop()
2995 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK19CC */ nv50_gr_construct_xfer_tprop()
2996 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_xfer_tprop()
2997 xf_emit(ctx, 1, 0); /* 00000001 SAMPLECNT_ENABLE */ nv50_gr_construct_xfer_tprop()
2998 xf_emit(ctx, 1, 0); /* 0000000f ZETA_FORMAT */ nv50_gr_construct_xfer_tprop()
2999 xf_emit(ctx, 1, 1); /* 00000001 ZETA_ENABLE */ nv50_gr_construct_xfer_tprop()
3001 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_xfer_tprop()
3002 xf_emit(ctx, 1, 0); /* 0000000f tesla UNK15C8 */ nv50_gr_construct_xfer_tprop()
3004 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A3C */ nv50_gr_construct_xfer_tprop()
3006 xf_emit(ctx, 3, 0); /* 7/f, 1, ffff0ff3 */ nv50_gr_construct_xfer_tprop()
3007 xf_emit(ctx, 1, 0xfac6881); /* fffffff */ nv50_gr_construct_xfer_tprop()
3008 xf_emit(ctx, 4, 0); /* 1, 1, 1, 3ff */ nv50_gr_construct_xfer_tprop()
3009 xf_emit(ctx, 1, 4); /* 7 */ nv50_gr_construct_xfer_tprop()
3010 xf_emit(ctx, 1, 0); /* 1 */ nv50_gr_construct_xfer_tprop()
3011 xf_emit(ctx, 2, 1); /* 1 */ nv50_gr_construct_xfer_tprop()
3012 xf_emit(ctx, 2, 0); /* 7, f */ nv50_gr_construct_xfer_tprop()
3013 xf_emit(ctx, 1, 1); /* 1 */ nv50_gr_construct_xfer_tprop()
3014 xf_emit(ctx, 1, 0); /* 7/f */ nv50_gr_construct_xfer_tprop()
3016 xf_emit(ctx, 0x9, 0); /* 1 */ nv50_gr_construct_xfer_tprop()
3018 xf_emit(ctx, 0x8, 0); /* 1 */ nv50_gr_construct_xfer_tprop()
3019 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_xfer_tprop()
3020 xf_emit(ctx, 8, 1); /* 1 */ nv50_gr_construct_xfer_tprop()
3021 xf_emit(ctx, 1, 0x11); /* 7f */ nv50_gr_construct_xfer_tprop()
3022 xf_emit(ctx, 7, 0); /* 7f */ nv50_gr_construct_xfer_tprop()
3023 xf_emit(ctx, 1, 0xfac6881); /* fffffff */ nv50_gr_construct_xfer_tprop()
3024 xf_emit(ctx, 1, 0xf); /* f */ nv50_gr_construct_xfer_tprop()
3025 xf_emit(ctx, 7, 0); /* f */ nv50_gr_construct_xfer_tprop()
3026 xf_emit(ctx, 1, 0x11); /* 7f */ nv50_gr_construct_xfer_tprop()
3027 xf_emit(ctx, 1, 1); /* 1 */ nv50_gr_construct_xfer_tprop()
3028 xf_emit(ctx, 5, 0); /* 1, 7, 3ff, 3, 7 */ nv50_gr_construct_xfer_tprop()
3030 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ nv50_gr_construct_xfer_tprop()
3031 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_xfer_tprop()
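
Every value above is funneled through xf_emit(), whose body these results elide. A minimal sketch of it, reconstructed under the conventions visible in ctxnv50.c (treat the mode check and the 8-dword stride as assumptions): in VALS mode it writes num copies of val into the context image, striding by 8 dwords because the xfer areas of the shader units are interleaved, and in every mode it advances ctxvals_pos so both generation passes account for the same space.

/* hedged sketch; not a verbatim copy of the elided helper */
static void
xf_emit(struct nvkm_grctx *ctx, int num, u32 val)
{
	int i;

	if (val && ctx->mode == NVKM_GRCTX_VALS)
		for (i = 0; i < num; i++)
			nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
	ctx->ctxvals_pos += num << 3;
}
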
3037 nv50_gr_construct_xfer_tex(struct nvkm_grctx *ctx) nv50_gr_construct_xfer_tex() argument
3039 struct nvkm_device *device = ctx->device; nv50_gr_construct_xfer_tex()
3040 xf_emit(ctx, 2, 0); /* 1 LINKED_TSC. yes, 2. */ nv50_gr_construct_xfer_tex()
3042 xf_emit(ctx, 1, 0); /* 3 */ nv50_gr_construct_xfer_tex()
3043 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DU_DX_INT */ nv50_gr_construct_xfer_tex()
3044 xf_emit(ctx, 1, 0); /* fffff BLIT_DU_DX_FRACT */ nv50_gr_construct_xfer_tex()
3045 xf_emit(ctx, 1, 1); /* 1ffff BLIT_DV_DY_INT */ nv50_gr_construct_xfer_tex()
3046 xf_emit(ctx, 1, 0); /* fffff BLIT_DV_DY_FRACT */ nv50_gr_construct_xfer_tex()
3048 xf_emit(ctx, 1, 0); /* 3 BLIT_CONTROL */ nv50_gr_construct_xfer_tex()
3050 xf_emit(ctx, 2, 0); /* 3ff, 1 */ nv50_gr_construct_xfer_tex()
3051 xf_emit(ctx, 1, 0x2a712488); /* ffffffff SRC_TIC_0 */ nv50_gr_construct_xfer_tex()
3052 xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_1 */ nv50_gr_construct_xfer_tex()
3053 xf_emit(ctx, 1, 0x4085c000); /* ffffffff SRC_TIC_2 */ nv50_gr_construct_xfer_tex()
3054 xf_emit(ctx, 1, 0x40); /* ffffffff SRC_TIC_3 */ nv50_gr_construct_xfer_tex()
3055 xf_emit(ctx, 1, 0x100); /* ffffffff SRC_TIC_4 */ nv50_gr_construct_xfer_tex()
3056 xf_emit(ctx, 1, 0x10100); /* ffffffff SRC_TIC_5 */ nv50_gr_construct_xfer_tex()
3057 xf_emit(ctx, 1, 0x02800000); /* ffffffff SRC_TIC_6 */ nv50_gr_construct_xfer_tex()
3058 xf_emit(ctx, 1, 0); /* ffffffff SRC_TIC_7 */ nv50_gr_construct_xfer_tex()
3060 xf_emit(ctx, 1, 0); /* 00000001 turing UNK358 */ nv50_gr_construct_xfer_tex()
3061 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */ nv50_gr_construct_xfer_tex()
3062 xf_emit(ctx, 1, 0); /* 00000003 turing UNK37C tesla UNK1690 */ nv50_gr_construct_xfer_tex()
3063 xf_emit(ctx, 1, 0); /* 00000003 BLIT_CONTROL */ nv50_gr_construct_xfer_tex()
3064 xf_emit(ctx, 1, 0); /* 00000001 turing UNK32C tesla UNK0F94 */ nv50_gr_construct_xfer_tex()
3066 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34? */ nv50_gr_construct_xfer_tex()
3067 xf_emit(ctx, 1, 0); /* 00000003 */ nv50_gr_construct_xfer_tex()
3068 xf_emit(ctx, 1, 0); /* 000003ff */ nv50_gr_construct_xfer_tex()
3069 xf_emit(ctx, 1, 0); /* 00000003 */ nv50_gr_construct_xfer_tex()
3070 xf_emit(ctx, 1, 0); /* 000003ff */ nv50_gr_construct_xfer_tex()
3071 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */ nv50_gr_construct_xfer_tex()
3072 xf_emit(ctx, 1, 0); /* 00000003 */ nv50_gr_construct_xfer_tex()
3073 xf_emit(ctx, 1, 0); /* 000003ff */ nv50_gr_construct_xfer_tex()
3075 xf_emit(ctx, 0x6, 0); nv50_gr_construct_xfer_tex()
3077 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ nv50_gr_construct_xfer_tex()
3078 xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */ nv50_gr_construct_xfer_tex()
3079 xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */ nv50_gr_construct_xfer_tex()
3083 nv50_gr_construct_xfer_unk8cxx(struct nvkm_grctx *ctx) nv50_gr_construct_xfer_unk8cxx() argument
3085 struct nvkm_device *device = ctx->device; nv50_gr_construct_xfer_unk8cxx()
3086 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ nv50_gr_construct_xfer_unk8cxx()
3087 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_xfer_unk8cxx()
3088 xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */ nv50_gr_construct_xfer_unk8cxx()
3089 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_xfer_unk8cxx()
3090 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */ nv50_gr_construct_xfer_unk8cxx()
3091 xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */ nv50_gr_construct_xfer_unk8cxx()
3092 xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */ nv50_gr_construct_xfer_unk8cxx()
3093 xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */ nv50_gr_construct_xfer_unk8cxx()
3094 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ nv50_gr_construct_xfer_unk8cxx()
3095 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ nv50_gr_construct_xfer_unk8cxx()
3096 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */ nv50_gr_construct_xfer_unk8cxx()
3098 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_xfer_unk8cxx()
3099 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ nv50_gr_construct_xfer_unk8cxx()
3100 xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ nv50_gr_construct_xfer_unk8cxx()
3101 xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ nv50_gr_construct_xfer_unk8cxx()
3102 xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */ nv50_gr_construct_xfer_unk8cxx()
3103 xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ nv50_gr_construct_xfer_unk8cxx()
3104 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_xfer_unk8cxx()
3105 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */ nv50_gr_construct_xfer_unk8cxx()
3106 xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ nv50_gr_construct_xfer_unk8cxx()
3107 xf_emit(ctx, 1, 0); /* ffff0ff3 */ nv50_gr_construct_xfer_unk8cxx()
3108 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ nv50_gr_construct_xfer_unk8cxx()
3109 xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */ nv50_gr_construct_xfer_unk8cxx()
3110 xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */ nv50_gr_construct_xfer_unk8cxx()
3111 xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ nv50_gr_construct_xfer_unk8cxx()
3112 xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */ nv50_gr_construct_xfer_unk8cxx()
3113 xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */ nv50_gr_construct_xfer_unk8cxx()
3114 xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */ nv50_gr_construct_xfer_unk8cxx()
3115 xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */ nv50_gr_construct_xfer_unk8cxx()
3116 xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */ nv50_gr_construct_xfer_unk8cxx()
3117 xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ nv50_gr_construct_xfer_unk8cxx()
3118 xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ nv50_gr_construct_xfer_unk8cxx()
3122 nv50_gr_construct_xfer_tp(struct nvkm_grctx *ctx) nv50_gr_construct_xfer_tp() argument
3124 struct nvkm_device *device = ctx->device; nv50_gr_construct_xfer_tp()
3126 nv50_gr_construct_xfer_unk84xx(ctx); nv50_gr_construct_xfer_tp()
3127 nv50_gr_construct_xfer_tprop(ctx); nv50_gr_construct_xfer_tp()
3128 nv50_gr_construct_xfer_tex(ctx); nv50_gr_construct_xfer_tp()
3129 nv50_gr_construct_xfer_unk8cxx(ctx); nv50_gr_construct_xfer_tp()
3131 nv50_gr_construct_xfer_tex(ctx); nv50_gr_construct_xfer_tp()
3132 nv50_gr_construct_xfer_tprop(ctx); nv50_gr_construct_xfer_tp()
3133 nv50_gr_construct_xfer_unk8cxx(ctx); nv50_gr_construct_xfer_tp()
3134 nv50_gr_construct_xfer_unk84xx(ctx); nv50_gr_construct_xfer_tp()
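
The two call orders above sit in opposite arms of a conditional the search elided. A hedged reconstruction (the exact chipset cutoff is an assumption; only the two orderings are taken from the listing):

if (device->chipset < 0xa0) {	/* cutoff assumed */
	nv50_gr_construct_xfer_unk84xx(ctx);
	nv50_gr_construct_xfer_tprop(ctx);
	nv50_gr_construct_xfer_tex(ctx);
	nv50_gr_construct_xfer_unk8cxx(ctx);
} else {			/* later chipsets reverse the order */
	nv50_gr_construct_xfer_tex(ctx);
	nv50_gr_construct_xfer_tprop(ctx);
	nv50_gr_construct_xfer_unk8cxx(ctx);
	nv50_gr_construct_xfer_unk84xx(ctx);
}
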
3139 nv50_gr_construct_xfer_mpc(struct nvkm_grctx *ctx) nv50_gr_construct_xfer_mpc() argument
3141 struct nvkm_device *device = ctx->device; nv50_gr_construct_xfer_mpc()
3166 xf_emit(ctx, 1, 0); /* ff */ nv50_gr_construct_xfer_mpc()
3167 xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */ nv50_gr_construct_xfer_mpc()
3168 xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */ nv50_gr_construct_xfer_mpc()
3169 xf_emit(ctx, 1, 0x04000400); /* ffffffff */ nv50_gr_construct_xfer_mpc()
3171 xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */ nv50_gr_construct_xfer_mpc()
3172 xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */ nv50_gr_construct_xfer_mpc()
3173 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_xfer_mpc()
3174 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ nv50_gr_construct_xfer_mpc()
3176 xf_emit(ctx, 1, 0xe00); /* 7fff */ nv50_gr_construct_xfer_mpc()
3177 xf_emit(ctx, 1, 0x1e00); /* 7fff */ nv50_gr_construct_xfer_mpc()
3179 xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */ nv50_gr_construct_xfer_mpc()
3180 xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ nv50_gr_construct_xfer_mpc()
3181 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_xfer_mpc()
3183 xf_emit(ctx, 2, 0x1000); /* 7fff tesla UNK141C */ nv50_gr_construct_xfer_mpc()
3184 xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */ nv50_gr_construct_xfer_mpc()
3185 xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ nv50_gr_construct_xfer_mpc()
3186 xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ nv50_gr_construct_xfer_mpc()
3187 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ nv50_gr_construct_xfer_mpc()
3189 xf_emit(ctx, 0xb, 0); /* RO */ nv50_gr_construct_xfer_mpc()
3191 xf_emit(ctx, 0xc, 0); /* RO */ nv50_gr_construct_xfer_mpc()
3193 xf_emit(ctx, 0xa, 0); /* RO */ nv50_gr_construct_xfer_mpc()
3195 xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_xfer_mpc()
3196 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_xfer_mpc()
3198 xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */ nv50_gr_construct_xfer_mpc()
3200 xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */ nv50_gr_construct_xfer_mpc()
3201 xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ nv50_gr_construct_xfer_mpc()
3202 xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ nv50_gr_construct_xfer_mpc()
3203 xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ nv50_gr_construct_xfer_mpc()
3204 xf_emit(ctx, 1, 1); /* 00000001 LANES32 */ nv50_gr_construct_xfer_mpc()
3205 xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */ nv50_gr_construct_xfer_mpc()
3206 xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */ nv50_gr_construct_xfer_mpc()
3207 xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */ nv50_gr_construct_xfer_mpc()
3208 xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ nv50_gr_construct_xfer_mpc()
3209 xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNK0FAC */ nv50_gr_construct_xfer_mpc()
3210 xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ nv50_gr_construct_xfer_mpc()
3212 xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ nv50_gr_construct_xfer_mpc()
3213 xf_emit(ctx, 1, 0); /* ff/3ff */ nv50_gr_construct_xfer_mpc()
3214 xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */ nv50_gr_construct_xfer_mpc()
3215 xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */ nv50_gr_construct_xfer_mpc()
3216 xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */ nv50_gr_construct_xfer_mpc()
3217 xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ nv50_gr_construct_xfer_mpc()
3218 xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ nv50_gr_construct_xfer_mpc()
3219 xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */ nv50_gr_construct_xfer_mpc()
3220 xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ nv50_gr_construct_xfer_mpc()
3221 xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */ nv50_gr_construct_xfer_mpc()
3222 xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */ nv50_gr_construct_xfer_mpc()
3223 xf_emit(ctx, 1, 0); /* 00000007 */ nv50_gr_construct_xfer_mpc()
3224 xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */ nv50_gr_construct_xfer_mpc()
3225 xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ nv50_gr_construct_xfer_mpc()
3227 xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ nv50_gr_construct_xfer_mpc()
3228 xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ nv50_gr_construct_xfer_mpc()
3229 xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ nv50_gr_construct_xfer_mpc()
3230 xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ nv50_gr_construct_xfer_mpc()
3231 xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */ nv50_gr_construct_xfer_mpc()
3232 xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ nv50_gr_construct_xfer_mpc()
3233 xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ nv50_gr_construct_xfer_mpc()
3234 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ nv50_gr_construct_xfer_mpc()
3235 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ nv50_gr_construct_xfer_mpc()
3236 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ nv50_gr_construct_xfer_mpc()
3237 xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ nv50_gr_construct_xfer_mpc()
3238 xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ nv50_gr_construct_xfer_mpc()
3239 xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ nv50_gr_construct_xfer_mpc()
3240 xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ nv50_gr_construct_xfer_mpc()
3242 xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ nv50_gr_construct_xfer_mpc()
3243 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ nv50_gr_construct_xfer_mpc()
3244 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ nv50_gr_construct_xfer_mpc()
3245 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ nv50_gr_construct_xfer_mpc()
3246 xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ nv50_gr_construct_xfer_mpc()
3247 xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ nv50_gr_construct_xfer_mpc()
3248 xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ nv50_gr_construct_xfer_mpc()
3249 xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ nv50_gr_construct_xfer_mpc()
3250 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */ nv50_gr_construct_xfer_mpc()
3251 xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ nv50_gr_construct_xfer_mpc()
3253 xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ nv50_gr_construct_xfer_mpc()
3254 xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ nv50_gr_construct_xfer_mpc()
3257 xf_emit(ctx, 0x3a0, 0); nv50_gr_construct_xfer_mpc()
3259 xf_emit(ctx, 0x3a2, 0); nv50_gr_construct_xfer_mpc()
3261 xf_emit(ctx, 0x39f, 0); nv50_gr_construct_xfer_mpc()
3263 xf_emit(ctx, 0x3a3, 0); nv50_gr_construct_xfer_mpc()
3264 xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ nv50_gr_construct_xfer_mpc()
3265 xf_emit(ctx, 1, 0); /* 7 OPERATION */ nv50_gr_construct_xfer_mpc()
3266 xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */ nv50_gr_construct_xfer_mpc()
3267 xf_emit(ctx, 0x2d, 0); nv50_gr_construct_xfer_mpc()
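
The four zero-fills at source lines 3257-3263 above are alternatives, not a sequence; the conditions around them were elided. A hedged reconstruction (the chipset tests are assumptions, only the pad sizes come from the listing):

if (device->chipset == 0x50)
	xf_emit(ctx, 0x3a0, 0);
else if (device->chipset < 0x94)
	xf_emit(ctx, 0x3a2, 0);
else if (device->chipset == 0x98 || device->chipset == 0xaa)
	xf_emit(ctx, 0x39f, 0);
else
	xf_emit(ctx, 0x3a3, 0);
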
3271 nv50_gr_construct_xfer2(struct nvkm_grctx *ctx) nv50_gr_construct_xfer2() argument
3273 struct nvkm_device *device = ctx->device; nv50_gr_construct_xfer2()
3276 u32 units = nv_rd32 (ctx->device, 0x1540); nv50_gr_construct_xfer2()
3279 offset = (ctx->ctxvals_pos+0x3f)&~0x3f; nv50_gr_construct_xfer2()
3283 ctx->ctxvals_pos = offset + i; nv50_gr_construct_xfer2()
3287 xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ nv50_gr_construct_xfer2()
3289 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3290 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer2()
3291 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer2()
3295 ctx->ctxvals_pos = offset; nv50_gr_construct_xfer2()
3298 xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ nv50_gr_construct_xfer2()
3300 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3302 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3303 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer2()
3304 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer2()
3307 ctx->ctxvals_pos = offset + 1; nv50_gr_construct_xfer2()
3309 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3311 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3312 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer2()
3313 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer2()
3316 ctx->ctxvals_pos = offset + 2; nv50_gr_construct_xfer2()
3318 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3320 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3322 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3323 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer2()
3324 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer2()
3327 ctx->ctxvals_pos = offset + 3; nv50_gr_construct_xfer2()
3329 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3331 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3333 nv50_gr_construct_xfer_mpc(ctx); nv50_gr_construct_xfer2()
3334 if ((ctx->ctxvals_pos-offset)/8 > size) nv50_gr_construct_xfer2()
3335 size = (ctx->ctxvals_pos-offset)/8; nv50_gr_construct_xfer2()
3337 ctx->ctxvals_pos = offset + size * 8; nv50_gr_construct_xfer2()
3338 ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; nv50_gr_construct_xfer2()
3339 cp_lsr (ctx, offset); nv50_gr_construct_xfer2()
3340 cp_out (ctx, CP_SET_XFER_POINTER); nv50_gr_construct_xfer2()
3341 cp_lsr (ctx, size); nv50_gr_construct_xfer2()
3342 cp_out (ctx, CP_SEEK_2); nv50_gr_construct_xfer2()
3343 cp_out (ctx, CP_XFER_2); nv50_gr_construct_xfer2()
3344 cp_wait(ctx, XFER, BUSY); nv50_gr_construct_xfer2()
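
Distilled shape of the sizing dance in nv50_gr_construct_xfer2() above: each enabled unit's value stream starts one dword into a shared 0x40-aligned region (offset + 0..3), the widest stream decides the strand size in 8-dword rows, and the closing opcodes tell the ctxprog where that strand lives. emit_strand_values() is a hypothetical stand-in for the per-unit nv50_gr_construct_xfer_mpc() calls:

u32 offset = (ctx->ctxvals_pos + 0x3f) & ~0x3f;	/* align to 0x40 dwords */
u32 size = 0;
int i;

for (i = 0; i < 4; i++) {
	ctx->ctxvals_pos = offset + i;
	emit_strand_values(ctx, i);		/* hypothetical stand-in */
	if ((ctx->ctxvals_pos - offset) / 8 > size)
		size = (ctx->ctxvals_pos - offset) / 8;
}
ctx->ctxvals_pos = (offset + size * 8 + 0x3f) & ~0x3f;
cp_lsr (ctx, offset);
cp_out (ctx, CP_SET_XFER_POINTER);
cp_lsr (ctx, size);
cp_out (ctx, CP_SEEK_2);
cp_out (ctx, CP_XFER_2);
cp_wait(ctx, XFER, BUSY);
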
H A Dctxnv40.h23 cp_out(struct nvkm_grctx *ctx, u32 inst) cp_out() argument
25 u32 *ctxprog = ctx->data; cp_out()
27 if (ctx->mode != NVKM_GRCTX_PROG) cp_out()
30 BUG_ON(ctx->ctxprog_len == ctx->ctxprog_max); cp_out()
31 ctxprog[ctx->ctxprog_len++] = inst; cp_out()
35 cp_lsr(struct nvkm_grctx *ctx, u32 val) cp_lsr() argument
37 cp_out(ctx, CP_LOAD_SR | val); cp_lsr()
41 cp_ctx(struct nvkm_grctx *ctx, u32 reg, u32 length) cp_ctx() argument
43 ctx->ctxprog_reg = (reg - 0x00400000) >> 2; cp_ctx()
45 ctx->ctxvals_base = ctx->ctxvals_pos; cp_ctx()
46 ctx->ctxvals_pos = ctx->ctxvals_base + length; cp_ctx()
49 cp_lsr(ctx, length); cp_ctx()
53 cp_out(ctx, CP_CTX | (length << CP_CTX_COUNT_SHIFT) | ctx->ctxprog_reg); cp_ctx()
57 cp_name(struct nvkm_grctx *ctx, int name) cp_name() argument
59 u32 *ctxprog = ctx->data; cp_name()
62 if (ctx->mode != NVKM_GRCTX_PROG) cp_name()
65 ctx->ctxprog_label[name] = ctx->ctxprog_len; cp_name()
66 for (i = 0; i < ctx->ctxprog_len; i++) { cp_name()
72 (ctx->ctxprog_len << CP_BRA_IP_SHIFT); cp_name()
77 _cp_bra(struct nvkm_grctx *ctx, u32 mod, int flag, int state, int name) _cp_bra() argument
82 ip = ctx->ctxprog_label[name] << CP_BRA_IP_SHIFT; _cp_bra()
87 cp_out(ctx, CP_BRA | (mod << 18) | ip | flag | _cp_bra()
95 _cp_wait(struct nvkm_grctx *ctx, int flag, int state) _cp_wait() argument
97 cp_out(ctx, CP_WAIT | flag | (state ? CP_WAIT_SET : 0)); _cp_wait()
102 _cp_set(struct nvkm_grctx *ctx, int flag, int state) _cp_set() argument
104 cp_out(ctx, CP_SET | flag | (state ? CP_SET_1 : 0)); _cp_set()
109 cp_pos(struct nvkm_grctx *ctx, int offset) cp_pos() argument
111 ctx->ctxvals_pos = offset; cp_pos()
112 ctx->ctxvals_base = ctx->ctxvals_pos; cp_pos()
114 cp_lsr(ctx, ctx->ctxvals_pos); cp_pos()
115 cp_out(ctx, CP_SET_CONTEXT_POINTER); cp_pos()
119 gr_def(struct nvkm_grctx *ctx, u32 reg, u32 val) gr_def() argument
121 if (ctx->mode != NVKM_GRCTX_VALS) gr_def()
125 reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base; gr_def()
127 nv_wo32(ctx->data, reg * 4, val); gr_def()
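
Taken together, the helpers above form a small two-mode assembler: in NVKM_GRCTX_PROG mode cp_out() appends ctxprog opcodes and cp_name() backpatches every earlier branch that referenced the label, while in NVKM_GRCTX_VALS mode cp_ctx() reserves a run of registers in the context image and gr_def() fills in their defaults. A minimal usage sketch, with label and register names borrowed from the nv40 generator below:

cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save);	/* forward branch */
cp_name(ctx, cp_setup_save);		/* define label; fixes up the branch */
cp_wait(ctx, STATUS, IDLE);		/* stall until the engine is idle */
cp_ctx (ctx, 0x400314, 1);		/* map one register into the image... */
gr_def (ctx, 0x400314, 0x00000000);	/* ...and set its default value */
cp_out (ctx, CP_END);
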
H A Dctxnv40.c49 * rather than a cp_lsr(ctx, dwords_for_1_vs_unit) instruction.
160 nv40_gr_construct_general(struct nvkm_grctx *ctx) nv40_gr_construct_general() argument
162 struct nvkm_device *device = ctx->device; nv40_gr_construct_general()
165 cp_ctx(ctx, 0x4000a4, 1); nv40_gr_construct_general()
166 gr_def(ctx, 0x4000a4, 0x00000008); nv40_gr_construct_general()
167 cp_ctx(ctx, 0x400144, 58); nv40_gr_construct_general()
168 gr_def(ctx, 0x400144, 0x00000001); nv40_gr_construct_general()
169 cp_ctx(ctx, 0x400314, 1); nv40_gr_construct_general()
170 gr_def(ctx, 0x400314, 0x00000000); nv40_gr_construct_general()
171 cp_ctx(ctx, 0x400400, 10); nv40_gr_construct_general()
172 cp_ctx(ctx, 0x400480, 10); nv40_gr_construct_general()
173 cp_ctx(ctx, 0x400500, 19); nv40_gr_construct_general()
174 gr_def(ctx, 0x400514, 0x00040000); nv40_gr_construct_general()
175 gr_def(ctx, 0x400524, 0x55555555); nv40_gr_construct_general()
176 gr_def(ctx, 0x400528, 0x55555555); nv40_gr_construct_general()
177 gr_def(ctx, 0x40052c, 0x55555555); nv40_gr_construct_general()
178 gr_def(ctx, 0x400530, 0x55555555); nv40_gr_construct_general()
179 cp_ctx(ctx, 0x400560, 6); nv40_gr_construct_general()
180 gr_def(ctx, 0x400568, 0x0000ffff); nv40_gr_construct_general()
181 gr_def(ctx, 0x40056c, 0x0000ffff); nv40_gr_construct_general()
182 cp_ctx(ctx, 0x40057c, 5); nv40_gr_construct_general()
183 cp_ctx(ctx, 0x400710, 3); nv40_gr_construct_general()
184 gr_def(ctx, 0x400710, 0x20010001); nv40_gr_construct_general()
185 gr_def(ctx, 0x400714, 0x0f73ef00); nv40_gr_construct_general()
186 cp_ctx(ctx, 0x400724, 1); nv40_gr_construct_general()
187 gr_def(ctx, 0x400724, 0x02008821); nv40_gr_construct_general()
188 cp_ctx(ctx, 0x400770, 3); nv40_gr_construct_general()
190 cp_ctx(ctx, 0x400814, 4); nv40_gr_construct_general()
191 cp_ctx(ctx, 0x400828, 5); nv40_gr_construct_general()
192 cp_ctx(ctx, 0x400840, 5); nv40_gr_construct_general()
193 gr_def(ctx, 0x400850, 0x00000040); nv40_gr_construct_general()
194 cp_ctx(ctx, 0x400858, 4); nv40_gr_construct_general()
195 gr_def(ctx, 0x400858, 0x00000040); nv40_gr_construct_general()
196 gr_def(ctx, 0x40085c, 0x00000040); nv40_gr_construct_general()
197 gr_def(ctx, 0x400864, 0x80000000); nv40_gr_construct_general()
198 cp_ctx(ctx, 0x40086c, 9); nv40_gr_construct_general()
199 gr_def(ctx, 0x40086c, 0x80000000); nv40_gr_construct_general()
200 gr_def(ctx, 0x400870, 0x80000000); nv40_gr_construct_general()
201 gr_def(ctx, 0x400874, 0x80000000); nv40_gr_construct_general()
202 gr_def(ctx, 0x400878, 0x80000000); nv40_gr_construct_general()
203 gr_def(ctx, 0x400888, 0x00000040); nv40_gr_construct_general()
204 gr_def(ctx, 0x40088c, 0x80000000); nv40_gr_construct_general()
205 cp_ctx(ctx, 0x4009c0, 8); nv40_gr_construct_general()
206 gr_def(ctx, 0x4009cc, 0x80000000); nv40_gr_construct_general()
207 gr_def(ctx, 0x4009dc, 0x80000000); nv40_gr_construct_general()
209 cp_ctx(ctx, 0x400840, 20); nv40_gr_construct_general()
210 if (nv44_gr_class(ctx->device)) { nv40_gr_construct_general()
212 gr_def(ctx, 0x400860 + (i * 4), 0x00000001); nv40_gr_construct_general()
214 gr_def(ctx, 0x400880, 0x00000040); nv40_gr_construct_general()
215 gr_def(ctx, 0x400884, 0x00000040); nv40_gr_construct_general()
216 gr_def(ctx, 0x400888, 0x00000040); nv40_gr_construct_general()
217 cp_ctx(ctx, 0x400894, 11); nv40_gr_construct_general()
218 gr_def(ctx, 0x400894, 0x00000040); nv40_gr_construct_general()
219 if (!nv44_gr_class(ctx->device)) { nv40_gr_construct_general()
221 gr_def(ctx, 0x4008a0 + (i * 4), 0x80000000); nv40_gr_construct_general()
223 cp_ctx(ctx, 0x4008e0, 2); nv40_gr_construct_general()
224 cp_ctx(ctx, 0x4008f8, 2); nv40_gr_construct_general()
227 cp_ctx(ctx, 0x4009f8, 1); nv40_gr_construct_general()
229 cp_ctx(ctx, 0x400a00, 73); nv40_gr_construct_general()
230 gr_def(ctx, 0x400b0c, 0x0b0b0b0c); nv40_gr_construct_general()
231 cp_ctx(ctx, 0x401000, 4); nv40_gr_construct_general()
232 cp_ctx(ctx, 0x405004, 1); nv40_gr_construct_general()
237 cp_ctx(ctx, 0x403448, 1); nv40_gr_construct_general()
238 gr_def(ctx, 0x403448, 0x00001010); nv40_gr_construct_general()
241 cp_ctx(ctx, 0x403440, 1); nv40_gr_construct_general()
244 gr_def(ctx, 0x403440, 0x00000010); nv40_gr_construct_general()
249 gr_def(ctx, 0x403440, 0x00003010); nv40_gr_construct_general()
258 gr_def(ctx, 0x403440, 0x00001010); nv40_gr_construct_general()
266 nv40_gr_construct_state3d(struct nvkm_grctx *ctx) nv40_gr_construct_state3d() argument
268 struct nvkm_device *device = ctx->device; nv40_gr_construct_state3d()
272 cp_ctx(ctx, 0x401880, 51); nv40_gr_construct_state3d()
273 gr_def(ctx, 0x401940, 0x00000100); nv40_gr_construct_state3d()
277 cp_ctx(ctx, 0x401880, 32); nv40_gr_construct_state3d()
279 gr_def(ctx, 0x401880 + (i * 4), 0x00000111); nv40_gr_construct_state3d()
281 cp_ctx(ctx, 0x401900, 16); nv40_gr_construct_state3d()
282 cp_ctx(ctx, 0x401940, 3); nv40_gr_construct_state3d()
284 cp_ctx(ctx, 0x40194c, 18); nv40_gr_construct_state3d()
285 gr_def(ctx, 0x401954, 0x00000111); nv40_gr_construct_state3d()
286 gr_def(ctx, 0x401958, 0x00080060); nv40_gr_construct_state3d()
287 gr_def(ctx, 0x401974, 0x00000080); nv40_gr_construct_state3d()
288 gr_def(ctx, 0x401978, 0xffff0000); nv40_gr_construct_state3d()
289 gr_def(ctx, 0x40197c, 0x00000001); nv40_gr_construct_state3d()
290 gr_def(ctx, 0x401990, 0x46400000); nv40_gr_construct_state3d()
292 cp_ctx(ctx, 0x4019a0, 2); nv40_gr_construct_state3d()
293 cp_ctx(ctx, 0x4019ac, 5); nv40_gr_construct_state3d()
295 cp_ctx(ctx, 0x4019a0, 1); nv40_gr_construct_state3d()
296 cp_ctx(ctx, 0x4019b4, 3); nv40_gr_construct_state3d()
298 gr_def(ctx, 0x4019bc, 0xffff0000); nv40_gr_construct_state3d()
304 cp_ctx(ctx, 0x4019c0, 18); nv40_gr_construct_state3d()
306 gr_def(ctx, 0x4019c0 + (i * 4), 0x88888888); nv40_gr_construct_state3d()
309 cp_ctx(ctx, 0x401a08, 8); nv40_gr_construct_state3d()
310 gr_def(ctx, 0x401a10, 0x0fff0000); nv40_gr_construct_state3d()
311 gr_def(ctx, 0x401a14, 0x0fff0000); nv40_gr_construct_state3d()
312 gr_def(ctx, 0x401a1c, 0x00011100); nv40_gr_construct_state3d()
313 cp_ctx(ctx, 0x401a2c, 4); nv40_gr_construct_state3d()
314 cp_ctx(ctx, 0x401a44, 26); nv40_gr_construct_state3d()
316 gr_def(ctx, 0x401a44 + (i * 4), 0x07ff0000); nv40_gr_construct_state3d()
317 gr_def(ctx, 0x401a8c, 0x4b7fffff); nv40_gr_construct_state3d()
319 cp_ctx(ctx, 0x401ab8, 3); nv40_gr_construct_state3d()
321 cp_ctx(ctx, 0x401ab8, 1); nv40_gr_construct_state3d()
322 cp_ctx(ctx, 0x401ac0, 1); nv40_gr_construct_state3d()
324 cp_ctx(ctx, 0x401ad0, 8); nv40_gr_construct_state3d()
325 gr_def(ctx, 0x401ad0, 0x30201000); nv40_gr_construct_state3d()
326 gr_def(ctx, 0x401ad4, 0x70605040); nv40_gr_construct_state3d()
327 gr_def(ctx, 0x401ad8, 0xb8a89888); nv40_gr_construct_state3d()
328 gr_def(ctx, 0x401adc, 0xf8e8d8c8); nv40_gr_construct_state3d()
329 cp_ctx(ctx, 0x401b10, device->chipset == 0x40 ? 2 : 1); nv40_gr_construct_state3d()
330 gr_def(ctx, 0x401b10, 0x40100000); nv40_gr_construct_state3d()
331 cp_ctx(ctx, 0x401b18, device->chipset == 0x40 ? 6 : 5); nv40_gr_construct_state3d()
332 gr_def(ctx, 0x401b28, device->chipset == 0x40 ? nv40_gr_construct_state3d()
334 cp_ctx(ctx, 0x401b30, 25); nv40_gr_construct_state3d()
335 gr_def(ctx, 0x401b34, 0x0000ffff); nv40_gr_construct_state3d()
336 gr_def(ctx, 0x401b68, 0x435185d6); nv40_gr_construct_state3d()
337 gr_def(ctx, 0x401b6c, 0x2155b699); nv40_gr_construct_state3d()
338 gr_def(ctx, 0x401b70, 0xfedcba98); nv40_gr_construct_state3d()
339 gr_def(ctx, 0x401b74, 0x00000098); nv40_gr_construct_state3d()
340 gr_def(ctx, 0x401b84, 0xffffffff); nv40_gr_construct_state3d()
341 gr_def(ctx, 0x401b88, 0x00ff7000); nv40_gr_construct_state3d()
342 gr_def(ctx, 0x401b8c, 0x0000ffff); nv40_gr_construct_state3d()
345 cp_ctx(ctx, 0x401b94, 1); nv40_gr_construct_state3d()
346 cp_ctx(ctx, 0x401b98, 8); nv40_gr_construct_state3d()
347 gr_def(ctx, 0x401b9c, 0x00ff0000); nv40_gr_construct_state3d()
348 cp_ctx(ctx, 0x401bc0, 9); nv40_gr_construct_state3d()
349 gr_def(ctx, 0x401be0, 0x00ffff00); nv40_gr_construct_state3d()
350 cp_ctx(ctx, 0x401c00, 192); nv40_gr_construct_state3d()
352 gr_def(ctx, 0x401c40 + (i * 4), 0x00018488); nv40_gr_construct_state3d()
353 gr_def(ctx, 0x401c80 + (i * 4), 0x00028202); nv40_gr_construct_state3d()
354 gr_def(ctx, 0x401d00 + (i * 4), 0x0000aae4); nv40_gr_construct_state3d()
355 gr_def(ctx, 0x401d40 + (i * 4), 0x01012000); nv40_gr_construct_state3d()
356 gr_def(ctx, 0x401d80 + (i * 4), 0x00080008); nv40_gr_construct_state3d()
357 gr_def(ctx, 0x401e00 + (i * 4), 0x00100008); nv40_gr_construct_state3d()
360 gr_def(ctx, 0x401e90 + (i * 4), 0x0001bc80); nv40_gr_construct_state3d()
361 gr_def(ctx, 0x401ea0 + (i * 4), 0x00000202); nv40_gr_construct_state3d()
362 gr_def(ctx, 0x401ec0 + (i * 4), 0x00000008); nv40_gr_construct_state3d()
363 gr_def(ctx, 0x401ee0 + (i * 4), 0x00080008); nv40_gr_construct_state3d()
365 cp_ctx(ctx, 0x400f5c, 3); nv40_gr_construct_state3d()
366 gr_def(ctx, 0x400f5c, 0x00000002); nv40_gr_construct_state3d()
367 cp_ctx(ctx, 0x400f84, 1); nv40_gr_construct_state3d()
371 nv40_gr_construct_state3d_2(struct nvkm_grctx *ctx) nv40_gr_construct_state3d_2() argument
373 struct nvkm_device *device = ctx->device; nv40_gr_construct_state3d_2()
376 cp_ctx(ctx, 0x402000, 1); nv40_gr_construct_state3d_2()
377 cp_ctx(ctx, 0x402404, device->chipset == 0x40 ? 1 : 2); nv40_gr_construct_state3d_2()
380 gr_def(ctx, 0x402404, 0x00000001); nv40_gr_construct_state3d_2()
385 gr_def(ctx, 0x402404, 0x00000020); nv40_gr_construct_state3d_2()
390 gr_def(ctx, 0x402404, 0x00000421); nv40_gr_construct_state3d_2()
393 gr_def(ctx, 0x402404, 0x00000021); nv40_gr_construct_state3d_2()
396 gr_def(ctx, 0x402408, 0x030c30c3); nv40_gr_construct_state3d_2()
404 cp_ctx(ctx, 0x402440, 1); nv40_gr_construct_state3d_2()
405 gr_def(ctx, 0x402440, 0x00011001); nv40_gr_construct_state3d_2()
410 cp_ctx(ctx, 0x402480, device->chipset == 0x40 ? 8 : 9); nv40_gr_construct_state3d_2()
411 gr_def(ctx, 0x402488, 0x3e020200); nv40_gr_construct_state3d_2()
412 gr_def(ctx, 0x40248c, 0x00ffffff); nv40_gr_construct_state3d_2()
415 gr_def(ctx, 0x402490, 0x60103f00); nv40_gr_construct_state3d_2()
418 gr_def(ctx, 0x402490, 0x40103f00); nv40_gr_construct_state3d_2()
424 gr_def(ctx, 0x402490, 0x20103f00); nv40_gr_construct_state3d_2()
427 gr_def(ctx, 0x402490, 0x0c103f00); nv40_gr_construct_state3d_2()
430 gr_def(ctx, 0x40249c, device->chipset <= 0x43 ? nv40_gr_construct_state3d_2()
432 cp_ctx(ctx, 0x402500, 31); nv40_gr_construct_state3d_2()
433 gr_def(ctx, 0x402530, 0x00008100); nv40_gr_construct_state3d_2()
435 cp_ctx(ctx, 0x40257c, 6); nv40_gr_construct_state3d_2()
436 cp_ctx(ctx, 0x402594, 16); nv40_gr_construct_state3d_2()
437 cp_ctx(ctx, 0x402800, 17); nv40_gr_construct_state3d_2()
438 gr_def(ctx, 0x402800, 0x00000001); nv40_gr_construct_state3d_2()
443 cp_ctx(ctx, 0x402864, 1); nv40_gr_construct_state3d_2()
444 gr_def(ctx, 0x402864, 0x00001001); nv40_gr_construct_state3d_2()
445 cp_ctx(ctx, 0x402870, 3); nv40_gr_construct_state3d_2()
446 gr_def(ctx, 0x402878, 0x00000003); nv40_gr_construct_state3d_2()
448 cp_ctx(ctx, 0x402900, 1); nv40_gr_construct_state3d_2()
449 cp_ctx(ctx, 0x402940, 1); nv40_gr_construct_state3d_2()
450 cp_ctx(ctx, 0x402980, 1); nv40_gr_construct_state3d_2()
451 cp_ctx(ctx, 0x4029c0, 1); nv40_gr_construct_state3d_2()
452 cp_ctx(ctx, 0x402a00, 1); nv40_gr_construct_state3d_2()
453 cp_ctx(ctx, 0x402a40, 1); nv40_gr_construct_state3d_2()
454 cp_ctx(ctx, 0x402a80, 1); nv40_gr_construct_state3d_2()
455 cp_ctx(ctx, 0x402ac0, 1); nv40_gr_construct_state3d_2()
459 cp_ctx(ctx, 0x402844, 1); nv40_gr_construct_state3d_2()
460 gr_def(ctx, 0x402844, 0x00000001); nv40_gr_construct_state3d_2()
461 cp_ctx(ctx, 0x402850, 1); nv40_gr_construct_state3d_2()
464 cp_ctx(ctx, 0x402844, 1); nv40_gr_construct_state3d_2()
465 gr_def(ctx, 0x402844, 0x00001001); nv40_gr_construct_state3d_2()
466 cp_ctx(ctx, 0x402850, 2); nv40_gr_construct_state3d_2()
467 gr_def(ctx, 0x402854, 0x00000003); nv40_gr_construct_state3d_2()
471 cp_ctx(ctx, 0x402c00, 4); nv40_gr_construct_state3d_2()
472 gr_def(ctx, 0x402c00, device->chipset == 0x40 ? nv40_gr_construct_state3d_2()
478 cp_ctx(ctx, 0x402c20, 40); nv40_gr_construct_state3d_2()
480 gr_def(ctx, 0x402c40 + (i * 4), 0xffffffff); nv40_gr_construct_state3d_2()
481 cp_ctx(ctx, 0x4030b8, 13); nv40_gr_construct_state3d_2()
482 gr_def(ctx, 0x4030dc, 0x00000005); nv40_gr_construct_state3d_2()
483 gr_def(ctx, 0x4030e8, 0x0000ffff); nv40_gr_construct_state3d_2()
486 cp_ctx(ctx, 0x402c10, 4); nv40_gr_construct_state3d_2()
488 cp_ctx(ctx, 0x402c20, 36); nv40_gr_construct_state3d_2()
491 cp_ctx(ctx, 0x402c20, 24); nv40_gr_construct_state3d_2()
494 cp_ctx(ctx, 0x402c20, 16); nv40_gr_construct_state3d_2()
496 cp_ctx(ctx, 0x402c20, 8); nv40_gr_construct_state3d_2()
497 cp_ctx(ctx, 0x402cb0, device->chipset == 0x40 ? 12 : 13); nv40_gr_construct_state3d_2()
498 gr_def(ctx, 0x402cd4, 0x00000005); nv40_gr_construct_state3d_2()
500 gr_def(ctx, 0x402ce0, 0x0000ffff); nv40_gr_construct_state3d_2()
504 cp_ctx(ctx, 0x403400, device->chipset == 0x40 ? 4 : 3); nv40_gr_construct_state3d_2()
505 cp_ctx(ctx, 0x403410, device->chipset == 0x40 ? 4 : 3); nv40_gr_construct_state3d_2()
506 cp_ctx(ctx, 0x403420, nv40_gr_vs_count(ctx->device)); nv40_gr_construct_state3d_2()
507 for (i = 0; i < nv40_gr_vs_count(ctx->device); i++) nv40_gr_construct_state3d_2()
508 gr_def(ctx, 0x403420 + (i * 4), 0x00005555); nv40_gr_construct_state3d_2()
511 cp_ctx(ctx, 0x403600, 1); nv40_gr_construct_state3d_2()
512 gr_def(ctx, 0x403600, 0x00000001); nv40_gr_construct_state3d_2()
514 cp_ctx(ctx, 0x403800, 1); nv40_gr_construct_state3d_2()
516 cp_ctx(ctx, 0x403c18, 1); nv40_gr_construct_state3d_2()
517 gr_def(ctx, 0x403c18, 0x00000001); nv40_gr_construct_state3d_2()
523 cp_ctx(ctx, 0x405018, 1); nv40_gr_construct_state3d_2()
524 gr_def(ctx, 0x405018, 0x08e00001); nv40_gr_construct_state3d_2()
525 cp_ctx(ctx, 0x405c24, 1); nv40_gr_construct_state3d_2()
526 gr_def(ctx, 0x405c24, 0x000e3000); nv40_gr_construct_state3d_2()
530 cp_ctx(ctx, 0x405800, 11); nv40_gr_construct_state3d_2()
531 cp_ctx(ctx, 0x407000, 1); nv40_gr_construct_state3d_2()
535 nv40_gr_construct_state3d_3(struct nvkm_grctx *ctx) nv40_gr_construct_state3d_3() argument
537 int len = nv44_gr_class(ctx->device) ? 0x0084 : 0x0684; nv40_gr_construct_state3d_3()
539 cp_out (ctx, 0x300000); nv40_gr_construct_state3d_3()
540 cp_lsr (ctx, len - 4); nv40_gr_construct_state3d_3()
541 cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_swap_state3d_3_is_save); nv40_gr_construct_state3d_3()
542 cp_lsr (ctx, len); nv40_gr_construct_state3d_3()
543 cp_name(ctx, cp_swap_state3d_3_is_save); nv40_gr_construct_state3d_3()
544 cp_out (ctx, 0x800001); nv40_gr_construct_state3d_3()
546 ctx->ctxvals_pos += len; nv40_gr_construct_state3d_3()
550 nv40_gr_construct_shader(struct nvkm_grctx *ctx) nv40_gr_construct_shader() argument
552 struct nvkm_device *device = ctx->device; nv40_gr_construct_shader()
553 struct nvkm_gpuobj *obj = ctx->data; nv40_gr_construct_shader()
557 vs_nr = nv40_gr_vs_count(ctx->device); nv40_gr_construct_shader()
575 cp_lsr(ctx, vs_len * vs_nr + 0x300/4); nv40_gr_construct_shader()
576 cp_out(ctx, nv44_gr_class(device) ? 0x800029 : 0x800041); nv40_gr_construct_shader()
578 offset = ctx->ctxvals_pos; nv40_gr_construct_shader()
579 ctx->ctxvals_pos += (0x0300/4 + (vs_nr * vs_len)); nv40_gr_construct_shader()
581 if (ctx->mode != NVKM_GRCTX_VALS) nv40_gr_construct_shader()
597 nv40_grctx_generate(struct nvkm_grctx *ctx) nv40_grctx_generate() argument
600 cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); nv40_grctx_generate()
601 cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); nv40_grctx_generate()
603 cp_name(ctx, cp_check_load); nv40_grctx_generate()
604 cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load); nv40_grctx_generate()
605 cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load); nv40_grctx_generate()
606 cp_bra (ctx, ALWAYS, TRUE, cp_exit); nv40_grctx_generate()
609 cp_name(ctx, cp_setup_auto_load); nv40_grctx_generate()
610 cp_wait(ctx, STATUS, IDLE); nv40_grctx_generate()
611 cp_out (ctx, CP_NEXT_TO_SWAP); nv40_grctx_generate()
612 cp_name(ctx, cp_setup_load); nv40_grctx_generate()
613 cp_wait(ctx, STATUS, IDLE); nv40_grctx_generate()
614 cp_set (ctx, SWAP_DIRECTION, LOAD); nv40_grctx_generate()
615 cp_out (ctx, 0x00910880); /* ?? */ nv40_grctx_generate()
616 cp_out (ctx, 0x00901ffe); /* ?? */ nv40_grctx_generate()
617 cp_out (ctx, 0x01940000); /* ?? */ nv40_grctx_generate()
618 cp_lsr (ctx, 0x20); nv40_grctx_generate()
619 cp_out (ctx, 0x0060000b); /* ?? */ nv40_grctx_generate()
620 cp_wait(ctx, UNK57, CLEAR); nv40_grctx_generate()
621 cp_out (ctx, 0x0060000c); /* ?? */ nv40_grctx_generate()
622 cp_bra (ctx, ALWAYS, TRUE, cp_swap_state); nv40_grctx_generate()
625 cp_name(ctx, cp_setup_save); nv40_grctx_generate()
626 cp_set (ctx, SWAP_DIRECTION, SAVE); nv40_grctx_generate()
629 cp_name(ctx, cp_swap_state); nv40_grctx_generate()
630 cp_pos (ctx, 0x00020/4); nv40_grctx_generate()
631 nv40_gr_construct_general(ctx); nv40_grctx_generate()
632 cp_wait(ctx, STATUS, IDLE); nv40_grctx_generate()
635 cp_bra (ctx, UNK54, CLEAR, cp_prepare_exit); nv40_grctx_generate()
636 nv40_gr_construct_state3d(ctx); nv40_grctx_generate()
637 cp_wait(ctx, STATUS, IDLE); nv40_grctx_generate()
640 nv40_gr_construct_state3d_2(ctx); nv40_grctx_generate()
643 nv40_gr_construct_state3d_3(ctx); nv40_grctx_generate()
646 cp_pos (ctx, ctx->ctxvals_pos); nv40_grctx_generate()
647 nv40_gr_construct_shader(ctx); nv40_grctx_generate()
650 cp_name(ctx, cp_prepare_exit); nv40_grctx_generate()
651 cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load); nv40_grctx_generate()
652 cp_bra (ctx, USER_SAVE, PENDING, cp_exit); nv40_grctx_generate()
653 cp_out (ctx, CP_NEXT_TO_CURRENT); nv40_grctx_generate()
655 cp_name(ctx, cp_exit); nv40_grctx_generate()
656 cp_set (ctx, USER_SAVE, NOT_PENDING); nv40_grctx_generate()
657 cp_set (ctx, USER_LOAD, NOT_PENDING); nv40_grctx_generate()
658 cp_out (ctx, CP_END); nv40_grctx_generate()
675 struct nvkm_grctx ctx = { nv40_grctx_init() local
685 nv40_grctx_generate(&ctx); nv40_grctx_init()
688 for (i = 0; i < ctx.ctxprog_len; i++) nv40_grctx_init()
690 *size = ctx.ctxvals_pos * 4; nv40_grctx_init()
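
For reference, the driver-facing shape of nv40_grctx_init(), whose fragments appear at source lines 675-690 above; the MMIO offsets used to upload the ctxprog are assumptions here, and the 256-opcode buffer is only a plausible bound:

int
nv40_grctx_init(struct nvkm_device *device, u32 *size)
{
	u32 *ctxprog = kmalloc(256 * 4, GFP_KERNEL);
	struct nvkm_grctx ctx = {
		.device = device,
		.mode = NVKM_GRCTX_PROG,	/* emit opcodes, not values */
		.data = ctxprog,
		.ctxprog_max = 256,
	};
	int i;

	if (!ctxprog)
		return -ENOMEM;

	nv40_grctx_generate(&ctx);

	nv_wr32(device, 0x400324, 0);		/* upload registers: assumed */
	for (i = 0; i < ctx.ctxprog_len; i++)
		nv_wr32(device, 0x400328, ctxprog[i]);
	*size = ctx.ctxvals_pos * 4;		/* context image size, bytes */

	kfree(ctxprog);
	return 0;
}
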
/linux-4.1.27/arch/mips/net/
H A Dbpf_jit.c154 static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
157 #define emit_instr(ctx, func, ...) \
159 if ((ctx)->target != NULL) { \
160 u32 *p = &(ctx)->target[ctx->idx]; \
163 (ctx)->idx++; \
170 #define emit_long_instr(ctx, func, ...) \
172 if ((ctx)->target != NULL) { \
173 u32 *p = &(ctx)->target[ctx->idx]; \
176 (ctx)->idx++; \
186 unsigned int src2, struct jit_ctx *ctx) emit_addu()
188 emit_instr(ctx, addu, dst, src1, src2); emit_addu()
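
The macro bodies at source lines 157-176 above lost their middle lines to the search; those lines presumably dispatch into the MIPS uasm helpers. A hedged reconstruction of emit_instr() (emit_long_instr() is the same shape but uses the UASM_i_* wrappers that widen on 64-bit kernels):

#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

The target == NULL arm is the whole trick: the first pass over the filter only counts instructions via idx, and the second pass writes them.
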
191 static inline void emit_nop(struct jit_ctx *ctx) emit_nop() argument
193 emit_instr(ctx, nop); emit_nop()
197 static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx) emit_load_imm() argument
199 if (ctx->target != NULL) { emit_load_imm()
202 u32 *p = &ctx->target[ctx->idx]; emit_load_imm()
204 p = &ctx->target[ctx->idx + 1]; emit_load_imm()
207 u32 *p = &ctx->target[ctx->idx]; emit_load_imm()
211 ctx->idx++; emit_load_imm()
214 ctx->idx++; emit_load_imm()
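
A hedged sketch of emit_load_imm() with the elided uasm calls filled in; is_range16() is assumed from the same file. An immediate outside addiu's signed 16-bit range costs lui + ori, which is why the function bumps idx twice in that case:

static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		if (!is_range16(imm)) {		/* two instructions */
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_lui(&p, r_tmp_imm, (u32)imm >> 16);
			p = &ctx->target[ctx->idx + 1];
			uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
		} else {			/* fits a single addiu */
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_addiu(&p, dst, r_zero, imm);
		}
	}

	if (!is_range16(imm))
		ctx->idx++;
	ctx->idx++;
}
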
218 unsigned int src2, struct jit_ctx *ctx) emit_or()
220 emit_instr(ctx, or, dst, src1, src2); emit_or()
224 struct jit_ctx *ctx) emit_ori()
227 emit_load_imm(r_tmp, imm, ctx); emit_ori()
228 emit_or(dst, src, r_tmp, ctx); emit_ori()
230 emit_instr(ctx, ori, dst, src, imm); emit_ori()
235 int imm, struct jit_ctx *ctx) emit_daddiu()
241 emit_instr(ctx, daddiu, dst, src, imm); emit_daddiu()
245 u32 imm, struct jit_ctx *ctx) emit_addiu()
248 emit_load_imm(r_tmp, imm, ctx); emit_addiu()
249 emit_addu(dst, r_tmp, src, ctx); emit_addiu()
251 emit_instr(ctx, addiu, dst, src, imm); emit_addiu()
256 unsigned int src2, struct jit_ctx *ctx) emit_and()
258 emit_instr(ctx, and, dst, src1, src2); emit_and()
262 u32 imm, struct jit_ctx *ctx) emit_andi()
266 emit_load_imm(r_tmp, imm, ctx); emit_andi()
267 emit_and(dst, src, r_tmp, ctx); emit_andi()
269 emit_instr(ctx, andi, dst, src, imm); emit_andi()
274 unsigned int src2, struct jit_ctx *ctx) emit_xor()
276 emit_instr(ctx, xor, dst, src1, src2); emit_xor()
279 static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx) emit_xori() argument
283 emit_load_imm(r_tmp, imm, ctx); emit_xori()
284 emit_xor(dst, src, r_tmp, ctx); emit_xori()
286 emit_instr(ctx, xori, dst, src, imm); emit_xori()
290 static inline void emit_stack_offset(int offset, struct jit_ctx *ctx) emit_stack_offset() argument
292 emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset); emit_stack_offset()
296 unsigned int src2, struct jit_ctx *ctx) emit_subu()
298 emit_instr(ctx, subu, dst, src1, src2); emit_subu()
301 static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx) emit_neg() argument
303 emit_subu(reg, r_zero, reg, ctx); emit_neg()
307 unsigned int sa, struct jit_ctx *ctx) emit_sllv()
309 emit_instr(ctx, sllv, dst, src, sa); emit_sllv()
313 unsigned int sa, struct jit_ctx *ctx) emit_sll()
318 emit_jit_reg_move(dst, r_zero, ctx); emit_sll()
320 emit_instr(ctx, sll, dst, src, sa); emit_sll()
324 unsigned int sa, struct jit_ctx *ctx) emit_srlv()
326 emit_instr(ctx, srlv, dst, src, sa); emit_srlv()
330 unsigned int sa, struct jit_ctx *ctx) emit_srl()
335 emit_jit_reg_move(dst, r_zero, ctx); emit_srl()
337 emit_instr(ctx, srl, dst, src, sa); emit_srl()
341 unsigned int src2, struct jit_ctx *ctx) emit_slt()
343 emit_instr(ctx, slt, dst, src1, src2); emit_slt()
347 unsigned int src2, struct jit_ctx *ctx) emit_sltu()
349 emit_instr(ctx, sltu, dst, src1, src2); emit_sltu()
353 unsigned int imm, struct jit_ctx *ctx) emit_sltiu()
357 emit_load_imm(r_tmp, imm, ctx); emit_sltiu()
358 emit_sltu(dst, src, r_tmp, ctx); emit_sltiu()
360 emit_instr(ctx, sltiu, dst, src, imm); emit_sltiu()
368 struct jit_ctx *ctx) emit_store_stack_reg()
370 emit_long_instr(ctx, SW, reg, offset, base); emit_store_stack_reg()
374 struct jit_ctx *ctx) emit_store()
376 emit_instr(ctx, sw, reg, offset, base); emit_store()
381 struct jit_ctx *ctx) emit_load_stack_reg()
383 emit_long_instr(ctx, LW, reg, offset, base); emit_load_stack_reg()
387 unsigned int offset, struct jit_ctx *ctx) emit_load()
389 emit_instr(ctx, lw, reg, offset, base); emit_load()
393 unsigned int offset, struct jit_ctx *ctx) emit_load_byte()
395 emit_instr(ctx, lb, reg, offset, base); emit_load_byte()
399 unsigned int offset, struct jit_ctx *ctx) emit_half_load()
401 emit_instr(ctx, lh, reg, offset, base); emit_half_load()
405 unsigned int src2, struct jit_ctx *ctx) emit_mul()
407 emit_instr(ctx, mul, dst, src1, src2); emit_mul()
411 struct jit_ctx *ctx) emit_div()
413 if (ctx->target != NULL) { emit_div()
414 u32 *p = &ctx->target[ctx->idx]; emit_div()
416 p = &ctx->target[ctx->idx + 1]; emit_div()
419 ctx->idx += 2; /* 2 insts */ emit_div()
423 struct jit_ctx *ctx) emit_mod()
425 if (ctx->target != NULL) { emit_mod()
426 u32 *p = &ctx->target[ctx->idx]; emit_mod()
428 p = &ctx->target[ctx->idx + 1]; emit_mod()
431 ctx->idx += 2; /* 2 insts */ emit_mod()
435 unsigned int sa, struct jit_ctx *ctx) emit_dsll()
437 emit_instr(ctx, dsll, dst, src, sa); emit_dsll()
441 unsigned int sa, struct jit_ctx *ctx) emit_dsrl32()
443 emit_instr(ctx, dsrl32, dst, src, sa); emit_dsrl32()
447 struct jit_ctx *ctx) emit_wsbh()
449 emit_instr(ctx, wsbh, dst, src); emit_wsbh()
454 int imm, struct jit_ctx *ctx) emit_load_ptr()
457 emit_long_instr(ctx, LW, dst, imm, src); emit_load_ptr()
462 struct jit_ctx *ctx) emit_load_func()
466 emit_load_imm(r_tmp, (u64)imm >> 32, ctx); emit_load_func()
467 emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ emit_load_func()
468 emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx); emit_load_func()
469 emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */ emit_load_func()
470 emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx); emit_load_func()
472 emit_load_imm(reg, imm, ctx); emit_load_func()
477 static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) emit_reg_move() argument
479 emit_long_instr(ctx, ADDU, dst, src, r_zero); emit_reg_move()
483 static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx) emit_jit_reg_move() argument
485 emit_addu(dst, src, r_zero, ctx); emit_jit_reg_move()
489 static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx) b_imm() argument
491 if (ctx->target == NULL) b_imm()
503 * ctx->idx currently points to the branch instruction b_imm()
507 return ctx->offsets[tgt] - b_imm()
508 (ctx->idx * 4 - ctx->prologue_bytes) - 4; b_imm()
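
A worked example for the return at source lines 507-508 (all numbers illustrative): take a branch sitting at ctx->idx == 10 with a 16-byte prologue, jumping to a target recorded at ctx->offsets[tgt] == 64 bytes from the body start.

/*
 *	branch position in body = 10 * 4 - 16 = 24 bytes
 *	b_imm                   = 64 - 24 - 4 = 36 bytes
 *
 * offsets[] was recorded on the sizing pass, before the prologue size
 * was known, so the prologue bytes must be subtracted from idx * 4; the
 * trailing -4 is because MIPS branch offsets are relative to the
 * instruction after the branch (its delay slot). On the sizing pass
 * itself, ctx->target == NULL and b_imm() simply returns 0.
 */
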
512 unsigned int imm, struct jit_ctx *ctx) emit_bcond()
514 if (ctx->target != NULL) { emit_bcond()
515 u32 *p = &ctx->target[ctx->idx]; emit_bcond()
532 ctx->idx++; emit_bcond()
535 static inline void emit_b(unsigned int imm, struct jit_ctx *ctx) emit_b() argument
537 emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx); emit_b()
541 struct jit_ctx *ctx) emit_jalr()
543 emit_instr(ctx, jalr, link, reg); emit_jalr()
546 static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx) emit_jr() argument
548 emit_instr(ctx, jr, reg); emit_jr()
559 static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset) save_bpf_jit_regs() argument
565 emit_stack_offset(-align_sp(offset), ctx); save_bpf_jit_regs()
567 if (ctx->flags & SEEN_CALL) { save_bpf_jit_regs()
575 emit_store_stack_reg(MIPS_R_A0, r_sp, real_off, ctx); save_bpf_jit_regs()
576 emit_store_stack_reg(MIPS_R_A1, r_sp, real_off + RSIZE, ctx); save_bpf_jit_regs()
581 tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; save_bpf_jit_regs()
586 ctx); save_bpf_jit_regs()
594 if (ctx->flags & SEEN_CALL) { save_bpf_jit_regs()
595 emit_store_stack_reg(r_ra, r_sp, real_off, ctx); save_bpf_jit_regs()
600 if (ctx->flags & SEEN_MEM) { save_bpf_jit_regs()
603 emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off); save_bpf_jit_regs()
607 static void restore_bpf_jit_regs(struct jit_ctx *ctx, restore_bpf_jit_regs() argument
613 if (ctx->flags & SEEN_CALL) { restore_bpf_jit_regs()
620 emit_load_stack_reg(MIPS_R_A0, r_sp, real_off, ctx); restore_bpf_jit_regs()
621 emit_load_stack_reg(MIPS_R_A1, r_sp, real_off + RSIZE, ctx); restore_bpf_jit_regs()
626 tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT; restore_bpf_jit_regs()
632 ctx); restore_bpf_jit_regs()
640 if (ctx->flags & SEEN_CALL) restore_bpf_jit_regs()
641 emit_load_stack_reg(r_ra, r_sp, real_off, ctx); restore_bpf_jit_regs()
644 emit_stack_offset(align_sp(offset), ctx); restore_bpf_jit_regs() local
647 static unsigned int get_stack_depth(struct jit_ctx *ctx) get_stack_depth() argument
653 sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * RSIZE; get_stack_depth()
655 if (ctx->flags & SEEN_MEM) get_stack_depth()
658 if (ctx->flags & SEEN_CALL) get_stack_depth()
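
A hedged sketch of get_stack_depth() with the elided arithmetic filled in; RSIZE and BPF_MEMWORDS are taken on trust from the same file, and whether argument spill space is also counted here is not visible in the listing:

static unsigned int get_stack_depth(struct jit_ctx *ctx)
{
	int sp_off = 0;

	/* one slot per callee-saved s-register the filter touched */
	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * RSIZE;

	if (ctx->flags & SEEN_MEM)
		sp_off += 4 * BPF_MEMWORDS;	/* classic BPF M[] scratch */

	if (ctx->flags & SEEN_CALL)
		sp_off += RSIZE;		/* room to save $ra */

	return sp_off;
}
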
674 static void build_prologue(struct jit_ctx *ctx) build_prologue() argument
679 sp_off = get_stack_depth(ctx); build_prologue()
680 save_bpf_jit_regs(ctx, sp_off); build_prologue()
682 if (ctx->flags & SEEN_SKB) build_prologue()
683 emit_reg_move(r_skb, MIPS_R_A0, ctx); build_prologue()
685 if (ctx->flags & SEEN_X) build_prologue()
686 emit_jit_reg_move(r_X, r_zero, ctx); build_prologue()
689 if (bpf_needs_clear_a(&ctx->skf->insns[0])) build_prologue()
690 emit_jit_reg_move(r_A, r_zero, ctx); build_prologue()
693 static void build_epilogue(struct jit_ctx *ctx) build_epilogue() argument
699 sp_off = get_stack_depth(ctx); build_epilogue()
700 restore_bpf_jit_regs(ctx, sp_off); build_epilogue()
703 emit_jr(r_ra, ctx); build_epilogue()
704 emit_nop(ctx); build_epilogue()
737 static int build_body(struct jit_ctx *ctx) build_body() argument
740 const struct bpf_prog *prog = ctx->skf; build_body()
754 if (ctx->target == NULL) build_body()
755 ctx->offsets[i] = ctx->idx * 4; build_body()
760 ctx->flags |= SEEN_A; build_body()
761 emit_load_imm(r_A, k, ctx); build_body()
766 ctx->flags |= SEEN_SKB | SEEN_A; build_body()
768 emit_load(r_A, r_skb, off, ctx); build_body()
772 ctx->flags |= SEEN_MEM | SEEN_A; build_body()
773 emit_load(r_A, r_M, SCRATCH_OFF(k), ctx); build_body()
791 emit_load_imm(r_off, k, ctx); build_body()
797 emit_slt(r_s0, r_off, r_zero, ctx); build_body()
799 b_imm(prog->len, ctx), ctx); build_body()
800 emit_reg_move(r_ret, r_zero, ctx); build_body()
802 ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 | build_body()
806 ctx); build_body()
807 emit_reg_move(MIPS_R_A0, r_skb, ctx); build_body()
808 emit_jalr(MIPS_R_RA, r_s0, ctx); build_body()
810 emit_reg_move(MIPS_R_A1, r_off, ctx); build_body()
814 emit_dsrl32(r_s0, r_val, 0, ctx); build_body()
817 ctx); build_body()
821 ctx); build_body()
823 emit_nop(ctx); build_body()
825 emit_b(b_imm(i + 1, ctx), ctx); build_body()
826 emit_jit_reg_move(r_A, r_val, ctx); build_body()
828 emit_b(b_imm(prog->len, ctx), ctx); build_body()
829 emit_reg_move(r_ret, r_zero, ctx); build_body()
843 ctx->flags |= SEEN_OFF | SEEN_X; build_body()
844 emit_addiu(r_off, r_X, k, ctx); build_body()
848 ctx->flags |= SEEN_X; build_body()
849 emit_load_imm(r_X, k, ctx); build_body()
853 ctx->flags |= SEEN_X | SEEN_MEM; build_body()
854 emit_load(r_X, r_M, SCRATCH_OFF(k), ctx); build_body()
858 ctx->flags |= SEEN_X | SEEN_SKB; build_body()
860 emit_load(r_X, r_skb, off, ctx); build_body()
868 ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB; build_body()
870 emit_load_func(r_s0, (ptr)jit_get_skb_b, ctx); build_body()
875 emit_load_imm(MIPS_R_A1, k, ctx); build_body()
876 emit_jalr(MIPS_R_RA, r_s0, ctx); build_body()
877 emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */ build_body()
881 emit_dsrl32(r_s0, r_val, 0, ctx); build_body()
883 3 << 2, ctx); build_body()
886 3 << 2, ctx); build_body()
891 emit_andi(r_X, r_val, 0xf, ctx); build_body()
893 emit_b(b_imm(i + 1, ctx), ctx); build_body()
894 emit_sll(r_X, r_X, 2, ctx); /* delay slot */ build_body()
896 emit_b(b_imm(prog->len, ctx), ctx); build_body()
897 emit_load_imm(r_ret, 0, ctx); /* delay slot */ build_body()
901 ctx->flags |= SEEN_MEM | SEEN_A; build_body()
902 emit_store(r_A, r_M, SCRATCH_OFF(k), ctx); build_body()
906 ctx->flags |= SEEN_MEM | SEEN_X; build_body()
907 emit_store(r_X, r_M, SCRATCH_OFF(k), ctx); build_body()
911 ctx->flags |= SEEN_A; build_body()
912 emit_addiu(r_A, r_A, k, ctx); build_body()
916 ctx->flags |= SEEN_A | SEEN_X; build_body()
917 emit_addu(r_A, r_A, r_X, ctx); build_body()
921 ctx->flags |= SEEN_A; build_body()
922 emit_addiu(r_A, r_A, -k, ctx); build_body()
926 ctx->flags |= SEEN_A | SEEN_X; build_body()
927 emit_subu(r_A, r_A, r_X, ctx); build_body()
932 ctx->flags |= SEEN_A | SEEN_S0; build_body()
933 emit_load_imm(r_s0, k, ctx); build_body()
934 emit_mul(r_A, r_A, r_s0, ctx); build_body()
938 ctx->flags |= SEEN_A | SEEN_X; build_body()
939 emit_mul(r_A, r_A, r_X, ctx); build_body()
946 ctx->flags |= SEEN_A; build_body()
947 emit_srl(r_A, r_A, k, ctx); build_body()
950 ctx->flags |= SEEN_A | SEEN_S0; build_body()
951 emit_load_imm(r_s0, k, ctx); build_body()
952 emit_div(r_A, r_s0, ctx); build_body()
957 ctx->flags |= SEEN_A; build_body()
958 emit_jit_reg_move(r_A, r_zero, ctx); build_body()
960 ctx->flags |= SEEN_A | SEEN_S0; build_body()
961 emit_load_imm(r_s0, k, ctx); build_body()
962 emit_mod(r_A, r_s0, ctx); build_body()
967 ctx->flags |= SEEN_X | SEEN_A; build_body()
970 b_imm(prog->len, ctx), ctx); build_body()
971 emit_load_imm(r_val, 0, ctx); /* delay slot */ build_body()
972 emit_div(r_A, r_X, ctx); build_body()
976 ctx->flags |= SEEN_X | SEEN_A; build_body()
979 b_imm(prog->len, ctx), ctx); build_body()
980 emit_load_imm(r_val, 0, ctx); /* delay slot */ build_body()
981 emit_mod(r_A, r_X, ctx); build_body()
985 ctx->flags |= SEEN_A; build_body()
986 emit_ori(r_A, r_A, k, ctx); build_body()
990 ctx->flags |= SEEN_A | SEEN_X; build_body()
991 emit_or(r_A, r_A, r_X, ctx); build_body()
995 ctx->flags |= SEEN_A; build_body()
996 emit_xori(r_A, r_A, k, ctx); build_body()
1001 ctx->flags |= SEEN_A; build_body()
1002 emit_xor(r_A, r_A, r_X, ctx); build_body()
1006 ctx->flags |= SEEN_A; build_body()
1007 emit_andi(r_A, r_A, k, ctx); build_body()
1011 ctx->flags |= SEEN_A | SEEN_X; build_body()
1012 emit_and(r_A, r_A, r_X, ctx); build_body()
1016 ctx->flags |= SEEN_A; build_body()
1017 emit_sll(r_A, r_A, k, ctx); build_body()
1021 ctx->flags |= SEEN_A | SEEN_X; build_body()
1022 emit_sllv(r_A, r_A, r_X, ctx); build_body()
1026 ctx->flags |= SEEN_A; build_body()
1027 emit_srl(r_A, r_A, k, ctx); build_body()
1030 ctx->flags |= SEEN_A | SEEN_X; build_body()
1031 emit_srlv(r_A, r_A, r_X, ctx); build_body()
1035 ctx->flags |= SEEN_A; build_body()
1036 emit_neg(r_A, ctx); build_body()
1040 emit_b(b_imm(i + k + 1, ctx), ctx); build_body()
1041 emit_nop(ctx); build_body()
1048 ctx->flags |= SEEN_X; build_body()
1057 ctx->flags |= SEEN_X; build_body()
1066 ctx->flags |= SEEN_X; build_body()
1074 ctx->flags |= SEEN_S0 | SEEN_A; build_body()
1075 emit_sltiu(r_s0, r_A, k, ctx); build_body()
1077 ctx->flags |= SEEN_S0 | SEEN_A | build_body()
1079 emit_sltu(r_s0, r_A, r_X, ctx); build_body()
1082 b_off = b_imm(i + inst->jf + 1, ctx); build_body()
1084 ctx); build_body()
1085 emit_nop(ctx); build_body()
1089 ctx->flags |= SEEN_S0 | SEEN_A | SEEN_X; build_body()
1091 emit_load_imm(r_s0, k, ctx); build_body()
1094 ctx); build_body()
1095 b_off = b_imm(i + inst->jf + 1, ctx); build_body()
1097 b_off, ctx); build_body()
1098 emit_nop(ctx); build_body()
1100 b_off = b_imm(i + inst->jt + 1, ctx); build_body()
1101 emit_b(b_off, ctx); build_body()
1102 emit_nop(ctx); build_body()
1105 b_off = b_imm(i + inst->jt + 1, ctx); build_body()
1106 emit_b(b_off, ctx); build_body()
1107 emit_nop(ctx); build_body()
1112 ctx->flags |= SEEN_S0 | SEEN_A; build_body()
1113 emit_load_imm(r_s0, k, ctx); build_body()
1115 b_off = b_imm(i + inst->jt + 1, ctx); build_body()
1117 b_off, ctx); build_body()
1118 emit_nop(ctx); build_body()
1121 ctx); build_body()
1123 b_off, ctx); build_body()
1124 emit_nop(ctx); build_body()
1127 ctx->flags |= SEEN_A | SEEN_X; build_body()
1129 ctx); build_body()
1131 b_off, ctx); build_body()
1132 emit_nop(ctx); build_body()
1134 b_off = b_imm(i + inst->jf + 1, ctx); build_body()
1136 b_off, ctx); build_body()
1137 emit_nop(ctx); build_body()
1142 ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A; build_body()
1144 emit_load_imm(r_s1, k, ctx); build_body()
1145 emit_and(r_s0, r_A, r_s1, ctx); build_body()
1147 b_off = b_imm(i + inst->jt + 1, ctx); build_body()
1148 emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); build_body()
1149 emit_nop(ctx); build_body()
1151 b_off = b_imm(i + inst->jf + 1, ctx); build_body()
1152 emit_b(b_off, ctx); build_body()
1153 emit_nop(ctx); build_body()
1156 ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A; build_body()
1158 emit_and(r_s0, r_A, r_X, ctx); build_body()
1160 b_off = b_imm(i + inst->jt + 1, ctx); build_body()
1161 emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx); build_body()
1162 emit_nop(ctx); build_body()
1164 b_off = b_imm(i + inst->jf + 1, ctx); build_body()
1165 emit_b(b_off, ctx); build_body()
1166 emit_nop(ctx); build_body()
1169 ctx->flags |= SEEN_A; build_body()
1175 emit_b(b_imm(prog->len, ctx), ctx); build_body()
1176 emit_reg_move(r_ret, r_A, ctx); /* delay slot */ build_body()
1183 emit_load_imm(r_ret, k, ctx); build_body()
1189 emit_b(b_imm(prog->len, ctx), ctx); build_body()
1190 emit_nop(ctx); build_body()
1195 ctx->flags |= SEEN_X | SEEN_A; build_body()
1196 emit_jit_reg_move(r_X, r_A, ctx); build_body()
1200 ctx->flags |= SEEN_A | SEEN_X; build_body()
1201 emit_jit_reg_move(r_A, r_X, ctx); build_body()
1206 ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A; build_body()
1210 emit_half_load(r_A, r_skb, off, ctx); build_body()
1215 emit_wsbh(r_A, r_A, ctx); build_body()
1218 emit_andi(r_tmp_imm, r_A, 0xff, ctx); build_body()
1220 emit_sll(r_tmp, r_tmp_imm, 8, ctx); build_body()
1222 emit_srl(r_tmp_imm, r_A, 8, ctx); build_body()
1223 emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx); build_body()
1225 emit_or(r_A, r_tmp, r_tmp_imm, ctx); build_body()
1230 ctx->flags |= SEEN_A | SEEN_OFF; build_body()
1236 emit_load(r_A, 28, off, ctx); build_body()
1240 ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0; build_body()
1243 emit_load_ptr(r_s0, r_skb, off, ctx); build_body()
1246 b_imm(prog->len, ctx), ctx); build_body()
1247 emit_reg_move(r_ret, r_zero, ctx); build_body()
1251 emit_load(r_A, r_s0, off, ctx); build_body()
1254 ctx->flags |= SEEN_SKB | SEEN_A; build_body()
1257 emit_load(r_A, r_skb, off, ctx); build_body()
1260 ctx->flags |= SEEN_SKB | SEEN_A; build_body()
1263 emit_load(r_A, r_skb, off, ctx); build_body()
1267 ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A; build_body()
1271 emit_half_load(r_s0, r_skb, off, ctx); build_body()
1273 emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx); build_body()
1275 emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx); build_body()
1277 emit_sltu(r_A, r_zero, r_A, ctx); build_body()
1281 ctx->flags |= SEEN_SKB; build_body()
1283 emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx); build_body()
1285 emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); build_body()
1288 emit_srl(r_A, r_A, 5, ctx); build_body()
1292 ctx->flags |= SEEN_SKB | SEEN_A; build_body()
1298 emit_half_load(r_A, r_skb, off, ctx); build_body()
1308 if (ctx->target == NULL) build_body()
1309 ctx->offsets[i] = ctx->idx * 4; build_body()
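
The bookkeeping at source lines 754-755 and 1308-1309 above, distilled: build_body() runs twice, and on the sizing pass (ctx->target == NULL) it only records where each BPF instruction's native code begins, so b_imm() can resolve forward branches on the emission pass. A sketch of that skeleton (names as in the listing):

for (i = 0; i < prog->len; i++) {
	const struct sock_filter *inst = &prog->insns[i];

	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;	/* byte offset of insn i */

	/* ... the big switch on inst->code emits, or merely counts,
	 * the native instructions for insn i ... */
}
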
1318 struct jit_ctx ctx; bpf_jit_compile() local
1324 memset(&ctx, 0, sizeof(ctx)); bpf_jit_compile()
1326 ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL); bpf_jit_compile()
1327 if (ctx.offsets == NULL) bpf_jit_compile()
1330 ctx.skf = fp; bpf_jit_compile()
1332 if (build_body(&ctx)) bpf_jit_compile()
1335 tmp_idx = ctx.idx; bpf_jit_compile()
1336 build_prologue(&ctx); bpf_jit_compile()
1337 ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; bpf_jit_compile()
1338 /* just to complete the ctx.idx count */ bpf_jit_compile()
1339 build_epilogue(&ctx); bpf_jit_compile()
1341 alloc_size = 4 * ctx.idx; bpf_jit_compile()
1342 ctx.target = module_alloc(alloc_size); bpf_jit_compile()
1343 if (ctx.target == NULL) bpf_jit_compile()
1347 memset(ctx.target, 0, alloc_size); bpf_jit_compile()
1349 ctx.idx = 0; bpf_jit_compile()
1352 build_prologue(&ctx); bpf_jit_compile()
1353 build_body(&ctx); bpf_jit_compile()
1354 build_epilogue(&ctx); bpf_jit_compile()
1357 flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx)); bpf_jit_compile()
1361 bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); bpf_jit_compile()
1363 fp->bpf_func = (void *)ctx.target; bpf_jit_compile()
1367 kfree(ctx.offsets); bpf_jit_compile()
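
The bpf_jit_compile() flow above is a classic two-pass JIT: build_body() first runs with ctx.target == NULL purely to count instructions and record per-instruction byte offsets (line 1309), module_alloc() then sizes the real buffer from ctx.idx (lines 1341-1342), and prologue, body, and epilogue are emitted a second time into that buffer (lines 1349-1354). A minimal userspace sketch of the pattern; jit_sketch_ctx and emit_word() are illustrative names, not from the kernel tree:

    #include <stdint.h>
    #include <stdlib.h>

    struct jit_sketch_ctx {
            uint32_t *target;    /* NULL during the sizing pass */
            unsigned int idx;    /* emit cursor, in instructions */
            uint32_t *offsets;   /* byte offset of each source insn */
    };

    /* Pass 1 only advances idx; pass 2 also stores the word. */
    static void emit_word(struct jit_sketch_ctx *ctx, uint32_t insn)
    {
            if (ctx->target != NULL)
                    ctx->target[ctx->idx] = insn;
            ctx->idx++;
    }

    static void build(struct jit_sketch_ctx *ctx, int nr_insns)
    {
            for (int i = 0; i < nr_insns; i++) {
                    if (ctx->target == NULL)
                            ctx->offsets[i] = ctx->idx * 4; /* cf. line 1309 */
                    emit_word(ctx, 0u /* encoded instruction */);
            }
    }

    static uint32_t *jit_compile_sketch(int nr_insns)
    {
            struct jit_sketch_ctx ctx = { 0 };

            ctx.offsets = calloc(nr_insns, sizeof(*ctx.offsets));
            if (!ctx.offsets)
                    return NULL;
            build(&ctx, nr_insns);              /* pass 1: size + offsets */
            ctx.target = calloc(ctx.idx, sizeof(uint32_t)); /* ~module_alloc() */
            if (ctx.target) {
                    ctx.idx = 0;
                    build(&ctx, nr_insns);      /* pass 2: emit for real */
            }
            free(ctx.offsets);
            return ctx.target;
    }

Recording offsets[] in the first pass is what lets the branch helpers resolve forward jumps in the second pass, when every target's final position is already known.
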
185 emit_addu(unsigned int dst, unsigned int src1, unsigned int src2, struct jit_ctx *ctx) emit_addu() argument
217 emit_or(unsigned int dst, unsigned int src1, unsigned int src2, struct jit_ctx *ctx) emit_or() argument
223 emit_ori(unsigned int dst, unsigned src, u32 imm, struct jit_ctx *ctx) emit_ori() argument
234 emit_daddiu(unsigned int dst, unsigned int src, int imm, struct jit_ctx *ctx) emit_daddiu() argument
244 emit_addiu(unsigned int dst, unsigned int src, u32 imm, struct jit_ctx *ctx) emit_addiu() argument
255 emit_and(unsigned int dst, unsigned int src1, unsigned int src2, struct jit_ctx *ctx) emit_and() argument
261 emit_andi(unsigned int dst, unsigned int src, u32 imm, struct jit_ctx *ctx) emit_andi() argument
273 emit_xor(unsigned int dst, unsigned int src1, unsigned int src2, struct jit_ctx *ctx) emit_xor() argument
295 emit_subu(unsigned int dst, unsigned int src1, unsigned int src2, struct jit_ctx *ctx) emit_subu() argument
306 emit_sllv(unsigned int dst, unsigned int src, unsigned int sa, struct jit_ctx *ctx) emit_sllv() argument
312 emit_sll(unsigned int dst, unsigned int src, unsigned int sa, struct jit_ctx *ctx) emit_sll() argument
323 emit_srlv(unsigned int dst, unsigned int src, unsigned int sa, struct jit_ctx *ctx) emit_srlv() argument
329 emit_srl(unsigned int dst, unsigned int src, unsigned int sa, struct jit_ctx *ctx) emit_srl() argument
340 emit_slt(unsigned int dst, unsigned int src1, unsigned int src2, struct jit_ctx *ctx) emit_slt() argument
346 emit_sltu(unsigned int dst, unsigned int src1, unsigned int src2, struct jit_ctx *ctx) emit_sltu() argument
352 emit_sltiu(unsigned dst, unsigned int src, unsigned int imm, struct jit_ctx *ctx) emit_sltiu() argument
366 emit_store_stack_reg(ptr reg, ptr base, unsigned int offset, struct jit_ctx *ctx) emit_store_stack_reg() argument
373 emit_store(ptr reg, ptr base, unsigned int offset, struct jit_ctx *ctx) emit_store() argument
379 emit_load_stack_reg(ptr reg, ptr base, unsigned int offset, struct jit_ctx *ctx) emit_load_stack_reg() argument
386 emit_load(unsigned int reg, unsigned int base, unsigned int offset, struct jit_ctx *ctx) emit_load() argument
392 emit_load_byte(unsigned int reg, unsigned int base, unsigned int offset, struct jit_ctx *ctx) emit_load_byte() argument
398 emit_half_load(unsigned int reg, unsigned int base, unsigned int offset, struct jit_ctx *ctx) emit_half_load() argument
404 emit_mul(unsigned int dst, unsigned int src1, unsigned int src2, struct jit_ctx *ctx) emit_mul() argument
410 emit_div(unsigned int dst, unsigned int src, struct jit_ctx *ctx) emit_div() argument
422 emit_mod(unsigned int dst, unsigned int src, struct jit_ctx *ctx) emit_mod() argument
434 emit_dsll(unsigned int dst, unsigned int src, unsigned int sa, struct jit_ctx *ctx) emit_dsll() argument
440 emit_dsrl32(unsigned int dst, unsigned int src, unsigned int sa, struct jit_ctx *ctx) emit_dsrl32() argument
446 emit_wsbh(unsigned int dst, unsigned int src, struct jit_ctx *ctx) emit_wsbh() argument
453 emit_load_ptr(unsigned int dst, unsigned int src, int imm, struct jit_ctx *ctx) emit_load_ptr() argument
461 emit_load_func(unsigned int reg, ptr imm, struct jit_ctx *ctx) emit_load_func() argument
511 emit_bcond(int cond, unsigned int reg1, unsigned int reg2, unsigned int imm, struct jit_ctx *ctx) emit_bcond() argument
540 emit_jalr(unsigned int link, unsigned int reg, struct jit_ctx *ctx) emit_jalr() argument
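
Each emit_*() helper listed above wraps one MIPS instruction encoding around the same store-or-count primitive (in-tree these are likely built on the kernel's uasm macros rather than hand-packed bit fields). As a hedged illustration, reusing the jit_sketch_ctx/emit_word() names from the sketch above, here is an addu-style helper using the architectural MIPS R-type layout, plus a simplified branch-offset helper:

    /* MIPS R-type layout: op(6) rs(5) rt(5) rd(5) shamt(5) funct(6);
     * addu is opcode 0 (SPECIAL) with funct 0x21. */
    static uint32_t mips_r_type(unsigned op, unsigned rs, unsigned rt,
                                unsigned rd, unsigned shamt, unsigned funct)
    {
            return (op << 26) | (rs << 21) | (rt << 16) |
                   (rd << 11) | (shamt << 6) | funct;
    }

    static void emit_addu_sketch(struct jit_sketch_ctx *ctx, unsigned dst,
                                 unsigned src1, unsigned src2)
    {
            emit_word(ctx, mips_r_type(0, src1, src2, dst, 0, 0x21));
    }

    /* Simplified b_imm()-style byte distance from the slot after the
     * branch at ctx->idx to recorded target insn tgt; the real helper
     * additionally corrects for ctx->prologue_bytes (line 1337) and
     * returns 0 during the sizing pass, when offsets[] is not final. */
    static int b_off_sketch(struct jit_sketch_ctx *ctx, unsigned int tgt)
    {
            if (ctx->target == NULL)
                    return 0;
            return (int)ctx->offsets[tgt] - (int)(ctx->idx + 1) * 4;
    }
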
/linux-4.1.27/drivers/media/platform/coda/
H A Dtrace.h15 TP_PROTO(struct coda_ctx *ctx, int cmd),
17 TP_ARGS(ctx, cmd),
21 __field(int, ctx)
26 __entry->minor = ctx->fh.vdev->minor;
27 __entry->ctx = ctx->idx;
31 TP_printk("minor = %d, ctx = %d, cmd = %d",
32 __entry->minor, __entry->ctx, __entry->cmd)
36 TP_PROTO(struct coda_ctx *ctx),
38 TP_ARGS(ctx),
42 __field(int, ctx)
46 __entry->minor = ctx->fh.vdev->minor;
47 __entry->ctx = ctx->idx;
50 TP_printk("minor = %d, ctx = %d", __entry->minor, __entry->ctx)
54 TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
56 TP_ARGS(ctx, buf),
61 __field(int, ctx)
65 __entry->minor = ctx->fh.vdev->minor;
67 __entry->ctx = ctx->idx;
70 TP_printk("minor = %d, index = %d, ctx = %d",
71 __entry->minor, __entry->index, __entry->ctx)
75 TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf),
77 TP_ARGS(ctx, buf),
82 __field(int, ctx)
86 __entry->minor = ctx->fh.vdev->minor;
88 __entry->ctx = ctx->idx;
91 TP_printk("minor = %d, index = %d, ctx = %d",
92 __entry->minor, __entry->index, __entry->ctx)
96 TP_PROTO(struct coda_ctx *ctx, struct vb2_buffer *buf,
99 TP_ARGS(ctx, buf, meta),
106 __field(int, ctx)
110 __entry->minor = ctx->fh.vdev->minor;
114 __entry->ctx = ctx->idx;
117 TP_printk("minor = %d, index = %d, start = 0x%x, end = 0x%x, ctx = %d",
119 __entry->ctx)
123 TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
125 TP_ARGS(ctx, meta),
131 __field(int, ctx)
135 __entry->minor = ctx->fh.vdev->minor;
138 __entry->ctx = ctx->idx;
141 TP_printk("minor = %d, start = 0x%x, end = 0x%x, ctx = %d",
142 __entry->minor, __entry->start, __entry->end, __entry->ctx)
146 TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta),
148 TP_ARGS(ctx, meta),
154 __field(int, ctx)
158 __entry->minor = ctx->fh.vdev->minor;
161 __entry->ctx = ctx->idx;
164 TP_printk("minor = %d, start = 0x%x, end = 0x%x, ctx = %d",
165 __entry->minor, __entry->start, __entry->end, __entry->ctx)
169 TP_PROTO(struct coda_ctx *ctx, struct coda_buffer_meta *meta,
172 TP_ARGS(ctx, meta, buf),
179 __field(int, ctx)
183 __entry->minor = ctx->fh.vdev->minor;
187 __entry->ctx = ctx->idx;
190 TP_printk("minor = %d, start = 0x%x, end = 0x%x, index = %d, ctx = %d",
192 __entry->ctx)
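
The trace.h fragments above are the bodies of kernel TRACE_EVENT() definitions: TP_PROTO/TP_ARGS declare the tracepoint signature, the __field() lines lay out the ring-buffer record, the __entry assignments fill it, and TP_printk() formats it for output. Reassembled from the fields shown on lines 15-32, the first event plausibly reads as below; the (ctx, cmd) prototype matches the trace_coda_bit_run() call at coda-bit.c line 93, and the TRACE_SYSTEM/include-guard boilerplate every trace header needs is omitted here:

    #include <linux/tracepoint.h>

    TRACE_EVENT(coda_bit_run,
            TP_PROTO(struct coda_ctx *ctx, int cmd),
            TP_ARGS(ctx, cmd),
            TP_STRUCT__entry(
                    __field(int, minor)
                    __field(int, ctx)
                    __field(int, cmd)
            ),
            TP_fast_assign(
                    __entry->minor = ctx->fh.vdev->minor;
                    __entry->ctx = ctx->idx;
                    __entry->cmd = cmd;
            ),
            TP_printk("minor = %d, ctx = %d, cmd = %d",
                      __entry->minor, __entry->ctx, __entry->cmd)
    );
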
H A Dcoda-bit.c43 static void coda_free_bitstream_buffer(struct coda_ctx *ctx);
66 static void coda_command_async(struct coda_ctx *ctx, int cmd) coda_command_async() argument
68 struct coda_dev *dev = ctx->dev; coda_command_async()
73 coda_write(dev, ctx->bit_stream_param, coda_command_async()
75 coda_write(dev, ctx->frm_dis_flg, coda_command_async()
76 CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx)); coda_command_async()
77 coda_write(dev, ctx->frame_mem_ctrl, coda_command_async()
79 coda_write(dev, ctx->workbuf.paddr, CODA_REG_BIT_WORK_BUF_ADDR); coda_command_async()
89 coda_write(dev, ctx->idx, CODA_REG_BIT_RUN_INDEX); coda_command_async()
90 coda_write(dev, ctx->params.codec_mode, CODA_REG_BIT_RUN_COD_STD); coda_command_async()
91 coda_write(dev, ctx->params.codec_mode_aux, CODA7_REG_BIT_RUN_AUX_STD); coda_command_async()
93 trace_coda_bit_run(ctx, cmd); coda_command_async()
98 static int coda_command_sync(struct coda_ctx *ctx, int cmd) coda_command_sync() argument
100 struct coda_dev *dev = ctx->dev; coda_command_sync()
103 coda_command_async(ctx, cmd); coda_command_sync()
105 trace_coda_bit_done(ctx); coda_command_sync()
110 int coda_hw_reset(struct coda_ctx *ctx) coda_hw_reset() argument
112 struct coda_dev *dev = ctx->dev; coda_hw_reset()
146 static void coda_kfifo_sync_from_device(struct coda_ctx *ctx) coda_kfifo_sync_from_device() argument
148 struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo; coda_kfifo_sync_from_device()
149 struct coda_dev *dev = ctx->dev; coda_kfifo_sync_from_device()
152 rd_ptr = coda_read(dev, CODA_REG_BIT_RD_PTR(ctx->reg_idx)); coda_kfifo_sync_from_device()
154 (rd_ptr - ctx->bitstream.paddr); coda_kfifo_sync_from_device()
159 static void coda_kfifo_sync_to_device_full(struct coda_ctx *ctx) coda_kfifo_sync_to_device_full() argument
161 struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo; coda_kfifo_sync_to_device_full()
162 struct coda_dev *dev = ctx->dev; coda_kfifo_sync_to_device_full()
165 rd_ptr = ctx->bitstream.paddr + (kfifo->out & kfifo->mask); coda_kfifo_sync_to_device_full()
166 coda_write(dev, rd_ptr, CODA_REG_BIT_RD_PTR(ctx->reg_idx)); coda_kfifo_sync_to_device_full()
167 wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask); coda_kfifo_sync_to_device_full()
168 coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx)); coda_kfifo_sync_to_device_full()
171 static void coda_kfifo_sync_to_device_write(struct coda_ctx *ctx) coda_kfifo_sync_to_device_write() argument
173 struct __kfifo *kfifo = &ctx->bitstream_fifo.kfifo; coda_kfifo_sync_to_device_write()
174 struct coda_dev *dev = ctx->dev; coda_kfifo_sync_to_device_write()
177 wr_ptr = ctx->bitstream.paddr + (kfifo->in & kfifo->mask); coda_kfifo_sync_to_device_write()
178 coda_write(dev, wr_ptr, CODA_REG_BIT_WR_PTR(ctx->reg_idx)); coda_kfifo_sync_to_device_write()
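
The three sync helpers above translate between the kfifo's free-running in/out counters and the BIT processor's read/write pointer registers: masking a counter with kfifo->mask (the buffer size is a power of two) yields the offset inside the bitstream buffer, and adding ctx->bitstream.paddr turns that into the bus address the device expects. In sketch form, with ring_sketch standing in for struct __kfifo:

    #include <stdint.h>

    struct ring_sketch {
            unsigned int in, out;   /* free-running write/read counters */
            unsigned int mask;      /* buffer size - 1 (power of two) */
            uint32_t paddr;         /* bus address of buffer start */
    };

    static uint32_t ring_wr_ptr(const struct ring_sketch *r)
    {
            return r->paddr + (r->in & r->mask);    /* cf. line 177 */
    }

    static uint32_t ring_rd_ptr(const struct ring_sketch *r)
    {
            return r->paddr + (r->out & r->mask);   /* cf. line 165 */
    }

    /* Reverse direction, cf. lines 152-154: a pointer read back from
     * the device becomes a buffer offset by subtracting paddr. */
    static unsigned int ring_offset(const struct ring_sketch *r, uint32_t ptr)
    {
            return ptr - r->paddr;
    }

Because the size is a power of two the counters never need an explicit wrap: the mask does it, and in - out still gives the payload correctly across wraparound.
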
181 static int coda_bitstream_queue(struct coda_ctx *ctx, coda_bitstream_queue() argument
187 n = kfifo_in(&ctx->bitstream_fifo, vb2_plane_vaddr(src_buf, 0), coda_bitstream_queue()
192 src_buf->v4l2_buf.sequence = ctx->qsequence++; coda_bitstream_queue()
197 static bool coda_bitstream_try_queue(struct coda_ctx *ctx, coda_bitstream_try_queue() argument
202 if (coda_get_bitstream_payload(ctx) + coda_bitstream_try_queue()
203 vb2_get_plane_payload(src_buf, 0) + 512 >= ctx->bitstream.size) coda_bitstream_try_queue()
207 v4l2_err(&ctx->dev->v4l2_dev, "trying to queue empty buffer\n"); coda_bitstream_try_queue()
211 ret = coda_bitstream_queue(ctx, src_buf); coda_bitstream_try_queue()
213 v4l2_err(&ctx->dev->v4l2_dev, "bitstream buffer overflow\n"); coda_bitstream_try_queue()
217 if (ctx == v4l2_m2m_get_curr_priv(ctx->dev->m2m_dev)) coda_bitstream_try_queue()
218 coda_kfifo_sync_to_device_write(ctx); coda_bitstream_try_queue()
220 ctx->hold = false; coda_bitstream_try_queue()
225 void coda_fill_bitstream(struct coda_ctx *ctx, bool streaming) coda_fill_bitstream() argument
231 while (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) > 0) { coda_fill_bitstream()
236 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG && coda_fill_bitstream()
237 (coda_get_bitstream_payload(ctx) >= 512) && !ctx->hold) coda_fill_bitstream()
240 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); coda_fill_bitstream()
243 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG && coda_fill_bitstream()
244 !coda_jpeg_check_buffer(ctx, src_buf)) { coda_fill_bitstream()
245 v4l2_err(&ctx->dev->v4l2_dev, coda_fill_bitstream()
247 ctx->qsequence); coda_fill_bitstream()
248 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); coda_fill_bitstream()
256 start = ctx->bitstream_fifo.kfifo.in & coda_fill_bitstream()
257 ctx->bitstream_fifo.kfifo.mask; coda_fill_bitstream()
259 if (coda_bitstream_try_queue(ctx, src_buf)) { coda_fill_bitstream()
264 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); coda_fill_bitstream()
272 meta->end = ctx->bitstream_fifo.kfifo.in & coda_fill_bitstream()
273 ctx->bitstream_fifo.kfifo.mask; coda_fill_bitstream()
275 &ctx->buffer_meta_list); coda_fill_bitstream()
277 trace_coda_bit_queue(ctx, src_buf, meta); coda_fill_bitstream()
287 void coda_bit_stream_end_flag(struct coda_ctx *ctx) coda_bit_stream_end_flag() argument
289 struct coda_dev *dev = ctx->dev; coda_bit_stream_end_flag()
291 ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG; coda_bit_stream_end_flag()
296 (ctx->idx == coda_read(dev, CODA_REG_BIT_RUN_INDEX))) { coda_bit_stream_end_flag()
297 coda_write(dev, ctx->bit_stream_param, coda_bit_stream_end_flag()
302 static void coda_parabuf_write(struct coda_ctx *ctx, int index, u32 value) coda_parabuf_write() argument
304 struct coda_dev *dev = ctx->dev; coda_parabuf_write()
305 u32 *p = ctx->parabuf.vaddr; coda_parabuf_write()
313 static inline int coda_alloc_context_buf(struct coda_ctx *ctx, coda_alloc_context_buf() argument
317 return coda_alloc_aux_buf(ctx->dev, buf, size, name, ctx->debugfs_entry); coda_alloc_context_buf()
321 static void coda_free_framebuffers(struct coda_ctx *ctx) coda_free_framebuffers() argument
326 coda_free_aux_buf(ctx->dev, &ctx->internal_frames[i]); coda_free_framebuffers()
329 static int coda_alloc_framebuffers(struct coda_ctx *ctx, coda_alloc_framebuffers() argument
332 struct coda_dev *dev = ctx->dev; coda_alloc_framebuffers()
339 if (ctx->codec && (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 || coda_alloc_framebuffers()
340 ctx->codec->dst_fourcc == V4L2_PIX_FMT_H264)) { coda_alloc_framebuffers()
350 for (i = 0; i < ctx->num_internal_frames; i++) { coda_alloc_framebuffers()
355 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 && coda_alloc_framebuffers()
359 ret = coda_alloc_context_buf(ctx, &ctx->internal_frames[i], coda_alloc_framebuffers()
363 coda_free_framebuffers(ctx); coda_alloc_framebuffers()
369 for (i = 0; i < ctx->num_internal_frames; i++) { coda_alloc_framebuffers()
370 paddr = ctx->internal_frames[i].paddr; coda_alloc_framebuffers()
372 coda_parabuf_write(ctx, i * 3 + 0, paddr); coda_alloc_framebuffers()
373 coda_parabuf_write(ctx, i * 3 + 1, paddr + ysize); coda_alloc_framebuffers()
374 coda_parabuf_write(ctx, i * 3 + 2, paddr + ysize + ysize / 4); coda_alloc_framebuffers()
377 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_H264 && coda_alloc_framebuffers()
379 coda_parabuf_write(ctx, 96 + i, coda_alloc_framebuffers()
380 ctx->internal_frames[i].paddr + coda_alloc_framebuffers()
386 (ctx->codec->src_fourcc == V4L2_PIX_FMT_MPEG4)) coda_alloc_framebuffers()
387 coda_parabuf_write(ctx, 97, ctx->internal_frames[i].paddr + coda_alloc_framebuffers()
393 static void coda_free_context_buffers(struct coda_ctx *ctx) coda_free_context_buffers() argument
395 struct coda_dev *dev = ctx->dev; coda_free_context_buffers()
397 coda_free_aux_buf(dev, &ctx->slicebuf); coda_free_context_buffers()
398 coda_free_aux_buf(dev, &ctx->psbuf); coda_free_context_buffers()
400 coda_free_aux_buf(dev, &ctx->workbuf); coda_free_context_buffers()
401 coda_free_aux_buf(dev, &ctx->parabuf); coda_free_context_buffers()
404 static int coda_alloc_context_buffers(struct coda_ctx *ctx, coda_alloc_context_buffers() argument
407 struct coda_dev *dev = ctx->dev; coda_alloc_context_buffers()
411 if (!ctx->parabuf.vaddr) { coda_alloc_context_buffers()
412 ret = coda_alloc_context_buf(ctx, &ctx->parabuf, coda_alloc_context_buffers()
421 if (!ctx->slicebuf.vaddr && q_data->fourcc == V4L2_PIX_FMT_H264) { coda_alloc_context_buffers()
425 ret = coda_alloc_context_buf(ctx, &ctx->slicebuf, size, coda_alloc_context_buffers()
431 if (!ctx->psbuf.vaddr && dev->devtype->product == CODA_7541) { coda_alloc_context_buffers()
432 ret = coda_alloc_context_buf(ctx, &ctx->psbuf, coda_alloc_context_buffers()
438 if (!ctx->workbuf.vaddr) { coda_alloc_context_buffers()
443 ret = coda_alloc_context_buf(ctx, &ctx->workbuf, size, coda_alloc_context_buffers()
452 coda_free_context_buffers(ctx); coda_alloc_context_buffers()
456 static int coda_encode_header(struct coda_ctx *ctx, struct vb2_buffer *buf, coda_encode_header() argument
459 struct coda_dev *dev = ctx->dev; coda_encode_header()
474 ret = coda_command_sync(ctx, CODA_COMMAND_ENCODE_HEADER); coda_encode_header()
486 *size = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)) - coda_encode_header()
509 static void coda_setup_iram(struct coda_ctx *ctx) coda_setup_iram() argument
511 struct coda_iram_info *iram_info = &ctx->iram_info; coda_setup_iram()
512 struct coda_dev *dev = ctx->dev; coda_setup_iram()
541 if (ctx->inst_type == CODA_INST_ENCODER) { coda_setup_iram()
544 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_setup_iram()
581 } else if (ctx->inst_type == CODA_INST_DECODER) { coda_setup_iram()
584 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); coda_setup_iram()
609 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_setup_iram()
614 if (ctx->inst_type == CODA_INST_DECODER) { coda_setup_iram()
719 static int coda_encoder_reqbufs(struct coda_ctx *ctx, coda_encoder_reqbufs() argument
729 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_encoder_reqbufs()
730 ret = coda_alloc_context_buffers(ctx, q_data_src); coda_encoder_reqbufs()
734 coda_free_context_buffers(ctx); coda_encoder_reqbufs()
740 static int coda_start_encoding(struct coda_ctx *ctx) coda_start_encoding() argument
742 struct coda_dev *dev = ctx->dev; coda_start_encoding()
752 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_start_encoding()
753 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); coda_start_encoding()
756 buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); coda_start_encoding()
766 if (!ctx->params.jpeg_qmat_tab[0]) coda_start_encoding()
767 ctx->params.jpeg_qmat_tab[0] = kmalloc(64, GFP_KERNEL); coda_start_encoding()
768 if (!ctx->params.jpeg_qmat_tab[1]) coda_start_encoding()
769 ctx->params.jpeg_qmat_tab[1] = kmalloc(64, GFP_KERNEL); coda_start_encoding()
770 coda_set_jpeg_compression_quality(ctx, ctx->params.jpeg_quality); coda_start_encoding()
775 coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR); coda_start_encoding()
776 coda_write(dev, bitstream_buf, CODA_REG_BIT_RD_PTR(ctx->reg_idx)); coda_start_encoding()
777 coda_write(dev, bitstream_buf, CODA_REG_BIT_WR_PTR(ctx->reg_idx)); coda_start_encoding()
792 ctx->frame_mem_ctrl &= ~CODA_FRAME_CHROMA_INTERLEAVE; coda_start_encoding()
794 ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE; coda_start_encoding()
795 coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL); coda_start_encoding()
829 ctx->params.framerate = 0; coda_start_encoding()
830 coda_write(dev, ctx->params.framerate, coda_start_encoding()
833 ctx->params.codec_mode = ctx->codec->mode; coda_start_encoding()
851 if (ctx->params.h264_deblk_enabled) { coda_start_encoding()
852 value = ((ctx->params.h264_deblk_alpha & coda_start_encoding()
855 ((ctx->params.h264_deblk_beta & coda_start_encoding()
865 coda_write(dev, ctx->params.jpeg_restart_interval, coda_start_encoding()
871 coda_jpeg_write_tables(ctx); coda_start_encoding()
885 switch (ctx->params.slice_mode) { coda_start_encoding()
890 value = (ctx->params.slice_max_mb & coda_start_encoding()
898 value = (ctx->params.slice_max_bits & coda_start_encoding()
907 value = ctx->params.gop_size & CODA_GOP_SIZE_MASK; coda_start_encoding()
911 if (ctx->params.bitrate) { coda_start_encoding()
913 value = (ctx->params.bitrate & CODA_RATECONTROL_BITRATE_MASK) coda_start_encoding()
924 coda_write(dev, ctx->params.intra_refresh, coda_start_encoding()
941 if (ctx->params.h264_min_qp || ctx->params.h264_max_qp) { coda_start_encoding()
943 ctx->params.h264_min_qp << CODA_QPMIN_OFFSET | coda_start_encoding()
944 ctx->params.h264_max_qp << CODA_QPMAX_OFFSET, coda_start_encoding()
948 if (ctx->params.h264_max_qp) coda_start_encoding()
959 if (ctx->params.h264_min_qp) coda_start_encoding()
961 if (ctx->params.h264_max_qp) coda_start_encoding()
968 coda_setup_iram(ctx); coda_start_encoding()
977 coda_write(dev, ctx->iram_info.search_ram_paddr, coda_start_encoding()
979 coda_write(dev, ctx->iram_info.search_ram_size, coda_start_encoding()
988 ret = coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT); coda_start_encoding()
1002 ctx->num_internal_frames = 4; coda_start_encoding()
1004 ctx->num_internal_frames = 2; coda_start_encoding()
1005 ret = coda_alloc_framebuffers(ctx, q_data_src, dst_fourcc); coda_start_encoding()
1013 ctx->num_internal_frames = 0; coda_start_encoding()
1025 coda_write(dev, ctx->iram_info.buf_bit_use, coda_start_encoding()
1027 coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use, coda_start_encoding()
1029 coda_write(dev, ctx->iram_info.buf_dbk_y_use, coda_start_encoding()
1031 coda_write(dev, ctx->iram_info.buf_dbk_c_use, coda_start_encoding()
1033 coda_write(dev, ctx->iram_info.buf_ovl_use, coda_start_encoding()
1036 coda_write(dev, ctx->iram_info.buf_btp_use, coda_start_encoding()
1040 coda_write(dev, ctx->internal_frames[2].paddr, coda_start_encoding()
1042 coda_write(dev, ctx->internal_frames[3].paddr, coda_start_encoding()
1047 ret = coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF); coda_start_encoding()
1054 buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); coda_start_encoding()
1061 ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_SPS, coda_start_encoding()
1062 &ctx->vpu_header[0][0], coda_start_encoding()
1063 &ctx->vpu_header_size[0]); coda_start_encoding()
1071 ret = coda_encode_header(ctx, buf, CODA_HEADER_H264_PPS, coda_start_encoding()
1072 &ctx->vpu_header[1][0], coda_start_encoding()
1073 &ctx->vpu_header_size[1]); coda_start_encoding()
1082 ctx->vpu_header_size[2] = coda_h264_padding( coda_start_encoding()
1083 (ctx->vpu_header_size[0] + coda_start_encoding()
1084 ctx->vpu_header_size[1]), coda_start_encoding()
1085 ctx->vpu_header[2]); coda_start_encoding()
1092 ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOS, coda_start_encoding()
1093 &ctx->vpu_header[0][0], coda_start_encoding()
1094 &ctx->vpu_header_size[0]); coda_start_encoding()
1098 ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VIS, coda_start_encoding()
1099 &ctx->vpu_header[1][0], coda_start_encoding()
1100 &ctx->vpu_header_size[1]); coda_start_encoding()
1104 ret = coda_encode_header(ctx, buf, CODA_HEADER_MP4V_VOL, coda_start_encoding()
1105 &ctx->vpu_header[2][0], coda_start_encoding()
1106 &ctx->vpu_header_size[2]); coda_start_encoding()
1120 static int coda_prepare_encode(struct coda_ctx *ctx) coda_prepare_encode() argument
1124 struct coda_dev *dev = ctx->dev; coda_prepare_encode()
1132 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); coda_prepare_encode()
1133 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); coda_prepare_encode()
1134 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_prepare_encode()
1135 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); coda_prepare_encode()
1138 src_buf->v4l2_buf.sequence = ctx->osequence; coda_prepare_encode()
1139 dst_buf->v4l2_buf.sequence = ctx->osequence; coda_prepare_encode()
1140 ctx->osequence++; coda_prepare_encode()
1147 if (src_buf->v4l2_buf.sequence % ctx->params.gop_size) { coda_prepare_encode()
1156 coda_set_gdi_regs(ctx); coda_prepare_encode()
1165 ctx->vpu_header_size[0] + coda_prepare_encode()
1166 ctx->vpu_header_size[1] + coda_prepare_encode()
1167 ctx->vpu_header_size[2]; coda_prepare_encode()
1169 ctx->vpu_header_size[0] - coda_prepare_encode()
1170 ctx->vpu_header_size[1] - coda_prepare_encode()
1171 ctx->vpu_header_size[2]; coda_prepare_encode()
1173 &ctx->vpu_header[0][0], ctx->vpu_header_size[0]); coda_prepare_encode()
1174 memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0], coda_prepare_encode()
1175 &ctx->vpu_header[1][0], ctx->vpu_header_size[1]); coda_prepare_encode()
1176 memcpy(vb2_plane_vaddr(dst_buf, 0) + ctx->vpu_header_size[0] + coda_prepare_encode()
1177 ctx->vpu_header_size[1], &ctx->vpu_header[2][0], coda_prepare_encode()
1178 ctx->vpu_header_size[2]); coda_prepare_encode()
1189 quant_param = ctx->params.h264_intra_qp; coda_prepare_encode()
1192 quant_param = ctx->params.mpeg4_intra_qp; coda_prepare_encode()
1198 v4l2_warn(&ctx->dev->v4l2_dev, coda_prepare_encode()
1206 quant_param = ctx->params.h264_inter_qp; coda_prepare_encode()
1209 quant_param = ctx->params.mpeg4_inter_qp; coda_prepare_encode()
1212 v4l2_warn(&ctx->dev->v4l2_dev, coda_prepare_encode()
1219 if (ctx->params.rot_mode) coda_prepare_encode()
1220 rot_mode = CODA_ROT_MIR_ENABLE | ctx->params.rot_mode; coda_prepare_encode()
1233 coda_write_base(ctx, q_data_src, src_buf, reg); coda_prepare_encode()
1242 if (!ctx->streamon_out) { coda_prepare_encode()
1244 ctx->bit_stream_param |= CODA_BIT_STREAM_END_FLAG; coda_prepare_encode()
1245 coda_write(dev, ctx->bit_stream_param, coda_prepare_encode()
1250 coda_write(dev, ctx->iram_info.axi_sram_use, coda_prepare_encode()
1253 trace_coda_enc_pic_run(ctx, src_buf); coda_prepare_encode()
1255 coda_command_async(ctx, CODA_COMMAND_PIC_RUN); coda_prepare_encode()
1260 static void coda_finish_encode(struct coda_ctx *ctx) coda_finish_encode() argument
1263 struct coda_dev *dev = ctx->dev; coda_finish_encode()
1266 src_buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); coda_finish_encode()
1267 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); coda_finish_encode()
1269 trace_coda_enc_pic_done(ctx, dst_buf); coda_finish_encode()
1273 wr_ptr = coda_read(dev, CODA_REG_BIT_WR_PTR(ctx->reg_idx)); coda_finish_encode()
1278 ctx->vpu_header_size[0] + coda_finish_encode()
1279 ctx->vpu_header_size[1] + coda_finish_encode()
1280 ctx->vpu_header_size[2]); coda_finish_encode()
1285 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, "frame size = %u\n", coda_finish_encode()
1307 dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); coda_finish_encode()
1310 ctx->gopcounter--; coda_finish_encode()
1311 if (ctx->gopcounter < 0) coda_finish_encode()
1312 ctx->gopcounter = ctx->params.gop_size - 1; coda_finish_encode()
1323 struct coda_ctx *ctx = container_of(work, struct coda_ctx, seq_end_work); coda_seq_end_work() local
1324 struct coda_dev *dev = ctx->dev; coda_seq_end_work()
1326 mutex_lock(&ctx->buffer_mutex); coda_seq_end_work()
1330 "%d: %s: sent command 'SEQ_END' to coda\n", ctx->idx, coda_seq_end_work()
1332 if (coda_command_sync(ctx, CODA_COMMAND_SEQ_END)) { coda_seq_end_work()
1337 kfifo_init(&ctx->bitstream_fifo, coda_seq_end_work()
1338 ctx->bitstream.vaddr, ctx->bitstream.size); coda_seq_end_work()
1340 coda_free_framebuffers(ctx); coda_seq_end_work()
1343 mutex_unlock(&ctx->buffer_mutex); coda_seq_end_work()
1346 static void coda_bit_release(struct coda_ctx *ctx) coda_bit_release() argument
1348 mutex_lock(&ctx->buffer_mutex); coda_bit_release()
1349 coda_free_framebuffers(ctx); coda_bit_release()
1350 coda_free_context_buffers(ctx); coda_bit_release()
1351 coda_free_bitstream_buffer(ctx); coda_bit_release()
1352 mutex_unlock(&ctx->buffer_mutex); coda_bit_release()
1369 static int coda_alloc_bitstream_buffer(struct coda_ctx *ctx, coda_alloc_bitstream_buffer() argument
1372 if (ctx->bitstream.vaddr) coda_alloc_bitstream_buffer()
1375 ctx->bitstream.size = roundup_pow_of_two(q_data->sizeimage * 2); coda_alloc_bitstream_buffer()
1376 ctx->bitstream.vaddr = dma_alloc_writecombine( coda_alloc_bitstream_buffer()
1377 &ctx->dev->plat_dev->dev, ctx->bitstream.size, coda_alloc_bitstream_buffer()
1378 &ctx->bitstream.paddr, GFP_KERNEL); coda_alloc_bitstream_buffer()
1379 if (!ctx->bitstream.vaddr) { coda_alloc_bitstream_buffer()
1380 v4l2_err(&ctx->dev->v4l2_dev, coda_alloc_bitstream_buffer()
1384 kfifo_init(&ctx->bitstream_fifo, coda_alloc_bitstream_buffer()
1385 ctx->bitstream.vaddr, ctx->bitstream.size); coda_alloc_bitstream_buffer()
1390 static void coda_free_bitstream_buffer(struct coda_ctx *ctx) coda_free_bitstream_buffer() argument
1392 if (ctx->bitstream.vaddr == NULL) coda_free_bitstream_buffer()
1395 dma_free_writecombine(&ctx->dev->plat_dev->dev, ctx->bitstream.size, coda_free_bitstream_buffer()
1396 ctx->bitstream.vaddr, ctx->bitstream.paddr); coda_free_bitstream_buffer()
1397 ctx->bitstream.vaddr = NULL; coda_free_bitstream_buffer()
1398 kfifo_init(&ctx->bitstream_fifo, NULL, 0); coda_free_bitstream_buffer()
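
coda_alloc_bitstream_buffer()/coda_free_bitstream_buffer() above are a matched pair around dma_alloc_writecombine(): the size is rounded up to a power of two (line 1375, so the kfifo masking shown earlier stays valid), the allocation yields both a CPU mapping and a bus address, and the free path resets vaddr so a repeated alloc is a no-op (line 1372). A hedged kernel-style sketch with illustrative names:

    #include <linux/dma-mapping.h>
    #include <linux/log2.h>

    struct aux_buf_sketch {          /* illustrative, not the coda struct */
            void       *vaddr;
            dma_addr_t  paddr;
            size_t      size;
    };

    static int aux_buf_alloc(struct device *dev, struct aux_buf_sketch *b,
                             size_t want)
    {
            if (b->vaddr)            /* already allocated, cf. line 1372 */
                    return 0;
            b->size  = roundup_pow_of_two(want);
            b->vaddr = dma_alloc_writecombine(dev, b->size, &b->paddr,
                                              GFP_KERNEL);
            return b->vaddr ? 0 : -ENOMEM;
    }

    static void aux_buf_free(struct device *dev, struct aux_buf_sketch *b)
    {
            if (!b->vaddr)
                    return;
            dma_free_writecombine(dev, b->size, b->vaddr, b->paddr);
            b->vaddr = NULL;
    }
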
1401 static int coda_decoder_reqbufs(struct coda_ctx *ctx, coda_decoder_reqbufs() argument
1411 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_decoder_reqbufs()
1412 ret = coda_alloc_context_buffers(ctx, q_data_src); coda_decoder_reqbufs()
1415 ret = coda_alloc_bitstream_buffer(ctx, q_data_src); coda_decoder_reqbufs()
1417 coda_free_context_buffers(ctx); coda_decoder_reqbufs()
1421 coda_free_bitstream_buffer(ctx); coda_decoder_reqbufs()
1422 coda_free_context_buffers(ctx); coda_decoder_reqbufs()
1428 static int __coda_start_decoding(struct coda_ctx *ctx) __coda_start_decoding() argument
1432 struct coda_dev *dev = ctx->dev; __coda_start_decoding()
1439 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); __coda_start_decoding()
1440 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); __coda_start_decoding()
1441 bitstream_buf = ctx->bitstream.paddr; __coda_start_decoding()
1442 bitstream_size = ctx->bitstream.size; __coda_start_decoding()
1446 coda_write(dev, ctx->parabuf.paddr, CODA_REG_BIT_PARA_BUF_ADDR); __coda_start_decoding()
1449 coda_kfifo_sync_to_device_full(ctx); __coda_start_decoding()
1451 ctx->frame_mem_ctrl &= ~CODA_FRAME_CHROMA_INTERLEAVE; __coda_start_decoding()
1453 ctx->frame_mem_ctrl |= CODA_FRAME_CHROMA_INTERLEAVE; __coda_start_decoding()
1454 coda_write(dev, ctx->frame_mem_ctrl, CODA_REG_BIT_FRAME_MEM_CTRL); __coda_start_decoding()
1456 ctx->display_idx = -1; __coda_start_decoding()
1457 ctx->frm_dis_flg = 0; __coda_start_decoding()
1458 coda_write(dev, 0, CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx)); __coda_start_decoding()
1469 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) __coda_start_decoding()
1473 ctx->params.codec_mode = ctx->codec->mode; __coda_start_decoding()
1476 ctx->params.codec_mode_aux = CODA_MP4_AUX_MPEG4; __coda_start_decoding()
1478 ctx->params.codec_mode_aux = 0; __coda_start_decoding()
1481 coda_write(dev, ctx->psbuf.paddr, __coda_start_decoding()
1494 if (coda_command_sync(ctx, CODA_COMMAND_SEQ_INIT)) { __coda_start_decoding()
1501 coda_kfifo_sync_from_device(ctx); __coda_start_decoding()
1532 __func__, ctx->idx, width, height); __coda_start_decoding()
1534 ctx->num_internal_frames = coda_read(dev, CODA_RET_DEC_SEQ_FRAME_NEED); __coda_start_decoding()
1535 if (ctx->num_internal_frames > CODA_MAX_FRAMEBUFFERS) { __coda_start_decoding()
1538 CODA_MAX_FRAMEBUFFERS, ctx->num_internal_frames); __coda_start_decoding()
1557 ret = coda_alloc_framebuffers(ctx, q_data_dst, src_fourcc); __coda_start_decoding()
1564 coda_write(dev, ctx->num_internal_frames, CODA_CMD_SET_FRAME_BUF_NUM); __coda_start_decoding()
1569 coda_setup_iram(ctx); __coda_start_decoding()
1571 coda_write(dev, ctx->iram_info.buf_bit_use, __coda_start_decoding()
1573 coda_write(dev, ctx->iram_info.buf_ip_ac_dc_use, __coda_start_decoding()
1575 coda_write(dev, ctx->iram_info.buf_dbk_y_use, __coda_start_decoding()
1577 coda_write(dev, ctx->iram_info.buf_dbk_c_use, __coda_start_decoding()
1579 coda_write(dev, ctx->iram_info.buf_ovl_use, __coda_start_decoding()
1582 coda_write(dev, ctx->iram_info.buf_btp_use, __coda_start_decoding()
1608 coda_write(dev, ctx->slicebuf.paddr, __coda_start_decoding()
1610 coda_write(dev, ctx->slicebuf.size / 1024, __coda_start_decoding()
1630 if (coda_command_sync(ctx, CODA_COMMAND_SET_FRAME_BUF)) { __coda_start_decoding()
1631 v4l2_err(&ctx->dev->v4l2_dev, __coda_start_decoding()
1639 static int coda_start_decoding(struct coda_ctx *ctx) coda_start_decoding() argument
1641 struct coda_dev *dev = ctx->dev; coda_start_decoding()
1645 ret = __coda_start_decoding(ctx); coda_start_decoding()
1651 static int coda_prepare_decode(struct coda_ctx *ctx) coda_prepare_decode() argument
1654 struct coda_dev *dev = ctx->dev; coda_prepare_decode()
1659 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); coda_prepare_decode()
1660 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); coda_prepare_decode()
1663 mutex_lock(&ctx->bitstream_mutex); coda_prepare_decode()
1664 coda_fill_bitstream(ctx, true); coda_prepare_decode()
1665 mutex_unlock(&ctx->bitstream_mutex); coda_prepare_decode()
1667 if (coda_get_bitstream_payload(ctx) < 512 && coda_prepare_decode()
1668 (!(ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG))) { coda_prepare_decode()
1671 coda_get_bitstream_payload(ctx)); coda_prepare_decode()
1672 v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx); coda_prepare_decode()
1677 if (!ctx->initialized) { coda_prepare_decode()
1678 int ret = __coda_start_decoding(ctx); coda_prepare_decode()
1682 v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx); coda_prepare_decode()
1685 ctx->initialized = 1; coda_prepare_decode()
1690 coda_set_gdi_regs(ctx); coda_prepare_decode()
1697 * ROT_INDEX needs to be < 0x40, but > ctx->num_internal_frames. coda_prepare_decode()
1708 coda_write_base(ctx, q_data_dst, dst_buf, reg_addr); coda_prepare_decode()
1711 coda_write(dev, CODA_ROT_MIR_ENABLE | ctx->params.rot_mode, coda_prepare_decode()
1732 coda_write(dev, ctx->iram_info.axi_sram_use, coda_prepare_decode()
1735 meta = list_first_entry_or_null(&ctx->buffer_meta_list, coda_prepare_decode()
1738 if (meta && ctx->codec->src_fourcc == V4L2_PIX_FMT_JPEG) { coda_prepare_decode()
1741 if (meta->end == (ctx->bitstream_fifo.kfifo.in & coda_prepare_decode()
1742 ctx->bitstream_fifo.kfifo.mask)) { coda_prepare_decode()
1751 kfifo_in(&ctx->bitstream_fifo, buf, pad); coda_prepare_decode()
1755 coda_kfifo_sync_to_device_full(ctx); coda_prepare_decode()
1760 trace_coda_dec_pic_run(ctx, meta); coda_prepare_decode()
1762 coda_command_async(ctx, CODA_COMMAND_PIC_RUN); coda_prepare_decode()
1767 static void coda_finish_decode(struct coda_ctx *ctx) coda_finish_decode() argument
1769 struct coda_dev *dev = ctx->dev; coda_finish_decode()
1784 coda_kfifo_sync_from_device(ctx); coda_finish_decode()
1790 if (ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) { coda_finish_decode()
1791 if (coda_get_bitstream_payload(ctx) >= ctx->bitstream.size - 512) coda_finish_decode()
1792 kfifo_init(&ctx->bitstream_fifo, coda_finish_decode()
1793 ctx->bitstream.vaddr, ctx->bitstream.size); coda_finish_decode()
1796 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_finish_decode()
1811 ctx->psbuf.size); coda_finish_decode()
1815 ctx->slicebuf.size); coda_finish_decode()
1822 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); coda_finish_decode()
1859 ctx->hold = true; coda_finish_decode()
1864 ctx->frm_dis_flg = coda_read(dev, coda_finish_decode()
1865 CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx)); coda_finish_decode()
1871 if (ctx->display_idx >= 0 && coda_finish_decode()
1872 ctx->display_idx < ctx->num_internal_frames) { coda_finish_decode()
1873 ctx->frm_dis_flg &= ~(1 << ctx->display_idx); coda_finish_decode()
1874 coda_write(dev, ctx->frm_dis_flg, coda_finish_decode()
1875 CODA_REG_BIT_FRM_DIS_FLG(ctx->reg_idx)); coda_finish_decode()
1888 if (display_idx >= 0 && display_idx < ctx->num_internal_frames) coda_finish_decode()
1889 ctx->sequence_offset++; coda_finish_decode()
1890 else if (ctx->display_idx < 0) coda_finish_decode()
1891 ctx->hold = true; coda_finish_decode()
1894 } else if (decoded_idx < 0 || decoded_idx >= ctx->num_internal_frames) { coda_finish_decode()
1899 val -= ctx->sequence_offset; coda_finish_decode()
1900 mutex_lock(&ctx->bitstream_mutex); coda_finish_decode()
1901 if (!list_empty(&ctx->buffer_meta_list)) { coda_finish_decode()
1902 meta = list_first_entry(&ctx->buffer_meta_list, coda_finish_decode()
1908 val, ctx->sequence_offset, coda_finish_decode()
1911 ctx->frame_metas[decoded_idx] = *meta; coda_finish_decode()
1915 memset(&ctx->frame_metas[decoded_idx], 0, coda_finish_decode()
1917 ctx->frame_metas[decoded_idx].sequence = val; coda_finish_decode()
1918 ctx->sequence_offset++; coda_finish_decode()
1920 mutex_unlock(&ctx->bitstream_mutex); coda_finish_decode()
1922 trace_coda_dec_pic_done(ctx, &ctx->frame_metas[decoded_idx]); coda_finish_decode()
1926 ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_KEYFRAME; coda_finish_decode()
1928 ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_PFRAME; coda_finish_decode()
1930 ctx->frame_types[decoded_idx] = V4L2_BUF_FLAG_BFRAME; coda_finish_decode()
1932 ctx->frame_errors[decoded_idx] = err_mb; coda_finish_decode()
1940 ctx->hold = true; coda_finish_decode()
1943 } else if (display_idx < 0 || display_idx >= ctx->num_internal_frames) { coda_finish_decode()
1950 if (ctx->display_idx >= 0 && coda_finish_decode()
1951 ctx->display_idx < ctx->num_internal_frames) { coda_finish_decode()
1952 dst_buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); coda_finish_decode()
1953 dst_buf->v4l2_buf.sequence = ctx->osequence++; coda_finish_decode()
1958 dst_buf->v4l2_buf.flags |= ctx->frame_types[ctx->display_idx]; coda_finish_decode()
1959 meta = &ctx->frame_metas[ctx->display_idx]; coda_finish_decode()
1963 trace_coda_dec_rot_done(ctx, meta, dst_buf); coda_finish_decode()
1978 v4l2_m2m_buf_done(dst_buf, ctx->frame_errors[display_idx] ? coda_finish_decode()
1992 ctx->display_idx = display_idx; coda_finish_decode()
2008 struct coda_ctx *ctx; coda_irq_handler() local
2015 ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev); coda_irq_handler()
2016 if (ctx == NULL) { coda_irq_handler()
2023 trace_coda_bit_done(ctx); coda_irq_handler()
2025 if (ctx->aborting) { coda_irq_handler()
2026 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_irq_handler()
2030 if (coda_isbusy(ctx->dev)) { coda_irq_handler()
2031 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_irq_handler()
2036 complete(&ctx->completion); coda_irq_handler()
H A Dcoda-common.c86 void coda_write_base(struct coda_ctx *ctx, struct coda_q_data *q_data, coda_write_base() argument
109 coda_write(ctx->dev, base_y, reg_y); coda_write_base()
110 coda_write(ctx->dev, base_cb, reg_y + 4); coda_write_base()
111 coda_write(ctx->dev, base_cr, reg_y + 8); coda_write_base()
381 struct coda_ctx *ctx = fh_to_ctx(priv); coda_querycap() local
384 strlcpy(cap->card, coda_product_name(ctx->dev->devtype->product), coda_querycap()
424 struct coda_ctx *ctx = fh_to_ctx(priv); coda_g_fmt() local
426 q_data = get_q_data(ctx, f->type); coda_g_fmt()
440 f->fmt.pix.colorspace = ctx->colorspace; coda_g_fmt()
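
fh_to_ctx() used throughout these ioctl handlers is the customary container_of() accessor: the struct v4l2_fh that the V4L2 core hands back as file->private_data is embedded in struct coda_ctx (see the ctx->fh uses above), so the context is recovered by pointer arithmetic. A sketch of the likely definition, which lives in coda.h and is not shown by this search:

    static inline struct coda_ctx *fh_to_ctx(struct v4l2_fh *fh)
    {
            return container_of(fh, struct coda_ctx, fh);
    }
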
445 static int coda_try_pixelformat(struct coda_ctx *ctx, struct v4l2_format *f) coda_try_pixelformat() argument
452 formats = ctx->cvd->src_formats; coda_try_pixelformat()
454 formats = ctx->cvd->dst_formats; coda_try_pixelformat()
466 q_data = get_q_data(ctx, f->type); coda_try_pixelformat()
472 static unsigned int coda_estimate_sizeimage(struct coda_ctx *ctx, u32 sizeimage, coda_estimate_sizeimage() argument
484 static int coda_try_fmt(struct coda_ctx *ctx, const struct coda_codec *codec, coda_try_fmt() argument
487 struct coda_dev *dev = ctx->dev; coda_try_fmt()
529 f->fmt.pix.sizeimage = coda_estimate_sizeimage(ctx, coda_try_fmt()
544 struct coda_ctx *ctx = fh_to_ctx(priv); coda_try_fmt_vid_cap() local
550 ret = coda_try_pixelformat(ctx, f); coda_try_fmt_vid_cap()
554 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_try_fmt_vid_cap()
560 src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_try_fmt_vid_cap()
566 f->fmt.pix.colorspace = ctx->colorspace; coda_try_fmt_vid_cap()
568 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_try_fmt_vid_cap()
569 codec = coda_find_codec(ctx->dev, q_data_src->fourcc, coda_try_fmt_vid_cap()
574 ret = coda_try_fmt(ctx, codec, f); coda_try_fmt_vid_cap()
593 struct coda_ctx *ctx = fh_to_ctx(priv); coda_try_fmt_vid_out() local
594 struct coda_dev *dev = ctx->dev; coda_try_fmt_vid_out()
599 ret = coda_try_pixelformat(ctx, f); coda_try_fmt_vid_out()
614 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); coda_try_fmt_vid_out()
617 return coda_try_fmt(ctx, codec, f); coda_try_fmt_vid_out()
620 static int coda_s_fmt(struct coda_ctx *ctx, struct v4l2_format *f) coda_s_fmt() argument
625 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); coda_s_fmt()
629 q_data = get_q_data(ctx, f->type); coda_s_fmt()
634 v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__); coda_s_fmt()
648 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_s_fmt()
658 struct coda_ctx *ctx = fh_to_ctx(priv); coda_s_fmt_vid_cap() local
665 return coda_s_fmt(ctx, f); coda_s_fmt_vid_cap()
671 struct coda_ctx *ctx = fh_to_ctx(priv); coda_s_fmt_vid_out() local
679 ret = coda_s_fmt(ctx, f); coda_s_fmt_vid_out()
683 ctx->colorspace = f->fmt.pix.colorspace; coda_s_fmt_vid_out()
695 return coda_s_fmt(ctx, &f_cap); coda_s_fmt_vid_out()
701 struct coda_ctx *ctx = fh_to_ctx(priv); coda_reqbufs() local
704 ret = v4l2_m2m_reqbufs(file, ctx->fh.m2m_ctx, rb); coda_reqbufs()
712 if (rb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && ctx->ops->reqbufs) coda_reqbufs()
713 return ctx->ops->reqbufs(ctx, rb); coda_reqbufs()
721 struct coda_ctx *ctx = fh_to_ctx(priv); coda_qbuf() local
723 return v4l2_m2m_qbuf(file, ctx->fh.m2m_ctx, buf); coda_qbuf()
726 static bool coda_buf_is_end_of_stream(struct coda_ctx *ctx, coda_buf_is_end_of_stream() argument
731 src_vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_buf_is_end_of_stream()
733 return ((ctx->bit_stream_param & CODA_BIT_STREAM_END_FLAG) && coda_buf_is_end_of_stream()
734 (buf->sequence == (ctx->qsequence - 1))); coda_buf_is_end_of_stream()
740 struct coda_ctx *ctx = fh_to_ctx(priv); coda_dqbuf() local
743 ret = v4l2_m2m_dqbuf(file, ctx->fh.m2m_ctx, buf); coda_dqbuf()
747 coda_buf_is_end_of_stream(ctx, buf)) { coda_dqbuf()
752 v4l2_event_queue_fh(&ctx->fh, &eos_event); coda_dqbuf()
761 struct coda_ctx *ctx = fh_to_ctx(fh); coda_g_selection() local
765 q_data = get_q_data(ctx, s->type); coda_g_selection()
820 struct coda_ctx *ctx = fh_to_ctx(fh); coda_decoder_cmd() local
828 if (ctx->inst_type != CODA_INST_DECODER) coda_decoder_cmd()
832 coda_bit_stream_end_flag(ctx); coda_decoder_cmd()
833 ctx->hold = false; coda_decoder_cmd()
834 v4l2_m2m_try_schedule(ctx->fh.m2m_ctx); coda_decoder_cmd()
883 void coda_set_gdi_regs(struct coda_ctx *ctx) coda_set_gdi_regs() argument
885 struct gdi_tiled_map *tiled_map = &ctx->tiled_map; coda_set_gdi_regs()
886 struct coda_dev *dev = ctx->dev; coda_set_gdi_regs()
910 struct coda_ctx *ctx = m2m_priv; coda_device_run() local
911 struct coda_dev *dev = ctx->dev; coda_device_run()
913 queue_work(dev->workqueue, &ctx->pic_run_work); coda_device_run()
918 struct coda_ctx *ctx = container_of(work, struct coda_ctx, pic_run_work); coda_pic_run_work() local
919 struct coda_dev *dev = ctx->dev; coda_pic_run_work()
922 mutex_lock(&ctx->buffer_mutex); coda_pic_run_work()
925 ret = ctx->ops->prepare_run(ctx); coda_pic_run_work()
926 if (ret < 0 && ctx->inst_type == CODA_INST_DECODER) { coda_pic_run_work()
928 mutex_unlock(&ctx->buffer_mutex); coda_pic_run_work()
933 if (!wait_for_completion_timeout(&ctx->completion, coda_pic_run_work()
937 ctx->hold = true; coda_pic_run_work()
939 coda_hw_reset(ctx); coda_pic_run_work()
940 } else if (!ctx->aborting) { coda_pic_run_work()
941 ctx->ops->finish_run(ctx); coda_pic_run_work()
944 if ((ctx->aborting || (!ctx->streamon_cap && !ctx->streamon_out)) && coda_pic_run_work()
945 ctx->ops->seq_end_work) coda_pic_run_work()
946 queue_work(dev->workqueue, &ctx->seq_end_work); coda_pic_run_work()
949 mutex_unlock(&ctx->buffer_mutex); coda_pic_run_work()
951 v4l2_m2m_job_finish(ctx->dev->m2m_dev, ctx->fh.m2m_ctx); coda_pic_run_work()
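
coda_device_run()/coda_pic_run_work() above, together with the complete(&ctx->completion) in coda_irq_handler() (coda-bit.c line 2036), form the standard kick-hardware-and-wait handshake: the m2m core's device_run queues a work item, the work item programs a PIC_RUN and blocks on a completion (lines 933-934), and the interrupt handler signals it; a timeout is treated as a hung codec and triggers coda_hw_reset(). Reduced to its skeleton, with illustrative demo_* names:

    #include <linux/completion.h>
    #include <linux/interrupt.h>
    #include <linux/jiffies.h>

    struct demo_ctx {
            struct completion done;  /* init_completion() once at setup,
                                      * cf. coda_open() line 1677 */
    };

    static irqreturn_t demo_irq(int irq, void *data)
    {
            struct demo_ctx *ctx = data;

            /* ...acknowledge the hardware interrupt here... */
            complete(&ctx->done);
            return IRQ_HANDLED;
    }

    static int demo_run(struct demo_ctx *ctx)
    {
            reinit_completion(&ctx->done);
            /* ...write command registers to start the job... */
            if (!wait_for_completion_timeout(&ctx->done,
                                             msecs_to_jiffies(1000)))
                    return -ETIMEDOUT;   /* device never answered */
            return 0;
    }
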
956 struct coda_ctx *ctx = m2m_priv; coda_job_ready() local
963 if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) && coda_job_ready()
964 ctx->inst_type != CODA_INST_DECODER) { coda_job_ready()
965 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_job_ready()
970 if (!v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx)) { coda_job_ready()
971 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_job_ready()
976 if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) { coda_job_ready()
982 if (ctx->hold && !v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx)) { coda_job_ready()
983 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_job_ready()
985 ctx->idx); coda_job_ready()
989 stream_end = ctx->bit_stream_param & coda_job_ready()
993 list_for_each(meta, &ctx->buffer_meta_list) coda_job_ready()
996 src_bufs = v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx); coda_job_ready()
999 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_job_ready()
1001 ctx->idx, num_metas, src_bufs); coda_job_ready()
1006 if (!v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) && coda_job_ready()
1007 !stream_end && (coda_get_bitstream_payload(ctx) < 512)) { coda_job_ready()
1008 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_job_ready()
1010 ctx->idx, coda_get_bitstream_payload(ctx)); coda_job_ready()
1015 if (ctx->aborting) { coda_job_ready()
1016 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_job_ready()
1021 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_job_ready()
1028 struct coda_ctx *ctx = priv; coda_job_abort() local
1030 ctx->aborting = 1; coda_job_abort()
1032 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_job_abort()
1038 struct coda_ctx *ctx = m2m_priv; coda_lock() local
1039 struct coda_dev *pcdev = ctx->dev; coda_lock()
1046 struct coda_ctx *ctx = m2m_priv; coda_unlock() local
1047 struct coda_dev *pcdev = ctx->dev; coda_unlock()
1060 static void coda_set_tiled_map_type(struct coda_ctx *ctx, int tiled_map_type) coda_set_tiled_map_type() argument
1062 struct gdi_tiled_map *tiled_map = &ctx->tiled_map; coda_set_tiled_map_type()
1080 dev_err(&ctx->dev->plat_dev->dev, "invalid map type: %d\n", coda_set_tiled_map_type()
1086 static void set_default_params(struct coda_ctx *ctx) set_default_params() argument
1090 ctx->codec = coda_find_codec(ctx->dev, ctx->cvd->src_formats[0], set_default_params()
1091 ctx->cvd->dst_formats[0]); set_default_params()
1092 max_w = min(ctx->codec->max_w, 1920U); set_default_params()
1093 max_h = min(ctx->codec->max_h, 1088U); set_default_params()
1095 csize = coda_estimate_sizeimage(ctx, usize, max_w, max_h); set_default_params()
1097 ctx->params.codec_mode = ctx->codec->mode; set_default_params()
1098 ctx->colorspace = V4L2_COLORSPACE_REC709; set_default_params()
1099 ctx->params.framerate = 30; set_default_params()
1102 ctx->q_data[V4L2_M2M_SRC].fourcc = ctx->codec->src_fourcc; set_default_params()
1103 ctx->q_data[V4L2_M2M_DST].fourcc = ctx->codec->dst_fourcc; set_default_params()
1104 ctx->q_data[V4L2_M2M_SRC].width = max_w; set_default_params()
1105 ctx->q_data[V4L2_M2M_SRC].height = max_h; set_default_params()
1106 ctx->q_data[V4L2_M2M_DST].width = max_w; set_default_params()
1107 ctx->q_data[V4L2_M2M_DST].height = max_h; set_default_params()
1108 if (ctx->codec->src_fourcc == V4L2_PIX_FMT_YUV420) { set_default_params()
1109 ctx->q_data[V4L2_M2M_SRC].bytesperline = max_w; set_default_params()
1110 ctx->q_data[V4L2_M2M_SRC].sizeimage = usize; set_default_params()
1111 ctx->q_data[V4L2_M2M_DST].bytesperline = 0; set_default_params()
1112 ctx->q_data[V4L2_M2M_DST].sizeimage = csize; set_default_params()
1114 ctx->q_data[V4L2_M2M_SRC].bytesperline = 0; set_default_params()
1115 ctx->q_data[V4L2_M2M_SRC].sizeimage = csize; set_default_params()
1116 ctx->q_data[V4L2_M2M_DST].bytesperline = max_w; set_default_params()
1117 ctx->q_data[V4L2_M2M_DST].sizeimage = usize; set_default_params()
1119 ctx->q_data[V4L2_M2M_SRC].rect.width = max_w; set_default_params()
1120 ctx->q_data[V4L2_M2M_SRC].rect.height = max_h; set_default_params()
1121 ctx->q_data[V4L2_M2M_DST].rect.width = max_w; set_default_params()
1122 ctx->q_data[V4L2_M2M_DST].rect.height = max_h; set_default_params()
1124 if (ctx->dev->devtype->product == CODA_960) set_default_params()
1125 coda_set_tiled_map_type(ctx, GDI_LINEAR_FRAME_MAP); set_default_params()
1136 struct coda_ctx *ctx = vb2_get_drv_priv(vq); coda_queue_setup() local
1140 q_data = get_q_data(ctx, vq->type); coda_queue_setup()
1147 alloc_ctxs[0] = ctx->dev->alloc_ctx; coda_queue_setup()
1149 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_queue_setup()
1157 struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); coda_buf_prepare() local
1160 q_data = get_q_data(ctx, vb->vb2_queue->type); coda_buf_prepare()
1163 v4l2_warn(&ctx->dev->v4l2_dev, coda_buf_prepare()
1175 struct coda_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); coda_buf_queue() local
1179 q_data = get_q_data(ctx, vb->vb2_queue->type); coda_buf_queue()
1185 if (ctx->bitstream.size && vq->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) { coda_buf_queue()
1191 coda_bit_stream_end_flag(ctx); coda_buf_queue()
1192 mutex_lock(&ctx->bitstream_mutex); coda_buf_queue()
1193 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); coda_buf_queue()
1195 coda_fill_bitstream(ctx, true); coda_buf_queue()
1196 mutex_unlock(&ctx->bitstream_mutex); coda_buf_queue()
1198 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); coda_buf_queue()
1244 struct coda_ctx *ctx = vb2_get_drv_priv(q); coda_start_streaming() local
1245 struct v4l2_device *v4l2_dev = &ctx->dev->v4l2_dev; coda_start_streaming()
1250 q_data_src = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); coda_start_streaming()
1254 ctx->dev->devtype->product == CODA_7541)) { coda_start_streaming()
1256 mutex_lock(&ctx->bitstream_mutex); coda_start_streaming()
1257 coda_fill_bitstream(ctx, false); coda_start_streaming()
1258 mutex_unlock(&ctx->bitstream_mutex); coda_start_streaming()
1260 if (coda_get_bitstream_payload(ctx) < 512) { coda_start_streaming()
1271 ctx->streamon_out = 1; coda_start_streaming()
1278 ctx->streamon_cap = 1; coda_start_streaming()
1282 if (!(ctx->streamon_out & ctx->streamon_cap)) coda_start_streaming()
1285 q_data_dst = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); coda_start_streaming()
1298 if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) coda_start_streaming()
1299 v4l2_m2m_set_src_buffered(ctx->fh.m2m_ctx, true); coda_start_streaming()
1301 ctx->gopcounter = ctx->params.gop_size - 1; coda_start_streaming()
1303 ctx->codec = coda_find_codec(ctx->dev, q_data_src->fourcc, coda_start_streaming()
1305 if (!ctx->codec) { coda_start_streaming()
1312 ctx->params.gop_size = 1; coda_start_streaming()
1313 ctx->gopcounter = ctx->params.gop_size - 1; coda_start_streaming()
1315 ret = ctx->ops->start_streaming(ctx); coda_start_streaming()
1316 if (ctx->inst_type == CODA_INST_DECODER) { coda_start_streaming()
1323 ctx->initialized = 1; coda_start_streaming()
1328 while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx))) coda_start_streaming()
1331 while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx))) coda_start_streaming()
1339 struct coda_ctx *ctx = vb2_get_drv_priv(q); coda_stop_streaming() local
1340 struct coda_dev *dev = ctx->dev; coda_stop_streaming()
1344 stop = ctx->streamon_out && ctx->streamon_cap; coda_stop_streaming()
1349 ctx->streamon_out = 0; coda_stop_streaming()
1351 coda_bit_stream_end_flag(ctx); coda_stop_streaming()
1353 ctx->qsequence = 0; coda_stop_streaming()
1355 while ((buf = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx))) coda_stop_streaming()
1360 ctx->streamon_cap = 0; coda_stop_streaming()
1362 ctx->osequence = 0; coda_stop_streaming()
1363 ctx->sequence_offset = 0; coda_stop_streaming()
1365 while ((buf = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx))) coda_stop_streaming()
1372 if (ctx->ops->seq_end_work) { coda_stop_streaming()
1373 queue_work(dev->workqueue, &ctx->seq_end_work); coda_stop_streaming()
1374 flush_work(&ctx->seq_end_work); coda_stop_streaming()
1376 mutex_lock(&ctx->bitstream_mutex); coda_stop_streaming()
1377 while (!list_empty(&ctx->buffer_meta_list)) { coda_stop_streaming()
1378 meta = list_first_entry(&ctx->buffer_meta_list, coda_stop_streaming()
1383 mutex_unlock(&ctx->bitstream_mutex); coda_stop_streaming()
1384 kfifo_init(&ctx->bitstream_fifo, coda_stop_streaming()
1385 ctx->bitstream.vaddr, ctx->bitstream.size); coda_stop_streaming()
1386 ctx->initialized = 0; coda_stop_streaming()
1387 ctx->runcounter = 0; coda_stop_streaming()
1388 ctx->aborting = 0; coda_stop_streaming()
1404 struct coda_ctx *ctx = coda_s_ctrl() local
1407 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_s_ctrl()
1413 ctx->params.rot_mode |= CODA_MIR_HOR; coda_s_ctrl()
1415 ctx->params.rot_mode &= ~CODA_MIR_HOR; coda_s_ctrl()
1419 ctx->params.rot_mode |= CODA_MIR_VER; coda_s_ctrl()
1421 ctx->params.rot_mode &= ~CODA_MIR_VER; coda_s_ctrl()
1424 ctx->params.bitrate = ctrl->val / 1000; coda_s_ctrl()
1427 ctx->params.gop_size = ctrl->val; coda_s_ctrl()
1430 ctx->params.h264_intra_qp = ctrl->val; coda_s_ctrl()
1433 ctx->params.h264_inter_qp = ctrl->val; coda_s_ctrl()
1436 ctx->params.h264_min_qp = ctrl->val; coda_s_ctrl()
1439 ctx->params.h264_max_qp = ctrl->val; coda_s_ctrl()
1442 ctx->params.h264_deblk_alpha = ctrl->val; coda_s_ctrl()
1445 ctx->params.h264_deblk_beta = ctrl->val; coda_s_ctrl()
1448 ctx->params.h264_deblk_enabled = (ctrl->val == coda_s_ctrl()
1452 ctx->params.mpeg4_intra_qp = ctrl->val; coda_s_ctrl()
1455 ctx->params.mpeg4_inter_qp = ctrl->val; coda_s_ctrl()
1458 ctx->params.slice_mode = ctrl->val; coda_s_ctrl()
1461 ctx->params.slice_max_mb = ctrl->val; coda_s_ctrl()
1464 ctx->params.slice_max_bits = ctrl->val * 8; coda_s_ctrl()
1469 ctx->params.intra_refresh = ctrl->val; coda_s_ctrl()
1472 coda_set_jpeg_compression_quality(ctx, ctrl->val); coda_s_ctrl()
1475 ctx->params.jpeg_restart_interval = ctrl->val; coda_s_ctrl()
1478 v4l2_dbg(1, coda_debug, &ctx->dev->v4l2_dev, coda_s_ctrl()
1491 static void coda_encode_ctrls(struct coda_ctx *ctx) coda_encode_ctrls() argument
1493 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1495 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1497 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1499 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1501 if (ctx->dev->devtype->product != CODA_960) { coda_encode_ctrls()
1502 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1505 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1507 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1509 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1511 v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1515 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1517 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1519 v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1523 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1525 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1528 v4l2_ctrl_new_std_menu(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1533 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_encode_ctrls()
1538 static void coda_jpeg_encode_ctrls(struct coda_ctx *ctx) coda_jpeg_encode_ctrls() argument
1540 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_jpeg_encode_ctrls()
1542 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_jpeg_encode_ctrls()
1546 static int coda_ctrls_setup(struct coda_ctx *ctx) coda_ctrls_setup() argument
1548 v4l2_ctrl_handler_init(&ctx->ctrls, 2); coda_ctrls_setup()
1550 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_ctrls_setup()
1552 v4l2_ctrl_new_std(&ctx->ctrls, &coda_ctrl_ops, coda_ctrls_setup()
1554 if (ctx->inst_type == CODA_INST_ENCODER) { coda_ctrls_setup()
1555 if (ctx->cvd->dst_formats[0] == V4L2_PIX_FMT_JPEG) coda_ctrls_setup()
1556 coda_jpeg_encode_ctrls(ctx); coda_ctrls_setup()
1558 coda_encode_ctrls(ctx); coda_ctrls_setup()
1561 if (ctx->ctrls.error) { coda_ctrls_setup()
1562 v4l2_err(&ctx->dev->v4l2_dev, coda_ctrls_setup()
1564 ctx->ctrls.error); coda_ctrls_setup()
1568 return v4l2_ctrl_handler_setup(&ctx->ctrls); coda_ctrls_setup()
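
coda_ctrls_setup() above follows the stock v4l2_ctrl_handler recipe: initialize with a hint count, register each control with v4l2_ctrl_new_std(), check the handler's sticky error field once at the end (lines 1561-1564), then push the defaults into the driver with v4l2_ctrl_handler_setup() (line 1568). A minimal hedged sketch; the two std controls registered on lines 1550-1552 are plausibly HFLIP/VFLIP given the rot_mode handling in coda_s_ctrl, but that is an inference, not shown by the search:

    #include <media/v4l2-ctrls.h>

    static int demo_ctrls_setup(struct v4l2_ctrl_handler *hdl,
                                const struct v4l2_ctrl_ops *ops)
    {
            v4l2_ctrl_handler_init(hdl, 2);
            v4l2_ctrl_new_std(hdl, ops, V4L2_CID_HFLIP, 0, 1, 1, 0);
            v4l2_ctrl_new_std(hdl, ops, V4L2_CID_VFLIP, 0, 1, 1, 0);
            if (hdl->error) {
                    int err = hdl->error;

                    v4l2_ctrl_handler_free(hdl);
                    return err;
            }
            return v4l2_ctrl_handler_setup(hdl);  /* apply the defaults */
    }
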
1571 static int coda_queue_init(struct coda_ctx *ctx, struct vb2_queue *vq) coda_queue_init() argument
1573 vq->drv_priv = ctx; coda_queue_init()
1577 vq->lock = &ctx->dev->dev_mutex; coda_queue_init()
1648 struct coda_ctx *ctx = NULL; coda_open() local
1653 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); coda_open()
1654 if (!ctx) coda_open()
1670 ctx->debugfs_entry = debugfs_create_dir(name, dev->debugfs_root); coda_open()
1673 ctx->cvd = to_coda_video_device(vdev); coda_open()
1674 ctx->inst_type = ctx->cvd->type; coda_open()
1675 ctx->ops = ctx->cvd->ops; coda_open()
1676 ctx->use_bit = !ctx->cvd->direct; coda_open()
1677 init_completion(&ctx->completion); coda_open()
1678 INIT_WORK(&ctx->pic_run_work, coda_pic_run_work); coda_open()
1679 if (ctx->ops->seq_end_work) coda_open()
1680 INIT_WORK(&ctx->seq_end_work, ctx->ops->seq_end_work); coda_open()
1681 v4l2_fh_init(&ctx->fh, video_devdata(file)); coda_open()
1682 file->private_data = &ctx->fh; coda_open()
1683 v4l2_fh_add(&ctx->fh); coda_open()
1684 ctx->dev = dev; coda_open()
1685 ctx->idx = idx; coda_open()
1688 ctx->frame_mem_ctrl = 1 << 12; coda_open()
1691 ctx->reg_idx = 0; coda_open()
1694 ctx->reg_idx = idx; coda_open()
1712 set_default_params(ctx); coda_open()
1713 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, coda_open()
1714 ctx->ops->queue_init); coda_open()
1715 if (IS_ERR(ctx->fh.m2m_ctx)) { coda_open()
1716 ret = PTR_ERR(ctx->fh.m2m_ctx); coda_open()
1723 ret = coda_ctrls_setup(ctx); coda_open()
1729 ctx->fh.ctrl_handler = &ctx->ctrls; coda_open()
1731 mutex_init(&ctx->bitstream_mutex); coda_open()
1732 mutex_init(&ctx->buffer_mutex); coda_open()
1733 INIT_LIST_HEAD(&ctx->buffer_meta_list); coda_open()
1735 coda_lock(ctx); coda_open()
1736 list_add(&ctx->list, &dev->instances); coda_open()
1737 coda_unlock(ctx); coda_open()
1740 ctx->idx, ctx); coda_open()
1745 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); coda_open()
1753 v4l2_fh_del(&ctx->fh); coda_open()
1754 v4l2_fh_exit(&ctx->fh); coda_open()
1755 clear_bit(ctx->idx, &dev->instance_mask); coda_open()
1758 kfree(ctx); coda_open()
1765 struct coda_ctx *ctx = fh_to_ctx(file->private_data); coda_release() local
1768 ctx); coda_release()
1770 if (ctx->inst_type == CODA_INST_DECODER && ctx->use_bit) coda_release()
1771 coda_bit_stream_end_flag(ctx); coda_release()
1774 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); coda_release()
1777 if (ctx->initialized && ctx->ops->seq_end_work) { coda_release()
1778 queue_work(dev->workqueue, &ctx->seq_end_work); coda_release()
1779 flush_work(&ctx->seq_end_work); coda_release()
1782 coda_lock(ctx); coda_release()
1783 list_del(&ctx->list); coda_release()
1784 coda_unlock(ctx); coda_release()
1786 if (ctx->dev->devtype->product == CODA_DX6) coda_release()
1787 coda_free_aux_buf(dev, &ctx->workbuf); coda_release()
1789 v4l2_ctrl_handler_free(&ctx->ctrls); coda_release()
1793 v4l2_fh_del(&ctx->fh); coda_release()
1794 v4l2_fh_exit(&ctx->fh); coda_release()
1795 clear_bit(ctx->idx, &dev->instance_mask); coda_release()
1796 if (ctx->ops->release) coda_release()
1797 ctx->ops->release(ctx); coda_release()
1798 debugfs_remove_recursive(ctx->debugfs_entry); coda_release()
1799 kfree(ctx); coda_release()
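The coda hits above trace the standard V4L2 control-handler lifecycle: coda_ctrls_setup() registers controls, checks the accumulated handler error, and pushes the defaults out; coda_release() later frees the handler. A minimal sketch of that pattern (the my_ctx and my_ctrl_ops names are hypothetical, not the coda driver's):

    #include <media/v4l2-ctrls.h>

    struct my_ctx {
            struct v4l2_ctrl_handler ctrls;
    };

    static const struct v4l2_ctrl_ops my_ctrl_ops; /* .s_ctrl would go here */

    static int my_ctrls_setup(struct my_ctx *ctx)
    {
            v4l2_ctrl_handler_init(&ctx->ctrls, 2);
            v4l2_ctrl_new_std(&ctx->ctrls, &my_ctrl_ops,
                              V4L2_CID_HFLIP, 0, 1, 1, 0);
            v4l2_ctrl_new_std(&ctx->ctrls, &my_ctrl_ops,
                              V4L2_CID_VFLIP, 0, 1, 1, 0);

            /* v4l2_ctrl_new_std() failures accumulate in ctrls.error */
            if (ctx->ctrls.error) {
                    int err = ctx->ctrls.error;

                    v4l2_ctrl_handler_free(&ctx->ctrls);
                    return err;
            }

            /* apply the default control values, as coda_ctrls_setup() does */
            return v4l2_ctrl_handler_setup(&ctx->ctrls);
    }

Checking hdl->error once after all registrations is the idiom the V4L2 core encourages: each new-control helper is a no-op once the handler is in an error state, so no per-call checks are needed.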
/linux-4.1.27/sound/soc/intel/atom/sst/
H A Dsst.c200 struct intel_sst_drv *ctx = container_of(work, sst_process_pending_msg() local
203 ctx->ops->post_message(ctx, NULL, false); sst_process_pending_msg()
206 static int sst_workqueue_init(struct intel_sst_drv *ctx) sst_workqueue_init() argument
208 INIT_LIST_HEAD(&ctx->memcpy_list); sst_workqueue_init()
209 INIT_LIST_HEAD(&ctx->rx_list); sst_workqueue_init()
210 INIT_LIST_HEAD(&ctx->ipc_dispatch_list); sst_workqueue_init()
211 INIT_LIST_HEAD(&ctx->block_list); sst_workqueue_init()
212 INIT_WORK(&ctx->ipc_post_msg_wq, sst_process_pending_msg); sst_workqueue_init()
213 init_waitqueue_head(&ctx->wait_queue); sst_workqueue_init()
215 ctx->post_msg_wq = sst_workqueue_init()
217 if (!ctx->post_msg_wq) sst_workqueue_init()
222 static void sst_init_locks(struct intel_sst_drv *ctx) sst_init_locks() argument
224 mutex_init(&ctx->sst_lock); sst_init_locks()
225 spin_lock_init(&ctx->rx_msg_lock); sst_init_locks()
226 spin_lock_init(&ctx->ipc_spin_lock); sst_init_locks()
227 spin_lock_init(&ctx->block_lock); sst_init_locks()
230 int sst_alloc_drv_context(struct intel_sst_drv **ctx, sst_alloc_drv_context() argument
233 *ctx = devm_kzalloc(dev, sizeof(struct intel_sst_drv), GFP_KERNEL); sst_alloc_drv_context()
234 if (!(*ctx)) sst_alloc_drv_context()
237 (*ctx)->dev = dev; sst_alloc_drv_context()
238 (*ctx)->dev_id = dev_id; sst_alloc_drv_context()
244 int sst_context_init(struct intel_sst_drv *ctx) sst_context_init() argument
248 if (!ctx->pdata) sst_context_init()
251 if (!ctx->pdata->probe_data) sst_context_init()
254 memcpy(&ctx->info, ctx->pdata->probe_data, sizeof(ctx->info)); sst_context_init()
256 ret = sst_driver_ops(ctx); sst_context_init()
260 sst_init_locks(ctx); sst_context_init()
261 sst_set_fw_state_locked(ctx, SST_RESET); sst_context_init()
264 ctx->pvt_id = 1; sst_context_init()
265 ctx->stream_cnt = 0; sst_context_init()
266 ctx->fw_in_mem = NULL; sst_context_init()
268 ctx->use_dma = 0; sst_context_init()
269 ctx->use_lli = 0; sst_context_init()
271 if (sst_workqueue_init(ctx)) sst_context_init()
274 ctx->mailbox_recv_offset = ctx->pdata->ipc_info->mbox_recv_off; sst_context_init()
275 ctx->ipc_reg.ipcx = SST_IPCX + ctx->pdata->ipc_info->ipc_offset; sst_context_init()
276 ctx->ipc_reg.ipcd = SST_IPCD + ctx->pdata->ipc_info->ipc_offset; sst_context_init()
278 dev_info(ctx->dev, "Got drv data max stream %d\n", sst_context_init()
279 ctx->info.max_streams); sst_context_init()
281 for (i = 1; i <= ctx->info.max_streams; i++) { sst_context_init()
282 struct stream_info *stream = &ctx->streams[i]; sst_context_init()
290 ret = devm_request_threaded_irq(ctx->dev, ctx->irq_num, ctx->ops->interrupt, sst_context_init()
291 ctx->ops->irq_thread, 0, SST_DRV_NAME, sst_context_init()
292 ctx); sst_context_init()
296 dev_dbg(ctx->dev, "Registered IRQ %#x\n", ctx->irq_num); sst_context_init()
299 sst_shim_write64(ctx->shim, SST_IMRX, 0xFFFF0038); sst_context_init()
301 ctx->qos = devm_kzalloc(ctx->dev, sst_context_init()
303 if (!ctx->qos) { sst_context_init()
307 pm_qos_add_request(ctx->qos, PM_QOS_CPU_DMA_LATENCY, sst_context_init()
310 dev_dbg(ctx->dev, "Requesting FW %s now...\n", ctx->firmware_name); sst_context_init()
311 ret = request_firmware_nowait(THIS_MODULE, true, ctx->firmware_name, sst_context_init()
312 ctx->dev, GFP_KERNEL, ctx, sst_firmware_load_cb); sst_context_init()
314 dev_err(ctx->dev, "Firmware download failed:%d\n", ret); sst_context_init()
317 sst_register(ctx->dev); sst_context_init()
321 destroy_workqueue(ctx->post_msg_wq); sst_context_init()
326 void sst_context_cleanup(struct intel_sst_drv *ctx) sst_context_cleanup() argument
328 pm_runtime_get_noresume(ctx->dev); sst_context_cleanup()
329 pm_runtime_disable(ctx->dev); sst_context_cleanup()
330 sst_unregister(ctx->dev); sst_context_cleanup()
331 sst_set_fw_state_locked(ctx, SST_SHUTDOWN); sst_context_cleanup()
333 destroy_workqueue(ctx->post_msg_wq); sst_context_cleanup()
334 pm_qos_remove_request(ctx->qos); sst_context_cleanup()
335 kfree(ctx->fw_sg_list.src); sst_context_cleanup()
336 kfree(ctx->fw_sg_list.dst); sst_context_cleanup()
337 ctx->fw_sg_list.list_len = 0; sst_context_cleanup()
338 kfree(ctx->fw_in_mem); sst_context_cleanup()
339 ctx->fw_in_mem = NULL; sst_context_cleanup()
340 sst_memcpy_free_resources(ctx); sst_context_cleanup()
341 ctx = NULL; sst_context_cleanup()
345 static inline void sst_save_shim64(struct intel_sst_drv *ctx, sst_save_shim64() argument
351 spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags); sst_save_shim64()
357 spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags); sst_save_shim64()
360 static inline void sst_restore_shim64(struct intel_sst_drv *ctx, sst_restore_shim64() argument
370 spin_lock_irqsave(&ctx->ipc_spin_lock, irq_flags); sst_restore_shim64()
373 spin_unlock_irqrestore(&ctx->ipc_spin_lock, irq_flags); sst_restore_shim64()
376 void sst_configure_runtime_pm(struct intel_sst_drv *ctx) sst_configure_runtime_pm() argument
378 pm_runtime_set_autosuspend_delay(ctx->dev, SST_SUSPEND_DELAY); sst_configure_runtime_pm()
379 pm_runtime_use_autosuspend(ctx->dev); sst_configure_runtime_pm()
387 pm_runtime_set_active(ctx->dev); sst_configure_runtime_pm()
389 pm_runtime_enable(ctx->dev); sst_configure_runtime_pm()
392 pm_runtime_set_active(ctx->dev); sst_configure_runtime_pm()
394 pm_runtime_put_noidle(ctx->dev); sst_configure_runtime_pm()
396 sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64); sst_configure_runtime_pm()
403 struct intel_sst_drv *ctx = dev_get_drvdata(dev); intel_sst_runtime_suspend() local
405 if (ctx->sst_state == SST_RESET) { intel_sst_runtime_suspend()
410 if (ctx->ops->save_dsp_context(ctx)) intel_sst_runtime_suspend()
414 sst_set_fw_state_locked(ctx, SST_RESET); intel_sst_runtime_suspend()
416 synchronize_irq(ctx->irq_num); intel_sst_runtime_suspend()
417 flush_workqueue(ctx->post_msg_wq); intel_sst_runtime_suspend()
419 ctx->ops->reset(ctx); intel_sst_runtime_suspend()
421 sst_save_shim64(ctx, ctx->shim, ctx->shim_regs64); intel_sst_runtime_suspend()
428 struct intel_sst_drv *ctx = dev_get_drvdata(dev); intel_sst_suspend() local
433 if (ctx->sst_state == SST_RESET) intel_sst_suspend()
440 for (i = 1; i <= ctx->info.max_streams; i++) { intel_sst_suspend()
441 struct stream_info *stream = &ctx->streams[i]; intel_sst_suspend()
448 synchronize_irq(ctx->irq_num); intel_sst_suspend()
449 flush_workqueue(ctx->post_msg_wq); intel_sst_suspend()
452 sst_set_fw_state_locked(ctx, SST_RESET); intel_sst_suspend()
455 if (ctx->ops->save_dsp_context(ctx)) intel_sst_suspend()
462 fw_save->iram = kzalloc(ctx->iram_end - ctx->iram_base, GFP_KERNEL); intel_sst_suspend()
467 fw_save->dram = kzalloc(ctx->dram_end - ctx->dram_base, GFP_KERNEL); intel_sst_suspend()
478 fw_save->ddr = kzalloc(ctx->ddr_end - ctx->ddr_base, GFP_KERNEL); intel_sst_suspend()
484 memcpy32_fromio(fw_save->iram, ctx->iram, ctx->iram_end - ctx->iram_base); intel_sst_suspend()
485 memcpy32_fromio(fw_save->dram, ctx->dram, ctx->dram_end - ctx->dram_base); intel_sst_suspend()
486 memcpy32_fromio(fw_save->sram, ctx->mailbox, SST_MAILBOX_SIZE); intel_sst_suspend()
487 memcpy32_fromio(fw_save->ddr, ctx->ddr, ctx->ddr_end - ctx->ddr_base); intel_sst_suspend()
489 ctx->fw_save = fw_save; intel_sst_suspend()
490 ctx->ops->reset(ctx); intel_sst_suspend()
505 struct intel_sst_drv *ctx = dev_get_drvdata(dev); intel_sst_resume() local
506 struct sst_fw_save *fw_save = ctx->fw_save; intel_sst_resume()
513 sst_set_fw_state_locked(ctx, SST_FW_LOADING); intel_sst_resume()
516 ctx->ops->reset(ctx); intel_sst_resume()
518 ctx->fw_save = NULL; intel_sst_resume()
520 memcpy32_toio(ctx->iram, fw_save->iram, ctx->iram_end - ctx->iram_base); intel_sst_resume()
521 memcpy32_toio(ctx->dram, fw_save->dram, ctx->dram_end - ctx->dram_base); intel_sst_resume()
522 memcpy32_toio(ctx->mailbox, fw_save->sram, SST_MAILBOX_SIZE); intel_sst_resume()
523 memcpy32_toio(ctx->ddr, fw_save->ddr, ctx->ddr_end - ctx->ddr_base); intel_sst_resume()
531 block = sst_create_block(ctx, 0, FW_DWNL_ID); intel_sst_resume()
537 ctx->ops->start(ctx); intel_sst_resume()
538 ret = sst_wait_timeout(ctx, block); intel_sst_resume()
540 dev_err(ctx->dev, "fw download failed %d\n", ret); intel_sst_resume()
545 sst_set_fw_state_locked(ctx, SST_FW_RUNNING); intel_sst_resume()
548 sst_free_block(ctx, block); intel_sst_resume()
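sst_context_init() above initializes locks, lists, and the IPC workqueue before requesting the shared IRQ, and destroys the workqueue on the failure path. A condensed sketch of that ordering, assuming hypothetical my_* names:

    #include <linux/interrupt.h>
    #include <linux/mutex.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct my_drv {
            struct device *dev;
            int irq_num;
            struct mutex lock;
            spinlock_t ipc_lock;
            struct work_struct post_msg_work;
            struct workqueue_struct *post_msg_wq;
    };

    static void my_process_pending_msg(struct work_struct *work);
    static irqreturn_t my_interrupt(int irq, void *dev_id);
    static irqreturn_t my_irq_thread(int irq, void *dev_id);

    static int my_context_init(struct my_drv *ctx)
    {
            int ret;

            mutex_init(&ctx->lock);
            spin_lock_init(&ctx->ipc_lock);
            INIT_WORK(&ctx->post_msg_work, my_process_pending_msg);

            ctx->post_msg_wq = create_singlethread_workqueue("my_post_msg_wq");
            if (!ctx->post_msg_wq)
                    return -ENOMEM;

            /* everything the handlers touch must be live before this call */
            ret = devm_request_threaded_irq(ctx->dev, ctx->irq_num,
                                            my_interrupt, my_irq_thread, 0,
                                            "my_drv", ctx);
            if (ret)
                    destroy_workqueue(ctx->post_msg_wq);
            return ret;
    }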
H A Dsst_acpi.c143 static int sst_platform_get_resources(struct intel_sst_drv *ctx) sst_platform_get_resources() argument
146 struct platform_device *pdev = to_platform_device(ctx->dev); sst_platform_get_resources()
151 ctx->pdata->res_info->acpi_lpe_res_index); sst_platform_get_resources()
153 dev_err(ctx->dev, "Invalid SHIM base from IFWI"); sst_platform_get_resources()
156 dev_info(ctx->dev, "LPE base: %#x size:%#x", (unsigned int) rsrc->start, sst_platform_get_resources()
159 ctx->iram_base = rsrc->start + ctx->pdata->res_info->iram_offset; sst_platform_get_resources()
160 ctx->iram_end = ctx->iram_base + ctx->pdata->res_info->iram_size - 1; sst_platform_get_resources()
161 dev_info(ctx->dev, "IRAM base: %#x", ctx->iram_base); sst_platform_get_resources()
162 ctx->iram = devm_ioremap_nocache(ctx->dev, ctx->iram_base, sst_platform_get_resources()
163 ctx->pdata->res_info->iram_size); sst_platform_get_resources()
164 if (!ctx->iram) { sst_platform_get_resources()
165 dev_err(ctx->dev, "unable to map IRAM"); sst_platform_get_resources()
169 ctx->dram_base = rsrc->start + ctx->pdata->res_info->dram_offset; sst_platform_get_resources()
170 ctx->dram_end = ctx->dram_base + ctx->pdata->res_info->dram_size - 1; sst_platform_get_resources()
171 dev_info(ctx->dev, "DRAM base: %#x", ctx->dram_base); sst_platform_get_resources()
172 ctx->dram = devm_ioremap_nocache(ctx->dev, ctx->dram_base, sst_platform_get_resources()
173 ctx->pdata->res_info->dram_size); sst_platform_get_resources()
174 if (!ctx->dram) { sst_platform_get_resources()
175 dev_err(ctx->dev, "unable to map DRAM"); sst_platform_get_resources()
179 ctx->shim_phy_add = rsrc->start + ctx->pdata->res_info->shim_offset; sst_platform_get_resources()
180 dev_info(ctx->dev, "SHIM base: %#x", ctx->shim_phy_add); sst_platform_get_resources()
181 ctx->shim = devm_ioremap_nocache(ctx->dev, ctx->shim_phy_add, sst_platform_get_resources()
182 ctx->pdata->res_info->shim_size); sst_platform_get_resources()
183 if (!ctx->shim) { sst_platform_get_resources()
184 dev_err(ctx->dev, "unable to map SHIM"); sst_platform_get_resources()
189 ctx->shim_phy_add = ctx->pdata->res_info->shim_phy_addr; sst_platform_get_resources()
192 ctx->mailbox_add = rsrc->start + ctx->pdata->res_info->mbox_offset; sst_platform_get_resources()
193 dev_info(ctx->dev, "Mailbox base: %#x", ctx->mailbox_add); sst_platform_get_resources()
194 ctx->mailbox = devm_ioremap_nocache(ctx->dev, ctx->mailbox_add, sst_platform_get_resources()
195 ctx->pdata->res_info->mbox_size); sst_platform_get_resources()
196 if (!ctx->mailbox) { sst_platform_get_resources()
197 dev_err(ctx->dev, "unable to map mailbox"); sst_platform_get_resources()
202 ctx->mailbox_add = ctx->info.mailbox_start; sst_platform_get_resources()
205 ctx->pdata->res_info->acpi_ddr_index); sst_platform_get_resources()
207 dev_err(ctx->dev, "Invalid DDR base from IFWI"); sst_platform_get_resources()
210 ctx->ddr_base = rsrc->start; sst_platform_get_resources()
211 ctx->ddr_end = rsrc->end; sst_platform_get_resources()
212 dev_info(ctx->dev, "DDR base: %#x", ctx->ddr_base); sst_platform_get_resources()
213 ctx->ddr = devm_ioremap_nocache(ctx->dev, ctx->ddr_base, sst_platform_get_resources()
215 if (!ctx->ddr) { sst_platform_get_resources()
216 dev_err(ctx->dev, "unable to map DDR"); sst_platform_get_resources()
221 ctx->irq_num = platform_get_irq(pdev, sst_platform_get_resources()
222 ctx->pdata->res_info->acpi_ipc_irq_index); sst_platform_get_resources()
252 struct intel_sst_drv *ctx; sst_acpi_probe() local
292 ret = sst_alloc_drv_context(&ctx, dev, dev_id); sst_acpi_probe()
297 ctx->pdata = mach->pdata; sst_acpi_probe()
298 strcpy(ctx->firmware_name, mach->firmware); sst_acpi_probe()
300 ret = sst_platform_get_resources(ctx); sst_acpi_probe()
304 ret = sst_context_init(ctx); sst_acpi_probe()
309 ctx->shim_regs64 = devm_kzalloc(ctx->dev, sizeof(*ctx->shim_regs64), sst_acpi_probe()
311 if (!ctx->shim_regs64) { sst_acpi_probe()
316 sst_configure_runtime_pm(ctx); sst_acpi_probe()
317 platform_set_drvdata(pdev, ctx); sst_acpi_probe()
321 sst_context_cleanup(ctx); sst_acpi_probe()
323 dev_err(ctx->dev, "failed with %d\n", ret); sst_acpi_probe()
337 struct intel_sst_drv *ctx; sst_acpi_remove() local
339 ctx = platform_get_drvdata(pdev); sst_acpi_remove()
340 sst_context_cleanup(ctx); sst_acpi_remove()
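sst_platform_get_resources() above repeats one pattern per memory region: fetch the platform resource by index, record base/end, then map it with a managed ioremap. A minimal sketch using the kernel-4.1-era API (the resource index 0 is an assumption):

    #include <linux/platform_device.h>
    #include <linux/io.h>

    static void __iomem *my_map_mmio(struct platform_device *pdev)
    {
            struct resource *rsrc;

            rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            if (!rsrc)
                    return NULL;

            /* devm_*: the mapping is torn down automatically on detach */
            return devm_ioremap_nocache(&pdev->dev, rsrc->start,
                                        resource_size(rsrc));
    }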
H A Dsst_pci.c33 static int sst_platform_get_resources(struct intel_sst_drv *ctx) sst_platform_get_resources() argument
36 struct pci_dev *pci = ctx->pci; sst_platform_get_resources()
44 if (ctx->dev_id == SST_MRFLD_PCI_ID) { sst_platform_get_resources()
45 ctx->ddr_base = pci_resource_start(pci, 0); sst_platform_get_resources()
47 ddr_base = relocate_imr_addr_mrfld(ctx->ddr_base); sst_platform_get_resources()
48 if (!ctx->pdata->lib_info) { sst_platform_get_resources()
49 dev_err(ctx->dev, "lib_info pointer NULL\n"); sst_platform_get_resources()
53 if (ddr_base != ctx->pdata->lib_info->mod_base) { sst_platform_get_resources()
54 dev_err(ctx->dev, sst_platform_get_resources()
59 ctx->ddr_end = pci_resource_end(pci, 0); sst_platform_get_resources()
61 ctx->ddr = pcim_iomap(pci, 0, sst_platform_get_resources()
63 if (!ctx->ddr) { sst_platform_get_resources()
67 dev_dbg(ctx->dev, "sst: DDR Ptr %p\n", ctx->ddr); sst_platform_get_resources()
69 ctx->ddr = NULL; sst_platform_get_resources()
72 ctx->shim_phy_add = pci_resource_start(pci, 1); sst_platform_get_resources()
73 ctx->shim = pcim_iomap(pci, 1, pci_resource_len(pci, 1)); sst_platform_get_resources()
74 if (!ctx->shim) { sst_platform_get_resources()
78 dev_dbg(ctx->dev, "SST Shim Ptr %p\n", ctx->shim); sst_platform_get_resources()
81 ctx->mailbox_add = pci_resource_start(pci, 2); sst_platform_get_resources()
82 ctx->mailbox = pcim_iomap(pci, 2, pci_resource_len(pci, 2)); sst_platform_get_resources()
83 if (!ctx->mailbox) { sst_platform_get_resources()
87 dev_dbg(ctx->dev, "SRAM Ptr %p\n", ctx->mailbox); sst_platform_get_resources()
90 ctx->iram_end = pci_resource_end(pci, 3); sst_platform_get_resources()
91 ctx->iram_base = pci_resource_start(pci, 3); sst_platform_get_resources()
92 ctx->iram = pcim_iomap(pci, 3, pci_resource_len(pci, 3)); sst_platform_get_resources()
93 if (!ctx->iram) { sst_platform_get_resources()
97 dev_dbg(ctx->dev, "IRAM Ptr %p\n", ctx->iram); sst_platform_get_resources()
100 ctx->dram_end = pci_resource_end(pci, 4); sst_platform_get_resources()
101 ctx->dram_base = pci_resource_start(pci, 4); sst_platform_get_resources()
102 ctx->dram = pcim_iomap(pci, 4, pci_resource_len(pci, 4)); sst_platform_get_resources()
103 if (!ctx->dram) { sst_platform_get_resources()
107 dev_dbg(ctx->dev, "DRAM Ptr %p\n", ctx->dram); sst_platform_get_resources()
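The PCI variant does the same job per BAR, recording the region's physical bounds before mapping it with the device-managed pcim_iomap(); a hedged sketch:

    #include <linux/pci.h>

    /* map one BAR and report its physical bounds; the bar index is assumed */
    static void __iomem *my_map_bar(struct pci_dev *pci, int bar,
                                    resource_size_t *base,
                                    resource_size_t *end)
    {
            *base = pci_resource_start(pci, bar);
            *end = pci_resource_end(pci, bar);

            /* pcim_iomap() is device-managed: no explicit unmap needed */
            return pcim_iomap(pci, bar, pci_resource_len(pci, bar));
    }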
H A Dsst_drv_interface.c51 int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id) free_stream_context() argument
56 stream = get_stream_info(ctx, str_id); free_stream_context()
59 ret = sst_free_stream(ctx, str_id); free_stream_context()
61 sst_clean_stream(&ctx->streams[str_id]); free_stream_context()
64 dev_err(ctx->dev, "we tried to free stream context %d which was freed!!!\n", str_id); free_stream_context()
69 int sst_get_stream_allocated(struct intel_sst_drv *ctx, sst_get_stream_allocated() argument
75 retval = ctx->ops->alloc_stream(ctx, str_param); sst_get_stream_allocated()
77 dev_dbg(ctx->dev, "Stream allocated %d\n", retval); sst_get_stream_allocated()
125 int sst_get_stream(struct intel_sst_drv *ctx, sst_get_stream() argument
132 retval = ctx->ops->alloc_stream(ctx, str_param); sst_get_stream()
137 str_info = &ctx->streams[retval]; sst_get_stream()
145 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_power_control() local
152 dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count); sst_power_control()
154 dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret); sst_power_control()
157 if ((ctx->sst_state == SST_RESET) && (usage_count == 1)) { sst_power_control()
158 ret = sst_load_fw(ctx); sst_power_control()
161 sst_set_fw_state_locked(ctx, SST_RESET); sst_power_control()
162 ret = sst_pm_runtime_put(ctx); sst_power_control()
167 dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count); sst_power_control()
168 return sst_pm_runtime_put(ctx); sst_power_control()
185 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_open_pcm_stream() local
190 retval = sst_get_stream(ctx, str_param); sst_open_pcm_stream()
192 ctx->stream_cnt++; sst_open_pcm_stream()
194 dev_err(ctx->dev, "sst_get_stream returned err %d\n", retval); sst_open_pcm_stream()
204 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_open() local
206 retval = pm_runtime_get_sync(ctx->dev); sst_cdev_open()
210 str_id = sst_get_stream(ctx, str_params); sst_cdev_open()
213 stream = &ctx->streams[str_id]; sst_cdev_open()
221 sst_pm_runtime_put(ctx); sst_cdev_open()
230 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_close() local
232 stream = get_stream_info(ctx, str_id); sst_cdev_close()
246 retval = sst_free_stream(ctx, str_id); sst_cdev_close()
266 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_ack() local
268 stream = get_stream_info(ctx, str_id); sst_cdev_ack()
277 ((void *)(ctx->mailbox + ctx->tstamp) sst_cdev_ack()
285 addr = ((void *)(ctx->mailbox + ctx->tstamp)) + sst_cdev_ack()
297 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_set_metadata() local
301 str_info = get_stream_info(ctx, str_id); sst_cdev_set_metadata()
306 retval = sst_prepare_and_post_msg(ctx, str_info->task_id, IPC_CMD, sst_cdev_set_metadata()
316 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_stream_pause() local
318 return sst_pause_stream(ctx, str_id); sst_cdev_stream_pause()
324 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_stream_pause_release() local
326 return sst_resume_stream(ctx, str_id); sst_cdev_stream_pause_release()
332 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_stream_start() local
334 str_info = get_stream_info(ctx, str_id); sst_cdev_stream_start()
339 return sst_start_stream(ctx, str_id); sst_cdev_stream_start()
344 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_stream_drop() local
346 return sst_drop_stream(ctx, str_id); sst_cdev_stream_drop()
351 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_stream_drain() local
353 return sst_drain_stream(ctx, str_id, false); sst_cdev_stream_drain()
359 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_stream_partial_drain() local
361 return sst_drain_stream(ctx, str_id, true); sst_cdev_stream_partial_drain()
369 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_cdev_tstamp() local
372 ((void *)(ctx->mailbox + ctx->tstamp) sst_cdev_tstamp()
376 stream = get_stream_info(ctx, str_id); sst_cdev_tstamp()
455 void sst_cdev_fragment_elapsed(struct intel_sst_drv *ctx, int str_id) sst_cdev_fragment_elapsed() argument
459 dev_dbg(ctx->dev, "fragment elapsed from firmware for str_id %d\n", sst_cdev_fragment_elapsed()
461 stream = &ctx->streams[str_id]; sst_cdev_fragment_elapsed()
478 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_close_pcm_stream() local
480 stream = get_stream_info(ctx, str_id); sst_close_pcm_stream()
482 dev_err(ctx->dev, "stream info is NULL for str %d!!!\n", str_id); sst_close_pcm_stream()
488 dev_dbg(ctx->dev, "stream in reset state...\n"); sst_close_pcm_stream()
494 retval = free_stream_context(ctx, str_id); sst_close_pcm_stream()
499 ctx->stream_cnt--; sst_close_pcm_stream()
502 dev_err(ctx->dev, "free stream returned err %d\n", retval); sst_close_pcm_stream()
504 dev_dbg(ctx->dev, "Exit\n"); sst_close_pcm_stream()
508 static inline int sst_calc_tstamp(struct intel_sst_drv *ctx, sst_calc_tstamp() argument
517 dev_dbg(ctx->dev, "mrfld ring_buffer_counter %llu in bytes\n", sst_calc_tstamp()
519 dev_dbg(ctx->dev, "mrfld hardware_counter %llu in bytes\n", sst_calc_tstamp()
532 dev_dbg(ctx->dev, "pcm delay %zu in bytes\n", delay_bytes); sst_calc_tstamp()
537 dev_dbg(ctx->dev, "buffer ptr %llu pcm_delay rep: %llu\n", sst_calc_tstamp()
548 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_read_timestamp() local
551 stream = get_stream_info(ctx, str_id); sst_read_timestamp()
560 ((void *)(ctx->mailbox + ctx->tstamp) sst_read_timestamp()
563 return sst_calc_tstamp(ctx, info, substream, &fw_tstamp); sst_read_timestamp()
569 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_stream_start() local
571 if (ctx->sst_state != SST_FW_RUNNING) sst_stream_start()
573 str_info = get_stream_info(ctx, str_id); sst_stream_start()
578 sst_start_stream(ctx, str_id); sst_stream_start()
586 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_stream_drop() local
588 if (ctx->sst_state != SST_FW_RUNNING) sst_stream_drop()
591 str_info = get_stream_info(ctx, str_id); sst_stream_drop()
596 return sst_drop_stream(ctx, str_id); sst_stream_drop()
602 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_stream_pause() local
604 if (ctx->sst_state != SST_FW_RUNNING) sst_stream_pause()
607 str_info = get_stream_info(ctx, str_id); sst_stream_pause()
611 return sst_pause_stream(ctx, str_id); sst_stream_pause()
617 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_stream_resume() local
619 if (ctx->sst_state != SST_FW_RUNNING) sst_stream_resume()
622 str_info = get_stream_info(ctx, str_id); sst_stream_resume()
625 return sst_resume_stream(ctx, str_id); sst_stream_resume()
632 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_stream_init() local
636 if (ctx->sst_state != SST_FW_RUNNING) sst_stream_init()
639 stream = get_stream_info(ctx, str_id); sst_stream_init()
643 dev_dbg(ctx->dev, "setting the period ptrs\n"); sst_stream_init()
649 dev_dbg(ctx->dev, sst_stream_init()
670 struct intel_sst_drv *ctx = dev_get_drvdata(dev); sst_send_byte_stream() local
674 ret_val = pm_runtime_get_sync(ctx->dev); sst_send_byte_stream()
678 ret_val = sst_send_byte_stream_mrfld(ctx, bytes); sst_send_byte_stream()
679 sst_pm_runtime_put(ctx); sst_send_byte_stream()
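Nearly every entry point in sst_drv_interface.c above brackets its work between a runtime-PM get and put, and bails out early unless the firmware state allows the operation. A sketch of that bracketing (my_stream_op() is a hypothetical stand-in):

    #include <linux/pm_runtime.h>

    static int my_stream_op(struct device *dev)
    {
            int ret;

            ret = pm_runtime_get_sync(dev);
            if (ret < 0) {
                    pm_runtime_put_noidle(dev); /* balance the failed get */
                    return ret;
            }

            /* ... talk to the DSP while it is guaranteed powered ... */

            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev);
            return 0;
    }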
/linux-4.1.27/fs/
H A Dtimerfd.c49 static inline bool isalarm(struct timerfd_ctx *ctx) isalarm() argument
51 return ctx->clockid == CLOCK_REALTIME_ALARM || isalarm()
52 ctx->clockid == CLOCK_BOOTTIME_ALARM; isalarm()
60 static void timerfd_triggered(struct timerfd_ctx *ctx) timerfd_triggered() argument
64 spin_lock_irqsave(&ctx->wqh.lock, flags); timerfd_triggered()
65 ctx->expired = 1; timerfd_triggered()
66 ctx->ticks++; timerfd_triggered()
67 wake_up_locked(&ctx->wqh); timerfd_triggered()
68 spin_unlock_irqrestore(&ctx->wqh.lock, flags); timerfd_triggered()
73 struct timerfd_ctx *ctx = container_of(htmr, struct timerfd_ctx, timerfd_tmrproc() local
75 timerfd_triggered(ctx); timerfd_tmrproc()
82 struct timerfd_ctx *ctx = container_of(alarm, struct timerfd_ctx, timerfd_alarmproc() local
84 timerfd_triggered(ctx); timerfd_alarmproc()
91 * wake-up requires ctx->ticks to be non zero, therefore we increment
97 struct timerfd_ctx *ctx; timerfd_clock_was_set() local
101 list_for_each_entry_rcu(ctx, &cancel_list, clist) { timerfd_clock_was_set()
102 if (!ctx->might_cancel) timerfd_clock_was_set()
104 spin_lock_irqsave(&ctx->wqh.lock, flags); timerfd_clock_was_set()
105 if (ctx->moffs.tv64 != moffs.tv64) { timerfd_clock_was_set()
106 ctx->moffs.tv64 = KTIME_MAX; timerfd_clock_was_set()
107 ctx->ticks++; timerfd_clock_was_set()
108 wake_up_locked(&ctx->wqh); timerfd_clock_was_set()
110 spin_unlock_irqrestore(&ctx->wqh.lock, flags); timerfd_clock_was_set()
115 static void timerfd_remove_cancel(struct timerfd_ctx *ctx) timerfd_remove_cancel() argument
117 if (ctx->might_cancel) { timerfd_remove_cancel()
118 ctx->might_cancel = false; timerfd_remove_cancel()
120 list_del_rcu(&ctx->clist); timerfd_remove_cancel()
125 static bool timerfd_canceled(struct timerfd_ctx *ctx) timerfd_canceled() argument
127 if (!ctx->might_cancel || ctx->moffs.tv64 != KTIME_MAX) timerfd_canceled()
129 ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); timerfd_canceled()
133 static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags) timerfd_setup_cancel() argument
135 if ((ctx->clockid == CLOCK_REALTIME || timerfd_setup_cancel()
136 ctx->clockid == CLOCK_REALTIME_ALARM) && timerfd_setup_cancel()
138 if (!ctx->might_cancel) { timerfd_setup_cancel()
139 ctx->might_cancel = true; timerfd_setup_cancel()
141 list_add_rcu(&ctx->clist, &cancel_list); timerfd_setup_cancel()
144 } else if (ctx->might_cancel) { timerfd_setup_cancel()
145 timerfd_remove_cancel(ctx); timerfd_setup_cancel()
149 static ktime_t timerfd_get_remaining(struct timerfd_ctx *ctx) timerfd_get_remaining() argument
153 if (isalarm(ctx)) timerfd_get_remaining()
154 remaining = alarm_expires_remaining(&ctx->t.alarm); timerfd_get_remaining()
156 remaining = hrtimer_expires_remaining(&ctx->t.tmr); timerfd_get_remaining()
161 static int timerfd_setup(struct timerfd_ctx *ctx, int flags, timerfd_setup() argument
166 int clockid = ctx->clockid; timerfd_setup()
172 ctx->expired = 0; timerfd_setup()
173 ctx->ticks = 0; timerfd_setup()
174 ctx->tintv = timespec_to_ktime(ktmr->it_interval); timerfd_setup()
176 if (isalarm(ctx)) { timerfd_setup()
177 alarm_init(&ctx->t.alarm, timerfd_setup()
178 ctx->clockid == CLOCK_REALTIME_ALARM ? timerfd_setup()
182 hrtimer_init(&ctx->t.tmr, clockid, htmode); timerfd_setup()
183 hrtimer_set_expires(&ctx->t.tmr, texp); timerfd_setup()
184 ctx->t.tmr.function = timerfd_tmrproc; timerfd_setup()
188 if (isalarm(ctx)) { timerfd_setup()
190 alarm_start(&ctx->t.alarm, texp); timerfd_setup()
192 alarm_start_relative(&ctx->t.alarm, texp); timerfd_setup()
194 hrtimer_start(&ctx->t.tmr, texp, htmode); timerfd_setup()
197 if (timerfd_canceled(ctx)) timerfd_setup()
201 ctx->settime_flags = flags & TFD_SETTIME_FLAGS; timerfd_setup()
207 struct timerfd_ctx *ctx = file->private_data; timerfd_release() local
209 timerfd_remove_cancel(ctx); timerfd_release()
211 if (isalarm(ctx)) timerfd_release()
212 alarm_cancel(&ctx->t.alarm); timerfd_release()
214 hrtimer_cancel(&ctx->t.tmr); timerfd_release()
215 kfree_rcu(ctx, rcu); timerfd_release()
221 struct timerfd_ctx *ctx = file->private_data; timerfd_poll() local
225 poll_wait(file, &ctx->wqh, wait); timerfd_poll()
227 spin_lock_irqsave(&ctx->wqh.lock, flags); timerfd_poll()
228 if (ctx->ticks) timerfd_poll()
230 spin_unlock_irqrestore(&ctx->wqh.lock, flags); timerfd_poll()
238 struct timerfd_ctx *ctx = file->private_data; timerfd_read() local
244 spin_lock_irq(&ctx->wqh.lock); timerfd_read()
248 res = wait_event_interruptible_locked_irq(ctx->wqh, ctx->ticks); timerfd_read()
255 if (timerfd_canceled(ctx)) { timerfd_read()
256 ctx->ticks = 0; timerfd_read()
257 ctx->expired = 0; timerfd_read()
261 if (ctx->ticks) { timerfd_read()
262 ticks = ctx->ticks; timerfd_read()
264 if (ctx->expired && ctx->tintv.tv64) { timerfd_read()
271 if (isalarm(ctx)) { timerfd_read()
273 &ctx->t.alarm, ctx->tintv) - 1; timerfd_read()
274 alarm_restart(&ctx->t.alarm); timerfd_read()
276 ticks += hrtimer_forward_now(&ctx->t.tmr, timerfd_read()
277 ctx->tintv) - 1; timerfd_read()
278 hrtimer_restart(&ctx->t.tmr); timerfd_read()
281 ctx->expired = 0; timerfd_read()
282 ctx->ticks = 0; timerfd_read()
284 spin_unlock_irq(&ctx->wqh.lock); timerfd_read()
293 struct timerfd_ctx *ctx = file->private_data; timerfd_show() local
296 spin_lock_irq(&ctx->wqh.lock); timerfd_show()
297 t.it_value = ktime_to_timespec(timerfd_get_remaining(ctx)); timerfd_show()
298 t.it_interval = ktime_to_timespec(ctx->tintv); timerfd_show()
299 spin_unlock_irq(&ctx->wqh.lock); timerfd_show()
307 ctx->clockid, timerfd_show()
308 (unsigned long long)ctx->ticks, timerfd_show()
309 ctx->settime_flags, timerfd_show()
322 struct timerfd_ctx *ctx = file->private_data; timerfd_ioctl() local
334 spin_lock_irq(&ctx->wqh.lock); timerfd_ioctl()
335 if (!timerfd_canceled(ctx)) { timerfd_ioctl()
336 ctx->ticks = ticks; timerfd_ioctl()
337 wake_up_locked(&ctx->wqh); timerfd_ioctl()
340 spin_unlock_irq(&ctx->wqh.lock); timerfd_ioctl()
379 struct timerfd_ctx *ctx; SYSCALL_DEFINE2() local
393 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); SYSCALL_DEFINE2()
394 if (!ctx) SYSCALL_DEFINE2()
397 init_waitqueue_head(&ctx->wqh); SYSCALL_DEFINE2()
398 ctx->clockid = clockid; SYSCALL_DEFINE2()
400 if (isalarm(ctx)) SYSCALL_DEFINE2()
401 alarm_init(&ctx->t.alarm, SYSCALL_DEFINE2()
402 ctx->clockid == CLOCK_REALTIME_ALARM ? SYSCALL_DEFINE2()
406 hrtimer_init(&ctx->t.tmr, clockid, HRTIMER_MODE_ABS); SYSCALL_DEFINE2()
408 ctx->moffs = ktime_mono_to_real((ktime_t){ .tv64 = 0 }); SYSCALL_DEFINE2()
410 ufd = anon_inode_getfd("[timerfd]", &timerfd_fops, ctx, SYSCALL_DEFINE2()
413 kfree(ctx); SYSCALL_DEFINE2()
423 struct timerfd_ctx *ctx; do_timerfd_settime() local
434 ctx = f.file->private_data; do_timerfd_settime()
436 timerfd_setup_cancel(ctx, flags); do_timerfd_settime()
443 spin_lock_irq(&ctx->wqh.lock); do_timerfd_settime()
445 if (isalarm(ctx)) { do_timerfd_settime()
446 if (alarm_try_to_cancel(&ctx->t.alarm) >= 0) do_timerfd_settime()
449 if (hrtimer_try_to_cancel(&ctx->t.tmr) >= 0) do_timerfd_settime()
452 spin_unlock_irq(&ctx->wqh.lock); do_timerfd_settime()
462 if (ctx->expired && ctx->tintv.tv64) { do_timerfd_settime()
463 if (isalarm(ctx)) do_timerfd_settime()
464 alarm_forward_now(&ctx->t.alarm, ctx->tintv); do_timerfd_settime()
466 hrtimer_forward_now(&ctx->t.tmr, ctx->tintv); do_timerfd_settime()
469 old->it_value = ktime_to_timespec(timerfd_get_remaining(ctx)); do_timerfd_settime()
470 old->it_interval = ktime_to_timespec(ctx->tintv); do_timerfd_settime()
475 ret = timerfd_setup(ctx, flags, new); do_timerfd_settime()
477 spin_unlock_irq(&ctx->wqh.lock); do_timerfd_settime()
485 struct timerfd_ctx *ctx; do_timerfd_gettime() local
489 ctx = f.file->private_data; do_timerfd_gettime()
491 spin_lock_irq(&ctx->wqh.lock); do_timerfd_gettime()
492 if (ctx->expired && ctx->tintv.tv64) { do_timerfd_gettime()
493 ctx->expired = 0; do_timerfd_gettime()
495 if (isalarm(ctx)) { do_timerfd_gettime()
496 ctx->ticks += do_timerfd_gettime()
498 &ctx->t.alarm, ctx->tintv) - 1; do_timerfd_gettime()
499 alarm_restart(&ctx->t.alarm); do_timerfd_gettime()
501 ctx->ticks += do_timerfd_gettime()
502 hrtimer_forward_now(&ctx->t.tmr, ctx->tintv) do_timerfd_gettime()
504 hrtimer_restart(&ctx->t.tmr); do_timerfd_gettime()
507 t->it_value = ktime_to_timespec(timerfd_get_remaining(ctx)); do_timerfd_gettime()
508 t->it_interval = ktime_to_timespec(ctx->tintv); do_timerfd_gettime()
509 spin_unlock_irq(&ctx->wqh.lock); do_timerfd_gettime()
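From userspace, the ctx->ticks accounting in timerfd.c above surfaces as the 8-byte expiration count that read() returns and resets. A minimal runnable example:

    #include <sys/timerfd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            struct itimerspec its = {
                    .it_value    = { .tv_sec = 1 }, /* first expiry after 1s */
                    .it_interval = { .tv_sec = 1 }, /* then every second */
            };
            uint64_t ticks;
            int fd = timerfd_create(CLOCK_MONOTONIC, 0);

            if (fd < 0 || timerfd_settime(fd, 0, &its, NULL) < 0)
                    return 1;

            /* read() blocks until >= 1 expiration, then returns the count */
            if (read(fd, &ticks, sizeof(ticks)) == sizeof(ticks))
                    printf("expirations: %llu\n", (unsigned long long)ticks);

            close(fd);
            return 0;
    }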
H A Deventfd.c42 * @ctx: [in] Pointer to the eventfd context.
54 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n) eventfd_signal() argument
58 spin_lock_irqsave(&ctx->wqh.lock, flags); eventfd_signal()
59 if (ULLONG_MAX - ctx->count < n) eventfd_signal()
60 n = ULLONG_MAX - ctx->count; eventfd_signal()
61 ctx->count += n; eventfd_signal()
62 if (waitqueue_active(&ctx->wqh)) eventfd_signal()
63 wake_up_locked_poll(&ctx->wqh, POLLIN); eventfd_signal()
64 spin_unlock_irqrestore(&ctx->wqh.lock, flags); eventfd_signal()
70 static void eventfd_free_ctx(struct eventfd_ctx *ctx) eventfd_free_ctx() argument
72 kfree(ctx); eventfd_free_ctx()
77 struct eventfd_ctx *ctx = container_of(kref, struct eventfd_ctx, kref); eventfd_free() local
79 eventfd_free_ctx(ctx); eventfd_free()
84 * @ctx: [in] Pointer to the eventfd context.
88 struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx) eventfd_ctx_get() argument
90 kref_get(&ctx->kref); eventfd_ctx_get()
91 return ctx; eventfd_ctx_get()
97 * @ctx: [in] Pointer to eventfd context.
102 void eventfd_ctx_put(struct eventfd_ctx *ctx) eventfd_ctx_put() argument
104 kref_put(&ctx->kref, eventfd_free); eventfd_ctx_put()
110 struct eventfd_ctx *ctx = file->private_data; eventfd_release() local
112 wake_up_poll(&ctx->wqh, POLLHUP); eventfd_release()
113 eventfd_ctx_put(ctx); eventfd_release()
119 struct eventfd_ctx *ctx = file->private_data; eventfd_poll() local
123 poll_wait(file, &ctx->wqh, wait); eventfd_poll()
125 count = ctx->count; eventfd_poll()
137 static void eventfd_ctx_do_read(struct eventfd_ctx *ctx, __u64 *cnt) eventfd_ctx_do_read() argument
139 *cnt = (ctx->flags & EFD_SEMAPHORE) ? 1 : ctx->count; eventfd_ctx_do_read()
140 ctx->count -= *cnt; eventfd_ctx_do_read()
145 * @ctx: [in] Pointer to eventfd context.
156 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, eventfd_ctx_remove_wait_queue() argument
161 spin_lock_irqsave(&ctx->wqh.lock, flags); eventfd_ctx_remove_wait_queue()
162 eventfd_ctx_do_read(ctx, cnt); eventfd_ctx_remove_wait_queue()
163 __remove_wait_queue(&ctx->wqh, wait); eventfd_ctx_remove_wait_queue()
164 if (*cnt != 0 && waitqueue_active(&ctx->wqh)) eventfd_ctx_remove_wait_queue()
165 wake_up_locked_poll(&ctx->wqh, POLLOUT); eventfd_ctx_remove_wait_queue()
166 spin_unlock_irqrestore(&ctx->wqh.lock, flags); eventfd_ctx_remove_wait_queue()
174 * @ctx: [in] Pointer to eventfd context.
186 ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt) eventfd_ctx_read() argument
191 spin_lock_irq(&ctx->wqh.lock); eventfd_ctx_read()
194 if (ctx->count > 0) eventfd_ctx_read()
197 __add_wait_queue(&ctx->wqh, &wait); eventfd_ctx_read()
200 if (ctx->count > 0) { eventfd_ctx_read()
208 spin_unlock_irq(&ctx->wqh.lock); eventfd_ctx_read()
210 spin_lock_irq(&ctx->wqh.lock); eventfd_ctx_read()
212 __remove_wait_queue(&ctx->wqh, &wait); eventfd_ctx_read()
216 eventfd_ctx_do_read(ctx, cnt); eventfd_ctx_read()
217 if (waitqueue_active(&ctx->wqh)) eventfd_ctx_read()
218 wake_up_locked_poll(&ctx->wqh, POLLOUT); eventfd_ctx_read()
220 spin_unlock_irq(&ctx->wqh.lock); eventfd_ctx_read()
229 struct eventfd_ctx *ctx = file->private_data; eventfd_read() local
235 res = eventfd_ctx_read(ctx, file->f_flags & O_NONBLOCK, &cnt); eventfd_read()
245 struct eventfd_ctx *ctx = file->private_data; eventfd_write() local
256 spin_lock_irq(&ctx->wqh.lock); eventfd_write()
258 if (ULLONG_MAX - ctx->count > ucnt) eventfd_write()
261 __add_wait_queue(&ctx->wqh, &wait); eventfd_write()
264 if (ULLONG_MAX - ctx->count > ucnt) { eventfd_write()
272 spin_unlock_irq(&ctx->wqh.lock); eventfd_write()
274 spin_lock_irq(&ctx->wqh.lock); eventfd_write()
276 __remove_wait_queue(&ctx->wqh, &wait); eventfd_write()
280 ctx->count += ucnt; eventfd_write()
281 if (waitqueue_active(&ctx->wqh)) eventfd_write()
282 wake_up_locked_poll(&ctx->wqh, POLLIN); eventfd_write()
284 spin_unlock_irq(&ctx->wqh.lock); eventfd_write()
292 struct eventfd_ctx *ctx = f->private_data; eventfd_show_fdinfo() local
294 spin_lock_irq(&ctx->wqh.lock); eventfd_show_fdinfo()
296 (unsigned long long)ctx->count); eventfd_show_fdinfo()
297 spin_unlock_irq(&ctx->wqh.lock); eventfd_show_fdinfo()
349 struct eventfd_ctx *ctx; eventfd_ctx_fdget() local
353 ctx = eventfd_ctx_fileget(f.file); eventfd_ctx_fdget()
355 return ctx; eventfd_ctx_fdget()
394 struct eventfd_ctx *ctx; eventfd_file_create() local
403 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); eventfd_file_create()
404 if (!ctx) eventfd_file_create()
407 kref_init(&ctx->kref); eventfd_file_create()
408 init_waitqueue_head(&ctx->wqh); eventfd_file_create()
409 ctx->count = count; eventfd_file_create()
410 ctx->flags = flags; eventfd_file_create()
412 file = anon_inode_getfile("[eventfd]", &eventfd_fops, ctx, eventfd_file_create()
415 eventfd_free_ctx(ctx); eventfd_file_create()
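The userspace counterpart of the eventfd counter above: write() adds to ctx->count and read() drains it (or decrements it by one under EFD_SEMAPHORE). A runnable example:

    #include <sys/eventfd.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            uint64_t val = 3, out;
            int fd = eventfd(0, 0);

            if (fd < 0)
                    return 1;

            write(fd, &val, sizeof(val)); /* ctx->count += 3 */
            write(fd, &val, sizeof(val)); /* ctx->count += 3 */

            /* without EFD_SEMAPHORE, read() returns the sum and resets it */
            if (read(fd, &out, sizeof(out)) == sizeof(out))
                    printf("count: %llu\n", (unsigned long long)out); /* 6 */

            close(fd);
            return 0;
    }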
H A Daio.c205 static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) aio_private_file() argument
215 inode->i_mapping->private_data = ctx; aio_private_file()
269 static void put_aio_ring_file(struct kioctx *ctx) put_aio_ring_file() argument
271 struct file *aio_ring_file = ctx->aio_ring_file; put_aio_ring_file()
278 ctx->aio_ring_file = NULL; put_aio_ring_file()
285 static void aio_free_ring(struct kioctx *ctx) aio_free_ring() argument
292 put_aio_ring_file(ctx); aio_free_ring()
294 for (i = 0; i < ctx->nr_pages; i++) { aio_free_ring()
297 page_count(ctx->ring_pages[i])); aio_free_ring()
298 page = ctx->ring_pages[i]; aio_free_ring()
301 ctx->ring_pages[i] = NULL; aio_free_ring()
305 if (ctx->ring_pages && ctx->ring_pages != ctx->internal_pages) { aio_free_ring()
306 kfree(ctx->ring_pages); aio_free_ring()
307 ctx->ring_pages = NULL; aio_free_ring()
328 struct kioctx *ctx; aio_ring_remap() local
330 ctx = table->table[i]; aio_ring_remap()
331 if (ctx && ctx->aio_ring_file == file) { aio_ring_remap()
332 if (!atomic_read(&ctx->dead)) { aio_ring_remap()
333 ctx->user_id = ctx->mmap_base = vma->vm_start; aio_ring_remap()
354 struct kioctx *ctx; aio_migratepage() local
363 ctx = mapping->private_data; aio_migratepage()
364 if (!ctx) { aio_migratepage()
373 if (!mutex_trylock(&ctx->ring_lock)) { aio_migratepage()
379 if (idx < (pgoff_t)ctx->nr_pages) { aio_migratepage()
381 if (ctx->ring_pages[idx] != old) aio_migratepage()
403 spin_lock_irqsave(&ctx->completion_lock, flags); aio_migratepage()
405 BUG_ON(ctx->ring_pages[idx] != old); aio_migratepage()
406 ctx->ring_pages[idx] = new; aio_migratepage()
407 spin_unlock_irqrestore(&ctx->completion_lock, flags); aio_migratepage()
413 mutex_unlock(&ctx->ring_lock); aio_migratepage()
427 static int aio_setup_ring(struct kioctx *ctx) aio_setup_ring() argument
430 unsigned nr_events = ctx->max_reqs; aio_setup_ring()
447 file = aio_private_file(ctx, nr_pages); aio_setup_ring()
449 ctx->aio_ring_file = NULL; aio_setup_ring()
453 ctx->aio_ring_file = file; aio_setup_ring()
457 ctx->ring_pages = ctx->internal_pages; aio_setup_ring()
459 ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *), aio_setup_ring()
461 if (!ctx->ring_pages) { aio_setup_ring()
462 put_aio_ring_file(ctx); aio_setup_ring()
478 ctx->ring_pages[i] = page; aio_setup_ring()
480 ctx->nr_pages = i; aio_setup_ring()
483 aio_free_ring(ctx); aio_setup_ring()
487 ctx->mmap_size = nr_pages * PAGE_SIZE; aio_setup_ring()
488 pr_debug("attempting mmap of %lu bytes\n", ctx->mmap_size); aio_setup_ring()
491 ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size, aio_setup_ring()
495 if (IS_ERR((void *)ctx->mmap_base)) { aio_setup_ring()
496 ctx->mmap_size = 0; aio_setup_ring()
497 aio_free_ring(ctx); aio_setup_ring()
501 pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base); aio_setup_ring()
503 ctx->user_id = ctx->mmap_base; aio_setup_ring()
504 ctx->nr_events = nr_events; /* trusted copy */ aio_setup_ring()
506 ring = kmap_atomic(ctx->ring_pages[0]); aio_setup_ring()
515 flush_dcache_page(ctx->ring_pages[0]); aio_setup_ring()
527 struct kioctx *ctx = req->ki_ctx; kiocb_set_cancel_fn() local
530 spin_lock_irqsave(&ctx->ctx_lock, flags); kiocb_set_cancel_fn()
533 list_add(&req->ki_list, &ctx->active_reqs); kiocb_set_cancel_fn()
537 spin_unlock_irqrestore(&ctx->ctx_lock, flags); kiocb_set_cancel_fn()
564 struct kioctx *ctx = container_of(work, struct kioctx, free_work); free_ioctx() local
566 pr_debug("freeing %p\n", ctx); free_ioctx()
568 aio_free_ring(ctx); free_ioctx()
569 free_percpu(ctx->cpu); free_ioctx()
570 percpu_ref_exit(&ctx->reqs); free_ioctx()
571 percpu_ref_exit(&ctx->users); free_ioctx()
572 kmem_cache_free(kioctx_cachep, ctx); free_ioctx()
577 struct kioctx *ctx = container_of(ref, struct kioctx, reqs); free_ioctx_reqs() local
580 if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) free_ioctx_reqs()
581 complete(&ctx->rq_wait->comp); free_ioctx_reqs()
583 INIT_WORK(&ctx->free_work, free_ioctx); free_ioctx_reqs()
584 schedule_work(&ctx->free_work); free_ioctx_reqs()
589 * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
594 struct kioctx *ctx = container_of(ref, struct kioctx, users); free_ioctx_users() local
597 spin_lock_irq(&ctx->ctx_lock); free_ioctx_users()
599 while (!list_empty(&ctx->active_reqs)) { free_ioctx_users()
600 req = list_first_entry(&ctx->active_reqs, free_ioctx_users()
607 spin_unlock_irq(&ctx->ctx_lock); free_ioctx_users()
609 percpu_ref_kill(&ctx->reqs); free_ioctx_users()
610 percpu_ref_put(&ctx->reqs); free_ioctx_users()
613 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) ioctx_add_table() argument
626 ctx->id = i; ioctx_add_table()
627 table->table[i] = ctx; ioctx_add_table()
634 ring = kmap_atomic(ctx->ring_pages[0]); ioctx_add_table()
635 ring->id = ctx->id; ioctx_add_table()
684 struct kioctx *ctx; ioctx_alloc() local
708 ctx = kmem_cache_zalloc(kioctx_cachep, GFP_KERNEL); ioctx_alloc()
709 if (!ctx) ioctx_alloc()
712 ctx->max_reqs = nr_events; ioctx_alloc()
714 spin_lock_init(&ctx->ctx_lock); ioctx_alloc()
715 spin_lock_init(&ctx->completion_lock); ioctx_alloc()
716 mutex_init(&ctx->ring_lock); ioctx_alloc()
719 mutex_lock(&ctx->ring_lock); ioctx_alloc()
720 init_waitqueue_head(&ctx->wait); ioctx_alloc()
722 INIT_LIST_HEAD(&ctx->active_reqs); ioctx_alloc()
724 if (percpu_ref_init(&ctx->users, free_ioctx_users, 0, GFP_KERNEL)) ioctx_alloc()
727 if (percpu_ref_init(&ctx->reqs, free_ioctx_reqs, 0, GFP_KERNEL)) ioctx_alloc()
730 ctx->cpu = alloc_percpu(struct kioctx_cpu); ioctx_alloc()
731 if (!ctx->cpu) ioctx_alloc()
734 err = aio_setup_ring(ctx); ioctx_alloc()
738 atomic_set(&ctx->reqs_available, ctx->nr_events - 1); ioctx_alloc()
739 ctx->req_batch = (ctx->nr_events - 1) / (num_possible_cpus() * 4); ioctx_alloc()
740 if (ctx->req_batch < 1) ioctx_alloc()
741 ctx->req_batch = 1; ioctx_alloc()
751 aio_nr += ctx->max_reqs; ioctx_alloc()
754 percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */ ioctx_alloc()
755 percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */ ioctx_alloc()
757 err = ioctx_add_table(ctx, mm); ioctx_alloc()
762 mutex_unlock(&ctx->ring_lock); ioctx_alloc()
765 ctx, ctx->user_id, mm, ctx->nr_events); ioctx_alloc()
766 return ctx; ioctx_alloc()
769 aio_nr_sub(ctx->max_reqs); ioctx_alloc()
771 atomic_set(&ctx->dead, 1); ioctx_alloc()
772 if (ctx->mmap_size) ioctx_alloc()
773 vm_munmap(ctx->mmap_base, ctx->mmap_size); ioctx_alloc()
774 aio_free_ring(ctx); ioctx_alloc()
776 mutex_unlock(&ctx->ring_lock); ioctx_alloc()
777 free_percpu(ctx->cpu); ioctx_alloc()
778 percpu_ref_exit(&ctx->reqs); ioctx_alloc()
779 percpu_ref_exit(&ctx->users); ioctx_alloc()
780 kmem_cache_free(kioctx_cachep, ctx); ioctx_alloc()
790 static int kill_ioctx(struct mm_struct *mm, struct kioctx *ctx, kill_ioctx() argument
796 if (atomic_xchg(&ctx->dead, 1)) { kill_ioctx()
802 WARN_ON(ctx != table->table[ctx->id]); kill_ioctx()
803 table->table[ctx->id] = NULL; kill_ioctx()
807 wake_up_all(&ctx->wait); kill_ioctx()
816 aio_nr_sub(ctx->max_reqs); kill_ioctx()
818 if (ctx->mmap_size) kill_ioctx()
819 vm_munmap(ctx->mmap_base, ctx->mmap_size); kill_ioctx()
821 ctx->rq_wait = wait; kill_ioctx()
822 percpu_ref_kill(&ctx->users); kill_ioctx()
848 struct kioctx *ctx = table->table[i]; exit_aio() local
850 if (!ctx) { exit_aio()
862 ctx->mmap_size = 0; exit_aio()
863 kill_ioctx(mm, ctx, &wait); exit_aio()
875 static void put_reqs_available(struct kioctx *ctx, unsigned nr) put_reqs_available() argument
881 kcpu = this_cpu_ptr(ctx->cpu); put_reqs_available()
884 while (kcpu->reqs_available >= ctx->req_batch * 2) { put_reqs_available()
885 kcpu->reqs_available -= ctx->req_batch; put_reqs_available()
886 atomic_add(ctx->req_batch, &ctx->reqs_available); put_reqs_available()
892 static bool get_reqs_available(struct kioctx *ctx) get_reqs_available() argument
899 kcpu = this_cpu_ptr(ctx->cpu); get_reqs_available()
901 int old, avail = atomic_read(&ctx->reqs_available); get_reqs_available()
904 if (avail < ctx->req_batch) get_reqs_available()
908 avail = atomic_cmpxchg(&ctx->reqs_available, get_reqs_available()
909 avail, avail - ctx->req_batch); get_reqs_available()
912 kcpu->reqs_available += ctx->req_batch; get_reqs_available()
927 * called holding ctx->completion_lock.
929 static void refill_reqs_available(struct kioctx *ctx, unsigned head, refill_reqs_available() argument
935 head %= ctx->nr_events; refill_reqs_available()
939 events_in_ring = ctx->nr_events - (head - tail); refill_reqs_available()
941 completed = ctx->completed_events; refill_reqs_available()
950 ctx->completed_events -= completed; refill_reqs_available()
951 put_reqs_available(ctx, completed); refill_reqs_available()
958 static void user_refill_reqs_available(struct kioctx *ctx) user_refill_reqs_available() argument
960 spin_lock_irq(&ctx->completion_lock); user_refill_reqs_available()
961 if (ctx->completed_events) { user_refill_reqs_available()
970 * ctx->completion_lock. Even if head is invalid, the check user_refill_reqs_available()
971 * against ctx->completed_events below will make sure we do the user_refill_reqs_available()
974 ring = kmap_atomic(ctx->ring_pages[0]); user_refill_reqs_available()
978 refill_reqs_available(ctx, head, ctx->tail); user_refill_reqs_available()
981 spin_unlock_irq(&ctx->completion_lock); user_refill_reqs_available()
988 static inline struct aio_kiocb *aio_get_req(struct kioctx *ctx) aio_get_req() argument
992 if (!get_reqs_available(ctx)) { aio_get_req()
993 user_refill_reqs_available(ctx); aio_get_req()
994 if (!get_reqs_available(ctx)) aio_get_req()
1002 percpu_ref_get(&ctx->reqs); aio_get_req()
1004 req->ki_ctx = ctx; aio_get_req()
1007 put_reqs_available(ctx, 1); aio_get_req()
1024 struct kioctx *ctx, *ret = NULL; lookup_ioctx() local
1037 ctx = table->table[id]; lookup_ioctx()
1038 if (ctx && ctx->user_id == ctx_id) { lookup_ioctx()
1039 percpu_ref_get(&ctx->users); lookup_ioctx()
1040 ret = ctx; lookup_ioctx()
1053 struct kioctx *ctx = iocb->ki_ctx; aio_complete() local
1071 spin_lock_irqsave(&ctx->ctx_lock, flags); aio_complete()
1073 spin_unlock_irqrestore(&ctx->ctx_lock, flags); aio_complete()
1078 * ctx->completion_lock to prevent other code from messing with the tail aio_complete()
1081 spin_lock_irqsave(&ctx->completion_lock, flags); aio_complete()
1083 tail = ctx->tail; aio_complete()
1086 if (++tail >= ctx->nr_events) aio_complete()
1089 ev_page = kmap_atomic(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); aio_complete()
1098 flush_dcache_page(ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]); aio_complete()
1101 ctx, tail, iocb, iocb->ki_user_iocb, iocb->ki_user_data, aio_complete()
1109 ctx->tail = tail; aio_complete()
1111 ring = kmap_atomic(ctx->ring_pages[0]); aio_complete()
1115 flush_dcache_page(ctx->ring_pages[0]); aio_complete()
1117 ctx->completed_events++; aio_complete()
1118 if (ctx->completed_events > 1) aio_complete()
1119 refill_reqs_available(ctx, head, tail); aio_complete()
1120 spin_unlock_irqrestore(&ctx->completion_lock, flags); aio_complete()
1143 if (waitqueue_active(&ctx->wait)) aio_complete()
1144 wake_up(&ctx->wait); aio_complete()
1146 percpu_ref_put(&ctx->reqs); aio_complete()
1153 static long aio_read_events_ring(struct kioctx *ctx, aio_read_events_ring() argument
1168 mutex_lock(&ctx->ring_lock); aio_read_events_ring()
1170 /* Access to ->ring_pages here is protected by ctx->ring_lock. */ aio_read_events_ring()
1171 ring = kmap_atomic(ctx->ring_pages[0]); aio_read_events_ring()
1182 pr_debug("h%u t%u m%u\n", head, tail, ctx->nr_events); aio_read_events_ring()
1187 head %= ctx->nr_events; aio_read_events_ring()
1188 tail %= ctx->nr_events; aio_read_events_ring()
1195 avail = (head <= tail ? tail : ctx->nr_events) - head; aio_read_events_ring()
1204 page = ctx->ring_pages[pos / AIO_EVENTS_PER_PAGE]; aio_read_events_ring()
1219 head %= ctx->nr_events; aio_read_events_ring()
1222 ring = kmap_atomic(ctx->ring_pages[0]); aio_read_events_ring()
1225 flush_dcache_page(ctx->ring_pages[0]); aio_read_events_ring()
1229 mutex_unlock(&ctx->ring_lock); aio_read_events_ring()
1234 static bool aio_read_events(struct kioctx *ctx, long min_nr, long nr, aio_read_events() argument
1237 long ret = aio_read_events_ring(ctx, event + *i, nr - *i); aio_read_events()
1242 if (unlikely(atomic_read(&ctx->dead))) aio_read_events()
1251 static long read_events(struct kioctx *ctx, long min_nr, long nr, read_events() argument
1282 aio_read_events(ctx, min_nr, nr, event, &ret); read_events()
1284 wait_event_interruptible_hrtimeout(ctx->wait, read_events()
1285 aio_read_events(ctx, min_nr, nr, event, &ret), read_events()
1310 unsigned long ctx; SYSCALL_DEFINE2() local
1313 ret = get_user(ctx, ctxp); SYSCALL_DEFINE2()
1318 if (unlikely(ctx || nr_events == 0)) { SYSCALL_DEFINE2()
1319 pr_debug("EINVAL: ctx %lu nr_events %u\n", SYSCALL_DEFINE2()
1320 ctx, nr_events); SYSCALL_DEFINE2()
1343 SYSCALL_DEFINE1(io_destroy, aio_context_t, ctx) SYSCALL_DEFINE1()
1345 struct kioctx *ioctx = lookup_ioctx(ctx); SYSCALL_DEFINE1()
1487 static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb, io_submit_one() argument
1509 req = aio_get_req(ctx); io_submit_one()
1557 put_reqs_available(ctx, 1); io_submit_one()
1558 percpu_ref_put(&ctx->reqs); io_submit_one()
1566 struct kioctx *ctx; do_io_submit() local
1580 ctx = lookup_ioctx(ctx_id); do_io_submit()
1581 if (unlikely(!ctx)) { do_io_submit()
1606 ret = io_submit_one(ctx, user_iocb, &tmp, compat); do_io_submit()
1612 percpu_ref_put(&ctx->users); do_io_submit()
1638 lookup_kiocb(struct kioctx *ctx, struct iocb __user *iocb, u32 key) lookup_kiocb() argument
1642 assert_spin_locked(&ctx->ctx_lock); lookup_kiocb()
1648 list_for_each_entry(kiocb, &ctx->active_reqs, ki_list) { lookup_kiocb()
1668 struct kioctx *ctx; SYSCALL_DEFINE3() local
1677 ctx = lookup_ioctx(ctx_id); SYSCALL_DEFINE3()
1678 if (unlikely(!ctx)) SYSCALL_DEFINE3()
1681 spin_lock_irq(&ctx->ctx_lock); SYSCALL_DEFINE3()
1683 kiocb = lookup_kiocb(ctx, iocb, key); SYSCALL_DEFINE3()
1689 spin_unlock_irq(&ctx->ctx_lock); SYSCALL_DEFINE3()
1700 percpu_ref_put(&ctx->users); SYSCALL_DEFINE3()
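The kioctx machinery above backs the raw io_setup()/io_submit()/io_getevents() syscalls; glibc ships no wrappers for them, so a runnable sketch goes through syscall(2) directly (the file path is arbitrary and error handling is trimmed):

    #include <linux/aio_abi.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <fcntl.h>
    #include <string.h>
    #include <stdio.h>

    int main(void)
    {
            aio_context_t ctx = 0; /* io_setup() fills this in */
            char buf[4096];
            struct iocb cb, *cbs[1] = { &cb };
            struct io_event ev;
            int fd = open("/etc/hostname", O_RDONLY);

            if (fd < 0 || syscall(SYS_io_setup, 128, &ctx) < 0)
                    return 1;

            memset(&cb, 0, sizeof(cb));
            cb.aio_fildes = fd;
            cb.aio_lio_opcode = IOCB_CMD_PREAD;
            cb.aio_buf = (unsigned long)buf;
            cb.aio_nbytes = sizeof(buf);

            /* submit one read, then block for its completion event */
            if (syscall(SYS_io_submit, ctx, 1, cbs) == 1 &&
                syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) == 1)
                    printf("read %lld bytes\n", (long long)ev.res);

            syscall(SYS_io_destroy, ctx);
            return 0;
    }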
/linux-4.1.27/drivers/gpu/drm/exynos/
H A Dexynos7_drm_decon.c75 struct decon_context *ctx = crtc->ctx; decon_wait_for_vblank() local
77 if (ctx->suspended) decon_wait_for_vblank()
80 atomic_set(&ctx->wait_vsync_event, 1); decon_wait_for_vblank()
86 if (!wait_event_timeout(ctx->wait_vsync_queue, decon_wait_for_vblank()
87 !atomic_read(&ctx->wait_vsync_event), decon_wait_for_vblank()
92 static void decon_clear_channel(struct decon_context *ctx) decon_clear_channel() argument
100 u32 val = readl(ctx->regs + WINCON(win)); decon_clear_channel()
104 writel(val, ctx->regs + WINCON(win)); decon_clear_channel()
111 unsigned int state = ctx->suspended; decon_clear_channel()
113 ctx->suspended = 0; decon_clear_channel()
114 decon_wait_for_vblank(ctx->crtc); decon_clear_channel()
115 ctx->suspended = state; decon_clear_channel()
119 static int decon_ctx_initialize(struct decon_context *ctx, decon_ctx_initialize() argument
124 ctx->drm_dev = drm_dev; decon_ctx_initialize()
125 ctx->pipe = priv->pipe++; decon_ctx_initialize()
128 if (is_drm_iommu_supported(ctx->drm_dev)) { decon_ctx_initialize()
135 decon_clear_channel(ctx); decon_ctx_initialize()
136 ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev); decon_ctx_initialize()
146 static void decon_ctx_remove(struct decon_context *ctx) decon_ctx_remove() argument
149 if (is_drm_iommu_supported(ctx->drm_dev)) decon_ctx_remove()
150 drm_iommu_detach_device(ctx->drm_dev, ctx->dev); decon_ctx_remove()
153 static u32 decon_calc_clkdiv(struct decon_context *ctx, decon_calc_clkdiv() argument
160 clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->vclk), ideal_clk); decon_calc_clkdiv()
177 struct decon_context *ctx = crtc->ctx; decon_commit() local
181 if (ctx->suspended) decon_commit()
188 if (!ctx->i80_if) { decon_commit()
196 writel(val, ctx->regs + VIDTCON0); decon_commit()
199 writel(val, ctx->regs + VIDTCON1); decon_commit()
208 writel(val, ctx->regs + VIDTCON2); decon_commit()
211 writel(val, ctx->regs + VIDTCON3); decon_commit()
217 writel(val, ctx->regs + VIDTCON4); decon_commit()
219 writel(mode->vdisplay - 1, ctx->regs + LINECNT_OP_THRESHOLD); decon_commit()
226 writel(val, ctx->regs + VIDCON0); decon_commit()
228 clkdiv = decon_calc_clkdiv(ctx, mode); decon_commit()
231 writel(val, ctx->regs + VCLKCON1); decon_commit()
232 writel(val, ctx->regs + VCLKCON2); decon_commit()
235 val = readl(ctx->regs + DECON_UPDATE); decon_commit()
237 writel(val, ctx->regs + DECON_UPDATE); decon_commit()
242 struct decon_context *ctx = crtc->ctx; decon_enable_vblank() local
245 if (ctx->suspended) decon_enable_vblank()
248 if (!test_and_set_bit(0, &ctx->irq_flags)) { decon_enable_vblank()
249 val = readl(ctx->regs + VIDINTCON0); decon_enable_vblank()
253 if (!ctx->i80_if) { decon_enable_vblank()
259 writel(val, ctx->regs + VIDINTCON0); decon_enable_vblank()
267 struct decon_context *ctx = crtc->ctx; decon_disable_vblank() local
270 if (ctx->suspended) decon_disable_vblank()
273 if (test_and_clear_bit(0, &ctx->irq_flags)) { decon_disable_vblank()
274 val = readl(ctx->regs + VIDINTCON0); decon_disable_vblank()
277 if (!ctx->i80_if) decon_disable_vblank()
280 writel(val, ctx->regs + VIDINTCON0); decon_disable_vblank()
284 static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win) decon_win_set_pixfmt() argument
286 struct exynos_drm_plane *plane = &ctx->planes[win]; decon_win_set_pixfmt()
290 val = readl(ctx->regs + WINCON(win)); decon_win_set_pixfmt()
358 writel(val, ctx->regs + WINCON(win)); decon_win_set_pixfmt()
361 static void decon_win_set_colkey(struct decon_context *ctx, unsigned int win) decon_win_set_colkey() argument
370 writel(keycon0, ctx->regs + WKEYCON0_BASE(win)); decon_win_set_colkey()
371 writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); decon_win_set_colkey()
380 static void decon_shadow_protect_win(struct decon_context *ctx, decon_shadow_protect_win() argument
387 val = readl(ctx->regs + SHADOWCON); decon_shadow_protect_win()
392 writel(val, ctx->regs + SHADOWCON); decon_shadow_protect_win()
397 struct decon_context *ctx = crtc->ctx; decon_win_commit() local
405 if (ctx->suspended) decon_win_commit()
411 plane = &ctx->planes[win]; decon_win_commit()
414 if (ctx->suspended) { decon_win_commit()
430 decon_shadow_protect_win(ctx, win, true); decon_win_commit()
434 writel(val, ctx->regs + VIDW_BUF_START(win)); decon_win_commit()
439 writel(plane->fb_width + padding, ctx->regs + VIDW_WHOLE_X(win)); decon_win_commit()
440 writel(plane->fb_height, ctx->regs + VIDW_WHOLE_Y(win)); decon_win_commit()
443 writel(plane->src_x, ctx->regs + VIDW_OFFSET_X(win)); decon_win_commit()
444 writel(plane->src_y, ctx->regs + VIDW_OFFSET_Y(win)); decon_win_commit()
462 writel(val, ctx->regs + VIDOSD_A(win)); decon_win_commit()
473 writel(val, ctx->regs + VIDOSD_B(win)); decon_win_commit()
483 writel(alpha, ctx->regs + VIDOSD_C(win)); decon_win_commit()
489 writel(alpha, ctx->regs + VIDOSD_D(win)); decon_win_commit()
491 decon_win_set_pixfmt(ctx, win); decon_win_commit()
495 decon_win_set_colkey(ctx, win); decon_win_commit()
498 val = readl(ctx->regs + WINCON(win)); decon_win_commit()
501 writel(val, ctx->regs + WINCON(win)); decon_win_commit()
504 decon_shadow_protect_win(ctx, win, false); decon_win_commit()
506 val = readl(ctx->regs + DECON_UPDATE); decon_win_commit()
508 writel(val, ctx->regs + DECON_UPDATE); decon_win_commit()
515 struct decon_context *ctx = crtc->ctx; decon_win_disable() local
522 plane = &ctx->planes[win]; decon_win_disable()
524 if (ctx->suspended) { decon_win_disable()
531 decon_shadow_protect_win(ctx, win, true); decon_win_disable()
534 val = readl(ctx->regs + WINCON(win)); decon_win_disable()
536 writel(val, ctx->regs + WINCON(win)); decon_win_disable()
539 decon_shadow_protect_win(ctx, win, false); decon_win_disable()
541 val = readl(ctx->regs + DECON_UPDATE); decon_win_disable()
543 writel(val, ctx->regs + DECON_UPDATE); decon_win_disable()
548 static void decon_window_suspend(struct decon_context *ctx) decon_window_suspend() argument
554 plane = &ctx->planes[i]; decon_window_suspend()
557 decon_win_disable(ctx->crtc, i); decon_window_suspend()
561 static void decon_window_resume(struct decon_context *ctx) decon_window_resume() argument
567 plane = &ctx->planes[i]; decon_window_resume()
573 static void decon_apply(struct decon_context *ctx) decon_apply() argument
579 plane = &ctx->planes[i]; decon_apply()
581 decon_win_commit(ctx->crtc, i); decon_apply()
583 decon_win_disable(ctx->crtc, i); decon_apply()
586 decon_commit(ctx->crtc); decon_apply()
589 static void decon_init(struct decon_context *ctx) decon_init() argument
593 writel(VIDCON0_SWRESET, ctx->regs + VIDCON0); decon_init()
596 if (!ctx->i80_if) decon_init()
598 writel(val, ctx->regs + VIDOUTCON0); decon_init()
600 writel(VCLKCON0_CLKVALUP | VCLKCON0_VCLKFREE, ctx->regs + VCLKCON0); decon_init()
602 if (!ctx->i80_if) decon_init()
603 writel(VIDCON1_VCLK_HOLD, ctx->regs + VIDCON1(0)); decon_init()
606 static int decon_poweron(struct decon_context *ctx) decon_poweron() argument
610 if (!ctx->suspended) decon_poweron()
613 ctx->suspended = false; decon_poweron()
615 pm_runtime_get_sync(ctx->dev); decon_poweron()
617 ret = clk_prepare_enable(ctx->pclk); decon_poweron()
623 ret = clk_prepare_enable(ctx->aclk); decon_poweron()
629 ret = clk_prepare_enable(ctx->eclk); decon_poweron()
635 ret = clk_prepare_enable(ctx->vclk); decon_poweron()
641 decon_init(ctx); decon_poweron()
644 if (test_and_clear_bit(0, &ctx->irq_flags)) { decon_poweron()
645 ret = decon_enable_vblank(ctx->crtc); decon_poweron()
652 decon_window_resume(ctx); decon_poweron()
654 decon_apply(ctx); decon_poweron()
659 clk_disable_unprepare(ctx->vclk); decon_poweron()
661 clk_disable_unprepare(ctx->eclk); decon_poweron()
663 clk_disable_unprepare(ctx->aclk); decon_poweron()
665 clk_disable_unprepare(ctx->pclk); decon_poweron()
667 ctx->suspended = true; decon_poweron()
671 static int decon_poweroff(struct decon_context *ctx) decon_poweroff() argument
673 if (ctx->suspended) decon_poweroff()
681 decon_window_suspend(ctx); decon_poweroff()
683 clk_disable_unprepare(ctx->vclk); decon_poweroff()
684 clk_disable_unprepare(ctx->eclk); decon_poweroff()
685 clk_disable_unprepare(ctx->aclk); decon_poweroff()
686 clk_disable_unprepare(ctx->pclk); decon_poweroff()
688 pm_runtime_put_sync(ctx->dev); decon_poweroff()
690 ctx->suspended = true; decon_poweroff()
700 decon_poweron(crtc->ctx); decon_dpms()
705 decon_poweroff(crtc->ctx); decon_dpms()
727 struct decon_context *ctx = (struct decon_context *)dev_id; decon_irq_handler() local
730 val = readl(ctx->regs + VIDINTCON1); decon_irq_handler()
732 clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME; decon_irq_handler()
734 writel(clear_bit, ctx->regs + VIDINTCON1); decon_irq_handler()
737 if (ctx->pipe < 0 || !ctx->drm_dev) decon_irq_handler()
740 if (!ctx->i80_if) { decon_irq_handler()
741 drm_handle_vblank(ctx->drm_dev, ctx->pipe); decon_irq_handler()
742 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); decon_irq_handler()
745 if (atomic_read(&ctx->wait_vsync_event)) { decon_irq_handler()
746 atomic_set(&ctx->wait_vsync_event, 0); decon_irq_handler()
747 wake_up(&ctx->wait_vsync_queue); decon_irq_handler()
756 struct decon_context *ctx = dev_get_drvdata(dev); decon_bind() local
763 ret = decon_ctx_initialize(ctx, drm_dev); decon_bind()
770 type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY : decon_bind()
772 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], decon_bind()
773 1 << ctx->pipe, type, zpos); decon_bind()
778 exynos_plane = &ctx->planes[ctx->default_win]; decon_bind()
779 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base, decon_bind()
780 ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD, decon_bind()
781 &decon_crtc_ops, ctx); decon_bind()
782 if (IS_ERR(ctx->crtc)) { decon_bind()
783 decon_ctx_remove(ctx); decon_bind()
784 return PTR_ERR(ctx->crtc); decon_bind()
787 if (ctx->display) decon_bind()
788 exynos_drm_create_enc_conn(drm_dev, ctx->display); decon_bind()
797 struct decon_context *ctx = dev_get_drvdata(dev); decon_unbind() local
799 decon_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); decon_unbind()
801 if (ctx->display) decon_unbind()
802 exynos_dpi_remove(ctx->display); decon_unbind()
804 decon_ctx_remove(ctx); decon_unbind()
815 struct decon_context *ctx; decon_probe() local
823 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); decon_probe()
824 if (!ctx) decon_probe()
832 ctx->dev = dev; decon_probe()
833 ctx->suspended = true; decon_probe()
837 ctx->i80_if = true; decon_probe()
840 ctx->regs = of_iomap(dev->of_node, 0); decon_probe()
841 if (!ctx->regs) { decon_probe()
846 ctx->pclk = devm_clk_get(dev, "pclk_decon0"); decon_probe()
847 if (IS_ERR(ctx->pclk)) { decon_probe()
849 ret = PTR_ERR(ctx->pclk); decon_probe()
853 ctx->aclk = devm_clk_get(dev, "aclk_decon0"); decon_probe()
854 if (IS_ERR(ctx->aclk)) { decon_probe()
856 ret = PTR_ERR(ctx->aclk); decon_probe()
860 ctx->eclk = devm_clk_get(dev, "decon0_eclk"); decon_probe()
861 if (IS_ERR(ctx->eclk)) { decon_probe()
863 ret = PTR_ERR(ctx->eclk); decon_probe()
867 ctx->vclk = devm_clk_get(dev, "decon0_vclk"); decon_probe()
868 if (IS_ERR(ctx->vclk)) { decon_probe()
870 ret = PTR_ERR(ctx->vclk); decon_probe()
875 ctx->i80_if ? "lcd_sys" : "vsync"); decon_probe()
883 0, "drm_decon", ctx); decon_probe()
889 init_waitqueue_head(&ctx->wait_vsync_queue); decon_probe()
890 atomic_set(&ctx->wait_vsync_event, 0); decon_probe()
892 platform_set_drvdata(pdev, ctx); decon_probe()
894 ctx->display = exynos_dpi_probe(dev); decon_probe()
895 if (IS_ERR(ctx->display)) { decon_probe()
896 ret = PTR_ERR(ctx->display); decon_probe()
912 iounmap(ctx->regs); decon_probe()
921 struct decon_context *ctx = dev_get_drvdata(&pdev->dev); decon_remove() local
925 iounmap(ctx->regs); decon_remove()
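
The decon results above repeat one commit pattern: bracket the window programming with decon_shadow_protect_win(..., true/false), then kick DECON_UPDATE so the shadow registers latch as one unit. A minimal user-space model of that sequence follows; the register offsets, bit values, and the regs[] array are illustrative stand-ins, not the real DECON register map.

#include <stdint.h>
#include <stdio.h>

#define SHADOWCON              0x00
#define WINCON(w)              (0x04 + (w) * 4)
#define DECON_UPDATE           0x20
#define SHADOWCON_PROTECT(w)   (1u << (w))  /* assumed bit layout */
#define WINCON_ENWIN           (1u << 0)    /* assumed bit layout */
#define UPDATE_STANDALONE      (1u << 0)    /* assumed bit layout */

static uint32_t regs[16];                   /* stand-in for ctx->regs */

static uint32_t rd(uint32_t off)           { return regs[off / 4]; }
static void wr(uint32_t val, uint32_t off) { regs[off / 4] = val; }

static void shadow_protect_win(unsigned int win, int protect)
{
	uint32_t val = rd(SHADOWCON);

	if (protect)
		val |= SHADOWCON_PROTECT(win);
	else
		val &= ~SHADOWCON_PROTECT(win);
	wr(val, SHADOWCON);
}

static void win_commit(unsigned int win)
{
	uint32_t val;

	shadow_protect_win(win, 1);        /* freeze the shadow registers */
	/* ... program buffer address, geometry, pixel format here ... */
	val = rd(WINCON(win));
	val |= WINCON_ENWIN;               /* enable the window */
	wr(val, WINCON(win));
	shadow_protect_win(win, 0);        /* allow the shadow copy to latch */

	val = rd(DECON_UPDATE);
	val |= UPDATE_STANDALONE;          /* kick the standalone update */
	wr(val, DECON_UPDATE);
}

int main(void)
{
	win_commit(0);
	printf("WINCON(0)=%#x UPDATE=%#x\n",
	       (unsigned int)rd(WINCON(0)), (unsigned int)rd(DECON_UPDATE));
	return 0;
}
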
exynos_drm_vidi.c
89 struct vidi_context *ctx = crtc->ctx; vidi_enable_vblank() local
91 if (ctx->suspended) vidi_enable_vblank()
94 if (!test_and_set_bit(0, &ctx->irq_flags)) vidi_enable_vblank()
95 ctx->vblank_on = true; vidi_enable_vblank()
97 ctx->direct_vblank = true; vidi_enable_vblank()
104 schedule_work(&ctx->work); vidi_enable_vblank()
111 struct vidi_context *ctx = crtc->ctx; vidi_disable_vblank() local
113 if (ctx->suspended) vidi_disable_vblank()
116 if (test_and_clear_bit(0, &ctx->irq_flags)) vidi_disable_vblank()
117 ctx->vblank_on = false; vidi_disable_vblank()
122 struct vidi_context *ctx = crtc->ctx; vidi_win_commit() local
125 if (ctx->suspended) vidi_win_commit()
131 plane = &ctx->planes[win]; vidi_win_commit()
137 if (ctx->vblank_on) vidi_win_commit()
138 schedule_work(&ctx->work); vidi_win_commit()
143 struct vidi_context *ctx = crtc->ctx; vidi_win_disable() local
149 plane = &ctx->planes[win]; vidi_win_disable()
155 static int vidi_power_on(struct vidi_context *ctx, bool enable) vidi_power_on() argument
166 ctx->suspended = false; vidi_power_on()
169 if (test_and_clear_bit(0, &ctx->irq_flags)) vidi_power_on()
170 vidi_enable_vblank(ctx->crtc); vidi_power_on()
173 plane = &ctx->planes[i]; vidi_power_on()
175 vidi_win_commit(ctx->crtc, i); vidi_power_on()
178 ctx->suspended = true; vidi_power_on()
186 struct vidi_context *ctx = crtc->ctx; vidi_dpms() local
190 mutex_lock(&ctx->lock); vidi_dpms()
194 vidi_power_on(ctx, true); vidi_dpms()
199 vidi_power_on(ctx, false); vidi_dpms()
206 mutex_unlock(&ctx->lock); vidi_dpms()
209 static int vidi_ctx_initialize(struct vidi_context *ctx, vidi_ctx_initialize() argument
214 ctx->drm_dev = drm_dev; vidi_ctx_initialize()
215 ctx->pipe = priv->pipe++; vidi_ctx_initialize()
230 struct vidi_context *ctx = container_of(work, struct vidi_context, vidi_fake_vblank_handler() local
233 if (ctx->pipe < 0) vidi_fake_vblank_handler()
239 mutex_lock(&ctx->lock); vidi_fake_vblank_handler()
241 if (ctx->direct_vblank) { vidi_fake_vblank_handler()
242 drm_handle_vblank(ctx->drm_dev, ctx->pipe); vidi_fake_vblank_handler()
243 ctx->direct_vblank = false; vidi_fake_vblank_handler()
244 mutex_unlock(&ctx->lock); vidi_fake_vblank_handler()
248 mutex_unlock(&ctx->lock); vidi_fake_vblank_handler()
250 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); vidi_fake_vblank_handler()
256 struct vidi_context *ctx = dev_get_drvdata(dev); vidi_show_connection() local
259 mutex_lock(&ctx->lock); vidi_show_connection()
261 rc = sprintf(buf, "%d\n", ctx->connected); vidi_show_connection()
263 mutex_unlock(&ctx->lock); vidi_show_connection()
272 struct vidi_context *ctx = dev_get_drvdata(dev); vidi_store_connection() local
275 ret = kstrtoint(buf, 0, &ctx->connected); vidi_store_connection()
279 if (ctx->connected > 1) vidi_store_connection()
283 if (!ctx->raw_edid) vidi_store_connection()
284 ctx->raw_edid = (struct edid *)fake_edid_info; vidi_store_connection()
287 if (ctx->raw_edid != (struct edid *)fake_edid_info) { vidi_store_connection()
294 drm_helper_hpd_irq_event(ctx->drm_dev); vidi_store_connection()
305 struct vidi_context *ctx = NULL; vidi_connection_ioctl() local
325 ctx = display_to_vidi(display); vidi_connection_ioctl()
330 if (!ctx) { vidi_connection_ioctl()
335 if (ctx->connected == vidi->connection) { vidi_connection_ioctl()
346 ctx->raw_edid = drm_edid_duplicate(raw_edid); vidi_connection_ioctl()
347 if (!ctx->raw_edid) { vidi_connection_ioctl()
356 if (ctx->raw_edid && ctx->raw_edid != vidi_connection_ioctl()
358 kfree(ctx->raw_edid); vidi_connection_ioctl()
359 ctx->raw_edid = NULL; vidi_connection_ioctl()
363 ctx->connected = vidi->connection; vidi_connection_ioctl()
364 drm_helper_hpd_irq_event(ctx->drm_dev); vidi_connection_ioctl()
372 struct vidi_context *ctx = ctx_from_connector(connector); vidi_detect() local
378 return ctx->connected ? connector_status_connected : vidi_detect()
395 struct vidi_context *ctx = ctx_from_connector(connector); vidi_get_modes() local
401 * to ctx->raw_edid through specific ioctl. vidi_get_modes()
403 if (!ctx->raw_edid) { vidi_get_modes()
408 edid_len = (1 + ctx->raw_edid->extensions) * EDID_LENGTH; vidi_get_modes()
409 edid = kmemdup(ctx->raw_edid, edid_len, GFP_KERNEL); vidi_get_modes()
422 struct vidi_context *ctx = ctx_from_connector(connector); vidi_best_encoder() local
424 return ctx->encoder; vidi_best_encoder()
435 struct vidi_context *ctx = display_to_vidi(display); vidi_create_connector() local
436 struct drm_connector *connector = &ctx->connector; vidi_create_connector()
439 ctx->encoder = encoder; vidi_create_connector()
442 ret = drm_connector_init(ctx->drm_dev, connector, vidi_create_connector()
463 struct vidi_context *ctx = dev_get_drvdata(dev); vidi_bind() local
470 vidi_ctx_initialize(ctx, drm_dev); vidi_bind()
473 type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY : vidi_bind()
475 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], vidi_bind()
476 1 << ctx->pipe, type, zpos); vidi_bind()
481 exynos_plane = &ctx->planes[ctx->default_win]; vidi_bind()
482 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base, vidi_bind()
483 ctx->pipe, EXYNOS_DISPLAY_TYPE_VIDI, vidi_bind()
484 &vidi_crtc_ops, ctx); vidi_bind()
485 if (IS_ERR(ctx->crtc)) { vidi_bind()
487 return PTR_ERR(ctx->crtc); vidi_bind()
490 ret = exynos_drm_create_enc_conn(drm_dev, &ctx->display); vidi_bind()
492 ctx->crtc->base.funcs->destroy(&ctx->crtc->base); vidi_bind()
511 struct vidi_context *ctx; vidi_probe() local
514 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); vidi_probe()
515 if (!ctx) vidi_probe()
518 ctx->display.type = EXYNOS_DISPLAY_TYPE_VIDI; vidi_probe()
519 ctx->display.ops = &vidi_display_ops; vidi_probe()
520 ctx->default_win = 0; vidi_probe()
521 ctx->pdev = pdev; vidi_probe()
529 ctx->display.type); vidi_probe()
533 INIT_WORK(&ctx->work, vidi_fake_vblank_handler); vidi_probe()
535 mutex_init(&ctx->lock); vidi_probe()
537 platform_set_drvdata(pdev, ctx); vidi_probe()
563 struct vidi_context *ctx = platform_get_drvdata(pdev); vidi_remove() local
565 if (ctx->raw_edid != (struct edid *)fake_edid_info) { vidi_remove()
566 kfree(ctx->raw_edid); vidi_remove()
567 ctx->raw_edid = NULL; vidi_remove()
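
vidi has no real scanout hardware, so the results above fake vblank from a work item: vidi_enable_vblank() sets direct_vblank and schedules ctx->work, and vidi_fake_vblank_handler() decides under ctx->lock whether to report a direct vblank or finish a pending page flip. A small pthread model of that split, with illustrative names (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct vidi_model {
	pthread_mutex_t lock;
	bool direct_vblank;
};

static void handle_vblank(void)   { puts("vblank delivered"); }
static void finish_pageflip(void) { puts("page flip finished"); }

/* Stands in for the scheduled work item. */
static void *fake_vblank_worker(void *arg)
{
	struct vidi_model *ctx = arg;

	pthread_mutex_lock(&ctx->lock);
	if (ctx->direct_vblank) {
		handle_vblank();
		ctx->direct_vblank = false;
		pthread_mutex_unlock(&ctx->lock);
		return NULL;              /* direct request: nothing to flip */
	}
	pthread_mutex_unlock(&ctx->lock);

	finish_pageflip();                /* commit path: complete the flip */
	return NULL;
}

int main(void)
{
	struct vidi_model ctx = { PTHREAD_MUTEX_INITIALIZER, true };
	pthread_t worker;

	pthread_create(&worker, NULL, fake_vblank_worker, &ctx); /* "schedule_work" */
	pthread_join(worker, NULL);
	return 0;
}
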
exynos_drm_fimd.c
201 struct fimd_context *ctx = crtc->ctx; fimd_wait_for_vblank() local
203 if (ctx->suspended) fimd_wait_for_vblank()
206 atomic_set(&ctx->wait_vsync_event, 1); fimd_wait_for_vblank()
212 if (!wait_event_timeout(ctx->wait_vsync_queue, fimd_wait_for_vblank()
213 !atomic_read(&ctx->wait_vsync_event), fimd_wait_for_vblank()
218 static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win, fimd_enable_video_output() argument
221 u32 val = readl(ctx->regs + WINCON(win)); fimd_enable_video_output()
228 writel(val, ctx->regs + WINCON(win)); fimd_enable_video_output()
231 static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, fimd_enable_shadow_channel_path() argument
235 u32 val = readl(ctx->regs + SHADOWCON); fimd_enable_shadow_channel_path()
242 writel(val, ctx->regs + SHADOWCON); fimd_enable_shadow_channel_path()
245 static void fimd_clear_channel(struct fimd_context *ctx) fimd_clear_channel() argument
253 u32 val = readl(ctx->regs + WINCON(win)); fimd_clear_channel()
256 fimd_enable_video_output(ctx, win, false); fimd_clear_channel()
258 if (ctx->driver_data->has_shadowcon) fimd_clear_channel()
259 fimd_enable_shadow_channel_path(ctx, win, fimd_clear_channel()
268 unsigned int state = ctx->suspended; fimd_clear_channel()
270 ctx->suspended = 0; fimd_clear_channel()
271 fimd_wait_for_vblank(ctx->crtc); fimd_clear_channel()
272 ctx->suspended = state; fimd_clear_channel()
276 static int fimd_iommu_attach_devices(struct fimd_context *ctx, fimd_iommu_attach_devices() argument
281 if (is_drm_iommu_supported(ctx->drm_dev)) { fimd_iommu_attach_devices()
288 fimd_clear_channel(ctx); fimd_iommu_attach_devices()
289 ret = drm_iommu_attach_device(ctx->drm_dev, ctx->dev); fimd_iommu_attach_devices()
300 static void fimd_iommu_detach_devices(struct fimd_context *ctx) fimd_iommu_detach_devices() argument
303 if (is_drm_iommu_supported(ctx->drm_dev)) fimd_iommu_detach_devices()
304 drm_iommu_detach_device(ctx->drm_dev, ctx->dev); fimd_iommu_detach_devices()
307 static u32 fimd_calc_clkdiv(struct fimd_context *ctx, fimd_calc_clkdiv() argument
313 if (ctx->i80_if) { fimd_calc_clkdiv()
322 clkdiv = DIV_ROUND_UP(clk_get_rate(ctx->lcd_clk), ideal_clk); fimd_calc_clkdiv()
339 struct fimd_context *ctx = crtc->ctx; fimd_commit() local
341 struct fimd_driver_data *driver_data = ctx->driver_data; fimd_commit()
342 void *timing_base = ctx->regs + driver_data->timing_base; fimd_commit()
345 if (ctx->suspended) fimd_commit()
352 if (ctx->i80_if) { fimd_commit()
353 val = ctx->i80ifcon | I80IFEN_ENABLE; fimd_commit()
360 if (driver_data->has_vtsel && ctx->sysreg && fimd_commit()
361 regmap_update_bits(ctx->sysreg, fimd_commit()
373 vidcon1 = ctx->vidcon1; fimd_commit()
378 writel(vidcon1, ctx->regs + driver_data->timing_base + VIDCON1); fimd_commit()
388 writel(val, ctx->regs + driver_data->timing_base + VIDTCON0); fimd_commit()
398 writel(val, ctx->regs + driver_data->timing_base + VIDTCON1); fimd_commit()
402 writel(ctx->vidout_con, timing_base + VIDOUT_CON); fimd_commit()
405 if (ctx->sysreg && regmap_update_bits(ctx->sysreg, fimd_commit()
418 writel(val, ctx->regs + driver_data->timing_base + VIDTCON2); fimd_commit()
424 val = ctx->vidcon0; fimd_commit()
427 if (ctx->driver_data->has_clksel) fimd_commit()
430 clkdiv = fimd_calc_clkdiv(ctx, mode); fimd_commit()
434 writel(val, ctx->regs + VIDCON0); fimd_commit()
439 struct fimd_context *ctx = crtc->ctx; fimd_enable_vblank() local
442 if (ctx->suspended) fimd_enable_vblank()
445 if (!test_and_set_bit(0, &ctx->irq_flags)) { fimd_enable_vblank()
446 val = readl(ctx->regs + VIDINTCON0); fimd_enable_vblank()
450 if (ctx->i80_if) { fimd_enable_vblank()
463 writel(val, ctx->regs + VIDINTCON0); fimd_enable_vblank()
471 struct fimd_context *ctx = crtc->ctx; fimd_disable_vblank() local
474 if (ctx->suspended) fimd_disable_vblank()
477 if (test_and_clear_bit(0, &ctx->irq_flags)) { fimd_disable_vblank()
478 val = readl(ctx->regs + VIDINTCON0); fimd_disable_vblank()
482 if (ctx->i80_if) { fimd_disable_vblank()
489 writel(val, ctx->regs + VIDINTCON0); fimd_disable_vblank()
493 static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win) fimd_win_set_pixfmt() argument
495 struct exynos_drm_plane *plane = &ctx->planes[win]; fimd_win_set_pixfmt()
504 if (ctx->driver_data->has_limited_fmt && !win) { fimd_win_set_pixfmt()
560 writel(val, ctx->regs + WINCON(win)); fimd_win_set_pixfmt()
572 writel(val, ctx->regs + VIDOSD_C(win)); fimd_win_set_pixfmt()
576 writel(val, ctx->regs + VIDWnALPHA0(win)); fimd_win_set_pixfmt()
577 writel(val, ctx->regs + VIDWnALPHA1(win)); fimd_win_set_pixfmt()
581 static void fimd_win_set_colkey(struct fimd_context *ctx, unsigned int win) fimd_win_set_colkey() argument
590 writel(keycon0, ctx->regs + WKEYCON0_BASE(win)); fimd_win_set_colkey()
591 writel(keycon1, ctx->regs + WKEYCON1_BASE(win)); fimd_win_set_colkey()
600 static void fimd_shadow_protect_win(struct fimd_context *ctx, fimd_shadow_protect_win() argument
605 if (ctx->driver_data->has_shadowcon) { fimd_shadow_protect_win()
613 val = readl(ctx->regs + reg); fimd_shadow_protect_win()
618 writel(val, ctx->regs + reg); fimd_shadow_protect_win()
623 struct fimd_context *ctx = crtc->ctx; fimd_win_commit() local
629 if (ctx->suspended) fimd_win_commit()
635 plane = &ctx->planes[win]; fimd_win_commit()
638 if (ctx->suspended) { fimd_win_commit()
654 fimd_shadow_protect_win(ctx, win, true); fimd_win_commit()
663 writel(val, ctx->regs + VIDWx_BUF_START(win, 0)); fimd_win_commit()
668 writel(val, ctx->regs + VIDWx_BUF_END(win, 0)); fimd_win_commit()
682 writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0)); fimd_win_commit()
689 writel(val, ctx->regs + VIDOSD_A(win)); fimd_win_commit()
701 writel(val, ctx->regs + VIDOSD_B(win)); fimd_win_commit()
712 writel(val, ctx->regs + offset); fimd_win_commit()
717 fimd_win_set_pixfmt(ctx, win); fimd_win_commit()
721 fimd_win_set_colkey(ctx, win); fimd_win_commit()
723 fimd_enable_video_output(ctx, win, true); fimd_win_commit()
725 if (ctx->driver_data->has_shadowcon) fimd_win_commit()
726 fimd_enable_shadow_channel_path(ctx, win, true); fimd_win_commit()
729 fimd_shadow_protect_win(ctx, win, false); fimd_win_commit()
733 if (ctx->i80_if) fimd_win_commit()
734 atomic_set(&ctx->win_updated, 1); fimd_win_commit()
739 struct fimd_context *ctx = crtc->ctx; fimd_win_disable() local
745 plane = &ctx->planes[win]; fimd_win_disable()
747 if (ctx->suspended) { fimd_win_disable()
754 fimd_shadow_protect_win(ctx, win, true); fimd_win_disable()
756 fimd_enable_video_output(ctx, win, false); fimd_win_disable()
758 if (ctx->driver_data->has_shadowcon) fimd_win_disable()
759 fimd_enable_shadow_channel_path(ctx, win, false); fimd_win_disable()
762 fimd_shadow_protect_win(ctx, win, false); fimd_win_disable()
767 static void fimd_window_suspend(struct fimd_context *ctx) fimd_window_suspend() argument
773 plane = &ctx->planes[i]; fimd_window_suspend()
776 fimd_win_disable(ctx->crtc, i); fimd_window_suspend()
780 static void fimd_window_resume(struct fimd_context *ctx) fimd_window_resume() argument
786 plane = &ctx->planes[i]; fimd_window_resume()
792 static void fimd_apply(struct fimd_context *ctx) fimd_apply() argument
798 plane = &ctx->planes[i]; fimd_apply()
800 fimd_win_commit(ctx->crtc, i); fimd_apply()
802 fimd_win_disable(ctx->crtc, i); fimd_apply()
805 fimd_commit(ctx->crtc); fimd_apply()
808 static int fimd_poweron(struct fimd_context *ctx) fimd_poweron() argument
812 if (!ctx->suspended) fimd_poweron()
815 ctx->suspended = false; fimd_poweron()
817 pm_runtime_get_sync(ctx->dev); fimd_poweron()
819 ret = clk_prepare_enable(ctx->bus_clk); fimd_poweron()
825 ret = clk_prepare_enable(ctx->lcd_clk); fimd_poweron()
832 if (test_and_clear_bit(0, &ctx->irq_flags)) { fimd_poweron()
833 ret = fimd_enable_vblank(ctx->crtc); fimd_poweron()
840 fimd_window_resume(ctx); fimd_poweron()
842 fimd_apply(ctx); fimd_poweron()
847 clk_disable_unprepare(ctx->lcd_clk); fimd_poweron()
849 clk_disable_unprepare(ctx->bus_clk); fimd_poweron()
851 ctx->suspended = true; fimd_poweron()
855 static int fimd_poweroff(struct fimd_context *ctx) fimd_poweroff() argument
857 if (ctx->suspended) fimd_poweroff()
865 fimd_window_suspend(ctx); fimd_poweroff()
867 clk_disable_unprepare(ctx->lcd_clk); fimd_poweroff()
868 clk_disable_unprepare(ctx->bus_clk); fimd_poweroff()
870 pm_runtime_put_sync(ctx->dev); fimd_poweroff()
872 ctx->suspended = true; fimd_poweroff()
882 fimd_poweron(crtc->ctx); fimd_dpms()
887 fimd_poweroff(crtc->ctx); fimd_dpms()
897 struct fimd_context *ctx = dev_get_drvdata(dev); fimd_trigger() local
898 struct fimd_driver_data *driver_data = ctx->driver_data; fimd_trigger()
899 void *timing_base = ctx->regs + driver_data->timing_base; fimd_trigger()
906 if (atomic_read(&ctx->triggering)) fimd_trigger()
910 atomic_set(&ctx->triggering, 1); fimd_trigger()
920 if (!test_bit(0, &ctx->irq_flags)) fimd_trigger()
921 atomic_set(&ctx->triggering, 0); fimd_trigger()
926 struct fimd_context *ctx = crtc->ctx; fimd_te_handler() local
929 if (ctx->pipe < 0 || !ctx->drm_dev) fimd_te_handler()
936 if (atomic_add_unless(&ctx->win_updated, -1, 0)) fimd_te_handler()
937 fimd_trigger(ctx->dev); fimd_te_handler()
940 if (atomic_read(&ctx->wait_vsync_event)) { fimd_te_handler()
941 atomic_set(&ctx->wait_vsync_event, 0); fimd_te_handler()
942 wake_up(&ctx->wait_vsync_queue); fimd_te_handler()
945 if (test_bit(0, &ctx->irq_flags)) fimd_te_handler()
946 drm_handle_vblank(ctx->drm_dev, ctx->pipe); fimd_te_handler()
951 struct fimd_context *ctx = crtc->ctx; fimd_dp_clock_enable() local
959 if (ctx->driver_data != &exynos5_fimd_driver_data) fimd_dp_clock_enable()
963 writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON); fimd_dp_clock_enable()
981 struct fimd_context *ctx = (struct fimd_context *)dev_id; fimd_irq_handler() local
984 val = readl(ctx->regs + VIDINTCON1); fimd_irq_handler()
986 clear_bit = ctx->i80_if ? VIDINTCON1_INT_I80 : VIDINTCON1_INT_FRAME; fimd_irq_handler()
988 writel(clear_bit, ctx->regs + VIDINTCON1); fimd_irq_handler()
991 if (ctx->pipe < 0 || !ctx->drm_dev) fimd_irq_handler()
994 if (ctx->i80_if) { fimd_irq_handler()
995 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); fimd_irq_handler()
998 atomic_set(&ctx->triggering, 0); fimd_irq_handler()
1000 drm_handle_vblank(ctx->drm_dev, ctx->pipe); fimd_irq_handler()
1001 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); fimd_irq_handler()
1004 if (atomic_read(&ctx->wait_vsync_event)) { fimd_irq_handler()
1005 atomic_set(&ctx->wait_vsync_event, 0); fimd_irq_handler()
1006 wake_up(&ctx->wait_vsync_queue); fimd_irq_handler()
1016 struct fimd_context *ctx = dev_get_drvdata(dev); fimd_bind() local
1024 ctx->drm_dev = drm_dev; fimd_bind()
1025 ctx->pipe = priv->pipe++; fimd_bind()
1028 type = (zpos == ctx->default_win) ? DRM_PLANE_TYPE_PRIMARY : fimd_bind()
1030 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], fimd_bind()
1031 1 << ctx->pipe, type, zpos); fimd_bind()
1036 exynos_plane = &ctx->planes[ctx->default_win]; fimd_bind()
1037 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base, fimd_bind()
1038 ctx->pipe, EXYNOS_DISPLAY_TYPE_LCD, fimd_bind()
1039 &fimd_crtc_ops, ctx); fimd_bind()
1040 if (IS_ERR(ctx->crtc)) fimd_bind()
1041 return PTR_ERR(ctx->crtc); fimd_bind()
1043 if (ctx->display) fimd_bind()
1044 exynos_drm_create_enc_conn(drm_dev, ctx->display); fimd_bind()
1046 return fimd_iommu_attach_devices(ctx, drm_dev); fimd_bind()
1052 struct fimd_context *ctx = dev_get_drvdata(dev); fimd_unbind() local
1054 fimd_dpms(ctx->crtc, DRM_MODE_DPMS_OFF); fimd_unbind()
1056 fimd_iommu_detach_devices(ctx); fimd_unbind()
1058 if (ctx->display) fimd_unbind()
1059 exynos_dpi_remove(ctx->display); fimd_unbind()
1070 struct fimd_context *ctx; fimd_probe() local
1078 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); fimd_probe()
1079 if (!ctx) fimd_probe()
1087 ctx->dev = dev; fimd_probe()
1088 ctx->suspended = true; fimd_probe()
1089 ctx->driver_data = drm_fimd_get_driver_data(pdev); fimd_probe()
1092 ctx->vidcon1 |= VIDCON1_INV_VDEN; fimd_probe()
1094 ctx->vidcon1 |= VIDCON1_INV_VCLK; fimd_probe()
1100 ctx->i80_if = true; fimd_probe()
1102 if (ctx->driver_data->has_vidoutcon) fimd_probe()
1103 ctx->vidout_con |= VIDOUT_CON_F_I80_LDI0; fimd_probe()
1105 ctx->vidcon0 |= VIDCON0_VIDOUT_I80_LDI0; fimd_probe()
1110 ctx->vidcon0 |= VIDCON0_DSI_EN; fimd_probe()
1114 ctx->i80ifcon = LCD_CS_SETUP(val); fimd_probe()
1117 ctx->i80ifcon |= LCD_WR_SETUP(val); fimd_probe()
1120 ctx->i80ifcon |= LCD_WR_ACTIVE(val); fimd_probe()
1123 ctx->i80ifcon |= LCD_WR_HOLD(val); fimd_probe()
1127 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, fimd_probe()
1129 if (IS_ERR(ctx->sysreg)) { fimd_probe()
1131 ctx->sysreg = NULL; fimd_probe()
1134 ctx->bus_clk = devm_clk_get(dev, "fimd"); fimd_probe()
1135 if (IS_ERR(ctx->bus_clk)) { fimd_probe()
1137 ret = PTR_ERR(ctx->bus_clk); fimd_probe()
1141 ctx->lcd_clk = devm_clk_get(dev, "sclk_fimd"); fimd_probe()
1142 if (IS_ERR(ctx->lcd_clk)) { fimd_probe()
1144 ret = PTR_ERR(ctx->lcd_clk); fimd_probe()
1150 ctx->regs = devm_ioremap_resource(dev, res); fimd_probe()
1151 if (IS_ERR(ctx->regs)) { fimd_probe()
1152 ret = PTR_ERR(ctx->regs); fimd_probe()
1157 ctx->i80_if ? "lcd_sys" : "vsync"); fimd_probe()
1165 0, "drm_fimd", ctx); fimd_probe()
1171 init_waitqueue_head(&ctx->wait_vsync_queue); fimd_probe()
1172 atomic_set(&ctx->wait_vsync_event, 0); fimd_probe()
1174 platform_set_drvdata(pdev, ctx); fimd_probe()
1176 ctx->display = exynos_dpi_probe(dev); fimd_probe()
1177 if (IS_ERR(ctx->display)) { fimd_probe()
1178 ret = PTR_ERR(ctx->display); fimd_probe()
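
fimd_poweron() above enables bus_clk and then lcd_clk, and unwinds them in reverse order when a later step fails: the usual goto-based cleanup ladder. A compilable sketch with the clock API stubbed out as prints:

#include <stdio.h>

static int clk_prepare_enable(const char *clk)
{
	printf("enable %s\n", clk);
	return 0;                         /* would return -errno on failure */
}

static void clk_disable_unprepare(const char *clk)
{
	printf("disable %s\n", clk);
}

static int poweron(void)
{
	int ret;

	ret = clk_prepare_enable("bus_clk");
	if (ret < 0)
		goto err_bus;

	ret = clk_prepare_enable("lcd_clk");
	if (ret < 0)
		goto err_lcd;

	/* ... restore vblank state, resume windows, apply config ... */
	return 0;

err_lcd:
	clk_disable_unprepare("bus_clk"); /* undo in reverse order */
err_bus:
	return ret;
}

int main(void)
{
	return poweron();
}
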
exynos_drm_dpi.c
47 struct exynos_dpi *ctx = connector_to_dpi(connector); exynos_dpi_detect() local
49 if (ctx->panel && !ctx->panel->connector) exynos_dpi_detect()
50 drm_panel_attach(ctx->panel, &ctx->connector); exynos_dpi_detect()
70 struct exynos_dpi *ctx = connector_to_dpi(connector); exynos_dpi_get_modes() local
73 if (ctx->vm) { exynos_dpi_get_modes()
81 drm_display_mode_from_videomode(ctx->vm, mode); exynos_dpi_get_modes()
87 if (ctx->panel) exynos_dpi_get_modes()
88 return ctx->panel->funcs->get_modes(ctx->panel); exynos_dpi_get_modes()
96 struct exynos_dpi *ctx = connector_to_dpi(connector); exynos_dpi_best_encoder() local
98 return ctx->encoder; exynos_dpi_best_encoder()
109 struct exynos_dpi *ctx = display_to_dpi(display); exynos_dpi_create_connector() local
110 struct drm_connector *connector = &ctx->connector; exynos_dpi_create_connector()
113 ctx->encoder = encoder; exynos_dpi_create_connector()
132 static void exynos_dpi_poweron(struct exynos_dpi *ctx) exynos_dpi_poweron() argument
134 if (ctx->panel) { exynos_dpi_poweron()
135 drm_panel_prepare(ctx->panel); exynos_dpi_poweron()
136 drm_panel_enable(ctx->panel); exynos_dpi_poweron()
140 static void exynos_dpi_poweroff(struct exynos_dpi *ctx) exynos_dpi_poweroff() argument
142 if (ctx->panel) { exynos_dpi_poweroff()
143 drm_panel_disable(ctx->panel); exynos_dpi_poweroff()
144 drm_panel_unprepare(ctx->panel); exynos_dpi_poweroff()
150 struct exynos_dpi *ctx = display_to_dpi(display); exynos_dpi_dpms() local
154 if (ctx->dpms_mode != DRM_MODE_DPMS_ON) exynos_dpi_dpms()
155 exynos_dpi_poweron(ctx); exynos_dpi_dpms()
160 if (ctx->dpms_mode == DRM_MODE_DPMS_ON) exynos_dpi_dpms()
161 exynos_dpi_poweroff(ctx); exynos_dpi_dpms()
166 ctx->dpms_mode = mode; exynos_dpi_dpms()
262 static int exynos_dpi_parse_dt(struct exynos_dpi *ctx) exynos_dpi_parse_dt() argument
264 struct device *dev = ctx->dev; exynos_dpi_parse_dt()
268 ctx->panel_node = exynos_dpi_of_find_panel_node(dev); exynos_dpi_parse_dt()
277 vm = devm_kzalloc(dev, sizeof(*ctx->vm), GFP_KERNEL); exynos_dpi_parse_dt()
287 ctx->vm = vm; exynos_dpi_parse_dt()
292 if (!ctx->panel_node) exynos_dpi_parse_dt()
300 struct exynos_dpi *ctx; exynos_dpi_probe() local
303 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); exynos_dpi_probe()
304 if (!ctx) exynos_dpi_probe()
307 ctx->display.type = EXYNOS_DISPLAY_TYPE_LCD; exynos_dpi_probe()
308 ctx->display.ops = &exynos_dpi_display_ops; exynos_dpi_probe()
309 ctx->dev = dev; exynos_dpi_probe()
310 ctx->dpms_mode = DRM_MODE_DPMS_OFF; exynos_dpi_probe()
314 ctx->display.type); exynos_dpi_probe()
318 ret = exynos_dpi_parse_dt(ctx); exynos_dpi_probe()
320 devm_kfree(dev, ctx); exynos_dpi_probe()
324 if (ctx->panel_node) { exynos_dpi_probe()
325 ctx->panel = of_drm_find_panel(ctx->panel_node); exynos_dpi_probe()
326 if (!ctx->panel) { exynos_dpi_probe()
333 return &ctx->display; exynos_dpi_probe()
343 struct exynos_dpi *ctx = display_to_dpi(display); exynos_dpi_remove() local
345 exynos_dpi_dpms(&ctx->display, DRM_MODE_DPMS_OFF); exynos_dpi_remove()
347 if (ctx->panel) exynos_dpi_remove()
348 drm_panel_detach(ctx->panel); exynos_dpi_remove()
350 exynos_drm_component_del(ctx->dev, EXYNOS_DEVICE_TYPE_CONNECTOR); exynos_dpi_remove()
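
The dpi results above follow the drm_panel sequencing contract: prepare then enable on power-up, disable then unprepare on power-down, so tear-down mirrors bring-up, and a missing panel is tolerated. A stub model with the panel ops replaced by prints:

#include <stdio.h>

struct panel { const char *name; };

static void panel_prepare(struct panel *p)   { printf("%s: prepare\n", p->name); }
static void panel_enable(struct panel *p)    { printf("%s: enable\n", p->name); }
static void panel_disable(struct panel *p)   { printf("%s: disable\n", p->name); }
static void panel_unprepare(struct panel *p) { printf("%s: unprepare\n", p->name); }

static void dpi_poweron(struct panel *p)
{
	if (p) {                          /* panel is optional, as in the driver */
		panel_prepare(p);
		panel_enable(p);
	}
}

static void dpi_poweroff(struct panel *p)
{
	if (p) {
		panel_disable(p);
		panel_unprepare(p);
	}
}

int main(void)
{
	struct panel p = { "lcd" };

	dpi_poweron(&p);
	dpi_poweroff(&p);
	return 0;
}
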
exynos_drm_fimc.c
172 static u32 fimc_read(struct fimc_context *ctx, u32 reg) fimc_read() argument
174 return readl(ctx->regs + reg); fimc_read()
177 static void fimc_write(struct fimc_context *ctx, u32 val, u32 reg) fimc_write() argument
179 writel(val, ctx->regs + reg); fimc_write()
182 static void fimc_set_bits(struct fimc_context *ctx, u32 reg, u32 bits) fimc_set_bits() argument
184 void __iomem *r = ctx->regs + reg; fimc_set_bits()
189 static void fimc_clear_bits(struct fimc_context *ctx, u32 reg, u32 bits) fimc_clear_bits() argument
191 void __iomem *r = ctx->regs + reg; fimc_clear_bits()
196 static void fimc_sw_reset(struct fimc_context *ctx) fimc_sw_reset() argument
201 cfg = fimc_read(ctx, EXYNOS_CISTATUS); fimc_sw_reset()
203 fimc_clear_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID); fimc_sw_reset()
205 fimc_set_bits(ctx, EXYNOS_CISRCFMT, EXYNOS_CISRCFMT_ITU601_8BIT); fimc_sw_reset()
208 fimc_clear_bits(ctx, EXYNOS_CIIMGCPT, fimc_sw_reset()
212 fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST); fimc_sw_reset()
215 fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_SWRST); fimc_sw_reset()
218 fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ); fimc_sw_reset()
221 static int fimc_set_camblk_fimd0_wb(struct fimc_context *ctx) fimc_set_camblk_fimd0_wb() argument
223 return regmap_update_bits(ctx->sysreg, SYSREG_CAMERA_BLK, fimc_set_camblk_fimd0_wb()
225 ctx->id << SYSREG_FIMD0WB_DEST_SHIFT); fimc_set_camblk_fimd0_wb()
228 static void fimc_set_type_ctrl(struct fimc_context *ctx, enum fimc_wb wb) fimc_set_type_ctrl() argument
234 cfg = fimc_read(ctx, EXYNOS_CIGCTRL); fimc_set_type_ctrl()
260 fimc_write(ctx, cfg, EXYNOS_CIGCTRL); fimc_set_type_ctrl()
263 static void fimc_set_polarity(struct fimc_context *ctx, fimc_set_polarity() argument
273 cfg = fimc_read(ctx, EXYNOS_CIGCTRL); fimc_set_polarity()
286 fimc_write(ctx, cfg, EXYNOS_CIGCTRL); fimc_set_polarity()
289 static void fimc_handle_jpeg(struct fimc_context *ctx, bool enable) fimc_handle_jpeg() argument
295 cfg = fimc_read(ctx, EXYNOS_CIGCTRL); fimc_handle_jpeg()
301 fimc_write(ctx, cfg, EXYNOS_CIGCTRL); fimc_handle_jpeg()
304 static void fimc_mask_irq(struct fimc_context *ctx, bool enable) fimc_mask_irq() argument
310 cfg = fimc_read(ctx, EXYNOS_CIGCTRL); fimc_mask_irq()
316 fimc_write(ctx, cfg, EXYNOS_CIGCTRL); fimc_mask_irq()
319 static void fimc_clear_irq(struct fimc_context *ctx) fimc_clear_irq() argument
321 fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_CLR); fimc_clear_irq()
324 static bool fimc_check_ovf(struct fimc_context *ctx) fimc_check_ovf() argument
326 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_check_ovf()
329 status = fimc_read(ctx, EXYNOS_CISTATUS); fimc_check_ovf()
336 fimc_set_bits(ctx, EXYNOS_CIWDOFST, fimc_check_ovf()
341 ctx->id, status); fimc_check_ovf()
348 static bool fimc_check_frame_end(struct fimc_context *ctx) fimc_check_frame_end() argument
352 cfg = fimc_read(ctx, EXYNOS_CISTATUS); fimc_check_frame_end()
360 fimc_write(ctx, cfg, EXYNOS_CISTATUS); fimc_check_frame_end()
365 static int fimc_get_buf_id(struct fimc_context *ctx) fimc_get_buf_id() argument
370 cfg = fimc_read(ctx, EXYNOS_CISTATUS2); fimc_get_buf_id()
391 static void fimc_handle_lastend(struct fimc_context *ctx, bool enable) fimc_handle_lastend() argument
397 cfg = fimc_read(ctx, EXYNOS_CIOCTRL); fimc_handle_lastend()
403 fimc_write(ctx, cfg, EXYNOS_CIOCTRL); fimc_handle_lastend()
407 static int fimc_src_set_fmt_order(struct fimc_context *ctx, u32 fmt) fimc_src_set_fmt_order() argument
409 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_src_set_fmt_order()
415 cfg = fimc_read(ctx, EXYNOS_CISCCTRL); fimc_src_set_fmt_order()
421 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); fimc_src_set_fmt_order()
426 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); fimc_src_set_fmt_order()
434 cfg = fimc_read(ctx, EXYNOS_MSCTRL); fimc_src_set_fmt_order()
473 fimc_write(ctx, cfg, EXYNOS_MSCTRL); fimc_src_set_fmt_order()
480 struct fimc_context *ctx = get_fimc_context(dev); fimc_src_set_fmt() local
481 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_src_set_fmt()
486 cfg = fimc_read(ctx, EXYNOS_MSCTRL); fimc_src_set_fmt()
520 fimc_write(ctx, cfg, EXYNOS_MSCTRL); fimc_src_set_fmt()
522 cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); fimc_src_set_fmt()
527 fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); fimc_src_set_fmt()
529 return fimc_src_set_fmt_order(ctx, fmt); fimc_src_set_fmt()
536 struct fimc_context *ctx = get_fimc_context(dev); fimc_src_set_transf() local
537 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_src_set_transf()
542 cfg1 = fimc_read(ctx, EXYNOS_MSCTRL); fimc_src_set_transf()
546 cfg2 = fimc_read(ctx, EXYNOS_CITRGFMT); fimc_src_set_transf()
585 fimc_write(ctx, cfg1, EXYNOS_MSCTRL); fimc_src_set_transf()
586 fimc_write(ctx, cfg2, EXYNOS_CITRGFMT); fimc_src_set_transf()
592 static int fimc_set_window(struct fimc_context *ctx, fimc_set_window() argument
611 cfg = fimc_read(ctx, EXYNOS_CIWDOFST); fimc_set_window()
617 fimc_write(ctx, cfg, EXYNOS_CIWDOFST); fimc_set_window()
621 fimc_write(ctx, cfg, EXYNOS_CIWDOFST2); fimc_set_window()
629 struct fimc_context *ctx = get_fimc_context(dev); fimc_src_set_size() local
641 fimc_write(ctx, cfg, EXYNOS_ORGISIZE); fimc_src_set_size()
653 cfg = fimc_read(ctx, EXYNOS_CIREAL_ISIZE); fimc_src_set_size()
658 fimc_write(ctx, cfg, EXYNOS_CIREAL_ISIZE); fimc_src_set_size()
667 fimc_write(ctx, cfg, EXYNOS_CISRCFMT); fimc_src_set_size()
672 fimc_write(ctx, cfg, EXYNOS_CIIYOFF); fimc_src_set_size()
675 fimc_write(ctx, cfg, EXYNOS_CIICBOFF); fimc_src_set_size()
678 fimc_write(ctx, cfg, EXYNOS_CIICROFF); fimc_src_set_size()
680 return fimc_set_window(ctx, &img_pos, &img_sz); fimc_src_set_size()
687 struct fimc_context *ctx = get_fimc_context(dev); fimc_src_set_addr() local
688 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_src_set_addr()
712 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y], fimc_src_set_addr()
716 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], fimc_src_set_addr()
718 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], fimc_src_set_addr()
721 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], fimc_src_set_addr()
723 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], fimc_src_set_addr()
728 fimc_write(ctx, 0x0, EXYNOS_CIIYSA0); fimc_src_set_addr()
729 fimc_write(ctx, 0x0, EXYNOS_CIICBSA0); fimc_src_set_addr()
730 fimc_write(ctx, 0x0, EXYNOS_CIICRSA0); fimc_src_set_addr()
747 static int fimc_dst_set_fmt_order(struct fimc_context *ctx, u32 fmt) fimc_dst_set_fmt_order() argument
749 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_dst_set_fmt_order()
755 cfg = fimc_read(ctx, EXYNOS_CISCCTRL); fimc_dst_set_fmt_order()
761 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); fimc_dst_set_fmt_order()
765 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); fimc_dst_set_fmt_order()
770 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); fimc_dst_set_fmt_order()
778 cfg = fimc_read(ctx, EXYNOS_CIOCTRL); fimc_dst_set_fmt_order()
819 fimc_write(ctx, cfg, EXYNOS_CIOCTRL); fimc_dst_set_fmt_order()
826 struct fimc_context *ctx = get_fimc_context(dev); fimc_dst_set_fmt() local
827 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_dst_set_fmt()
832 cfg = fimc_read(ctx, EXYNOS_CIEXTEN); fimc_dst_set_fmt()
836 fimc_write(ctx, cfg, EXYNOS_CIEXTEN); fimc_dst_set_fmt()
839 fimc_write(ctx, cfg, EXYNOS_CIEXTEN); fimc_dst_set_fmt()
841 cfg = fimc_read(ctx, EXYNOS_CITRGFMT); fimc_dst_set_fmt()
873 fimc_write(ctx, cfg, EXYNOS_CITRGFMT); fimc_dst_set_fmt()
876 cfg = fimc_read(ctx, EXYNOS_CIDMAPARAM); fimc_dst_set_fmt()
881 fimc_write(ctx, cfg, EXYNOS_CIDMAPARAM); fimc_dst_set_fmt()
883 return fimc_dst_set_fmt_order(ctx, fmt); fimc_dst_set_fmt()
890 struct fimc_context *ctx = get_fimc_context(dev); fimc_dst_set_transf() local
891 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_dst_set_transf()
896 cfg = fimc_read(ctx, EXYNOS_CITRGFMT); fimc_dst_set_transf()
936 fimc_write(ctx, cfg, EXYNOS_CITRGFMT); fimc_dst_set_transf()
942 static int fimc_set_prescaler(struct fimc_context *ctx, struct fimc_scaler *sc, fimc_set_prescaler() argument
945 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_set_prescaler()
952 cfg_ext = fimc_read(ctx, EXYNOS_CITRGFMT); fimc_set_prescaler()
1001 fimc_write(ctx, cfg, EXYNOS_CISCPRERATIO); fimc_set_prescaler()
1005 fimc_write(ctx, cfg, EXYNOS_CISCPREDST); fimc_set_prescaler()
1010 static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc) fimc_set_scaler() argument
1019 cfg = fimc_read(ctx, EXYNOS_CISCCTRL); fimc_set_scaler()
1039 fimc_write(ctx, cfg, EXYNOS_CISCCTRL); fimc_set_scaler()
1041 cfg_ext = fimc_read(ctx, EXYNOS_CIEXTEN); fimc_set_scaler()
1046 fimc_write(ctx, cfg_ext, EXYNOS_CIEXTEN); fimc_set_scaler()
1052 struct fimc_context *ctx = get_fimc_context(dev); fimc_dst_set_size() local
1064 fimc_write(ctx, cfg, EXYNOS_ORGOSIZE); fimc_dst_set_size()
1069 cfg = fimc_read(ctx, EXYNOS_CIGCTRL); fimc_dst_set_size()
1077 fimc_write(ctx, cfg, EXYNOS_CIGCTRL); fimc_dst_set_size()
1087 cfg = fimc_read(ctx, EXYNOS_CITRGFMT); fimc_dst_set_size()
1092 fimc_write(ctx, cfg, EXYNOS_CITRGFMT); fimc_dst_set_size()
1096 fimc_write(ctx, cfg, EXYNOS_CITAREA); fimc_dst_set_size()
1101 fimc_write(ctx, cfg, EXYNOS_CIOYOFF); fimc_dst_set_size()
1104 fimc_write(ctx, cfg, EXYNOS_CIOCBOFF); fimc_dst_set_size()
1107 fimc_write(ctx, cfg, EXYNOS_CIOCROFF); fimc_dst_set_size()
1112 static void fimc_dst_set_buf_seq(struct fimc_context *ctx, u32 buf_id, fimc_dst_set_buf_seq() argument
1121 spin_lock_irqsave(&ctx->lock, flags); fimc_dst_set_buf_seq()
1123 cfg = fimc_read(ctx, EXYNOS_CIFCNTSEQ); fimc_dst_set_buf_seq()
1130 fimc_write(ctx, cfg, EXYNOS_CIFCNTSEQ); fimc_dst_set_buf_seq()
1135 fimc_mask_irq(ctx, true); fimc_dst_set_buf_seq()
1137 fimc_mask_irq(ctx, false); fimc_dst_set_buf_seq()
1139 spin_unlock_irqrestore(&ctx->lock, flags); fimc_dst_set_buf_seq()
1146 struct fimc_context *ctx = get_fimc_context(dev); fimc_dst_set_addr() local
1147 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_dst_set_addr()
1172 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_Y], fimc_dst_set_addr()
1176 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], fimc_dst_set_addr()
1178 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], fimc_dst_set_addr()
1181 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CB], fimc_dst_set_addr()
1183 fimc_write(ctx, buf_info->base[EXYNOS_DRM_PLANAR_CR], fimc_dst_set_addr()
1188 fimc_write(ctx, 0x0, EXYNOS_CIOYSA(buf_id)); fimc_dst_set_addr()
1189 fimc_write(ctx, 0x0, EXYNOS_CIOCBSA(buf_id)); fimc_dst_set_addr()
1190 fimc_write(ctx, 0x0, EXYNOS_CIOCRSA(buf_id)); fimc_dst_set_addr()
1197 fimc_dst_set_buf_seq(ctx, buf_id, buf_type); fimc_dst_set_addr()
1209 static int fimc_clk_ctrl(struct fimc_context *ctx, bool enable) fimc_clk_ctrl() argument
1214 clk_prepare_enable(ctx->clocks[FIMC_CLK_GATE]); fimc_clk_ctrl()
1215 clk_prepare_enable(ctx->clocks[FIMC_CLK_WB_A]); fimc_clk_ctrl()
1216 ctx->suspended = false; fimc_clk_ctrl()
1218 clk_disable_unprepare(ctx->clocks[FIMC_CLK_GATE]); fimc_clk_ctrl()
1219 clk_disable_unprepare(ctx->clocks[FIMC_CLK_WB_A]); fimc_clk_ctrl()
1220 ctx->suspended = true; fimc_clk_ctrl()
1228 struct fimc_context *ctx = dev_id; fimc_irq_handler() local
1229 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_irq_handler()
1235 DRM_DEBUG_KMS("fimc id[%d]\n", ctx->id); fimc_irq_handler()
1237 fimc_clear_irq(ctx); fimc_irq_handler()
1238 if (fimc_check_ovf(ctx)) fimc_irq_handler()
1241 if (!fimc_check_frame_end(ctx)) fimc_irq_handler()
1244 buf_id = fimc_get_buf_id(ctx); fimc_irq_handler()
1250 fimc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); fimc_irq_handler()
1306 struct fimc_context *ctx = get_fimc_context(dev); fimc_ippdrv_check_property() local
1307 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_ippdrv_check_property()
1416 static void fimc_clear_addr(struct fimc_context *ctx) fimc_clear_addr() argument
1421 fimc_write(ctx, 0, EXYNOS_CIIYSA(i)); fimc_clear_addr()
1422 fimc_write(ctx, 0, EXYNOS_CIICBSA(i)); fimc_clear_addr()
1423 fimc_write(ctx, 0, EXYNOS_CIICRSA(i)); fimc_clear_addr()
1427 fimc_write(ctx, 0, EXYNOS_CIOYSA(i)); fimc_clear_addr()
1428 fimc_write(ctx, 0, EXYNOS_CIOCBSA(i)); fimc_clear_addr()
1429 fimc_write(ctx, 0, EXYNOS_CIOCRSA(i)); fimc_clear_addr()
1435 struct fimc_context *ctx = get_fimc_context(dev); fimc_ippdrv_reset() local
1438 fimc_sw_reset(ctx); fimc_ippdrv_reset()
1441 memset(&ctx->sc, 0x0, sizeof(ctx->sc)); fimc_ippdrv_reset()
1443 fimc_clear_addr(ctx); fimc_ippdrv_reset()
1450 struct fimc_context *ctx = get_fimc_context(dev); fimc_ippdrv_start() local
1451 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_ippdrv_start()
1469 fimc_mask_irq(ctx, true); fimc_ippdrv_start()
1476 ret = fimc_set_prescaler(ctx, &ctx->sc,
1485 fimc_handle_jpeg(ctx, false);
1486 fimc_set_scaler(ctx, &ctx->sc);
1487 fimc_set_polarity(ctx, &ctx->pol);
1491 fimc_set_type_ctrl(ctx, FIMC_WB_NONE);
1492 fimc_handle_lastend(ctx, false);
1495 cfg0 = fimc_read(ctx, EXYNOS_MSCTRL);
1498 fimc_write(ctx, cfg0, EXYNOS_MSCTRL);
1501 fimc_set_type_ctrl(ctx, FIMC_WB_A);
1502 fimc_handle_lastend(ctx, true);
1505 ret = fimc_set_camblk_fimd0_wb(ctx);
1523 fimc_write(ctx, 0x0, EXYNOS_CISTATUS);
1525 cfg0 = fimc_read(ctx, EXYNOS_CIIMGCPT);
1530 cfg1 = fimc_read(ctx, EXYNOS_CISCCTRL);
1535 fimc_write(ctx, cfg1, EXYNOS_CISCCTRL);
1539 fimc_write(ctx, cfg0, EXYNOS_CIIMGCPT);
1542 fimc_clear_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE);
1544 fimc_clear_bits(ctx, EXYNOS_CIOCTRL, EXYNOS_CIOCTRL_WEAVE_MASK);
1547 fimc_set_bits(ctx, EXYNOS_MSCTRL, EXYNOS_MSCTRL_ENVID);
1554 struct fimc_context *ctx = get_fimc_context(dev); fimc_ippdrv_stop() local
1563 cfg = fimc_read(ctx, EXYNOS_MSCTRL); fimc_ippdrv_stop()
1566 fimc_write(ctx, cfg, EXYNOS_MSCTRL); fimc_ippdrv_stop()
1577 fimc_mask_irq(ctx, false); fimc_ippdrv_stop()
1580 fimc_write(ctx, 0x0, EXYNOS_CIFCNTSEQ); fimc_ippdrv_stop()
1583 fimc_clear_bits(ctx, EXYNOS_CISCCTRL, EXYNOS_CISCCTRL_SCALERSTART); fimc_ippdrv_stop()
1586 fimc_clear_bits(ctx, EXYNOS_CIIMGCPT, fimc_ippdrv_stop()
1590 fimc_set_bits(ctx, EXYNOS_CIGCTRL, EXYNOS_CIGCTRL_IRQ_END_DISABLE); fimc_ippdrv_stop()
1593 static void fimc_put_clocks(struct fimc_context *ctx) fimc_put_clocks() argument
1598 if (IS_ERR(ctx->clocks[i])) fimc_put_clocks()
1600 clk_put(ctx->clocks[i]); fimc_put_clocks()
1601 ctx->clocks[i] = ERR_PTR(-EINVAL); fimc_put_clocks()
1605 static int fimc_setup_clocks(struct fimc_context *ctx) fimc_setup_clocks() argument
1607 struct device *fimc_dev = ctx->ippdrv.dev; fimc_setup_clocks()
1612 ctx->clocks[i] = ERR_PTR(-EINVAL); fimc_setup_clocks()
1620 ctx->clocks[i] = clk_get(dev, fimc_clock_names[i]); fimc_setup_clocks()
1621 if (IS_ERR(ctx->clocks[i])) { fimc_setup_clocks()
1624 ret = PTR_ERR(ctx->clocks[i]); fimc_setup_clocks()
1632 if (!IS_ERR(ctx->clocks[FIMC_CLK_PARENT])) { fimc_setup_clocks()
1633 ret = clk_set_parent(ctx->clocks[FIMC_CLK_MUX], fimc_setup_clocks()
1634 ctx->clocks[FIMC_CLK_PARENT]); fimc_setup_clocks()
1641 ret = clk_set_rate(ctx->clocks[FIMC_CLK_LCLK], ctx->clk_frequency); fimc_setup_clocks()
1645 ret = clk_prepare_enable(ctx->clocks[FIMC_CLK_LCLK]); fimc_setup_clocks()
1649 fimc_put_clocks(ctx); fimc_setup_clocks()
1653 static int fimc_parse_dt(struct fimc_context *ctx) fimc_parse_dt() argument
1655 struct device_node *node = ctx->ippdrv.dev->of_node; fimc_parse_dt()
1662 &ctx->clk_frequency)) fimc_parse_dt()
1663 ctx->clk_frequency = FIMC_DEFAULT_LCLK_FREQUENCY; fimc_parse_dt()
1665 ctx->id = of_alias_get_id(node, "fimc"); fimc_parse_dt()
1667 if (ctx->id < 0) { fimc_parse_dt()
1668 dev_err(ctx->ippdrv.dev, "failed to get node alias id.\n"); fimc_parse_dt()
1678 struct fimc_context *ctx; fimc_probe() local
1688 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); fimc_probe()
1689 if (!ctx) fimc_probe()
1692 ctx->ippdrv.dev = dev; fimc_probe()
1694 ret = fimc_parse_dt(ctx); fimc_probe()
1698 ctx->sysreg = syscon_regmap_lookup_by_phandle(dev->of_node, fimc_probe()
1700 if (IS_ERR(ctx->sysreg)) { fimc_probe()
1702 return PTR_ERR(ctx->sysreg); fimc_probe()
1706 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); fimc_probe()
1707 ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); fimc_probe()
1708 if (IS_ERR(ctx->regs)) fimc_probe()
1709 return PTR_ERR(ctx->regs); fimc_probe()
1718 ctx->irq = res->start; fimc_probe()
1719 ret = devm_request_threaded_irq(dev, ctx->irq, NULL, fimc_irq_handler, fimc_probe()
1720 IRQF_ONESHOT, "drm_fimc", ctx); fimc_probe()
1726 ret = fimc_setup_clocks(ctx); fimc_probe()
1730 ippdrv = &ctx->ippdrv; fimc_probe()
1743 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); fimc_probe()
1745 spin_lock_init(&ctx->lock); fimc_probe()
1746 platform_set_drvdata(pdev, ctx); fimc_probe()
1764 fimc_put_clocks(ctx); fimc_probe()
1772 struct fimc_context *ctx = get_fimc_context(dev); fimc_remove() local
1773 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; fimc_remove()
1777 fimc_put_clocks(ctx); fimc_remove()
1787 struct fimc_context *ctx = get_fimc_context(dev); fimc_suspend() local
1789 DRM_DEBUG_KMS("id[%d]\n", ctx->id); fimc_suspend()
1794 return fimc_clk_ctrl(ctx, false); fimc_suspend()
1799 struct fimc_context *ctx = get_fimc_context(dev); fimc_resume() local
1801 DRM_DEBUG_KMS("id[%d]\n", ctx->id); fimc_resume()
1804 return fimc_clk_ctrl(ctx, true); fimc_resume()
1813 struct fimc_context *ctx = get_fimc_context(dev); fimc_runtime_suspend() local
1815 DRM_DEBUG_KMS("id[%d]\n", ctx->id); fimc_runtime_suspend()
1817 return fimc_clk_ctrl(ctx, false); fimc_runtime_suspend()
1822 struct fimc_context *ctx = get_fimc_context(dev); fimc_runtime_resume() local
1824 DRM_DEBUG_KMS("id[%d]\n", ctx->id); fimc_runtime_resume()
1826 return fimc_clk_ctrl(ctx, true); fimc_runtime_resume()
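
Every register access in the fimc results goes through the fimc_read/fimc_write/fimc_set_bits/fimc_clear_bits helpers, which keeps the read-modify-write sequences in one place. A user-space model with the register block as a plain array (offsets are byte offsets, as with readl/writel on ctx->regs):

#include <stdint.h>
#include <stdio.h>

struct fimc_model { uint32_t regs[256]; };

static uint32_t fimc_read(struct fimc_model *ctx, uint32_t reg)
{
	return ctx->regs[reg / 4];
}

static void fimc_write(struct fimc_model *ctx, uint32_t val, uint32_t reg)
{
	ctx->regs[reg / 4] = val;
}

static void fimc_set_bits(struct fimc_model *ctx, uint32_t reg, uint32_t bits)
{
	fimc_write(ctx, fimc_read(ctx, reg) | bits, reg);
}

static void fimc_clear_bits(struct fimc_model *ctx, uint32_t reg, uint32_t bits)
{
	fimc_write(ctx, fimc_read(ctx, reg) & ~bits, reg);
}

int main(void)
{
	struct fimc_model ctx = { { 0 } };

	fimc_set_bits(&ctx, 0x04, 1u << 3);
	fimc_clear_bits(&ctx, 0x04, 1u << 3);
	printf("%#x\n", (unsigned int)fimc_read(&ctx, 0x04)); /* back to 0 */
	return 0;
}
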
exynos_mixer.c
167 static void mixer_regs_dump(struct mixer_context *ctx) mixer_regs_dump() argument
172 (u32)readl(ctx->mixer_res.mixer_regs + reg_id)); \ mixer_regs_dump()
199 static void vp_regs_dump(struct mixer_context *ctx) vp_regs_dump() argument
204 (u32) readl(ctx->mixer_res.vp_regs + reg_id)); \ vp_regs_dump()
256 static void mixer_vsync_set_update(struct mixer_context *ctx, bool enable) mixer_vsync_set_update() argument
258 struct mixer_resources *res = &ctx->mixer_res; mixer_vsync_set_update()
264 if (ctx->vp_enabled) mixer_vsync_set_update()
269 static void mixer_cfg_scan(struct mixer_context *ctx, unsigned int height) mixer_cfg_scan() argument
271 struct mixer_resources *res = &ctx->mixer_res; mixer_cfg_scan()
275 val = (ctx->interlace ? MXR_CFG_SCAN_INTERLACE : mixer_cfg_scan()
278 if (ctx->mxr_ver != MXR_VER_128_0_0_184) { mixer_cfg_scan()
295 static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height) mixer_cfg_rgb_fmt() argument
297 struct mixer_resources *res = &ctx->mixer_res; mixer_cfg_rgb_fmt()
336 static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win, mixer_cfg_layer() argument
339 struct mixer_resources *res = &ctx->mixer_res; mixer_cfg_layer()
350 if (ctx->vp_enabled) { mixer_cfg_layer()
364 static void mixer_run(struct mixer_context *ctx) mixer_run() argument
366 struct mixer_resources *res = &ctx->mixer_res; mixer_run()
371 static void mixer_stop(struct mixer_context *ctx) mixer_stop() argument
373 struct mixer_resources *res = &ctx->mixer_res; mixer_stop()
383 static void vp_video_buffer(struct mixer_context *ctx, unsigned int win) vp_video_buffer() argument
385 struct mixer_resources *res = &ctx->mixer_res; vp_video_buffer()
393 plane = &ctx->planes[win]; vp_video_buffer()
412 ctx->interlace = true; vp_video_buffer()
421 ctx->interlace = false; vp_video_buffer()
427 mixer_vsync_set_update(ctx, false); vp_video_buffer()
430 val = (ctx->interlace ? ~0 : 0); vp_video_buffer()
453 if (ctx->interlace) { vp_video_buffer()
472 mixer_cfg_scan(ctx, plane->mode_height); vp_video_buffer()
473 mixer_cfg_rgb_fmt(ctx, plane->mode_height); vp_video_buffer()
474 mixer_cfg_layer(ctx, win, true); vp_video_buffer()
475 mixer_run(ctx); vp_video_buffer()
477 mixer_vsync_set_update(ctx, true); vp_video_buffer()
480 mixer_regs_dump(ctx); vp_video_buffer()
481 vp_regs_dump(ctx); vp_video_buffer()
484 static void mixer_layer_update(struct mixer_context *ctx) mixer_layer_update() argument
486 struct mixer_resources *res = &ctx->mixer_res; mixer_layer_update()
515 static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win) mixer_graph_buffer() argument
517 struct mixer_resources *res = &ctx->mixer_res; mixer_graph_buffer()
526 plane = &ctx->planes[win]; mixer_graph_buffer()
566 ctx->interlace = true; mixer_graph_buffer()
568 ctx->interlace = false; mixer_graph_buffer()
571 mixer_vsync_set_update(ctx, false); mixer_graph_buffer()
582 if (ctx->mxr_ver == MXR_VER_128_0_0_184 && mixer_graph_buffer()
608 mixer_cfg_scan(ctx, plane->mode_height); mixer_graph_buffer()
609 mixer_cfg_rgb_fmt(ctx, plane->mode_height); mixer_graph_buffer()
610 mixer_cfg_layer(ctx, win, true); mixer_graph_buffer()
613 if (ctx->mxr_ver == MXR_VER_16_0_33_0 || mixer_graph_buffer()
614 ctx->mxr_ver == MXR_VER_128_0_0_184) mixer_graph_buffer()
615 mixer_layer_update(ctx); mixer_graph_buffer()
617 mixer_run(ctx); mixer_graph_buffer()
619 mixer_vsync_set_update(ctx, true); mixer_graph_buffer()
622 mixer_regs_dump(ctx); mixer_graph_buffer()
625 static void vp_win_reset(struct mixer_context *ctx) vp_win_reset() argument
627 struct mixer_resources *res = &ctx->mixer_res; vp_win_reset()
640 static void mixer_win_reset(struct mixer_context *ctx) mixer_win_reset() argument
642 struct mixer_resources *res = &ctx->mixer_res; mixer_win_reset()
647 mixer_vsync_set_update(ctx, false); mixer_win_reset()
666 if (ctx->vp_enabled) mixer_win_reset()
692 if (ctx->vp_enabled) { mixer_win_reset()
694 vp_win_reset(ctx); mixer_win_reset()
701 if (ctx->vp_enabled) mixer_win_reset()
704 mixer_vsync_set_update(ctx, true); mixer_win_reset()
710 struct mixer_context *ctx = arg; mixer_irq_handler() local
711 struct mixer_resources *res = &ctx->mixer_res; mixer_irq_handler()
722 if (ctx->interlace) { mixer_irq_handler()
734 drm_handle_vblank(ctx->drm_dev, ctx->pipe); mixer_irq_handler()
735 exynos_drm_crtc_finish_pageflip(ctx->drm_dev, ctx->pipe); mixer_irq_handler()
738 if (atomic_read(&ctx->wait_vsync_event)) { mixer_irq_handler()
739 atomic_set(&ctx->wait_vsync_event, 0); mixer_irq_handler()
740 wake_up(&ctx->wait_vsync_queue); mixer_irq_handler()
899 struct mixer_context *mixer_ctx = crtc->ctx; mixer_enable_vblank()
916 struct mixer_context *mixer_ctx = crtc->ctx; mixer_disable_vblank()
925 struct mixer_context *mixer_ctx = crtc->ctx; mixer_win_commit()
946 struct mixer_context *mixer_ctx = crtc->ctx; mixer_win_disable()
973 struct mixer_context *mixer_ctx = crtc->ctx; mixer_wait_for_vblank()
1003 static void mixer_window_suspend(struct mixer_context *ctx) mixer_window_suspend() argument
1009 plane = &ctx->planes[i]; mixer_window_suspend()
1011 mixer_win_disable(ctx->crtc, i); mixer_window_suspend()
1013 mixer_wait_for_vblank(ctx->crtc); mixer_window_suspend()
1016 static void mixer_window_resume(struct mixer_context *ctx) mixer_window_resume() argument
1022 plane = &ctx->planes[i]; mixer_window_resume()
1026 mixer_win_commit(ctx->crtc, i); mixer_window_resume()
1030 static void mixer_poweron(struct mixer_context *ctx) mixer_poweron() argument
1032 struct mixer_resources *res = &ctx->mixer_res; mixer_poweron()
1034 mutex_lock(&ctx->mixer_mutex); mixer_poweron()
1035 if (ctx->powered) { mixer_poweron()
1036 mutex_unlock(&ctx->mixer_mutex); mixer_poweron()
1040 mutex_unlock(&ctx->mixer_mutex); mixer_poweron()
1042 pm_runtime_get_sync(ctx->dev); mixer_poweron()
1046 if (ctx->vp_enabled) { mixer_poweron()
1048 if (ctx->has_sclk) mixer_poweron()
1052 mutex_lock(&ctx->mixer_mutex); mixer_poweron()
1053 ctx->powered = true; mixer_poweron()
1054 mutex_unlock(&ctx->mixer_mutex); mixer_poweron()
1058 mixer_reg_write(res, MXR_INT_EN, ctx->int_en); mixer_poweron()
1059 mixer_win_reset(ctx); mixer_poweron()
1061 mixer_window_resume(ctx); mixer_poweron()
1064 static void mixer_poweroff(struct mixer_context *ctx) mixer_poweroff() argument
1066 struct mixer_resources *res = &ctx->mixer_res; mixer_poweroff()
1068 mutex_lock(&ctx->mixer_mutex); mixer_poweroff()
1069 if (!ctx->powered) { mixer_poweroff()
1070 mutex_unlock(&ctx->mixer_mutex); mixer_poweroff()
1073 mutex_unlock(&ctx->mixer_mutex); mixer_poweroff()
1075 mixer_stop(ctx); mixer_poweroff()
1076 mixer_regs_dump(ctx); mixer_poweroff()
1077 mixer_window_suspend(ctx); mixer_poweroff()
1079 ctx->int_en = mixer_reg_read(res, MXR_INT_EN); mixer_poweroff()
1081 mutex_lock(&ctx->mixer_mutex); mixer_poweroff()
1082 ctx->powered = false; mixer_poweroff()
1083 mutex_unlock(&ctx->mixer_mutex); mixer_poweroff()
1087 if (ctx->vp_enabled) { mixer_poweroff()
1089 if (ctx->has_sclk) mixer_poweroff()
1093 pm_runtime_put_sync(ctx->dev); mixer_poweroff()
1100 mixer_poweron(crtc->ctx); mixer_dpms()
1105 mixer_poweroff(crtc->ctx); mixer_dpms()
1199 struct mixer_context *ctx = dev_get_drvdata(dev); mixer_bind() local
1206 ret = mixer_initialize(ctx, drm_dev); mixer_bind()
1213 ret = exynos_plane_init(drm_dev, &ctx->planes[zpos], mixer_bind()
1214 1 << ctx->pipe, type, zpos); mixer_bind()
1219 exynos_plane = &ctx->planes[MIXER_DEFAULT_WIN]; mixer_bind()
1220 ctx->crtc = exynos_drm_crtc_create(drm_dev, &exynos_plane->base, mixer_bind()
1221 ctx->pipe, EXYNOS_DISPLAY_TYPE_HDMI, mixer_bind()
1222 &mixer_crtc_ops, ctx); mixer_bind()
1223 if (IS_ERR(ctx->crtc)) { mixer_bind()
1224 mixer_ctx_remove(ctx); mixer_bind()
1225 ret = PTR_ERR(ctx->crtc); mixer_bind()
1232 devm_kfree(dev, ctx); mixer_bind()
1238 struct mixer_context *ctx = dev_get_drvdata(dev); mixer_unbind() local
1240 mixer_ctx_remove(ctx); mixer_unbind()
1252 struct mixer_context *ctx; mixer_probe() local
1255 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); mixer_probe()
1256 if (!ctx) { mixer_probe()
1261 mutex_init(&ctx->mixer_mutex); mixer_probe()
1273 ctx->pdev = pdev; mixer_probe()
1274 ctx->dev = dev; mixer_probe()
1275 ctx->vp_enabled = drv->is_vp_enabled; mixer_probe()
1276 ctx->has_sclk = drv->has_sclk; mixer_probe()
1277 ctx->mxr_ver = drv->version; mixer_probe()
1278 init_waitqueue_head(&ctx->wait_vsync_queue); mixer_probe()
1279 atomic_set(&ctx->wait_vsync_event, 0); mixer_probe()
1281 platform_set_drvdata(pdev, ctx); mixer_probe()
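
mixer_poweron() and mixer_poweroff() above bracket a powered flag with mixer_mutex so repeated calls degrade to no-ops: the flag is published only after the hardware is up, and cleared before the clocks go down. A pthread model that mirrors that check-then-act structure, with illustrative names (build with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct mixer_model {
	pthread_mutex_t lock;
	bool powered;
};

static void poweron(struct mixer_model *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (ctx->powered) {               /* already on: nothing to do */
		pthread_mutex_unlock(&ctx->lock);
		return;
	}
	pthread_mutex_unlock(&ctx->lock);

	puts("get runtime PM, enable clocks");

	pthread_mutex_lock(&ctx->lock);
	ctx->powered = true;              /* publish only once hardware is up */
	pthread_mutex_unlock(&ctx->lock);

	puts("restore interrupts, reset and resume windows");
}

static void poweroff(struct mixer_model *ctx)
{
	pthread_mutex_lock(&ctx->lock);
	if (!ctx->powered) {
		pthread_mutex_unlock(&ctx->lock);
		return;
	}
	pthread_mutex_unlock(&ctx->lock);

	puts("stop mixer, suspend windows");

	pthread_mutex_lock(&ctx->lock);
	ctx->powered = false;             /* clear before the clocks go down */
	pthread_mutex_unlock(&ctx->lock);

	puts("disable clocks, put runtime PM");
}

int main(void)
{
	struct mixer_model ctx = { PTHREAD_MUTEX_INITIALIZER, false };

	poweron(&ctx);
	poweron(&ctx);                    /* second call is a no-op */
	poweroff(&ctx);
	return 0;
}
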
exynos_drm_gsc.c
83 #define gsc_read(offset) readl(ctx->regs + (offset))
84 #define gsc_write(cfg, offset) writel(cfg, ctx->regs + (offset))
398 static int gsc_sw_reset(struct gsc_context *ctx) gsc_sw_reset() argument
438 static void gsc_set_gscblk_fimd_wb(struct gsc_context *ctx, bool enable) gsc_set_gscblk_fimd_wb() argument
445 gscblk_cfg |= GSC_BLK_DISP1WB_DEST(ctx->id) | gsc_set_gscblk_fimd_wb()
446 GSC_BLK_GSCL_WB_IN_SRC_SEL(ctx->id) | gsc_set_gscblk_fimd_wb()
447 GSC_BLK_SW_RESET_WB_DEST(ctx->id); gsc_set_gscblk_fimd_wb()
449 gscblk_cfg |= GSC_BLK_PXLASYNC_LO_MASK_WB(ctx->id); gsc_set_gscblk_fimd_wb()
454 static void gsc_handle_irq(struct gsc_context *ctx, bool enable, gsc_handle_irq() argument
486 struct gsc_context *ctx = get_gsc_context(dev); gsc_src_set_fmt() local
487 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_src_set_fmt()
559 struct gsc_context *ctx = get_gsc_context(dev); gsc_src_set_transf() local
560 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_src_set_transf()
596 ctx->rotation = cfg & gsc_src_set_transf()
598 *swap = ctx->rotation; gsc_src_set_transf()
606 struct gsc_context *ctx = get_gsc_context(dev); gsc_src_set_size() local
608 struct gsc_scaler *sc = &ctx->sc; gsc_src_set_size()
662 static int gsc_src_set_buf_seq(struct gsc_context *ctx, u32 buf_id, gsc_src_set_buf_seq() argument
665 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_src_set_buf_seq()
701 struct gsc_context *ctx = get_gsc_context(dev); gsc_src_set_addr() local
702 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_src_set_addr()
741 return gsc_src_set_buf_seq(ctx, buf_id, buf_type); gsc_src_set_addr()
753 struct gsc_context *ctx = get_gsc_context(dev); gsc_dst_set_fmt() local
754 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_dst_set_fmt()
823 struct gsc_context *ctx = get_gsc_context(dev); gsc_dst_set_transf() local
824 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_dst_set_transf()
860 ctx->rotation = cfg & gsc_dst_set_transf()
862 *swap = ctx->rotation; gsc_dst_set_transf()
901 static int gsc_set_prescaler(struct gsc_context *ctx, struct gsc_scaler *sc, gsc_set_prescaler() argument
904 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_set_prescaler()
912 if (ctx->rotation) { gsc_set_prescaler()
954 static void gsc_set_h_coef(struct gsc_context *ctx, unsigned long main_hratio) gsc_set_h_coef() argument
980 static void gsc_set_v_coef(struct gsc_context *ctx, unsigned long main_vratio) gsc_set_v_coef() argument
1006 static void gsc_set_scaler(struct gsc_context *ctx, struct gsc_scaler *sc) gsc_set_scaler() argument
1013 gsc_set_h_coef(ctx, sc->main_hratio); gsc_set_scaler()
1017 gsc_set_v_coef(ctx, sc->main_vratio); gsc_set_scaler()
1025 struct gsc_context *ctx = get_gsc_context(dev); gsc_dst_set_size() local
1027 struct gsc_scaler *sc = &ctx->sc; gsc_dst_set_size()
1078 static int gsc_dst_get_buf_seq(struct gsc_context *ctx) gsc_dst_get_buf_seq() argument
1094 static int gsc_dst_set_buf_seq(struct gsc_context *ctx, u32 buf_id, gsc_dst_set_buf_seq() argument
1097 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_dst_set_buf_seq()
1105 mutex_lock(&ctx->lock); gsc_dst_set_buf_seq()
1132 gsc_dst_get_buf_seq(ctx) >= GSC_BUF_START) gsc_dst_set_buf_seq()
1133 gsc_handle_irq(ctx, true, false, true); gsc_dst_set_buf_seq()
1137 gsc_dst_get_buf_seq(ctx) <= GSC_BUF_STOP) gsc_dst_set_buf_seq()
1138 gsc_handle_irq(ctx, false, false, true); gsc_dst_set_buf_seq()
1141 mutex_unlock(&ctx->lock); gsc_dst_set_buf_seq()
1149 struct gsc_context *ctx = get_gsc_context(dev); gsc_dst_set_addr() local
1150 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_dst_set_addr()
1189 return gsc_dst_set_buf_seq(ctx, buf_id, buf_type); gsc_dst_set_addr()
1199 static int gsc_clk_ctrl(struct gsc_context *ctx, bool enable) gsc_clk_ctrl() argument
1204 clk_enable(ctx->gsc_clk); gsc_clk_ctrl()
1205 ctx->suspended = false; gsc_clk_ctrl()
1207 clk_disable(ctx->gsc_clk); gsc_clk_ctrl()
1208 ctx->suspended = true; gsc_clk_ctrl()
1214 static int gsc_get_src_buf_index(struct gsc_context *ctx) gsc_get_src_buf_index() argument
1220 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); gsc_get_src_buf_index()
1237 ret = gsc_src_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); gsc_get_src_buf_index()
1249 static int gsc_get_dst_buf_index(struct gsc_context *ctx) gsc_get_dst_buf_index() argument
1255 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); gsc_get_dst_buf_index()
1272 ret = gsc_dst_set_buf_seq(ctx, buf_id, IPP_BUF_DEQUEUE); gsc_get_dst_buf_index()
1286 struct gsc_context *ctx = dev_id; gsc_irq_handler() local
1287 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_irq_handler()
1294 DRM_DEBUG_KMS("gsc id[%d]\n", ctx->id); gsc_irq_handler()
1299 ctx->id, status); gsc_irq_handler()
1305 ctx->id, status); gsc_irq_handler()
1307 buf_id[EXYNOS_DRM_OPS_SRC] = gsc_get_src_buf_index(ctx); gsc_irq_handler()
1311 buf_id[EXYNOS_DRM_OPS_DST] = gsc_get_dst_buf_index(ctx); gsc_irq_handler()
1375 struct gsc_context *ctx = get_gsc_context(dev); gsc_ippdrv_check_property() local
1376 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_ippdrv_check_property()
1488 struct gsc_context *ctx = get_gsc_context(dev); gsc_ippdrv_reset() local
1489 struct gsc_scaler *sc = &ctx->sc; gsc_ippdrv_reset()
1493 ret = gsc_sw_reset(ctx); gsc_ippdrv_reset()
1500 memset(&ctx->sc, 0x0, sizeof(ctx->sc)); gsc_ippdrv_reset()
1508 struct gsc_context *ctx = get_gsc_context(dev); gsc_ippdrv_start() local
1509 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_ippdrv_start()
1527 gsc_handle_irq(ctx, true, false, true); gsc_ippdrv_start()
1557 gsc_set_gscblk_fimd_wb(ctx, set_wb.enable);
1589 ret = gsc_set_prescaler(ctx, &ctx->sc,
1597 gsc_set_scaler(ctx, &ctx->sc);
1608 struct gsc_context *ctx = get_gsc_context(dev); gsc_ippdrv_stop() local
1619 gsc_set_gscblk_fimd_wb(ctx, set_wb.enable); gsc_ippdrv_stop()
1628 gsc_handle_irq(ctx, false, false, true); gsc_ippdrv_stop()
1643 struct gsc_context *ctx; gsc_probe() local
1648 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); gsc_probe()
1649 if (!ctx) gsc_probe()
1653 ctx->gsc_clk = devm_clk_get(dev, "gscl"); gsc_probe()
1654 if (IS_ERR(ctx->gsc_clk)) { gsc_probe()
1656 return PTR_ERR(ctx->gsc_clk); gsc_probe()
1660 ctx->regs_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); gsc_probe()
1661 ctx->regs = devm_ioremap_resource(dev, ctx->regs_res); gsc_probe()
1662 if (IS_ERR(ctx->regs)) gsc_probe()
1663 return PTR_ERR(ctx->regs); gsc_probe()
1672 ctx->irq = res->start; gsc_probe()
1673 ret = devm_request_threaded_irq(dev, ctx->irq, NULL, gsc_irq_handler, gsc_probe()
1674 IRQF_ONESHOT, "drm_gsc", ctx); gsc_probe()
1681 ctx->id = pdev->id; gsc_probe()
1683 ippdrv = &ctx->ippdrv; gsc_probe()
1697 DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv); gsc_probe()
1699 mutex_init(&ctx->lock); gsc_probe()
1700 platform_set_drvdata(pdev, ctx); gsc_probe()
1723 struct gsc_context *ctx = get_gsc_context(dev); gsc_remove() local
1724 struct exynos_drm_ippdrv *ippdrv = &ctx->ippdrv; gsc_remove()
1727 mutex_destroy(&ctx->lock); gsc_remove()
1738 struct gsc_context *ctx = get_gsc_context(dev); gsc_suspend() local
1740 DRM_DEBUG_KMS("id[%d]\n", ctx->id); gsc_suspend()
1745 return gsc_clk_ctrl(ctx, false); gsc_suspend()
1750 struct gsc_context *ctx = get_gsc_context(dev); gsc_resume() local
1752 DRM_DEBUG_KMS("id[%d]\n", ctx->id); gsc_resume()
1755 return gsc_clk_ctrl(ctx, true); gsc_resume()
1764 struct gsc_context *ctx = get_gsc_context(dev); gsc_runtime_suspend() local
1766 DRM_DEBUG_KMS("id[%d]\n", ctx->id); gsc_runtime_suspend()
1768 return gsc_clk_ctrl(ctx, false); gsc_runtime_suspend()
1773 struct gsc_context *ctx = get_gsc_context(dev); gsc_runtime_resume() local
1775 DRM_DEBUG_KMS("id[%d]\n", ctx->id); gsc_runtime_resume()
1777 return gsc_clk_ctrl(ctx, true); gsc_runtime_resume()
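
The gsc_read()/gsc_write() hits above are classic MMIO accessor macros: they expand in place and capture whichever gsc_context is named ctx in the enclosing scope, which is why every function in the file declares its context as ctx. A compilable model, with an in-memory array standing in for the ioremapped register block:

#include <stdint.h>
#include <stdio.h>

struct gsc_context { volatile uint32_t *regs; };

/* like the kernel macros, these capture the local 'ctx' by name */
#define gsc_read(offset)       (ctx->regs[(offset) / 4])
#define gsc_write(cfg, offset) (ctx->regs[(offset) / 4] = (cfg))

static uint32_t fake_regs[64];                /* stand-in for an ioremapped block */

static void gsc_set_id_bit(struct gsc_context *ctx, int id)
{
    uint32_t cfg = gsc_read(0x10);            /* read-modify-write of one register */

    cfg |= 1u << id;
    gsc_write(cfg, 0x10);
}

int main(void)
{
    struct gsc_context gsc = { .regs = fake_regs };
    struct gsc_context *ctx = &gsc;           /* the name the macros expect */

    gsc_set_id_bit(ctx, 3);
    printf("reg 0x10 = %#x\n", (unsigned)gsc_read(0x10));
    return 0;
}
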
/linux-4.1.27/arch/arm/net/
H A Dbpf_jit_32.c116 static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx) _emit() argument
121 if (ctx->target != NULL) _emit()
122 ctx->target[ctx->idx] = inst; _emit()
124 ctx->idx++; _emit()
130 static inline void emit(u32 inst, struct jit_ctx *ctx) emit() argument
132 _emit(ARM_COND_AL, inst, ctx); emit()
135 static u16 saved_regs(struct jit_ctx *ctx) saved_regs() argument
139 if ((ctx->skf->len > 1) || saved_regs()
140 (ctx->skf->insns[0].code == (BPF_RET | BPF_A))) saved_regs()
146 if (ctx->seen & SEEN_CALL) saved_regs()
149 if (ctx->seen & (SEEN_DATA | SEEN_SKB)) saved_regs()
151 if (ctx->seen & SEEN_DATA) saved_regs()
153 if (ctx->seen & SEEN_X) saved_regs()
159 static inline int mem_words_used(struct jit_ctx *ctx) mem_words_used() argument
162 return fls(ctx->seen & SEEN_MEM); mem_words_used()
173 static void build_prologue(struct jit_ctx *ctx) build_prologue() argument
175 u16 reg_set = saved_regs(ctx); build_prologue()
179 emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx); build_prologue() local
180 emit(ARM_PUSH(reg_set), ctx); build_prologue() local
181 emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx); build_prologue()
184 emit(ARM_PUSH(reg_set), ctx); build_prologue() local
187 if (ctx->seen & (SEEN_DATA | SEEN_SKB)) build_prologue()
188 emit(ARM_MOV_R(r_skb, ARM_R0), ctx); build_prologue() local
190 if (ctx->seen & SEEN_DATA) { build_prologue()
192 emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx); build_prologue() local
195 emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx); build_prologue() local
197 emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); build_prologue() local
198 emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx); build_prologue() local
201 if (ctx->flags & FLAG_NEED_X_RESET) build_prologue()
202 emit(ARM_MOV_I(r_X, 0), ctx); build_prologue()
205 if (bpf_needs_clear_a(&ctx->skf->insns[0])) build_prologue()
206 emit(ARM_MOV_I(r_A, 0), ctx); build_prologue()
209 if (ctx->seen & SEEN_MEM) build_prologue()
210 emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); build_prologue() local
213 static void build_epilogue(struct jit_ctx *ctx) build_epilogue() argument
215 u16 reg_set = saved_regs(ctx); build_epilogue()
217 if (ctx->seen & SEEN_MEM) build_epilogue()
218 emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx); build_epilogue() local
226 emit(ARM_LDM(ARM_SP, reg_set), ctx); build_epilogue() local
229 if (ctx->seen & SEEN_CALL) build_epilogue()
231 emit(ARM_POP(reg_set), ctx); build_epilogue() local
234 if (!(ctx->seen & SEEN_CALL)) build_epilogue()
235 emit(ARM_BX(ARM_LR), ctx); build_epilogue() local
252 static u16 imm_offset(u32 k, struct jit_ctx *ctx) imm_offset() argument
258 if (ctx->target == NULL) { imm_offset()
259 ctx->imm_count++; imm_offset()
263 while ((i < ctx->imm_count) && ctx->imms[i]) { imm_offset()
264 if (ctx->imms[i] == k) imm_offset()
269 if (ctx->imms[i] == 0) imm_offset()
270 ctx->imms[i] = k; imm_offset()
273 offset = ctx->offsets[ctx->skf->len]; imm_offset()
274 offset += ctx->prologue_bytes; imm_offset()
275 offset += ctx->epilogue_bytes; imm_offset()
278 ctx->target[offset / 4] = k; imm_offset()
281 imm = offset - (8 + ctx->idx * 4); imm_offset()
288 ctx->flags |= FLAG_IMM_OVERFLOW; imm_offset()
300 static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx) emit_mov_i_no8m() argument
303 emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx); emit_mov_i_no8m() local
305 emit(ARM_MOVW(rd, val & 0xffff), ctx); emit_mov_i_no8m()
307 emit(ARM_MOVT(rd, val >> 16), ctx); emit_mov_i_no8m()
311 static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx) emit_mov_i() argument
316 emit(ARM_MOV_I(rd, imm12), ctx); emit_mov_i() local
318 emit_mov_i_no8m(rd, val, ctx); emit_mov_i()
323 static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) emit_load_be32() argument
325 _emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx); emit_load_be32()
326 _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); emit_load_be32()
327 _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx); emit_load_be32()
328 _emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx); emit_load_be32()
329 _emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx); emit_load_be32()
330 _emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx); emit_load_be32()
331 _emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx); emit_load_be32()
332 _emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx); emit_load_be32()
335 static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) emit_load_be16() argument
337 _emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx); emit_load_be16()
338 _emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx); emit_load_be16()
339 _emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx); emit_load_be16()
342 static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx) emit_swap16() argument
345 emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx); emit_swap16()
346 emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx); emit_swap16()
354 emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx); emit_swap16()
359 static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) emit_load_be32() argument
361 _emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx); emit_load_be32()
363 _emit(cond, ARM_REV(r_res, r_res), ctx); emit_load_be32()
367 static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx) emit_load_be16() argument
369 _emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx); emit_load_be16()
371 _emit(cond, ARM_REV16(r_res, r_res), ctx); emit_load_be16()
377 struct jit_ctx *ctx __maybe_unused) emit_swap16()
380 emit(ARM_REV16(r_dst, r_src), ctx); emit_swap16() local
388 static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx) b_imm() argument
392 if (ctx->target == NULL) b_imm()
398 imm = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8); b_imm()
403 #define OP_IMM3(op, r1, r2, imm_val, ctx) \
407 emit_mov_i_no8m(r_scratch, imm_val, ctx); \
408 emit(op ## _R((r1), (r2), r_scratch), ctx); \
410 emit(op ## _I((r1), (r2), imm12), ctx); \
414 static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx) emit_err_ret() argument
416 if (ctx->ret0_fp_idx >= 0) { emit_err_ret()
417 _emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx); emit_err_ret()
419 emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx); emit_err_ret() local
421 _emit(cond, ARM_MOV_I(ARM_R0, 0), ctx); emit_err_ret()
422 _emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx); emit_err_ret()
426 static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx) emit_blx_r() argument
429 emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx); emit_blx_r() local
432 emit(ARM_BX(tgt_reg), ctx); emit_blx_r() local
434 emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx); emit_blx_r() local
436 emit(ARM_BLX_R(tgt_reg), ctx); emit_blx_r()
440 static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx) emit_udiv() argument
444 emit(ARM_UDIV(rd, rm, rn), ctx); emit_udiv() local
460 emit(ARM_MOV_R(ARM_R1, rn), ctx); emit_udiv() local
462 emit(ARM_MOV_R(ARM_R0, rm), ctx); emit_udiv() local
464 ctx->seen |= SEEN_CALL; emit_udiv()
465 emit_mov_i(ARM_R3, (u32)jit_udiv, ctx); emit_udiv()
466 emit_blx_r(ARM_R3, ctx); emit_udiv()
469 emit(ARM_MOV_R(rd, ARM_R0), ctx); emit_udiv() local
472 static inline void update_on_xread(struct jit_ctx *ctx) update_on_xread() argument
474 if (!(ctx->seen & SEEN_X)) update_on_xread()
475 ctx->flags |= FLAG_NEED_X_RESET; update_on_xread()
477 ctx->seen |= SEEN_X; update_on_xread()
480 static int build_body(struct jit_ctx *ctx) build_body() argument
483 const struct bpf_prog *prog = ctx->skf; build_body()
498 if (ctx->target == NULL) build_body()
499 ctx->offsets[i] = ctx->idx * 4; build_body()
503 emit_mov_i(r_A, k, ctx); build_body()
506 ctx->seen |= SEEN_SKB; build_body()
509 offsetof(struct sk_buff, len)), ctx); build_body() local
513 ctx->seen |= SEEN_MEM_WORD(k); build_body()
514 emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); build_body() local
528 emit_mov_i(r_off, k, ctx); build_body()
530 ctx->seen |= SEEN_DATA | SEEN_CALL; build_body()
534 1 << load_order), ctx); build_body()
535 emit(ARM_CMP_R(r_scratch, r_off), ctx); build_body() local
538 emit(ARM_CMP_R(r_skb_hl, r_off), ctx); build_body() local
543 ctx); build_body()
547 ctx); build_body()
549 emit_load_be16(condt, r_A, r_scratch, ctx); build_body()
551 emit_load_be32(condt, r_A, r_scratch, ctx); build_body()
553 _emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx); build_body()
556 emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx); build_body()
557 emit(ARM_MOV_R(ARM_R0, r_skb), ctx); build_body() local
559 emit_blx_r(ARM_R3, ctx); build_body()
561 emit(ARM_CMP_I(ARM_R1, 0), ctx); build_body()
562 emit_err_ret(ARM_COND_NE, ctx); build_body()
563 emit(ARM_MOV_R(r_A, ARM_R0), ctx); build_body() local
574 OP_IMM3(ARM_ADD, r_off, r_X, k, ctx); build_body()
577 ctx->seen |= SEEN_X; build_body()
578 emit_mov_i(r_X, k, ctx); build_body()
581 ctx->seen |= SEEN_X | SEEN_SKB; build_body()
583 offsetof(struct sk_buff, len)), ctx); build_body() local
586 ctx->seen |= SEEN_X | SEEN_MEM_WORD(k); build_body()
587 emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); build_body() local
591 ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL; build_body()
596 emit_mov_i(r_off, k, ctx); build_body()
597 emit(ARM_CMP_R(r_skb_hl, r_off), ctx); build_body() local
601 ARM_R1), ctx); build_body()
606 _emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx); build_body()
608 emit(ARM_MOV_R(ARM_R0, r_skb), ctx); build_body() local
610 emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx); build_body()
611 emit_blx_r(ARM_R3, ctx); build_body()
613 emit(ARM_CMP_I(ARM_R1, 0), ctx); build_body()
614 emit_err_ret(ARM_COND_NE, ctx); build_body()
616 emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx); build_body()
617 emit(ARM_LSL_I(r_X, r_X, 2), ctx); build_body()
620 ctx->seen |= SEEN_MEM_WORD(k); build_body()
621 emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx); build_body() local
624 update_on_xread(ctx); build_body()
625 ctx->seen |= SEEN_MEM_WORD(k); build_body()
626 emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx); build_body() local
630 OP_IMM3(ARM_ADD, r_A, r_A, k, ctx); build_body()
633 update_on_xread(ctx); build_body()
634 emit(ARM_ADD_R(r_A, r_A, r_X), ctx); build_body() local
638 OP_IMM3(ARM_SUB, r_A, r_A, k, ctx); build_body()
641 update_on_xread(ctx); build_body()
642 emit(ARM_SUB_R(r_A, r_A, r_X), ctx); build_body() local
646 emit_mov_i(r_scratch, k, ctx); build_body()
647 emit(ARM_MUL(r_A, r_A, r_scratch), ctx); build_body() local
650 update_on_xread(ctx); build_body()
651 emit(ARM_MUL(r_A, r_A, r_X), ctx); build_body() local
656 emit_mov_i(r_scratch, k, ctx); build_body()
657 emit_udiv(r_A, r_A, r_scratch, ctx); build_body()
660 update_on_xread(ctx); build_body()
661 emit(ARM_CMP_I(r_X, 0), ctx); build_body()
662 emit_err_ret(ARM_COND_EQ, ctx); build_body()
663 emit_udiv(r_A, r_A, r_X, ctx); build_body()
667 OP_IMM3(ARM_ORR, r_A, r_A, k, ctx); build_body()
670 update_on_xread(ctx); build_body()
671 emit(ARM_ORR_R(r_A, r_A, r_X), ctx); build_body() local
675 OP_IMM3(ARM_EOR, r_A, r_A, k, ctx); build_body()
680 update_on_xread(ctx); build_body()
681 emit(ARM_EOR_R(r_A, r_A, r_X), ctx); build_body() local
685 OP_IMM3(ARM_AND, r_A, r_A, k, ctx); build_body()
688 update_on_xread(ctx); build_body()
689 emit(ARM_AND_R(r_A, r_A, r_X), ctx); build_body() local
694 emit(ARM_LSL_I(r_A, r_A, k), ctx); build_body()
697 update_on_xread(ctx); build_body()
698 emit(ARM_LSL_R(r_A, r_A, r_X), ctx); build_body() local
703 emit(ARM_LSR_I(r_A, r_A, k), ctx); build_body()
706 update_on_xread(ctx); build_body()
707 emit(ARM_LSR_R(r_A, r_A, r_X), ctx); build_body() local
711 emit(ARM_RSB_I(r_A, r_A, 0), ctx); build_body()
715 emit(ARM_B(b_imm(i + k + 1, ctx)), ctx); build_body()
731 emit_mov_i_no8m(r_scratch, k, ctx); build_body()
732 emit(ARM_CMP_R(r_A, r_scratch), ctx); build_body() local
734 emit(ARM_CMP_I(r_A, imm12), ctx); build_body() local
739 ctx)), ctx); build_body()
742 ctx)), ctx); build_body()
756 update_on_xread(ctx); build_body()
757 emit(ARM_CMP_R(r_A, r_X), ctx); build_body() local
766 emit_mov_i_no8m(r_scratch, k, ctx); build_body()
767 emit(ARM_TST_R(r_A, r_scratch), ctx); build_body() local
769 emit(ARM_TST_I(r_A, imm12), ctx); build_body() local
774 update_on_xread(ctx); build_body()
776 emit(ARM_TST_R(r_A, r_X), ctx); build_body() local
779 emit(ARM_MOV_R(ARM_R0, r_A), ctx); build_body() local
782 if ((k == 0) && (ctx->ret0_fp_idx < 0)) build_body()
783 ctx->ret0_fp_idx = i; build_body()
784 emit_mov_i(ARM_R0, k, ctx); build_body()
786 if (i != ctx->skf->len - 1) build_body()
787 emit(ARM_B(b_imm(prog->len, ctx)), ctx); build_body()
791 ctx->seen |= SEEN_X; build_body()
792 emit(ARM_MOV_R(r_X, r_A), ctx); build_body() local
796 update_on_xread(ctx); build_body()
797 emit(ARM_MOV_R(r_A, r_X), ctx); build_body() local
801 ctx->seen |= SEEN_SKB; build_body()
805 emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx); build_body() local
806 emit_swap16(r_A, r_scratch, ctx); build_body()
810 OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx); build_body()
814 emit(ARM_LDR_I(r_A, r_scratch, off), ctx); build_body() local
818 ctx->seen |= SEEN_SKB; build_body()
820 emit(ARM_LDR_I(r_scratch, r_skb, off), ctx); build_body() local
822 emit(ARM_CMP_I(r_scratch, 0), ctx); build_body()
823 emit_err_ret(ARM_COND_EQ, ctx); build_body()
828 emit(ARM_LDR_I(r_A, r_scratch, off), ctx); build_body() local
831 ctx->seen |= SEEN_SKB; build_body()
834 emit(ARM_LDR_I(r_A, r_skb, off), ctx); build_body() local
837 ctx->seen |= SEEN_SKB; build_body()
840 emit(ARM_LDR_I(r_A, r_skb, off), ctx); build_body() local
844 ctx->seen |= SEEN_SKB; build_body()
847 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); build_body() local
849 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); build_body()
851 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); build_body()
854 ctx->seen |= SEEN_SKB; build_body()
860 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); build_body() local
866 if (ctx->flags & FLAG_IMM_OVERFLOW) build_body()
876 if (ctx->target == NULL) build_body()
877 ctx->offsets[i] = ctx->idx * 4; build_body()
886 struct jit_ctx ctx; bpf_jit_compile() local
894 memset(&ctx, 0, sizeof(ctx)); bpf_jit_compile()
895 ctx.skf = fp; bpf_jit_compile()
896 ctx.ret0_fp_idx = -1; bpf_jit_compile()
898 ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL); bpf_jit_compile()
899 if (ctx.offsets == NULL) bpf_jit_compile()
902 /* fake pass to fill in the ctx->seen */ bpf_jit_compile()
903 if (unlikely(build_body(&ctx))) bpf_jit_compile()
906 tmp_idx = ctx.idx; bpf_jit_compile()
907 build_prologue(&ctx); bpf_jit_compile()
908 ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4; bpf_jit_compile()
911 tmp_idx = ctx.idx; bpf_jit_compile()
912 build_epilogue(&ctx); bpf_jit_compile()
913 ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4; bpf_jit_compile()
915 ctx.idx += ctx.imm_count; bpf_jit_compile()
916 if (ctx.imm_count) { bpf_jit_compile()
917 ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL); bpf_jit_compile()
918 if (ctx.imms == NULL) bpf_jit_compile()
923 build_epilogue(&ctx); bpf_jit_compile()
925 alloc_size = 4 * ctx.idx; bpf_jit_compile()
931 ctx.target = (u32 *) target_ptr; bpf_jit_compile()
932 ctx.idx = 0; bpf_jit_compile()
934 build_prologue(&ctx); bpf_jit_compile()
935 if (build_body(&ctx) < 0) { bpf_jit_compile()
937 if (ctx.imm_count) bpf_jit_compile()
938 kfree(ctx.imms); bpf_jit_compile()
943 build_epilogue(&ctx); bpf_jit_compile()
945 flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx)); bpf_jit_compile()
948 if (ctx.imm_count) bpf_jit_compile()
949 kfree(ctx.imms); bpf_jit_compile()
954 bpf_jit_dump(fp->len, alloc_size, 2, ctx.target); bpf_jit_compile()
957 fp->bpf_func = (void *)ctx.target; bpf_jit_compile()
960 kfree(ctx.offsets); bpf_jit_compile()
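
_emit() above is the core of a two-pass JIT: while ctx->target is NULL it only advances ctx->idx, so bpf_jit_compile() can run build_prologue()/build_body()/build_epilogue() once to measure the image, allocate the buffer, then rerun them to write real instructions. A self-contained sketch of that measure-then-emit flow:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct jit_ctx {
    uint32_t *target;                         /* NULL on the sizing pass */
    unsigned  idx;                            /* instructions emitted (or counted) so far */
};

/* pass 1 counts, pass 2 stores - exactly the _emit() idea */
static void emit(uint32_t inst, struct jit_ctx *ctx)
{
    if (ctx->target != NULL)
        ctx->target[ctx->idx] = inst;
    ctx->idx++;
}

static void build_body(struct jit_ctx *ctx)
{
    emit(0xe1a00000, ctx);                    /* mov r0, r0 (ARM nop) */
    emit(0xe12fff1e, ctx);                    /* bx lr */
}

int main(void)
{
    struct jit_ctx ctx = { 0 };

    build_body(&ctx);                         /* pass 1: sizing only */
    ctx.target = malloc(ctx.idx * sizeof(uint32_t));
    if (!ctx.target)
        return 1;

    ctx.idx = 0;                              /* rewind and really emit */
    build_body(&ctx);

    printf("emitted %u words\n", ctx.idx);
    free(ctx.target);
    return 0;
}
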
/linux-4.1.27/drivers/hwmon/
H A Dpwm-fan.c41 static int __set_pwm(struct pwm_fan_ctx *ctx, unsigned long pwm) __set_pwm() argument
46 mutex_lock(&ctx->lock); __set_pwm()
47 if (ctx->pwm_value == pwm) __set_pwm()
50 duty = DIV_ROUND_UP(pwm * (ctx->pwm->period - 1), MAX_PWM); __set_pwm()
51 ret = pwm_config(ctx->pwm, duty, ctx->pwm->period); __set_pwm()
56 pwm_disable(ctx->pwm); __set_pwm()
58 if (ctx->pwm_value == 0) { __set_pwm()
59 ret = pwm_enable(ctx->pwm); __set_pwm()
64 ctx->pwm_value = pwm; __set_pwm()
66 mutex_unlock(&ctx->lock); __set_pwm()
70 static void pwm_fan_update_state(struct pwm_fan_ctx *ctx, unsigned long pwm) pwm_fan_update_state() argument
74 for (i = 0; i < ctx->pwm_fan_max_state; ++i) pwm_fan_update_state()
75 if (pwm < ctx->pwm_fan_cooling_levels[i + 1]) pwm_fan_update_state()
78 ctx->pwm_fan_state = i; pwm_fan_update_state()
84 struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); set_pwm() local
91 ret = __set_pwm(ctx, pwm); set_pwm()
95 pwm_fan_update_state(ctx, pwm); set_pwm()
102 struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); show_pwm() local
104 return sprintf(buf, "%u\n", ctx->pwm_value); show_pwm()
121 struct pwm_fan_ctx *ctx = cdev->devdata; pwm_fan_get_max_state() local
123 if (!ctx) pwm_fan_get_max_state()
126 *state = ctx->pwm_fan_max_state; pwm_fan_get_max_state()
134 struct pwm_fan_ctx *ctx = cdev->devdata; pwm_fan_get_cur_state() local
136 if (!ctx) pwm_fan_get_cur_state()
139 *state = ctx->pwm_fan_state; pwm_fan_get_cur_state()
147 struct pwm_fan_ctx *ctx = cdev->devdata; pwm_fan_set_cur_state() local
150 if (!ctx || (state > ctx->pwm_fan_max_state)) pwm_fan_set_cur_state()
153 if (state == ctx->pwm_fan_state) pwm_fan_set_cur_state()
156 ret = __set_pwm(ctx, ctx->pwm_fan_cooling_levels[state]); pwm_fan_set_cur_state()
162 ctx->pwm_fan_state = state; pwm_fan_set_cur_state()
174 struct pwm_fan_ctx *ctx) pwm_fan_of_get_cooling_data()
189 ctx->pwm_fan_cooling_levels = devm_kzalloc(dev, num * sizeof(u32), pwm_fan_of_get_cooling_data()
191 if (!ctx->pwm_fan_cooling_levels) pwm_fan_of_get_cooling_data()
195 ctx->pwm_fan_cooling_levels, num); pwm_fan_of_get_cooling_data()
202 if (ctx->pwm_fan_cooling_levels[i] > MAX_PWM) { pwm_fan_of_get_cooling_data()
204 ctx->pwm_fan_cooling_levels[i], MAX_PWM); pwm_fan_of_get_cooling_data()
209 ctx->pwm_fan_max_state = num - 1; pwm_fan_of_get_cooling_data()
217 struct pwm_fan_ctx *ctx; pwm_fan_probe() local
222 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); pwm_fan_probe()
223 if (!ctx) pwm_fan_probe()
226 mutex_init(&ctx->lock); pwm_fan_probe()
228 ctx->pwm = devm_of_pwm_get(&pdev->dev, pdev->dev.of_node, NULL); pwm_fan_probe()
229 if (IS_ERR(ctx->pwm)) { pwm_fan_probe()
231 return PTR_ERR(ctx->pwm); pwm_fan_probe()
234 platform_set_drvdata(pdev, ctx); pwm_fan_probe()
237 duty_cycle = ctx->pwm->period - 1; pwm_fan_probe()
238 ctx->pwm_value = MAX_PWM; pwm_fan_probe()
240 ret = pwm_config(ctx->pwm, duty_cycle, ctx->pwm->period); pwm_fan_probe()
247 ret = pwm_enable(ctx->pwm); pwm_fan_probe()
254 ctx, pwm_fan_groups); pwm_fan_probe()
257 pwm_disable(ctx->pwm); pwm_fan_probe()
261 ret = pwm_fan_of_get_cooling_data(&pdev->dev, ctx); pwm_fan_probe()
265 ctx->pwm_fan_state = ctx->pwm_fan_max_state; pwm_fan_probe()
268 "pwm-fan", ctx, pwm_fan_probe()
273 pwm_disable(ctx->pwm); pwm_fan_probe()
276 ctx->cdev = cdev; pwm_fan_probe()
285 struct pwm_fan_ctx *ctx = platform_get_drvdata(pdev); pwm_fan_remove() local
287 thermal_cooling_device_unregister(ctx->cdev); pwm_fan_remove()
288 if (ctx->pwm_value) pwm_fan_remove()
289 pwm_disable(ctx->pwm); pwm_fan_remove()
296 struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); pwm_fan_suspend() local
298 if (ctx->pwm_value) pwm_fan_suspend()
299 pwm_disable(ctx->pwm); pwm_fan_suspend()
305 struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); pwm_fan_resume() local
309 if (ctx->pwm_value == 0) pwm_fan_resume()
312 duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM); pwm_fan_resume()
313 ret = pwm_config(ctx->pwm, duty, ctx->pwm->period); pwm_fan_resume()
316 return pwm_enable(ctx->pwm); pwm_fan_resume()
173 pwm_fan_of_get_cooling_data(struct device *dev, struct pwm_fan_ctx *ctx) pwm_fan_of_get_cooling_data() argument
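
__set_pwm() and pwm_fan_resume() above both scale a 0..MAX_PWM request onto the PWM period with DIV_ROUND_UP(pwm * (period - 1), MAX_PWM). Rounding up matters: with truncating division, a small nonzero request on a short period could collapse to a duty of 0 and stop the fan. A worked model:

#include <stdio.h>

#define MAX_PWM 255
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* same arithmetic as __set_pwm(): map 0..MAX_PWM onto 0..period-1 */
static unsigned long pwm_to_duty(unsigned long pwm, unsigned long period_ns)
{
    return DIV_ROUND_UP(pwm * (period_ns - 1), MAX_PWM);
}

int main(void)
{
    /* pwm=1 with a 100ns period: plain division would give 0 and
     * effectively disable the output; rounding up keeps it at 1 */
    printf("%lu\n", pwm_to_duty(1, 100));     /* -> 1  */
    printf("%lu\n", pwm_to_duty(255, 100));   /* -> 99 */
    return 0;
}
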
/linux-4.1.27/drivers/misc/cxl/
H A Dcontext.c37 int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, cxl_context_init() argument
42 spin_lock_init(&ctx->sste_lock); cxl_context_init()
43 ctx->afu = afu; cxl_context_init()
44 ctx->master = master; cxl_context_init()
45 ctx->pid = NULL; /* Set in start work ioctl */ cxl_context_init()
46 mutex_init(&ctx->mapping_lock); cxl_context_init()
47 ctx->mapping = mapping; cxl_context_init()
56 i = cxl_alloc_sst(ctx); cxl_context_init()
60 INIT_WORK(&ctx->fault_work, cxl_handle_fault); cxl_context_init()
62 init_waitqueue_head(&ctx->wq); cxl_context_init()
63 spin_lock_init(&ctx->lock); cxl_context_init()
65 ctx->irq_bitmap = NULL; cxl_context_init()
66 ctx->pending_irq = false; cxl_context_init()
67 ctx->pending_fault = false; cxl_context_init()
68 ctx->pending_afu_err = false; cxl_context_init()
78 ctx->irqs.range[i] = 0; cxl_context_init()
80 mutex_init(&ctx->status_mutex); cxl_context_init()
82 ctx->status = OPENED; cxl_context_init()
90 i = idr_alloc(&ctx->afu->contexts_idr, ctx, 0, cxl_context_init()
91 ctx->afu->num_procs, GFP_NOWAIT); cxl_context_init()
97 ctx->pe = i; cxl_context_init()
98 ctx->elem = &ctx->afu->spa[i]; cxl_context_init()
99 ctx->pe_inserted = false; cxl_context_init()
105 struct cxl_context *ctx = vma->vm_file->private_data; cxl_mmap_fault() local
112 __func__, ctx->pe, address, offset); cxl_mmap_fault()
114 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { cxl_mmap_fault()
115 area = ctx->afu->psn_phys; cxl_mmap_fault()
116 if (offset >= ctx->afu->adapter->ps_size) cxl_mmap_fault()
119 area = ctx->psn_phys; cxl_mmap_fault()
120 if (offset >= ctx->psn_size) cxl_mmap_fault()
124 mutex_lock(&ctx->status_mutex); cxl_mmap_fault()
126 if (ctx->status != STARTED) { cxl_mmap_fault()
127 mutex_unlock(&ctx->status_mutex); cxl_mmap_fault()
134 mutex_unlock(&ctx->status_mutex); cxl_mmap_fault()
146 int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) cxl_context_iomap() argument
149 len = min(len, ctx->psn_size); cxl_context_iomap()
151 if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { cxl_context_iomap()
153 if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { cxl_context_iomap()
159 if (!ctx->afu->enabled) cxl_context_iomap()
164 ctx->psn_phys, ctx->pe , ctx->master); cxl_context_iomap()
175 * hardware should no longer access *ctx after this has returned.
177 static void __detach_context(struct cxl_context *ctx) __detach_context() argument
181 mutex_lock(&ctx->status_mutex); __detach_context()
182 status = ctx->status; __detach_context()
183 ctx->status = CLOSED; __detach_context()
184 mutex_unlock(&ctx->status_mutex); __detach_context()
188 WARN_ON(cxl_detach_process(ctx)); __detach_context()
189 afu_release_irqs(ctx); __detach_context()
190 flush_work(&ctx->fault_work); /* Only needed for dedicated process */ __detach_context()
191 wake_up_all(&ctx->wq); __detach_context()
200 void cxl_context_detach(struct cxl_context *ctx) cxl_context_detach() argument
202 __detach_context(ctx); cxl_context_detach()
210 struct cxl_context *ctx; cxl_context_detach_all() local
214 idr_for_each_entry(&afu->contexts_idr, ctx, tmp) { cxl_context_detach_all()
219 __detach_context(ctx); cxl_context_detach_all()
227 mutex_lock(&ctx->mapping_lock); cxl_context_detach_all()
228 if (ctx->mapping) cxl_context_detach_all()
229 unmap_mapping_range(ctx->mapping, 0, 0, 1); cxl_context_detach_all()
230 mutex_unlock(&ctx->mapping_lock); cxl_context_detach_all()
235 void cxl_context_free(struct cxl_context *ctx) cxl_context_free() argument
237 mutex_lock(&ctx->afu->contexts_lock); cxl_context_free()
238 idr_remove(&ctx->afu->contexts_idr, ctx->pe); cxl_context_free()
239 mutex_unlock(&ctx->afu->contexts_lock); cxl_context_free()
242 free_page((u64)ctx->sstp); cxl_context_free()
243 ctx->sstp = NULL; cxl_context_free()
245 put_pid(ctx->pid); cxl_context_free()
246 kfree(ctx); cxl_context_free()
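
cxl_context_init() above allocates the process element number with idr_alloc(), which reserves the smallest free integer in [0, num_procs) and files the ctx pointer under it, so the interrupt path can later recover the context by PE handle (as cxl_irq_multiplexed() does with idr_find()). A minimal fixed-size model of that allocate-and-map contract:

#include <stddef.h>
#include <stdio.h>

#define NUM_PROCS 8

static void *pe_table[NUM_PROCS];             /* id -> context, like the IDR */

/* smallest free slot in [0, NUM_PROCS), or -1: the idr_alloc() contract */
static int pe_alloc(void *ctx)
{
    for (int i = 0; i < NUM_PROCS; i++) {
        if (!pe_table[i]) {
            pe_table[i] = ctx;
            return i;
        }
    }
    return -1;                                /* -ENOSPC in the kernel */
}

static void *pe_find(int id)                  /* idr_find() analogue */
{
    return (id >= 0 && id < NUM_PROCS) ? pe_table[id] : NULL;
}

int main(void)
{
    int dummy_ctx;
    int pe = pe_alloc(&dummy_ctx);

    printf("pe=%d, lookup ok=%d\n", pe, pe_find(pe) == &dummy_ctx);
    return 0;
}
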
H A Dfault.c35 static struct cxl_sste* find_free_sste(struct cxl_context *ctx, find_free_sste() argument
39 unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */ find_free_sste()
48 primary = ctx->sstp + (hash << 3); find_free_sste()
60 ret = primary + ctx->sst_lru; find_free_sste()
61 ctx->sst_lru = (ctx->sst_lru + 1) & 0x7; find_free_sste()
66 static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) cxl_load_segment() argument
72 spin_lock_irqsave(&ctx->sste_lock, flags); cxl_load_segment()
73 sste = find_free_sste(ctx, slb); cxl_load_segment()
78 sste - ctx->sstp, slb->vsid, slb->esid); cxl_load_segment()
79 trace_cxl_ste_write(ctx, sste - ctx->sstp, slb->esid, slb->vsid); cxl_load_segment()
84 spin_unlock_irqrestore(&ctx->sste_lock, flags); cxl_load_segment()
87 static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm, cxl_fault_segment() argument
94 cxl_load_segment(ctx, &slb); cxl_fault_segment()
100 static void cxl_ack_ae(struct cxl_context *ctx) cxl_ack_ae() argument
104 cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0); cxl_ack_ae()
106 spin_lock_irqsave(&ctx->lock, flags); cxl_ack_ae()
107 ctx->pending_fault = true; cxl_ack_ae()
108 ctx->fault_addr = ctx->dar; cxl_ack_ae()
109 ctx->fault_dsisr = ctx->dsisr; cxl_ack_ae()
110 spin_unlock_irqrestore(&ctx->lock, flags); cxl_ack_ae()
112 wake_up_all(&ctx->wq); cxl_ack_ae()
115 static int cxl_handle_segment_miss(struct cxl_context *ctx, cxl_handle_segment_miss() argument
120 pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea); cxl_handle_segment_miss()
121 trace_cxl_ste_miss(ctx, ea); cxl_handle_segment_miss()
123 if ((rc = cxl_fault_segment(ctx, mm, ea))) cxl_handle_segment_miss()
124 cxl_ack_ae(ctx); cxl_handle_segment_miss()
128 cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0); cxl_handle_segment_miss()
134 static void cxl_handle_page_fault(struct cxl_context *ctx, cxl_handle_page_fault() argument
141 trace_cxl_pte_miss(ctx, dsisr, dar); cxl_handle_page_fault()
145 return cxl_ack_ae(ctx); cxl_handle_page_fault()
155 if ((!ctx->kernel) || ~(dar & (1ULL << 63))) cxl_handle_page_fault()
165 pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe); cxl_handle_page_fault()
166 cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0); cxl_handle_page_fault()
171 struct cxl_context *ctx = cxl_handle_fault() local
173 u64 dsisr = ctx->dsisr; cxl_handle_fault()
174 u64 dar = ctx->dar; cxl_handle_fault()
178 if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr || cxl_handle_fault()
179 cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar || cxl_handle_fault()
180 cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) { cxl_handle_fault()
184 dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n"); cxl_handle_fault()
189 if (ctx->status == CLOSED) { cxl_handle_fault()
190 cxl_ack_ae(ctx); cxl_handle_fault()
195 "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar); cxl_handle_fault()
197 if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { cxl_handle_fault()
199 pid_nr(ctx->pid)); cxl_handle_fault()
200 cxl_ack_ae(ctx); cxl_handle_fault()
205 pid_nr(ctx->pid)); cxl_handle_fault()
206 cxl_ack_ae(ctx); cxl_handle_fault()
211 cxl_handle_segment_miss(ctx, mm, dar); cxl_handle_fault()
213 cxl_handle_page_fault(ctx, mm, dsisr, dar); cxl_handle_fault()
222 static void cxl_prefault_one(struct cxl_context *ctx, u64 ea) cxl_prefault_one() argument
228 if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { cxl_prefault_one()
230 pid_nr(ctx->pid)); cxl_prefault_one()
235 pid_nr(ctx->pid)); cxl_prefault_one()
240 rc = cxl_fault_segment(ctx, mm, ea); cxl_prefault_one()
256 static void cxl_prefault_vma(struct cxl_context *ctx) cxl_prefault_vma() argument
265 if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { cxl_prefault_vma()
267 pid_nr(ctx->pid)); cxl_prefault_vma()
272 pid_nr(ctx->pid)); cxl_prefault_vma()
287 cxl_load_segment(ctx, &slb); cxl_prefault_vma()
298 void cxl_prefault(struct cxl_context *ctx, u64 wed) cxl_prefault() argument
300 switch (ctx->afu->prefault_mode) { cxl_prefault()
302 cxl_prefault_one(ctx, wed); cxl_prefault()
305 cxl_prefault_vma(ctx); cxl_prefault()
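
find_free_sste() above treats the segment table as hash-indexed groups of eight entries: it prefers an invalid slot within the group and otherwise evicts round-robin at ctx->sst_lru, advancing the cursor with & 0x7. A self-contained model of that victim selection (the entry layout is simplified to a valid bit plus a tag):

#include <stdio.h>

#define GROUP_SIZE 8

struct sste { int valid; unsigned long esid; };

struct seg_ctx {
    struct sste table[4 * GROUP_SIZE];        /* 4 hash groups of 8 entries */
    unsigned    sst_lru;                      /* round-robin cursor */
};

static struct sste *find_free_sste(struct seg_ctx *ctx, unsigned hash)
{
    struct sste *group = &ctx->table[(hash % 4) * GROUP_SIZE];

    for (int i = 0; i < GROUP_SIZE; i++)      /* prefer an invalid entry */
        if (!group[i].valid)
            return &group[i];

    /* group full: evict round-robin, like ctx->sst_lru = (lru + 1) & 0x7 */
    struct sste *victim = &group[ctx->sst_lru];
    ctx->sst_lru = (ctx->sst_lru + 1) & (GROUP_SIZE - 1);
    return victim;
}

int main(void)
{
    struct seg_ctx ctx = { 0 };

    for (int i = 0; i < 10; i++) {            /* the 9th/10th inserts start evicting */
        struct sste *s = find_free_sste(&ctx, 0);
        s->valid = 1;
        s->esid = i;
        printf("slot %ld <- esid %d\n", (long)(s - ctx.table), i);
    }
    return 0;
}
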
H A Dtrace.h59 TP_PROTO(struct cxl_context *ctx),
61 TP_ARGS(ctx),
70 __entry->card = ctx->afu->adapter->adapter_num;
71 __entry->afu = ctx->afu->slice;
72 __entry->pe = ctx->pe;
84 TP_PROTO(struct cxl_context *ctx, u64 wed, s16 num_interrupts, u64 amr),
86 TP_ARGS(ctx, wed, num_interrupts, amr),
99 __entry->card = ctx->afu->adapter->adapter_num;
100 __entry->afu = ctx->afu->slice;
101 __entry->pe = ctx->pe;
102 __entry->pid = pid_nr(ctx->pid);
120 TP_PROTO(struct cxl_context *ctx),
121 TP_ARGS(ctx)
125 TP_PROTO(struct cxl_context *ctx, int afu_irq, int virq, irq_hw_number_t hwirq),
127 TP_ARGS(ctx, afu_irq, virq, hwirq),
139 __entry->card = ctx->afu->adapter->adapter_num;
140 __entry->afu = ctx->afu->slice;
141 __entry->pe = ctx->pe;
158 TP_PROTO(struct cxl_context *ctx, int irq, u64 dsisr, u64 dar),
160 TP_ARGS(ctx, irq, dsisr, dar),
172 __entry->card = ctx->afu->adapter->adapter_num;
173 __entry->afu = ctx->afu->slice;
174 __entry->pe = ctx->pe;
191 TP_PROTO(struct cxl_context *ctx, u64 tfc),
193 TP_ARGS(ctx, tfc),
203 __entry->card = ctx->afu->adapter->adapter_num;
204 __entry->afu = ctx->afu->slice;
205 __entry->pe = ctx->pe;
218 TP_PROTO(struct cxl_context *ctx, u64 dar),
220 TP_ARGS(ctx, dar),
230 __entry->card = ctx->afu->adapter->adapter_num;
231 __entry->afu = ctx->afu->slice;
232 __entry->pe = ctx->pe;
245 TP_PROTO(struct cxl_context *ctx, unsigned int idx, u64 e, u64 v),
247 TP_ARGS(ctx, idx, e, v),
259 __entry->card = ctx->afu->adapter->adapter_num;
260 __entry->afu = ctx->afu->slice;
261 __entry->pe = ctx->pe;
278 TP_PROTO(struct cxl_context *ctx, u64 dsisr, u64 dar),
280 TP_ARGS(ctx, dsisr, dar),
291 __entry->card = ctx->afu->adapter->adapter_num;
292 __entry->afu = ctx->afu->slice;
293 __entry->pe = ctx->pe;
308 TP_PROTO(struct cxl_context *ctx, u64 cmd),
310 TP_ARGS(ctx, cmd),
320 __entry->card = ctx->afu->adapter->adapter_num;
321 __entry->afu = ctx->afu->slice;
322 __entry->pe = ctx->pe;
335 TP_PROTO(struct cxl_context *ctx, u64 cmd, int rc),
337 TP_ARGS(ctx, cmd, rc),
348 __entry->card = ctx->afu->adapter->adapter_num;
349 __entry->afu = ctx->afu->slice;
350 __entry->pe = ctx->pe;
449 TP_PROTO(struct cxl_context *ctx),
450 TP_ARGS(ctx)
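
Each TRACE_EVENT above follows one shape: TP_PROTO declares the probe signature, TP_ARGS forwards the arguments, and the assign step snapshots card/afu/pe out of the ctx at the moment the event fires. Stripped of the kernel's macro machinery, a tracepoint is essentially an optional hook that costs almost nothing while disabled; a rough model:

#include <stdio.h>

struct cxl_like_ctx { int card, afu, pe; };

/* the "registered probe", if any - NULL means tracing disabled */
static void (*trace_attach_probe)(const struct cxl_like_ctx *ctx);

static void trace_attach(const struct cxl_like_ctx *ctx)
{
    if (trace_attach_probe)                   /* near-zero cost when unused */
        trace_attach_probe(ctx);
}

static void print_probe(const struct cxl_like_ctx *ctx)
{
    /* the assign step: snapshot the fields at fire time */
    printf("attach: card=%d afu=%d pe=%d\n", ctx->card, ctx->afu, ctx->pe);
}

int main(void)
{
    struct cxl_like_ctx ctx = { .card = 0, .afu = 1, .pe = 42 };

    trace_attach(&ctx);                       /* disabled: no output */
    trace_attach_probe = print_probe;         /* enable tracing */
    trace_attach(&ctx);
    return 0;
}
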
H A Dirq.c23 static irqreturn_t handle_psl_slice_error(struct cxl_context *ctx, u64 dsisr, u64 errstat) handle_psl_slice_error() argument
27 fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1); handle_psl_slice_error()
28 fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2); handle_psl_slice_error()
29 fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An); handle_psl_slice_error()
30 serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An); handle_psl_slice_error()
31 afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An); handle_psl_slice_error()
33 dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%.16llx\n", errstat); handle_psl_slice_error()
34 dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%.16llx\n", fir1); handle_psl_slice_error()
35 dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%.16llx\n", fir2); handle_psl_slice_error()
36 dev_crit(&ctx->afu->dev, "PSL_SERR_An: 0x%.16llx\n", serr); handle_psl_slice_error()
37 dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%.16llx\n", fir_slice); handle_psl_slice_error()
38 dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%.16llx\n", afu_debug); handle_psl_slice_error()
40 dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n"); handle_psl_slice_error()
41 cxl_stop_trace(ctx->afu->adapter); handle_psl_slice_error()
43 return cxl_ack_irq(ctx, 0, errstat); handle_psl_slice_error()
88 static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 dar) schedule_cxl_fault() argument
90 ctx->dsisr = dsisr; schedule_cxl_fault()
91 ctx->dar = dar; schedule_cxl_fault()
92 schedule_work(&ctx->fault_work); schedule_cxl_fault()
98 struct cxl_context *ctx = data; cxl_irq() local
104 trace_cxl_psl_irq(ctx, irq, dsisr, dar); cxl_irq()
106 pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar); cxl_irq()
119 pr_devel("Scheduling segment miss handling for later pe: %i\n", ctx->pe); cxl_irq()
120 return schedule_cxl_fault(ctx, dsisr, dar); cxl_irq()
140 pr_devel("Scheduling page fault handling for later pe: %i\n", ctx->pe); cxl_irq()
141 return schedule_cxl_fault(ctx, dsisr, dar); cxl_irq()
148 return handle_psl_slice_error(ctx, dsisr, irq_info->errstat); cxl_irq()
152 if (ctx->pending_afu_err) { cxl_irq()
160 dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error " cxl_irq()
162 ctx->pe, irq_info->afu_err); cxl_irq()
164 spin_lock(&ctx->lock); cxl_irq()
165 ctx->afu_err = irq_info->afu_err; cxl_irq()
166 ctx->pending_afu_err = 1; cxl_irq()
167 spin_unlock(&ctx->lock); cxl_irq()
169 wake_up_all(&ctx->wq); cxl_irq()
172 cxl_ack_irq(ctx, CXL_PSL_TFC_An_A, 0); cxl_irq()
195 struct cxl_context *ctx; cxl_irq_multiplexed() local
206 ctx = idr_find(&afu->contexts_idr, ph); cxl_irq_multiplexed()
207 if (ctx) { cxl_irq_multiplexed()
208 ret = cxl_irq(irq, ctx, &irq_info); cxl_irq_multiplexed()
223 struct cxl_context *ctx = data; cxl_irq_afu() local
230 irq_off = hwirq - ctx->irqs.offset[r]; cxl_irq_afu()
231 range = ctx->irqs.range[r]; cxl_irq_afu()
240 ctx->pe, irq, hwirq); cxl_irq_afu()
244 trace_cxl_afu_irq(ctx, afu_irq, irq, hwirq); cxl_irq_afu()
246 afu_irq, ctx->pe, irq, hwirq); cxl_irq_afu()
248 if (unlikely(!ctx->irq_bitmap)) { cxl_irq_afu()
252 spin_lock(&ctx->lock); cxl_irq_afu()
253 set_bit(afu_irq - 1, ctx->irq_bitmap); cxl_irq_afu()
254 ctx->pending_irq = true; cxl_irq_afu()
255 spin_unlock(&ctx->lock); cxl_irq_afu()
257 wake_up_all(&ctx->wq); cxl_irq_afu()
407 void afu_irq_name_free(struct cxl_context *ctx) afu_irq_name_free() argument
411 list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) { afu_irq_name_free()
418 int afu_register_irqs(struct cxl_context *ctx, u32 count) afu_register_irqs() argument
424 if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count))) afu_register_irqs()
428 ctx->irqs.offset[0] = ctx->afu->psl_hwirq; afu_register_irqs()
429 ctx->irqs.range[0] = 1; afu_register_irqs()
431 ctx->irq_count = count; afu_register_irqs()
432 ctx->irq_bitmap = kcalloc(BITS_TO_LONGS(count), afu_register_irqs()
433 sizeof(*ctx->irq_bitmap), GFP_KERNEL); afu_register_irqs()
434 if (!ctx->irq_bitmap) afu_register_irqs()
441 INIT_LIST_HEAD(&ctx->irq_names); afu_register_irqs()
443 for (i = 0; i < ctx->irqs.range[r]; i++) { afu_register_irqs()
449 dev_name(&ctx->afu->dev), afu_register_irqs()
450 ctx->pe, j); afu_register_irqs()
456 list_add_tail(&irq_name->list, &ctx->irq_names); afu_register_irqs()
462 irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list); afu_register_irqs()
464 hwirq = ctx->irqs.offset[r]; afu_register_irqs()
465 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { afu_register_irqs()
466 cxl_map_irq(ctx->afu->adapter, hwirq, afu_register_irqs()
467 cxl_irq_afu, ctx, irq_name->name); afu_register_irqs()
475 afu_irq_name_free(ctx); afu_register_irqs()
479 void afu_release_irqs(struct cxl_context *ctx) afu_release_irqs() argument
486 hwirq = ctx->irqs.offset[r]; afu_release_irqs()
487 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { afu_release_irqs()
490 cxl_unmap_irq(virq, ctx); afu_release_irqs()
494 afu_irq_name_free(ctx); afu_release_irqs()
495 cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); afu_release_irqs()
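
cxl_irq() above never resolves a fault in hard interrupt context; schedule_cxl_fault() just latches DSISR/DAR into the context and queues ctx->fault_work so cxl_handle_fault() can run later where it is allowed to sleep. A compilable sketch of that top-half/bottom-half split, with a plain function call standing in for the workqueue:

#include <stdint.h>
#include <stdio.h>

struct fault_ctx {
    uint64_t dsisr, dar;                      /* latched by the top half */
    int      work_pending;
};

static void handle_fault(struct fault_ctx *ctx)   /* "bottom half": may sleep */
{
    printf("resolving fault: dsisr=%#llx dar=%#llx\n",
           (unsigned long long)ctx->dsisr, (unsigned long long)ctx->dar);
    ctx->work_pending = 0;
}

/* "top half": record the registers and defer, like schedule_cxl_fault() */
static void irq_top_half(struct fault_ctx *ctx, uint64_t dsisr, uint64_t dar)
{
    ctx->dsisr = dsisr;
    ctx->dar = dar;
    ctx->work_pending = 1;                    /* schedule_work(&ctx->fault_work) in the kernel */
}

int main(void)
{
    struct fault_ctx ctx = { 0 };

    irq_top_half(&ctx, 0x40000000, 0xdead0000);
    if (ctx.work_pending)                     /* the workqueue thread picking it up */
        handle_fault(&ctx);
    return 0;
}
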
H A Dnative.c269 static void slb_invalid(struct cxl_context *ctx) slb_invalid() argument
271 struct cxl *adapter = ctx->afu->adapter; slb_invalid()
274 WARN_ON(!mutex_is_locked(&ctx->afu->spa_mutex)); slb_invalid()
277 ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) | slb_invalid()
278 be32_to_cpu(ctx->elem->lpid)); slb_invalid()
289 static int do_process_element_cmd(struct cxl_context *ctx, do_process_element_cmd() argument
296 trace_cxl_llcmd(ctx, cmd); do_process_element_cmd()
298 WARN_ON(!ctx->afu->enabled); do_process_element_cmd()
300 ctx->elem->software_state = cpu_to_be32(pe_state); do_process_element_cmd()
302 *(ctx->afu->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe); do_process_element_cmd()
304 cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe); do_process_element_cmd()
307 dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n"); do_process_element_cmd()
311 state = be64_to_cpup(ctx->afu->sw_command_status); do_process_element_cmd()
318 (cmd | (cmd >> 16) | ctx->pe)) do_process_element_cmd()
331 trace_cxl_llcmd_done(ctx, cmd, rc); do_process_element_cmd()
335 static int add_process_element(struct cxl_context *ctx) add_process_element() argument
339 mutex_lock(&ctx->afu->spa_mutex); add_process_element()
340 pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe); add_process_element()
341 if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V))) add_process_element()
342 ctx->pe_inserted = true; add_process_element()
343 pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe); add_process_element()
344 mutex_unlock(&ctx->afu->spa_mutex); add_process_element()
348 static int terminate_process_element(struct cxl_context *ctx) terminate_process_element() argument
353 if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V))) terminate_process_element()
356 mutex_lock(&ctx->afu->spa_mutex); terminate_process_element()
357 pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe); terminate_process_element()
358 rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE, terminate_process_element()
360 ctx->elem->software_state = 0; /* Remove Valid bit */ terminate_process_element()
361 pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe); terminate_process_element()
362 mutex_unlock(&ctx->afu->spa_mutex); terminate_process_element()
366 static int remove_process_element(struct cxl_context *ctx) remove_process_element() argument
370 mutex_lock(&ctx->afu->spa_mutex); remove_process_element()
371 pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe); remove_process_element()
372 if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0))) remove_process_element()
373 ctx->pe_inserted = false; remove_process_element()
374 slb_invalid(ctx); remove_process_element()
375 pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe); remove_process_element()
376 mutex_unlock(&ctx->afu->spa_mutex); remove_process_element()
382 static void assign_psn_space(struct cxl_context *ctx) assign_psn_space() argument
384 if (!ctx->afu->pp_size || ctx->master) { assign_psn_space()
385 ctx->psn_phys = ctx->afu->psn_phys; assign_psn_space()
386 ctx->psn_size = ctx->afu->adapter->ps_size; assign_psn_space()
388 ctx->psn_phys = ctx->afu->psn_phys + assign_psn_space()
389 (ctx->afu->pp_offset + ctx->afu->pp_size * ctx->pe); assign_psn_space()
390 ctx->psn_size = ctx->afu->pp_size; assign_psn_space()
433 static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr) attach_afu_directed() argument
438 assign_psn_space(ctx); attach_afu_directed()
440 ctx->elem->ctxtime = 0; /* disable */ attach_afu_directed()
441 ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID)); attach_afu_directed()
442 ctx->elem->haurp = 0; /* disable */ attach_afu_directed()
443 ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1)); attach_afu_directed()
446 if (ctx->master) attach_afu_directed()
458 ctx->elem->common.pid = cpu_to_be32(current->pid); attach_afu_directed()
459 ctx->elem->common.tid = 0; attach_afu_directed()
460 ctx->elem->sr = cpu_to_be64(sr); attach_afu_directed()
462 ctx->elem->common.csrp = 0; /* disable */ attach_afu_directed()
463 ctx->elem->common.aurp0 = 0; /* disable */ attach_afu_directed()
464 ctx->elem->common.aurp1 = 0; /* disable */ attach_afu_directed()
466 cxl_prefault(ctx, wed); attach_afu_directed()
468 ctx->elem->common.sstp0 = cpu_to_be64(ctx->sstp0); attach_afu_directed()
469 ctx->elem->common.sstp1 = cpu_to_be64(ctx->sstp1); attach_afu_directed()
472 ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]); attach_afu_directed()
473 ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]); attach_afu_directed()
476 ctx->elem->common.amr = cpu_to_be64(amr); attach_afu_directed()
477 ctx->elem->common.wed = cpu_to_be64(wed); attach_afu_directed()
480 if ((result = afu_check_and_enable(ctx->afu))) attach_afu_directed()
483 add_process_element(ctx); attach_afu_directed()
530 static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr) attach_dedicated() argument
532 struct cxl_afu *afu = ctx->afu; attach_dedicated()
538 if (ctx->master) attach_dedicated()
548 if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1))) attach_dedicated()
551 cxl_prefault(ctx, wed); attach_dedicated()
554 (((u64)ctx->irqs.offset[0] & 0xffff) << 48) | attach_dedicated()
555 (((u64)ctx->irqs.offset[1] & 0xffff) << 32) | attach_dedicated()
556 (((u64)ctx->irqs.offset[2] & 0xffff) << 16) | attach_dedicated()
557 ((u64)ctx->irqs.offset[3] & 0xffff)); attach_dedicated()
559 (((u64)ctx->irqs.range[0] & 0xffff) << 48) | attach_dedicated()
560 (((u64)ctx->irqs.range[1] & 0xffff) << 32) | attach_dedicated()
561 (((u64)ctx->irqs.range[2] & 0xffff) << 16) | attach_dedicated()
562 ((u64)ctx->irqs.range[3] & 0xffff)); attach_dedicated()
567 assign_psn_space(ctx); attach_dedicated()
618 int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed, u64 amr) cxl_attach_process() argument
620 ctx->kernel = kernel; cxl_attach_process()
621 if (ctx->afu->current_mode == CXL_MODE_DIRECTED) cxl_attach_process()
622 return attach_afu_directed(ctx, wed, amr); cxl_attach_process()
624 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) cxl_attach_process()
625 return attach_dedicated(ctx, wed, amr); cxl_attach_process()
630 static inline int detach_process_native_dedicated(struct cxl_context *ctx) detach_process_native_dedicated() argument
632 cxl_afu_reset(ctx->afu); detach_process_native_dedicated()
633 cxl_afu_disable(ctx->afu); detach_process_native_dedicated()
634 cxl_psl_purge(ctx->afu); detach_process_native_dedicated()
638 static inline int detach_process_native_afu_directed(struct cxl_context *ctx) detach_process_native_afu_directed() argument
640 if (!ctx->pe_inserted) detach_process_native_afu_directed()
642 if (terminate_process_element(ctx)) detach_process_native_afu_directed()
644 if (remove_process_element(ctx)) detach_process_native_afu_directed()
650 int cxl_detach_process(struct cxl_context *ctx) cxl_detach_process() argument
652 trace_cxl_detach(ctx); cxl_detach_process()
654 if (ctx->afu->current_mode == CXL_MODE_DEDICATED) cxl_detach_process()
655 return detach_process_native_dedicated(ctx); cxl_detach_process()
657 return detach_process_native_afu_directed(ctx); cxl_detach_process()
690 int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask) cxl_ack_irq() argument
692 trace_cxl_psl_irq_ack(ctx, tfc); cxl_ack_irq()
694 cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc); cxl_ack_irq()
696 recover_psl_err(ctx->afu, psl_reset_mask); cxl_ack_irq()
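
do_process_element_cmd() above writes an LLCMD and then polls the shared software command status word until the hardware echoes the command and PE back, warning and failing on timeout. The general shape, with an iteration budget standing in for the kernel's time-based timeout and the ack condition simplified to cmd | pe:

#include <stdint.h>
#include <stdio.h>

#define CMD_ADD 0x0001000000000000ULL         /* illustrative command encoding */
#define TIMEOUT 1000000

/* poll until the status word acknowledges (cmd | pe), or give up */
static int wait_for_ack(volatile uint64_t *sw_status, uint64_t cmd, uint64_t pe)
{
    for (long i = 0; i < TIMEOUT; i++) {
        if (*sw_status == (cmd | pe))
            return 0;
        /* the kernel would cpu_relax() and check time_after() here */
    }
    return -1;                                /* -EBUSY after a dev_warn() */
}

int main(void)
{
    volatile uint64_t sw_status = 0;
    uint64_t pe = 7;

    sw_status = CMD_ADD | pe;                 /* pretend the hardware acked at once */
    printf("ack: %d\n", wait_for_ack(&sw_status, CMD_ADD, pe));
    return 0;
}
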
H A Dmain.c35 static inline void _cxl_slbia(struct cxl_context *ctx, struct mm_struct *mm) _cxl_slbia() argument
39 if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) { _cxl_slbia()
41 __func__, pid_nr(ctx->pid)); _cxl_slbia()
49 ctx->afu->adapter->adapter_num, ctx->afu->slice, ctx->pe); _cxl_slbia()
51 spin_lock_irqsave(&ctx->sste_lock, flags); _cxl_slbia()
52 trace_cxl_slbia(ctx); _cxl_slbia()
53 memset(ctx->sstp, 0, ctx->sst_size); _cxl_slbia()
54 spin_unlock_irqrestore(&ctx->sste_lock, flags); _cxl_slbia()
56 cxl_afu_slbia(ctx->afu); _cxl_slbia()
65 struct cxl_context *ctx; cxl_slbia_core() local
72 /* XXX: Make this lookup faster with link from mm to ctx */ cxl_slbia_core()
79 idr_for_each_entry(&afu->contexts_idr, ctx, id) cxl_slbia_core()
80 _cxl_slbia(ctx, mm); cxl_slbia_core()
93 int cxl_alloc_sst(struct cxl_context *ctx) cxl_alloc_sst() argument
101 ctx->sst_size = PAGE_SIZE; cxl_alloc_sst()
102 ctx->sst_lru = 0; cxl_alloc_sst()
103 ctx->sstp = (struct cxl_sste *)get_zeroed_page(GFP_KERNEL); cxl_alloc_sst()
104 if (!ctx->sstp) { cxl_alloc_sst()
108 pr_devel("SSTP allocated at 0x%p\n", ctx->sstp); cxl_alloc_sst()
110 vsid = get_kernel_vsid((u64)ctx->sstp, mmu_kernel_ssize) << 12; cxl_alloc_sst()
115 size = (((u64)ctx->sst_size >> 8) - 1) << CXL_SSTP0_An_SegTableSize_SHIFT; cxl_alloc_sst()
129 sstp1 |= (u64)ctx->sstp & ea_mask; cxl_alloc_sst()
133 (u64)ctx->sstp, (u64)ctx->sstp & ESID_MASK, mmu_kernel_ssize, vsid, sstp0, sstp1); cxl_alloc_sst()
136 ctx->sstp0 = sstp0; cxl_alloc_sst()
137 ctx->sstp1 = sstp1; cxl_alloc_sst()
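
cxl_alloc_sst() above encodes the table size into SSTP0 as ((sst_size >> 8) - 1) shifted into the SegTableSize field, i.e. the register holds the count of 256-byte units minus one. A small encode/decode check of that representation (the shift and mask here are illustrative, not the real field layout):

#include <stdint.h>
#include <stdio.h>

#define SEG_TABLE_SIZE_SHIFT 24               /* illustrative field position */

/* register stores (bytes / 256) - 1, as in cxl_alloc_sst() */
static uint64_t encode_sst_size(uint64_t bytes)
{
    return ((bytes >> 8) - 1) << SEG_TABLE_SIZE_SHIFT;
}

static uint64_t decode_sst_size(uint64_t field)
{
    return (((field >> SEG_TABLE_SIZE_SHIFT) & 0xff) + 1) << 8;
}

int main(void)
{
    uint64_t page = 4096;                     /* ctx->sst_size = PAGE_SIZE */
    uint64_t enc = encode_sst_size(page);

    printf("encoded=%#llx decoded=%llu\n",
           (unsigned long long)enc, (unsigned long long)decode_sst_size(enc));
    return 0;
}
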
H A Dfile.c52 struct cxl_context *ctx; __afu_open() local
76 if (!(ctx = cxl_context_alloc())) { __afu_open()
81 if ((rc = cxl_context_init(ctx, afu, master, inode->i_mapping))) __afu_open()
84 pr_devel("afu_open pe: %i\n", ctx->pe); __afu_open()
85 file->private_data = ctx; __afu_open()
111 struct cxl_context *ctx = file->private_data; afu_release() local
114 __func__, ctx->pe); afu_release()
115 cxl_context_detach(ctx); afu_release()
117 mutex_lock(&ctx->mapping_lock); afu_release()
118 ctx->mapping = NULL; afu_release()
119 mutex_unlock(&ctx->mapping_lock); afu_release()
121 put_device(&ctx->afu->dev); afu_release()
129 cxl_context_free(ctx); afu_release()
135 static long afu_ioctl_start_work(struct cxl_context *ctx, afu_ioctl_start_work() argument
142 pr_devel("%s: pe: %i\n", __func__, ctx->pe); afu_ioctl_start_work()
152 mutex_lock(&ctx->status_mutex); afu_ioctl_start_work()
153 if (ctx->status != OPENED) { afu_ioctl_start_work()
170 work.num_interrupts = ctx->afu->pp_irqs; afu_ioctl_start_work()
171 else if ((work.num_interrupts < ctx->afu->pp_irqs) || afu_ioctl_start_work()
172 (work.num_interrupts > ctx->afu->irqs_max)) { afu_ioctl_start_work()
176 if ((rc = afu_register_irqs(ctx, work.num_interrupts))) afu_ioctl_start_work()
188 ctx->pid = get_pid(get_task_pid(current, PIDTYPE_PID)); afu_ioctl_start_work()
190 trace_cxl_attach(ctx, work.work_element_descriptor, work.num_interrupts, amr); afu_ioctl_start_work()
192 if ((rc = cxl_attach_process(ctx, false, work.work_element_descriptor, afu_ioctl_start_work()
194 afu_release_irqs(ctx); afu_ioctl_start_work()
198 ctx->status = STARTED; afu_ioctl_start_work()
201 mutex_unlock(&ctx->status_mutex); afu_ioctl_start_work()
204 static long afu_ioctl_process_element(struct cxl_context *ctx, afu_ioctl_process_element() argument
207 pr_devel("%s: pe: %i\n", __func__, ctx->pe); afu_ioctl_process_element()
209 if (copy_to_user(upe, &ctx->pe, sizeof(__u32))) afu_ioctl_process_element()
217 struct cxl_context *ctx = file->private_data; afu_ioctl() local
219 if (ctx->status == CLOSED) afu_ioctl()
225 return afu_ioctl_start_work(ctx, (struct cxl_ioctl_start_work __user *)arg); afu_ioctl()
227 return afu_ioctl_process_element(ctx, (__u32 __user *)arg); afu_ioctl()
240 struct cxl_context *ctx = file->private_data; afu_mmap() local
243 if (ctx->status != STARTED) afu_mmap()
246 return cxl_context_iomap(ctx, vm); afu_mmap()
251 struct cxl_context *ctx = file->private_data; afu_poll() local
256 poll_wait(file, &ctx->wq, poll); afu_poll()
258 pr_devel("afu_poll wait done pe: %i\n", ctx->pe); afu_poll()
260 spin_lock_irqsave(&ctx->lock, flags); afu_poll()
261 if (ctx->pending_irq || ctx->pending_fault || afu_poll()
262 ctx->pending_afu_err) afu_poll()
264 else if (ctx->status == CLOSED) afu_poll()
268 spin_unlock_irqrestore(&ctx->lock, flags); afu_poll()
270 pr_devel("afu_poll pe: %i returning %#x\n", ctx->pe, mask); afu_poll()
275 static inline int ctx_event_pending(struct cxl_context *ctx) ctx_event_pending() argument
277 return (ctx->pending_irq || ctx->pending_fault || ctx_event_pending()
278 ctx->pending_afu_err || (ctx->status == CLOSED)); ctx_event_pending()
284 struct cxl_context *ctx = file->private_data; afu_read() local
293 spin_lock_irqsave(&ctx->lock, flags); afu_read()
296 prepare_to_wait(&ctx->wq, &wait, TASK_INTERRUPTIBLE); afu_read()
297 if (ctx_event_pending(ctx)) afu_read()
310 spin_unlock_irqrestore(&ctx->lock, flags); afu_read()
314 spin_lock_irqsave(&ctx->lock, flags); afu_read()
317 finish_wait(&ctx->wq, &wait); afu_read()
320 event.header.process_element = ctx->pe; afu_read()
322 if (ctx->pending_irq) { afu_read()
326 event.irq.irq = find_first_bit(ctx->irq_bitmap, ctx->irq_count) + 1; afu_read()
327 clear_bit(event.irq.irq - 1, ctx->irq_bitmap); afu_read()
328 if (bitmap_empty(ctx->irq_bitmap, ctx->irq_count)) afu_read()
329 ctx->pending_irq = false; afu_read()
330 } else if (ctx->pending_fault) { afu_read()
334 event.fault.addr = ctx->fault_addr; afu_read()
335 event.fault.dsisr = ctx->fault_dsisr; afu_read()
336 ctx->pending_fault = false; afu_read()
337 } else if (ctx->pending_afu_err) { afu_read()
341 event.afu_error.error = ctx->afu_err; afu_read()
342 ctx->pending_afu_err = false; afu_read()
343 } else if (ctx->status == CLOSED) { afu_read()
345 spin_unlock_irqrestore(&ctx->lock, flags); afu_read()
350 spin_unlock_irqrestore(&ctx->lock, flags); afu_read()
357 finish_wait(&ctx->wq, &wait); afu_read()
358 spin_unlock_irqrestore(&ctx->lock, flags); afu_read()
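
afu_read() above is the canonical kernel wait loop: take the lock, prepare_to_wait() on ctx->wq, re-check ctx_event_pending() before sleeping so a wakeup arriving between the check and the sleep cannot be lost, then finish_wait() on exit. In userspace the same shape falls out of a mutex plus condition variable; a runnable sketch (compile with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  wq   = PTHREAD_COND_INITIALIZER;
static int pending_irq;                       /* the "event" */

static void *reader(void *arg)
{
    pthread_mutex_lock(&lock);
    while (!pending_irq)                      /* re-check before every sleep */
        pthread_cond_wait(&wq, &lock);        /* atomically unlock + sleep */
    printf("reader: got event\n");
    pending_irq = 0;
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, reader, NULL);
    usleep(10000);

    pthread_mutex_lock(&lock);                /* the "interrupt" side */
    pending_irq = 1;
    pthread_mutex_unlock(&lock);
    pthread_cond_broadcast(&wq);              /* wake_up_all(&ctx->wq) */

    pthread_join(t, NULL);
    return 0;
}
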
/linux-4.1.27/drivers/media/platform/s5p-mfc/
H A Ds5p_mfc_opr_v5.c37 static int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_alloc_dec_temp_buffers_v5() argument
39 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_alloc_dec_temp_buffers_v5()
43 ctx->dsc.size = buf_size->dsc; s5p_mfc_alloc_dec_temp_buffers_v5()
44 ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->dsc); s5p_mfc_alloc_dec_temp_buffers_v5()
50 BUG_ON(ctx->dsc.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); s5p_mfc_alloc_dec_temp_buffers_v5()
51 memset(ctx->dsc.virt, 0, ctx->dsc.size); s5p_mfc_alloc_dec_temp_buffers_v5()
58 static void s5p_mfc_release_dec_desc_buffer_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_release_dec_desc_buffer_v5() argument
60 s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->dsc); s5p_mfc_release_dec_desc_buffer_v5()
64 static int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_alloc_codec_buffers_v5() argument
66 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_alloc_codec_buffers_v5()
72 if (ctx->type == MFCINST_DECODER) { s5p_mfc_alloc_codec_buffers_v5()
74 ctx->luma_size, ctx->chroma_size, ctx->mv_size); s5p_mfc_alloc_codec_buffers_v5()
75 mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count); s5p_mfc_alloc_codec_buffers_v5()
76 } else if (ctx->type == MFCINST_ENCODER) { s5p_mfc_alloc_codec_buffers_v5()
77 enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN) s5p_mfc_alloc_codec_buffers_v5()
78 * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN); s5p_mfc_alloc_codec_buffers_v5()
81 if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) { s5p_mfc_alloc_codec_buffers_v5()
82 enc_ref_c_size = ALIGN(ctx->img_width, s5p_mfc_alloc_codec_buffers_v5()
84 * ALIGN(ctx->img_height >> 1, s5p_mfc_alloc_codec_buffers_v5()
89 guard_width = ALIGN(ctx->img_width + 16, s5p_mfc_alloc_codec_buffers_v5()
91 guard_height = ALIGN((ctx->img_height >> 1) + 4, s5p_mfc_alloc_codec_buffers_v5()
102 switch (ctx->codec_mode) { s5p_mfc_alloc_codec_buffers_v5()
104 ctx->bank1.size = s5p_mfc_alloc_codec_buffers_v5()
108 ctx->bank2.size = ctx->total_dpb_count * ctx->mv_size; s5p_mfc_alloc_codec_buffers_v5()
111 ctx->bank1.size = s5p_mfc_alloc_codec_buffers_v5()
118 ctx->bank2.size = 0; s5p_mfc_alloc_codec_buffers_v5()
122 ctx->bank1.size = s5p_mfc_alloc_codec_buffers_v5()
129 ctx->bank2.size = 0; s5p_mfc_alloc_codec_buffers_v5()
132 ctx->bank1.size = 0; s5p_mfc_alloc_codec_buffers_v5()
133 ctx->bank2.size = 0; s5p_mfc_alloc_codec_buffers_v5()
136 ctx->bank1.size = s5p_mfc_alloc_codec_buffers_v5()
142 ctx->bank2.size = 0; s5p_mfc_alloc_codec_buffers_v5()
145 ctx->bank1.size = (enc_ref_y_size * 2) + s5p_mfc_alloc_codec_buffers_v5()
150 ctx->bank2.size = (enc_ref_y_size * 2) + s5p_mfc_alloc_codec_buffers_v5()
155 ctx->bank1.size = (enc_ref_y_size * 2) + s5p_mfc_alloc_codec_buffers_v5()
159 ctx->bank2.size = (enc_ref_y_size * 2) + s5p_mfc_alloc_codec_buffers_v5()
163 ctx->bank1.size = (enc_ref_y_size * 2) + s5p_mfc_alloc_codec_buffers_v5()
166 ctx->bank2.size = (enc_ref_y_size * 2) + s5p_mfc_alloc_codec_buffers_v5()
173 if (ctx->bank1.size > 0) { s5p_mfc_alloc_codec_buffers_v5()
175 ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1); s5p_mfc_alloc_codec_buffers_v5()
180 BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); s5p_mfc_alloc_codec_buffers_v5()
183 if (ctx->bank2.size > 0) { s5p_mfc_alloc_codec_buffers_v5()
184 ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_r, &ctx->bank2); s5p_mfc_alloc_codec_buffers_v5()
187 s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1); s5p_mfc_alloc_codec_buffers_v5()
190 BUG_ON(ctx->bank2.dma & ((1 << MFC_BANK2_ALIGN_ORDER) - 1)); s5p_mfc_alloc_codec_buffers_v5()
196 static void s5p_mfc_release_codec_buffers_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_release_codec_buffers_v5() argument
198 s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1); s5p_mfc_release_codec_buffers_v5()
199 s5p_mfc_release_priv_buf(ctx->dev->mem_dev_r, &ctx->bank2); s5p_mfc_release_codec_buffers_v5()
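
All of the bank-size arithmetic above funnels through ALIGN(). A self-contained userspace illustration of what it computes (the 16-pixel/16-line alignments below are placeholders, not the S5P_FIMV_* constants from the driver headers):

	#include <stdio.h>

	/* round x up to the next multiple of a; a must be a power of two */
	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int w = 1920, h = 1080;
		unsigned int ref_y = ALIGN(w, 16) * ALIGN(h, 16);

		/* 1080 rounds up to 1088, so ref_y = 1920 * 1088 = 2088960 */
		printf("aligned reference luma size: %u bytes\n", ref_y);
		return 0;
	}
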
203 static int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_alloc_instance_buffer_v5() argument
205 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_alloc_instance_buffer_v5()
209 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC || s5p_mfc_alloc_instance_buffer_v5()
210 ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) s5p_mfc_alloc_instance_buffer_v5()
211 ctx->ctx.size = buf_size->h264_ctx; s5p_mfc_alloc_instance_buffer_v5()
213 ctx->ctx.size = buf_size->non_h264_ctx; s5p_mfc_alloc_instance_buffer_v5()
215 ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx); s5p_mfc_alloc_instance_buffer_v5()
220 ctx->ctx.ofs = OFFSETA(ctx->ctx.dma); s5p_mfc_alloc_instance_buffer_v5()
223 memset(ctx->ctx.virt, 0, ctx->ctx.size); s5p_mfc_alloc_instance_buffer_v5()
227 ctx->shm.size = buf_size->shm; s5p_mfc_alloc_instance_buffer_v5()
228 ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->shm); s5p_mfc_alloc_instance_buffer_v5()
231 s5p_mfc_release_priv_buf(dev->mem_dev_l, &ctx->ctx); s5p_mfc_alloc_instance_buffer_v5()
236 ctx->shm.ofs = ctx->shm.dma - dev->bank1; s5p_mfc_alloc_instance_buffer_v5()
237 BUG_ON(ctx->shm.ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); s5p_mfc_alloc_instance_buffer_v5()
239 memset(ctx->shm.virt, 0, buf_size->shm); s5p_mfc_alloc_instance_buffer_v5()
245 static void s5p_mfc_release_instance_buffer_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_release_instance_buffer_v5() argument
247 s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->ctx); s5p_mfc_release_instance_buffer_v5()
248 s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->shm); s5p_mfc_release_instance_buffer_v5()
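
s5p_mfc_alloc_instance_buffer_v5() above shows the usual two-step allocate-and-unwind idiom: if the second buffer fails, the first is released before the error propagates. Reduced to its skeleton (the exact return codes are assumed):

	ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx);
	if (ret)
		return ret;			/* nothing allocated yet, nothing to undo */

	ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->shm);
	if (ret) {
		/* undo the first allocation before reporting failure */
		s5p_mfc_release_priv_buf(dev->mem_dev_l, &ctx->ctx);
		return ret;
	}
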
263 static void s5p_mfc_write_info_v5(struct s5p_mfc_ctx *ctx, unsigned int data, s5p_mfc_write_info_v5() argument
266 writel(data, (void *)(ctx->shm.virt + ofs)); s5p_mfc_write_info_v5()
270 static unsigned int s5p_mfc_read_info_v5(struct s5p_mfc_ctx *ctx, s5p_mfc_read_info_v5() argument
274 return readl((void *)(ctx->shm.virt + ofs)); s5p_mfc_read_info_v5()
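
These two helpers are the only access path to the shared-memory block, and the s5p_mfc_set_enc_params*() functions further down use them in a read-modify-write cycle. A minimal sketch of that cycle (SOME_BIT is a hypothetical placeholder, not a real field from the driver headers):

	shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);	/* fetch the current word */
	shm &= ~(1u << SOME_BIT);		/* clear the field (bit name made up) */
	shm |= (enable & 1u) << SOME_BIT;	/* splice in the new value */
	s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);	/* store it back */
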
277 static void s5p_mfc_dec_calc_dpb_size_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_dec_calc_dpb_size_v5() argument
281 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN); s5p_mfc_dec_calc_dpb_size_v5()
282 ctx->buf_height = ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN); s5p_mfc_dec_calc_dpb_size_v5()
285 ctx->img_width, ctx->img_height, ctx->buf_width, s5p_mfc_dec_calc_dpb_size_v5()
286 ctx->buf_height); s5p_mfc_dec_calc_dpb_size_v5()
288 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) { s5p_mfc_dec_calc_dpb_size_v5()
289 ctx->luma_size = ALIGN(ctx->buf_width * ctx->buf_height, s5p_mfc_dec_calc_dpb_size_v5()
291 ctx->chroma_size = ALIGN(ctx->buf_width * s5p_mfc_dec_calc_dpb_size_v5()
292 ALIGN((ctx->img_height >> 1), s5p_mfc_dec_calc_dpb_size_v5()
295 ctx->mv_size = ALIGN(ctx->buf_width * s5p_mfc_dec_calc_dpb_size_v5()
296 ALIGN((ctx->buf_height >> 2), s5p_mfc_dec_calc_dpb_size_v5()
301 ALIGN(ctx->img_width + 24, S5P_FIMV_NV12MT_HALIGN); s5p_mfc_dec_calc_dpb_size_v5()
303 ALIGN(ctx->img_height + 16, S5P_FIMV_NV12MT_VALIGN); s5p_mfc_dec_calc_dpb_size_v5()
304 ctx->luma_size = ALIGN(guard_width * guard_height, s5p_mfc_dec_calc_dpb_size_v5()
308 ALIGN(ctx->img_width + 16, S5P_FIMV_NV12MT_HALIGN); s5p_mfc_dec_calc_dpb_size_v5()
310 ALIGN((ctx->img_height >> 1) + 4, s5p_mfc_dec_calc_dpb_size_v5()
312 ctx->chroma_size = ALIGN(guard_width * guard_height, s5p_mfc_dec_calc_dpb_size_v5()
315 ctx->mv_size = 0; s5p_mfc_dec_calc_dpb_size_v5()
319 static void s5p_mfc_enc_calc_src_size_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_enc_calc_src_size_v5() argument
321 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) { s5p_mfc_enc_calc_src_size_v5()
322 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN); s5p_mfc_enc_calc_src_size_v5()
324 ctx->luma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN) s5p_mfc_enc_calc_src_size_v5()
325 * ALIGN(ctx->img_height, S5P_FIMV_NV12M_LVALIGN); s5p_mfc_enc_calc_src_size_v5()
326 ctx->chroma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN) s5p_mfc_enc_calc_src_size_v5()
327 * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12M_CVALIGN); s5p_mfc_enc_calc_src_size_v5()
329 ctx->luma_size = ALIGN(ctx->luma_size, S5P_FIMV_NV12M_SALIGN); s5p_mfc_enc_calc_src_size_v5()
330 ctx->chroma_size = s5p_mfc_enc_calc_src_size_v5()
331 ALIGN(ctx->chroma_size, S5P_FIMV_NV12M_SALIGN); s5p_mfc_enc_calc_src_size_v5()
332 } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) { s5p_mfc_enc_calc_src_size_v5()
333 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN); s5p_mfc_enc_calc_src_size_v5()
335 ctx->luma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN) s5p_mfc_enc_calc_src_size_v5()
336 * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN); s5p_mfc_enc_calc_src_size_v5()
337 ctx->chroma_size = s5p_mfc_enc_calc_src_size_v5()
338 ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN) s5p_mfc_enc_calc_src_size_v5()
339 * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN); s5p_mfc_enc_calc_src_size_v5()
341 ctx->luma_size = ALIGN(ctx->luma_size, S5P_FIMV_NV12MT_SALIGN); s5p_mfc_enc_calc_src_size_v5()
342 ctx->chroma_size = s5p_mfc_enc_calc_src_size_v5()
343 ALIGN(ctx->chroma_size, S5P_FIMV_NV12MT_SALIGN); s5p_mfc_enc_calc_src_size_v5()
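
The NV12M branch above encodes the 4:2:0 layout: the chroma plane spans the full width (interleaved Cb/Cr) but only half the height. A self-contained userspace check of that arithmetic (the alignment values are placeholders for the S5P_FIMV_* constants):

	#include <stdio.h>

	#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

	int main(void)
	{
		unsigned int w = 1280, h = 720;
		unsigned int luma   = ALIGN(w, 16) * ALIGN(h, 16);
		unsigned int chroma = ALIGN(w, 16) * ALIGN(h >> 1, 16);

		/* chroma comes out at roughly half the luma size */
		printf("luma=%u chroma=%u\n", luma, chroma);
		return 0;
	}
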
348 static void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx) s5p_mfc_set_dec_desc_buffer() argument
350 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_dec_desc_buffer()
353 mfc_write(dev, OFFSETA(ctx->dsc.dma), S5P_FIMV_SI_CH0_DESC_ADR); s5p_mfc_set_dec_desc_buffer()
358 static void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx) s5p_mfc_set_shared_buffer() argument
360 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_shared_buffer()
361 mfc_write(dev, ctx->shm.ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR); s5p_mfc_set_shared_buffer()
365 static int s5p_mfc_set_dec_stream_buffer_v5(struct s5p_mfc_ctx *ctx, s5p_mfc_set_dec_stream_buffer_v5() argument
369 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_dec_stream_buffer_v5()
372 mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE); s5p_mfc_set_dec_stream_buffer_v5()
374 s5p_mfc_write_info_v5(ctx, start_num_byte, START_BYTE_NUM); s5p_mfc_set_dec_stream_buffer_v5()
379 static int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_set_dec_frame_buffer_v5() argument
383 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_dec_frame_buffer_v5()
388 buf_addr1 = ctx->bank1.dma; s5p_mfc_set_dec_frame_buffer_v5()
389 buf_size1 = ctx->bank1.size; s5p_mfc_set_dec_frame_buffer_v5()
390 buf_addr2 = ctx->bank2.dma; s5p_mfc_set_dec_frame_buffer_v5()
391 buf_size2 = ctx->bank2.size; s5p_mfc_set_dec_frame_buffer_v5()
394 mfc_write(dev, ctx->total_dpb_count | dpb, s5p_mfc_set_dec_frame_buffer_v5()
396 s5p_mfc_set_shared_buffer(ctx); s5p_mfc_set_dec_frame_buffer_v5()
397 switch (ctx->codec_mode) { s5p_mfc_set_dec_frame_buffer_v5()
466 ctx->codec_mode); s5p_mfc_set_dec_frame_buffer_v5()
469 frame_size_lu = ctx->luma_size; s5p_mfc_set_dec_frame_buffer_v5()
470 frame_size_ch = ctx->chroma_size; s5p_mfc_set_dec_frame_buffer_v5()
471 frame_size_mv = ctx->mv_size; s5p_mfc_set_dec_frame_buffer_v5()
474 for (i = 0; i < ctx->total_dpb_count; i++) { s5p_mfc_set_dec_frame_buffer_v5()
477 ctx->dst_bufs[i].cookie.raw.luma); s5p_mfc_set_dec_frame_buffer_v5()
478 mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma), s5p_mfc_set_dec_frame_buffer_v5()
481 ctx->dst_bufs[i].cookie.raw.chroma); s5p_mfc_set_dec_frame_buffer_v5()
482 mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma), s5p_mfc_set_dec_frame_buffer_v5()
484 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) { s5p_mfc_set_dec_frame_buffer_v5()
495 buf_size1, buf_size2, ctx->total_dpb_count); s5p_mfc_set_dec_frame_buffer_v5()
500 s5p_mfc_write_info_v5(ctx, frame_size_lu, ALLOC_LUMA_DPB_SIZE); s5p_mfc_set_dec_frame_buffer_v5()
501 s5p_mfc_write_info_v5(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE); s5p_mfc_set_dec_frame_buffer_v5()
502 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) s5p_mfc_set_dec_frame_buffer_v5()
503 s5p_mfc_write_info_v5(ctx, frame_size_mv, ALLOC_MV_SIZE); s5p_mfc_set_dec_frame_buffer_v5()
505 << S5P_FIMV_CH_SHIFT) | (ctx->inst_no), s5p_mfc_set_dec_frame_buffer_v5()
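
The per-buffer loop above programs one luma/chroma address pair for every decoded picture buffer. A stripped-down sketch of its shape (the register slot names and the 4-byte stride are assumptions for illustration, not verified register definitions):

	for (i = 0; i < ctx->total_dpb_count; i++) {
		/* luma plane address, translated into the bank's offset space */
		mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
			  LUMA_ADR_SLOT + i * 4);	/* slot name hypothetical */
		/* matching chroma plane address */
		mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
			  CHROMA_ADR_SLOT + i * 4);	/* slot name hypothetical */
	}
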
511 static int s5p_mfc_set_enc_stream_buffer_v5(struct s5p_mfc_ctx *ctx, s5p_mfc_set_enc_stream_buffer_v5() argument
514 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_stream_buffer_v5()
521 static void s5p_mfc_set_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx, s5p_mfc_set_enc_frame_buffer_v5() argument
524 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_frame_buffer_v5()
530 static void s5p_mfc_get_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx, s5p_mfc_get_enc_frame_buffer_v5() argument
533 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_get_enc_frame_buffer_v5()
542 static int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_ref_buffer_v5() argument
544 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_ref_buffer_v5()
551 buf_addr1 = ctx->bank1.dma; s5p_mfc_set_enc_ref_buffer_v5()
552 buf_size1 = ctx->bank1.size; s5p_mfc_set_enc_ref_buffer_v5()
553 buf_addr2 = ctx->bank2.dma; s5p_mfc_set_enc_ref_buffer_v5()
554 buf_size2 = ctx->bank2.size; s5p_mfc_set_enc_ref_buffer_v5()
555 enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN) s5p_mfc_set_enc_ref_buffer_v5()
556 * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN); s5p_mfc_set_enc_ref_buffer_v5()
558 if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) { s5p_mfc_set_enc_ref_buffer_v5()
559 enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN) s5p_mfc_set_enc_ref_buffer_v5()
560 * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN); s5p_mfc_set_enc_ref_buffer_v5()
563 guard_width = ALIGN(ctx->img_width + 16, s5p_mfc_set_enc_ref_buffer_v5()
565 guard_height = ALIGN((ctx->img_height >> 1) + 4, s5p_mfc_set_enc_ref_buffer_v5()
571 switch (ctx->codec_mode) { s5p_mfc_set_enc_ref_buffer_v5()
671 ctx->codec_mode); s5p_mfc_set_enc_ref_buffer_v5()
677 static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_params() argument
679 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_params()
680 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_set_enc_params()
685 mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX); s5p_mfc_set_enc_params()
687 mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX); s5p_mfc_set_enc_params()
709 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) s5p_mfc_set_enc_params()
711 else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) s5p_mfc_set_enc_params()
747 shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL); s5p_mfc_set_enc_params()
754 s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL); s5p_mfc_set_enc_params()
756 s5p_mfc_write_info_v5(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG); s5p_mfc_set_enc_params()
760 static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_params_h264() argument
762 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_params_h264()
763 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_set_enc_params_h264()
768 s5p_mfc_set_enc_params(ctx); s5p_mfc_set_enc_params_h264()
788 mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX); s5p_mfc_set_enc_params_h264()
867 shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP); s5p_mfc_set_enc_params_h264()
871 s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP); s5p_mfc_set_enc_params_h264()
874 shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL); s5p_mfc_set_enc_params_h264()
878 s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL); s5p_mfc_set_enc_params_h264()
881 shm = s5p_mfc_read_info_v5(ctx, SAMPLE_ASPECT_RATIO_IDC); s5p_mfc_set_enc_params_h264()
884 s5p_mfc_write_info_v5(ctx, shm, SAMPLE_ASPECT_RATIO_IDC); s5p_mfc_set_enc_params_h264()
887 shm = s5p_mfc_read_info_v5(ctx, EXTENDED_SAR); s5p_mfc_set_enc_params_h264()
891 s5p_mfc_write_info_v5(ctx, shm, EXTENDED_SAR); s5p_mfc_set_enc_params_h264()
895 shm = s5p_mfc_read_info_v5(ctx, H264_I_PERIOD); s5p_mfc_set_enc_params_h264()
904 s5p_mfc_write_info_v5(ctx, shm, H264_I_PERIOD); s5p_mfc_set_enc_params_h264()
906 shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL); s5p_mfc_set_enc_params_h264()
913 s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL); s5p_mfc_set_enc_params_h264()
917 static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_params_mpeg4() argument
919 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_params_mpeg4()
920 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_set_enc_params_mpeg4()
926 s5p_mfc_set_enc_params(ctx); s5p_mfc_set_enc_params_mpeg4()
946 shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP); s5p_mfc_set_enc_params_mpeg4()
950 s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP); s5p_mfc_set_enc_params_mpeg4()
959 shm = s5p_mfc_read_info_v5(ctx, RC_VOP_TIMING); s5p_mfc_set_enc_params_mpeg4()
964 s5p_mfc_write_info_v5(ctx, shm, RC_VOP_TIMING); s5p_mfc_set_enc_params_mpeg4()
985 shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL); s5p_mfc_set_enc_params_mpeg4()
992 s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL); s5p_mfc_set_enc_params_mpeg4()
996 static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_params_h263() argument
998 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_params_h263()
999 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_set_enc_params_h263()
1004 s5p_mfc_set_enc_params(ctx); s5p_mfc_set_enc_params_h263()
1007 shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP); s5p_mfc_set_enc_params_h263()
1010 s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP); s5p_mfc_set_enc_params_h263()
1034 shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL); s5p_mfc_set_enc_params_h263()
1041 s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL); s5p_mfc_set_enc_params_h263()
1046 static int s5p_mfc_init_decode_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_init_decode_v5() argument
1048 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_init_decode_v5()
1050 s5p_mfc_set_shared_buffer(ctx); s5p_mfc_init_decode_v5()
1052 if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_DEC) s5p_mfc_init_decode_v5()
1053 mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL); s5p_mfc_init_decode_v5()
1056 mfc_write(dev, ((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK) << s5p_mfc_init_decode_v5()
1057 S5P_FIMV_SLICE_INT_SHIFT) | (ctx->display_delay_enable << s5p_mfc_init_decode_v5()
1058 S5P_FIMV_DDELAY_ENA_SHIFT) | ((ctx->display_delay & s5p_mfc_init_decode_v5()
1063 | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID); s5p_mfc_init_decode_v5()
1067 static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush) s5p_mfc_set_flush() argument
1069 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_flush()
1082 static int s5p_mfc_decode_one_frame_v5(struct s5p_mfc_ctx *ctx, s5p_mfc_decode_one_frame_v5() argument
1085 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_decode_one_frame_v5()
1087 mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF); s5p_mfc_decode_one_frame_v5()
1088 s5p_mfc_set_shared_buffer(ctx); s5p_mfc_decode_one_frame_v5()
1089 s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag); s5p_mfc_decode_one_frame_v5()
1095 S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID); s5p_mfc_decode_one_frame_v5()
1099 S5P_FIMV_CH_SHIFT) | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID); s5p_mfc_decode_one_frame_v5()
1103 S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT) | (ctx->inst_no), s5p_mfc_decode_one_frame_v5()
1111 static int s5p_mfc_init_encode_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_init_encode_v5() argument
1113 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_init_encode_v5()
1115 if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) s5p_mfc_init_encode_v5()
1116 s5p_mfc_set_enc_params_h264(ctx); s5p_mfc_init_encode_v5()
1117 else if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_ENC) s5p_mfc_init_encode_v5()
1118 s5p_mfc_set_enc_params_mpeg4(ctx); s5p_mfc_init_encode_v5()
1119 else if (ctx->codec_mode == S5P_MFC_CODEC_H263_ENC) s5p_mfc_init_encode_v5()
1120 s5p_mfc_set_enc_params_h263(ctx); s5p_mfc_init_encode_v5()
1123 ctx->codec_mode); s5p_mfc_init_encode_v5()
1126 s5p_mfc_set_shared_buffer(ctx); s5p_mfc_init_encode_v5()
1128 (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID); s5p_mfc_init_encode_v5()
1133 static int s5p_mfc_encode_one_frame_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_encode_one_frame_v5() argument
1135 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_encode_one_frame_v5()
1138 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) s5p_mfc_encode_one_frame_v5()
1140 else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) s5p_mfc_encode_one_frame_v5()
1142 s5p_mfc_set_shared_buffer(ctx); s5p_mfc_encode_one_frame_v5()
1144 if (ctx->state == MFCINST_FINISHING) s5p_mfc_encode_one_frame_v5()
1149 | (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID); s5p_mfc_encode_one_frame_v5()
1175 static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx) s5p_mfc_run_res_change() argument
1177 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_res_change()
1179 s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0); s5p_mfc_run_res_change()
1180 dev->curr_ctx = ctx->num; s5p_mfc_run_res_change()
1181 s5p_mfc_decode_one_frame_v5(ctx, MFC_DEC_RES_CHANGE); s5p_mfc_run_res_change()
1184 static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame) s5p_mfc_run_dec_frame() argument
1186 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_dec_frame()
1190 if (ctx->state == MFCINST_FINISHING) { s5p_mfc_run_dec_frame()
1192 s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0); s5p_mfc_run_dec_frame()
1193 dev->curr_ctx = ctx->num; s5p_mfc_run_dec_frame()
1194 s5p_mfc_decode_one_frame_v5(ctx, last_frame); s5p_mfc_run_dec_frame()
1200 if (list_empty(&ctx->src_queue)) { s5p_mfc_run_dec_frame()
1206 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); s5p_mfc_run_dec_frame()
1208 s5p_mfc_set_dec_stream_buffer_v5(ctx, s5p_mfc_run_dec_frame()
1210 ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused); s5p_mfc_run_dec_frame()
1212 dev->curr_ctx = ctx->num; s5p_mfc_run_dec_frame()
1215 mfc_debug(2, "Setting ctx->state to FINISHING\n"); s5p_mfc_run_dec_frame()
1216 ctx->state = MFCINST_FINISHING; s5p_mfc_run_dec_frame()
1218 s5p_mfc_decode_one_frame_v5(ctx, last_frame); s5p_mfc_run_dec_frame()
1222 static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) s5p_mfc_run_enc_frame() argument
1224 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_enc_frame()
1232 if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) { s5p_mfc_run_enc_frame()
1237 if (list_empty(&ctx->dst_queue)) { s5p_mfc_run_enc_frame()
1242 if (list_empty(&ctx->src_queue)) { s5p_mfc_run_enc_frame()
1244 s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2, dev->bank2); s5p_mfc_run_enc_frame()
1247 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, s5p_mfc_run_enc_frame()
1252 s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2, s5p_mfc_run_enc_frame()
1254 ctx->state = MFCINST_FINISHING; s5p_mfc_run_enc_frame()
1260 s5p_mfc_set_enc_frame_buffer_v5(ctx, src_y_addr, s5p_mfc_run_enc_frame()
1263 ctx->state = MFCINST_FINISHING; s5p_mfc_run_enc_frame()
1266 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); s5p_mfc_run_enc_frame()
1270 s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); s5p_mfc_run_enc_frame()
1272 dev->curr_ctx = ctx->num; s5p_mfc_run_enc_frame()
1274 src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state); s5p_mfc_run_enc_frame()
1275 s5p_mfc_encode_one_frame_v5(ctx); s5p_mfc_run_enc_frame()
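
Every run_*() helper above consumes its queues the same way: an emptiness check, then a non-destructive peek at the oldest element. Assuming the standard <linux/list.h> API:

	if (list_empty(&ctx->src_queue))
		return -EAGAIN;		/* nothing queued; error code is an assumption */
	src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	/* equivalent modern spelling: list_first_entry(&ctx->src_queue, struct s5p_mfc_buf, list) */
	/* the buffer stays on the queue until the interrupt handler dequeues it */
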
1279 static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) s5p_mfc_run_init_dec() argument
1281 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_init_dec()
1288 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); s5p_mfc_run_init_dec()
1289 s5p_mfc_set_dec_desc_buffer(ctx); s5p_mfc_run_init_dec()
1291 s5p_mfc_set_dec_stream_buffer_v5(ctx, s5p_mfc_run_init_dec()
1295 dev->curr_ctx = ctx->num; s5p_mfc_run_init_dec()
1296 s5p_mfc_init_decode_v5(ctx); s5p_mfc_run_init_dec()
1299 static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) s5p_mfc_run_init_enc() argument
1301 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_init_enc()
1307 s5p_mfc_set_enc_ref_buffer_v5(ctx); s5p_mfc_run_init_enc()
1309 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); s5p_mfc_run_init_enc()
1312 s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size); s5p_mfc_run_init_enc()
1314 dev->curr_ctx = ctx->num; s5p_mfc_run_init_enc()
1315 s5p_mfc_init_encode_v5(ctx); s5p_mfc_run_init_enc()
1318 static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) s5p_mfc_run_init_dec_buffers() argument
1320 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_init_dec_buffers()
1329 if (ctx->capture_state != QUEUE_BUFS_MMAPED) { s5p_mfc_run_init_dec_buffers()
1336 if (list_empty(&ctx->src_queue)) { s5p_mfc_run_init_dec_buffers()
1342 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); s5p_mfc_run_init_dec_buffers()
1344 s5p_mfc_set_dec_stream_buffer_v5(ctx, s5p_mfc_run_init_dec_buffers()
1348 dev->curr_ctx = ctx->num; s5p_mfc_run_init_dec_buffers()
1349 ret = s5p_mfc_set_dec_frame_buffer_v5(ctx); s5p_mfc_run_init_dec_buffers()
1352 ctx->state = MFCINST_ERROR; s5p_mfc_run_init_dec_buffers()
1360 struct s5p_mfc_ctx *ctx; s5p_mfc_try_run_v5() local
1370 /* This is perfectly OK; the scheduled ctx should wait */ s5p_mfc_try_run_v5()
1382 mfc_debug(1, "No ctx is scheduled to be run\n"); s5p_mfc_try_run_v5()
1385 ctx = dev->ctx[new_ctx]; s5p_mfc_try_run_v5()
1386 /* Got context to run in ctx */ s5p_mfc_try_run_v5()
1392 s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_try_run_v5()
1394 if (ctx->type == MFCINST_DECODER) { s5p_mfc_try_run_v5()
1395 s5p_mfc_set_dec_desc_buffer(ctx); s5p_mfc_try_run_v5()
1396 switch (ctx->state) { s5p_mfc_try_run_v5()
1398 s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME); s5p_mfc_try_run_v5()
1401 ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME); s5p_mfc_try_run_v5()
1405 ctx); s5p_mfc_try_run_v5()
1409 ctx); s5p_mfc_try_run_v5()
1412 s5p_mfc_run_init_dec(ctx); s5p_mfc_try_run_v5()
1415 ret = s5p_mfc_run_init_dec_buffers(ctx); s5p_mfc_try_run_v5()
1419 s5p_mfc_run_res_change(ctx); s5p_mfc_try_run_v5()
1422 s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME); s5p_mfc_try_run_v5()
1426 ctx->capture_state = QUEUE_FREE; s5p_mfc_try_run_v5()
1428 s5p_mfc_run_init_dec(ctx); s5p_mfc_try_run_v5()
1433 } else if (ctx->type == MFCINST_ENCODER) { s5p_mfc_try_run_v5()
1434 switch (ctx->state) { s5p_mfc_try_run_v5()
1437 ret = s5p_mfc_run_enc_frame(ctx); s5p_mfc_try_run_v5()
1441 ctx); s5p_mfc_try_run_v5()
1445 ctx); s5p_mfc_try_run_v5()
1448 s5p_mfc_run_init_enc(ctx); s5p_mfc_try_run_v5()
1454 mfc_err("Invalid context type: %d\n", ctx->type); s5p_mfc_try_run_v5()
1519 static int s5p_mfc_get_disp_frame_type_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_get_disp_frame_type_v5() argument
1521 return (s5p_mfc_read_info_v5(ctx, DISP_PIC_FRAME_TYPE) >> s5p_mfc_get_disp_frame_type_v5()
1640 static int s5p_mfc_get_sei_avail_status_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_get_sei_avail_status_v5() argument
1642 return s5p_mfc_read_info_v5(ctx, FRAME_PACK_SEI_AVAIL); s5p_mfc_get_sei_avail_status_v5()
1655 static unsigned int s5p_mfc_get_pic_type_top_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_get_pic_type_top_v5() argument
1657 return s5p_mfc_read_info_v5(ctx, PIC_TIME_TOP); s5p_mfc_get_pic_type_top_v5()
1660 static unsigned int s5p_mfc_get_pic_type_bot_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_get_pic_type_bot_v5() argument
1662 return s5p_mfc_read_info_v5(ctx, PIC_TIME_BOT); s5p_mfc_get_pic_type_bot_v5()
1665 static unsigned int s5p_mfc_get_crop_info_h_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_get_crop_info_h_v5() argument
1667 return s5p_mfc_read_info_v5(ctx, CROP_INFO_H); s5p_mfc_get_crop_info_h_v5()
1670 static unsigned int s5p_mfc_get_crop_info_v_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_get_crop_info_v_v5() argument
1672 return s5p_mfc_read_info_v5(ctx, CROP_INFO_V); s5p_mfc_get_crop_info_v_v5()
H A Ds5p_mfc_intr.c54 int s5p_mfc_wait_for_done_ctx(struct s5p_mfc_ctx *ctx, s5p_mfc_wait_for_done_ctx() argument
60 ret = wait_event_interruptible_timeout(ctx->queue, s5p_mfc_wait_for_done_ctx()
61 (ctx->int_cond && (ctx->int_type == command s5p_mfc_wait_for_done_ctx()
62 || ctx->int_type == S5P_MFC_R2H_CMD_ERR_RET)), s5p_mfc_wait_for_done_ctx()
65 ret = wait_event_timeout(ctx->queue, s5p_mfc_wait_for_done_ctx()
66 (ctx->int_cond && (ctx->int_type == command s5p_mfc_wait_for_done_ctx()
67 || ctx->int_type == S5P_MFC_R2H_CMD_ERR_RET)), s5p_mfc_wait_for_done_ctx()
71 mfc_err("Interrupt (ctx->int_type:%d, command:%d) timed out\n", s5p_mfc_wait_for_done_ctx()
72 ctx->int_type, command); s5p_mfc_wait_for_done_ctx()
78 mfc_debug(1, "Finished waiting (ctx->int_type:%d, command: %d)\n", s5p_mfc_wait_for_done_ctx()
79 ctx->int_type, command); s5p_mfc_wait_for_done_ctx()
80 if (ctx->int_type == S5P_MFC_R2H_CMD_ERR_RET) s5p_mfc_wait_for_done_ctx()
85 void s5p_mfc_clean_ctx_int_flags(struct s5p_mfc_ctx *ctx) s5p_mfc_clean_ctx_int_flags() argument
87 ctx->int_cond = 0; s5p_mfc_clean_ctx_int_flags()
88 ctx->int_type = 0; s5p_mfc_clean_ctx_int_flags()
89 ctx->int_err = 0; s5p_mfc_clean_ctx_int_flags()
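
s5p_mfc_wait_for_done_ctx() above picks between the interruptible and non-interruptible wait flavours. A minimal caller-side sketch, assuming the standard <linux/wait.h> macros (the timeout constant's name is taken on faith from the surrounding driver code):

	ret = wait_event_interruptible_timeout(ctx->queue,
			ctx->int_cond,				/* wake condition */
			msecs_to_jiffies(MFC_INT_TIMEOUT));	/* name assumed */
	if (ret == 0)
		;	/* timed out: the condition never became true */
	else if (ret < 0)
		;	/* -ERESTARTSYS: a signal interrupted the sleep */
	else
		;	/* condition met; ret is the remaining jiffies */
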
H A Ds5p_mfc_dec.c224 static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) s5p_mfc_ctx_ready() argument
227 if (ctx->src_queue_cnt >= 1 && ctx->state == MFCINST_GOT_INST) s5p_mfc_ctx_ready()
230 if (ctx->src_queue_cnt >= 1 && s5p_mfc_ctx_ready()
231 ctx->state == MFCINST_RUNNING && s5p_mfc_ctx_ready()
232 ctx->dst_queue_cnt >= ctx->pb_count) s5p_mfc_ctx_ready()
235 if (ctx->state == MFCINST_FINISHING && s5p_mfc_ctx_ready()
236 ctx->dst_queue_cnt >= ctx->pb_count) s5p_mfc_ctx_ready()
239 if (ctx->src_queue_cnt >= 1 && s5p_mfc_ctx_ready()
240 ctx->state == MFCINST_HEAD_PARSED && s5p_mfc_ctx_ready()
241 ctx->capture_state == QUEUE_BUFS_MMAPED) s5p_mfc_ctx_ready()
244 if ((ctx->state == MFCINST_RES_CHANGE_INIT || s5p_mfc_ctx_ready()
245 ctx->state == MFCINST_RES_CHANGE_FLUSH) && s5p_mfc_ctx_ready()
246 ctx->dst_queue_cnt >= ctx->pb_count) s5p_mfc_ctx_ready()
248 if (ctx->state == MFCINST_RES_CHANGE_END && s5p_mfc_ctx_ready()
249 ctx->src_queue_cnt >= 1) s5p_mfc_ctx_ready()
251 mfc_debug(2, "ctx is not ready\n"); s5p_mfc_ctx_ready()
324 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_g_fmt() local
330 (ctx->state == MFCINST_GOT_INST || ctx->state == vidioc_g_fmt()
334 s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_SEQ_DONE_RET, vidioc_g_fmt()
338 ctx->state >= MFCINST_HEAD_PARSED && vidioc_g_fmt()
339 ctx->state < MFCINST_ABORT) { vidioc_g_fmt()
345 pix_mp->width = ctx->buf_width; vidioc_g_fmt()
346 pix_mp->height = ctx->buf_height; vidioc_g_fmt()
351 pix_mp->pixelformat = ctx->dst_fmt->fourcc; vidioc_g_fmt()
352 pix_mp->plane_fmt[0].bytesperline = ctx->buf_width; vidioc_g_fmt()
353 pix_mp->plane_fmt[0].sizeimage = ctx->luma_size; vidioc_g_fmt()
354 pix_mp->plane_fmt[1].bytesperline = ctx->buf_width; vidioc_g_fmt()
355 pix_mp->plane_fmt[1].sizeimage = ctx->chroma_size; vidioc_g_fmt()
363 pix_mp->plane_fmt[0].bytesperline = ctx->dec_src_buf_size; vidioc_g_fmt()
364 pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size; vidioc_g_fmt()
365 pix_mp->pixelformat = ctx->src_fmt->fourcc; vidioc_g_fmt()
366 pix_mp->num_planes = ctx->src_fmt->num_planes; vidioc_g_fmt()
416 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_s_fmt() local
426 if (ctx->vq_src.streaming || ctx->vq_dst.streaming) { vidioc_s_fmt()
433 ctx->dst_fmt = find_format(f, MFC_FMT_RAW); vidioc_s_fmt()
438 ctx->src_fmt = find_format(f, MFC_FMT_DEC); vidioc_s_fmt()
439 ctx->codec_mode = ctx->src_fmt->codec_mode; vidioc_s_fmt()
440 mfc_debug(2, "The codec number is: %d\n", ctx->codec_mode); vidioc_s_fmt()
444 pix_mp->plane_fmt[0].sizeimage = ctx->dec_src_buf_size = vidioc_s_fmt()
447 ctx->dec_src_buf_size = buf_size->cpb; vidioc_s_fmt()
449 ctx->dec_src_buf_size = pix_mp->plane_fmt[0].sizeimage; vidioc_s_fmt()
451 ctx->state = MFCINST_INIT; vidioc_s_fmt()
465 static int reqbufs_output(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx, reqbufs_output() argument
474 ret = vb2_reqbufs(&ctx->vq_src, reqbufs); reqbufs_output()
477 s5p_mfc_close_mfc_inst(dev, ctx); reqbufs_output()
478 ctx->src_bufs_cnt = 0; reqbufs_output()
479 ctx->output_state = QUEUE_FREE; reqbufs_output()
480 } else if (ctx->output_state == QUEUE_FREE) { reqbufs_output()
482 WARN_ON(ctx->src_bufs_cnt != 0); reqbufs_output()
483 if (ctx->state != MFCINST_INIT) { reqbufs_output()
491 ret = vb2_reqbufs(&ctx->vq_src, reqbufs); reqbufs_output()
495 ret = s5p_mfc_open_mfc_inst(dev, ctx); reqbufs_output()
498 vb2_reqbufs(&ctx->vq_src, reqbufs); reqbufs_output()
502 ctx->output_state = QUEUE_BUFS_REQUESTED; reqbufs_output()
514 static int reqbufs_capture(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx, reqbufs_capture() argument
523 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); reqbufs_capture()
526 s5p_mfc_hw_call_void(dev->mfc_ops, release_codec_buffers, ctx); reqbufs_capture()
527 ctx->dst_bufs_cnt = 0; reqbufs_capture()
528 } else if (ctx->capture_state == QUEUE_FREE) { reqbufs_capture()
529 WARN_ON(ctx->dst_bufs_cnt != 0); reqbufs_capture()
532 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); reqbufs_capture()
536 ctx->capture_state = QUEUE_BUFS_REQUESTED; reqbufs_capture()
537 ctx->total_dpb_count = reqbufs->count; reqbufs_capture()
539 ret = s5p_mfc_hw_call(dev->mfc_ops, alloc_codec_buffers, ctx); reqbufs_capture()
543 vb2_reqbufs(&ctx->vq_dst, reqbufs); reqbufs_capture()
545 ctx->capture_state = QUEUE_FREE; reqbufs_capture()
549 WARN_ON(ctx->dst_bufs_cnt != ctx->total_dpb_count); reqbufs_capture()
550 ctx->capture_state = QUEUE_BUFS_MMAPED; reqbufs_capture()
552 if (s5p_mfc_ctx_ready(ctx)) reqbufs_capture()
553 set_work_bit_irqsave(ctx); reqbufs_capture()
555 s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_INIT_BUFFERS_RET, reqbufs_capture()
573 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_reqbufs() local
581 return reqbufs_output(dev, ctx, reqbufs); vidioc_reqbufs()
583 return reqbufs_capture(dev, ctx, reqbufs); vidioc_reqbufs()
594 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_querybuf() local
602 mfc_debug(2, "State: %d, buf->type: %d\n", ctx->state, buf->type); vidioc_querybuf()
603 if (ctx->state == MFCINST_GOT_INST && vidioc_querybuf()
605 ret = vb2_querybuf(&ctx->vq_src, buf); vidioc_querybuf()
606 } else if (ctx->state == MFCINST_RUNNING && vidioc_querybuf()
608 ret = vb2_querybuf(&ctx->vq_dst, buf); vidioc_querybuf()
622 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_qbuf() local
624 if (ctx->state == MFCINST_ERROR) { vidioc_qbuf()
629 return vb2_qbuf(&ctx->vq_src, buf); vidioc_qbuf()
631 return vb2_qbuf(&ctx->vq_dst, buf); vidioc_qbuf()
641 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_dqbuf() local
644 if (ctx->state == MFCINST_ERROR) { vidioc_dqbuf()
649 ret = vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK); vidioc_dqbuf()
651 ret = vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK); vidioc_dqbuf()
652 if (ret == 0 && ctx->state == MFCINST_FINISHED && vidioc_dqbuf()
653 list_empty(&ctx->vq_dst.done_list)) vidioc_dqbuf()
654 v4l2_event_queue_fh(&ctx->fh, &ev); vidioc_dqbuf()
665 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_expbuf() local
668 return vb2_expbuf(&ctx->vq_src, eb); vidioc_expbuf()
670 return vb2_expbuf(&ctx->vq_dst, eb); vidioc_expbuf()
678 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_streamon() local
683 ret = vb2_streamon(&ctx->vq_src, type); vidioc_streamon()
685 ret = vb2_streamon(&ctx->vq_dst, type); vidioc_streamon()
694 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_streamoff() local
697 return vb2_streamoff(&ctx->vq_src, type); vidioc_streamoff()
699 return vb2_streamoff(&ctx->vq_dst, type); vidioc_streamoff()
706 struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl); s5p_mfc_dec_s_ctrl() local
710 ctx->display_delay = ctrl->val; s5p_mfc_dec_s_ctrl()
713 ctx->display_delay_enable = ctrl->val; s5p_mfc_dec_s_ctrl()
716 ctx->loop_filter_mpeg4 = ctrl->val; s5p_mfc_dec_s_ctrl()
719 ctx->slice_interface = ctrl->val; s5p_mfc_dec_s_ctrl()
730 struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl); s5p_mfc_dec_g_v_ctrl() local
731 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_dec_g_v_ctrl()
735 if (ctx->state >= MFCINST_HEAD_PARSED && s5p_mfc_dec_g_v_ctrl()
736 ctx->state < MFCINST_ABORT) { s5p_mfc_dec_g_v_ctrl()
737 ctrl->val = ctx->pb_count; s5p_mfc_dec_g_v_ctrl()
739 } else if (ctx->state != MFCINST_INIT && s5p_mfc_dec_g_v_ctrl()
740 ctx->state != MFCINST_RES_CHANGE_END) { s5p_mfc_dec_g_v_ctrl()
745 s5p_mfc_wait_for_done_ctx(ctx, s5p_mfc_dec_g_v_ctrl()
747 if (ctx->state >= MFCINST_HEAD_PARSED && s5p_mfc_dec_g_v_ctrl()
748 ctx->state < MFCINST_ABORT) { s5p_mfc_dec_g_v_ctrl()
749 ctrl->val = ctx->pb_count; s5p_mfc_dec_g_v_ctrl()
769 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_g_crop() local
770 struct s5p_mfc_dev *dev = ctx->dev; vidioc_g_crop()
773 if (ctx->state != MFCINST_HEAD_PARSED && vidioc_g_crop()
774 ctx->state != MFCINST_RUNNING && ctx->state != MFCINST_FINISHING vidioc_g_crop()
775 && ctx->state != MFCINST_FINISHED) { vidioc_g_crop()
779 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_H264) { vidioc_g_crop()
780 left = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_h, ctx); vidioc_g_crop()
783 top = s5p_mfc_hw_call(dev->mfc_ops, get_crop_info_v, ctx); vidioc_g_crop()
788 cr->c.width = ctx->img_width - left - right; vidioc_g_crop()
789 cr->c.height = ctx->img_height - top - bottom; vidioc_g_crop()
793 ctx->buf_width, ctx->buf_height); vidioc_g_crop()
797 cr->c.width = ctx->img_width; vidioc_g_crop()
798 cr->c.height = ctx->img_height; vidioc_g_crop()
800 "fh=%d\n", cr->c.width, cr->c.height, ctx->buf_width, vidioc_g_crop()
801 ctx->buf_height); vidioc_g_crop()
809 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_decoder_cmd() local
810 struct s5p_mfc_dev *dev = ctx->dev; vidioc_decoder_cmd()
819 if (!ctx->vq_src.streaming) vidioc_decoder_cmd()
823 if (list_empty(&ctx->src_queue)) { vidioc_decoder_cmd()
825 ctx->state = MFCINST_FINISHING; vidioc_decoder_cmd()
826 if (s5p_mfc_ctx_ready(ctx)) vidioc_decoder_cmd()
827 set_work_bit_irqsave(ctx); vidioc_decoder_cmd()
832 buf = list_entry(ctx->src_queue.prev, vidioc_decoder_cmd()
835 ctx->state = MFCINST_FINISHING; vidioc_decoder_cmd()
890 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); s5p_mfc_queue_setup() local
891 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_queue_setup()
895 if (ctx->state == MFCINST_INIT && s5p_mfc_queue_setup()
905 } else if (ctx->state == MFCINST_HEAD_PARSED && s5p_mfc_queue_setup()
910 if (*buf_count < ctx->pb_count) s5p_mfc_queue_setup()
911 *buf_count = ctx->pb_count; s5p_mfc_queue_setup()
912 if (*buf_count > ctx->pb_count + MFC_MAX_EXTRA_DPB) s5p_mfc_queue_setup()
913 *buf_count = ctx->pb_count + MFC_MAX_EXTRA_DPB; s5p_mfc_queue_setup()
918 ctx->state, vq->type); s5p_mfc_queue_setup()
923 if (ctx->state == MFCINST_HEAD_PARSED && s5p_mfc_queue_setup()
925 psize[0] = ctx->luma_size; s5p_mfc_queue_setup()
926 psize[1] = ctx->chroma_size; s5p_mfc_queue_setup()
930 ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; s5p_mfc_queue_setup()
933 ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX]; s5p_mfc_queue_setup()
934 allocators[1] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; s5p_mfc_queue_setup()
936 ctx->state == MFCINST_INIT) { s5p_mfc_queue_setup()
937 psize[0] = ctx->dec_src_buf_size; s5p_mfc_queue_setup()
938 allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; s5p_mfc_queue_setup()
949 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); s5p_mfc_buf_init() local
953 if (ctx->capture_state == QUEUE_BUFS_MMAPED) s5p_mfc_buf_init()
955 for (i = 0; i < ctx->dst_fmt->num_planes; i++) { s5p_mfc_buf_init()
962 if (vb2_plane_size(vb, 0) < ctx->luma_size || s5p_mfc_buf_init()
963 vb2_plane_size(vb, 1) < ctx->chroma_size) { s5p_mfc_buf_init()
968 ctx->dst_bufs[i].b = vb; s5p_mfc_buf_init()
969 ctx->dst_bufs[i].cookie.raw.luma = s5p_mfc_buf_init()
971 ctx->dst_bufs[i].cookie.raw.chroma = s5p_mfc_buf_init()
973 ctx->dst_bufs_cnt++; s5p_mfc_buf_init()
980 if (vb2_plane_size(vb, 0) < ctx->dec_src_buf_size) { s5p_mfc_buf_init()
986 ctx->src_bufs[i].b = vb; s5p_mfc_buf_init()
987 ctx->src_bufs[i].cookie.stream = s5p_mfc_buf_init()
989 ctx->src_bufs_cnt++; s5p_mfc_buf_init()
999 struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); s5p_mfc_start_streaming() local
1000 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_start_streaming()
1002 v4l2_ctrl_handler_setup(&ctx->ctrl_handler); s5p_mfc_start_streaming()
1003 if (ctx->state == MFCINST_FINISHING || s5p_mfc_start_streaming()
1004 ctx->state == MFCINST_FINISHED) s5p_mfc_start_streaming()
1005 ctx->state = MFCINST_RUNNING; s5p_mfc_start_streaming()
1007 if (s5p_mfc_ctx_ready(ctx)) s5p_mfc_start_streaming()
1008 set_work_bit_irqsave(ctx); s5p_mfc_start_streaming()
1016 struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); s5p_mfc_stop_streaming() local
1017 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_stop_streaming()
1020 if ((ctx->state == MFCINST_FINISHING || s5p_mfc_stop_streaming()
1021 ctx->state == MFCINST_RUNNING) && s5p_mfc_stop_streaming()
1022 dev->curr_ctx == ctx->num && dev->hw_lock) { s5p_mfc_stop_streaming()
1023 ctx->state = MFCINST_ABORT; s5p_mfc_stop_streaming()
1024 s5p_mfc_wait_for_done_ctx(ctx, s5p_mfc_stop_streaming()
1031 &ctx->dst_queue, &ctx->vq_dst); s5p_mfc_stop_streaming()
1032 INIT_LIST_HEAD(&ctx->dst_queue); s5p_mfc_stop_streaming()
1033 ctx->dst_queue_cnt = 0; s5p_mfc_stop_streaming()
1034 ctx->dpb_flush_flag = 1; s5p_mfc_stop_streaming()
1035 ctx->dec_dst_flag = 0; s5p_mfc_stop_streaming()
1037 if (IS_MFCV6_PLUS(dev) && (ctx->state == MFCINST_RUNNING)) { s5p_mfc_stop_streaming()
1038 ctx->state = MFCINST_FLUSH; s5p_mfc_stop_streaming()
1039 set_work_bit_irqsave(ctx); s5p_mfc_stop_streaming()
1041 if (s5p_mfc_wait_for_done_ctx(ctx, s5p_mfc_stop_streaming()
1049 &ctx->src_queue, &ctx->vq_src); s5p_mfc_stop_streaming()
1050 INIT_LIST_HEAD(&ctx->src_queue); s5p_mfc_stop_streaming()
1051 ctx->src_queue_cnt = 0; s5p_mfc_stop_streaming()
1055 ctx->state = MFCINST_RUNNING; s5p_mfc_stop_streaming()
1062 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); s5p_mfc_buf_queue() local
1063 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_buf_queue()
1068 mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index]; s5p_mfc_buf_queue()
1071 list_add_tail(&mfc_buf->list, &ctx->src_queue); s5p_mfc_buf_queue()
1072 ctx->src_queue_cnt++; s5p_mfc_buf_queue()
1075 mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index]; s5p_mfc_buf_queue()
1079 set_bit(vb->v4l2_buf.index, &ctx->dec_dst_flag); s5p_mfc_buf_queue()
1080 list_add_tail(&mfc_buf->list, &ctx->dst_queue); s5p_mfc_buf_queue()
1081 ctx->dst_queue_cnt++; s5p_mfc_buf_queue()
1086 if (s5p_mfc_ctx_ready(ctx)) s5p_mfc_buf_queue()
1087 set_work_bit_irqsave(ctx); s5p_mfc_buf_queue()
1119 int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx) s5p_mfc_dec_ctrls_setup() argument
1124 v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS); s5p_mfc_dec_ctrls_setup()
1125 if (ctx->ctrl_handler.error) { s5p_mfc_dec_ctrls_setup()
1127 return ctx->ctrl_handler.error; s5p_mfc_dec_ctrls_setup()
1144 ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler, s5p_mfc_dec_ctrls_setup()
1147 ctx->ctrls[i] = v4l2_ctrl_new_std(&ctx->ctrl_handler, s5p_mfc_dec_ctrls_setup()
1153 if (ctx->ctrl_handler.error) { s5p_mfc_dec_ctrls_setup()
1155 return ctx->ctrl_handler.error; s5p_mfc_dec_ctrls_setup()
1157 if (controls[i].is_volatile && ctx->ctrls[i]) s5p_mfc_dec_ctrls_setup()
1158 ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE; s5p_mfc_dec_ctrls_setup()
1163 void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx) s5p_mfc_dec_ctrls_delete() argument
1167 v4l2_ctrl_handler_free(&ctx->ctrl_handler); s5p_mfc_dec_ctrls_delete()
1169 ctx->ctrls[i] = NULL; s5p_mfc_dec_ctrls_delete()
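
The registration loop above alternates between custom and standard controls. Its skeleton, assuming the stock V4L2 control-framework calls (the is_custom field, the cfg fill-in, and the ops name are placeholders reconstructed around the excerpt):

	v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS);
	for (i = 0; i < NUM_CTRLS; i++) {
		if (controls[i].is_custom) {		/* field name assumed */
			/* cfg: a struct v4l2_ctrl_config built from controls[i] (elided) */
			ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler, &cfg, NULL);
		} else {
			ctx->ctrls[i] = v4l2_ctrl_new_std(&ctx->ctrl_handler,
					&s5p_mfc_dec_ctrl_ops,	/* ops name assumed */
					controls[i].id, controls[i].minimum,
					controls[i].maximum, controls[i].step,
					controls[i].default_value);
		}
		if (ctx->ctrl_handler.error)
			return ctx->ctrl_handler.error;	/* registration failed */
	}
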
1172 void s5p_mfc_dec_init(struct s5p_mfc_ctx *ctx) s5p_mfc_dec_init() argument
1176 ctx->src_fmt = find_format(&f, MFC_FMT_DEC); s5p_mfc_dec_init()
1177 if (IS_MFCV8(ctx->dev)) s5p_mfc_dec_init()
1179 else if (IS_MFCV6_PLUS(ctx->dev)) s5p_mfc_dec_init()
1183 ctx->dst_fmt = find_format(&f, MFC_FMT_RAW); s5p_mfc_dec_init()
1185 ctx->src_fmt, ctx->dst_fmt); s5p_mfc_dec_init()
H A Ds5p_mfc_opr_v6.c49 static int s5p_mfc_alloc_dec_temp_buffers_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_alloc_dec_temp_buffers_v6() argument
57 static void s5p_mfc_release_dec_desc_buffer_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_release_dec_desc_buffer_v6() argument
63 static int s5p_mfc_alloc_codec_buffers_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_alloc_codec_buffers_v6() argument
65 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_alloc_codec_buffers_v6()
69 mb_width = MB_WIDTH(ctx->img_width); s5p_mfc_alloc_codec_buffers_v6()
70 mb_height = MB_HEIGHT(ctx->img_height); s5p_mfc_alloc_codec_buffers_v6()
72 if (ctx->type == MFCINST_DECODER) { s5p_mfc_alloc_codec_buffers_v6()
74 ctx->luma_size, ctx->chroma_size, ctx->mv_size); s5p_mfc_alloc_codec_buffers_v6()
75 mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count); s5p_mfc_alloc_codec_buffers_v6()
76 } else if (ctx->type == MFCINST_ENCODER) { s5p_mfc_alloc_codec_buffers_v6()
78 ctx->tmv_buffer_size = S5P_FIMV_NUM_TMV_BUFFERS_V6 * s5p_mfc_alloc_codec_buffers_v6()
82 ctx->tmv_buffer_size = S5P_FIMV_NUM_TMV_BUFFERS_V6 * s5p_mfc_alloc_codec_buffers_v6()
86 ctx->luma_dpb_size = ALIGN((mb_width * mb_height) * s5p_mfc_alloc_codec_buffers_v6()
89 ctx->chroma_dpb_size = ALIGN((mb_width * mb_height) * s5p_mfc_alloc_codec_buffers_v6()
93 ctx->me_buffer_size = ALIGN(S5P_FIMV_ME_BUFFER_SIZE_V8( s5p_mfc_alloc_codec_buffers_v6()
94 ctx->img_width, ctx->img_height, s5p_mfc_alloc_codec_buffers_v6()
98 ctx->me_buffer_size = ALIGN(S5P_FIMV_ME_BUFFER_SIZE_V6( s5p_mfc_alloc_codec_buffers_v6()
99 ctx->img_width, ctx->img_height, s5p_mfc_alloc_codec_buffers_v6()
104 ctx->luma_dpb_size, ctx->chroma_dpb_size); s5p_mfc_alloc_codec_buffers_v6()
110 switch (ctx->codec_mode) { s5p_mfc_alloc_codec_buffers_v6()
114 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
119 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
123 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, s5p_mfc_alloc_codec_buffers_v6()
125 ctx->bank1.size = s5p_mfc_alloc_codec_buffers_v6()
126 ctx->scratch_buf_size + s5p_mfc_alloc_codec_buffers_v6()
127 (ctx->mv_count * ctx->mv_size); s5p_mfc_alloc_codec_buffers_v6()
131 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
136 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
142 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, s5p_mfc_alloc_codec_buffers_v6()
144 ctx->bank1.size = ctx->scratch_buf_size; s5p_mfc_alloc_codec_buffers_v6()
148 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
152 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, s5p_mfc_alloc_codec_buffers_v6()
154 ctx->bank1.size = ctx->scratch_buf_size; s5p_mfc_alloc_codec_buffers_v6()
157 ctx->bank1.size = 0; s5p_mfc_alloc_codec_buffers_v6()
158 ctx->bank2.size = 0; s5p_mfc_alloc_codec_buffers_v6()
161 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
165 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, s5p_mfc_alloc_codec_buffers_v6()
167 ctx->bank1.size = ctx->scratch_buf_size; s5p_mfc_alloc_codec_buffers_v6()
171 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
176 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
180 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, s5p_mfc_alloc_codec_buffers_v6()
182 ctx->bank1.size = ctx->scratch_buf_size; s5p_mfc_alloc_codec_buffers_v6()
186 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
191 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
195 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, s5p_mfc_alloc_codec_buffers_v6()
197 ctx->bank1.size = s5p_mfc_alloc_codec_buffers_v6()
198 ctx->scratch_buf_size + ctx->tmv_buffer_size + s5p_mfc_alloc_codec_buffers_v6()
199 (ctx->pb_count * (ctx->luma_dpb_size + s5p_mfc_alloc_codec_buffers_v6()
200 ctx->chroma_dpb_size + ctx->me_buffer_size)); s5p_mfc_alloc_codec_buffers_v6()
201 ctx->bank2.size = 0; s5p_mfc_alloc_codec_buffers_v6()
205 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
209 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, s5p_mfc_alloc_codec_buffers_v6()
211 ctx->bank1.size = s5p_mfc_alloc_codec_buffers_v6()
212 ctx->scratch_buf_size + ctx->tmv_buffer_size + s5p_mfc_alloc_codec_buffers_v6()
213 (ctx->pb_count * (ctx->luma_dpb_size + s5p_mfc_alloc_codec_buffers_v6()
214 ctx->chroma_dpb_size + ctx->me_buffer_size)); s5p_mfc_alloc_codec_buffers_v6()
215 ctx->bank2.size = 0; s5p_mfc_alloc_codec_buffers_v6()
219 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
224 ctx->scratch_buf_size = s5p_mfc_alloc_codec_buffers_v6()
228 ctx->scratch_buf_size = ALIGN(ctx->scratch_buf_size, s5p_mfc_alloc_codec_buffers_v6()
230 ctx->bank1.size = s5p_mfc_alloc_codec_buffers_v6()
231 ctx->scratch_buf_size + ctx->tmv_buffer_size + s5p_mfc_alloc_codec_buffers_v6()
232 (ctx->pb_count * (ctx->luma_dpb_size + s5p_mfc_alloc_codec_buffers_v6()
233 ctx->chroma_dpb_size + ctx->me_buffer_size)); s5p_mfc_alloc_codec_buffers_v6()
234 ctx->bank2.size = 0; s5p_mfc_alloc_codec_buffers_v6()
241 if (ctx->bank1.size > 0) { s5p_mfc_alloc_codec_buffers_v6()
242 ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1); s5p_mfc_alloc_codec_buffers_v6()
247 BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); s5p_mfc_alloc_codec_buffers_v6()
253 static void s5p_mfc_release_codec_buffers_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_release_codec_buffers_v6() argument
255 s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1); s5p_mfc_release_codec_buffers_v6()
259 static int s5p_mfc_alloc_instance_buffer_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_alloc_instance_buffer_v6() argument
261 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_alloc_instance_buffer_v6()
267 switch (ctx->codec_mode) { s5p_mfc_alloc_instance_buffer_v6()
270 ctx->ctx.size = buf_size->h264_dec_ctx; s5p_mfc_alloc_instance_buffer_v6()
278 ctx->ctx.size = buf_size->other_dec_ctx; s5p_mfc_alloc_instance_buffer_v6()
281 ctx->ctx.size = buf_size->h264_enc_ctx; s5p_mfc_alloc_instance_buffer_v6()
286 ctx->ctx.size = buf_size->other_enc_ctx; s5p_mfc_alloc_instance_buffer_v6()
289 ctx->ctx.size = 0; s5p_mfc_alloc_instance_buffer_v6()
290 mfc_err("Codec type(%d) should be checked!\n", ctx->codec_mode); s5p_mfc_alloc_instance_buffer_v6()
294 ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx); s5p_mfc_alloc_instance_buffer_v6()
300 memset(ctx->ctx.virt, 0, ctx->ctx.size); s5p_mfc_alloc_instance_buffer_v6()
309 static void s5p_mfc_release_instance_buffer_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_release_instance_buffer_v6() argument
311 s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->ctx); s5p_mfc_release_instance_buffer_v6()
357 static void s5p_mfc_dec_calc_dpb_size_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_dec_calc_dpb_size_v6() argument
359 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN_V6); s5p_mfc_dec_calc_dpb_size_v6()
360 ctx->buf_height = ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN_V6); s5p_mfc_dec_calc_dpb_size_v6()
362 "buffer dimensions: %dx%d\n", ctx->img_width, s5p_mfc_dec_calc_dpb_size_v6()
363 ctx->img_height, ctx->buf_width, ctx->buf_height); s5p_mfc_dec_calc_dpb_size_v6()
365 ctx->luma_size = calc_plane(ctx->img_width, ctx->img_height); s5p_mfc_dec_calc_dpb_size_v6()
366 ctx->chroma_size = calc_plane(ctx->img_width, (ctx->img_height >> 1)); s5p_mfc_dec_calc_dpb_size_v6()
367 if (IS_MFCV8(ctx->dev)) { s5p_mfc_dec_calc_dpb_size_v6()
369 ctx->luma_size += S5P_FIMV_D_ALIGN_PLANE_SIZE_V8; s5p_mfc_dec_calc_dpb_size_v6()
370 ctx->chroma_size += S5P_FIMV_D_ALIGN_PLANE_SIZE_V8; s5p_mfc_dec_calc_dpb_size_v6()
373 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC || s5p_mfc_dec_calc_dpb_size_v6()
374 ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) { s5p_mfc_dec_calc_dpb_size_v6()
375 ctx->mv_size = S5P_MFC_DEC_MV_SIZE_V6(ctx->img_width, s5p_mfc_dec_calc_dpb_size_v6()
376 ctx->img_height); s5p_mfc_dec_calc_dpb_size_v6()
377 ctx->mv_size = ALIGN(ctx->mv_size, 16); s5p_mfc_dec_calc_dpb_size_v6()
379 ctx->mv_size = 0; s5p_mfc_dec_calc_dpb_size_v6()
383 static void s5p_mfc_enc_calc_src_size_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_enc_calc_src_size_v6() argument
387 mb_width = MB_WIDTH(ctx->img_width); s5p_mfc_enc_calc_src_size_v6()
388 mb_height = MB_HEIGHT(ctx->img_height); s5p_mfc_enc_calc_src_size_v6()
390 ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN_V6); s5p_mfc_enc_calc_src_size_v6()
391 ctx->luma_size = ALIGN((mb_width * mb_height) * 256, 256); s5p_mfc_enc_calc_src_size_v6()
392 ctx->chroma_size = ALIGN((mb_width * mb_height) * 128, 256); s5p_mfc_enc_calc_src_size_v6()
395 if (IS_MFCV7_PLUS(ctx->dev)) { s5p_mfc_enc_calc_src_size_v6()
396 ctx->luma_size += MFC_LUMA_PAD_BYTES_V7; s5p_mfc_enc_calc_src_size_v6()
397 ctx->chroma_size += MFC_CHROMA_PAD_BYTES_V7; s5p_mfc_enc_calc_src_size_v6()
402 static int s5p_mfc_set_dec_stream_buffer_v6(struct s5p_mfc_ctx *ctx, s5p_mfc_set_dec_stream_buffer_v6() argument
406 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_dec_stream_buffer_v6()
413 ctx->inst_no, buf_addr, strm_size, strm_size); s5p_mfc_set_dec_stream_buffer_v6()
424 static int s5p_mfc_set_dec_frame_buffer_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_set_dec_frame_buffer_v6() argument
428 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_dec_frame_buffer_v6()
434 buf_addr1 = ctx->bank1.dma; s5p_mfc_set_dec_frame_buffer_v6()
435 buf_size1 = ctx->bank1.size; s5p_mfc_set_dec_frame_buffer_v6()
438 mfc_debug(2, "Total DPB COUNT: %d\n", ctx->total_dpb_count); s5p_mfc_set_dec_frame_buffer_v6()
439 mfc_debug(2, "Setting display delay to %d\n", ctx->display_delay); s5p_mfc_set_dec_frame_buffer_v6()
441 writel(ctx->total_dpb_count, mfc_regs->d_num_dpb); s5p_mfc_set_dec_frame_buffer_v6()
442 writel(ctx->luma_size, mfc_regs->d_first_plane_dpb_size); s5p_mfc_set_dec_frame_buffer_v6()
443 writel(ctx->chroma_size, mfc_regs->d_second_plane_dpb_size); s5p_mfc_set_dec_frame_buffer_v6()
446 writel(ctx->scratch_buf_size, mfc_regs->d_scratch_buffer_size); s5p_mfc_set_dec_frame_buffer_v6()
449 writel(ctx->img_width, s5p_mfc_set_dec_frame_buffer_v6()
451 writel(ctx->img_width, s5p_mfc_set_dec_frame_buffer_v6()
455 buf_addr1 += ctx->scratch_buf_size; s5p_mfc_set_dec_frame_buffer_v6()
456 buf_size1 -= ctx->scratch_buf_size; s5p_mfc_set_dec_frame_buffer_v6()
458 if (ctx->codec_mode == S5P_FIMV_CODEC_H264_DEC || s5p_mfc_set_dec_frame_buffer_v6()
459 ctx->codec_mode == S5P_FIMV_CODEC_H264_MVC_DEC){ s5p_mfc_set_dec_frame_buffer_v6()
460 writel(ctx->mv_size, mfc_regs->d_mv_buffer_size); s5p_mfc_set_dec_frame_buffer_v6()
461 writel(ctx->mv_count, mfc_regs->d_num_mv); s5p_mfc_set_dec_frame_buffer_v6()
464 frame_size = ctx->luma_size; s5p_mfc_set_dec_frame_buffer_v6()
465 frame_size_ch = ctx->chroma_size; s5p_mfc_set_dec_frame_buffer_v6()
466 frame_size_mv = ctx->mv_size; s5p_mfc_set_dec_frame_buffer_v6()
470 for (i = 0; i < ctx->total_dpb_count; i++) { s5p_mfc_set_dec_frame_buffer_v6()
473 ctx->dst_bufs[i].cookie.raw.luma); s5p_mfc_set_dec_frame_buffer_v6()
474 writel(ctx->dst_bufs[i].cookie.raw.luma, s5p_mfc_set_dec_frame_buffer_v6()
477 ctx->dst_bufs[i].cookie.raw.chroma); s5p_mfc_set_dec_frame_buffer_v6()
478 writel(ctx->dst_bufs[i].cookie.raw.chroma, s5p_mfc_set_dec_frame_buffer_v6()
481 if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC || s5p_mfc_set_dec_frame_buffer_v6()
482 ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) { s5p_mfc_set_dec_frame_buffer_v6()
483 for (i = 0; i < ctx->mv_count; i++) { s5p_mfc_set_dec_frame_buffer_v6()
499 buf_addr1, buf_size1, ctx->total_dpb_count); s5p_mfc_set_dec_frame_buffer_v6()
505 writel(ctx->inst_no, mfc_regs->instance_id); s5p_mfc_set_dec_frame_buffer_v6()
514 static int s5p_mfc_set_enc_stream_buffer_v6(struct s5p_mfc_ctx *ctx, s5p_mfc_set_enc_stream_buffer_v6() argument
517 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_stream_buffer_v6()
529 static void s5p_mfc_set_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, s5p_mfc_set_enc_frame_buffer_v6() argument
532 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_frame_buffer_v6()
542 static void s5p_mfc_get_enc_frame_buffer_v6(struct s5p_mfc_ctx *ctx, s5p_mfc_get_enc_frame_buffer_v6() argument
545 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_get_enc_frame_buffer_v6()
560 static int s5p_mfc_set_enc_ref_buffer_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_ref_buffer_v6() argument
562 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_ref_buffer_v6()
569 buf_addr1 = ctx->bank1.dma; s5p_mfc_set_enc_ref_buffer_v6()
570 buf_size1 = ctx->bank1.size; s5p_mfc_set_enc_ref_buffer_v6()
574 for (i = 0; i < ctx->pb_count; i++) { s5p_mfc_set_enc_ref_buffer_v6()
576 buf_addr1 += ctx->luma_dpb_size; s5p_mfc_set_enc_ref_buffer_v6()
578 buf_addr1 += ctx->chroma_dpb_size; s5p_mfc_set_enc_ref_buffer_v6()
580 buf_addr1 += ctx->me_buffer_size; s5p_mfc_set_enc_ref_buffer_v6()
581 buf_size1 -= (ctx->luma_dpb_size + ctx->chroma_dpb_size + s5p_mfc_set_enc_ref_buffer_v6()
582 ctx->me_buffer_size); s5p_mfc_set_enc_ref_buffer_v6()
586 writel(ctx->scratch_buf_size, mfc_regs->e_scratch_buffer_size); s5p_mfc_set_enc_ref_buffer_v6()
587 buf_addr1 += ctx->scratch_buf_size; s5p_mfc_set_enc_ref_buffer_v6()
588 buf_size1 -= ctx->scratch_buf_size; s5p_mfc_set_enc_ref_buffer_v6()
591 buf_addr1 += ctx->tmv_buffer_size >> 1; s5p_mfc_set_enc_ref_buffer_v6()
593 buf_addr1 += ctx->tmv_buffer_size >> 1; s5p_mfc_set_enc_ref_buffer_v6()
594 buf_size1 -= ctx->tmv_buffer_size; s5p_mfc_set_enc_ref_buffer_v6()
597 buf_addr1, buf_size1, ctx->pb_count); s5p_mfc_set_enc_ref_buffer_v6()
603 writel(ctx->inst_no, mfc_regs->instance_id); s5p_mfc_set_enc_ref_buffer_v6()
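
s5p_mfc_set_enc_ref_buffer_v6() above carves the per-DPB sub-buffers out of the single bank1 allocation with a bump pointer: advance the base, shrink the remaining size. A userspace model of that carving with made-up sizes:

	#include <stdio.h>

	int main(void)
	{
		unsigned int base = 0x10000000, left = 1 << 20;	/* pretend bank1 */
		unsigned int luma = 64 << 10, chroma = 32 << 10, me = 16 << 10;
		int i;

		for (i = 0; i < 4; i++) {	/* pb_count of 4 assumed */
			printf("dpb%d: luma@%#x chroma@%#x me@%#x\n",
			       i, base, base + luma, base + luma + chroma);
			base += luma + chroma + me;
			left -= luma + chroma + me;
		}
		printf("left for scratch/TMV: %u bytes\n", left);
		return 0;
	}
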
612 static int s5p_mfc_set_slice_mode(struct s5p_mfc_ctx *ctx) s5p_mfc_set_slice_mode() argument
614 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_slice_mode()
619 writel(ctx->slice_mode, mfc_regs->e_mslice_mode); s5p_mfc_set_slice_mode()
620 if (ctx->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) { s5p_mfc_set_slice_mode()
621 writel(ctx->slice_size.mb, mfc_regs->e_mslice_size_mb); s5p_mfc_set_slice_mode()
622 } else if (ctx->slice_mode == s5p_mfc_set_slice_mode()
624 writel(ctx->slice_size.bits, mfc_regs->e_mslice_size_bits); s5p_mfc_set_slice_mode()
633 static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_params() argument
635 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_params()
637 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_set_enc_params()
643 writel(ctx->img_width, mfc_regs->e_frame_width); /* 16 align */ s5p_mfc_set_enc_params()
645 writel(ctx->img_height, mfc_regs->e_frame_height); /* 16 align */ s5p_mfc_set_enc_params()
648 writel(ctx->img_width, mfc_regs->e_cropped_frame_width); s5p_mfc_set_enc_params()
650 writel(ctx->img_height, mfc_regs->e_cropped_frame_height); s5p_mfc_set_enc_params()
661 ctx->slice_mode = p->slice_mode; s5p_mfc_set_enc_params()
666 ctx->slice_size.mb = p->slice_mb; s5p_mfc_set_enc_params()
670 ctx->slice_size.bits = p->slice_bit; s5p_mfc_set_enc_params()
676 s5p_mfc_set_slice_mode(ctx); s5p_mfc_set_enc_params()
693 if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) { s5p_mfc_set_enc_params()
700 } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV21M) { s5p_mfc_set_enc_params()
707 } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) { s5p_mfc_set_enc_params()
798 static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_params_h264() argument
800 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_params_h264()
802 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_set_enc_params_h264()
809 s5p_mfc_set_enc_params(ctx); s5p_mfc_set_enc_params_h264()
880 writel(ctx->img_height >> 1, s5p_mfc_set_enc_params_h264()
883 writel(ctx->img_height >> 1, s5p_mfc_set_enc_params_h264()
1079 static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_params_mpeg4() argument
1081 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_params_mpeg4()
1083 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_set_enc_params_mpeg4()
1089 s5p_mfc_set_enc_params(ctx); s5p_mfc_set_enc_params_mpeg4()
1161 static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_params_h263() argument
1163 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_params_h263()
1165 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_set_enc_params_h263()
1171 s5p_mfc_set_enc_params(ctx); s5p_mfc_set_enc_params_h263()
1231 static int s5p_mfc_set_enc_params_vp8(struct s5p_mfc_ctx *ctx) s5p_mfc_set_enc_params_vp8() argument
1233 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_enc_params_vp8()
1235 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_set_enc_params_vp8()
1242 s5p_mfc_set_enc_params(ctx); s5p_mfc_set_enc_params_vp8()
1325 static int s5p_mfc_init_decode_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_init_decode_v6() argument
1327 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_init_decode_v6()
1333 mfc_debug(2, "InstNo: %d/%d\n", ctx->inst_no, s5p_mfc_init_decode_v6()
1343 if (ctx->display_delay_enable) { s5p_mfc_init_decode_v6()
1345 writel(ctx->display_delay, mfc_regs->d_display_delay); s5p_mfc_init_decode_v6()
1354 if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_DEC) { s5p_mfc_init_decode_v6()
1356 ctx->loop_filter_mpeg4); s5p_mfc_init_decode_v6()
1357 reg |= (ctx->loop_filter_mpeg4 << s5p_mfc_init_decode_v6()
1360 if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV12MT_16X16) s5p_mfc_init_decode_v6()
1369 if (ctx->dst_fmt->fourcc == V4L2_PIX_FMT_NV21M) s5p_mfc_init_decode_v6()
1376 writel(ctx->sei_fp_parse & 0x1, mfc_regs->d_sei_enable); s5p_mfc_init_decode_v6()
1378 writel(ctx->inst_no, mfc_regs->instance_id); s5p_mfc_init_decode_v6()
1386 static inline void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush) s5p_mfc_set_flush() argument
1388 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_set_flush()
1392 dev->curr_ctx = ctx->num; s5p_mfc_set_flush()
1393 writel(ctx->inst_no, mfc_regs->instance_id); s5p_mfc_set_flush()
1400 static int s5p_mfc_decode_one_frame_v6(struct s5p_mfc_ctx *ctx, s5p_mfc_decode_one_frame_v6() argument
1403 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_decode_one_frame_v6()
1406 writel(ctx->dec_dst_flag, mfc_regs->d_available_dpb_flag_lower); s5p_mfc_decode_one_frame_v6()
1407 writel(ctx->slice_interface & 0x1, mfc_regs->d_slice_if_enable); s5p_mfc_decode_one_frame_v6()
1409 writel(ctx->inst_no, mfc_regs->instance_id); s5p_mfc_decode_one_frame_v6()
1430 static int s5p_mfc_init_encode_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_init_encode_v6() argument
1432 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_init_encode_v6()
1435 if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) s5p_mfc_init_encode_v6()
1436 s5p_mfc_set_enc_params_h264(ctx); s5p_mfc_init_encode_v6()
1437 else if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_ENC) s5p_mfc_init_encode_v6()
1438 s5p_mfc_set_enc_params_mpeg4(ctx); s5p_mfc_init_encode_v6()
1439 else if (ctx->codec_mode == S5P_MFC_CODEC_H263_ENC) s5p_mfc_init_encode_v6()
1440 s5p_mfc_set_enc_params_h263(ctx); s5p_mfc_init_encode_v6()
1441 else if (ctx->codec_mode == S5P_MFC_CODEC_VP8_ENC) s5p_mfc_init_encode_v6()
1442 s5p_mfc_set_enc_params_vp8(ctx); s5p_mfc_init_encode_v6()
1445 ctx->codec_mode); s5p_mfc_init_encode_v6()
1451 writel(ctx->img_width, mfc_regs->e_source_first_plane_stride); s5p_mfc_init_encode_v6()
1452 writel(ctx->img_width, mfc_regs->e_source_second_plane_stride); s5p_mfc_init_encode_v6()
1455 writel(ctx->inst_no, mfc_regs->instance_id); s5p_mfc_init_encode_v6()
1462 static int s5p_mfc_h264_set_aso_slice_order_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_h264_set_aso_slice_order_v6() argument
1464 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_h264_set_aso_slice_order_v6()
1466 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_h264_set_aso_slice_order_v6()
1480 static int s5p_mfc_encode_one_frame_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_encode_one_frame_v6() argument
1482 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_encode_one_frame_v6()
1489 if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) s5p_mfc_encode_one_frame_v6()
1490 s5p_mfc_h264_set_aso_slice_order_v6(ctx); s5p_mfc_encode_one_frame_v6()
1492 s5p_mfc_set_slice_mode(ctx); s5p_mfc_encode_one_frame_v6()
1494 writel(ctx->inst_no, mfc_regs->instance_id); s5p_mfc_encode_one_frame_v6()
1527 static inline void s5p_mfc_run_dec_last_frames(struct s5p_mfc_ctx *ctx) s5p_mfc_run_dec_last_frames() argument
1529 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_dec_last_frames()
1531 s5p_mfc_set_dec_stream_buffer_v6(ctx, 0, 0, 0); s5p_mfc_run_dec_last_frames()
1532 dev->curr_ctx = ctx->num; s5p_mfc_run_dec_last_frames()
1533 s5p_mfc_decode_one_frame_v6(ctx, MFC_DEC_LAST_FRAME); s5p_mfc_run_dec_last_frames()
1536 static inline int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx) s5p_mfc_run_dec_frame() argument
1538 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_dec_frame()
1543 if (ctx->state == MFCINST_FINISHING) { s5p_mfc_run_dec_frame()
1545 s5p_mfc_set_dec_stream_buffer_v6(ctx, 0, 0, 0); s5p_mfc_run_dec_frame()
1546 dev->curr_ctx = ctx->num; s5p_mfc_run_dec_frame()
1547 s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_run_dec_frame()
1548 s5p_mfc_decode_one_frame_v6(ctx, last_frame); s5p_mfc_run_dec_frame()
1554 if (list_empty(&ctx->src_queue)) { s5p_mfc_run_dec_frame()
1560 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); s5p_mfc_run_dec_frame()
1562 s5p_mfc_set_dec_stream_buffer_v6(ctx, s5p_mfc_run_dec_frame()
1564 ctx->consumed_stream, s5p_mfc_run_dec_frame()
1568 dev->curr_ctx = ctx->num; s5p_mfc_run_dec_frame()
1571 mfc_debug(2, "Setting ctx->state to FINISHING\n"); s5p_mfc_run_dec_frame()
1572 ctx->state = MFCINST_FINISHING; s5p_mfc_run_dec_frame()
1574 s5p_mfc_decode_one_frame_v6(ctx, last_frame); s5p_mfc_run_dec_frame()
1579 static inline int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx) s5p_mfc_run_enc_frame() argument
1581 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_enc_frame()
1593 if (list_empty(&ctx->src_queue)) { s5p_mfc_run_enc_frame()
1599 if (list_empty(&ctx->dst_queue)) { s5p_mfc_run_enc_frame()
1605 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); s5p_mfc_run_enc_frame()
1613 s5p_mfc_set_enc_frame_buffer_v6(ctx, src_y_addr, src_c_addr); s5p_mfc_run_enc_frame()
1615 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); s5p_mfc_run_enc_frame()
1620 s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); s5p_mfc_run_enc_frame()
1624 dev->curr_ctx = ctx->num; s5p_mfc_run_enc_frame()
1625 s5p_mfc_encode_one_frame_v6(ctx); s5p_mfc_run_enc_frame()
1630 static inline void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx) s5p_mfc_run_init_dec() argument
1632 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_init_dec()
1639 temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); s5p_mfc_run_init_dec()
1641 s5p_mfc_set_dec_stream_buffer_v6(ctx, s5p_mfc_run_init_dec()
1645 dev->curr_ctx = ctx->num; s5p_mfc_run_init_dec()
1646 s5p_mfc_init_decode_v6(ctx); s5p_mfc_run_init_dec()
1649 static inline void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx) s5p_mfc_run_init_enc() argument
1651 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_init_enc()
1659 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); s5p_mfc_run_init_enc()
1662 s5p_mfc_set_enc_stream_buffer_v6(ctx, dst_addr, dst_size); s5p_mfc_run_init_enc()
1664 dev->curr_ctx = ctx->num; s5p_mfc_run_init_enc()
1665 s5p_mfc_init_encode_v6(ctx); s5p_mfc_run_init_enc()
1668 static inline int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx) s5p_mfc_run_init_dec_buffers() argument
1670 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_init_dec_buffers()
1674 * s5p_mfc_alloc_dec_buffers(ctx); */ s5p_mfc_run_init_dec_buffers()
1676 if (ctx->capture_state != QUEUE_BUFS_MMAPED) { s5p_mfc_run_init_dec_buffers()
1683 dev->curr_ctx = ctx->num; s5p_mfc_run_init_dec_buffers()
1684 ret = s5p_mfc_set_dec_frame_buffer_v6(ctx); s5p_mfc_run_init_dec_buffers()
1687 ctx->state = MFCINST_ERROR; s5p_mfc_run_init_dec_buffers()
1692 static inline int s5p_mfc_run_init_enc_buffers(struct s5p_mfc_ctx *ctx) s5p_mfc_run_init_enc_buffers() argument
1694 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_run_init_enc_buffers()
1697 dev->curr_ctx = ctx->num; s5p_mfc_run_init_enc_buffers()
1698 ret = s5p_mfc_set_enc_ref_buffer_v6(ctx); s5p_mfc_run_init_enc_buffers()
1701 ctx->state = MFCINST_ERROR; s5p_mfc_run_init_enc_buffers()
1709 struct s5p_mfc_ctx *ctx; s5p_mfc_try_run_v6() local
1717 /* This is perfectly ok, the scheduled ctx should wait */ s5p_mfc_try_run_v6()
1731 mfc_debug(1, "No ctx is scheduled to be run.\n"); s5p_mfc_try_run_v6()
1736 ctx = dev->ctx[new_ctx]; s5p_mfc_try_run_v6()
1737 mfc_debug(1, "Seting new context to %p\n", ctx); s5p_mfc_try_run_v6()
1738 /* Got context to run in ctx */ s5p_mfc_try_run_v6()
1739 mfc_debug(1, "ctx->dst_queue_cnt=%d ctx->dpb_count=%d ctx->src_queue_cnt=%d\n", s5p_mfc_try_run_v6()
1740 ctx->dst_queue_cnt, ctx->pb_count, ctx->src_queue_cnt); s5p_mfc_try_run_v6()
1741 mfc_debug(1, "ctx->state=%d\n", ctx->state); s5p_mfc_try_run_v6()
1746 s5p_mfc_clean_ctx_int_flags(ctx); s5p_mfc_try_run_v6()
1748 if (ctx->type == MFCINST_DECODER) { s5p_mfc_try_run_v6()
1749 switch (ctx->state) { s5p_mfc_try_run_v6()
1751 s5p_mfc_run_dec_last_frames(ctx); s5p_mfc_try_run_v6()
1754 ret = s5p_mfc_run_dec_frame(ctx); s5p_mfc_try_run_v6()
1758 ctx); s5p_mfc_try_run_v6()
1762 ctx); s5p_mfc_try_run_v6()
1765 s5p_mfc_run_init_dec(ctx); s5p_mfc_try_run_v6()
1768 ret = s5p_mfc_run_init_dec_buffers(ctx); s5p_mfc_try_run_v6()
1771 s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag); s5p_mfc_try_run_v6()
1774 s5p_mfc_run_dec_last_frames(ctx); s5p_mfc_try_run_v6()
1777 s5p_mfc_run_dec_last_frames(ctx); s5p_mfc_try_run_v6()
1781 ctx->capture_state = QUEUE_FREE; s5p_mfc_try_run_v6()
1783 s5p_mfc_run_init_dec(ctx); s5p_mfc_try_run_v6()
1788 } else if (ctx->type == MFCINST_ENCODER) { s5p_mfc_try_run_v6()
1789 switch (ctx->state) { s5p_mfc_try_run_v6()
1792 ret = s5p_mfc_run_enc_frame(ctx); s5p_mfc_try_run_v6()
1796 ctx); s5p_mfc_try_run_v6()
1800 ctx); s5p_mfc_try_run_v6()
1803 s5p_mfc_run_init_enc(ctx); s5p_mfc_try_run_v6()
1806 ret = s5p_mfc_run_init_enc_buffers(ctx); s5p_mfc_try_run_v6()
1812 mfc_err("invalid context type: %d\n", ctx->type); s5p_mfc_try_run_v6()
1851 static void s5p_mfc_write_info_v6(struct s5p_mfc_ctx *ctx, unsigned int data, s5p_mfc_write_info_v6() argument
1860 s5p_mfc_read_info_v6(struct s5p_mfc_ctx *ctx, unsigned long ofs) s5p_mfc_read_info_v6() argument
1897 static int s5p_mfc_get_disp_frame_type_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_get_disp_frame_type_v6() argument
1899 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_get_disp_frame_type_v6()
1975 static int s5p_mfc_get_sei_avail_status_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_get_sei_avail_status_v6() argument
1977 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_get_sei_avail_status_v6()
1991 static unsigned int s5p_mfc_get_pic_type_top_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_get_pic_type_top_v6() argument
1993 return s5p_mfc_read_info_v6(ctx, s5p_mfc_get_pic_type_top_v6()
1994 (__force unsigned long) ctx->dev->mfc_regs->d_ret_picture_tag_top); s5p_mfc_get_pic_type_top_v6()
1997 static unsigned int s5p_mfc_get_pic_type_bot_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_get_pic_type_bot_v6() argument
1999 return s5p_mfc_read_info_v6(ctx, s5p_mfc_get_pic_type_bot_v6()
2000 (__force unsigned long) ctx->dev->mfc_regs->d_ret_picture_tag_bot); s5p_mfc_get_pic_type_bot_v6()
2003 static unsigned int s5p_mfc_get_crop_info_h_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_get_crop_info_h_v6() argument
2005 return s5p_mfc_read_info_v6(ctx, s5p_mfc_get_crop_info_h_v6()
2006 (__force unsigned long) ctx->dev->mfc_regs->d_display_crop_info1); s5p_mfc_get_crop_info_h_v6()
2009 static unsigned int s5p_mfc_get_crop_info_v_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_get_crop_info_v_v6() argument
2011 return s5p_mfc_read_info_v6(ctx, s5p_mfc_get_crop_info_v_v6()
2012 (__force unsigned long) ctx->dev->mfc_regs->d_display_crop_info2); s5p_mfc_get_crop_info_v_v6()
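
Note: the reference-buffer setup above (s5p_mfc_set_enc_ref_buffer_v6, lines 569-594) carves every per-frame buffer out of one contiguous DMA bank: the running address is bumped by the luma, chroma and motion-estimation sizes for each DPB slot, then by the scratch area and the two TMV halves, while buf_size1 tracks what remains. A minimal standalone sketch of that arithmetic, with hypothetical sizes and no register writes; the register names in comments only label which write each address would feed in the listing above:

	#include <stddef.h>
	#include <stdint.h>

	/*
	 * Sketch only, not driver code: returns the bytes left in the bank
	 * after the carve, or -1 if the requested layout does not fit.
	 */
	static long carve_enc_ref_bank(uint64_t addr, size_t size, int pb_count,
				       size_t luma, size_t chroma, size_t me,
				       size_t scratch, size_t tmv)
	{
		size_t need = (size_t)pb_count * (luma + chroma + me) +
			      scratch + tmv;

		if (need > size)
			return -1;

		for (int i = 0; i < pb_count; i++) {
			addr += luma;		/* e_luma_dpb[i] */
			addr += chroma;		/* e_chroma_dpb[i] */
			addr += me;		/* e_me_buffer[i] */
		}
		addr += scratch;		/* e_scratch_buffer_addr */
		addr += tmv >> 1;		/* e_tmv_buffer0 */
		addr += tmv >> 1;		/* e_tmv_buffer1 */

		return (long)(size - need);
	}
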
H A Ds5p_mfc_enc.c727 static int s5p_mfc_ctx_ready(struct s5p_mfc_ctx *ctx) s5p_mfc_ctx_ready() argument
730 ctx->src_queue_cnt, ctx->dst_queue_cnt, ctx->state); s5p_mfc_ctx_ready()
732 if (ctx->state == MFCINST_GOT_INST && ctx->dst_queue_cnt >= 1) s5p_mfc_ctx_ready()
735 if ((ctx->state == MFCINST_RUNNING || s5p_mfc_ctx_ready()
736 ctx->state == MFCINST_HEAD_PRODUCED) && s5p_mfc_ctx_ready()
737 ctx->src_queue_cnt >= 1 && ctx->dst_queue_cnt >= 1) s5p_mfc_ctx_ready()
740 if (ctx->state == MFCINST_FINISHING && s5p_mfc_ctx_ready()
741 ctx->dst_queue_cnt >= 1) s5p_mfc_ctx_ready()
743 mfc_debug(2, "ctx is not ready\n"); s5p_mfc_ctx_ready()
747 static void cleanup_ref_queue(struct s5p_mfc_ctx *ctx) cleanup_ref_queue() argument
752 while (!list_empty(&ctx->ref_queue)) { cleanup_ref_queue()
753 mb_entry = list_entry((&ctx->ref_queue)->next, cleanup_ref_queue()
756 ctx->ref_queue_cnt--; cleanup_ref_queue()
757 list_add_tail(&mb_entry->list, &ctx->src_queue); cleanup_ref_queue()
758 ctx->src_queue_cnt++; cleanup_ref_queue()
761 ctx->src_queue_cnt, ctx->ref_queue_cnt); cleanup_ref_queue()
762 INIT_LIST_HEAD(&ctx->ref_queue); cleanup_ref_queue()
763 ctx->ref_queue_cnt = 0; cleanup_ref_queue()
766 static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx) enc_pre_seq_start() argument
768 struct s5p_mfc_dev *dev = ctx->dev; enc_pre_seq_start()
775 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); enc_pre_seq_start()
778 s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, enc_pre_seq_start()
784 static int enc_post_seq_start(struct s5p_mfc_ctx *ctx) enc_post_seq_start() argument
786 struct s5p_mfc_dev *dev = ctx->dev; enc_post_seq_start()
787 struct s5p_mfc_enc_params *p = &ctx->enc_params; enc_post_seq_start()
794 if (!list_empty(&ctx->dst_queue)) { enc_post_seq_start()
795 dst_mb = list_entry(ctx->dst_queue.next, enc_post_seq_start()
798 ctx->dst_queue_cnt--; enc_post_seq_start()
808 ctx->state = MFCINST_RUNNING; enc_post_seq_start()
809 if (s5p_mfc_ctx_ready(ctx)) enc_post_seq_start()
810 set_work_bit_irqsave(ctx); enc_post_seq_start()
815 if (ctx->pb_count < enc_pb_count) enc_post_seq_start()
816 ctx->pb_count = enc_pb_count; enc_post_seq_start()
817 ctx->state = MFCINST_HEAD_PRODUCED; enc_post_seq_start()
823 static int enc_pre_frame_start(struct s5p_mfc_ctx *ctx) enc_pre_frame_start() argument
825 struct s5p_mfc_dev *dev = ctx->dev; enc_pre_frame_start()
833 src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); enc_pre_frame_start()
836 s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_frame_buffer, ctx, enc_pre_frame_start()
841 dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list); enc_pre_frame_start()
844 s5p_mfc_hw_call_void(dev->mfc_ops, set_enc_stream_buffer, ctx, dst_addr, enc_pre_frame_start()
851 static int enc_post_frame_start(struct s5p_mfc_ctx *ctx) enc_post_frame_start() argument
853 struct s5p_mfc_dev *dev = ctx->dev; enc_post_frame_start()
869 s5p_mfc_hw_call_void(dev->mfc_ops, get_enc_frame_buffer, ctx, enc_post_frame_start()
871 list_for_each_entry(mb_entry, &ctx->src_queue, list) { enc_post_frame_start()
877 ctx->src_queue_cnt--; enc_post_frame_start()
883 list_for_each_entry(mb_entry, &ctx->ref_queue, list) { enc_post_frame_start()
889 ctx->ref_queue_cnt--; enc_post_frame_start()
896 if ((ctx->src_queue_cnt > 0) && (ctx->state == MFCINST_RUNNING)) { enc_post_frame_start()
897 mb_entry = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, enc_post_frame_start()
901 ctx->src_queue_cnt--; enc_post_frame_start()
902 list_add_tail(&mb_entry->list, &ctx->ref_queue); enc_post_frame_start()
903 ctx->ref_queue_cnt++; enc_post_frame_start()
906 ctx->src_queue_cnt, ctx->ref_queue_cnt); enc_post_frame_start()
908 if ((ctx->dst_queue_cnt > 0) && (strm_size > 0)) { enc_post_frame_start()
909 mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, enc_post_frame_start()
912 ctx->dst_queue_cnt--; enc_post_frame_start()
928 if ((ctx->src_queue_cnt == 0) || (ctx->dst_queue_cnt == 0)) enc_post_frame_start()
929 clear_work_bit(ctx); enc_post_frame_start()
1000 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_g_fmt() local
1003 mfc_debug(2, "f->type = %d ctx->state = %d\n", f->type, ctx->state); vidioc_g_fmt()
1009 pix_fmt_mp->pixelformat = ctx->dst_fmt->fourcc; vidioc_g_fmt()
1010 pix_fmt_mp->num_planes = ctx->dst_fmt->num_planes; vidioc_g_fmt()
1012 pix_fmt_mp->plane_fmt[0].bytesperline = ctx->enc_dst_buf_size; vidioc_g_fmt()
1013 pix_fmt_mp->plane_fmt[0].sizeimage = ctx->enc_dst_buf_size; vidioc_g_fmt()
1016 pix_fmt_mp->width = ctx->img_width; vidioc_g_fmt()
1017 pix_fmt_mp->height = ctx->img_height; vidioc_g_fmt()
1020 pix_fmt_mp->pixelformat = ctx->src_fmt->fourcc; vidioc_g_fmt()
1021 pix_fmt_mp->num_planes = ctx->src_fmt->num_planes; vidioc_g_fmt()
1023 pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width; vidioc_g_fmt()
1024 pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size; vidioc_g_fmt()
1025 pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width; vidioc_g_fmt()
1026 pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size; vidioc_g_fmt()
1085 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_s_fmt() local
1092 if (ctx->vq_src.streaming || ctx->vq_dst.streaming) { vidioc_s_fmt()
1099 ctx->dst_fmt = find_format(f, MFC_FMT_ENC); vidioc_s_fmt()
1100 ctx->state = MFCINST_INIT; vidioc_s_fmt()
1101 ctx->codec_mode = ctx->dst_fmt->codec_mode; vidioc_s_fmt()
1102 ctx->enc_dst_buf_size = pix_fmt_mp->plane_fmt[0].sizeimage; vidioc_s_fmt()
1104 ctx->dst_bufs_cnt = 0; vidioc_s_fmt()
1105 ctx->capture_state = QUEUE_FREE; vidioc_s_fmt()
1106 ret = s5p_mfc_open_mfc_inst(dev, ctx); vidioc_s_fmt()
1109 ctx->src_fmt = find_format(f, MFC_FMT_RAW); vidioc_s_fmt()
1110 ctx->img_width = pix_fmt_mp->width; vidioc_s_fmt()
1111 ctx->img_height = pix_fmt_mp->height; vidioc_s_fmt()
1112 mfc_debug(2, "codec number: %d\n", ctx->src_fmt->codec_mode); vidioc_s_fmt()
1113 mfc_debug(2, "fmt - w: %d, h: %d, ctx - w: %d, h: %d\n", vidioc_s_fmt()
1115 ctx->img_width, ctx->img_height); vidioc_s_fmt()
1117 s5p_mfc_hw_call_void(dev->mfc_ops, enc_calc_src_size, ctx); vidioc_s_fmt()
1118 pix_fmt_mp->plane_fmt[0].sizeimage = ctx->luma_size; vidioc_s_fmt()
1119 pix_fmt_mp->plane_fmt[0].bytesperline = ctx->buf_width; vidioc_s_fmt()
1120 pix_fmt_mp->plane_fmt[1].sizeimage = ctx->chroma_size; vidioc_s_fmt()
1121 pix_fmt_mp->plane_fmt[1].bytesperline = ctx->buf_width; vidioc_s_fmt()
1123 ctx->src_bufs_cnt = 0; vidioc_s_fmt()
1124 ctx->output_state = QUEUE_FREE; vidioc_s_fmt()
1138 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_reqbufs() local
1147 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); vidioc_reqbufs()
1148 ctx->capture_state = QUEUE_FREE; vidioc_reqbufs()
1151 if (ctx->capture_state != QUEUE_FREE) { vidioc_reqbufs()
1153 ctx->capture_state); vidioc_reqbufs()
1156 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); vidioc_reqbufs()
1161 ctx->capture_state = QUEUE_BUFS_REQUESTED; vidioc_reqbufs()
1163 ret = s5p_mfc_hw_call(ctx->dev->mfc_ops, vidioc_reqbufs()
1164 alloc_codec_buffers, ctx); vidioc_reqbufs()
1168 ret = vb2_reqbufs(&ctx->vq_dst, reqbufs); vidioc_reqbufs()
1174 ret = vb2_reqbufs(&ctx->vq_src, reqbufs); vidioc_reqbufs()
1176 ctx); vidioc_reqbufs()
1177 ctx->output_state = QUEUE_FREE; vidioc_reqbufs()
1180 if (ctx->output_state != QUEUE_FREE) { vidioc_reqbufs()
1182 ctx->output_state); vidioc_reqbufs()
1188 if (ctx->pb_count && vidioc_reqbufs()
1189 (reqbufs->count < ctx->pb_count)) { vidioc_reqbufs()
1190 reqbufs->count = ctx->pb_count; vidioc_reqbufs()
1192 ctx->pb_count); vidioc_reqbufs()
1194 ctx->pb_count = reqbufs->count; vidioc_reqbufs()
1198 ret = vb2_reqbufs(&ctx->vq_src, reqbufs); vidioc_reqbufs()
1203 ctx->output_state = QUEUE_BUFS_REQUESTED; vidioc_reqbufs()
1214 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_querybuf() local
1222 if (ctx->state != MFCINST_GOT_INST) { vidioc_querybuf()
1223 mfc_err("invalid context state: %d\n", ctx->state); vidioc_querybuf()
1226 ret = vb2_querybuf(&ctx->vq_dst, buf); vidioc_querybuf()
1233 ret = vb2_querybuf(&ctx->vq_src, buf); vidioc_querybuf()
1248 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_qbuf() local
1250 if (ctx->state == MFCINST_ERROR) { vidioc_qbuf()
1255 if (ctx->state == MFCINST_FINISHING) { vidioc_qbuf()
1259 return vb2_qbuf(&ctx->vq_src, buf); vidioc_qbuf()
1261 return vb2_qbuf(&ctx->vq_dst, buf); vidioc_qbuf()
1272 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_dqbuf() local
1275 if (ctx->state == MFCINST_ERROR) { vidioc_dqbuf()
1280 ret = vb2_dqbuf(&ctx->vq_src, buf, file->f_flags & O_NONBLOCK); vidioc_dqbuf()
1282 ret = vb2_dqbuf(&ctx->vq_dst, buf, file->f_flags & O_NONBLOCK); vidioc_dqbuf()
1283 if (ret == 0 && ctx->state == MFCINST_FINISHED vidioc_dqbuf()
1284 && list_empty(&ctx->vq_dst.done_list)) vidioc_dqbuf()
1285 v4l2_event_queue_fh(&ctx->fh, &ev); vidioc_dqbuf()
1297 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_expbuf() local
1300 return vb2_expbuf(&ctx->vq_src, eb); vidioc_expbuf()
1302 return vb2_expbuf(&ctx->vq_dst, eb); vidioc_expbuf()
1310 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_streamon() local
1313 return vb2_streamon(&ctx->vq_src, type); vidioc_streamon()
1315 return vb2_streamon(&ctx->vq_dst, type); vidioc_streamon()
1323 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_streamoff() local
1326 return vb2_streamoff(&ctx->vq_src, type); vidioc_streamoff()
1328 return vb2_streamoff(&ctx->vq_dst, type); vidioc_streamoff()
1393 struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl); s5p_mfc_enc_s_ctrl() local
1394 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_enc_s_ctrl()
1395 struct s5p_mfc_enc_params *p = &ctx->enc_params; s5p_mfc_enc_s_ctrl()
1432 ctx->force_frame_type = ctrl->val; s5p_mfc_enc_s_ctrl()
1648 struct s5p_mfc_ctx *ctx = ctrl_to_ctx(ctrl); s5p_mfc_enc_g_v_ctrl() local
1649 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_enc_g_v_ctrl()
1653 if (ctx->state >= MFCINST_HEAD_PARSED && s5p_mfc_enc_g_v_ctrl()
1654 ctx->state < MFCINST_ABORT) { s5p_mfc_enc_g_v_ctrl()
1655 ctrl->val = ctx->pb_count; s5p_mfc_enc_g_v_ctrl()
1657 } else if (ctx->state != MFCINST_INIT) { s5p_mfc_enc_g_v_ctrl()
1662 s5p_mfc_wait_for_done_ctx(ctx, s5p_mfc_enc_g_v_ctrl()
1664 if (ctx->state >= MFCINST_HEAD_PARSED && s5p_mfc_enc_g_v_ctrl()
1665 ctx->state < MFCINST_ABORT) { s5p_mfc_enc_g_v_ctrl()
1666 ctrl->val = ctx->pb_count; s5p_mfc_enc_g_v_ctrl()
1684 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_s_parm() local
1687 ctx->enc_params.rc_framerate_num = vidioc_s_parm()
1689 ctx->enc_params.rc_framerate_denom = vidioc_s_parm()
1701 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_g_parm() local
1705 ctx->enc_params.rc_framerate_num; vidioc_g_parm()
1707 ctx->enc_params.rc_framerate_denom; vidioc_g_parm()
1718 struct s5p_mfc_ctx *ctx = fh_to_ctx(priv); vidioc_encoder_cmd() local
1719 struct s5p_mfc_dev *dev = ctx->dev; vidioc_encoder_cmd()
1728 if (!ctx->vq_src.streaming) vidioc_encoder_cmd()
1732 if (list_empty(&ctx->src_queue)) { vidioc_encoder_cmd()
1734 ctx->state = MFCINST_FINISHING; vidioc_encoder_cmd()
1735 if (s5p_mfc_ctx_ready(ctx)) vidioc_encoder_cmd()
1736 set_work_bit_irqsave(ctx); vidioc_encoder_cmd()
1741 buf = list_entry(ctx->src_queue.prev, vidioc_encoder_cmd()
1744 ctx->state = MFCINST_FINISHING; vidioc_encoder_cmd()
1819 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); s5p_mfc_queue_setup() local
1820 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_queue_setup()
1822 if (ctx->state != MFCINST_GOT_INST) { s5p_mfc_queue_setup()
1823 mfc_err("inavlid state: %d\n", ctx->state); s5p_mfc_queue_setup()
1827 if (ctx->dst_fmt) s5p_mfc_queue_setup()
1828 *plane_count = ctx->dst_fmt->num_planes; s5p_mfc_queue_setup()
1835 psize[0] = ctx->enc_dst_buf_size; s5p_mfc_queue_setup()
1836 allocators[0] = ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; s5p_mfc_queue_setup()
1838 if (ctx->src_fmt) s5p_mfc_queue_setup()
1839 *plane_count = ctx->src_fmt->num_planes; s5p_mfc_queue_setup()
1848 psize[0] = ctx->luma_size; s5p_mfc_queue_setup()
1849 psize[1] = ctx->chroma_size; s5p_mfc_queue_setup()
1853 ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; s5p_mfc_queue_setup()
1855 ctx->dev->alloc_ctx[MFC_BANK1_ALLOC_CTX]; s5p_mfc_queue_setup()
1858 ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX]; s5p_mfc_queue_setup()
1860 ctx->dev->alloc_ctx[MFC_BANK2_ALLOC_CTX]; s5p_mfc_queue_setup()
1872 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); s5p_mfc_buf_init() local
1877 ret = check_vb_with_fmt(ctx->dst_fmt, vb); s5p_mfc_buf_init()
1881 ctx->dst_bufs[i].b = vb; s5p_mfc_buf_init()
1882 ctx->dst_bufs[i].cookie.stream = s5p_mfc_buf_init()
1884 ctx->dst_bufs_cnt++; s5p_mfc_buf_init()
1886 ret = check_vb_with_fmt(ctx->src_fmt, vb); s5p_mfc_buf_init()
1890 ctx->src_bufs[i].b = vb; s5p_mfc_buf_init()
1891 ctx->src_bufs[i].cookie.raw.luma = s5p_mfc_buf_init()
1893 ctx->src_bufs[i].cookie.raw.chroma = s5p_mfc_buf_init()
1895 ctx->src_bufs_cnt++; s5p_mfc_buf_init()
1906 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); s5p_mfc_buf_prepare() local
1910 ret = check_vb_with_fmt(ctx->dst_fmt, vb); s5p_mfc_buf_prepare()
1914 vb2_plane_size(vb, 0), ctx->enc_dst_buf_size); s5p_mfc_buf_prepare()
1915 if (vb2_plane_size(vb, 0) < ctx->enc_dst_buf_size) { s5p_mfc_buf_prepare()
1920 ret = check_vb_with_fmt(ctx->src_fmt, vb); s5p_mfc_buf_prepare()
1924 vb2_plane_size(vb, 0), ctx->luma_size); s5p_mfc_buf_prepare()
1926 vb2_plane_size(vb, 1), ctx->chroma_size); s5p_mfc_buf_prepare()
1927 if (vb2_plane_size(vb, 0) < ctx->luma_size || s5p_mfc_buf_prepare()
1928 vb2_plane_size(vb, 1) < ctx->chroma_size) { s5p_mfc_buf_prepare()
1941 struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); s5p_mfc_start_streaming() local
1942 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_start_streaming()
1947 if ((ctx->state == MFCINST_GOT_INST) && s5p_mfc_start_streaming()
1948 (dev->curr_ctx == ctx->num) && dev->hw_lock) { s5p_mfc_start_streaming()
1949 s5p_mfc_wait_for_done_ctx(ctx, s5p_mfc_start_streaming()
1954 if (ctx->src_bufs_cnt < ctx->pb_count) { s5p_mfc_start_streaming()
1956 ctx->pb_count); s5p_mfc_start_streaming()
1962 if (s5p_mfc_ctx_ready(ctx)) s5p_mfc_start_streaming()
1963 set_work_bit_irqsave(ctx); s5p_mfc_start_streaming()
1972 struct s5p_mfc_ctx *ctx = fh_to_ctx(q->drv_priv); s5p_mfc_stop_streaming() local
1973 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_stop_streaming()
1975 if ((ctx->state == MFCINST_FINISHING || s5p_mfc_stop_streaming()
1976 ctx->state == MFCINST_RUNNING) && s5p_mfc_stop_streaming()
1977 dev->curr_ctx == ctx->num && dev->hw_lock) { s5p_mfc_stop_streaming()
1978 ctx->state = MFCINST_ABORT; s5p_mfc_stop_streaming()
1979 s5p_mfc_wait_for_done_ctx(ctx, S5P_MFC_R2H_CMD_FRAME_DONE_RET, s5p_mfc_stop_streaming()
1982 ctx->state = MFCINST_FINISHED; s5p_mfc_stop_streaming()
1986 &ctx->dst_queue, &ctx->vq_dst); s5p_mfc_stop_streaming()
1987 INIT_LIST_HEAD(&ctx->dst_queue); s5p_mfc_stop_streaming()
1988 ctx->dst_queue_cnt = 0; s5p_mfc_stop_streaming()
1991 cleanup_ref_queue(ctx); s5p_mfc_stop_streaming()
1992 s5p_mfc_hw_call_void(dev->mfc_ops, cleanup_queue, &ctx->src_queue, s5p_mfc_stop_streaming()
1993 &ctx->vq_src); s5p_mfc_stop_streaming()
1994 INIT_LIST_HEAD(&ctx->src_queue); s5p_mfc_stop_streaming()
1995 ctx->src_queue_cnt = 0; s5p_mfc_stop_streaming()
2003 struct s5p_mfc_ctx *ctx = fh_to_ctx(vq->drv_priv); s5p_mfc_buf_queue() local
2004 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_buf_queue()
2008 if (ctx->state == MFCINST_ERROR) { s5p_mfc_buf_queue()
2010 cleanup_ref_queue(ctx); s5p_mfc_buf_queue()
2014 mfc_buf = &ctx->dst_bufs[vb->v4l2_buf.index]; s5p_mfc_buf_queue()
2018 list_add_tail(&mfc_buf->list, &ctx->dst_queue); s5p_mfc_buf_queue()
2019 ctx->dst_queue_cnt++; s5p_mfc_buf_queue()
2022 mfc_buf = &ctx->src_bufs[vb->v4l2_buf.index]; s5p_mfc_buf_queue()
2025 list_add_tail(&mfc_buf->list, &ctx->src_queue); s5p_mfc_buf_queue()
2026 ctx->src_queue_cnt++; s5p_mfc_buf_queue()
2031 if (s5p_mfc_ctx_ready(ctx)) s5p_mfc_buf_queue()
2032 set_work_bit_irqsave(ctx); s5p_mfc_buf_queue()
2065 int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx) s5p_mfc_enc_ctrls_setup() argument
2070 v4l2_ctrl_handler_init(&ctx->ctrl_handler, NUM_CTRLS); s5p_mfc_enc_ctrls_setup()
2071 if (ctx->ctrl_handler.error) { s5p_mfc_enc_ctrls_setup()
2073 return ctx->ctrl_handler.error; s5p_mfc_enc_ctrls_setup()
2095 ctx->ctrls[i] = v4l2_ctrl_new_custom(&ctx->ctrl_handler, s5p_mfc_enc_ctrls_setup()
2101 ctx->ctrls[i] = v4l2_ctrl_new_std_menu( s5p_mfc_enc_ctrls_setup()
2102 &ctx->ctrl_handler, s5p_mfc_enc_ctrls_setup()
2107 ctx->ctrls[i] = v4l2_ctrl_new_std( s5p_mfc_enc_ctrls_setup()
2108 &ctx->ctrl_handler, s5p_mfc_enc_ctrls_setup()
2115 if (ctx->ctrl_handler.error) { s5p_mfc_enc_ctrls_setup()
2117 return ctx->ctrl_handler.error; s5p_mfc_enc_ctrls_setup()
2119 if (controls[i].is_volatile && ctx->ctrls[i]) s5p_mfc_enc_ctrls_setup()
2120 ctx->ctrls[i]->flags |= V4L2_CTRL_FLAG_VOLATILE; s5p_mfc_enc_ctrls_setup()
2122 v4l2_ctrl_handler_setup(&ctx->ctrl_handler); s5p_mfc_enc_ctrls_setup()
2126 void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx) s5p_mfc_enc_ctrls_delete() argument
2130 v4l2_ctrl_handler_free(&ctx->ctrl_handler); s5p_mfc_enc_ctrls_delete()
2132 ctx->ctrls[i] = NULL; s5p_mfc_enc_ctrls_delete()
2135 void s5p_mfc_enc_init(struct s5p_mfc_ctx *ctx) s5p_mfc_enc_init() argument
2139 ctx->src_fmt = find_format(&f, MFC_FMT_RAW); s5p_mfc_enc_init()
2141 ctx->dst_fmt = find_format(&f, MFC_FMT_ENC); s5p_mfc_enc_init()
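
Note: s5p_mfc_ctx_ready() at the top of this file (lines 727-743) is the entire scheduling predicate for the encoder; work is only queued when the instance state and the source/destination queue depths line up. Restated as a self-contained function, where the enum values are illustrative and not the driver's:

	enum inst_state { INIT, GOT_INST, HEAD_PRODUCED, RUNNING, FINISHING };

	static int enc_ctx_ready(enum inst_state state, int src_cnt, int dst_cnt)
	{
		/* the stream header can be produced with only a capture buffer */
		if (state == GOT_INST && dst_cnt >= 1)
			return 1;
		/* a normal frame needs one raw source and one stream destination */
		if ((state == RUNNING || state == HEAD_PRODUCED) &&
		    src_cnt >= 1 && dst_cnt >= 1)
			return 1;
		/* when finishing, buffered reference frames only need destinations */
		if (state == FINISHING && dst_cnt >= 1)
			return 1;
		return 0;
	}
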
H A Ds5p_mfc.c47 void clear_work_bit(struct s5p_mfc_ctx *ctx) clear_work_bit() argument
49 struct s5p_mfc_dev *dev = ctx->dev; clear_work_bit()
52 __clear_bit(ctx->num, &dev->ctx_work_bits); clear_work_bit()
57 void set_work_bit(struct s5p_mfc_ctx *ctx) set_work_bit() argument
59 struct s5p_mfc_dev *dev = ctx->dev; set_work_bit()
62 __set_bit(ctx->num, &dev->ctx_work_bits); set_work_bit()
67 void clear_work_bit_irqsave(struct s5p_mfc_ctx *ctx) clear_work_bit_irqsave() argument
69 struct s5p_mfc_dev *dev = ctx->dev; clear_work_bit_irqsave()
73 __clear_bit(ctx->num, &dev->ctx_work_bits); clear_work_bit_irqsave()
78 void set_work_bit_irqsave(struct s5p_mfc_ctx *ctx) set_work_bit_irqsave() argument
80 struct s5p_mfc_dev *dev = ctx->dev; set_work_bit_irqsave()
84 __set_bit(ctx->num, &dev->ctx_work_bits); set_work_bit_irqsave()
89 static void wake_up_ctx(struct s5p_mfc_ctx *ctx, unsigned int reason, wake_up_ctx() argument
92 ctx->int_cond = 1; wake_up_ctx()
93 ctx->int_type = reason; wake_up_ctx()
94 ctx->int_err = err; wake_up_ctx()
95 wake_up(&ctx->queue); wake_up_ctx()
131 struct s5p_mfc_ctx *ctx; s5p_mfc_watchdog_worker() local
149 ctx = dev->ctx[i]; s5p_mfc_watchdog_worker()
150 if (!ctx) s5p_mfc_watchdog_worker()
152 ctx->state = MFCINST_ERROR; s5p_mfc_watchdog_worker()
154 &ctx->dst_queue, &ctx->vq_dst); s5p_mfc_watchdog_worker()
156 &ctx->src_queue, &ctx->vq_src); s5p_mfc_watchdog_worker()
157 clear_work_bit(ctx); s5p_mfc_watchdog_worker()
158 wake_up_ctx(ctx, S5P_MFC_R2H_CMD_ERR_RET, 0); s5p_mfc_watchdog_worker()
191 static void s5p_mfc_handle_frame_all_extracted(struct s5p_mfc_ctx *ctx) s5p_mfc_handle_frame_all_extracted() argument
194 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_handle_frame_all_extracted()
196 ctx->state = MFCINST_FINISHED; s5p_mfc_handle_frame_all_extracted()
197 ctx->sequence++; s5p_mfc_handle_frame_all_extracted()
198 while (!list_empty(&ctx->dst_queue)) { s5p_mfc_handle_frame_all_extracted()
199 dst_buf = list_entry(ctx->dst_queue.next, s5p_mfc_handle_frame_all_extracted()
206 ctx->dst_queue_cnt--; s5p_mfc_handle_frame_all_extracted()
207 dst_buf->b->v4l2_buf.sequence = (ctx->sequence++); s5p_mfc_handle_frame_all_extracted()
209 if (s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_top, ctx) == s5p_mfc_handle_frame_all_extracted()
210 s5p_mfc_hw_call(dev->mfc_ops, get_pic_type_bot, ctx)) s5p_mfc_handle_frame_all_extracted()
215 ctx->dec_dst_flag &= ~(1 << dst_buf->b->v4l2_buf.index); s5p_mfc_handle_frame_all_extracted()
220 static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx) s5p_mfc_handle_frame_copy_time() argument
222 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_handle_frame_copy_time()
235 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list); s5p_mfc_handle_frame_copy_time()
236 list_for_each_entry(dst_buf, &ctx->dst_queue, list) { s5p_mfc_handle_frame_copy_time()
271 static void s5p_mfc_handle_frame_new(struct s5p_mfc_ctx *ctx, unsigned int err) s5p_mfc_handle_frame_new() argument
273 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_handle_frame_new()
281 get_disp_frame_type, ctx); s5p_mfc_handle_frame_new()
288 if (!ctx->after_packed_pb) s5p_mfc_handle_frame_new()
289 ctx->sequence++; s5p_mfc_handle_frame_new()
290 ctx->after_packed_pb = 0; s5p_mfc_handle_frame_new()
293 ctx->sequence++; s5p_mfc_handle_frame_new()
296 list_for_each_entry(dst_buf, &ctx->dst_queue, list) { s5p_mfc_handle_frame_new()
300 ctx->dst_queue_cnt--; s5p_mfc_handle_frame_new()
301 dst_buf->b->v4l2_buf.sequence = ctx->sequence; s5p_mfc_handle_frame_new()
303 get_pic_type_top, ctx) == s5p_mfc_handle_frame_new()
305 get_pic_type_bot, ctx)) s5p_mfc_handle_frame_new()
310 vb2_set_plane_payload(dst_buf->b, 0, ctx->luma_size); s5p_mfc_handle_frame_new()
311 vb2_set_plane_payload(dst_buf->b, 1, ctx->chroma_size); s5p_mfc_handle_frame_new()
313 &ctx->dec_dst_flag); s5p_mfc_handle_frame_new()
324 static void s5p_mfc_handle_frame(struct s5p_mfc_ctx *ctx, s5p_mfc_handle_frame() argument
327 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_handle_frame()
342 if (ctx->state == MFCINST_RES_CHANGE_INIT) s5p_mfc_handle_frame()
343 ctx->state = MFCINST_RES_CHANGE_FLUSH; s5p_mfc_handle_frame()
346 ctx->state = MFCINST_RES_CHANGE_INIT; s5p_mfc_handle_frame()
348 wake_up_ctx(ctx, reason, err); s5p_mfc_handle_frame()
354 if (ctx->dpb_flush_flag) s5p_mfc_handle_frame()
355 ctx->dpb_flush_flag = 0; s5p_mfc_handle_frame()
360 if (ctx->state == MFCINST_RES_CHANGE_FLUSH) { s5p_mfc_handle_frame()
367 s5p_mfc_handle_frame_all_extracted(ctx); s5p_mfc_handle_frame()
368 ctx->state = MFCINST_RES_CHANGE_END; s5p_mfc_handle_frame()
369 v4l2_event_queue_fh(&ctx->fh, &ev_src_ch); s5p_mfc_handle_frame()
373 s5p_mfc_handle_frame_all_extracted(ctx); s5p_mfc_handle_frame()
378 s5p_mfc_handle_frame_copy_time(ctx); s5p_mfc_handle_frame()
383 s5p_mfc_handle_frame_new(ctx, err); s5p_mfc_handle_frame()
389 && !list_empty(&ctx->src_queue)) { s5p_mfc_handle_frame()
390 src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, s5p_mfc_handle_frame()
392 ctx->consumed_stream += s5p_mfc_hw_call(dev->mfc_ops, s5p_mfc_handle_frame()
394 if (ctx->codec_mode != S5P_MFC_CODEC_H264_DEC && s5p_mfc_handle_frame()
395 ctx->codec_mode != S5P_MFC_CODEC_VP8_DEC && s5p_mfc_handle_frame()
396 ctx->consumed_stream + STUFF_BYTE < s5p_mfc_handle_frame()
400 ctx->after_packed_pb = 1; s5p_mfc_handle_frame()
403 ctx->consumed_stream = 0; s5p_mfc_handle_frame()
405 ctx->state = MFCINST_FINISHING; s5p_mfc_handle_frame()
407 ctx->src_queue_cnt--; s5p_mfc_handle_frame()
416 if ((ctx->src_queue_cnt == 0 && ctx->state != MFCINST_FINISHING) s5p_mfc_handle_frame()
417 || ctx->dst_queue_cnt < ctx->pb_count) s5p_mfc_handle_frame()
418 clear_work_bit(ctx); s5p_mfc_handle_frame()
420 wake_up_ctx(ctx, reason, err); s5p_mfc_handle_frame()
432 struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err) s5p_mfc_handle_error()
438 if (ctx != NULL) { s5p_mfc_handle_error()
440 switch (ctx->state) { s5p_mfc_handle_error()
449 clear_work_bit(ctx); s5p_mfc_handle_error()
450 ctx->state = MFCINST_ERROR; s5p_mfc_handle_error()
454 &ctx->dst_queue, &ctx->vq_dst); s5p_mfc_handle_error()
457 &ctx->src_queue, &ctx->vq_src); s5p_mfc_handle_error()
459 wake_up_ctx(ctx, reason, err); s5p_mfc_handle_error()
462 clear_work_bit(ctx); s5p_mfc_handle_error()
463 ctx->state = MFCINST_ERROR; s5p_mfc_handle_error()
464 wake_up_ctx(ctx, reason, err); s5p_mfc_handle_error()
476 static void s5p_mfc_handle_seq_done(struct s5p_mfc_ctx *ctx, s5p_mfc_handle_seq_done() argument
481 if (ctx == NULL) s5p_mfc_handle_seq_done()
483 dev = ctx->dev; s5p_mfc_handle_seq_done()
484 if (ctx->c_ops->post_seq_start) { s5p_mfc_handle_seq_done()
485 if (ctx->c_ops->post_seq_start(ctx)) s5p_mfc_handle_seq_done()
488 ctx->img_width = s5p_mfc_hw_call(dev->mfc_ops, get_img_width, s5p_mfc_handle_seq_done()
490 ctx->img_height = s5p_mfc_hw_call(dev->mfc_ops, get_img_height, s5p_mfc_handle_seq_done()
493 s5p_mfc_hw_call_void(dev->mfc_ops, dec_calc_dpb_size, ctx); s5p_mfc_handle_seq_done()
495 ctx->pb_count = s5p_mfc_hw_call(dev->mfc_ops, get_dpb_count, s5p_mfc_handle_seq_done()
497 ctx->mv_count = s5p_mfc_hw_call(dev->mfc_ops, get_mv_count, s5p_mfc_handle_seq_done()
499 if (ctx->img_width == 0 || ctx->img_height == 0) s5p_mfc_handle_seq_done()
500 ctx->state = MFCINST_ERROR; s5p_mfc_handle_seq_done()
502 ctx->state = MFCINST_HEAD_PARSED; s5p_mfc_handle_seq_done()
504 if ((ctx->codec_mode == S5P_MFC_CODEC_H264_DEC || s5p_mfc_handle_seq_done()
505 ctx->codec_mode == S5P_MFC_CODEC_H264_MVC_DEC) && s5p_mfc_handle_seq_done()
506 !list_empty(&ctx->src_queue)) { s5p_mfc_handle_seq_done()
508 src_buf = list_entry(ctx->src_queue.next, s5p_mfc_handle_seq_done()
513 ctx->head_processed = 0; s5p_mfc_handle_seq_done()
515 ctx->head_processed = 1; s5p_mfc_handle_seq_done()
517 ctx->head_processed = 1; s5p_mfc_handle_seq_done()
521 clear_work_bit(ctx); s5p_mfc_handle_seq_done()
525 wake_up_ctx(ctx, reason, err); s5p_mfc_handle_seq_done()
529 static void s5p_mfc_handle_init_buffers(struct s5p_mfc_ctx *ctx, s5p_mfc_handle_init_buffers() argument
536 if (ctx == NULL) s5p_mfc_handle_init_buffers()
538 dev = ctx->dev; s5p_mfc_handle_init_buffers()
540 ctx->int_type = reason; s5p_mfc_handle_init_buffers()
541 ctx->int_err = err; s5p_mfc_handle_init_buffers()
542 ctx->int_cond = 1; s5p_mfc_handle_init_buffers()
543 clear_work_bit(ctx); s5p_mfc_handle_init_buffers()
545 ctx->state = MFCINST_RUNNING; s5p_mfc_handle_init_buffers()
546 if (!ctx->dpb_flush_flag && ctx->head_processed) { s5p_mfc_handle_init_buffers()
548 if (!list_empty(&ctx->src_queue)) { s5p_mfc_handle_init_buffers()
549 src_buf = list_entry(ctx->src_queue.next, s5p_mfc_handle_init_buffers()
552 ctx->src_queue_cnt--; s5p_mfc_handle_init_buffers()
558 ctx->dpb_flush_flag = 0; s5p_mfc_handle_init_buffers()
564 wake_up(&ctx->queue); s5p_mfc_handle_init_buffers()
571 wake_up(&ctx->queue); s5p_mfc_handle_init_buffers()
575 static void s5p_mfc_handle_stream_complete(struct s5p_mfc_ctx *ctx, s5p_mfc_handle_stream_complete() argument
578 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_handle_stream_complete()
584 ctx->int_type = reason; s5p_mfc_handle_stream_complete()
585 ctx->int_err = err; s5p_mfc_handle_stream_complete()
586 ctx->state = MFCINST_FINISHED; s5p_mfc_handle_stream_complete()
589 if (!list_empty(&ctx->dst_queue)) { s5p_mfc_handle_stream_complete()
590 mb_entry = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, s5p_mfc_handle_stream_complete()
593 ctx->dst_queue_cnt--; s5p_mfc_handle_stream_complete()
599 clear_work_bit(ctx); s5p_mfc_handle_stream_complete()
604 wake_up(&ctx->queue); s5p_mfc_handle_stream_complete()
612 struct s5p_mfc_ctx *ctx; s5p_mfc_irq() local
619 ctx = dev->ctx[dev->curr_ctx]; s5p_mfc_irq()
627 if (ctx->state == MFCINST_RUNNING && s5p_mfc_irq()
630 s5p_mfc_handle_frame(ctx, reason, err); s5p_mfc_irq()
632 s5p_mfc_handle_error(dev, ctx, reason, err); s5p_mfc_irq()
639 if (ctx->c_ops->post_frame_start) { s5p_mfc_irq()
640 if (ctx->c_ops->post_frame_start(ctx)) s5p_mfc_irq()
643 wake_up_ctx(ctx, reason, err); s5p_mfc_irq()
648 s5p_mfc_handle_frame(ctx, reason, err); s5p_mfc_irq()
653 s5p_mfc_handle_seq_done(ctx, reason, err); s5p_mfc_irq()
657 ctx->inst_no = s5p_mfc_hw_call(dev->mfc_ops, get_inst_no, dev); s5p_mfc_irq()
658 ctx->state = MFCINST_GOT_INST; s5p_mfc_irq()
659 clear_work_bit(ctx); s5p_mfc_irq()
660 wake_up(&ctx->queue); s5p_mfc_irq()
664 clear_work_bit(ctx); s5p_mfc_irq()
665 ctx->inst_no = MFC_NO_INSTANCE_SET; s5p_mfc_irq()
666 ctx->state = MFCINST_FREE; s5p_mfc_irq()
667 wake_up(&ctx->queue); s5p_mfc_irq()
674 if (ctx) s5p_mfc_irq()
675 clear_work_bit(ctx); s5p_mfc_irq()
683 s5p_mfc_handle_init_buffers(ctx, reason, err); s5p_mfc_irq()
687 s5p_mfc_handle_stream_complete(ctx, reason, err); s5p_mfc_irq()
691 clear_work_bit(ctx); s5p_mfc_irq()
692 ctx->state = MFCINST_RUNNING; s5p_mfc_irq()
693 wake_up(&ctx->queue); s5p_mfc_irq()
704 ctx->int_type = reason; s5p_mfc_irq()
705 ctx->int_err = err; s5p_mfc_irq()
706 ctx->int_cond = 1; s5p_mfc_irq()
722 struct s5p_mfc_ctx *ctx = NULL; s5p_mfc_open() local
731 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); s5p_mfc_open()
732 if (!ctx) { s5p_mfc_open()
737 v4l2_fh_init(&ctx->fh, vdev); s5p_mfc_open()
738 file->private_data = &ctx->fh; s5p_mfc_open()
739 v4l2_fh_add(&ctx->fh); s5p_mfc_open()
740 ctx->dev = dev; s5p_mfc_open()
741 INIT_LIST_HEAD(&ctx->src_queue); s5p_mfc_open()
742 INIT_LIST_HEAD(&ctx->dst_queue); s5p_mfc_open()
743 ctx->src_queue_cnt = 0; s5p_mfc_open()
744 ctx->dst_queue_cnt = 0; s5p_mfc_open()
746 ctx->num = 0; s5p_mfc_open()
747 while (dev->ctx[ctx->num]) { s5p_mfc_open()
748 ctx->num++; s5p_mfc_open()
749 if (ctx->num >= MFC_NUM_CONTEXTS) { s5p_mfc_open()
756 clear_work_bit_irqsave(ctx); s5p_mfc_open()
757 dev->ctx[ctx->num] = ctx; s5p_mfc_open()
759 ctx->type = MFCINST_DECODER; s5p_mfc_open()
760 ctx->c_ops = get_dec_codec_ops(); s5p_mfc_open()
761 s5p_mfc_dec_init(ctx); s5p_mfc_open()
763 ret = s5p_mfc_dec_ctrls_setup(ctx); s5p_mfc_open()
769 ctx->type = MFCINST_ENCODER; s5p_mfc_open()
770 ctx->c_ops = get_enc_codec_ops(); s5p_mfc_open()
772 INIT_LIST_HEAD(&ctx->ref_queue); s5p_mfc_open()
773 ctx->ref_queue_cnt = 0; s5p_mfc_open()
774 s5p_mfc_enc_init(ctx); s5p_mfc_open()
776 ret = s5p_mfc_enc_ctrls_setup(ctx); s5p_mfc_open()
785 ctx->fh.ctrl_handler = &ctx->ctrl_handler; s5p_mfc_open()
786 ctx->inst_no = MFC_NO_INSTANCE_SET; s5p_mfc_open()
810 q = &ctx->vq_dst; s5p_mfc_open()
812 q->drv_priv = &ctx->fh; s5p_mfc_open()
832 q = &ctx->vq_src; s5p_mfc_open()
835 q->drv_priv = &ctx->fh; s5p_mfc_open()
861 init_waitqueue_head(&ctx->queue); s5p_mfc_open()
878 s5p_mfc_dec_ctrls_delete(ctx); s5p_mfc_open()
880 dev->ctx[ctx->num] = NULL; s5p_mfc_open()
882 v4l2_fh_del(&ctx->fh); s5p_mfc_open()
883 v4l2_fh_exit(&ctx->fh); s5p_mfc_open()
884 kfree(ctx); s5p_mfc_open()
895 struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data); s5p_mfc_release() local
896 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_release()
901 vb2_queue_release(&ctx->vq_src); s5p_mfc_release()
902 vb2_queue_release(&ctx->vq_dst); s5p_mfc_release()
904 clear_work_bit_irqsave(ctx); s5p_mfc_release()
907 if (ctx->state != MFCINST_FREE && ctx->state != MFCINST_INIT) { s5p_mfc_release()
909 s5p_mfc_close_mfc_inst(dev, ctx); s5p_mfc_release()
912 if (dev->curr_ctx == ctx->num) s5p_mfc_release()
924 dev->ctx[ctx->num] = NULL; s5p_mfc_release()
925 s5p_mfc_dec_ctrls_delete(ctx); s5p_mfc_release()
926 v4l2_fh_del(&ctx->fh); s5p_mfc_release()
927 v4l2_fh_exit(&ctx->fh); s5p_mfc_release()
928 kfree(ctx); s5p_mfc_release()
938 struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data); s5p_mfc_poll() local
939 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_poll()
946 src_q = &ctx->vq_src; s5p_mfc_poll()
947 dst_q = &ctx->vq_dst; s5p_mfc_poll()
959 poll_wait(file, &ctx->fh.wait, wait); s5p_mfc_poll()
963 if (v4l2_event_pending(&ctx->fh)) s5p_mfc_poll()
989 struct s5p_mfc_ctx *ctx = fh_to_ctx(file->private_data); s5p_mfc_mmap() local
990 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_mmap()
998 ret = vb2_mmap(&ctx->vq_src, vma); s5p_mfc_mmap()
1002 ret = vb2_mmap(&ctx->vq_dst, vma); s5p_mfc_mmap()
431 s5p_mfc_handle_error(struct s5p_mfc_dev *dev, struct s5p_mfc_ctx *ctx, unsigned int reason, unsigned int err) s5p_mfc_handle_error() argument
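
Note: throughout this file the interrupt handler completes work with the same three-field handshake: wake_up_ctx() records the reason and error code in the context, sets int_cond and wakes ctx->queue, while s5p_mfc_wait_for_done_ctx() sleeps until int_cond is set with the expected reason. A userspace analogue of that pattern, assuming pthreads; the int_* field names come from the listing, everything else is illustrative:

	#include <pthread.h>

	struct done_ctx {
		pthread_mutex_t lock;
		pthread_cond_t queue;		/* stands in for ctx->queue */
		int int_cond;
		int int_type;
		int int_err;
	};

	static void wake_up_ctx(struct done_ctx *c, int reason, int err)
	{
		pthread_mutex_lock(&c->lock);
		c->int_cond = 1;		/* event is pending */
		c->int_type = reason;
		c->int_err = err;
		pthread_cond_broadcast(&c->queue);
		pthread_mutex_unlock(&c->lock);
	}

	static int wait_for_done_ctx(struct done_ctx *c, int reason)
	{
		int err;

		pthread_mutex_lock(&c->lock);
		while (!(c->int_cond && c->int_type == reason))
			pthread_cond_wait(&c->queue, &c->lock);
		err = c->int_err;
		c->int_cond = 0;		/* consume the event */
		pthread_mutex_unlock(&c->lock);
		return err;
	}
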
H A Ds5p_mfc_cmd_v6.c67 static int s5p_mfc_open_inst_cmd_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_open_inst_cmd_v6() argument
69 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_open_inst_cmd_v6()
73 mfc_debug(2, "Requested codec mode: %d\n", ctx->codec_mode); s5p_mfc_open_inst_cmd_v6()
74 dev->curr_ctx = ctx->num; s5p_mfc_open_inst_cmd_v6()
75 switch (ctx->codec_mode) { s5p_mfc_open_inst_cmd_v6()
119 mfc_write(dev, ctx->ctx.dma, S5P_FIMV_CONTEXT_MEM_ADDR_V6); s5p_mfc_open_inst_cmd_v6()
120 mfc_write(dev, ctx->ctx.size, S5P_FIMV_CONTEXT_MEM_SIZE_V6); s5p_mfc_open_inst_cmd_v6()
128 static int s5p_mfc_close_inst_cmd_v6(struct s5p_mfc_ctx *ctx) s5p_mfc_close_inst_cmd_v6() argument
130 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_close_inst_cmd_v6()
134 dev->curr_ctx = ctx->num; s5p_mfc_close_inst_cmd_v6()
135 if (ctx->state != MFCINST_FREE) { s5p_mfc_close_inst_cmd_v6()
136 mfc_write(dev, ctx->inst_no, S5P_FIMV_INSTANCE_ID_V6); s5p_mfc_close_inst_cmd_v6()
H A Ds5p_mfc_cmd_v5.c75 static int s5p_mfc_open_inst_cmd_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_open_inst_cmd_v5() argument
77 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_open_inst_cmd_v5()
82 mfc_debug(2, "Getting instance number (codec: %d)\n", ctx->codec_mode); s5p_mfc_open_inst_cmd_v5()
83 dev->curr_ctx = ctx->num; s5p_mfc_open_inst_cmd_v5()
85 switch (ctx->codec_mode) { s5p_mfc_open_inst_cmd_v5()
117 h2r_args.arg[2] = ctx->ctx.ofs; s5p_mfc_open_inst_cmd_v5()
118 h2r_args.arg[3] = ctx->ctx.size; s5p_mfc_open_inst_cmd_v5()
123 ctx->state = MFCINST_ERROR; s5p_mfc_open_inst_cmd_v5()
128 static int s5p_mfc_close_inst_cmd_v5(struct s5p_mfc_ctx *ctx) s5p_mfc_close_inst_cmd_v5() argument
130 struct s5p_mfc_dev *dev = ctx->dev; s5p_mfc_close_inst_cmd_v5()
134 if (ctx->state == MFCINST_FREE) { s5p_mfc_close_inst_cmd_v5()
136 ctx->state = MFCINST_ERROR; s5p_mfc_close_inst_cmd_v5()
140 mfc_debug(2, "Returning instance number %d\n", ctx->inst_no); s5p_mfc_close_inst_cmd_v5()
141 dev->curr_ctx = ctx->num; s5p_mfc_close_inst_cmd_v5()
143 h2r_args.arg[0] = ctx->inst_no; s5p_mfc_close_inst_cmd_v5()
148 ctx->state = MFCINST_ERROR; s5p_mfc_close_inst_cmd_v5()
H A Ds5p_mfc_dec.h20 int s5p_mfc_dec_ctrls_setup(struct s5p_mfc_ctx *ctx);
21 void s5p_mfc_dec_ctrls_delete(struct s5p_mfc_ctx *ctx);
22 void s5p_mfc_dec_init(struct s5p_mfc_ctx *ctx);
H A Ds5p_mfc_enc.h20 int s5p_mfc_enc_ctrls_setup(struct s5p_mfc_ctx *ctx);
21 void s5p_mfc_enc_ctrls_delete(struct s5p_mfc_ctx *ctx);
22 void s5p_mfc_enc_init(struct s5p_mfc_ctx *ctx);
/linux-4.1.27/drivers/staging/skein/
H A Dskein_base.c23 int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len) skein_256_init() argument
31 ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */ skein_256_init()
35 memcpy(ctx->x, SKEIN_256_IV_256, sizeof(ctx->x)); skein_256_init()
38 memcpy(ctx->x, SKEIN_256_IV_224, sizeof(ctx->x)); skein_256_init()
41 memcpy(ctx->x, SKEIN_256_IV_160, sizeof(ctx->x)); skein_256_init()
44 memcpy(ctx->x, SKEIN_256_IV_128, sizeof(ctx->x)); skein_256_init()
53 skein_start_new_type(ctx, CFG_FINAL); skein_256_init()
65 memset(ctx->x, 0, sizeof(ctx->x)); skein_256_init()
66 skein_256_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); skein_256_init()
69 /* The chaining vars ctx->x are now initialized for hash_bit_len. */ skein_256_init()
71 skein_start_new_type(ctx, MSG); /* T0=0, T1= MSG type */ skein_256_init()
80 int skein_256_init_ext(struct skein_256_ctx *ctx, size_t hash_bit_len, skein_256_init_ext() argument
91 /* compute the initial chaining values ctx->x[], based on key */ skein_256_init_ext()
94 memset(ctx->x, 0, sizeof(ctx->x)); skein_256_init_ext()
96 skein_assert(sizeof(cfg.b) >= sizeof(ctx->x)); skein_256_init_ext()
99 ctx->h.hash_bit_len = 8*sizeof(ctx->x); skein_256_init_ext()
101 skein_start_new_type(ctx, KEY); skein_256_init_ext()
103 memset(ctx->x, 0, sizeof(ctx->x)); skein_256_init_ext()
105 skein_256_update(ctx, key, key_bytes); skein_256_init_ext()
107 skein_256_final_pad(ctx, cfg.b); skein_256_init_ext()
108 /* copy over into ctx->x[] */ skein_256_init_ext()
109 memcpy(ctx->x, cfg.b, sizeof(cfg.b)); skein_256_init_ext()
116 ctx->h.hash_bit_len = hash_bit_len; skein_256_init_ext()
117 skein_start_new_type(ctx, CFG_FINAL); skein_256_init_ext()
128 skein_256_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); skein_256_init_ext()
130 /* The chaining vars ctx->x are now initialized */ skein_256_init_ext()
132 skein_start_new_type(ctx, MSG); skein_256_init_ext()
139 int skein_256_update(struct skein_256_ctx *ctx, const u8 *msg, skein_256_update() argument
145 skein_assert_ret(ctx->h.b_cnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL); skein_256_update()
148 if (msg_byte_cnt + ctx->h.b_cnt > SKEIN_256_BLOCK_BYTES) { skein_256_update()
150 if (ctx->h.b_cnt) { skein_256_update()
152 n = SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt; skein_256_update()
156 memcpy(&ctx->b[ctx->h.b_cnt], msg, n); skein_256_update()
159 ctx->h.b_cnt += n; skein_256_update()
161 skein_assert(ctx->h.b_cnt == SKEIN_256_BLOCK_BYTES); skein_256_update()
162 skein_256_process_block(ctx, ctx->b, 1, skein_256_update()
164 ctx->h.b_cnt = 0; skein_256_update()
173 skein_256_process_block(ctx, msg, n, skein_256_update()
178 skein_assert(ctx->h.b_cnt == 0); skein_256_update()
183 skein_assert(msg_byte_cnt + ctx->h.b_cnt <= skein_256_update()
185 memcpy(&ctx->b[ctx->h.b_cnt], msg, msg_byte_cnt); skein_256_update()
186 ctx->h.b_cnt += msg_byte_cnt; skein_256_update()
194 int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val) skein_256_final() argument
199 skein_assert_ret(ctx->h.b_cnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL); skein_256_final()
202 ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL; skein_256_final()
204 if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES) skein_256_final()
205 memset(&ctx->b[ctx->h.b_cnt], 0, skein_256_final()
206 SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt); skein_256_final()
209 skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); skein_256_final()
213 byte_cnt = (ctx->h.hash_bit_len + 7) >> 3; skein_256_final()
217 memset(ctx->b, 0, sizeof(ctx->b)); skein_256_final()
219 memcpy(x, ctx->x, sizeof(x)); skein_256_final()
222 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); skein_256_final()
223 skein_start_new_type(ctx, OUT_FINAL); skein_256_final()
225 skein_256_process_block(ctx, ctx->b, 1, sizeof(u64)); skein_256_final()
231 skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x, skein_256_final()
234 memcpy(ctx->x, x, sizeof(x)); skein_256_final()
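
Note: the skein_256_* fragments above form the usual streaming-hash triple: init() loads the IV for the requested output size, update() buffers partial input in ctx->b and processes full blocks, and final() pads the last block, sets SKEIN_T1_FLAG_FINAL and runs the output transform. A caller-side sketch; the header name and the u8/SKEIN_SUCCESS spellings are assumptions based on this staging driver (in-kernel users would normally go through the Crypto API instead):

	#include <stddef.h>
	#include "skein_base.h"	/* assumed header for the staging driver */

	/* Hash len bytes of msg into a 256-bit digest; returns 0 on success. */
	static int skein256_digest(const u8 *msg, size_t len, u8 out[32])
	{
		struct skein_256_ctx ctx;

		if (skein_256_init(&ctx, 256) != SKEIN_SUCCESS)
			return -1;
		/* update() may be called any number of times; partial input
		 * is buffered in ctx->b until a full block is available */
		if (skein_256_update(&ctx, msg, len) != SKEIN_SUCCESS)
			return -1;
		/* final() zero-pads the last block and emits the digest */
		if (skein_256_final(&ctx, out) != SKEIN_SUCCESS)
			return -1;
		return 0;
	}
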
245 int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len) skein_512_init() argument
253 ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */ skein_512_init()
257 memcpy(ctx->x, SKEIN_512_IV_512, sizeof(ctx->x)); skein_512_init()
260 memcpy(ctx->x, SKEIN_512_IV_384, sizeof(ctx->x)); skein_512_init()
263 memcpy(ctx->x, SKEIN_512_IV_256, sizeof(ctx->x)); skein_512_init()
266 memcpy(ctx->x, SKEIN_512_IV_224, sizeof(ctx->x)); skein_512_init()
275 skein_start_new_type(ctx, CFG_FINAL); skein_512_init()
287 memset(ctx->x, 0, sizeof(ctx->x)); skein_512_init()
288 skein_512_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); skein_512_init()
293 * The chaining vars ctx->x are now initialized for the given skein_512_init()
297 skein_start_new_type(ctx, MSG); /* T0=0, T1= MSG type */ skein_512_init()
306 int skein_512_init_ext(struct skein_512_ctx *ctx, size_t hash_bit_len, skein_512_init_ext() argument
317 /* compute the initial chaining values ctx->x[], based on key */ skein_512_init_ext()
320 memset(ctx->x, 0, sizeof(ctx->x)); skein_512_init_ext()
322 skein_assert(sizeof(cfg.b) >= sizeof(ctx->x)); skein_512_init_ext()
325 ctx->h.hash_bit_len = 8*sizeof(ctx->x); skein_512_init_ext()
327 skein_start_new_type(ctx, KEY); skein_512_init_ext()
329 memset(ctx->x, 0, sizeof(ctx->x)); skein_512_init_ext()
331 skein_512_update(ctx, key, key_bytes); skein_512_init_ext()
333 skein_512_final_pad(ctx, cfg.b); skein_512_init_ext()
334 /* copy over into ctx->x[] */ skein_512_init_ext()
335 memcpy(ctx->x, cfg.b, sizeof(cfg.b)); skein_512_init_ext()
341 ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */ skein_512_init_ext()
342 skein_start_new_type(ctx, CFG_FINAL); skein_512_init_ext()
353 skein_512_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); skein_512_init_ext()
355 /* The chaining vars ctx->x are now initialized */ skein_512_init_ext()
357 skein_start_new_type(ctx, MSG); skein_512_init_ext()
364 int skein_512_update(struct skein_512_ctx *ctx, const u8 *msg, skein_512_update() argument
370 skein_assert_ret(ctx->h.b_cnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL); skein_512_update()
373 if (msg_byte_cnt + ctx->h.b_cnt > SKEIN_512_BLOCK_BYTES) { skein_512_update()
375 if (ctx->h.b_cnt) { skein_512_update()
377 n = SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt; skein_512_update()
381 memcpy(&ctx->b[ctx->h.b_cnt], msg, n); skein_512_update()
384 ctx->h.b_cnt += n; skein_512_update()
386 skein_assert(ctx->h.b_cnt == SKEIN_512_BLOCK_BYTES); skein_512_update()
387 skein_512_process_block(ctx, ctx->b, 1, skein_512_update()
389 ctx->h.b_cnt = 0; skein_512_update()
398 skein_512_process_block(ctx, msg, n, skein_512_update()
403 skein_assert(ctx->h.b_cnt == 0); skein_512_update()
408 skein_assert(msg_byte_cnt + ctx->h.b_cnt <= skein_512_update()
410 memcpy(&ctx->b[ctx->h.b_cnt], msg, msg_byte_cnt); skein_512_update()
411 ctx->h.b_cnt += msg_byte_cnt; skein_512_update()
419 int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val) skein_512_final() argument
424 skein_assert_ret(ctx->h.b_cnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL); skein_512_final()
427 ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL; skein_512_final()
429 if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES) skein_512_final()
430 memset(&ctx->b[ctx->h.b_cnt], 0, skein_512_final()
431 SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt); skein_512_final()
434 skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); skein_512_final()
438 byte_cnt = (ctx->h.hash_bit_len + 7) >> 3; skein_512_final()
442 memset(ctx->b, 0, sizeof(ctx->b)); skein_512_final()
444 memcpy(x, ctx->x, sizeof(x)); skein_512_final()
447 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); skein_512_final()
448 skein_start_new_type(ctx, OUT_FINAL); skein_512_final()
450 skein_512_process_block(ctx, ctx->b, 1, sizeof(u64)); skein_512_final()
456 skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x, skein_512_final()
459 memcpy(ctx->x, x, sizeof(x)); skein_512_final()
470 int skein_1024_init(struct skein_1024_ctx *ctx, size_t hash_bit_len) skein_1024_init() argument
478 ctx->h.hash_bit_len = hash_bit_len; /* output hash bit count */ skein_1024_init()
482 memcpy(ctx->x, SKEIN_1024_IV_512, sizeof(ctx->x)); skein_1024_init()
485 memcpy(ctx->x, SKEIN_1024_IV_384, sizeof(ctx->x)); skein_1024_init()
488 memcpy(ctx->x, SKEIN_1024_IV_1024, sizeof(ctx->x)); skein_1024_init()
497 skein_start_new_type(ctx, CFG_FINAL); skein_1024_init()
509 memset(ctx->x, 0, sizeof(ctx->x)); skein_1024_init()
510 skein_1024_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); skein_1024_init()
514 /* The chaining vars ctx->x are now initialized for the hash_bit_len. */ skein_1024_init()
516 skein_start_new_type(ctx, MSG); /* T0=0, T1= MSG type */ skein_1024_init()
525 int skein_1024_init_ext(struct skein_1024_ctx *ctx, size_t hash_bit_len, skein_1024_init_ext() argument
536 /* compute the initial chaining values ctx->x[], based on key */ skein_1024_init_ext()
539 memset(ctx->x, 0, sizeof(ctx->x)); skein_1024_init_ext()
541 skein_assert(sizeof(cfg.b) >= sizeof(ctx->x)); skein_1024_init_ext()
544 ctx->h.hash_bit_len = 8*sizeof(ctx->x); skein_1024_init_ext()
546 skein_start_new_type(ctx, KEY); skein_1024_init_ext()
548 memset(ctx->x, 0, sizeof(ctx->x)); skein_1024_init_ext()
550 skein_1024_update(ctx, key, key_bytes); skein_1024_init_ext()
552 skein_1024_final_pad(ctx, cfg.b); skein_1024_init_ext()
553 /* copy over into ctx->x[] */ skein_1024_init_ext()
554 memcpy(ctx->x, cfg.b, sizeof(cfg.b)); skein_1024_init_ext()
561 ctx->h.hash_bit_len = hash_bit_len; skein_1024_init_ext()
562 skein_start_new_type(ctx, CFG_FINAL); skein_1024_init_ext()
573 skein_1024_process_block(ctx, cfg.b, 1, SKEIN_CFG_STR_LEN); skein_1024_init_ext()
575 /* The chaining vars ctx->x are now initialized */ skein_1024_init_ext()
577 skein_start_new_type(ctx, MSG); skein_1024_init_ext()
584 int skein_1024_update(struct skein_1024_ctx *ctx, const u8 *msg, skein_1024_update() argument
590 skein_assert_ret(ctx->h.b_cnt <= SKEIN_1024_BLOCK_BYTES, SKEIN_FAIL); skein_1024_update()
593 if (msg_byte_cnt + ctx->h.b_cnt > SKEIN_1024_BLOCK_BYTES) { skein_1024_update()
595 if (ctx->h.b_cnt) { skein_1024_update()
597 n = SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt; skein_1024_update()
601 memcpy(&ctx->b[ctx->h.b_cnt], msg, n); skein_1024_update()
604 ctx->h.b_cnt += n; skein_1024_update()
606 skein_assert(ctx->h.b_cnt == SKEIN_1024_BLOCK_BYTES); skein_1024_update()
607 skein_1024_process_block(ctx, ctx->b, 1, skein_1024_update()
609 ctx->h.b_cnt = 0; skein_1024_update()
618 skein_1024_process_block(ctx, msg, n, skein_1024_update()
623 skein_assert(ctx->h.b_cnt == 0); skein_1024_update()
628 skein_assert(msg_byte_cnt + ctx->h.b_cnt <= skein_1024_update()
630 memcpy(&ctx->b[ctx->h.b_cnt], msg, msg_byte_cnt); skein_1024_update()
631 ctx->h.b_cnt += msg_byte_cnt; skein_1024_update()
639 int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val) skein_1024_final() argument
644 skein_assert_ret(ctx->h.b_cnt <= SKEIN_1024_BLOCK_BYTES, SKEIN_FAIL); skein_1024_final()
647 ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL; skein_1024_final()
649 if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES) skein_1024_final()
650 memset(&ctx->b[ctx->h.b_cnt], 0, skein_1024_final()
651 SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt); skein_1024_final()
654 skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); skein_1024_final()
658 byte_cnt = (ctx->h.hash_bit_len + 7) >> 3; skein_1024_final()
662 memset(ctx->b, 0, sizeof(ctx->b)); skein_1024_final()
664 memcpy(x, ctx->x, sizeof(x)); skein_1024_final()
667 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); skein_1024_final()
668 skein_start_new_type(ctx, OUT_FINAL); skein_1024_final()
670 skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64)); skein_1024_final()
676 skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x, skein_1024_final()
679 memcpy(ctx->x, x, sizeof(x)); skein_1024_final()
689 int skein_256_final_pad(struct skein_256_ctx *ctx, u8 *hash_val) skein_256_final_pad() argument
692 skein_assert_ret(ctx->h.b_cnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL); skein_256_final_pad()
695 ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL; skein_256_final_pad()
697 if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES) skein_256_final_pad()
698 memset(&ctx->b[ctx->h.b_cnt], 0, skein_256_final_pad()
699 SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt); skein_256_final_pad()
701 skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); skein_256_final_pad()
704 skein_put64_lsb_first(hash_val, ctx->x, SKEIN_256_BLOCK_BYTES); skein_256_final_pad()
711 int skein_512_final_pad(struct skein_512_ctx *ctx, u8 *hash_val) skein_512_final_pad() argument
714 skein_assert_ret(ctx->h.b_cnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL); skein_512_final_pad()
717 ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL; skein_512_final_pad()
719 if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES) skein_512_final_pad()
720 memset(&ctx->b[ctx->h.b_cnt], 0, skein_512_final_pad()
721 SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt); skein_512_final_pad()
723 skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); skein_512_final_pad()
726 skein_put64_lsb_first(hash_val, ctx->x, SKEIN_512_BLOCK_BYTES); skein_512_final_pad()
733 int skein_1024_final_pad(struct skein_1024_ctx *ctx, u8 *hash_val) skein_1024_final_pad() argument
736 skein_assert_ret(ctx->h.b_cnt <= SKEIN_1024_BLOCK_BYTES, SKEIN_FAIL); skein_1024_final_pad()
739 ctx->h.tweak[1] |= SKEIN_T1_FLAG_FINAL; skein_1024_final_pad()
741 if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES) skein_1024_final_pad()
742 memset(&ctx->b[ctx->h.b_cnt], 0, skein_1024_final_pad()
743 SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt); skein_1024_final_pad()
745 skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt); skein_1024_final_pad()
748 skein_put64_lsb_first(hash_val, ctx->x, SKEIN_1024_BLOCK_BYTES); skein_1024_final_pad()
756 int skein_256_output(struct skein_256_ctx *ctx, u8 *hash_val) skein_256_output() argument
761 skein_assert_ret(ctx->h.b_cnt <= SKEIN_256_BLOCK_BYTES, SKEIN_FAIL); skein_256_output()
765 byte_cnt = (ctx->h.hash_bit_len + 7) >> 3; skein_256_output()
769 memset(ctx->b, 0, sizeof(ctx->b)); skein_256_output()
771 memcpy(x, ctx->x, sizeof(x)); skein_256_output()
774 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); skein_256_output()
775 skein_start_new_type(ctx, OUT_FINAL); skein_256_output()
777 skein_256_process_block(ctx, ctx->b, 1, sizeof(u64)); skein_256_output()
783 skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x, skein_256_output()
786 memcpy(ctx->x, x, sizeof(x)); skein_256_output()
793 int skein_512_output(struct skein_512_ctx *ctx, u8 *hash_val) skein_512_output() argument
798 skein_assert_ret(ctx->h.b_cnt <= SKEIN_512_BLOCK_BYTES, SKEIN_FAIL); skein_512_output()
802 byte_cnt = (ctx->h.hash_bit_len + 7) >> 3; skein_512_output()
806 memset(ctx->b, 0, sizeof(ctx->b)); skein_512_output()
808 memcpy(x, ctx->x, sizeof(x)); skein_512_output()
811 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); skein_512_output()
812 skein_start_new_type(ctx, OUT_FINAL); skein_512_output()
814 skein_512_process_block(ctx, ctx->b, 1, sizeof(u64)); skein_512_output()
820 skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x, skein_512_output()
823 memcpy(ctx->x, x, sizeof(x)); skein_512_output()
830 int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val) skein_1024_output() argument
835 skein_assert_ret(ctx->h.b_cnt <= SKEIN_1024_BLOCK_BYTES, SKEIN_FAIL); skein_1024_output()
839 byte_cnt = (ctx->h.hash_bit_len + 7) >> 3; skein_1024_output()
843 memset(ctx->b, 0, sizeof(ctx->b)); skein_1024_output()
845 memcpy(x, ctx->x, sizeof(x)); skein_1024_output()
848 ((u64 *)ctx->b)[0] = skein_swap64((u64) i); skein_1024_output()
849 skein_start_new_type(ctx, OUT_FINAL); skein_1024_output()
851 skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64)); skein_1024_output()
857 skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x, skein_1024_output()
860 memcpy(ctx->x, x, sizeof(x)); skein_1024_output()
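Every *_final()/*_output() routine above ends with the same counter-mode output stage: a little-endian 64-bit block counter is hashed under the OUT_FINAL tweak once per output block, and the saved chaining value is restored between blocks so each output block derives from the same final state. A self-contained userspace sketch of that control flow (toy_block() is a stand-in for the driver's Threefish-based process_block, not the real primitive):

#include <stdint.h>
#include <string.h>

#define BLK 64	/* SKEIN_512_BLOCK_BYTES */

/* Toy stand-in for skein_512_process_block(); the real driver runs
 * Threefish-512 here. Any mixing of (x, b) shows the control flow. */
static void toy_block(uint64_t x[8], const uint8_t b[BLK])
{
	int i;

	for (i = 0; i < 8; i++)
		x[i] = x[i] * 0x9e3779b97f4a7c15ULL + b[8 * i];
}

static void skein_output(uint64_t x[8], size_t hash_bit_len, uint8_t *out)
{
	size_t byte_cnt = (hash_bit_len + 7) >> 3;	/* round bits up to bytes */
	uint64_t x_save[8];
	uint8_t b[BLK];
	size_t i, n;
	int j;

	memset(b, 0, sizeof(b));		/* counter block, zero padded */
	memcpy(x_save, x, sizeof(x_save));	/* keep the final chaining value */
	for (i = 0; i * BLK < byte_cnt; i++) {
		for (j = 0; j < 8; j++)		/* LE 64-bit block counter */
			b[j] = (uint8_t)((uint64_t)i >> (8 * j));
		toy_block(x, b);		/* OUT_FINAL tweak in the real code */
		n = byte_cnt - i * BLK;		/* bytes still wanted */
		if (n > BLK)
			n = BLK;
		memcpy(out + i * BLK, x, n);	/* LSB-first on little-endian hosts */
		memcpy(x, x_save, sizeof(x_save)); /* same seed for every block */
	}
}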
H A Dskein_api.c30 int skein_ctx_prepare(struct skein_ctx *ctx, enum skein_size size) skein_ctx_prepare() argument
32 skein_assert_ret(ctx && size, SKEIN_FAIL); skein_ctx_prepare()
34 memset(ctx, 0, sizeof(struct skein_ctx)); skein_ctx_prepare()
35 ctx->skein_size = size; skein_ctx_prepare()
40 int skein_init(struct skein_ctx *ctx, size_t hash_bit_len) skein_init() argument
47 skein_assert_ret(ctx, SKEIN_FAIL); skein_init()
53 x = ctx->m.s256.x; skein_init()
54 x_len = ctx->skein_size/8; skein_init()
59 switch (ctx->skein_size) { skein_init()
61 ret = skein_256_init_ext(&ctx->m.s256, hash_bit_len, skein_init()
65 ret = skein_512_init_ext(&ctx->m.s512, hash_bit_len, skein_init()
69 ret = skein_1024_init_ext(&ctx->m.s1024, hash_bit_len, skein_init()
79 memcpy(ctx->x_save, x, x_len); skein_init()
84 int skein_mac_init(struct skein_ctx *ctx, const u8 *key, size_t key_len, skein_mac_init() argument
92 skein_assert_ret(ctx, SKEIN_FAIL); skein_mac_init()
94 x = ctx->m.s256.x; skein_mac_init()
95 x_len = ctx->skein_size/8; skein_mac_init()
99 switch (ctx->skein_size) { skein_mac_init()
101 ret = skein_256_init_ext(&ctx->m.s256, hash_bit_len, skein_mac_init()
107 ret = skein_512_init_ext(&ctx->m.s512, hash_bit_len, skein_mac_init()
112 ret = skein_1024_init_ext(&ctx->m.s1024, hash_bit_len, skein_mac_init()
123 memcpy(ctx->x_save, x, x_len); skein_mac_init()
128 void skein_reset(struct skein_ctx *ctx) skein_reset() argument
138 x = ctx->m.s256.x; skein_reset()
139 x_len = ctx->skein_size/8; skein_reset()
141 memcpy(x, ctx->x_save, x_len); skein_reset()
144 skein_start_new_type(&ctx->m, MSG); skein_reset()
147 int skein_update(struct skein_ctx *ctx, const u8 *msg, skein_update() argument
152 skein_assert_ret(ctx, SKEIN_FAIL); skein_update()
154 switch (ctx->skein_size) { skein_update()
156 ret = skein_256_update(&ctx->m.s256, (const u8 *)msg, skein_update()
160 ret = skein_512_update(&ctx->m.s512, (const u8 *)msg, skein_update()
164 ret = skein_1024_update(&ctx->m.s1024, (const u8 *)msg, skein_update()
172 int skein_update_bits(struct skein_ctx *ctx, const u8 *msg, skein_update_bits() argument
188 skein_assert_ret((ctx->m.h.T[1] & SKEIN_T1_FLAG_BIT_PAD) == 0 || skein_update_bits()
193 return skein_update(ctx, msg, msg_bit_cnt >> 3); skein_update_bits()
195 skein_update(ctx, msg, (msg_bit_cnt >> 3) + 1); skein_update_bits()
203 up = (u8 *)ctx->m.s256.x + ctx->skein_size / 8; skein_update_bits()
206 skein_set_bit_pad_flag(ctx->m.h); skein_update_bits()
210 length = ctx->m.h.b_cnt; skein_update_bits()
221 int skein_final(struct skein_ctx *ctx, u8 *hash) skein_final() argument
225 skein_assert_ret(ctx, SKEIN_FAIL); skein_final()
227 switch (ctx->skein_size) { skein_final()
229 ret = skein_256_final(&ctx->m.s256, (u8 *)hash); skein_final()
232 ret = skein_512_final(&ctx->m.s512, (u8 *)hash); skein_final()
235 ret = skein_1024_final(&ctx->m.s1024, (u8 *)hash); skein_final()
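The wrapper above is driven as prepare -> init -> update -> final. A hedged usage sketch, assuming the staging driver's skein_api.h declarations (enum skein_size, SKEIN_SUCCESS) are in scope:

#include "skein_api.h"	/* staging driver header, assumed available */

static int skein256_digest(const u8 *msg, size_t len, u8 *out /* 32 bytes */)
{
	struct skein_ctx ctx;
	int ret;

	ret = skein_ctx_prepare(&ctx, SKEIN_256);
	if (ret != SKEIN_SUCCESS)
		return ret;
	ret = skein_init(&ctx, 256);		/* 256-bit output */
	if (ret != SKEIN_SUCCESS)
		return ret;
	ret = skein_update(&ctx, msg, len);
	if (ret != SKEIN_SUCCESS)
		return ret;
	return skein_final(&ctx, out);
	/* skein_reset(&ctx) would rewind to the post-init state kept in
	 * ctx->x_save (line 79 above) for another same-sized message. */
}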
H A Dskein_block.h15 void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
17 void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
19 void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
H A Dskein_block.c36 #define debug_save_tweak(ctx) \
38 ctx->h.tweak[0] = ts[0]; \
39 ctx->h.tweak[1] = ts[1]; \
42 #define debug_save_tweak(ctx)
345 void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr, skein_256_process_block() argument
370 ts[0] = ctx->h.tweak[0]; skein_256_process_block()
371 ts[1] = ctx->h.tweak[1]; skein_256_process_block()
380 ks[0] = ctx->x[0]; skein_256_process_block()
381 ks[1] = ctx->x[1]; skein_256_process_block()
382 ks[2] = ctx->x[2]; skein_256_process_block()
383 ks[3] = ctx->x[3]; skein_256_process_block()
390 debug_save_tweak(ctx); skein_256_process_block()
449 ctx->x[0] = X0 ^ w[0]; skein_256_process_block()
450 ctx->x[1] = X1 ^ w[1]; skein_256_process_block()
451 ctx->x[2] = X2 ^ w[2]; skein_256_process_block()
452 ctx->x[3] = X3 ^ w[3]; skein_256_process_block()
456 ctx->h.tweak[0] = ts[0]; skein_256_process_block()
457 ctx->h.tweak[1] = ts[1]; skein_256_process_block()
475 void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr, skein_512_process_block() argument
503 ts[0] = ctx->h.tweak[0]; skein_512_process_block()
504 ts[1] = ctx->h.tweak[1]; skein_512_process_block()
513 ks[0] = ctx->x[0]; skein_512_process_block()
514 ks[1] = ctx->x[1]; skein_512_process_block()
515 ks[2] = ctx->x[2]; skein_512_process_block()
516 ks[3] = ctx->x[3]; skein_512_process_block()
517 ks[4] = ctx->x[4]; skein_512_process_block()
518 ks[5] = ctx->x[5]; skein_512_process_block()
519 ks[6] = ctx->x[6]; skein_512_process_block()
520 ks[7] = ctx->x[7]; skein_512_process_block()
528 debug_save_tweak(ctx); skein_512_process_block()
594 ctx->x[0] = X0 ^ w[0]; skein_512_process_block()
595 ctx->x[1] = X1 ^ w[1]; skein_512_process_block()
596 ctx->x[2] = X2 ^ w[2]; skein_512_process_block()
597 ctx->x[3] = X3 ^ w[3]; skein_512_process_block()
598 ctx->x[4] = X4 ^ w[4]; skein_512_process_block()
599 ctx->x[5] = X5 ^ w[5]; skein_512_process_block()
600 ctx->x[6] = X6 ^ w[6]; skein_512_process_block()
601 ctx->x[7] = X7 ^ w[7]; skein_512_process_block()
605 ctx->h.tweak[0] = ts[0]; skein_512_process_block()
606 ctx->h.tweak[1] = ts[1]; skein_512_process_block()
624 void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr, skein_1024_process_block() argument
643 ts[0] = ctx->h.tweak[0]; skein_1024_process_block()
644 ts[1] = ctx->h.tweak[1]; skein_1024_process_block()
653 ks[0] = ctx->x[0]; skein_1024_process_block()
654 ks[1] = ctx->x[1]; skein_1024_process_block()
655 ks[2] = ctx->x[2]; skein_1024_process_block()
656 ks[3] = ctx->x[3]; skein_1024_process_block()
657 ks[4] = ctx->x[4]; skein_1024_process_block()
658 ks[5] = ctx->x[5]; skein_1024_process_block()
659 ks[6] = ctx->x[6]; skein_1024_process_block()
660 ks[7] = ctx->x[7]; skein_1024_process_block()
661 ks[8] = ctx->x[8]; skein_1024_process_block()
662 ks[9] = ctx->x[9]; skein_1024_process_block()
663 ks[10] = ctx->x[10]; skein_1024_process_block()
664 ks[11] = ctx->x[11]; skein_1024_process_block()
665 ks[12] = ctx->x[12]; skein_1024_process_block()
666 ks[13] = ctx->x[13]; skein_1024_process_block()
667 ks[14] = ctx->x[14]; skein_1024_process_block()
668 ks[15] = ctx->x[15]; skein_1024_process_block()
678 debug_save_tweak(ctx); skein_1024_process_block()
747 ctx->x[0] = X00 ^ w[0]; skein_1024_process_block()
748 ctx->x[1] = X01 ^ w[1]; skein_1024_process_block()
749 ctx->x[2] = X02 ^ w[2]; skein_1024_process_block()
750 ctx->x[3] = X03 ^ w[3]; skein_1024_process_block()
751 ctx->x[4] = X04 ^ w[4]; skein_1024_process_block()
752 ctx->x[5] = X05 ^ w[5]; skein_1024_process_block()
753 ctx->x[6] = X06 ^ w[6]; skein_1024_process_block()
754 ctx->x[7] = X07 ^ w[7]; skein_1024_process_block()
755 ctx->x[8] = X08 ^ w[8]; skein_1024_process_block()
756 ctx->x[9] = X09 ^ w[9]; skein_1024_process_block()
757 ctx->x[10] = X10 ^ w[10]; skein_1024_process_block()
758 ctx->x[11] = X11 ^ w[11]; skein_1024_process_block()
759 ctx->x[12] = X12 ^ w[12]; skein_1024_process_block()
760 ctx->x[13] = X13 ^ w[13]; skein_1024_process_block()
761 ctx->x[14] = X14 ^ w[14]; skein_1024_process_block()
762 ctx->x[15] = X15 ^ w[15]; skein_1024_process_block()
767 ctx->h.tweak[0] = ts[0]; skein_1024_process_block()
768 ctx->h.tweak[1] = ts[1]; skein_1024_process_block()
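All three process_block() variants above end the same way: the Threefish output is XORed with the input block w[] (the feed-forward that makes the compression one-way), then the updated tweak is stored back. A minimal sketch of that shape; threefish512_encrypt() is a hypothetical stand-in for the unrolled rounds in the real code:

/* hypothetical primitive, not the driver's unrolled round macros */
void threefish512_encrypt(u64 out[8], const u64 key[8],
			  const u64 tweak[2], const u64 blk[8]);

static void skein512_compress(u64 x[8], const u64 w[8], u64 tweak[2])
{
	u64 e[8];
	int i;

	threefish512_encrypt(e, x /* chaining value as key */, tweak, w);
	for (i = 0; i < 8; i++)
		x[i] = e[i] ^ w[i];	/* feed-forward XOR, as at lines 594-601 */
}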
/linux-4.1.27/drivers/char/hw_random/
H A Dxgene-rng.c105 struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) arg; xgene_rng_expired_timer() local
108 disable_irq(ctx->irq); xgene_rng_expired_timer()
109 ctx->failure_cnt = 0; xgene_rng_expired_timer()
110 del_timer(&ctx->failure_timer); xgene_rng_expired_timer()
111 enable_irq(ctx->irq); xgene_rng_expired_timer()
114 static void xgene_rng_start_timer(struct xgene_rng_dev *ctx) xgene_rng_start_timer() argument
116 ctx->failure_timer.data = (unsigned long) ctx; xgene_rng_start_timer()
117 ctx->failure_timer.function = xgene_rng_expired_timer; xgene_rng_start_timer()
118 ctx->failure_timer.expires = jiffies + 120 * HZ; xgene_rng_start_timer()
119 add_timer(&ctx->failure_timer); xgene_rng_start_timer()
125 static void xgene_rng_init_fro(struct xgene_rng_dev *ctx, u32 fro_val) xgene_rng_init_fro() argument
127 writel(fro_val, ctx->csr_base + RNG_FRODETUNE); xgene_rng_init_fro()
128 writel(0x00000000, ctx->csr_base + RNG_ALARMMASK); xgene_rng_init_fro()
129 writel(0x00000000, ctx->csr_base + RNG_ALARMSTOP); xgene_rng_init_fro()
130 writel(0xFFFFFFFF, ctx->csr_base + RNG_FROENABLE); xgene_rng_init_fro()
133 static void xgene_rng_chk_overflow(struct xgene_rng_dev *ctx) xgene_rng_chk_overflow() argument
137 val = readl(ctx->csr_base + RNG_INTR_STS_ACK); xgene_rng_chk_overflow()
144 dev_err(ctx->dev, "test monobit failure error 0x%08X\n", val); xgene_rng_chk_overflow()
152 dev_err(ctx->dev, "test poker failure error 0x%08X\n", val); xgene_rng_chk_overflow()
158 dev_err(ctx->dev, "test long run failure error 0x%08X\n", val); xgene_rng_chk_overflow()
165 dev_err(ctx->dev, "test run failure error 0x%08X\n", val); xgene_rng_chk_overflow()
168 dev_err(ctx->dev, "noise failure error 0x%08X\n", val); xgene_rng_chk_overflow()
174 dev_err(ctx->dev, "stuck out failure error 0x%08X\n", val); xgene_rng_chk_overflow()
180 if (++ctx->failure_cnt == 1) { xgene_rng_chk_overflow()
182 ctx->failure_ts = jiffies; xgene_rng_chk_overflow()
183 frostopped = readl(ctx->csr_base + RNG_ALARMSTOP); xgene_rng_chk_overflow()
184 xgene_rng_init_fro(ctx, frostopped); xgene_rng_chk_overflow()
190 xgene_rng_start_timer(ctx); xgene_rng_chk_overflow()
193 if (time_after(ctx->failure_ts + 60 * HZ, jiffies)) { xgene_rng_chk_overflow()
194 dev_err(ctx->dev, xgene_rng_chk_overflow()
199 ctx->failure_ts = jiffies; xgene_rng_chk_overflow()
200 ctx->failure_cnt = 1; xgene_rng_chk_overflow()
206 xgene_rng_start_timer(ctx); xgene_rng_chk_overflow()
208 frostopped = readl(ctx->csr_base + RNG_ALARMSTOP); xgene_rng_chk_overflow()
209 xgene_rng_init_fro(ctx, frostopped); xgene_rng_chk_overflow()
213 writel(val, ctx->csr_base + RNG_INTR_STS_ACK); xgene_rng_chk_overflow()
218 struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) id; xgene_rng_irq_handler() local
221 xgene_rng_chk_overflow(ctx); xgene_rng_irq_handler()
228 struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; xgene_rng_data_present() local
232 val = readl(ctx->csr_base + RNG_INTR_STS_ACK); xgene_rng_data_present()
243 struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; xgene_rng_data_read() local
246 for (i = 0; i < ctx->datum_size; i++) xgene_rng_data_read()
247 data[i] = readl(ctx->csr_base + RNG_INOUT_0 + i * 4); xgene_rng_data_read()
250 writel(READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK); xgene_rng_data_read()
252 return ctx->datum_size << 2; xgene_rng_data_read()
255 static void xgene_rng_init_internal(struct xgene_rng_dev *ctx) xgene_rng_init_internal() argument
259 writel(0x00000000, ctx->csr_base + RNG_CONTROL); xgene_rng_init_internal()
263 writel(val, ctx->csr_base + RNG_CONFIG); xgene_rng_init_internal()
266 writel(val, ctx->csr_base + RNG_ALARMCNT); xgene_rng_init_internal()
268 xgene_rng_init_fro(ctx, 0); xgene_rng_init_internal()
277 READY_MASK, ctx->csr_base + RNG_INTR_STS_ACK); xgene_rng_init_internal()
287 writel(val, ctx->csr_base + RNG_CONTROL); xgene_rng_init_internal()
292 struct xgene_rng_dev *ctx = (struct xgene_rng_dev *) rng->priv; xgene_rng_init() local
294 ctx->failure_cnt = 0; xgene_rng_init()
295 init_timer(&ctx->failure_timer); xgene_rng_init()
297 ctx->revision = readl(ctx->csr_base + RNG_EIP_REV); xgene_rng_init()
299 dev_dbg(ctx->dev, "Rev %d.%d.%d\n", xgene_rng_init()
300 MAJOR_HW_REV_RD(ctx->revision), xgene_rng_init()
301 MINOR_HW_REV_RD(ctx->revision), xgene_rng_init()
302 HW_PATCH_LEVEL_RD(ctx->revision)); xgene_rng_init()
304 dev_dbg(ctx->dev, "Options 0x%08X", xgene_rng_init()
305 readl(ctx->csr_base + RNG_OPTIONS)); xgene_rng_init()
307 xgene_rng_init_internal(ctx); xgene_rng_init()
309 ctx->datum_size = RNG_MAX_DATUM; xgene_rng_init()
332 struct xgene_rng_dev *ctx; xgene_rng_probe() local
335 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); xgene_rng_probe()
336 if (!ctx) xgene_rng_probe()
339 ctx->dev = &pdev->dev; xgene_rng_probe()
340 platform_set_drvdata(pdev, ctx); xgene_rng_probe()
343 ctx->csr_base = devm_ioremap_resource(&pdev->dev, res); xgene_rng_probe()
344 if (IS_ERR(ctx->csr_base)) xgene_rng_probe()
345 return PTR_ERR(ctx->csr_base); xgene_rng_probe()
347 ctx->irq = platform_get_irq(pdev, 0); xgene_rng_probe()
348 if (ctx->irq < 0) { xgene_rng_probe()
350 return ctx->irq; xgene_rng_probe()
354 ctx->csr_base, ctx->irq); xgene_rng_probe()
356 rc = devm_request_irq(&pdev->dev, ctx->irq, xgene_rng_irq_handler, 0, xgene_rng_probe()
357 dev_name(&pdev->dev), ctx); xgene_rng_probe()
364 ctx->clk = devm_clk_get(&pdev->dev, NULL); xgene_rng_probe()
365 if (IS_ERR(ctx->clk)) { xgene_rng_probe()
368 rc = clk_prepare_enable(ctx->clk); xgene_rng_probe()
376 xgene_rng_func.priv = (unsigned long) ctx; xgene_rng_probe()
381 if (!IS_ERR(ctx->clk)) xgene_rng_probe()
382 clk_disable_unprepare(ctx->clk); xgene_rng_probe()
390 if (!IS_ERR(ctx->clk)) xgene_rng_probe()
391 clk_disable_unprepare(ctx->clk); xgene_rng_probe()
401 struct xgene_rng_dev *ctx = platform_get_drvdata(pdev); xgene_rng_remove() local
407 if (!IS_ERR(ctx->clk)) xgene_rng_remove()
408 clk_disable_unprepare(ctx->clk); xgene_rng_remove()
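For context, the callbacks above plug into the hw_random core through a struct hwrng ops table; probe() points .priv at the ctx (line 376) before registering. A hedged reconstruction of how that table is likely shaped in this driver:

static struct hwrng xgene_rng_func = {
	.name		= "xgene-rng",
	.init		= xgene_rng_init,
	.data_present	= xgene_rng_data_present,
	.data_read	= xgene_rng_data_read,
};

/* probe() then does roughly:
 *	xgene_rng_func.priv = (unsigned long) ctx;
 *	rc = hwrng_register(&xgene_rng_func);
 * so each callback recovers ctx from rng->priv, as seen above. */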
/linux-4.1.27/arch/arm64/net/
H A Dbpf_jit_comp.c68 static inline void emit(const u32 insn, struct jit_ctx *ctx) emit() argument
70 if (ctx->image != NULL) emit()
71 ctx->image[ctx->idx] = cpu_to_le32(insn); emit()
73 ctx->idx++; emit()
77 struct jit_ctx *ctx) emit_a64_mov_i64()
82 emit(A64_MOVZ(1, reg, tmp & 0xffff, shift), ctx); emit_a64_mov_i64()
87 emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx); emit_a64_mov_i64()
94 const s32 val, struct jit_ctx *ctx) emit_a64_mov_i()
101 emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx); emit_a64_mov_i() local
103 emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx); emit_a64_mov_i() local
104 emit(A64_MOVK(is64, reg, lo, 0), ctx); emit_a64_mov_i()
107 emit(A64_MOVZ(is64, reg, lo, 0), ctx); emit_a64_mov_i()
109 emit(A64_MOVK(is64, reg, hi, 16), ctx); emit_a64_mov_i()
114 const struct jit_ctx *ctx) bpf2a64_offset()
116 int to = ctx->offset[bpf_to]; bpf2a64_offset()
118 int from = ctx->offset[bpf_from] - 1; bpf2a64_offset()
131 static inline int epilogue_offset(const struct jit_ctx *ctx) epilogue_offset() argument
133 int to = ctx->epilogue_offset; epilogue_offset()
134 int from = ctx->idx; epilogue_offset()
142 static void build_prologue(struct jit_ctx *ctx) build_prologue() argument
159 emit(A64_PUSH(r6, r7, A64_SP), ctx); build_prologue() local
160 emit(A64_PUSH(r8, r9, A64_SP), ctx); build_prologue() local
161 if (ctx->tmp_used) build_prologue()
162 emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx); build_prologue() local
165 emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx); build_prologue()
168 emit(A64_MOV(1, fp, A64_SP), ctx); build_prologue()
171 emit_a64_mov_i64(ra, 0, ctx); build_prologue()
172 emit_a64_mov_i64(rx, 0, ctx); build_prologue()
175 static void build_epilogue(struct jit_ctx *ctx) build_epilogue() argument
191 emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx); build_epilogue()
194 if (ctx->tmp_used) build_epilogue()
195 emit(A64_POP(tmp1, tmp2, A64_SP), ctx); build_epilogue() local
196 emit(A64_POP(r8, r9, A64_SP), ctx); build_epilogue() local
197 emit(A64_POP(r6, r7, A64_SP), ctx); build_epilogue() local
200 emit(A64_MOV(1, fp, A64_SP), ctx); build_epilogue()
203 emit(A64_MOV(1, A64_R(0), r0), ctx); build_epilogue()
205 emit(A64_RET(A64_LR), ctx); build_epilogue() local
214 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) build_insn() argument
223 const int i = insn - ctx->prog->insnsi; build_insn()
243 emit(A64_MOV(is64, dst, src), ctx); build_insn() local
248 emit(A64_ADD(is64, dst, dst, src), ctx); build_insn() local
252 emit(A64_SUB(is64, dst, dst, src), ctx); build_insn() local
256 emit(A64_AND(is64, dst, dst, src), ctx); build_insn() local
260 emit(A64_ORR(is64, dst, dst, src), ctx); build_insn() local
264 emit(A64_EOR(is64, dst, dst, src), ctx); build_insn() local
268 emit(A64_MUL(is64, dst, dst, src), ctx); build_insn() local
280 emit(A64_CBNZ(is64, src, jmp_offset), ctx); build_insn() local
281 emit(A64_MOVZ(1, r0, 0, 0), ctx); build_insn()
282 jmp_offset = epilogue_offset(ctx); build_insn()
284 emit(A64_B(jmp_offset), ctx); build_insn() local
288 emit(A64_UDIV(is64, dst, dst, src), ctx); build_insn() local
291 ctx->tmp_used = 1; build_insn()
292 emit(A64_UDIV(is64, tmp, dst, src), ctx); build_insn() local
293 emit(A64_MUL(is64, tmp, tmp, src), ctx); build_insn() local
294 emit(A64_SUB(is64, dst, dst, tmp), ctx); build_insn() local
301 emit(A64_LSLV(is64, dst, dst, src), ctx); build_insn() local
305 emit(A64_LSRV(is64, dst, dst, src), ctx); build_insn() local
309 emit(A64_ASRV(is64, dst, dst, src), ctx); build_insn() local
314 emit(A64_NEG(is64, dst, dst), ctx); build_insn() local
328 emit(A64_REV16(is64, dst, dst), ctx); build_insn() local
330 emit(A64_UXTH(is64, dst, dst), ctx); build_insn() local
333 emit(A64_REV32(is64, dst, dst), ctx); build_insn() local
337 emit(A64_REV64(dst, dst), ctx); build_insn() local
345 emit(A64_UXTH(is64, dst, dst), ctx); build_insn() local
349 emit(A64_UXTW(is64, dst, dst), ctx); build_insn() local
359 emit_a64_mov_i(is64, dst, imm, ctx); build_insn()
364 ctx->tmp_used = 1; build_insn()
365 emit_a64_mov_i(is64, tmp, imm, ctx); build_insn()
366 emit(A64_ADD(is64, dst, dst, tmp), ctx); build_insn() local
370 ctx->tmp_used = 1; build_insn()
371 emit_a64_mov_i(is64, tmp, imm, ctx); build_insn()
372 emit(A64_SUB(is64, dst, dst, tmp), ctx); build_insn() local
376 ctx->tmp_used = 1; build_insn()
377 emit_a64_mov_i(is64, tmp, imm, ctx); build_insn()
378 emit(A64_AND(is64, dst, dst, tmp), ctx); build_insn() local
382 ctx->tmp_used = 1; build_insn()
383 emit_a64_mov_i(is64, tmp, imm, ctx); build_insn()
384 emit(A64_ORR(is64, dst, dst, tmp), ctx); build_insn() local
388 ctx->tmp_used = 1; build_insn()
389 emit_a64_mov_i(is64, tmp, imm, ctx); build_insn()
390 emit(A64_EOR(is64, dst, dst, tmp), ctx); build_insn() local
394 ctx->tmp_used = 1; build_insn()
395 emit_a64_mov_i(is64, tmp, imm, ctx); build_insn()
396 emit(A64_MUL(is64, dst, dst, tmp), ctx); build_insn() local
400 ctx->tmp_used = 1; build_insn()
401 emit_a64_mov_i(is64, tmp, imm, ctx); build_insn()
402 emit(A64_UDIV(is64, dst, dst, tmp), ctx); build_insn() local
406 ctx->tmp_used = 1; build_insn()
407 emit_a64_mov_i(is64, tmp2, imm, ctx); build_insn()
408 emit(A64_UDIV(is64, tmp, dst, tmp2), ctx); build_insn() local
409 emit(A64_MUL(is64, tmp, tmp, tmp2), ctx); build_insn() local
410 emit(A64_SUB(is64, dst, dst, tmp), ctx); build_insn() local
414 emit(A64_LSL(is64, dst, dst, imm), ctx); build_insn() local
418 emit(A64_LSR(is64, dst, dst, imm), ctx); build_insn() local
422 emit(A64_ASR(is64, dst, dst, imm), ctx); build_insn() local
427 jmp_offset = bpf2a64_offset(i + off, i, ctx); build_insn()
429 emit(A64_B(jmp_offset), ctx); build_insn() local
438 emit(A64_CMP(1, dst, src), ctx); build_insn()
440 jmp_offset = bpf2a64_offset(i + off, i, ctx); build_insn()
464 emit(A64_B_(jmp_cond, jmp_offset), ctx); build_insn() local
467 emit(A64_TST(1, dst, src), ctx); build_insn()
476 ctx->tmp_used = 1; build_insn()
477 emit_a64_mov_i(1, tmp, imm, ctx); build_insn()
478 emit(A64_CMP(1, dst, tmp), ctx); build_insn()
481 ctx->tmp_used = 1; build_insn()
482 emit_a64_mov_i(1, tmp, imm, ctx); build_insn()
483 emit(A64_TST(1, dst, tmp), ctx); build_insn()
491 ctx->tmp_used = 1; build_insn()
492 emit_a64_mov_i64(tmp, func, ctx); build_insn()
493 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); build_insn() local
494 emit(A64_MOV(1, A64_FP, A64_SP), ctx); build_insn()
495 emit(A64_BLR(tmp), ctx); build_insn() local
496 emit(A64_MOV(1, r0, A64_R(0)), ctx); build_insn()
497 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); build_insn() local
504 if (i == ctx->prog->len - 1) build_insn()
506 jmp_offset = epilogue_offset(ctx); build_insn()
508 emit(A64_B(jmp_offset), ctx); build_insn() local
527 emit_a64_mov_i64(dst, imm64, ctx); build_insn()
537 ctx->tmp_used = 1; build_insn()
538 emit_a64_mov_i(1, tmp, off, ctx); build_insn()
541 emit(A64_LDR32(dst, src, tmp), ctx); build_insn() local
544 emit(A64_LDRH(dst, src, tmp), ctx); build_insn() local
547 emit(A64_LDRB(dst, src, tmp), ctx); build_insn() local
550 emit(A64_LDR64(dst, src, tmp), ctx); build_insn() local
567 ctx->tmp_used = 1; build_insn()
568 emit_a64_mov_i(1, tmp, off, ctx); build_insn()
571 emit(A64_STR32(src, dst, tmp), ctx); build_insn() local
574 emit(A64_STRH(src, dst, tmp), ctx); build_insn() local
577 emit(A64_STRB(src, dst, tmp), ctx); build_insn() local
580 emit(A64_STR64(src, dst, tmp), ctx); build_insn() local
609 emit(A64_MOV(1, r1, r6), ctx); build_insn()
610 emit_a64_mov_i(0, r2, imm, ctx); build_insn()
612 emit(A64_ADD(0, r2, r2, src), ctx); build_insn()
626 emit_a64_mov_i64(r3, size, ctx); build_insn()
627 emit(A64_ADD_I(1, r4, fp, MAX_BPF_STACK), ctx); build_insn()
628 emit_a64_mov_i64(r5, (unsigned long)bpf_load_pointer, ctx); build_insn()
629 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx); build_insn() local
630 emit(A64_MOV(1, A64_FP, A64_SP), ctx); build_insn()
631 emit(A64_BLR(r5), ctx); build_insn() local
632 emit(A64_MOV(1, r0, A64_R(0)), ctx); build_insn()
633 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx); build_insn() local
635 jmp_offset = epilogue_offset(ctx); build_insn()
637 emit(A64_CBZ(1, r0, jmp_offset), ctx); build_insn()
638 emit(A64_MOV(1, r5, r0), ctx); build_insn()
641 emit(A64_LDR32(r0, r5, A64_ZR), ctx); build_insn() local
643 emit(A64_REV32(0, r0, r0), ctx); build_insn()
647 emit(A64_LDRH(r0, r5, A64_ZR), ctx); build_insn() local
649 emit(A64_REV16(0, r0, r0), ctx); build_insn()
653 emit(A64_LDRB(r0, r5, A64_ZR), ctx); build_insn() local
670 static int build_body(struct jit_ctx *ctx) build_body() argument
672 const struct bpf_prog *prog = ctx->prog; build_body()
679 ret = build_insn(insn, ctx); build_body()
681 if (ctx->image == NULL) build_body()
682 ctx->offset[i] = ctx->idx; build_body()
708 struct jit_ctx ctx; bpf_int_jit_compile() local
718 memset(&ctx, 0, sizeof(ctx)); bpf_int_jit_compile()
719 ctx.prog = prog; bpf_int_jit_compile()
721 ctx.offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL); bpf_int_jit_compile()
722 if (ctx.offset == NULL) bpf_int_jit_compile()
725 /* 1. Initial fake pass to compute ctx->idx. */ bpf_int_jit_compile()
727 /* Fake pass to fill in ctx->offset and ctx->tmp_used. */ bpf_int_jit_compile()
728 if (build_body(&ctx)) bpf_int_jit_compile()
731 build_prologue(&ctx); bpf_int_jit_compile()
733 ctx.epilogue_offset = ctx.idx; bpf_int_jit_compile()
734 build_epilogue(&ctx); bpf_int_jit_compile()
737 image_size = sizeof(u32) * ctx.idx; bpf_int_jit_compile()
745 ctx.image = (u32 *)image_ptr; bpf_int_jit_compile()
746 ctx.idx = 0; bpf_int_jit_compile()
748 build_prologue(&ctx); bpf_int_jit_compile()
750 if (build_body(&ctx)) { bpf_int_jit_compile()
755 build_epilogue(&ctx); bpf_int_jit_compile()
759 bpf_jit_dump(prog->len, image_size, 2, ctx.image); bpf_int_jit_compile()
761 bpf_flush_icache(ctx.image, ctx.image + ctx.idx); bpf_int_jit_compile()
764 prog->bpf_func = (void *)ctx.image; bpf_int_jit_compile()
767 kfree(ctx.offset); bpf_int_jit_compile()
76 emit_a64_mov_i64(const int reg, const u64 val, struct jit_ctx *ctx) emit_a64_mov_i64() argument
93 emit_a64_mov_i(const int is64, const int reg, const s32 val, struct jit_ctx *ctx) emit_a64_mov_i() argument
113 bpf2a64_offset(int bpf_to, int bpf_from, const struct jit_ctx *ctx) bpf2a64_offset() argument
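The JIT above relies on emit() doing double duty: with ctx->image == NULL it only advances ctx->idx, so pass 1 sizes the image (and fills ctx->offset), and pass 2 writes real instructions into the allocated buffer. The trick in miniature, as self-contained userspace C:

#include <stdint.h>
#include <stdlib.h>

struct jit { uint32_t *image; size_t idx; };

/* As in emit() above: NULL image means "just count words". */
static void emit32(struct jit *j, uint32_t insn)
{
	if (j->image != NULL)
		j->image[j->idx] = insn;
	j->idx++;
}

static void gen_body(struct jit *j)
{
	emit32(j, 0xd503201f);	/* AArch64 NOP */
	emit32(j, 0xd65f03c0);	/* AArch64 RET */
}

int main(void)
{
	struct jit j = { NULL, 0 };

	gen_body(&j);				/* pass 1: j.idx = word count */
	j.image = calloc(j.idx, sizeof(uint32_t));
	if (!j.image)
		return 1;
	j.idx = 0;
	gen_body(&j);				/* pass 2: fill the buffer */
	free(j.image);
	return 0;
}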
/linux-4.1.27/drivers/crypto/amcc/
H A Dcrypto4xx_alg.c75 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); crypto4xx_encrypt() local
77 ctx->direction = DIR_OUTBOUND; crypto4xx_encrypt()
78 ctx->hash_final = 0; crypto4xx_encrypt()
79 ctx->is_hash = 0; crypto4xx_encrypt()
80 ctx->pd_ctl = 0x1; crypto4xx_encrypt()
82 return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, crypto4xx_encrypt()
84 get_dynamic_sa_iv_size(ctx)); crypto4xx_encrypt()
89 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); crypto4xx_decrypt() local
91 ctx->direction = DIR_INBOUND; crypto4xx_decrypt()
92 ctx->hash_final = 0; crypto4xx_decrypt()
93 ctx->is_hash = 0; crypto4xx_decrypt()
94 ctx->pd_ctl = 1; crypto4xx_decrypt()
96 return crypto4xx_build_pd(&req->base, ctx, req->src, req->dst, crypto4xx_decrypt()
98 get_dynamic_sa_iv_size(ctx)); crypto4xx_decrypt()
111 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); crypto4xx_setkey_aes() local
123 if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) crypto4xx_setkey_aes()
124 crypto4xx_free_sa(ctx); crypto4xx_setkey_aes()
126 rc = crypto4xx_alloc_sa(ctx, SA_AES128_LEN + (keylen-16) / 4); crypto4xx_setkey_aes()
130 if (ctx->state_record_dma_addr == 0) { crypto4xx_setkey_aes()
131 rc = crypto4xx_alloc_state_record(ctx); crypto4xx_setkey_aes()
133 crypto4xx_free_sa(ctx); crypto4xx_setkey_aes()
138 sa = (struct dynamic_sa_ctl *) ctx->sa_in; crypto4xx_setkey_aes()
139 ctx->hash_final = 0; crypto4xx_setkey_aes()
153 crypto4xx_memcpy_le(ctx->sa_in + get_dynamic_sa_offset_key_field(ctx), crypto4xx_setkey_aes()
157 ctx->is_hash = 0; crypto4xx_setkey_aes()
158 ctx->direction = DIR_INBOUND; crypto4xx_setkey_aes()
159 memcpy(ctx->sa_in + get_dynamic_sa_offset_state_ptr_field(ctx), crypto4xx_setkey_aes()
160 (void *)&ctx->state_record_dma_addr, 4); crypto4xx_setkey_aes()
161 ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx); crypto4xx_setkey_aes()
163 memcpy(ctx->sa_out, ctx->sa_in, ctx->sa_len * 4); crypto4xx_setkey_aes()
164 sa = (struct dynamic_sa_ctl *) ctx->sa_out; crypto4xx_setkey_aes()
187 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(tfm); crypto4xx_hash_alg_init() local
192 ctx->dev = my_alg->dev; crypto4xx_hash_alg_init()
193 ctx->is_hash = 1; crypto4xx_hash_alg_init()
194 ctx->hash_final = 0; crypto4xx_hash_alg_init()
197 if (ctx->sa_in_dma_addr || ctx->sa_out_dma_addr) crypto4xx_hash_alg_init()
198 crypto4xx_free_sa(ctx); crypto4xx_hash_alg_init()
200 rc = crypto4xx_alloc_sa(ctx, sa_len); crypto4xx_hash_alg_init()
204 if (ctx->state_record_dma_addr == 0) { crypto4xx_hash_alg_init()
205 crypto4xx_alloc_state_record(ctx); crypto4xx_hash_alg_init()
206 if (!ctx->state_record_dma_addr) { crypto4xx_hash_alg_init()
207 crypto4xx_free_sa(ctx); crypto4xx_hash_alg_init()
214 sa = (struct dynamic_sa_ctl *) ctx->sa_in; crypto4xx_hash_alg_init()
225 ctx->direction = DIR_INBOUND; crypto4xx_hash_alg_init()
227 sa_in = (struct dynamic_sa_hash160 *) ctx->sa_in; crypto4xx_hash_alg_init()
231 sa_in->state_ptr = ctx->state_record_dma_addr; crypto4xx_hash_alg_init()
232 ctx->offset_to_sr_ptr = get_dynamic_sa_offset_state_ptr_field(ctx); crypto4xx_hash_alg_init()
239 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); crypto4xx_hash_init() local
243 sa = (struct dynamic_sa_ctl *) ctx->sa_in; crypto4xx_hash_init()
248 ctx->is_hash = 1; crypto4xx_hash_init()
249 ctx->direction = DIR_INBOUND; crypto4xx_hash_init()
256 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); crypto4xx_hash_update() local
258 ctx->is_hash = 1; crypto4xx_hash_update()
259 ctx->hash_final = 0; crypto4xx_hash_update()
260 ctx->pd_ctl = 0x11; crypto4xx_hash_update()
261 ctx->direction = DIR_INBOUND; crypto4xx_hash_update()
263 return crypto4xx_build_pd(&req->base, ctx, req->src, crypto4xx_hash_update()
275 struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm); crypto4xx_hash_digest() local
277 ctx->hash_final = 1; crypto4xx_hash_digest()
278 ctx->pd_ctl = 0x11; crypto4xx_hash_digest()
279 ctx->direction = DIR_INBOUND; crypto4xx_hash_digest()
281 return crypto4xx_build_pd(&req->base, ctx, req->src, crypto4xx_hash_digest()
H A Dcrypto4xx_sa.c37 u32 get_dynamic_sa_offset_state_ptr_field(struct crypto4xx_ctx *ctx) get_dynamic_sa_offset_state_ptr_field() argument
42 if (ctx->direction == DIR_INBOUND) get_dynamic_sa_offset_state_ptr_field()
43 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; get_dynamic_sa_offset_state_ptr_field()
45 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; get_dynamic_sa_offset_state_ptr_field()
64 u32 get_dynamic_sa_iv_size(struct crypto4xx_ctx *ctx) get_dynamic_sa_iv_size() argument
68 if (ctx->direction == DIR_INBOUND) get_dynamic_sa_iv_size()
69 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; get_dynamic_sa_iv_size()
71 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; get_dynamic_sa_iv_size()
75 u32 get_dynamic_sa_offset_key_field(struct crypto4xx_ctx *ctx) get_dynamic_sa_offset_key_field() argument
79 if (ctx->direction == DIR_INBOUND) get_dynamic_sa_offset_key_field()
80 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents; get_dynamic_sa_offset_key_field()
82 cts.w = ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents; get_dynamic_sa_offset_key_field()
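The three accessors above repeat one idiom: select sa_in or sa_out by ctx->direction, then decode a field of that SA's sa_contents word. A hedged consolidation sketch, assuming the driver's crypto4xx_core.h types:

static u32 crypto4xx_sa_contents(struct crypto4xx_ctx *ctx)
{
	if (ctx->direction == DIR_INBOUND)
		return ((struct dynamic_sa_ctl *) ctx->sa_in)->sa_contents;
	return ((struct dynamic_sa_ctl *) ctx->sa_out)->sa_contents;
}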
/linux-4.1.27/arch/s390/crypto/
H A Dsha_common.c23 struct s390_sha_ctx *ctx = shash_desc_ctx(desc); s390_sha_update() local
29 index = ctx->count & (bsize - 1); s390_sha_update()
30 ctx->count += len; s390_sha_update()
37 memcpy(ctx->buf + index, data, bsize - index); s390_sha_update()
38 ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, bsize); s390_sha_update()
48 ret = crypt_s390_kimd(ctx->func, ctx->state, data, s390_sha_update()
57 memcpy(ctx->buf + index , data, len); s390_sha_update()
65 struct s390_sha_ctx *ctx = shash_desc_ctx(desc); s390_sha_final() local
75 index = ctx->count & (bsize - 1); s390_sha_final()
79 ctx->buf[index] = 0x80; s390_sha_final()
83 memset(ctx->buf + index, 0x00, end - index - 8); s390_sha_final()
89 bits = ctx->count * 8; s390_sha_final()
90 memcpy(ctx->buf + end - 8, &bits, sizeof(bits)); s390_sha_final()
92 ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end); s390_sha_final()
97 memcpy(out, ctx->state, crypto_shash_digestsize(desc->tfm)); s390_sha_final()
99 memset(ctx, 0, sizeof *ctx); s390_sha_final()
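s390_sha_final() above does the classic Merkle-Damgard strengthening by hand before the last KIMD call: append 0x80, zero-fill to 8 bytes short of a block boundary, then append the big-endian bit count. (The digest is then truncated to crypto_shash_digestsize(), which is how SHA-384 falls out of the SHA-512 state below.) A userspace sketch for a 64-byte-block hash:

#include <stdint.h>
#include <string.h>

/* 'index' = bytes already buffered in the current block (count & 63);
 * returns how many padded bytes to run through the compression. */
static size_t sha_pad(uint8_t *buf, size_t index, uint64_t total_bytes)
{
	size_t end = (index < 56) ? 64 : 128;	/* one block or two */
	uint64_t bits = total_bytes * 8;
	int i;

	buf[index] = 0x80;			/* start pad with a 1 bit */
	memset(buf + index + 1, 0x00, end - index - 9);
	for (i = 0; i < 8; i++)			/* big-endian 64-bit length */
		buf[end - 8 + i] = (uint8_t)(bits >> (56 - 8 * i));
	return end;
}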
H A Dsha512_s390.c27 struct s390_sha_ctx *ctx = shash_desc_ctx(desc); sha512_init() local
29 *(__u64 *)&ctx->state[0] = 0x6a09e667f3bcc908ULL; sha512_init()
30 *(__u64 *)&ctx->state[2] = 0xbb67ae8584caa73bULL; sha512_init()
31 *(__u64 *)&ctx->state[4] = 0x3c6ef372fe94f82bULL; sha512_init()
32 *(__u64 *)&ctx->state[6] = 0xa54ff53a5f1d36f1ULL; sha512_init()
33 *(__u64 *)&ctx->state[8] = 0x510e527fade682d1ULL; sha512_init()
34 *(__u64 *)&ctx->state[10] = 0x9b05688c2b3e6c1fULL; sha512_init()
35 *(__u64 *)&ctx->state[12] = 0x1f83d9abfb41bd6bULL; sha512_init()
36 *(__u64 *)&ctx->state[14] = 0x5be0cd19137e2179ULL; sha512_init()
37 ctx->count = 0; sha512_init()
38 ctx->func = KIMD_SHA_512; sha512_init()
93 struct s390_sha_ctx *ctx = shash_desc_ctx(desc); sha384_init() local
95 *(__u64 *)&ctx->state[0] = 0xcbbb9d5dc1059ed8ULL; sha384_init()
96 *(__u64 *)&ctx->state[2] = 0x629a292a367cd507ULL; sha384_init()
97 *(__u64 *)&ctx->state[4] = 0x9159015a3070dd17ULL; sha384_init()
98 *(__u64 *)&ctx->state[6] = 0x152fecd8f70e5939ULL; sha384_init()
99 *(__u64 *)&ctx->state[8] = 0x67332667ffc00b31ULL; sha384_init()
100 *(__u64 *)&ctx->state[10] = 0x8eb44a8768581511ULL; sha384_init()
101 *(__u64 *)&ctx->state[12] = 0xdb0c2e0d64f98fa7ULL; sha384_init()
102 *(__u64 *)&ctx->state[14] = 0x47b5481dbefa4fa4ULL; sha384_init()
103 ctx->count = 0; sha384_init()
104 ctx->func = KIMD_SHA_512; sha384_init()
/linux-4.1.27/drivers/media/platform/exynos-gsc/
H A Dgsc-m2m.c31 static int gsc_m2m_ctx_stop_req(struct gsc_ctx *ctx) gsc_m2m_ctx_stop_req() argument
34 struct gsc_dev *gsc = ctx->gsc_dev; gsc_m2m_ctx_stop_req()
38 if (!gsc_m2m_pending(gsc) || (curr_ctx != ctx)) gsc_m2m_ctx_stop_req()
41 gsc_ctx_state_lock_set(GSC_CTX_STOP_REQ, ctx); gsc_m2m_ctx_stop_req()
43 !gsc_ctx_state_is_set(GSC_CTX_STOP_REQ, ctx), gsc_m2m_ctx_stop_req()
49 static void __gsc_m2m_job_abort(struct gsc_ctx *ctx) __gsc_m2m_job_abort() argument
53 ret = gsc_m2m_ctx_stop_req(ctx); __gsc_m2m_job_abort()
54 if ((ret == -ETIMEDOUT) || (ctx->state & GSC_CTX_ABORT)) { __gsc_m2m_job_abort()
55 gsc_ctx_state_lock_clear(GSC_CTX_STOP_REQ | GSC_CTX_ABORT, ctx); __gsc_m2m_job_abort()
56 gsc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); __gsc_m2m_job_abort()
62 struct gsc_ctx *ctx = q->drv_priv; gsc_m2m_start_streaming() local
65 ret = pm_runtime_get_sync(&ctx->gsc_dev->pdev->dev); gsc_m2m_start_streaming()
71 struct gsc_ctx *ctx = q->drv_priv; gsc_m2m_stop_streaming() local
73 __gsc_m2m_job_abort(ctx); gsc_m2m_stop_streaming()
75 pm_runtime_put(&ctx->gsc_dev->pdev->dev); gsc_m2m_stop_streaming()
78 void gsc_m2m_job_finish(struct gsc_ctx *ctx, int vb_state) gsc_m2m_job_finish() argument
82 if (!ctx || !ctx->m2m_ctx) gsc_m2m_job_finish()
85 src_vb = v4l2_m2m_src_buf_remove(ctx->m2m_ctx); gsc_m2m_job_finish()
86 dst_vb = v4l2_m2m_dst_buf_remove(ctx->m2m_ctx); gsc_m2m_job_finish()
99 v4l2_m2m_job_finish(ctx->gsc_dev->m2m.m2m_dev, gsc_m2m_job_finish()
100 ctx->m2m_ctx); gsc_m2m_job_finish()
109 static int gsc_get_bufs(struct gsc_ctx *ctx) gsc_get_bufs() argument
115 s_frame = &ctx->s_frame; gsc_get_bufs()
116 d_frame = &ctx->d_frame; gsc_get_bufs()
118 src_vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx); gsc_get_bufs()
119 ret = gsc_prepare_addr(ctx, src_vb, s_frame, &s_frame->addr); gsc_get_bufs()
123 dst_vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); gsc_get_bufs()
124 ret = gsc_prepare_addr(ctx, dst_vb, d_frame, &d_frame->addr); gsc_get_bufs()
135 struct gsc_ctx *ctx = priv; gsc_m2m_device_run() local
141 if (WARN(!ctx, "null hardware context\n")) gsc_m2m_device_run()
144 gsc = ctx->gsc_dev; gsc_m2m_device_run()
150 if (gsc->m2m.ctx != ctx) { gsc_m2m_device_run()
151 pr_debug("gsc->m2m.ctx = 0x%p, current_ctx = 0x%p", gsc_m2m_device_run()
152 gsc->m2m.ctx, ctx); gsc_m2m_device_run()
153 ctx->state |= GSC_PARAMS; gsc_m2m_device_run()
154 gsc->m2m.ctx = ctx; gsc_m2m_device_run()
157 is_set = ctx->state & GSC_CTX_STOP_REQ; gsc_m2m_device_run()
159 ctx->state &= ~GSC_CTX_STOP_REQ; gsc_m2m_device_run()
160 ctx->state |= GSC_CTX_ABORT; gsc_m2m_device_run()
165 ret = gsc_get_bufs(ctx); gsc_m2m_device_run()
171 gsc_set_prefbuf(gsc, &ctx->s_frame); gsc_m2m_device_run()
172 gsc_hw_set_input_addr(gsc, &ctx->s_frame.addr, GSC_M2M_BUF_NUM); gsc_m2m_device_run()
173 gsc_hw_set_output_addr(gsc, &ctx->d_frame.addr, GSC_M2M_BUF_NUM); gsc_m2m_device_run()
175 if (ctx->state & GSC_PARAMS) { gsc_m2m_device_run()
181 if (gsc_set_scaler_info(ctx)) { gsc_m2m_device_run()
186 gsc_hw_set_input_path(ctx); gsc_m2m_device_run()
187 gsc_hw_set_in_size(ctx); gsc_m2m_device_run()
188 gsc_hw_set_in_image_format(ctx); gsc_m2m_device_run()
190 gsc_hw_set_output_path(ctx); gsc_m2m_device_run()
191 gsc_hw_set_out_size(ctx); gsc_m2m_device_run()
192 gsc_hw_set_out_image_format(ctx); gsc_m2m_device_run()
194 gsc_hw_set_prescaler(ctx); gsc_m2m_device_run()
195 gsc_hw_set_mainscaler(ctx); gsc_m2m_device_run()
196 gsc_hw_set_rotation(ctx); gsc_m2m_device_run()
197 gsc_hw_set_global_alpha(ctx); gsc_m2m_device_run()
201 gsc_hw_set_sfr_update(ctx); gsc_m2m_device_run()
203 ctx->state &= ~GSC_PARAMS; gsc_m2m_device_run()
210 ctx->state &= ~GSC_PARAMS; gsc_m2m_device_run()
219 struct gsc_ctx *ctx = vb2_get_drv_priv(vq); gsc_m2m_queue_setup() local
223 frame = ctx_get_frame(ctx, vq->type); gsc_m2m_queue_setup()
233 allocators[i] = ctx->gsc_dev->alloc_ctx; gsc_m2m_queue_setup()
240 struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); gsc_m2m_buf_prepare() local
244 frame = ctx_get_frame(ctx, vb->vb2_queue->type); gsc_m2m_buf_prepare()
258 struct gsc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); gsc_m2m_buf_queue() local
260 pr_debug("ctx: %p, ctx->state: 0x%x", ctx, ctx->state); gsc_m2m_buf_queue()
262 if (ctx->m2m_ctx) gsc_m2m_buf_queue()
263 v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); gsc_m2m_buf_queue()
279 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_querycap() local
280 struct gsc_dev *gsc = ctx->gsc_dev; gsc_m2m_querycap()
301 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_g_fmt_mplane() local
303 return gsc_g_fmt_mplane(ctx, f); gsc_m2m_g_fmt_mplane()
309 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_try_fmt_mplane() local
311 return gsc_try_fmt_mplane(ctx, f); gsc_m2m_try_fmt_mplane()
317 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_s_fmt_mplane() local
327 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); gsc_m2m_s_fmt_mplane()
335 frame = &ctx->s_frame; gsc_m2m_s_fmt_mplane()
337 frame = &ctx->d_frame; gsc_m2m_s_fmt_mplane()
351 gsc_ctx_state_lock_set(GSC_PARAMS | GSC_DST_FMT, ctx); gsc_m2m_s_fmt_mplane()
353 gsc_ctx_state_lock_set(GSC_PARAMS | GSC_SRC_FMT, ctx); gsc_m2m_s_fmt_mplane()
363 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_reqbufs() local
364 struct gsc_dev *gsc = ctx->gsc_dev; gsc_m2m_reqbufs()
373 gsc_ctx_state_lock_clear(GSC_SRC_FMT, ctx); gsc_m2m_reqbufs()
375 gsc_ctx_state_lock_clear(GSC_DST_FMT, ctx); gsc_m2m_reqbufs()
378 return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); gsc_m2m_reqbufs()
384 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_expbuf() local
385 return v4l2_m2m_expbuf(file, ctx->m2m_ctx, eb); gsc_m2m_expbuf()
391 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_querybuf() local
392 return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); gsc_m2m_querybuf()
398 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_qbuf() local
399 return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); gsc_m2m_qbuf()
405 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_dqbuf() local
406 return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); gsc_m2m_dqbuf()
412 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_streamon() local
416 if (!gsc_ctx_state_is_set(GSC_SRC_FMT, ctx)) gsc_m2m_streamon()
418 } else if (!gsc_ctx_state_is_set(GSC_DST_FMT, ctx)) { gsc_m2m_streamon()
422 return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); gsc_m2m_streamon()
428 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_streamoff() local
429 return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); gsc_m2m_streamoff()
451 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_g_selection() local
457 frame = ctx_get_frame(ctx, s->type); gsc_m2m_g_selection()
488 struct gsc_ctx *ctx = fh_to_ctx(fh); gsc_m2m_s_selection() local
490 struct gsc_variant *variant = ctx->gsc_dev->variant; gsc_m2m_s_selection()
500 ret = gsc_try_crop(ctx, &cr); gsc_m2m_s_selection()
518 frame = &ctx->s_frame; gsc_m2m_s_selection()
524 frame = &ctx->d_frame; gsc_m2m_s_selection()
532 if (gsc_ctx_state_is_set(GSC_DST_FMT | GSC_SRC_FMT, ctx)) { gsc_m2m_s_selection()
535 cr.c.height, ctx->d_frame.crop.width, gsc_m2m_s_selection()
536 ctx->d_frame.crop.height, gsc_m2m_s_selection()
537 ctx->gsc_ctrls.rotate->val, ctx->out_path); gsc_m2m_s_selection()
540 ctx->s_frame.crop.width, gsc_m2m_s_selection()
541 ctx->s_frame.crop.height, cr.c.width, gsc_m2m_s_selection()
542 cr.c.height, ctx->gsc_ctrls.rotate->val, gsc_m2m_s_selection()
543 ctx->out_path); gsc_m2m_s_selection()
554 gsc_ctx_state_lock_set(GSC_PARAMS, ctx); gsc_m2m_s_selection()
582 struct gsc_ctx *ctx = priv; queue_init() local
588 src_vq->drv_priv = ctx; queue_init()
593 src_vq->lock = &ctx->gsc_dev->lock; queue_init()
602 dst_vq->drv_priv = ctx; queue_init()
607 dst_vq->lock = &ctx->gsc_dev->lock; queue_init()
615 struct gsc_ctx *ctx = NULL; gsc_m2m_open() local
623 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); gsc_m2m_open()
624 if (!ctx) { gsc_m2m_open()
629 v4l2_fh_init(&ctx->fh, gsc->m2m.vfd); gsc_m2m_open()
630 ret = gsc_ctrls_create(ctx); gsc_m2m_open()
635 ctx->fh.ctrl_handler = &ctx->ctrl_handler; gsc_m2m_open()
636 file->private_data = &ctx->fh; gsc_m2m_open()
637 v4l2_fh_add(&ctx->fh); gsc_m2m_open()
639 ctx->gsc_dev = gsc; gsc_m2m_open()
641 ctx->s_frame.fmt = get_format(0); gsc_m2m_open()
642 ctx->d_frame.fmt = get_format(0); gsc_m2m_open()
644 ctx->state = GSC_CTX_M2M; gsc_m2m_open()
645 ctx->flags = 0; gsc_m2m_open()
646 ctx->in_path = GSC_DMA; gsc_m2m_open()
647 ctx->out_path = GSC_DMA; gsc_m2m_open()
649 ctx->m2m_ctx = v4l2_m2m_ctx_init(gsc->m2m.m2m_dev, ctx, queue_init); gsc_m2m_open()
650 if (IS_ERR(ctx->m2m_ctx)) { gsc_m2m_open()
652 ret = PTR_ERR(ctx->m2m_ctx); gsc_m2m_open()
659 pr_debug("gsc m2m driver is opened, ctx(0x%p)", ctx); gsc_m2m_open()
665 gsc_ctrls_delete(ctx); gsc_m2m_open()
667 v4l2_fh_del(&ctx->fh); gsc_m2m_open()
668 v4l2_fh_exit(&ctx->fh); gsc_m2m_open()
669 kfree(ctx); gsc_m2m_open()
677 struct gsc_ctx *ctx = fh_to_ctx(file->private_data); gsc_m2m_release() local
678 struct gsc_dev *gsc = ctx->gsc_dev; gsc_m2m_release()
685 v4l2_m2m_ctx_release(ctx->m2m_ctx); gsc_m2m_release()
686 gsc_ctrls_delete(ctx); gsc_m2m_release()
687 v4l2_fh_del(&ctx->fh); gsc_m2m_release()
688 v4l2_fh_exit(&ctx->fh); gsc_m2m_release()
692 kfree(ctx); gsc_m2m_release()
701 struct gsc_ctx *ctx = fh_to_ctx(file->private_data); gsc_m2m_poll() local
702 struct gsc_dev *gsc = ctx->gsc_dev; gsc_m2m_poll()
708 ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait); gsc_m2m_poll()
716 struct gsc_ctx *ctx = fh_to_ctx(file->private_data); gsc_m2m_mmap() local
717 struct gsc_dev *gsc = ctx->gsc_dev; gsc_m2m_mmap()
723 ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); gsc_m2m_mmap()
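gsc_m2m_job_finish() above (lines 85-100) is the canonical mem2mem completion sequence. Its generic shape for a v4l2 m2m driver of this vintage, as a hedged sketch:

static void m2m_complete(struct v4l2_m2m_dev *m2m_dev,
			 struct v4l2_m2m_ctx *m2m_ctx, int state)
{
	struct vb2_buffer *src_vb = v4l2_m2m_src_buf_remove(m2m_ctx);
	struct vb2_buffer *dst_vb = v4l2_m2m_dst_buf_remove(m2m_ctx);

	if (src_vb && dst_vb) {
		v4l2_m2m_buf_done(src_vb, state);	/* e.g. VB2_BUF_STATE_DONE */
		v4l2_m2m_buf_done(dst_vb, state);
		v4l2_m2m_job_finish(m2m_dev, m2m_ctx);	/* schedule next job */
	}
}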
H A Dgsc-regs.c111 void gsc_hw_set_input_path(struct gsc_ctx *ctx) gsc_hw_set_input_path() argument
113 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_input_path()
118 if (ctx->in_path == GSC_DMA) gsc_hw_set_input_path()
124 void gsc_hw_set_in_size(struct gsc_ctx *ctx) gsc_hw_set_in_size() argument
126 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_in_size()
127 struct gsc_frame *frame = &ctx->s_frame; gsc_hw_set_in_size()
146 void gsc_hw_set_in_image_rgb(struct gsc_ctx *ctx) gsc_hw_set_in_image_rgb() argument
148 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_in_image_rgb()
149 struct gsc_frame *frame = &ctx->s_frame; gsc_hw_set_in_image_rgb()
166 void gsc_hw_set_in_image_format(struct gsc_ctx *ctx) gsc_hw_set_in_image_format() argument
168 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_in_image_format()
169 struct gsc_frame *frame = &ctx->s_frame; gsc_hw_set_in_image_format()
180 gsc_hw_set_in_image_rgb(ctx); gsc_hw_set_in_image_format()
222 void gsc_hw_set_output_path(struct gsc_ctx *ctx) gsc_hw_set_output_path() argument
224 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_output_path()
229 if (ctx->out_path == GSC_DMA) gsc_hw_set_output_path()
237 void gsc_hw_set_out_size(struct gsc_ctx *ctx) gsc_hw_set_out_size() argument
239 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_out_size()
240 struct gsc_frame *frame = &ctx->d_frame; gsc_hw_set_out_size()
244 if (ctx->out_path == GSC_DMA) { gsc_hw_set_out_size()
255 if (ctx->gsc_ctrls.rotate->val == 90 || gsc_hw_set_out_size()
256 ctx->gsc_ctrls.rotate->val == 270) { gsc_hw_set_out_size()
266 void gsc_hw_set_out_image_rgb(struct gsc_ctx *ctx) gsc_hw_set_out_image_rgb() argument
268 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_out_image_rgb()
269 struct gsc_frame *frame = &ctx->d_frame; gsc_hw_set_out_image_rgb()
286 void gsc_hw_set_out_image_format(struct gsc_ctx *ctx) gsc_hw_set_out_image_format() argument
288 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_out_image_format()
289 struct gsc_frame *frame = &ctx->d_frame; gsc_hw_set_out_image_format()
300 gsc_hw_set_out_image_rgb(ctx); gsc_hw_set_out_image_format()
304 if (ctx->out_path != GSC_DMA) { gsc_hw_set_out_image_format()
346 void gsc_hw_set_prescaler(struct gsc_ctx *ctx) gsc_hw_set_prescaler() argument
348 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_prescaler()
349 struct gsc_scaler *sc = &ctx->scaler; gsc_hw_set_prescaler()
358 void gsc_hw_set_mainscaler(struct gsc_ctx *ctx) gsc_hw_set_mainscaler() argument
360 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_mainscaler()
361 struct gsc_scaler *sc = &ctx->scaler; gsc_hw_set_mainscaler()
371 void gsc_hw_set_rotation(struct gsc_ctx *ctx) gsc_hw_set_rotation() argument
373 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_rotation()
379 switch (ctx->gsc_ctrls.rotate->val) { gsc_hw_set_rotation()
387 if (ctx->gsc_ctrls.hflip->val) gsc_hw_set_rotation()
389 else if (ctx->gsc_ctrls.vflip->val) gsc_hw_set_rotation()
395 if (ctx->gsc_ctrls.hflip->val) gsc_hw_set_rotation()
397 else if (ctx->gsc_ctrls.vflip->val) gsc_hw_set_rotation()
404 void gsc_hw_set_global_alpha(struct gsc_ctx *ctx) gsc_hw_set_global_alpha() argument
406 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_global_alpha()
407 struct gsc_frame *frame = &ctx->d_frame; gsc_hw_set_global_alpha()
418 cfg |= GSC_OUT_GLOBAL_ALPHA(ctx->gsc_ctrls.global_alpha->val); gsc_hw_set_global_alpha()
422 void gsc_hw_set_sfr_update(struct gsc_ctx *ctx) gsc_hw_set_sfr_update() argument
424 struct gsc_dev *dev = ctx->gsc_dev; gsc_hw_set_sfr_update()
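Nearly every helper in gsc-regs.c above is a read-modify-write of one register. The common shape, as a sketch over the mapped register address (readl()/writel(), as in the driver):

static void gsc_update_bits(void __iomem *reg, u32 mask, u32 val)
{
	u32 cfg = readl(reg);

	cfg &= ~mask;
	cfg |= val & mask;
	writel(cfg, reg);
}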
/linux-4.1.27/drivers/crypto/
H A Datmel-sha.c158 static size_t atmel_sha_append_sg(struct atmel_sha_reqctx *ctx) atmel_sha_append_sg() argument
162 while ((ctx->bufcnt < ctx->buflen) && ctx->total) { atmel_sha_append_sg()
163 count = min(ctx->sg->length - ctx->offset, ctx->total); atmel_sha_append_sg()
164 count = min(count, ctx->buflen - ctx->bufcnt); atmel_sha_append_sg()
173 if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { atmel_sha_append_sg()
174 ctx->sg = sg_next(ctx->sg); atmel_sha_append_sg()
181 scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, atmel_sha_append_sg()
182 ctx->offset, count, 0); atmel_sha_append_sg()
184 ctx->bufcnt += count; atmel_sha_append_sg()
185 ctx->offset += count; atmel_sha_append_sg()
186 ctx->total -= count; atmel_sha_append_sg()
188 if (ctx->offset == ctx->sg->length) { atmel_sha_append_sg()
189 ctx->sg = sg_next(ctx->sg); atmel_sha_append_sg()
190 if (ctx->sg) atmel_sha_append_sg()
191 ctx->offset = 0; atmel_sha_append_sg()
193 ctx->total = 0; atmel_sha_append_sg()
216 static void atmel_sha_fill_padding(struct atmel_sha_reqctx *ctx, int length) atmel_sha_fill_padding() argument
222 size[0] = ctx->digcnt[0]; atmel_sha_fill_padding()
223 size[1] = ctx->digcnt[1]; atmel_sha_fill_padding()
225 size[0] += ctx->bufcnt; atmel_sha_fill_padding()
226 if (size[0] < ctx->bufcnt) atmel_sha_fill_padding()
236 if (ctx->flags & (SHA_FLAGS_SHA384 | SHA_FLAGS_SHA512)) { atmel_sha_fill_padding()
237 index = ctx->bufcnt & 0x7f; atmel_sha_fill_padding()
239 *(ctx->buffer + ctx->bufcnt) = 0x80; atmel_sha_fill_padding()
240 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); atmel_sha_fill_padding()
241 memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); atmel_sha_fill_padding()
242 ctx->bufcnt += padlen + 16; atmel_sha_fill_padding()
243 ctx->flags |= SHA_FLAGS_PAD; atmel_sha_fill_padding()
245 index = ctx->bufcnt & 0x3f; atmel_sha_fill_padding()
247 *(ctx->buffer + ctx->bufcnt) = 0x80; atmel_sha_fill_padding()
248 memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen-1); atmel_sha_fill_padding()
249 memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); atmel_sha_fill_padding()
250 ctx->bufcnt += padlen + 8; atmel_sha_fill_padding()
251 ctx->flags |= SHA_FLAGS_PAD; atmel_sha_fill_padding()
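The two branches above are the same MD padding with different geometry: 64-byte blocks with a 64-bit length field for SHA-1/224/256 versus 128-byte blocks with a 128-bit length field for SHA-384/512. A sketch of the pad-length computation performed by the lines elided from this listing (the 56/120 and 112/240 constants follow from the block and length-field sizes):

static unsigned int sha_padlen(unsigned int bufcnt, bool sha512_family)
{
	unsigned int blk   = sha512_family ? 0x80 : 0x40; /* block bytes  */
	unsigned int lenf  = sha512_family ? 16 : 8;	  /* length field */
	unsigned int index = bufcnt & (blk - 1);
	unsigned int last  = blk - lenf;		  /* 112 or 56    */

	/* bytes of 0x80 + zeros so the length field ends the block;
	 * the caller then appends lenf length bytes, as above. */
	return (index < last) ? (last - index) : (blk + last - index);
}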
259 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_init() local
276 ctx->dd = dd; atmel_sha_init()
278 ctx->flags = 0; atmel_sha_init()
285 ctx->flags |= SHA_FLAGS_SHA1; atmel_sha_init()
286 ctx->block_size = SHA1_BLOCK_SIZE; atmel_sha_init()
289 ctx->flags |= SHA_FLAGS_SHA224; atmel_sha_init()
290 ctx->block_size = SHA224_BLOCK_SIZE; atmel_sha_init()
293 ctx->flags |= SHA_FLAGS_SHA256; atmel_sha_init()
294 ctx->block_size = SHA256_BLOCK_SIZE; atmel_sha_init()
297 ctx->flags |= SHA_FLAGS_SHA384; atmel_sha_init()
298 ctx->block_size = SHA384_BLOCK_SIZE; atmel_sha_init()
301 ctx->flags |= SHA_FLAGS_SHA512; atmel_sha_init()
302 ctx->block_size = SHA512_BLOCK_SIZE; atmel_sha_init()
309 ctx->bufcnt = 0; atmel_sha_init()
310 ctx->digcnt[0] = 0; atmel_sha_init()
311 ctx->digcnt[1] = 0; atmel_sha_init()
312 ctx->buflen = SHA_BUFFER_LEN; atmel_sha_init()
319 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); atmel_sha_write_ctrl() local
332 if (ctx->flags & SHA_FLAGS_SHA1) atmel_sha_write_ctrl()
334 else if (ctx->flags & SHA_FLAGS_SHA224) atmel_sha_write_ctrl()
336 else if (ctx->flags & SHA_FLAGS_SHA256) atmel_sha_write_ctrl()
338 else if (ctx->flags & SHA_FLAGS_SHA384) atmel_sha_write_ctrl()
340 else if (ctx->flags & SHA_FLAGS_SHA512) atmel_sha_write_ctrl()
344 if (!(ctx->digcnt[0] || ctx->digcnt[1])) atmel_sha_write_ctrl()
354 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); atmel_sha_xmit_cpu() local
359 ctx->digcnt[1], ctx->digcnt[0], length, final); atmel_sha_xmit_cpu()
364 ctx->digcnt[0] += length; atmel_sha_xmit_cpu()
365 if (ctx->digcnt[0] < length) atmel_sha_xmit_cpu()
366 ctx->digcnt[1]++; atmel_sha_xmit_cpu()
384 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); atmel_sha_xmit_pdc() local
388 ctx->digcnt[1], ctx->digcnt[0], length1, final); atmel_sha_xmit_pdc()
402 ctx->digcnt[0] += length1; atmel_sha_xmit_pdc()
403 if (ctx->digcnt[0] < length1) atmel_sha_xmit_pdc()
404 ctx->digcnt[1]++; atmel_sha_xmit_pdc()
428 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); atmel_sha_xmit_dma() local
433 ctx->digcnt[1], ctx->digcnt[0], length1, final); atmel_sha_xmit_dma()
464 ctx->digcnt[0] += length1; atmel_sha_xmit_dma()
465 if (ctx->digcnt[0] < length1) atmel_sha_xmit_dma()
466 ctx->digcnt[1]++; atmel_sha_xmit_dma()
493 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); atmel_sha_update_cpu() local
496 atmel_sha_append_sg(ctx); atmel_sha_update_cpu()
497 atmel_sha_fill_padding(ctx, 0); atmel_sha_update_cpu()
498 bufcnt = ctx->bufcnt; atmel_sha_update_cpu()
499 ctx->bufcnt = 0; atmel_sha_update_cpu()
501 return atmel_sha_xmit_cpu(dd, ctx->buffer, bufcnt, 1); atmel_sha_update_cpu()
505 struct atmel_sha_reqctx *ctx, atmel_sha_xmit_dma_map()
508 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, atmel_sha_xmit_dma_map()
509 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); atmel_sha_xmit_dma_map()
510 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { atmel_sha_xmit_dma_map()
511 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen + atmel_sha_xmit_dma_map()
512 ctx->block_size); atmel_sha_xmit_dma_map()
516 ctx->flags &= ~SHA_FLAGS_SG; atmel_sha_xmit_dma_map()
519 return atmel_sha_xmit_start(dd, ctx->dma_addr, length, 0, 0, final); atmel_sha_xmit_dma_map()
524 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); atmel_sha_update_dma_slow() local
528 atmel_sha_append_sg(ctx); atmel_sha_update_dma_slow()
530 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; atmel_sha_update_dma_slow()
533 ctx->bufcnt, ctx->digcnt[1], ctx->digcnt[0], final); atmel_sha_update_dma_slow()
536 atmel_sha_fill_padding(ctx, 0); atmel_sha_update_dma_slow()
538 if (final || (ctx->bufcnt == ctx->buflen)) { atmel_sha_update_dma_slow()
539 count = ctx->bufcnt; atmel_sha_update_dma_slow()
540 ctx->bufcnt = 0; atmel_sha_update_dma_slow()
541 return atmel_sha_xmit_dma_map(dd, ctx, count, final); atmel_sha_update_dma_slow()
549 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); atmel_sha_update_dma_start() local
554 if (!ctx->total) atmel_sha_update_dma_start()
557 if (ctx->bufcnt || ctx->offset) atmel_sha_update_dma_start()
561 ctx->digcnt[1], ctx->digcnt[0], ctx->bufcnt, ctx->total); atmel_sha_update_dma_start()
563 sg = ctx->sg; atmel_sha_update_dma_start()
568 if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->block_size)) atmel_sha_update_dma_start()
569 /* size is not ctx->block_size aligned */ atmel_sha_update_dma_start()
572 length = min(ctx->total, sg->length); atmel_sha_update_dma_start()
575 if (!(ctx->flags & SHA_FLAGS_FINUP)) { atmel_sha_update_dma_start()
576 /* not last sg must be ctx->block_size aligned */ atmel_sha_update_dma_start()
577 tail = length & (ctx->block_size - 1); atmel_sha_update_dma_start()
582 ctx->total -= length; atmel_sha_update_dma_start()
583 ctx->offset = length; /* offset where to start slow */ atmel_sha_update_dma_start()
585 final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; atmel_sha_update_dma_start()
589 tail = length & (ctx->block_size - 1); atmel_sha_update_dma_start()
591 ctx->total += tail; atmel_sha_update_dma_start()
592 ctx->offset = length; /* offset where to start slow */ atmel_sha_update_dma_start()
594 sg = ctx->sg; atmel_sha_update_dma_start()
595 atmel_sha_append_sg(ctx); atmel_sha_update_dma_start()
597 atmel_sha_fill_padding(ctx, length); atmel_sha_update_dma_start()
599 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, atmel_sha_update_dma_start()
600 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); atmel_sha_update_dma_start()
601 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { atmel_sha_update_dma_start()
603 ctx->buflen + ctx->block_size); atmel_sha_update_dma_start()
608 ctx->flags &= ~SHA_FLAGS_SG; atmel_sha_update_dma_start()
609 count = ctx->bufcnt; atmel_sha_update_dma_start()
610 ctx->bufcnt = 0; atmel_sha_update_dma_start()
611 return atmel_sha_xmit_start(dd, ctx->dma_addr, count, 0, atmel_sha_update_dma_start()
614 ctx->sg = sg; atmel_sha_update_dma_start()
615 if (!dma_map_sg(dd->dev, ctx->sg, 1, atmel_sha_update_dma_start()
621 ctx->flags |= SHA_FLAGS_SG; atmel_sha_update_dma_start()
623 count = ctx->bufcnt; atmel_sha_update_dma_start()
624 ctx->bufcnt = 0; atmel_sha_update_dma_start()
625 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), atmel_sha_update_dma_start()
626 length, ctx->dma_addr, count, final); atmel_sha_update_dma_start()
630 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { atmel_sha_update_dma_start()
635 ctx->flags |= SHA_FLAGS_SG; atmel_sha_update_dma_start()
638 return atmel_sha_xmit_start(dd, sg_dma_address(ctx->sg), length, 0, atmel_sha_update_dma_start()
644 struct atmel_sha_reqctx *ctx = ahash_request_ctx(dd->req); atmel_sha_update_dma_stop() local
646 if (ctx->flags & SHA_FLAGS_SG) { atmel_sha_update_dma_stop()
647 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); atmel_sha_update_dma_stop()
648 if (ctx->sg->length == ctx->offset) { atmel_sha_update_dma_stop()
649 ctx->sg = sg_next(ctx->sg); atmel_sha_update_dma_stop()
650 if (ctx->sg) atmel_sha_update_dma_stop()
651 ctx->offset = 0; atmel_sha_update_dma_stop()
653 if (ctx->flags & SHA_FLAGS_PAD) { atmel_sha_update_dma_stop()
654 dma_unmap_single(dd->dev, ctx->dma_addr, atmel_sha_update_dma_stop()
655 ctx->buflen + ctx->block_size, DMA_TO_DEVICE); atmel_sha_update_dma_stop()
658 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen + atmel_sha_update_dma_stop()
659 ctx->block_size, DMA_TO_DEVICE); atmel_sha_update_dma_stop()
668 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_update_req() local
672 ctx->total, ctx->digcnt[1], ctx->digcnt[0]); atmel_sha_update_req()
674 if (ctx->flags & SHA_FLAGS_CPU) atmel_sha_update_req()
681 err, ctx->digcnt[1], ctx->digcnt[0]); atmel_sha_update_req()
689 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_final_req() local
693 if (ctx->bufcnt >= ATMEL_SHA_DMA_THRESHOLD) { atmel_sha_final_req()
694 atmel_sha_fill_padding(ctx, 0); atmel_sha_final_req()
695 count = ctx->bufcnt; atmel_sha_final_req()
696 ctx->bufcnt = 0; atmel_sha_final_req()
697 err = atmel_sha_xmit_dma_map(dd, ctx, count, 1); atmel_sha_final_req()
701 atmel_sha_fill_padding(ctx, 0); atmel_sha_final_req()
702 count = ctx->bufcnt; atmel_sha_final_req()
703 ctx->bufcnt = 0; atmel_sha_final_req()
704 err = atmel_sha_xmit_cpu(dd, ctx->buffer, count, 1); atmel_sha_final_req()
714 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_copy_hash() local
715 u32 *hash = (u32 *)ctx->digest; atmel_sha_copy_hash()
718 if (ctx->flags & SHA_FLAGS_SHA1) atmel_sha_copy_hash()
720 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); atmel_sha_copy_hash()
721 else if (ctx->flags & SHA_FLAGS_SHA224) atmel_sha_copy_hash()
723 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); atmel_sha_copy_hash()
724 else if (ctx->flags & SHA_FLAGS_SHA256) atmel_sha_copy_hash()
726 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); atmel_sha_copy_hash()
727 else if (ctx->flags & SHA_FLAGS_SHA384) atmel_sha_copy_hash()
729 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); atmel_sha_copy_hash()
732 hash[i] = atmel_sha_read(ctx->dd, SHA_REG_DIGEST(i)); atmel_sha_copy_hash()
737 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_copy_ready_hash() local
742 if (ctx->flags & SHA_FLAGS_SHA1) atmel_sha_copy_ready_hash()
743 memcpy(req->result, ctx->digest, SHA1_DIGEST_SIZE); atmel_sha_copy_ready_hash()
744 else if (ctx->flags & SHA_FLAGS_SHA224) atmel_sha_copy_ready_hash()
745 memcpy(req->result, ctx->digest, SHA224_DIGEST_SIZE); atmel_sha_copy_ready_hash()
746 else if (ctx->flags & SHA_FLAGS_SHA256) atmel_sha_copy_ready_hash()
747 memcpy(req->result, ctx->digest, SHA256_DIGEST_SIZE); atmel_sha_copy_ready_hash()
748 else if (ctx->flags & SHA_FLAGS_SHA384) atmel_sha_copy_ready_hash()
749 memcpy(req->result, ctx->digest, SHA384_DIGEST_SIZE); atmel_sha_copy_ready_hash()
751 memcpy(req->result, ctx->digest, SHA512_DIGEST_SIZE); atmel_sha_copy_ready_hash()
756 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_finish() local
757 struct atmel_sha_dev *dd = ctx->dd; atmel_sha_finish()
760 if (ctx->digcnt[0] || ctx->digcnt[1]) atmel_sha_finish()
763 dev_dbg(dd->dev, "digcnt: 0x%llx 0x%llx, bufcnt: %d\n", ctx->digcnt[1], atmel_sha_finish()
764 ctx->digcnt[0], ctx->bufcnt); atmel_sha_finish()
771 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_finish_req() local
772 struct atmel_sha_dev *dd = ctx->dd; atmel_sha_finish_req()
779 ctx->flags |= SHA_FLAGS_ERROR; atmel_sha_finish_req()
833 struct atmel_sha_reqctx *ctx; atmel_sha_handle_queue() local
861 ctx = ahash_request_ctx(req); atmel_sha_handle_queue()
864 ctx->op, req->nbytes); atmel_sha_handle_queue()
871 if (ctx->op == SHA_OP_UPDATE) { atmel_sha_handle_queue()
873 if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) atmel_sha_handle_queue()
876 } else if (ctx->op == SHA_OP_FINAL) { atmel_sha_handle_queue()
892 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_enqueue() local
896 ctx->op = op; atmel_sha_enqueue()
903 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_update() local
908 ctx->total = req->nbytes; atmel_sha_update()
909 ctx->sg = req->src; atmel_sha_update()
910 ctx->offset = 0; atmel_sha_update()
912 if (ctx->flags & SHA_FLAGS_FINUP) { atmel_sha_update()
913 if (ctx->bufcnt + ctx->total < ATMEL_SHA_DMA_THRESHOLD) atmel_sha_update()
915 ctx->flags |= SHA_FLAGS_CPU; atmel_sha_update()
916 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { atmel_sha_update()
917 atmel_sha_append_sg(ctx); atmel_sha_update()
925 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_final() local
931 ctx->flags |= SHA_FLAGS_FINUP; atmel_sha_final()
933 if (ctx->flags & SHA_FLAGS_ERROR) atmel_sha_final()
936 if (ctx->bufcnt) { atmel_sha_final()
938 } else if (!(ctx->flags & SHA_FLAGS_PAD)) { /* add padding */ atmel_sha_final()
960 struct atmel_sha_reqctx *ctx = ahash_request_ctx(req); atmel_sha_finup() local
963 ctx->flags |= SHA_FLAGS_FINUP; atmel_sha_finup()
504 atmel_sha_xmit_dma_map(struct atmel_sha_dev *dd, struct atmel_sha_reqctx *ctx, size_t length, int final) atmel_sha_xmit_dma_map() argument
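
The digcnt updates that recur in atmel_sha_xmit_cpu/_pdc/_dma above are a two-word add with carry: the hashed-byte count lives in two 64-bit halves, and an unsigned wrap of the low half is the carry test. A minimal plain-C sketch of that pattern (illustrative, not the driver's code):

    #include <stdint.h>

    /* Count of hashed bytes kept as two 64-bit halves, as in
     * atmel_sha_reqctx::digcnt[2]. After the add, digcnt[0] < length
     * can only hold if the addition wrapped, so that is the carry. */
    static void digcnt_add(uint64_t digcnt[2], uint64_t length)
    {
            digcnt[0] += length;
            if (digcnt[0] < length)
                    digcnt[1]++;
    }
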
H A Dimg-hash.c165 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_start() local
168 if (ctx->flags & DRIVER_FLAGS_MD5) img_hash_start()
170 else if (ctx->flags & DRIVER_FLAGS_SHA1) img_hash_start()
172 else if (ctx->flags & DRIVER_FLAGS_SHA224) img_hash_start()
174 else if (ctx->flags & DRIVER_FLAGS_SHA256) img_hash_start()
211 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_dma_callback() local
213 if (ctx->bufcnt) { img_hash_dma_callback()
214 img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0); img_hash_dma_callback()
215 ctx->bufcnt = 0; img_hash_dma_callback()
217 if (ctx->sg) img_hash_dma_callback()
224 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_xmit_dma() local
226 ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV); img_hash_xmit_dma()
227 if (ctx->dma_ct == 0) { img_hash_xmit_dma()
235 ctx->dma_ct, img_hash_xmit_dma()
254 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_write_via_cpu() local
256 ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg), img_hash_write_via_cpu()
257 ctx->buffer, hdev->req->nbytes); img_hash_write_via_cpu()
259 ctx->total = hdev->req->nbytes; img_hash_write_via_cpu()
260 ctx->bufcnt = 0; img_hash_write_via_cpu()
266 return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1); img_hash_write_via_cpu()
271 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); img_hash_finish() local
276 memcpy(req->result, ctx->digest, ctx->digsize); img_hash_finish()
283 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); img_hash_copy_hash() local
284 u32 *hash = (u32 *)ctx->digest; img_hash_copy_hash()
287 for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--) img_hash_copy_hash()
288 hash[i] = img_hash_read_result_queue(ctx->hdev); img_hash_copy_hash()
293 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); img_hash_finish_req() local
294 struct img_hash_dev *hdev = ctx->hdev; img_hash_finish_req()
302 ctx->flags |= DRIVER_FLAGS_ERROR; img_hash_finish_req()
314 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_write_via_dma() local
318 dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total); img_hash_write_via_dma()
320 if (!ctx->total) img_hash_write_via_dma()
359 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_dma_task() local
364 if (!ctx->sg) img_hash_dma_task()
367 addr = sg_virt(ctx->sg); img_hash_dma_task()
368 nbytes = ctx->sg->length - ctx->offset; img_hash_dma_task()
384 sg_init_one(&tsg, addr + ctx->offset, wsend * 4); img_hash_dma_task()
387 ctx->flags |= DRIVER_FLAGS_CPU; img_hash_dma_task()
389 img_hash_xmit_cpu(hdev, addr + ctx->offset, img_hash_dma_task()
391 ctx->sent += wsend * 4; img_hash_dma_task()
394 ctx->sent += wsend * 4; img_hash_dma_task()
399 ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents, img_hash_dma_task()
400 ctx->buffer, bleft, ctx->sent); img_hash_dma_task()
402 ctx->sg = sg_next(ctx->sg); img_hash_dma_task()
403 while (ctx->sg && (ctx->bufcnt < 4)) { img_hash_dma_task()
404 len = ctx->sg->length; img_hash_dma_task()
405 if (likely(len > (4 - ctx->bufcnt))) img_hash_dma_task()
406 len = 4 - ctx->bufcnt; img_hash_dma_task()
407 tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents, img_hash_dma_task()
408 ctx->buffer + ctx->bufcnt, len, img_hash_dma_task()
409 ctx->sent + ctx->bufcnt); img_hash_dma_task()
410 ctx->bufcnt += tbc; img_hash_dma_task()
411 if (tbc >= ctx->sg->length) { img_hash_dma_task()
412 ctx->sg = sg_next(ctx->sg); img_hash_dma_task()
417 ctx->sent += ctx->bufcnt; img_hash_dma_task()
418 ctx->offset = tbc; img_hash_dma_task()
423 ctx->offset = 0; img_hash_dma_task()
424 ctx->sg = sg_next(ctx->sg); img_hash_dma_task()
430 struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req); img_hash_write_via_dma_stop() local
432 if (ctx->flags & DRIVER_FLAGS_SG) img_hash_write_via_dma_stop()
433 dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE); img_hash_write_via_dma_stop()
441 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); img_hash_process_data() local
444 ctx->bufcnt = 0; img_hash_process_data()
485 struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); img_hash_init() local
487 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); img_hash_init()
498 struct img_hash_request_ctx *ctx; img_hash_handle_queue() local
528 ctx = ahash_request_ctx(req); img_hash_handle_queue()
531 ctx->op, req->nbytes); img_hash_handle_queue()
549 struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); img_hash_update() local
551 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); img_hash_update()
564 struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); img_hash_final() local
566 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); img_hash_final()
578 struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm); img_hash_finup() local
580 ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback); img_hash_finup()
594 struct img_hash_request_ctx *ctx = ahash_request_ctx(req); img_hash_digest() local
612 ctx->hdev = hdev; img_hash_digest()
613 ctx->flags = 0; img_hash_digest()
614 ctx->digsize = crypto_ahash_digestsize(tfm); img_hash_digest()
616 switch (ctx->digsize) { img_hash_digest()
618 ctx->flags |= DRIVER_FLAGS_SHA1; img_hash_digest()
621 ctx->flags |= DRIVER_FLAGS_SHA256; img_hash_digest()
624 ctx->flags |= DRIVER_FLAGS_SHA224; img_hash_digest()
627 ctx->flags |= DRIVER_FLAGS_MD5; img_hash_digest()
633 ctx->bufcnt = 0; img_hash_digest()
634 ctx->offset = 0; img_hash_digest()
635 ctx->sent = 0; img_hash_digest()
636 ctx->total = req->nbytes; img_hash_digest()
637 ctx->sg = req->src; img_hash_digest()
638 ctx->sgfirst = req->src; img_hash_digest()
639 ctx->nents = sg_nents(ctx->sg); img_hash_digest()
648 struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm); img_hash_cra_init() local
652 ctx->fallback = crypto_alloc_ahash(alg_name, 0, img_hash_cra_init()
654 if (IS_ERR(ctx->fallback)) { img_hash_cra_init()
656 err = PTR_ERR(ctx->fallback); img_hash_cra_init()
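
img_hash_digest() above picks the hardware mode from the requested digest size alone. A sketch of that dispatch under stand-in flag values (the sizes are the standard MD5/SHA digest lengths; the real DRIVER_FLAGS_* bits differ):

    /* Stand-in flag bits; the driver's DRIVER_FLAGS_* values differ. */
    #define F_MD5    0x01
    #define F_SHA1   0x02
    #define F_SHA224 0x04
    #define F_SHA256 0x08

    static int flags_for_digestsize(unsigned int digsize)
    {
            switch (digsize) {
            case 16: return F_MD5;          /* MD5_DIGEST_SIZE */
            case 20: return F_SHA1;         /* SHA1_DIGEST_SIZE */
            case 28: return F_SHA224;       /* SHA224_DIGEST_SIZE */
            case 32: return F_SHA256;       /* SHA256_DIGEST_SIZE */
            default: return -1;             /* unsupported size */
            }
    }
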
H A Domap-sham.c281 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_copy_hash_omap2() local
282 struct omap_sham_dev *dd = ctx->dd; omap_sham_copy_hash_omap2()
283 u32 *hash = (u32 *)ctx->digest; omap_sham_copy_hash_omap2()
296 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_copy_hash_omap4() local
297 struct omap_sham_dev *dd = ctx->dd; omap_sham_copy_hash_omap4()
300 if (ctx->flags & BIT(FLAGS_HMAC)) { omap_sham_copy_hash_omap4()
321 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_copy_ready_hash() local
322 u32 *in = (u32 *)ctx->digest; omap_sham_copy_ready_hash()
329 switch (ctx->flags & FLAGS_MODE_MASK) { omap_sham_copy_ready_hash()
335 if (test_bit(FLAGS_BE32_SHA1, &ctx->dd->flags)) omap_sham_copy_ready_hash()
378 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_sham_write_ctrl_omap2() local
381 if (likely(ctx->digcnt)) omap_sham_write_ctrl_omap2()
382 omap_sham_write(dd, SHA_REG_DIGCNT(dd), ctx->digcnt); omap_sham_write_ctrl_omap2()
391 if ((ctx->flags & FLAGS_MODE_MASK) == FLAGS_MODE_SHA1) omap_sham_write_ctrl_omap2()
393 if (!ctx->digcnt) omap_sham_write_ctrl_omap2()
413 static int get_block_size(struct omap_sham_reqctx *ctx) get_block_size() argument
417 switch (ctx->flags & FLAGS_MODE_MASK) { get_block_size()
447 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_sham_write_ctrl_omap4() local
455 val = (ctx->flags & FLAGS_MODE_MASK) >> (FLAGS_MODE_SHIFT); omap_sham_write_ctrl_omap4()
456 if (!ctx->digcnt) { omap_sham_write_ctrl_omap4()
464 if (ctx->flags & BIT(FLAGS_HMAC)) { omap_sham_write_ctrl_omap4()
465 bs = get_block_size(ctx); omap_sham_write_ctrl_omap4()
472 ctx->digcnt += bs; omap_sham_write_ctrl_omap4()
479 if (ctx->flags & BIT(FLAGS_HMAC)) omap_sham_write_ctrl_omap4()
487 dev_dbg(dd->dev, "ctrl: %08x, flags: %08lx\n", val, ctx->flags); omap_sham_write_ctrl_omap4()
510 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_sham_xmit_cpu() local
515 ctx->digcnt, length, final); omap_sham_xmit_cpu()
521 ctx->digcnt += length; omap_sham_xmit_cpu()
529 bs32 = get_block_size(ctx) / sizeof(u32); omap_sham_xmit_cpu()
555 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_sham_xmit_dma() local
558 int len32, ret, dma_min = get_block_size(ctx); omap_sham_xmit_dma()
561 ctx->digcnt, length, final); omap_sham_xmit_dma()
584 sg_init_table(&ctx->sgl, 1); omap_sham_xmit_dma()
585 ctx->sgl.page_link = ctx->sg->page_link; omap_sham_xmit_dma()
586 ctx->sgl.offset = ctx->sg->offset; omap_sham_xmit_dma()
587 sg_dma_len(&ctx->sgl) = len32; omap_sham_xmit_dma()
588 sg_dma_address(&ctx->sgl) = sg_dma_address(ctx->sg); omap_sham_xmit_dma()
590 tx = dmaengine_prep_slave_sg(dd->dma_lch, &ctx->sgl, 1, omap_sham_xmit_dma()
607 ctx->digcnt += length; omap_sham_xmit_dma()
622 static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, omap_sham_append_buffer() argument
625 size_t count = min(length, ctx->buflen - ctx->bufcnt); omap_sham_append_buffer()
627 count = min(count, ctx->total); omap_sham_append_buffer()
630 memcpy(ctx->buffer + ctx->bufcnt, data, count); omap_sham_append_buffer()
631 ctx->bufcnt += count; omap_sham_append_buffer()
636 static size_t omap_sham_append_sg(struct omap_sham_reqctx *ctx) omap_sham_append_sg() argument
641 while (ctx->sg) { omap_sham_append_sg()
642 vaddr = kmap_atomic(sg_page(ctx->sg)); omap_sham_append_sg()
643 vaddr += ctx->sg->offset; omap_sham_append_sg()
645 count = omap_sham_append_buffer(ctx, omap_sham_append_sg()
646 vaddr + ctx->offset, omap_sham_append_sg()
647 ctx->sg->length - ctx->offset); omap_sham_append_sg()
653 ctx->offset += count; omap_sham_append_sg()
654 ctx->total -= count; omap_sham_append_sg()
655 if (ctx->offset == ctx->sg->length) { omap_sham_append_sg()
656 ctx->sg = sg_next(ctx->sg); omap_sham_append_sg()
657 if (ctx->sg) omap_sham_append_sg()
658 ctx->offset = 0; omap_sham_append_sg()
660 ctx->total = 0; omap_sham_append_sg()
668 struct omap_sham_reqctx *ctx, omap_sham_xmit_dma_map()
673 ctx->dma_addr = dma_map_single(dd->dev, ctx->buffer, ctx->buflen, omap_sham_xmit_dma_map()
675 if (dma_mapping_error(dd->dev, ctx->dma_addr)) { omap_sham_xmit_dma_map()
676 dev_err(dd->dev, "dma %u bytes error\n", ctx->buflen); omap_sham_xmit_dma_map()
680 ctx->flags &= ~BIT(FLAGS_SG); omap_sham_xmit_dma_map()
682 ret = omap_sham_xmit_dma(dd, ctx->dma_addr, length, final, 0); omap_sham_xmit_dma_map()
684 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, omap_sham_xmit_dma_map()
692 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_sham_update_dma_slow() local
696 omap_sham_append_sg(ctx); omap_sham_update_dma_slow()
698 final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; omap_sham_update_dma_slow()
701 ctx->bufcnt, ctx->digcnt, final); omap_sham_update_dma_slow()
703 if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { omap_sham_update_dma_slow()
704 count = ctx->bufcnt; omap_sham_update_dma_slow()
705 ctx->bufcnt = 0; omap_sham_update_dma_slow()
706 return omap_sham_xmit_dma_map(dd, ctx, count, final); omap_sham_update_dma_slow()
719 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_sham_update_dma_start() local
724 if (!ctx->total) omap_sham_update_dma_start()
727 if (ctx->bufcnt || ctx->offset) omap_sham_update_dma_start()
736 if (ctx->total < get_block_size(ctx)) omap_sham_update_dma_start()
740 ctx->digcnt, ctx->bufcnt, ctx->total); omap_sham_update_dma_start()
742 sg = ctx->sg; omap_sham_update_dma_start()
743 bs = get_block_size(ctx); omap_sham_update_dma_start()
752 length = min(ctx->total, sg->length); omap_sham_update_dma_start()
755 if (!(ctx->flags & BIT(FLAGS_FINUP))) { omap_sham_update_dma_start()
765 if (!dma_map_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE)) { omap_sham_update_dma_start()
770 ctx->flags |= BIT(FLAGS_SG); omap_sham_update_dma_start()
772 ctx->total -= length; omap_sham_update_dma_start()
773 ctx->offset = length; /* offset where to start slow */ omap_sham_update_dma_start()
775 final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; omap_sham_update_dma_start()
777 ret = omap_sham_xmit_dma(dd, sg_dma_address(ctx->sg), length, final, 1); omap_sham_update_dma_start()
779 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); omap_sham_update_dma_start()
786 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_sham_update_cpu() local
789 if (!ctx->total) omap_sham_update_cpu()
792 omap_sham_append_sg(ctx); omap_sham_update_cpu()
794 final = (ctx->flags & BIT(FLAGS_FINUP)) && !ctx->total; omap_sham_update_cpu()
797 ctx->bufcnt, ctx->digcnt, final); omap_sham_update_cpu()
799 if (final || (ctx->bufcnt == ctx->buflen && ctx->total)) { omap_sham_update_cpu()
800 bufcnt = ctx->bufcnt; omap_sham_update_cpu()
801 ctx->bufcnt = 0; omap_sham_update_cpu()
802 return omap_sham_xmit_cpu(dd, ctx->buffer, bufcnt, final); omap_sham_update_cpu()
810 struct omap_sham_reqctx *ctx = ahash_request_ctx(dd->req); omap_sham_update_dma_stop() local
814 if (ctx->flags & BIT(FLAGS_SG)) { omap_sham_update_dma_stop()
815 dma_unmap_sg(dd->dev, ctx->sg, 1, DMA_TO_DEVICE); omap_sham_update_dma_stop()
816 if (ctx->sg->length == ctx->offset) { omap_sham_update_dma_stop()
817 ctx->sg = sg_next(ctx->sg); omap_sham_update_dma_stop()
818 if (ctx->sg) omap_sham_update_dma_stop()
819 ctx->offset = 0; omap_sham_update_dma_stop()
822 dma_unmap_single(dd->dev, ctx->dma_addr, ctx->buflen, omap_sham_update_dma_stop()
833 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_init() local
849 ctx->dd = dd; omap_sham_init()
851 ctx->flags = 0; omap_sham_init()
858 ctx->flags |= FLAGS_MODE_MD5; omap_sham_init()
862 ctx->flags |= FLAGS_MODE_SHA1; omap_sham_init()
866 ctx->flags |= FLAGS_MODE_SHA224; omap_sham_init()
870 ctx->flags |= FLAGS_MODE_SHA256; omap_sham_init()
874 ctx->flags |= FLAGS_MODE_SHA384; omap_sham_init()
878 ctx->flags |= FLAGS_MODE_SHA512; omap_sham_init()
883 ctx->bufcnt = 0; omap_sham_init()
884 ctx->digcnt = 0; omap_sham_init()
885 ctx->buflen = BUFLEN; omap_sham_init()
891 memcpy(ctx->buffer, bctx->ipad, bs); omap_sham_init()
892 ctx->bufcnt = bs; omap_sham_init()
895 ctx->flags |= BIT(FLAGS_HMAC); omap_sham_init()
905 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_update_req() local
909 ctx->total, ctx->digcnt, (ctx->flags & BIT(FLAGS_FINUP)) != 0); omap_sham_update_req()
911 if (ctx->flags & BIT(FLAGS_CPU)) omap_sham_update_req()
917 dev_dbg(dd->dev, "update: err: %d, digcnt: %d\n", err, ctx->digcnt); omap_sham_update_req()
925 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_final_req() local
928 if ((ctx->bufcnt <= get_block_size(ctx)) || dd->polling_mode) omap_sham_final_req()
936 err = omap_sham_xmit_dma_map(dd, ctx, ctx->bufcnt, 1); omap_sham_final_req()
938 err = omap_sham_xmit_cpu(dd, ctx->buffer, ctx->bufcnt, 1); omap_sham_final_req()
940 ctx->bufcnt = 0; omap_sham_final_req()
965 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_finish() local
966 struct omap_sham_dev *dd = ctx->dd; omap_sham_finish()
969 if (ctx->digcnt) { omap_sham_finish()
971 if ((ctx->flags & BIT(FLAGS_HMAC)) && omap_sham_finish()
976 dev_dbg(dd->dev, "digcnt: %d, bufcnt: %d\n", ctx->digcnt, ctx->bufcnt); omap_sham_finish()
983 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_finish_req() local
984 struct omap_sham_dev *dd = ctx->dd; omap_sham_finish_req()
991 ctx->flags |= BIT(FLAGS_ERROR); omap_sham_finish_req()
1011 struct omap_sham_reqctx *ctx; omap_sham_handle_queue() local
1036 ctx = ahash_request_ctx(req); omap_sham_handle_queue()
1039 ctx->op, req->nbytes); omap_sham_handle_queue()
1045 if (ctx->digcnt) omap_sham_handle_queue()
1049 if (ctx->op == OP_UPDATE) { omap_sham_handle_queue()
1051 if (err != -EINPROGRESS && (ctx->flags & BIT(FLAGS_FINUP))) omap_sham_handle_queue()
1054 } else if (ctx->op == OP_FINAL) { omap_sham_handle_queue()
1069 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_enqueue() local
1073 ctx->op = op; omap_sham_enqueue()
1080 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_update() local
1081 struct omap_sham_dev *dd = ctx->dd; omap_sham_update()
1082 int bs = get_block_size(ctx); omap_sham_update()
1087 ctx->total = req->nbytes; omap_sham_update()
1088 ctx->sg = req->src; omap_sham_update()
1089 ctx->offset = 0; omap_sham_update()
1091 if (ctx->flags & BIT(FLAGS_FINUP)) { omap_sham_update()
1092 if ((ctx->digcnt + ctx->bufcnt + ctx->total) < 9) { omap_sham_update()
1098 omap_sham_append_sg(ctx); omap_sham_update()
1100 } else if ((ctx->bufcnt + ctx->total <= bs) || omap_sham_update()
1106 ctx->flags |= BIT(FLAGS_CPU); omap_sham_update()
1108 } else if (ctx->bufcnt + ctx->total < ctx->buflen) { omap_sham_update()
1109 omap_sham_append_sg(ctx); omap_sham_update()
1114 ctx->flags |= BIT(FLAGS_CPU); omap_sham_update()
1133 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_final_shash() local
1136 ctx->buffer, ctx->bufcnt, req->result); omap_sham_final_shash()
1141 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_final() local
1143 ctx->flags |= BIT(FLAGS_FINUP); omap_sham_final()
1145 if (ctx->flags & BIT(FLAGS_ERROR)) omap_sham_final()
1150 if ((ctx->digcnt + ctx->bufcnt) < 9) omap_sham_final()
1152 else if (ctx->bufcnt) omap_sham_final()
1161 struct omap_sham_reqctx *ctx = ahash_request_ctx(req); omap_sham_finup() local
1164 ctx->flags |= BIT(FLAGS_FINUP); omap_sham_finup()
667 omap_sham_xmit_dma_map(struct omap_sham_dev *dd, struct omap_sham_reqctx *ctx, size_t length, int final) omap_sham_xmit_dma_map() argument
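
omap_sham_append_buffer() above bounds each copy twice, by the free space left in the staging buffer and by the bytes still pending for the request. The same clamp in isolation (illustrative names):

    #include <string.h>

    static size_t append_bounded(unsigned char *buf, size_t buflen,
                                 size_t *bufcnt, const void *data,
                                 size_t length, size_t total)
    {
            size_t count = buflen - *bufcnt;        /* free space */

            if (count > length)
                    count = length;
            if (count > total)                      /* bytes still owed */
                    count = total;
            memcpy(buf + *bufcnt, data, count);
            *bufcnt += count;
            return count;
    }
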
H A Dbfin_crc.c152 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); bfin_crypto_crc_init() local
155 dev_dbg(ctx->crc->dev, "crc_init\n"); bfin_crypto_crc_init()
164 dev_dbg(ctx->crc->dev, "init: requested sg list is too big > %d\n", bfin_crypto_crc_init()
169 ctx->crc = crc; bfin_crypto_crc_init()
170 ctx->bufnext_len = 0; bfin_crypto_crc_init()
171 ctx->buflast_len = 0; bfin_crypto_crc_init()
172 ctx->sg_buflen = 0; bfin_crypto_crc_init()
173 ctx->total = 0; bfin_crypto_crc_init()
174 ctx->flag = 0; bfin_crypto_crc_init()
179 dev_dbg(ctx->crc->dev, "init: digest size: %d\n", bfin_crypto_crc_init()
188 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(crc->req); bfin_crypto_crc_config_dma() local
196 dma_map_sg(crc->dev, ctx->sg, ctx->sg_nents, DMA_TO_DEVICE); bfin_crypto_crc_config_dma()
198 for_each_sg(ctx->sg, sg, ctx->sg_nents, j) { bfin_crypto_crc_config_dma()
202 dma_count = sg_dma_len(sg) - ctx->bufnext_len; bfin_crypto_crc_config_dma()
272 if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE || bfin_crypto_crc_config_dma()
273 ctx->flag == CRC_CRYPTO_STATE_FINISH)) { bfin_crypto_crc_config_dma()
274 crc->sg_cpu[i].start_addr = dma_map_single(crc->dev, ctx->bufnext, bfin_crypto_crc_config_dma()
303 struct bfin_crypto_crc_reqctx *ctx; bfin_crypto_crc_handle_queue() local
332 ctx = ahash_request_ctx(req); bfin_crypto_crc_handle_queue()
333 ctx->sg = NULL; bfin_crypto_crc_handle_queue()
334 ctx->sg_buflen = 0; bfin_crypto_crc_handle_queue()
335 ctx->sg_nents = 0; bfin_crypto_crc_handle_queue()
338 ctx->flag, req->nbytes); bfin_crypto_crc_handle_queue()
340 if (ctx->flag == CRC_CRYPTO_STATE_FINISH) { bfin_crypto_crc_handle_queue()
341 if (ctx->bufnext_len == 0) { bfin_crypto_crc_handle_queue()
347 memset(ctx->bufnext + ctx->bufnext_len, 0, bfin_crypto_crc_handle_queue()
348 CHKSUM_DIGEST_SIZE - ctx->bufnext_len); bfin_crypto_crc_handle_queue()
351 if (ctx->bufnext_len + req->nbytes < CHKSUM_DIGEST_SIZE) { bfin_crypto_crc_handle_queue()
352 memcpy(ctx->bufnext + ctx->bufnext_len, bfin_crypto_crc_handle_queue()
354 ctx->bufnext_len += req->nbytes; bfin_crypto_crc_handle_queue()
355 if (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE && bfin_crypto_crc_handle_queue()
356 ctx->bufnext_len) { bfin_crypto_crc_handle_queue()
364 if (ctx->bufnext_len) { bfin_crypto_crc_handle_queue()
366 ctx->buflast_len = ctx->bufnext_len; bfin_crypto_crc_handle_queue()
367 memcpy(ctx->buflast, ctx->bufnext, ctx->buflast_len); bfin_crypto_crc_handle_queue()
369 nsg = ctx->sg_buflen ? 2 : 1; bfin_crypto_crc_handle_queue()
370 sg_init_table(ctx->bufsl, nsg); bfin_crypto_crc_handle_queue()
371 sg_set_buf(ctx->bufsl, ctx->buflast, ctx->buflast_len); bfin_crypto_crc_handle_queue()
373 scatterwalk_sg_chain(ctx->bufsl, nsg, bfin_crypto_crc_handle_queue()
375 ctx->sg = ctx->bufsl; bfin_crypto_crc_handle_queue()
377 ctx->sg = req->src; bfin_crypto_crc_handle_queue()
380 nsg = ctx->sg_nents = sg_count(ctx->sg); bfin_crypto_crc_handle_queue()
381 ctx->sg_buflen = ctx->buflast_len + req->nbytes; bfin_crypto_crc_handle_queue()
382 ctx->bufnext_len = ctx->sg_buflen % 4; bfin_crypto_crc_handle_queue()
383 ctx->sg_buflen &= ~0x3; bfin_crypto_crc_handle_queue()
385 if (ctx->bufnext_len) { bfin_crypto_crc_handle_queue()
387 memset(ctx->bufnext, 0, CHKSUM_DIGEST_SIZE); bfin_crypto_crc_handle_queue()
388 nextlen = ctx->bufnext_len; bfin_crypto_crc_handle_queue()
390 sg = sg_get(ctx->sg, nsg, i); bfin_crypto_crc_handle_queue()
392 memcpy(ctx->bufnext + nextlen - j, bfin_crypto_crc_handle_queue()
395 ctx->sg_nents--; bfin_crypto_crc_handle_queue()
404 if (ctx->bufnext_len && (ctx->flag == CRC_CRYPTO_STATE_FINALUPDATE || bfin_crypto_crc_handle_queue()
405 ctx->flag == CRC_CRYPTO_STATE_FINISH)) bfin_crypto_crc_handle_queue()
406 ctx->sg_buflen += CHKSUM_DIGEST_SIZE; bfin_crypto_crc_handle_queue()
409 writel(ctx->sg_buflen >> 2, &crc->regs->datacnt); bfin_crypto_crc_handle_queue()
423 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); bfin_crypto_crc_update() local
428 dev_dbg(ctx->crc->dev, "crc_update\n"); bfin_crypto_crc_update()
429 ctx->total += req->nbytes; bfin_crypto_crc_update()
430 ctx->flag = CRC_CRYPTO_STATE_UPDATE; bfin_crypto_crc_update()
432 return bfin_crypto_crc_handle_queue(ctx->crc, req); bfin_crypto_crc_update()
439 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); bfin_crypto_crc_final() local
441 dev_dbg(ctx->crc->dev, "crc_final\n"); bfin_crypto_crc_final()
442 ctx->flag = CRC_CRYPTO_STATE_FINISH; bfin_crypto_crc_final()
445 return bfin_crypto_crc_handle_queue(ctx->crc, req); bfin_crypto_crc_final()
452 struct bfin_crypto_crc_reqctx *ctx = ahash_request_ctx(req); bfin_crypto_crc_finup() local
454 dev_dbg(ctx->crc->dev, "crc_finishupdate\n"); bfin_crypto_crc_finup()
455 ctx->total += req->nbytes; bfin_crypto_crc_finup()
456 ctx->flag = CRC_CRYPTO_STATE_FINALUPDATE; bfin_crypto_crc_finup()
459 return bfin_crypto_crc_handle_queue(ctx->crc, req); bfin_crypto_crc_finup()
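
bfin_crypto_crc_handle_queue() above feeds the CRC unit whole 32-bit words (note the writel(ctx->sg_buflen >> 2, ...) line) and holds the 1-3 byte tail in bufnext for the next update or final. A sketch of that split:

    /* nbytes -> (word-aligned DMA length, bytes held back for later) */
    static void split_for_crc(unsigned int nbytes,
                              unsigned int *dma_len, unsigned int *tail)
    {
            *tail    = nbytes % 4;          /* becomes ctx->bufnext_len */
            *dma_len = nbytes & ~0x3u;      /* becomes ctx->sg_buflen   */
    }
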
H A Dpadlock-aes.c84 static inline struct aes_ctx *aes_ctx_common(void *ctx) aes_ctx_common() argument
86 unsigned long addr = (unsigned long)ctx; aes_ctx_common()
107 struct aes_ctx *ctx = aes_ctx(tfm); aes_set_key() local
123 ctx->D = ctx->E; aes_set_key()
125 ctx->E[0] = le32_to_cpu(key[0]); aes_set_key()
126 ctx->E[1] = le32_to_cpu(key[1]); aes_set_key()
127 ctx->E[2] = le32_to_cpu(key[2]); aes_set_key()
128 ctx->E[3] = le32_to_cpu(key[3]); aes_set_key()
131 memset(&ctx->cword, 0, sizeof(ctx->cword)); aes_set_key()
133 ctx->cword.decrypt.encdec = 1; aes_set_key()
134 ctx->cword.encrypt.rounds = 10 + (key_len - 16) / 4; aes_set_key()
135 ctx->cword.decrypt.rounds = ctx->cword.encrypt.rounds; aes_set_key()
136 ctx->cword.encrypt.ksize = (key_len - 16) / 8; aes_set_key()
137 ctx->cword.decrypt.ksize = ctx->cword.encrypt.ksize; aes_set_key()
143 ctx->D = ctx->d_data; aes_set_key()
144 ctx->cword.encrypt.keygen = 1; aes_set_key()
145 ctx->cword.decrypt.keygen = 1; aes_set_key()
152 memcpy(ctx->E, gen_aes.key_enc, AES_MAX_KEYLENGTH); aes_set_key()
153 memcpy(ctx->D, gen_aes.key_dec, AES_MAX_KEYLENGTH); aes_set_key()
157 if (&ctx->cword.encrypt == per_cpu(paes_last_cword, cpu) || aes_set_key()
158 &ctx->cword.decrypt == per_cpu(paes_last_cword, cpu)) aes_set_key()
300 struct aes_ctx *ctx = aes_ctx(tfm); aes_encrypt() local
303 padlock_reset_key(&ctx->cword.encrypt); aes_encrypt()
305 ecb_crypt(in, out, ctx->E, &ctx->cword.encrypt, 1); aes_encrypt()
307 padlock_store_cword(&ctx->cword.encrypt); aes_encrypt()
312 struct aes_ctx *ctx = aes_ctx(tfm); aes_decrypt() local
315 padlock_reset_key(&ctx->cword.encrypt); aes_decrypt()
317 ecb_crypt(in, out, ctx->D, &ctx->cword.decrypt, 1); aes_decrypt()
319 padlock_store_cword(&ctx->cword.encrypt); aes_decrypt()
346 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); ecb_aes_encrypt() local
351 padlock_reset_key(&ctx->cword.encrypt); ecb_aes_encrypt()
359 ctx->E, &ctx->cword.encrypt, ecb_aes_encrypt()
366 padlock_store_cword(&ctx->cword.encrypt); ecb_aes_encrypt()
375 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); ecb_aes_decrypt() local
380 padlock_reset_key(&ctx->cword.decrypt); ecb_aes_decrypt()
388 ctx->D, &ctx->cword.decrypt, ecb_aes_decrypt()
395 padlock_store_cword(&ctx->cword.encrypt); ecb_aes_decrypt()
425 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); cbc_aes_encrypt() local
430 padlock_reset_key(&ctx->cword.encrypt); cbc_aes_encrypt()
438 walk.dst.virt.addr, ctx->E, cbc_aes_encrypt()
439 walk.iv, &ctx->cword.encrypt, cbc_aes_encrypt()
447 padlock_store_cword(&ctx->cword.decrypt); cbc_aes_encrypt()
456 struct aes_ctx *ctx = blk_aes_ctx(desc->tfm); cbc_aes_decrypt() local
461 padlock_reset_key(&ctx->cword.encrypt); cbc_aes_decrypt()
469 ctx->D, walk.iv, &ctx->cword.decrypt, cbc_aes_decrypt()
477 padlock_store_cword(&ctx->cword.encrypt); cbc_aes_decrypt()
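
aes_set_key() above derives the PadLock control word straight from the key length: 16/24/32-byte keys give 10/12/14 rounds and a ksize code of 0/1/2. The arithmetic in isolation:

    static void padlock_cword_params(unsigned int key_len,
                                     unsigned int *rounds,
                                     unsigned int *ksize)
    {
            *rounds = 10 + (key_len - 16) / 4;  /* 16->10, 24->12, 32->14 */
            *ksize  = (key_len - 16) / 8;       /* 16->0,  24->1,  32->2  */
    }
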
/linux-4.1.27/crypto/asymmetric_keys/
H A Dpkcs7_parser.c88 struct pkcs7_parse_context *ctx; pkcs7_parse_message() local
92 ctx = kzalloc(sizeof(struct pkcs7_parse_context), GFP_KERNEL); pkcs7_parse_message()
93 if (!ctx) pkcs7_parse_message()
95 ctx->msg = kzalloc(sizeof(struct pkcs7_message), GFP_KERNEL); pkcs7_parse_message()
96 if (!ctx->msg) pkcs7_parse_message()
98 ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL); pkcs7_parse_message()
99 if (!ctx->sinfo) pkcs7_parse_message()
102 ctx->data = (unsigned long)data; pkcs7_parse_message()
103 ctx->ppcerts = &ctx->certs; pkcs7_parse_message()
104 ctx->ppsinfo = &ctx->msg->signed_infos; pkcs7_parse_message()
107 ret = asn1_ber_decoder(&pkcs7_decoder, ctx, data, datalen); pkcs7_parse_message()
113 msg = ctx->msg; pkcs7_parse_message()
114 ctx->msg = NULL; pkcs7_parse_message()
117 while (ctx->certs) { pkcs7_parse_message()
118 struct x509_certificate *cert = ctx->certs; pkcs7_parse_message()
119 ctx->certs = cert->next; pkcs7_parse_message()
122 pkcs7_free_signed_info(ctx->sinfo); pkcs7_parse_message()
124 pkcs7_free_message(ctx->msg); pkcs7_parse_message()
126 kfree(ctx); pkcs7_parse_message()
167 struct pkcs7_parse_context *ctx = context; pkcs7_note_OID() local
169 ctx->last_oid = look_up_OID(value, vlen); pkcs7_note_OID()
170 if (ctx->last_oid == OID__NR) { pkcs7_note_OID()
174 (unsigned long)value - ctx->data, buffer); pkcs7_note_OID()
186 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_digest_algo() local
188 switch (ctx->last_oid) { pkcs7_sig_note_digest_algo()
190 ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_MD4; pkcs7_sig_note_digest_algo()
193 ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_MD5; pkcs7_sig_note_digest_algo()
196 ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA1; pkcs7_sig_note_digest_algo()
199 ctx->sinfo->sig.pkey_hash_algo = HASH_ALGO_SHA256; pkcs7_sig_note_digest_algo()
202 printk("Unsupported digest algo: %u\n", ctx->last_oid); pkcs7_sig_note_digest_algo()
215 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_pkey_algo() local
217 switch (ctx->last_oid) { pkcs7_sig_note_pkey_algo()
219 ctx->sinfo->sig.pkey_algo = PKEY_ALGO_RSA; pkcs7_sig_note_pkey_algo()
222 printk("Unsupported pkey algo: %u\n", ctx->last_oid); pkcs7_sig_note_pkey_algo()
235 struct pkcs7_parse_context *ctx = context; pkcs7_extract_cert() local
240 tag, (unsigned long)ctx - ctx->data); pkcs7_extract_cert()
259 x509->index = ++ctx->x509_index; pkcs7_extract_cert()
263 *ctx->ppcerts = x509; pkcs7_extract_cert()
264 ctx->ppcerts = &x509->next; pkcs7_extract_cert()
275 struct pkcs7_parse_context *ctx = context; pkcs7_note_certificate_list() local
279 *ctx->ppcerts = ctx->msg->certs; pkcs7_note_certificate_list()
280 ctx->msg->certs = ctx->certs; pkcs7_note_certificate_list()
281 ctx->certs = NULL; pkcs7_note_certificate_list()
282 ctx->ppcerts = &ctx->certs; pkcs7_note_certificate_list()
294 struct pkcs7_parse_context *ctx = context; pkcs7_note_data() local
298 ctx->msg->data = value; pkcs7_note_data()
299 ctx->msg->data_len = vlen; pkcs7_note_data()
300 ctx->msg->data_hdrlen = hdrlen; pkcs7_note_data()
301 ctx->msg->data_type = ctx->last_oid; pkcs7_note_data()
312 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_authenticated_attr() local
316 switch (ctx->last_oid) { pkcs7_sig_note_authenticated_attr()
320 ctx->sinfo->msgdigest = value; pkcs7_sig_note_authenticated_attr()
321 ctx->sinfo->msgdigest_len = vlen; pkcs7_sig_note_authenticated_attr()
335 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_set_of_authattrs() local
338 ctx->sinfo->authattrs = value - (hdrlen - 1); pkcs7_sig_note_set_of_authattrs()
339 ctx->sinfo->authattrs_len = vlen + (hdrlen - 1); pkcs7_sig_note_set_of_authattrs()
350 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_serial() local
351 ctx->raw_serial = value; pkcs7_sig_note_serial()
352 ctx->raw_serial_size = vlen; pkcs7_sig_note_serial()
363 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_issuer() local
364 ctx->raw_issuer = value; pkcs7_sig_note_issuer()
365 ctx->raw_issuer_size = vlen; pkcs7_sig_note_issuer()
376 struct pkcs7_parse_context *ctx = context; pkcs7_sig_note_signature() local
379 BUG_ON(ctx->sinfo->sig.pkey_algo != PKEY_ALGO_RSA); pkcs7_sig_note_signature()
385 ctx->sinfo->sig.mpi[0] = mpi; pkcs7_sig_note_signature()
386 ctx->sinfo->sig.nr_mpi = 1; pkcs7_sig_note_signature()
397 struct pkcs7_parse_context *ctx = context; pkcs7_note_signed_info() local
398 struct pkcs7_signed_info *sinfo = ctx->sinfo; pkcs7_note_signed_info()
402 kid = asymmetric_key_generate_id(ctx->raw_serial, pkcs7_note_signed_info()
403 ctx->raw_serial_size, pkcs7_note_signed_info()
404 ctx->raw_issuer, pkcs7_note_signed_info()
405 ctx->raw_issuer_size); pkcs7_note_signed_info()
410 sinfo->index = ++ctx->sinfo_index; pkcs7_note_signed_info()
411 *ctx->ppsinfo = sinfo; pkcs7_note_signed_info()
412 ctx->ppsinfo = &sinfo->next; pkcs7_note_signed_info()
413 ctx->sinfo = kzalloc(sizeof(struct pkcs7_signed_info), GFP_KERNEL); pkcs7_note_signed_info()
414 if (!ctx->sinfo) pkcs7_note_signed_info()
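
pkcs7_parse_message() above allocates its context, message and signed-info in sequence and unwinds exactly what it already obtained on failure. A minimal userspace sketch of that staged error path (calloc standing in for kzalloc, placeholder struct bodies):

    #include <stdlib.h>

    struct msg   { int dummy; };
    struct sinfo { int dummy; };
    struct pctx  { struct msg *msg; struct sinfo *sinfo; };

    static struct pctx *pctx_alloc(void)
    {
            struct pctx *ctx = calloc(1, sizeof(*ctx));

            if (!ctx)
                    return NULL;
            ctx->msg = calloc(1, sizeof(*ctx->msg));
            if (!ctx->msg)
                    goto err_ctx;
            ctx->sinfo = calloc(1, sizeof(*ctx->sinfo));
            if (!ctx->sinfo)
                    goto err_msg;
            return ctx;

    err_msg:
            free(ctx->msg);
    err_ctx:
            free(ctx);
            return NULL;
    }
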
H A Dx509_cert_parser.c71 struct x509_parse_context *ctx; x509_cert_parse() local
82 ctx = kzalloc(sizeof(struct x509_parse_context), GFP_KERNEL); x509_cert_parse()
83 if (!ctx) x509_cert_parse()
86 ctx->cert = cert; x509_cert_parse()
87 ctx->data = (unsigned long)data; x509_cert_parse()
90 ret = asn1_ber_decoder(&x509_decoder, ctx, data, datalen); x509_cert_parse()
95 if (ctx->raw_akid) { x509_cert_parse()
97 ctx->raw_akid_size, ctx->raw_akid_size, ctx->raw_akid); x509_cert_parse()
98 ret = asn1_ber_decoder(&x509_akid_decoder, ctx, x509_cert_parse()
99 ctx->raw_akid, ctx->raw_akid_size); x509_cert_parse()
107 ret = asn1_ber_decoder(&x509_rsakey_decoder, ctx, x509_cert_parse()
108 ctx->key, ctx->key_size); x509_cert_parse()
123 kfree(ctx); x509_cert_parse()
127 kfree(ctx); x509_cert_parse()
143 struct x509_parse_context *ctx = context; x509_note_OID() local
145 ctx->last_oid = look_up_OID(value, vlen); x509_note_OID()
146 if (ctx->last_oid == OID__NR) { x509_note_OID()
150 (unsigned long)value - ctx->data, buffer); x509_note_OID()
163 struct x509_parse_context *ctx = context; x509_note_tbs_certificate() local
166 hdrlen, tag, (unsigned long)value - ctx->data, vlen); x509_note_tbs_certificate()
168 ctx->cert->tbs = value - hdrlen; x509_note_tbs_certificate()
169 ctx->cert->tbs_size = vlen + hdrlen; x509_note_tbs_certificate()
180 struct x509_parse_context *ctx = context; x509_note_pkey_algo() local
182 pr_debug("PubKey Algo: %u\n", ctx->last_oid); x509_note_pkey_algo()
184 switch (ctx->last_oid) { x509_note_pkey_algo()
191 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_MD5; x509_note_pkey_algo()
192 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; x509_note_pkey_algo()
196 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA1; x509_note_pkey_algo()
197 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; x509_note_pkey_algo()
201 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA256; x509_note_pkey_algo()
202 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; x509_note_pkey_algo()
206 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA384; x509_note_pkey_algo()
207 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; x509_note_pkey_algo()
211 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA512; x509_note_pkey_algo()
212 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; x509_note_pkey_algo()
216 ctx->cert->sig.pkey_hash_algo = HASH_ALGO_SHA224; x509_note_pkey_algo()
217 ctx->cert->sig.pkey_algo = PKEY_ALGO_RSA; x509_note_pkey_algo()
221 ctx->algo_oid = ctx->last_oid; x509_note_pkey_algo()
232 struct x509_parse_context *ctx = context; x509_note_signature() local
234 pr_debug("Signature type: %u size %zu\n", ctx->last_oid, vlen); x509_note_signature()
236 if (ctx->last_oid != ctx->algo_oid) { x509_note_signature()
238 ctx->algo_oid, ctx->last_oid); x509_note_signature()
242 ctx->cert->raw_sig = value; x509_note_signature()
243 ctx->cert->raw_sig_size = vlen; x509_note_signature()
254 struct x509_parse_context *ctx = context; x509_note_serial() local
255 ctx->cert->raw_serial = value; x509_note_serial()
256 ctx->cert->raw_serial_size = vlen; x509_note_serial()
267 struct x509_parse_context *ctx = context; x509_extract_name_segment() local
269 switch (ctx->last_oid) { x509_extract_name_segment()
271 ctx->cn_size = vlen; x509_extract_name_segment()
272 ctx->cn_offset = (unsigned long)value - ctx->data; x509_extract_name_segment()
275 ctx->o_size = vlen; x509_extract_name_segment()
276 ctx->o_offset = (unsigned long)value - ctx->data; x509_extract_name_segment()
279 ctx->email_size = vlen; x509_extract_name_segment()
280 ctx->email_offset = (unsigned long)value - ctx->data; x509_extract_name_segment()
292 static int x509_fabricate_name(struct x509_parse_context *ctx, size_t hdrlen, x509_fabricate_name() argument
296 const void *name, *data = (const void *)ctx->data; x509_fabricate_name()
304 if (!ctx->cn_size && !ctx->o_size && !ctx->email_size) { x509_fabricate_name()
312 if (ctx->cn_size && ctx->o_size) { x509_fabricate_name()
316 namesize = ctx->cn_size; x509_fabricate_name()
317 name = data + ctx->cn_offset; x509_fabricate_name()
318 if (ctx->cn_size >= ctx->o_size && x509_fabricate_name()
319 memcmp(data + ctx->cn_offset, data + ctx->o_offset, x509_fabricate_name()
320 ctx->o_size) == 0) x509_fabricate_name()
322 if (ctx->cn_size >= 7 && x509_fabricate_name()
323 ctx->o_size >= 7 && x509_fabricate_name()
324 memcmp(data + ctx->cn_offset, data + ctx->o_offset, 7) == 0) x509_fabricate_name()
327 buffer = kmalloc(ctx->o_size + 2 + ctx->cn_size + 1, x509_fabricate_name()
333 data + ctx->o_offset, ctx->o_size); x509_fabricate_name()
334 buffer[ctx->o_size + 0] = ':'; x509_fabricate_name()
335 buffer[ctx->o_size + 1] = ' '; x509_fabricate_name()
336 memcpy(buffer + ctx->o_size + 2, x509_fabricate_name()
337 data + ctx->cn_offset, ctx->cn_size); x509_fabricate_name()
338 buffer[ctx->o_size + 2 + ctx->cn_size] = 0; x509_fabricate_name()
341 } else if (ctx->cn_size) { x509_fabricate_name()
342 namesize = ctx->cn_size; x509_fabricate_name()
343 name = data + ctx->cn_offset; x509_fabricate_name()
344 } else if (ctx->o_size) { x509_fabricate_name()
345 namesize = ctx->o_size; x509_fabricate_name()
346 name = data + ctx->o_offset; x509_fabricate_name()
348 namesize = ctx->email_size; x509_fabricate_name()
349 name = data + ctx->email_offset; x509_fabricate_name()
361 ctx->cn_size = 0; x509_fabricate_name()
362 ctx->o_size = 0; x509_fabricate_name()
363 ctx->email_size = 0; x509_fabricate_name()
371 struct x509_parse_context *ctx = context; x509_note_issuer() local
372 ctx->cert->raw_issuer = value; x509_note_issuer()
373 ctx->cert->raw_issuer_size = vlen; x509_note_issuer()
374 return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->issuer, vlen); x509_note_issuer()
381 struct x509_parse_context *ctx = context; x509_note_subject() local
382 ctx->cert->raw_subject = value; x509_note_subject()
383 ctx->cert->raw_subject_size = vlen; x509_note_subject()
384 return x509_fabricate_name(ctx, hdrlen, tag, &ctx->cert->subject, vlen); x509_note_subject()
394 struct x509_parse_context *ctx = context; x509_extract_key_data() local
396 if (ctx->last_oid != OID_rsaEncryption) x509_extract_key_data()
399 ctx->cert->pub->pkey_algo = PKEY_ALGO_RSA; x509_extract_key_data()
402 ctx->key = value + 1; x509_extract_key_data()
403 ctx->key_size = vlen - 1; x509_extract_key_data()
414 struct x509_parse_context *ctx = context; rsa_extract_mpi() local
417 if (ctx->nr_mpi >= ARRAY_SIZE(ctx->cert->pub->mpi)) { rsa_extract_mpi()
426 ctx->cert->pub->mpi[ctx->nr_mpi++] = mpi; rsa_extract_mpi()
440 struct x509_parse_context *ctx = context; x509_process_extension() local
444 pr_debug("Extension: %u\n", ctx->last_oid); x509_process_extension()
446 if (ctx->last_oid == OID_subjectKeyIdentifier) { x509_process_extension()
448 if (ctx->cert->skid || vlen < 3) x509_process_extension()
455 ctx->cert->raw_skid_size = vlen; x509_process_extension()
456 ctx->cert->raw_skid = v; x509_process_extension()
457 kid = asymmetric_key_generate_id(ctx->cert->raw_subject, x509_process_extension()
458 ctx->cert->raw_subject_size, x509_process_extension()
462 ctx->cert->skid = kid; x509_process_extension()
467 if (ctx->last_oid == OID_authorityKeyIdentifier) { x509_process_extension()
469 ctx->raw_akid = v; x509_process_extension()
470 ctx->raw_akid_size = vlen; x509_process_extension()
574 struct x509_parse_context *ctx = context; x509_note_not_before() local
575 return x509_decode_time(&ctx->cert->valid_from, hdrlen, tag, value, vlen); x509_note_not_before()
582 struct x509_parse_context *ctx = context; x509_note_not_after() local
583 return x509_decode_time(&ctx->cert->valid_to, hdrlen, tag, value, vlen); x509_note_not_after()
593 struct x509_parse_context *ctx = context; x509_akid_note_kid() local
598 if (ctx->cert->akid_skid) x509_akid_note_kid()
601 kid = asymmetric_key_generate_id(ctx->cert->raw_issuer, x509_akid_note_kid()
602 ctx->cert->raw_issuer_size, x509_akid_note_kid()
607 ctx->cert->akid_skid = kid; x509_akid_note_kid()
618 struct x509_parse_context *ctx = context; x509_akid_note_name() local
622 ctx->akid_raw_issuer = value; x509_akid_note_name()
623 ctx->akid_raw_issuer_size = vlen; x509_akid_note_name()
634 struct x509_parse_context *ctx = context; x509_akid_note_serial() local
639 if (!ctx->akid_raw_issuer || ctx->cert->akid_id) x509_akid_note_serial()
644 ctx->akid_raw_issuer, x509_akid_note_serial()
645 ctx->akid_raw_issuer_size); x509_akid_note_serial()
650 ctx->cert->akid_id = kid; x509_akid_note_serial()
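
x509_fabricate_name() above prints the subject as "O: CN", sizing the buffer as o_size + 2 + cn_size + 1 for the organisation, the ": " separator, the common name and the terminating NUL. The same join in isolation:

    #include <stdlib.h>
    #include <string.h>

    static char *join_o_cn(const char *o, size_t o_size,
                           const char *cn, size_t cn_size)
    {
            char *buf = malloc(o_size + 2 + cn_size + 1);

            if (!buf)
                    return NULL;
            memcpy(buf, o, o_size);
            buf[o_size + 0] = ':';
            buf[o_size + 1] = ' ';
            memcpy(buf + o_size + 2, cn, cn_size);
            buf[o_size + 2 + cn_size] = '\0';
            return buf;
    }
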
H A Dverify_pefile.c27 struct pefile_context *ctx) pefile_parse_binary()
64 ctx->image_checksum_offset = pefile_parse_binary()
66 ctx->header_size = pe32->header_size; pefile_parse_binary()
68 ctx->n_data_dirents = pe32->data_dirs; pefile_parse_binary()
73 ctx->image_checksum_offset = pefile_parse_binary()
75 ctx->header_size = pe64->header_size; pefile_parse_binary()
77 ctx->n_data_dirents = pe64->data_dirs; pefile_parse_binary()
85 pr_debug("checksum @ %x\n", ctx->image_checksum_offset); pefile_parse_binary()
86 pr_debug("header size = %x\n", ctx->header_size); pefile_parse_binary()
88 if (cursor >= ctx->header_size || ctx->header_size >= datalen) pefile_parse_binary()
91 if (ctx->n_data_dirents > (ctx->header_size - cursor) / sizeof(*dde)) pefile_parse_binary()
95 cursor += sizeof(*dde) * ctx->n_data_dirents; pefile_parse_binary()
97 ctx->cert_dirent_offset = pefile_parse_binary()
99 ctx->certs_size = ddir->certs.size; pefile_parse_binary()
106 chkaddr(ctx->header_size, ddir->certs.virtual_address, pefile_parse_binary()
108 ctx->sig_offset = ddir->certs.virtual_address; pefile_parse_binary()
109 ctx->sig_len = ddir->certs.size; pefile_parse_binary()
111 ctx->sig_len, ctx->sig_offset, pefile_parse_binary()
112 ctx->sig_len, pebuf + ctx->sig_offset); pefile_parse_binary()
114 ctx->n_sections = pe->sections; pefile_parse_binary()
115 if (ctx->n_sections > (ctx->header_size - cursor) / sizeof(*sec)) pefile_parse_binary()
117 ctx->secs = secs = pebuf + cursor; pefile_parse_binary()
127 struct pefile_context *ctx) pefile_strip_sig_wrapper()
133 if (ctx->sig_len < sizeof(wrapper)) { pefile_strip_sig_wrapper()
138 memcpy(&wrapper, pebuf + ctx->sig_offset, sizeof(wrapper)); pefile_strip_sig_wrapper()
145 if (round_up(wrapper.length, 8) != ctx->sig_len) { pefile_strip_sig_wrapper()
163 ctx->sig_len = wrapper.length; pefile_strip_sig_wrapper()
164 ctx->sig_offset += sizeof(wrapper); pefile_strip_sig_wrapper()
165 ctx->sig_len -= sizeof(wrapper); pefile_strip_sig_wrapper()
166 if (ctx->sig_len < 4) { pefile_strip_sig_wrapper()
172 pkcs7 = pebuf + ctx->sig_offset; pefile_strip_sig_wrapper()
195 if (len <= ctx->sig_len) { pefile_strip_sig_wrapper()
197 ctx->sig_len = len; pefile_strip_sig_wrapper()
246 struct pefile_context *ctx, pefile_digest_pe_contents()
255 ret = crypto_shash_update(desc, pebuf, ctx->image_checksum_offset); pefile_digest_pe_contents()
259 tmp = ctx->image_checksum_offset + sizeof(uint32_t); pefile_digest_pe_contents()
261 ctx->cert_dirent_offset - tmp); pefile_digest_pe_contents()
265 tmp = ctx->cert_dirent_offset + sizeof(struct data_dirent); pefile_digest_pe_contents()
266 ret = crypto_shash_update(desc, pebuf + tmp, ctx->header_size - tmp); pefile_digest_pe_contents()
270 canon = kcalloc(ctx->n_sections, sizeof(unsigned), GFP_KERNEL); pefile_digest_pe_contents()
278 for (loop = 1; loop < ctx->n_sections; loop++) { pefile_digest_pe_contents()
280 if (pefile_compare_shdrs(&ctx->secs[canon[i]], pefile_digest_pe_contents()
281 &ctx->secs[loop]) > 0) { pefile_digest_pe_contents()
290 hashed_bytes = ctx->header_size; pefile_digest_pe_contents()
291 for (loop = 0; loop < ctx->n_sections; loop++) { pefile_digest_pe_contents()
293 if (ctx->secs[i].raw_data_size == 0) pefile_digest_pe_contents()
296 pebuf + ctx->secs[i].data_addr, pefile_digest_pe_contents()
297 ctx->secs[i].raw_data_size); pefile_digest_pe_contents()
302 hashed_bytes += ctx->secs[i].raw_data_size; pefile_digest_pe_contents()
307 tmp = hashed_bytes + ctx->certs_size; pefile_digest_pe_contents()
323 struct pefile_context *ctx) pefile_digest_pe()
331 kenter(",%u", ctx->digest_algo); pefile_digest_pe()
336 tfm = crypto_alloc_shash(hash_algo_name[ctx->digest_algo], 0, 0); pefile_digest_pe()
343 if (digest_size != ctx->digest_len) { pefile_digest_pe()
345 digest_size, ctx->digest_len); pefile_digest_pe()
362 ret = pefile_digest_pe_contents(pebuf, pelen, ctx, desc); pefile_digest_pe()
371 pr_debug("Digest calc = [%*ph]\n", ctx->digest_len, digest); pefile_digest_pe()
376 if (memcmp(digest, ctx->digest, ctx->digest_len) != 0) { pefile_digest_pe()
423 struct pefile_context ctx; verify_pefile_signature() local
430 memset(&ctx, 0, sizeof(ctx)); verify_pefile_signature()
431 ret = pefile_parse_binary(pebuf, pelen, &ctx); verify_pefile_signature()
435 ret = pefile_strip_sig_wrapper(pebuf, &ctx); verify_pefile_signature()
439 pkcs7 = pkcs7_parse_message(pebuf + ctx.sig_offset, ctx.sig_len); verify_pefile_signature()
442 ctx.pkcs7 = pkcs7; verify_pefile_signature()
444 ret = pkcs7_get_content_data(ctx.pkcs7, &data, &datalen, false); verify_pefile_signature()
451 ret = mscode_parse(&ctx); verify_pefile_signature()
456 ctx.digest_len, ctx.digest_len, ctx.digest); verify_pefile_signature()
461 ret = pefile_digest_pe(pebuf, pelen, &ctx); verify_pefile_signature()
472 pkcs7_free_message(ctx.pkcs7); verify_pefile_signature()
26 pefile_parse_binary(const void *pebuf, unsigned int pelen, struct pefile_context *ctx) pefile_parse_binary() argument
126 pefile_strip_sig_wrapper(const void *pebuf, struct pefile_context *ctx) pefile_strip_sig_wrapper() argument
245 pefile_digest_pe_contents(const void *pebuf, unsigned int pelen, struct pefile_context *ctx, struct shash_desc *desc) pefile_digest_pe_contents() argument
322 pefile_digest_pe(const void *pebuf, unsigned int pelen, struct pefile_context *ctx) pefile_digest_pe() argument
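
pefile_strip_sig_wrapper() above rejects a signature whose declared wrapper length, once padded to the on-disk 8-byte alignment, differs from the certificate-table size. That check spelled out:

    /* round_up(wrapper.length, 8) == ctx->sig_len, in plain C */
    static int wrapper_len_ok(unsigned int wrapper_len, unsigned int sig_len)
    {
            unsigned int padded = (wrapper_len + 7u) & ~7u;

            return padded == sig_len;
    }
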
H A Dmscode_parser.c24 int mscode_parse(struct pefile_context *ctx) mscode_parse() argument
30 ret = pkcs7_get_content_data(ctx->pkcs7, &content_data, &data_len, 1); mscode_parse()
40 return asn1_ber_decoder(&mscode_decoder, ctx, content_data, data_len); mscode_parse()
82 struct pefile_context *ctx = context; mscode_note_digest_algo() local
89 ctx->digest_algo = HASH_ALGO_MD4; mscode_note_digest_algo()
92 ctx->digest_algo = HASH_ALGO_MD5; mscode_note_digest_algo()
95 ctx->digest_algo = HASH_ALGO_SHA1; mscode_note_digest_algo()
98 ctx->digest_algo = HASH_ALGO_SHA256; mscode_note_digest_algo()
121 struct pefile_context *ctx = context; mscode_note_digest() local
123 ctx->digest = value; mscode_note_digest()
124 ctx->digest_len = vlen; mscode_note_digest()
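
The mscode_note_* callbacks above receive an opaque context from asn1_ber_decoder() and cast it back to the parser state; the digest callback only borrows pointers into the decoded blob rather than copying it. A sketch of that callback shape (the struct is illustrative, the signature mirrors the ones above):

    #include <stddef.h>

    struct mini_ctx {
            const void *digest;     /* points into the caller's blob */
            size_t digest_len;
    };

    static int note_digest(void *context, size_t hdrlen, unsigned char tag,
                           const void *value, size_t vlen)
    {
            struct mini_ctx *ctx = context;

            (void)hdrlen;
            (void)tag;
            ctx->digest = value;
            ctx->digest_len = vlen;
            return 0;
    }
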
/linux-4.1.27/arch/x86/include/asm/crypto/
H A Dserpent-sse2.h11 asmlinkage void __serpent_enc_blk_4way(struct serpent_ctx *ctx, u8 *dst,
13 asmlinkage void serpent_dec_blk_4way(struct serpent_ctx *ctx, u8 *dst,
16 static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, serpent_enc_blk_xway() argument
19 __serpent_enc_blk_4way(ctx, dst, src, false); serpent_enc_blk_xway()
22 static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, serpent_enc_blk_xway_xor() argument
25 __serpent_enc_blk_4way(ctx, dst, src, true); serpent_enc_blk_xway_xor()
28 static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, serpent_dec_blk_xway() argument
31 serpent_dec_blk_4way(ctx, dst, src); serpent_dec_blk_xway()
38 asmlinkage void __serpent_enc_blk_8way(struct serpent_ctx *ctx, u8 *dst,
40 asmlinkage void serpent_dec_blk_8way(struct serpent_ctx *ctx, u8 *dst,
43 static inline void serpent_enc_blk_xway(struct serpent_ctx *ctx, u8 *dst, serpent_enc_blk_xway() argument
46 __serpent_enc_blk_8way(ctx, dst, src, false); serpent_enc_blk_xway()
49 static inline void serpent_enc_blk_xway_xor(struct serpent_ctx *ctx, u8 *dst, serpent_enc_blk_xway_xor() argument
52 __serpent_enc_blk_8way(ctx, dst, src, true); serpent_enc_blk_xway_xor()
55 static inline void serpent_dec_blk_xway(struct serpent_ctx *ctx, u8 *dst, serpent_dec_blk_xway() argument
58 serpent_dec_blk_8way(ctx, dst, src); serpent_dec_blk_xway()
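
serpent-sse2.h above exports one serpent_enc_blk_xway() name over a 4-way body on 32-bit x86 and an 8-way body on x86-64, so glue code never hardcodes the width; the header pairs this with a parallel-blocks constant along these lines:

    /* width chosen at compile time, as the header above does */
    #ifdef CONFIG_X86_64
    # define SERPENT_PARALLEL_BLOCKS 8  /* backed by __serpent_enc_blk_8way */
    #else
    # define SERPENT_PARALLEL_BLOCKS 4  /* backed by __serpent_enc_blk_4way */
    #endif
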
H A Dcamellia.h40 asmlinkage void __camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst,
42 asmlinkage void camellia_dec_blk(struct camellia_ctx *ctx, u8 *dst,
46 asmlinkage void __camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst,
48 asmlinkage void camellia_dec_blk_2way(struct camellia_ctx *ctx, u8 *dst,
52 asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
54 asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
57 asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
59 asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
62 asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
64 asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
67 static inline void camellia_enc_blk(struct camellia_ctx *ctx, u8 *dst, camellia_enc_blk() argument
70 __camellia_enc_blk(ctx, dst, src, false); camellia_enc_blk()
73 static inline void camellia_enc_blk_xor(struct camellia_ctx *ctx, u8 *dst, camellia_enc_blk_xor() argument
76 __camellia_enc_blk(ctx, dst, src, true); camellia_enc_blk_xor()
79 static inline void camellia_enc_blk_2way(struct camellia_ctx *ctx, u8 *dst, camellia_enc_blk_2way() argument
82 __camellia_enc_blk_2way(ctx, dst, src, false); camellia_enc_blk_2way()
85 static inline void camellia_enc_blk_xor_2way(struct camellia_ctx *ctx, u8 *dst, camellia_enc_blk_xor_2way() argument
88 __camellia_enc_blk_2way(ctx, dst, src, true); camellia_enc_blk_xor_2way()
92 extern void camellia_decrypt_cbc_2way(void *ctx, u128 *dst, const u128 *src);
93 extern void camellia_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
95 extern void camellia_crypt_ctr_2way(void *ctx, u128 *dst, const u128 *src,
98 extern void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
99 extern void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
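
The plain/xor wrapper pairs in camellia.h above serve different glue paths: ECB stores the cipher output, while CTR encrypts the counter block and xors it into the data in place. A sketch of how a CTR loop would use the xor variant (stand-in names, 16-byte blocks, big-endian counter):

    #include <stdint.h>

    /* assumed primitive shaped like __camellia_enc_blk(..., true) above:
     * encrypt src and xor the result into dst */
    void enc_blk_xor(void *ctx, uint8_t *dst, const uint8_t *src);

    static void ctr_blocks(void *ctx, uint8_t *dst, uint8_t ctr[16],
                           unsigned int nblocks)
    {
            while (nblocks--) {
                    /* dst holds plaintext; this turns it into ciphertext */
                    enc_blk_xor(ctx, dst, ctr);
                    dst += 16;
                    /* 128-bit big-endian counter increment */
                    for (int i = 15; i >= 0 && ++ctr[i] == 0; i--)
                            ;
            }
    }
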
H A Daes.h7 void crypto_aes_encrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
9 void crypto_aes_decrypt_x86(struct crypto_aes_ctx *ctx, u8 *dst,
H A Dtwofish.h20 asmlinkage void twofish_enc_blk(struct twofish_ctx *ctx, u8 *dst,
22 asmlinkage void twofish_dec_blk(struct twofish_ctx *ctx, u8 *dst,
26 asmlinkage void __twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst,
28 asmlinkage void twofish_dec_blk_3way(struct twofish_ctx *ctx, u8 *dst,
32 extern void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src);
33 extern void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src,
35 extern void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src,
H A Dserpent-avx.h19 asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
21 asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
24 asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
26 asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
29 asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
31 asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
34 extern void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src,
37 extern void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv);
38 extern void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv);
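The serpent XTS callbacks carry the tweak in le128 *iv; between consecutive blocks the tweak is multiplied by x in GF(2^128). That step is small enough to show self-contained (little-endian convention, reduction polynomial 0x87, t[0] holding the low 64 bits):

    static void xts_tweak_double(u64 t[2])
    {
            u64 carry = t[1] >> 63;

            t[1] = (t[1] << 1) | (t[0] >> 63);
            t[0] = (t[0] << 1) ^ (0x87 * carry);
    }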
/linux-4.1.27/drivers/gpu/drm/radeon/
H A Datom.c56 struct atom_context *ctx; member in struct:__anon4334
66 static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
67 int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
102 static uint32_t atom_iio_execute(struct atom_context *ctx, int base, atom_iio_execute() argument
105 struct radeon_device *rdev = ctx->card->dev->dev_private; atom_iio_execute()
114 temp = ctx->card->ioreg_read(ctx->card, CU16(base + 1)); atom_iio_execute()
119 (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1)); atom_iio_execute()
120 ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp); atom_iio_execute()
160 ((ctx-> atom_iio_execute()
177 static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr, atom_get_src_int() argument
181 struct atom_context *gctx = ctx->ctx; atom_get_src_int()
225 val = get_unaligned_le32((u32 *)&ctx->ps[idx]); atom_get_src_int()
263 val = ctx->ws[idx]; atom_get_src_int()
365 static void atom_skip_src_int(atom_exec_context *ctx, uint8_t attr, int *ptr) atom_skip_src_int() argument
401 static uint32_t atom_get_src(atom_exec_context *ctx, uint8_t attr, int *ptr) atom_get_src() argument
403 return atom_get_src_int(ctx, attr, ptr, NULL, 1); atom_get_src()
406 static uint32_t atom_get_src_direct(atom_exec_context *ctx, uint8_t align, int *ptr) atom_get_src_direct() argument
432 static uint32_t atom_get_dst(atom_exec_context *ctx, int arg, uint8_t attr, atom_get_dst() argument
435 return atom_get_src_int(ctx, atom_get_dst()
441 static void atom_skip_dst(atom_exec_context *ctx, int arg, uint8_t attr, int *ptr) atom_skip_dst() argument
443 atom_skip_src_int(ctx, atom_skip_dst()
448 static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr, atom_put_dst() argument
454 struct atom_context *gctx = ctx->ctx; atom_put_dst()
501 ctx->ps[idx] = cpu_to_le32(val); atom_put_dst()
533 ctx->ws[idx] = val; atom_put_dst()
587 static void atom_op_add(atom_exec_context *ctx, int *ptr, int arg) atom_op_add() argument
593 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); atom_op_add()
595 src = atom_get_src(ctx, attr, ptr); atom_op_add()
598 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); atom_op_add()
601 static void atom_op_and(atom_exec_context *ctx, int *ptr, int arg) atom_op_and() argument
607 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); atom_op_and()
609 src = atom_get_src(ctx, attr, ptr); atom_op_and()
612 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); atom_op_and()
615 static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg) atom_op_beep() argument
620 static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg) atom_op_calltable() argument
629 if (U16(ctx->ctx->cmd_table + 4 + 2 * idx)) atom_op_calltable()
630 r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift); atom_op_calltable()
632 ctx->abort = true; atom_op_calltable()
636 static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg) atom_op_clear() argument
643 atom_get_dst(ctx, arg, attr, ptr, &saved, 0); atom_op_clear()
645 atom_put_dst(ctx, arg, attr, &dptr, 0, saved); atom_op_clear()
648 static void atom_op_compare(atom_exec_context *ctx, int *ptr, int arg) atom_op_compare() argument
653 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); atom_op_compare()
655 src = atom_get_src(ctx, attr, ptr); atom_op_compare()
656 ctx->ctx->cs_equal = (dst == src); atom_op_compare()
657 ctx->ctx->cs_above = (dst > src); atom_op_compare()
658 SDEBUG(" result: %s %s\n", ctx->ctx->cs_equal ? "EQ" : "NE", atom_op_compare()
659 ctx->ctx->cs_above ? "GT" : "LE"); atom_op_compare()
662 static void atom_op_delay(atom_exec_context *ctx, int *ptr, int arg) atom_op_delay() argument
674 static void atom_op_div(atom_exec_context *ctx, int *ptr, int arg) atom_op_div() argument
679 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); atom_op_div()
681 src = atom_get_src(ctx, attr, ptr); atom_op_div()
683 ctx->ctx->divmul[0] = dst / src; atom_op_div()
684 ctx->ctx->divmul[1] = dst % src; atom_op_div()
686 ctx->ctx->divmul[0] = 0; atom_op_div()
687 ctx->ctx->divmul[1] = 0; atom_op_div()
691 static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg) atom_op_eot() argument
696 static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg) atom_op_jump() argument
704 execute = ctx->ctx->cs_above; atom_op_jump()
707 execute = ctx->ctx->cs_above || ctx->ctx->cs_equal; atom_op_jump()
713 execute = !(ctx->ctx->cs_above || ctx->ctx->cs_equal); atom_op_jump()
716 execute = !ctx->ctx->cs_above; atom_op_jump()
719 execute = ctx->ctx->cs_equal; atom_op_jump()
722 execute = !ctx->ctx->cs_equal; atom_op_jump()
729 if (ctx->last_jump == (ctx->start + target)) { atom_op_jump()
731 if (time_after(cjiffies, ctx->last_jump_jiffies)) { atom_op_jump()
732 cjiffies -= ctx->last_jump_jiffies; atom_op_jump()
735 ctx->abort = true; atom_op_jump()
739 ctx->last_jump_jiffies = jiffies; atom_op_jump()
742 ctx->last_jump = ctx->start + target; atom_op_jump()
743 ctx->last_jump_jiffies = jiffies; atom_op_jump()
745 *ptr = ctx->start + target; atom_op_jump()
749 static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg) atom_op_mask() argument
755 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); atom_op_mask()
756 mask = atom_get_src_direct(ctx, ((attr >> 3) & 7), ptr); atom_op_mask()
759 src = atom_get_src(ctx, attr, ptr); atom_op_mask()
763 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); atom_op_mask()
766 static void atom_op_move(atom_exec_context *ctx, int *ptr, int arg) atom_op_move() argument
772 atom_get_dst(ctx, arg, attr, ptr, &saved, 0); atom_op_move()
774 atom_skip_dst(ctx, arg, attr, ptr); atom_op_move()
778 src = atom_get_src(ctx, attr, ptr); atom_op_move()
780 atom_put_dst(ctx, arg, attr, &dptr, src, saved); atom_op_move()
783 static void atom_op_mul(atom_exec_context *ctx, int *ptr, int arg) atom_op_mul() argument
788 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); atom_op_mul()
790 src = atom_get_src(ctx, attr, ptr); atom_op_mul()
791 ctx->ctx->divmul[0] = dst * src; atom_op_mul()
794 static void atom_op_nop(atom_exec_context *ctx, int *ptr, int arg) atom_op_nop() argument
799 static void atom_op_or(atom_exec_context *ctx, int *ptr, int arg) atom_op_or() argument
805 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); atom_op_or()
807 src = atom_get_src(ctx, attr, ptr); atom_op_or()
810 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); atom_op_or()
813 static void atom_op_postcard(atom_exec_context *ctx, int *ptr, int arg) atom_op_postcard() argument
819 static void atom_op_repeat(atom_exec_context *ctx, int *ptr, int arg) atom_op_repeat() argument
824 static void atom_op_restorereg(atom_exec_context *ctx, int *ptr, int arg) atom_op_restorereg() argument
829 static void atom_op_savereg(atom_exec_context *ctx, int *ptr, int arg) atom_op_savereg() argument
834 static void atom_op_setdatablock(atom_exec_context *ctx, int *ptr, int arg) atom_op_setdatablock() argument
840 ctx->ctx->data_block = 0; atom_op_setdatablock()
842 ctx->ctx->data_block = ctx->start; atom_op_setdatablock()
844 ctx->ctx->data_block = U16(ctx->ctx->data_table + 4 + 2 * idx); atom_op_setdatablock()
845 SDEBUG(" base: 0x%04X\n", ctx->ctx->data_block); atom_op_setdatablock()
848 static void atom_op_setfbbase(atom_exec_context *ctx, int *ptr, int arg) atom_op_setfbbase() argument
852 ctx->ctx->fb_base = atom_get_src(ctx, attr, ptr); atom_op_setfbbase()
855 static void atom_op_setport(atom_exec_context *ctx, int *ptr, int arg) atom_op_setport() argument
866 ctx->ctx->io_mode = ATOM_IO_MM; atom_op_setport()
868 ctx->ctx->io_mode = ATOM_IO_IIO | port; atom_op_setport()
872 ctx->ctx->io_mode = ATOM_IO_PCI; atom_op_setport()
876 ctx->ctx->io_mode = ATOM_IO_SYSIO; atom_op_setport()
882 static void atom_op_setregblock(atom_exec_context *ctx, int *ptr, int arg) atom_op_setregblock() argument
884 ctx->ctx->reg_block = U16(*ptr); atom_op_setregblock()
886 SDEBUG(" base: 0x%04X\n", ctx->ctx->reg_block); atom_op_setregblock()
889 static void atom_op_shift_left(atom_exec_context *ctx, int *ptr, int arg) atom_op_shift_left() argument
897 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); atom_op_shift_left()
898 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); atom_op_shift_left()
902 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); atom_op_shift_left()
905 static void atom_op_shift_right(atom_exec_context *ctx, int *ptr, int arg) atom_op_shift_right() argument
913 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); atom_op_shift_right()
914 shift = atom_get_src_direct(ctx, ATOM_SRC_BYTE0, ptr); atom_op_shift_right()
918 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); atom_op_shift_right()
921 static void atom_op_shl(atom_exec_context *ctx, int *ptr, int arg) atom_op_shl() argument
928 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); atom_op_shl()
931 shift = atom_get_src(ctx, attr, ptr); atom_op_shl()
937 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); atom_op_shl()
940 static void atom_op_shr(atom_exec_context *ctx, int *ptr, int arg) atom_op_shr() argument
947 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); atom_op_shr()
950 shift = atom_get_src(ctx, attr, ptr); atom_op_shr()
956 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); atom_op_shr()
959 static void atom_op_sub(atom_exec_context *ctx, int *ptr, int arg) atom_op_sub() argument
965 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); atom_op_sub()
967 src = atom_get_src(ctx, attr, ptr); atom_op_sub()
970 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); atom_op_sub()
973 static void atom_op_switch(atom_exec_context *ctx, int *ptr, int arg) atom_op_switch() argument
978 src = atom_get_src(ctx, attr, ptr); atom_op_switch()
984 atom_get_src(ctx, (attr & 0x38) | ATOM_ARG_IMM, atom_op_switch()
989 *ptr = ctx->start + target; atom_op_switch()
1000 static void atom_op_test(atom_exec_context *ctx, int *ptr, int arg) atom_op_test() argument
1005 dst = atom_get_dst(ctx, arg, attr, ptr, NULL, 1); atom_op_test()
1007 src = atom_get_src(ctx, attr, ptr); atom_op_test()
1008 ctx->ctx->cs_equal = ((dst & src) == 0); atom_op_test()
1009 SDEBUG(" result: %s\n", ctx->ctx->cs_equal ? "EQ" : "NE"); atom_op_test()
1012 static void atom_op_xor(atom_exec_context *ctx, int *ptr, int arg) atom_op_xor() argument
1018 dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1); atom_op_xor()
1020 src = atom_get_src(ctx, attr, ptr); atom_op_xor()
1023 atom_put_dst(ctx, arg, attr, &dptr, dst, saved); atom_op_xor()
1026 static void atom_op_debug(atom_exec_context *ctx, int *ptr, int arg) atom_op_debug() argument
1159 static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params) atom_execute_table_locked() argument
1161 int base = CU16(ctx->cmd_table + 4 + 2 * index); atom_execute_table_locked()
1177 ectx.ctx = ctx; atom_execute_table_locked()
1220 int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params) atom_execute_table_scratch_unlocked() argument
1224 mutex_lock(&ctx->mutex); atom_execute_table_scratch_unlocked()
1226 ctx->data_block = 0; atom_execute_table_scratch_unlocked()
1228 ctx->reg_block = 0; atom_execute_table_scratch_unlocked()
1230 ctx->fb_base = 0; atom_execute_table_scratch_unlocked()
1232 ctx->io_mode = ATOM_IO_MM; atom_execute_table_scratch_unlocked()
1234 ctx->divmul[0] = 0; atom_execute_table_scratch_unlocked()
1235 ctx->divmul[1] = 0; atom_execute_table_scratch_unlocked()
1236 r = atom_execute_table_locked(ctx, index, params); atom_execute_table_scratch_unlocked()
1237 mutex_unlock(&ctx->mutex); atom_execute_table_scratch_unlocked()
1241 int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) atom_execute_table() argument
1244 mutex_lock(&ctx->scratch_mutex); atom_execute_table()
1245 r = atom_execute_table_scratch_unlocked(ctx, index, params); atom_execute_table()
1246 mutex_unlock(&ctx->scratch_mutex); atom_execute_table()
1252 static void atom_index_iio(struct atom_context *ctx, int base) atom_index_iio() argument
1254 ctx->iio = kzalloc(2 * 256, GFP_KERNEL); atom_index_iio()
1255 if (!ctx->iio) atom_index_iio()
1258 ctx->iio[CU8(base + 1)] = base + 2; atom_index_iio()
1269 struct atom_context *ctx = atom_parse() local
1275 if (!ctx) atom_parse()
1278 ctx->card = card; atom_parse()
1279 ctx->bios = bios; atom_parse()
1283 kfree(ctx); atom_parse()
1290 kfree(ctx); atom_parse()
1299 kfree(ctx); atom_parse()
1303 ctx->cmd_table = CU16(base + ATOM_ROM_CMD_PTR); atom_parse()
1304 ctx->data_table = CU16(base + ATOM_ROM_DATA_PTR); atom_parse()
1305 atom_index_iio(ctx, CU16(ctx->data_table + ATOM_DATA_IIO_PTR) + 4); atom_parse()
1306 if (!ctx->iio) { atom_parse()
1307 atom_destroy(ctx); atom_parse()
1324 return ctx; atom_parse()
1327 int atom_asic_init(struct atom_context *ctx) atom_asic_init() argument
1329 struct radeon_device *rdev = ctx->card->dev->dev_private; atom_asic_init()
1330 int hwi = CU16(ctx->data_table + ATOM_DATA_FWI_PTR); atom_asic_init()
1341 if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT)) atom_asic_init()
1343 ret = atom_execute_table(ctx, ATOM_CMD_INIT, ps); atom_asic_init()
1350 if (CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_SPDFANCNTL)) atom_asic_init()
1351 atom_execute_table(ctx, ATOM_CMD_SPDFANCNTL, ps); atom_asic_init()
1356 void atom_destroy(struct atom_context *ctx) atom_destroy() argument
1358 kfree(ctx->iio); atom_destroy()
1359 kfree(ctx); atom_destroy()
1362 bool atom_parse_data_header(struct atom_context *ctx, int index, atom_parse_data_header() argument
1367 int idx = CU16(ctx->data_table + offset); atom_parse_data_header()
1368 u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4); atom_parse_data_header()
1383 bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev, atom_parse_cmd_header() argument
1387 int idx = CU16(ctx->cmd_table + offset); atom_parse_cmd_header()
1388 u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4); atom_parse_cmd_header()
1400 int atom_allocate_fb_scratch(struct atom_context *ctx) atom_allocate_fb_scratch() argument
1407 if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { atom_allocate_fb_scratch()
1408 firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); atom_allocate_fb_scratch()
1416 ctx->scratch_size_bytes = 0; atom_allocate_fb_scratch()
1420 ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL); atom_allocate_fb_scratch()
1421 if (!ctx->scratch) atom_allocate_fb_scratch()
1423 ctx->scratch_size_bytes = usage_bytes; atom_allocate_fb_scratch()
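Every atom_op_* handler above follows one template: decode the attribute byte, fetch the destination while saving the bits the operand does not cover, fetch the source, combine, and write back through atom_put_dst() so the saved bits survive. A hypothetical extra opcode written to the same template (modeled on atom_op_and; not part of the real dispatch table):

    static void atom_op_nand(atom_exec_context *ctx, int *ptr, int arg)
    {
            uint8_t attr = U8((*ptr)++);
            uint32_t dst, src, saved;
            int dptr = *ptr;

            dst = atom_get_dst(ctx, arg, attr, ptr, &saved, 1);
            src = atom_get_src(ctx, attr, ptr);
            dst = ~(dst & src);
            atom_put_dst(ctx, arg, attr, &dptr, dst, saved);
    }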
H A Datom-bits.h32 #define U8(ptr) get_u8(ctx->ctx->bios, (ptr))
33 #define CU8(ptr) get_u8(ctx->bios, (ptr)) get_u16()
38 #define U16(ptr) get_u16(ctx->ctx->bios, (ptr))
39 #define CU16(ptr) get_u16(ctx->bios, (ptr)) get_u32()
44 #define U32(ptr) get_u32(ctx->ctx->bios, (ptr))
45 #define CU32(ptr) get_u32(ctx->bios, (ptr))
46 #define CSTR(ptr) (((char *)(ctx->bios))+(ptr))
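The paired macros differ only in which context they dereference: U8/U16/U32 are used inside opcode handlers, where ctx is an atom_exec_context and the BIOS image hangs off ctx->ctx->bios, while the CU* forms serve functions whose ctx is the atom_context itself. The accessors the search elides are roughly:

    static inline uint8_t get_u8(void *bios, int ptr)
    {
            return ((unsigned char *)bios)[ptr];
    }

    static inline uint16_t get_u16(void *bios, int ptr)
    {
            return get_u8(bios, ptr) |
                   (((uint16_t)get_u8(bios, ptr + 1)) << 8);
    }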
/linux-4.1.27/drivers/base/regmap/
H A Dregmap-mmio.c92 struct regmap_mmio_context *ctx = context; regmap_mmio_gather_write() local
98 if (!IS_ERR(ctx->clk)) { regmap_mmio_gather_write()
99 ret = clk_enable(ctx->clk); regmap_mmio_gather_write()
107 switch (ctx->val_bytes) { regmap_mmio_gather_write()
109 writeb(*(u8 *)val, ctx->regs + offset); regmap_mmio_gather_write()
112 writew(*(u16 *)val, ctx->regs + offset); regmap_mmio_gather_write()
115 writel(*(u32 *)val, ctx->regs + offset); regmap_mmio_gather_write()
119 writeq(*(u64 *)val, ctx->regs + offset); regmap_mmio_gather_write()
126 val_size -= ctx->val_bytes; regmap_mmio_gather_write()
127 val += ctx->val_bytes; regmap_mmio_gather_write()
128 offset += ctx->val_bytes; regmap_mmio_gather_write()
131 if (!IS_ERR(ctx->clk)) regmap_mmio_gather_write()
132 clk_disable(ctx->clk); regmap_mmio_gather_write()
139 struct regmap_mmio_context *ctx = context; regmap_mmio_write() local
140 unsigned int offset = ctx->reg_bytes + ctx->pad_bytes; regmap_mmio_write()
144 return regmap_mmio_gather_write(context, data, ctx->reg_bytes, regmap_mmio_write()
152 struct regmap_mmio_context *ctx = context; regmap_mmio_read() local
158 if (!IS_ERR(ctx->clk)) { regmap_mmio_read()
159 ret = clk_enable(ctx->clk); regmap_mmio_read()
167 switch (ctx->val_bytes) { regmap_mmio_read()
169 *(u8 *)val = readb(ctx->regs + offset); regmap_mmio_read()
172 *(u16 *)val = readw(ctx->regs + offset); regmap_mmio_read()
175 *(u32 *)val = readl(ctx->regs + offset); regmap_mmio_read()
179 *(u64 *)val = readq(ctx->regs + offset); regmap_mmio_read()
186 val_size -= ctx->val_bytes; regmap_mmio_read()
187 val += ctx->val_bytes; regmap_mmio_read()
188 offset += ctx->val_bytes; regmap_mmio_read()
191 if (!IS_ERR(ctx->clk)) regmap_mmio_read()
192 clk_disable(ctx->clk); regmap_mmio_read()
199 struct regmap_mmio_context *ctx = context; regmap_mmio_free_context() local
201 if (!IS_ERR(ctx->clk)) { regmap_mmio_free_context()
202 clk_unprepare(ctx->clk); regmap_mmio_free_context()
203 clk_put(ctx->clk); regmap_mmio_free_context()
223 struct regmap_mmio_context *ctx; regmap_mmio_gen_context() local
266 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); regmap_mmio_gen_context()
267 if (!ctx) regmap_mmio_gen_context()
270 ctx->regs = regs; regmap_mmio_gen_context()
271 ctx->val_bytes = config->val_bits / 8; regmap_mmio_gen_context()
272 ctx->reg_bytes = config->reg_bits / 8; regmap_mmio_gen_context()
273 ctx->pad_bytes = config->pad_bits / 8; regmap_mmio_gen_context()
274 ctx->clk = ERR_PTR(-ENODEV); regmap_mmio_gen_context()
277 return ctx; regmap_mmio_gen_context()
279 ctx->clk = clk_get(dev, clk_id); regmap_mmio_gen_context()
280 if (IS_ERR(ctx->clk)) { regmap_mmio_gen_context()
281 ret = PTR_ERR(ctx->clk); regmap_mmio_gen_context()
285 ret = clk_prepare(ctx->clk); regmap_mmio_gen_context()
287 clk_put(ctx->clk); regmap_mmio_gen_context()
291 return ctx; regmap_mmio_gen_context()
294 kfree(ctx); regmap_mmio_gen_context()
314 struct regmap_mmio_context *ctx; regmap_init_mmio_clk() local
316 ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); regmap_init_mmio_clk()
317 if (IS_ERR(ctx)) regmap_init_mmio_clk()
318 return ERR_CAST(ctx); regmap_init_mmio_clk()
320 return regmap_init(dev, &regmap_mmio, ctx, config); regmap_init_mmio_clk()
340 struct regmap_mmio_context *ctx; devm_regmap_init_mmio_clk() local
342 ctx = regmap_mmio_gen_context(dev, clk_id, regs, config); devm_regmap_init_mmio_clk()
343 if (IS_ERR(ctx)) devm_regmap_init_mmio_clk()
344 return ERR_CAST(ctx); devm_regmap_init_mmio_clk()
346 return devm_regmap_init(dev, &regmap_mmio, ctx, config); devm_regmap_init_mmio_clk()
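A typical caller hands devm_regmap_init_mmio_clk() a regmap_config describing the register geometry and gets back a clock-aware MMIO regmap; every access then brackets the I/O with clk_enable()/clk_disable() as shown above. A probe-time sketch (driver names hypothetical, "pclk" an assumed clock id, base obtained from devm_ioremap_resource()):

    static const struct regmap_config foo_regmap_config = {
            .reg_bits   = 32,
            .val_bits   = 32,
            .reg_stride = 4,
    };

    /* in foo_probe() */
    struct regmap *map;

    map = devm_regmap_init_mmio_clk(&pdev->dev, "pclk", base,
                                    &foo_regmap_config);
    if (IS_ERR(map))
            return PTR_ERR(map);

    return regmap_write(map, 0x10, BIT(0)); /* clocked MMIO write */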
/linux-4.1.27/fs/ext4/
H A Dcrypto_policy.c32 struct ext4_encryption_context ctx; ext4_is_encryption_context_consistent_with_policy() local
34 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, ext4_is_encryption_context_consistent_with_policy()
35 sizeof(ctx)); ext4_is_encryption_context_consistent_with_policy()
36 if (res != sizeof(ctx)) ext4_is_encryption_context_consistent_with_policy()
38 return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, ext4_is_encryption_context_consistent_with_policy()
40 (ctx.flags == ext4_is_encryption_context_consistent_with_policy()
42 (ctx.contents_encryption_mode == ext4_is_encryption_context_consistent_with_policy()
44 (ctx.filenames_encryption_mode == ext4_is_encryption_context_consistent_with_policy()
51 struct ext4_encryption_context ctx; ext4_create_encryption_context_from_policy() local
54 ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1; ext4_create_encryption_context_from_policy()
55 memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, ext4_create_encryption_context_from_policy()
71 ctx.contents_encryption_mode = policy->contents_encryption_mode; ext4_create_encryption_context_from_policy()
72 ctx.filenames_encryption_mode = policy->filenames_encryption_mode; ext4_create_encryption_context_from_policy()
73 ctx.flags = policy->flags; ext4_create_encryption_context_from_policy()
74 BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE); ext4_create_encryption_context_from_policy()
75 get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); ext4_create_encryption_context_from_policy()
78 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, ext4_create_encryption_context_from_policy()
79 sizeof(ctx), 0); ext4_create_encryption_context_from_policy()
108 struct ext4_encryption_context ctx; ext4_get_policy() local
112 &ctx, sizeof(ctx)); ext4_get_policy()
113 if (res != sizeof(ctx)) ext4_get_policy()
115 if (ctx.format != EXT4_ENCRYPTION_CONTEXT_FORMAT_V1) ext4_get_policy()
118 policy->contents_encryption_mode = ctx.contents_encryption_mode; ext4_get_policy()
119 policy->filenames_encryption_mode = ctx.filenames_encryption_mode; ext4_get_policy()
120 policy->flags = ctx.flags; ext4_get_policy()
121 memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, ext4_get_policy()
170 struct ext4_encryption_context ctx; ext4_inherit_context() local
173 &ctx, sizeof(ctx)); ext4_inherit_context()
175 if (res != sizeof(ctx)) { ext4_inherit_context()
177 ctx.format = EXT4_ENCRYPTION_CONTEXT_FORMAT_V1; ext4_inherit_context()
178 ctx.contents_encryption_mode = ext4_inherit_context()
180 ctx.filenames_encryption_mode = ext4_inherit_context()
182 ctx.flags = 0; ext4_inherit_context()
183 memset(ctx.master_key_descriptor, 0x42, ext4_inherit_context()
190 get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE); ext4_inherit_context()
192 EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx, ext4_inherit_context()
193 sizeof(ctx), 0); ext4_inherit_context()
H A Dcrypto.c60 * @ctx: The encryption context to release.
67 void ext4_release_crypto_ctx(struct ext4_crypto_ctx *ctx) ext4_release_crypto_ctx() argument
71 if (ctx->bounce_page) { ext4_release_crypto_ctx()
72 if (ctx->flags & EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL) ext4_release_crypto_ctx()
73 __free_page(ctx->bounce_page); ext4_release_crypto_ctx()
75 mempool_free(ctx->bounce_page, ext4_bounce_page_pool); ext4_release_crypto_ctx()
76 ctx->bounce_page = NULL; ext4_release_crypto_ctx()
78 ctx->control_page = NULL; ext4_release_crypto_ctx()
79 if (ctx->flags & EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL) { ext4_release_crypto_ctx()
80 if (ctx->tfm) ext4_release_crypto_ctx()
81 crypto_free_tfm(ctx->tfm); ext4_release_crypto_ctx()
82 kfree(ctx); ext4_release_crypto_ctx()
85 list_add(&ctx->free_list, &ext4_free_crypto_ctxs); ext4_release_crypto_ctx()
99 struct ext4_crypto_ctx *ctx = kzalloc(sizeof(struct ext4_crypto_ctx), ext4_alloc_and_init_crypto_ctx() local
102 if (!ctx) ext4_alloc_and_init_crypto_ctx()
104 return ctx; ext4_alloc_and_init_crypto_ctx()
118 struct ext4_crypto_ctx *ctx = NULL; ext4_get_crypto_ctx() local
127 * We first try getting the ctx from a free list because in ext4_get_crypto_ctx()
128 * the common case the ctx will have an allocated and ext4_get_crypto_ctx()
137 ctx = list_first_entry_or_null(&ext4_free_crypto_ctxs, ext4_get_crypto_ctx()
139 if (ctx) ext4_get_crypto_ctx()
140 list_del(&ctx->free_list); ext4_get_crypto_ctx()
142 if (!ctx) { ext4_get_crypto_ctx()
143 ctx = ext4_alloc_and_init_crypto_ctx(GFP_NOFS); ext4_get_crypto_ctx()
144 if (IS_ERR(ctx)) { ext4_get_crypto_ctx()
145 res = PTR_ERR(ctx); ext4_get_crypto_ctx()
148 ctx->flags |= EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL; ext4_get_crypto_ctx()
150 ctx->flags &= ~EXT4_CTX_REQUIRES_FREE_ENCRYPT_FL; ext4_get_crypto_ctx()
156 if (ctx->tfm && (ctx->mode != key->mode)) { ext4_get_crypto_ctx()
157 crypto_free_tfm(ctx->tfm); ext4_get_crypto_ctx()
158 ctx->tfm = NULL; ext4_get_crypto_ctx()
159 ctx->mode = EXT4_ENCRYPTION_MODE_INVALID; ext4_get_crypto_ctx()
161 if (!ctx->tfm) { ext4_get_crypto_ctx()
164 ctx->tfm = crypto_ablkcipher_tfm( ext4_get_crypto_ctx()
170 ctx->tfm = ERR_PTR(-ENOTSUPP); ext4_get_crypto_ctx()
175 if (IS_ERR_OR_NULL(ctx->tfm)) { ext4_get_crypto_ctx()
176 res = PTR_ERR(ctx->tfm); ext4_get_crypto_ctx()
177 ctx->tfm = NULL; ext4_get_crypto_ctx()
180 ctx->mode = key->mode; ext4_get_crypto_ctx()
186 BUG_ON(ctx->bounce_page); ext4_get_crypto_ctx()
190 if (!IS_ERR_OR_NULL(ctx)) ext4_get_crypto_ctx()
191 ext4_release_crypto_ctx(ctx); ext4_get_crypto_ctx()
192 ctx = ERR_PTR(res); ext4_get_crypto_ctx()
194 return ctx; ext4_get_crypto_ctx()
252 struct ext4_crypto_ctx *ctx; ext4_init_crypto() local
254 ctx = ext4_alloc_and_init_crypto_ctx(GFP_KERNEL); ext4_init_crypto()
255 if (IS_ERR(ctx)) { ext4_init_crypto()
256 res = PTR_ERR(ctx); ext4_init_crypto()
259 list_add(&ctx->free_list, &ext4_free_crypto_ctxs); ext4_init_crypto()
279 struct ext4_crypto_ctx *ctx = ext4_restore_control_page() local
285 ext4_release_crypto_ctx(ctx); ext4_restore_control_page()
308 static int ext4_page_crypto(struct ext4_crypto_ctx *ctx, ext4_page_crypto() argument
321 struct crypto_ablkcipher *atfm = __crypto_ablkcipher_cast(ctx->tfm); ext4_page_crypto()
324 BUG_ON(!ctx->tfm); ext4_page_crypto()
325 BUG_ON(ctx->mode != ei->i_encryption_key.mode); ext4_page_crypto()
327 if (ctx->mode != EXT4_ENCRYPTION_MODE_AES_256_XTS) { ext4_page_crypto()
330 __func__, ctx->mode); ext4_page_crypto()
335 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_REQ_WEAK_KEY); ext4_page_crypto()
392 * Allocates a ciphertext page and encrypts plaintext_page into it using the ctx
405 struct ext4_crypto_ctx *ctx; ext4_encrypt() local
411 ctx = ext4_get_crypto_ctx(inode); ext4_encrypt()
412 if (IS_ERR(ctx)) ext4_encrypt()
413 return (struct page *) ctx; ext4_encrypt()
426 ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; ext4_encrypt()
428 ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; ext4_encrypt()
430 ctx->bounce_page = ciphertext_page; ext4_encrypt()
431 ctx->control_page = plaintext_page; ext4_encrypt()
432 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, plaintext_page->index, ext4_encrypt()
435 ext4_release_crypto_ctx(ctx); ext4_encrypt()
439 set_page_private(ciphertext_page, (unsigned long)ctx); ext4_encrypt()
446 * @ctx: The encryption context.
449 * Decrypts page in-place using the ctx encryption context.
455 int ext4_decrypt(struct ext4_crypto_ctx *ctx, struct page *page) ext4_decrypt() argument
459 return ext4_page_crypto(ctx, page->mapping->host, ext4_decrypt()
471 struct ext4_crypto_ctx *ctx = ext4_get_crypto_ctx(inode); ext4_decrypt_one() local
473 if (!ctx) ext4_decrypt_one()
475 ret = ext4_decrypt(ctx, page); ext4_decrypt_one()
476 ext4_release_crypto_ctx(ctx); ext4_decrypt_one()
482 struct ext4_crypto_ctx *ctx; ext4_encrypted_zeroout() local
492 ctx = ext4_get_crypto_ctx(inode); ext4_encrypted_zeroout()
493 if (IS_ERR(ctx)) ext4_encrypted_zeroout()
494 return PTR_ERR(ctx); ext4_encrypted_zeroout()
506 ctx->flags &= ~EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; ext4_encrypted_zeroout()
508 ctx->flags |= EXT4_BOUNCE_PAGE_REQUIRES_FREE_ENCRYPT_FL; ext4_encrypted_zeroout()
510 ctx->bounce_page = ciphertext_page; ext4_encrypted_zeroout()
513 err = ext4_page_crypto(ctx, inode, EXT4_ENCRYPT, lblk, ext4_encrypted_zeroout()
537 ext4_release_crypto_ctx(ctx); ext4_encrypted_zeroout()
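The write path implied here: ext4_encrypt() hands back a bounce page holding the ciphertext, with the ctx stashed in page_private so that ext4_restore_control_page() can recover the original page and recycle the ctx once I/O completes. A sketch of a caller, assuming the 4.1 writeback flow:

    struct page *data_page = ext4_encrypt(inode, plaintext_page);

    if (IS_ERR(data_page))
            return PTR_ERR(data_page);
    /* the bio targets data_page; the end_io path calls
     * ext4_restore_control_page(data_page) to unwind the bounce */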
H A Dcrypto_fname.c58 static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx, ext4_fname_encrypt() argument
65 struct crypto_ablkcipher *tfm = ctx->ctfm; ext4_fname_encrypt()
69 int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK); ext4_fname_encrypt()
72 if (iname->len <= 0 || iname->len > ctx->lim) ext4_fname_encrypt()
78 ciphertext_len = (ciphertext_len > ctx->lim) ext4_fname_encrypt()
79 ? ctx->lim : ciphertext_len; ext4_fname_encrypt()
93 workbuf = kmap(ctx->workpage); ext4_fname_encrypt()
105 sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0); ext4_fname_encrypt()
118 kunmap(ctx->workpage); ext4_fname_encrypt()
135 static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx, ext4_fname_decrypt() argument
143 struct crypto_ablkcipher *tfm = ctx->ctfm; ext4_fname_decrypt()
148 if (iname->len <= 0 || iname->len > ctx->lim) ext4_fname_decrypt()
167 workbuf = kmap(ctx->workpage); ext4_fname_decrypt()
177 sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0); ext4_fname_decrypt()
190 kunmap(ctx->workpage); ext4_fname_decrypt()
261 void ext4_free_fname_crypto_ctx(struct ext4_fname_crypto_ctx *ctx) ext4_free_fname_crypto_ctx() argument
263 if (ctx == NULL || IS_ERR(ctx)) ext4_free_fname_crypto_ctx()
266 if (ctx->ctfm && !IS_ERR(ctx->ctfm)) ext4_free_fname_crypto_ctx()
267 crypto_free_ablkcipher(ctx->ctfm); ext4_free_fname_crypto_ctx()
268 if (ctx->htfm && !IS_ERR(ctx->htfm)) ext4_free_fname_crypto_ctx()
269 crypto_free_hash(ctx->htfm); ext4_free_fname_crypto_ctx()
270 if (ctx->workpage && !IS_ERR(ctx->workpage)) ext4_free_fname_crypto_ctx()
271 __free_page(ctx->workpage); ext4_free_fname_crypto_ctx()
272 kfree(ctx); ext4_free_fname_crypto_ctx()
285 void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx) ext4_put_fname_crypto_ctx() argument
287 if (*ctx == NULL || IS_ERR(*ctx)) ext4_put_fname_crypto_ctx()
289 ext4_free_fname_crypto_ctx(*ctx); ext4_put_fname_crypto_ctx()
290 *ctx = NULL; ext4_put_fname_crypto_ctx()
308 struct ext4_fname_crypto_ctx *ctx; ext4_alloc_fname_crypto_ctx() local
310 ctx = kmalloc(sizeof(struct ext4_fname_crypto_ctx), GFP_NOFS); ext4_alloc_fname_crypto_ctx()
311 if (ctx == NULL) ext4_alloc_fname_crypto_ctx()
316 memset(&ctx->key, 0, sizeof(ctx->key)); ext4_alloc_fname_crypto_ctx()
318 memcpy(&ctx->key, key, sizeof(struct ext4_encryption_key)); ext4_alloc_fname_crypto_ctx()
320 ctx->has_valid_key = (EXT4_ENCRYPTION_MODE_INVALID == key->mode) ext4_alloc_fname_crypto_ctx()
322 ctx->ctfm_key_is_ready = 0; ext4_alloc_fname_crypto_ctx()
323 ctx->ctfm = NULL; ext4_alloc_fname_crypto_ctx()
324 ctx->htfm = NULL; ext4_alloc_fname_crypto_ctx()
325 ctx->workpage = NULL; ext4_alloc_fname_crypto_ctx()
326 return ctx; ext4_alloc_fname_crypto_ctx()
340 struct ext4_fname_crypto_ctx *ctx; ext4_get_fname_crypto_ctx() local
355 ctx = ext4_search_fname_crypto_ctx(&(ei->i_encryption_key)); ext4_get_fname_crypto_ctx()
356 if (ctx == NULL) ext4_get_fname_crypto_ctx()
357 ctx = ext4_alloc_fname_crypto_ctx(&(ei->i_encryption_key)); ext4_get_fname_crypto_ctx()
358 if (IS_ERR(ctx)) ext4_get_fname_crypto_ctx()
359 return ctx; ext4_get_fname_crypto_ctx()
361 ctx->flags = ei->i_crypt_policy_flags; ext4_get_fname_crypto_ctx()
362 if (ctx->has_valid_key) { ext4_get_fname_crypto_ctx()
363 if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) { ext4_get_fname_crypto_ctx()
366 ctx->key.mode); ext4_get_fname_crypto_ctx()
373 if (ctx->ctfm == NULL) { ext4_get_fname_crypto_ctx()
374 ctx->ctfm = crypto_alloc_ablkcipher("cts(cbc(aes))", ext4_get_fname_crypto_ctx()
377 if (IS_ERR(ctx->ctfm)) { ext4_get_fname_crypto_ctx()
378 res = PTR_ERR(ctx->ctfm); ext4_get_fname_crypto_ctx()
382 ctx->ctfm = NULL; ext4_get_fname_crypto_ctx()
383 ext4_put_fname_crypto_ctx(&ctx); ext4_get_fname_crypto_ctx()
386 if (ctx->ctfm == NULL) { ext4_get_fname_crypto_ctx()
390 ext4_put_fname_crypto_ctx(&ctx); ext4_get_fname_crypto_ctx()
393 if (ctx->workpage == NULL) ext4_get_fname_crypto_ctx()
394 ctx->workpage = alloc_page(GFP_NOFS); ext4_get_fname_crypto_ctx()
395 if (IS_ERR(ctx->workpage)) { ext4_get_fname_crypto_ctx()
396 res = PTR_ERR(ctx->workpage); ext4_get_fname_crypto_ctx()
400 ctx->workpage = NULL; ext4_get_fname_crypto_ctx()
401 ext4_put_fname_crypto_ctx(&ctx); ext4_get_fname_crypto_ctx()
404 if (ctx->workpage == NULL) { ext4_get_fname_crypto_ctx()
408 ext4_put_fname_crypto_ctx(&ctx); ext4_get_fname_crypto_ctx()
411 ctx->lim = max_ciphertext_len; ext4_get_fname_crypto_ctx()
412 crypto_ablkcipher_clear_flags(ctx->ctfm, ~0); ext4_get_fname_crypto_ctx()
413 crypto_tfm_set_flags(crypto_ablkcipher_tfm(ctx->ctfm), ext4_get_fname_crypto_ctx()
419 if (!ctx->ctfm_key_is_ready) { ext4_get_fname_crypto_ctx()
423 res = crypto_ablkcipher_setkey(ctx->ctfm, ext4_get_fname_crypto_ctx()
424 ctx->key.raw, ctx->key.size); ext4_get_fname_crypto_ctx()
426 ext4_put_fname_crypto_ctx(&ctx); ext4_get_fname_crypto_ctx()
429 ctx->ctfm_key_is_ready = 1; ext4_get_fname_crypto_ctx()
437 if (ctx->htfm == NULL) ext4_get_fname_crypto_ctx()
438 ctx->htfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC); ext4_get_fname_crypto_ctx()
439 if (IS_ERR(ctx->htfm)) { ext4_get_fname_crypto_ctx()
440 res = PTR_ERR(ctx->htfm); ext4_get_fname_crypto_ctx()
443 ctx->htfm = NULL; ext4_get_fname_crypto_ctx()
444 ext4_put_fname_crypto_ctx(&ctx); ext4_get_fname_crypto_ctx()
447 if (ctx->htfm == NULL) { ext4_get_fname_crypto_ctx()
450 ext4_put_fname_crypto_ctx(&ctx); ext4_get_fname_crypto_ctx()
454 return ctx; ext4_get_fname_crypto_ctx()
470 int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx, ext4_fname_crypto_namelen_on_disk() argument
474 int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK); ext4_fname_crypto_namelen_on_disk()
476 if (ctx == NULL) ext4_fname_crypto_namelen_on_disk()
478 if (!(ctx->has_valid_key)) ext4_fname_crypto_namelen_on_disk()
483 ciphertext_len = (ciphertext_len > ctx->lim) ext4_fname_crypto_namelen_on_disk()
484 ? ctx->lim : ciphertext_len; ext4_fname_crypto_namelen_on_disk()
494 int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx, ext4_fname_crypto_alloc_buffer() argument
498 int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK); ext4_fname_crypto_alloc_buffer()
500 if (!ctx) ext4_fname_crypto_alloc_buffer()
532 int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, _ext4_fname_disk_to_usr() argument
540 if (ctx == NULL) _ext4_fname_disk_to_usr()
551 if (ctx->has_valid_key) _ext4_fname_disk_to_usr()
552 return ext4_fname_decrypt(ctx, iname, oname); _ext4_fname_disk_to_usr()
571 int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx, ext4_fname_disk_to_usr() argument
579 return _ext4_fname_disk_to_usr(ctx, hinfo, &iname, oname); ext4_fname_disk_to_usr()
586 int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx, ext4_fname_usr_to_disk() argument
592 if (ctx == NULL) ext4_fname_usr_to_disk()
604 if (ctx->has_valid_key) { ext4_fname_usr_to_disk()
605 res = ext4_fname_encrypt(ctx, iname, oname); ext4_fname_usr_to_disk()
617 int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx, ext4_fname_usr_to_hash() argument
625 if (!ctx || ext4_fname_usr_to_hash()
633 if (!ctx->has_valid_key && iname->name[0] == '_') { ext4_fname_usr_to_hash()
644 if (!ctx->has_valid_key && iname->name[0] != '_') { ext4_fname_usr_to_hash()
653 ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp); ext4_fname_usr_to_hash()
657 ret = ext4_fname_encrypt(ctx, iname, &tmp); ext4_fname_usr_to_hash()
667 int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr, ext4_fname_match() argument
674 if (ctx->has_valid_key) { ext4_fname_match()
678 ret = ext4_fname_crypto_alloc_buffer(ctx, len, cstr); ext4_fname_match()
683 ret = ext4_fname_encrypt(ctx, &istr, cstr); ext4_fname_match()
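The recurring expression 4 << (flags & EXT4_POLICY_FLAGS_PAD_MASK) yields a pad of 4, 8, 16 or 32 bytes, and the on-disk name length is the input rounded up to that pad and clamped to ctx->lim (the real code also enforces a 16-byte minimum for the cipher block, omitted here). As a sketch:

    static u32 fname_ondisk_len(u32 ilen, u8 policy_flags, u32 lim)
    {
            u32 pad  = 4U << (policy_flags & 0x3);  /* PAD_MASK */
            u32 clen = round_up(ilen, pad);

            return clen > lim ? lim : clen;
    }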
/linux-4.1.27/arch/mips/pci/
H A Dpci-alchemy.c91 static void alchemy_pci_wired_entry(struct alchemy_pci_context *ctx) alchemy_pci_wired_entry() argument
93 ctx->wired_entry = read_c0_wired(); alchemy_pci_wired_entry()
94 add_wired_entry(0, 0, (unsigned long)ctx->pci_cfg_vm->addr, PM_4K); alchemy_pci_wired_entry()
95 ctx->last_elo0 = ctx->last_elo1 = ~0; alchemy_pci_wired_entry()
101 struct alchemy_pci_context *ctx = bus->sysdata; config_access() local
113 r = __raw_readl(ctx->regs + PCI_REG_STATCMD) & 0x0000ffff; config_access()
115 __raw_writel(r, ctx->regs + PCI_REG_STATCMD); config_access()
121 if (ctx->board_pci_idsel(device, 1) == 0) { config_access()
146 if ((entryLo0 != ctx->last_elo0) || (entryLo1 != ctx->last_elo1)) { config_access()
147 mod_wired_entry(ctx->wired_entry, entryLo0, entryLo1, config_access()
148 (unsigned long)ctx->pci_cfg_vm->addr, PM_4K); config_access()
149 ctx->last_elo0 = entryLo0; config_access()
150 ctx->last_elo1 = entryLo1; config_access()
154 __raw_writel(*data, ctx->pci_cfg_vm->addr + offset); config_access()
156 *data = __raw_readl(ctx->pci_cfg_vm->addr + offset); config_access()
163 status = __raw_readl(ctx->regs + PCI_REG_STATCMD); config_access()
174 __raw_writel(status & 0xf000ffff, ctx->regs + PCI_REG_STATCMD); config_access()
181 (void)ctx->board_pci_idsel(device, 0); config_access()
308 struct alchemy_pci_context *ctx = __alchemy_pci_ctx; alchemy_pci_suspend() local
309 if (!ctx) alchemy_pci_suspend()
312 ctx->pm[0] = __raw_readl(ctx->regs + PCI_REG_CMEM); alchemy_pci_suspend()
313 ctx->pm[1] = __raw_readl(ctx->regs + PCI_REG_CONFIG) & 0x0009ffff; alchemy_pci_suspend()
314 ctx->pm[2] = __raw_readl(ctx->regs + PCI_REG_B2BMASK_CCH); alchemy_pci_suspend()
315 ctx->pm[3] = __raw_readl(ctx->regs + PCI_REG_B2BBASE0_VID); alchemy_pci_suspend()
316 ctx->pm[4] = __raw_readl(ctx->regs + PCI_REG_B2BBASE1_SID); alchemy_pci_suspend()
317 ctx->pm[5] = __raw_readl(ctx->regs + PCI_REG_MWMASK_DEV); alchemy_pci_suspend()
318 ctx->pm[6] = __raw_readl(ctx->regs + PCI_REG_MWBASE_REV_CCL); alchemy_pci_suspend()
319 ctx->pm[7] = __raw_readl(ctx->regs + PCI_REG_ID); alchemy_pci_suspend()
320 ctx->pm[8] = __raw_readl(ctx->regs + PCI_REG_CLASSREV); alchemy_pci_suspend()
321 ctx->pm[9] = __raw_readl(ctx->regs + PCI_REG_PARAM); alchemy_pci_suspend()
322 ctx->pm[10] = __raw_readl(ctx->regs + PCI_REG_MBAR); alchemy_pci_suspend()
323 ctx->pm[11] = __raw_readl(ctx->regs + PCI_REG_TIMEOUT); alchemy_pci_suspend()
330 struct alchemy_pci_context *ctx = __alchemy_pci_ctx; alchemy_pci_resume() local
331 if (!ctx) alchemy_pci_resume()
334 __raw_writel(ctx->pm[0], ctx->regs + PCI_REG_CMEM); alchemy_pci_resume()
335 __raw_writel(ctx->pm[2], ctx->regs + PCI_REG_B2BMASK_CCH); alchemy_pci_resume()
336 __raw_writel(ctx->pm[3], ctx->regs + PCI_REG_B2BBASE0_VID); alchemy_pci_resume()
337 __raw_writel(ctx->pm[4], ctx->regs + PCI_REG_B2BBASE1_SID); alchemy_pci_resume()
338 __raw_writel(ctx->pm[5], ctx->regs + PCI_REG_MWMASK_DEV); alchemy_pci_resume()
339 __raw_writel(ctx->pm[6], ctx->regs + PCI_REG_MWBASE_REV_CCL); alchemy_pci_resume()
340 __raw_writel(ctx->pm[7], ctx->regs + PCI_REG_ID); alchemy_pci_resume()
341 __raw_writel(ctx->pm[8], ctx->regs + PCI_REG_CLASSREV); alchemy_pci_resume()
342 __raw_writel(ctx->pm[9], ctx->regs + PCI_REG_PARAM); alchemy_pci_resume()
343 __raw_writel(ctx->pm[10], ctx->regs + PCI_REG_MBAR); alchemy_pci_resume()
344 __raw_writel(ctx->pm[11], ctx->regs + PCI_REG_TIMEOUT); alchemy_pci_resume()
346 __raw_writel(ctx->pm[1], ctx->regs + PCI_REG_CONFIG); alchemy_pci_resume()
352 ctx->wired_entry = 8191; /* impossibly high value */ alchemy_pci_resume()
353 alchemy_pci_wired_entry(ctx); /* install it */ alchemy_pci_resume()
364 struct alchemy_pci_context *ctx; alchemy_pci_probe() local
378 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); alchemy_pci_probe()
379 if (!ctx) { alchemy_pci_probe()
411 ctx->regs = ioremap_nocache(r->start, resource_size(r)); alchemy_pci_probe()
412 if (!ctx->regs) { alchemy_pci_probe()
428 ctx->alchemy_pci_ctrl.io_map_base = (unsigned long)virt_io; alchemy_pci_probe()
433 val = __raw_readl(ctx->regs + PCI_REG_CONFIG); alchemy_pci_probe()
435 __raw_writel(val, ctx->regs + PCI_REG_CONFIG); alchemy_pci_probe()
441 ctx->board_map_irq = pd->board_map_irq; alchemy_pci_probe()
444 ctx->board_pci_idsel = pd->board_pci_idsel; alchemy_pci_probe()
446 ctx->board_pci_idsel = alchemy_pci_def_idsel; alchemy_pci_probe()
449 ctx->alchemy_pci_ctrl.pci_ops = &alchemy_pci_ops; alchemy_pci_probe()
450 ctx->alchemy_pci_ctrl.mem_resource = &alchemy_pci_def_memres; alchemy_pci_probe()
451 ctx->alchemy_pci_ctrl.io_resource = &alchemy_pci_def_iores; alchemy_pci_probe()
459 ctx->pci_cfg_vm = get_vm_area(0x2000, VM_IOREMAP); alchemy_pci_probe()
460 if (!ctx->pci_cfg_vm) { alchemy_pci_probe()
465 ctx->wired_entry = 8191; /* impossibly high value */ alchemy_pci_probe()
466 alchemy_pci_wired_entry(ctx); /* install it */ alchemy_pci_probe()
468 set_io_port_base((unsigned long)ctx->alchemy_pci_ctrl.io_map_base); alchemy_pci_probe()
471 val = __raw_readl(ctx->regs + PCI_REG_CONFIG); alchemy_pci_probe()
475 __raw_writel(val, ctx->regs + PCI_REG_CONFIG); alchemy_pci_probe()
478 __alchemy_pci_ctx = ctx; alchemy_pci_probe()
479 platform_set_drvdata(pdev, ctx); alchemy_pci_probe()
481 register_pci_controller(&ctx->alchemy_pci_ctrl); alchemy_pci_probe()
491 iounmap(ctx->regs); alchemy_pci_probe()
499 kfree(ctx); alchemy_pci_probe()
526 struct alchemy_pci_context *ctx = dev->sysdata; pcibios_map_irq() local
527 if (ctx && ctx->board_map_irq) pcibios_map_irq()
528 return ctx->board_map_irq(dev, slot, pin); pcibios_map_irq()
/linux-4.1.27/sound/soc/au1x/
H A Dac97c.c71 static inline unsigned long RD(struct au1xpsc_audio_data *ctx, int reg) RD() argument
73 return __raw_readl(ctx->mmio + reg); RD()
76 static inline void WR(struct au1xpsc_audio_data *ctx, int reg, unsigned long v) WR() argument
78 __raw_writel(v, ctx->mmio + reg); WR()
85 struct au1xpsc_audio_data *ctx = ac97_to_ctx(ac97); au1xac97c_ac97_read() local
92 mutex_lock(&ctx->lock); au1xac97c_ac97_read()
95 while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--) au1xac97c_ac97_read()
102 WR(ctx, AC97_CMDRESP, CMD_IDX(r) | CMD_READ); au1xac97c_ac97_read()
108 while ((RD(ctx, AC97_STATUS) & STAT_CP) && tmo--) au1xac97c_ac97_read()
110 data = RD(ctx, AC97_CMDRESP); au1xac97c_ac97_read()
116 mutex_unlock(&ctx->lock); au1xac97c_ac97_read()
127 struct au1xpsc_audio_data *ctx = ac97_to_ctx(ac97); au1xac97c_ac97_write() local
132 mutex_lock(&ctx->lock); au1xac97c_ac97_write()
134 for (tmo = 5; (RD(ctx, AC97_STATUS) & STAT_CP) && tmo; tmo--) au1xac97c_ac97_write()
141 WR(ctx, AC97_CMDRESP, CMD_WRITE | CMD_IDX(r) | CMD_SET_DATA(v)); au1xac97c_ac97_write()
143 for (tmo = 10; (RD(ctx, AC97_STATUS) & STAT_CP) && tmo; tmo--) au1xac97c_ac97_write()
148 mutex_unlock(&ctx->lock); au1xac97c_ac97_write()
156 struct au1xpsc_audio_data *ctx = ac97_to_ctx(ac97); au1xac97c_ac97_warm_reset() local
158 WR(ctx, AC97_CONFIG, ctx->cfg | CFG_SG | CFG_SN); au1xac97c_ac97_warm_reset()
160 WR(ctx, AC97_CONFIG, ctx->cfg | CFG_SG); au1xac97c_ac97_warm_reset()
161 WR(ctx, AC97_CONFIG, ctx->cfg); au1xac97c_ac97_warm_reset()
166 struct au1xpsc_audio_data *ctx = ac97_to_ctx(ac97); au1xac97c_ac97_cold_reset() local
169 WR(ctx, AC97_CONFIG, ctx->cfg | CFG_RS); au1xac97c_ac97_cold_reset()
171 WR(ctx, AC97_CONFIG, ctx->cfg); au1xac97c_ac97_cold_reset()
175 while (((RD(ctx, AC97_STATUS) & STAT_RD) == 0) && --i) au1xac97c_ac97_cold_reset()
192 struct au1xpsc_audio_data *ctx = snd_soc_dai_get_drvdata(dai); alchemy_ac97c_startup() local
193 snd_soc_dai_set_dma_data(dai, substream, &ctx->dmaids[0]); alchemy_ac97c_startup()
233 struct au1xpsc_audio_data *ctx; au1xac97c_drvprobe() local
235 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); au1xac97c_drvprobe()
236 if (!ctx) au1xac97c_drvprobe()
239 mutex_init(&ctx->lock); au1xac97c_drvprobe()
250 ctx->mmio = devm_ioremap_nocache(&pdev->dev, iores->start, au1xac97c_drvprobe()
252 if (!ctx->mmio) au1xac97c_drvprobe()
258 ctx->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start; au1xac97c_drvprobe()
263 ctx->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start; au1xac97c_drvprobe()
266 WR(ctx, AC97_ENABLE, EN_D | EN_CE); au1xac97c_drvprobe()
267 WR(ctx, AC97_ENABLE, EN_CE); au1xac97c_drvprobe()
269 ctx->cfg = CFG_RC(3) | CFG_XS(3); au1xac97c_drvprobe()
270 WR(ctx, AC97_CONFIG, ctx->cfg); au1xac97c_drvprobe()
272 platform_set_drvdata(pdev, ctx); au1xac97c_drvprobe()
283 ac97c_workdata = ctx; au1xac97c_drvprobe()
289 struct au1xpsc_audio_data *ctx = platform_get_drvdata(pdev); au1xac97c_drvremove() local
293 WR(ctx, AC97_ENABLE, EN_D); /* clock off, disable */ au1xac97c_drvremove()
303 struct au1xpsc_audio_data *ctx = dev_get_drvdata(dev); au1xac97c_drvsuspend() local
305 WR(ctx, AC97_ENABLE, EN_D); /* clock off, disable */ au1xac97c_drvsuspend()
312 struct au1xpsc_audio_data *ctx = dev_get_drvdata(dev); au1xac97c_drvresume() local
314 WR(ctx, AC97_ENABLE, EN_D | EN_CE); au1xac97c_drvresume()
315 WR(ctx, AC97_ENABLE, EN_CE); au1xac97c_drvresume()
316 WR(ctx, AC97_CONFIG, ctx->cfg); au1xac97c_drvresume()
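au1xac97c_ac97_read() shows the controller's command protocol: wait for the codec port to go idle (STAT_CP clear), post the command, wait again, then fetch CMDRESP, with every wait bounded so a dead codec cannot hang the DAI. The polling it repeats could be captured in a helper like this (hypothetical, not in the driver):

    static int ac97_wait_idle(struct au1xpsc_audio_data *ctx, int tries)
    {
            do {
                    if (!(RD(ctx, AC97_STATUS) & STAT_CP))
                            return 0;
                    udelay(21);     /* one AC97 frame is ~20.8 us */
            } while (--tries);

            return -ETIMEDOUT;
    }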
H A Di2sc.c69 static inline unsigned long RD(struct au1xpsc_audio_data *ctx, int reg) RD() argument
71 return __raw_readl(ctx->mmio + reg); RD()
74 static inline void WR(struct au1xpsc_audio_data *ctx, int reg, unsigned long v) WR() argument
76 __raw_writel(v, ctx->mmio + reg); WR()
82 struct au1xpsc_audio_data *ctx = snd_soc_dai_get_drvdata(cpu_dai); au1xi2s_set_fmt() local
87 c = ctx->cfg; au1xi2s_set_fmt()
130 ctx->cfg = c; au1xi2s_set_fmt()
138 struct au1xpsc_audio_data *ctx = snd_soc_dai_get_drvdata(dai); au1xi2s_trigger() local
145 WR(ctx, I2S_ENABLE, EN_D | EN_CE); au1xi2s_trigger()
146 WR(ctx, I2S_ENABLE, EN_CE); au1xi2s_trigger()
147 ctx->cfg |= (stype == PCM_TX) ? CFG_TN : CFG_RN; au1xi2s_trigger()
148 WR(ctx, I2S_CFG, ctx->cfg); au1xi2s_trigger()
152 ctx->cfg &= ~((stype == PCM_TX) ? CFG_TN : CFG_RN); au1xi2s_trigger()
153 WR(ctx, I2S_CFG, ctx->cfg); au1xi2s_trigger()
154 WR(ctx, I2S_ENABLE, EN_D); /* power off */ au1xi2s_trigger()
184 struct au1xpsc_audio_data *ctx = snd_soc_dai_get_drvdata(dai); au1xi2s_hw_params() local
191 ctx->cfg &= ~CFG_SZ_MASK; au1xi2s_hw_params()
192 ctx->cfg |= v; au1xi2s_hw_params()
199 struct au1xpsc_audio_data *ctx = snd_soc_dai_get_drvdata(dai); au1xi2s_startup() local
200 snd_soc_dai_set_dma_data(dai, substream, &ctx->dmaids[0]); au1xi2s_startup()
235 struct au1xpsc_audio_data *ctx; au1xi2s_drvprobe() local
237 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); au1xi2s_drvprobe()
238 if (!ctx) au1xi2s_drvprobe()
250 ctx->mmio = devm_ioremap_nocache(&pdev->dev, iores->start, au1xi2s_drvprobe()
252 if (!ctx->mmio) au1xi2s_drvprobe()
258 ctx->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start; au1xi2s_drvprobe()
263 ctx->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start; au1xi2s_drvprobe()
265 platform_set_drvdata(pdev, ctx); au1xi2s_drvprobe()
273 struct au1xpsc_audio_data *ctx = platform_get_drvdata(pdev); au1xi2s_drvremove() local
277 WR(ctx, I2S_ENABLE, EN_D); /* clock off, disable */ au1xi2s_drvremove()
285 struct au1xpsc_audio_data *ctx = dev_get_drvdata(dev); au1xi2s_drvsuspend() local
287 WR(ctx, I2S_ENABLE, EN_D); /* clock off, disable */ au1xi2s_drvsuspend()
/linux-4.1.27/crypto/
H A Dansi_cprng.c88 static int _get_more_prng_bytes(struct prng_context *ctx, int cont_test) _get_more_prng_bytes() argument
96 ctx); _get_more_prng_bytes()
98 hexdump("Input DT: ", ctx->DT, DEFAULT_BLK_SZ); _get_more_prng_bytes()
99 hexdump("Input I: ", ctx->I, DEFAULT_BLK_SZ); _get_more_prng_bytes()
100 hexdump("Input V: ", ctx->V, DEFAULT_BLK_SZ); _get_more_prng_bytes()
113 memcpy(tmp, ctx->DT, DEFAULT_BLK_SZ); _get_more_prng_bytes()
114 output = ctx->I; _get_more_prng_bytes()
124 xor_vectors(ctx->I, ctx->V, tmp, DEFAULT_BLK_SZ); _get_more_prng_bytes()
126 output = ctx->rand_data; _get_more_prng_bytes()
133 if (!memcmp(ctx->rand_data, ctx->last_rand_data, _get_more_prng_bytes()
137 ctx); _get_more_prng_bytes()
141 "ctx %p Failed repetition check!\n", _get_more_prng_bytes()
142 ctx); _get_more_prng_bytes()
144 ctx->flags |= PRNG_NEED_RESET; _get_more_prng_bytes()
147 memcpy(ctx->last_rand_data, ctx->rand_data, _get_more_prng_bytes()
154 xor_vectors(ctx->rand_data, ctx->I, tmp, _get_more_prng_bytes()
156 output = ctx->V; _get_more_prng_bytes()
163 crypto_cipher_encrypt_one(ctx->tfm, output, tmp); _get_more_prng_bytes()
171 ctx->DT[i] += 1; _get_more_prng_bytes()
172 if (ctx->DT[i] != 0) _get_more_prng_bytes()
176 dbgprint("Returning new block for context %p\n", ctx); _get_more_prng_bytes()
177 ctx->rand_data_valid = 0; _get_more_prng_bytes()
179 hexdump("Output DT: ", ctx->DT, DEFAULT_BLK_SZ); _get_more_prng_bytes()
180 hexdump("Output I: ", ctx->I, DEFAULT_BLK_SZ); _get_more_prng_bytes()
181 hexdump("Output V: ", ctx->V, DEFAULT_BLK_SZ); _get_more_prng_bytes()
182 hexdump("New Random Data: ", ctx->rand_data, DEFAULT_BLK_SZ); _get_more_prng_bytes()
188 static int get_prng_bytes(char *buf, size_t nbytes, struct prng_context *ctx, get_prng_bytes() argument
196 spin_lock_bh(&ctx->prng_lock); get_prng_bytes()
199 if (ctx->flags & PRNG_NEED_RESET) get_prng_bytes()
207 if (ctx->flags & PRNG_FIXED_SIZE) { get_prng_bytes()
220 byte_count, ctx); get_prng_bytes()
224 if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { get_prng_bytes()
225 if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { get_prng_bytes()
237 while (ctx->rand_data_valid < DEFAULT_BLK_SZ) { get_prng_bytes()
238 *ptr = ctx->rand_data[ctx->rand_data_valid]; get_prng_bytes()
241 ctx->rand_data_valid++; get_prng_bytes()
251 if (ctx->rand_data_valid == DEFAULT_BLK_SZ) { get_prng_bytes()
252 if (_get_more_prng_bytes(ctx, do_cont_test) < 0) { get_prng_bytes()
258 if (ctx->rand_data_valid > 0) get_prng_bytes()
260 memcpy(ptr, ctx->rand_data, DEFAULT_BLK_SZ); get_prng_bytes()
261 ctx->rand_data_valid += DEFAULT_BLK_SZ; get_prng_bytes()
272 spin_unlock_bh(&ctx->prng_lock); get_prng_bytes()
274 err, ctx); get_prng_bytes()
278 static void free_prng_context(struct prng_context *ctx) free_prng_context() argument
280 crypto_free_cipher(ctx->tfm); free_prng_context()
283 static int reset_prng_context(struct prng_context *ctx, reset_prng_context() argument
290 spin_lock_bh(&ctx->prng_lock); reset_prng_context()
291 ctx->flags |= PRNG_NEED_RESET; reset_prng_context()
299 memcpy(ctx->V, V, DEFAULT_BLK_SZ); reset_prng_context()
301 memcpy(ctx->V, DEFAULT_V_SEED, DEFAULT_BLK_SZ); reset_prng_context()
304 memcpy(ctx->DT, DT, DEFAULT_BLK_SZ); reset_prng_context()
306 memset(ctx->DT, 0, DEFAULT_BLK_SZ); reset_prng_context()
308 memset(ctx->rand_data, 0, DEFAULT_BLK_SZ); reset_prng_context()
309 memset(ctx->last_rand_data, 0, DEFAULT_BLK_SZ); reset_prng_context()
311 ctx->rand_data_valid = DEFAULT_BLK_SZ; reset_prng_context()
313 ret = crypto_cipher_setkey(ctx->tfm, prng_key, klen); reset_prng_context()
316 crypto_cipher_get_flags(ctx->tfm)); reset_prng_context()
321 ctx->flags &= ~PRNG_NEED_RESET; reset_prng_context()
323 spin_unlock_bh(&ctx->prng_lock); reset_prng_context()
329 struct prng_context *ctx = crypto_tfm_ctx(tfm); cprng_init() local
331 spin_lock_init(&ctx->prng_lock); cprng_init()
332 ctx->tfm = crypto_alloc_cipher("aes", 0, 0); cprng_init()
333 if (IS_ERR(ctx->tfm)) { cprng_init()
335 ctx); cprng_init()
336 return PTR_ERR(ctx->tfm); cprng_init()
339 if (reset_prng_context(ctx, NULL, DEFAULT_PRNG_KSZ, NULL, NULL) < 0) cprng_init()
347 ctx->flags |= PRNG_NEED_RESET; cprng_init()
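_get_more_prng_bytes() is one ANSI X9.31 step: with the AES key K it computes I = E_K(DT), R = E_K(I ^ V), V = E_K(R ^ I), then bumps DT as a byte-wise counter starting from DT[0]. Condensed, with hypothetical helpers (E() = one-block AES encrypt, xor16() = 16-byte XOR, increment() = the DT[0]-first carry loop):

    E(tfm, I, DT);                          /* I = E_K(DT)    */
    xor16(tmp, I, V);  E(tfm, R, tmp);      /* R = E_K(I ^ V) */
    xor16(tmp, R, I);  E(tfm, V, tmp);      /* V = E_K(R ^ I) */
    increment(DT);                          /* counter += 1   */

R is ctx->rand_data; the memcmp against last_rand_data in between is the FIPS continuous test that forces PRNG_NEED_RESET on a repeated block.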
H A Dalgif_aead.c59 struct aead_ctx *ctx = ask->private; aead_sndbuf() local
62 ctx->used, 0); aead_sndbuf()
70 static inline bool aead_sufficient_data(struct aead_ctx *ctx) aead_sufficient_data() argument
72 unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); aead_sufficient_data()
74 return (ctx->used >= (ctx->aead_assoclen + (ctx->enc ? 0 : as))); aead_sufficient_data()
80 struct aead_ctx *ctx = ask->private; aead_put_sgl() local
81 struct aead_sg_list *sgl = &ctx->tsgl; aead_put_sgl()
93 ctx->used = 0; aead_put_sgl()
94 ctx->more = 0; aead_put_sgl()
95 ctx->merge = 0; aead_put_sgl()
118 struct aead_ctx *ctx = ask->private; aead_wait_for_data() local
133 if (sk_wait_event(sk, &timeout, !ctx->more)) { aead_wait_for_data()
148 struct aead_ctx *ctx = ask->private; aead_data_wakeup() local
151 if (ctx->more) aead_data_wakeup()
153 if (!ctx->used) aead_data_wakeup()
170 struct aead_ctx *ctx = ask->private; aead_sendmsg() local
172 crypto_aead_ivsize(crypto_aead_reqtfm(&ctx->aead_req)); aead_sendmsg()
173 struct aead_sg_list *sgl = &ctx->tsgl; aead_sendmsg()
202 if (!ctx->more && ctx->used) aead_sendmsg()
206 ctx->enc = enc; aead_sendmsg()
208 memcpy(ctx->iv, con.iv->iv, ivsize); aead_sendmsg()
210 ctx->aead_assoclen = con.aead_assoclen; aead_sendmsg()
218 if (ctx->merge) { aead_sendmsg()
229 ctx->merge = (sg->offset + sg->length) & aead_sendmsg()
232 ctx->used += len; aead_sendmsg()
275 ctx->used += plen; aead_sendmsg()
279 ctx->merge = plen & (PAGE_SIZE - 1); aead_sendmsg()
285 ctx->more = msg->msg_flags & MSG_MORE; aead_sendmsg()
286 if (!ctx->more && !aead_sufficient_data(ctx)) { aead_sendmsg()
303 struct aead_ctx *ctx = ask->private; aead_sendpage() local
304 struct aead_sg_list *sgl = &ctx->tsgl; aead_sendpage()
314 if (!ctx->more && ctx->used) aead_sendpage()
327 ctx->merge = 0; aead_sendpage()
332 ctx->used += size; aead_sendpage()
337 ctx->more = flags & MSG_MORE; aead_sendpage()
338 if (!ctx->more && !aead_sufficient_data(ctx)) { aead_sendpage()
354 struct aead_ctx *ctx = ask->private; aead_recvmsg() local
355 unsigned bs = crypto_aead_blocksize(crypto_aead_reqtfm(&ctx->aead_req)); aead_recvmsg()
356 unsigned as = crypto_aead_authsize(crypto_aead_reqtfm(&ctx->aead_req)); aead_recvmsg()
357 struct aead_sg_list *sgl = &ctx->tsgl; aead_recvmsg()
389 if (ctx->more) { aead_recvmsg()
395 used = ctx->used; aead_recvmsg()
406 if (!aead_sufficient_data(ctx)) aead_recvmsg()
413 used -= ctx->aead_assoclen; aead_recvmsg()
415 if (ctx->enc) { aead_recvmsg()
433 err = af_alg_make_sg(&ctx->rsgl[cnt], &msg->msg_iter, aead_recvmsg()
440 af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]); aead_recvmsg()
455 assoclen = ctx->aead_assoclen; aead_recvmsg()
462 for (i = 0; i < ctx->tsgl.cur; i++) { aead_recvmsg()
469 if (i >= ctx->tsgl.cur) aead_recvmsg()
492 aead_request_set_assoc(&ctx->aead_req, assoc, ctx->aead_assoclen); aead_recvmsg()
493 aead_request_set_crypt(&ctx->aead_req, sg, ctx->rsgl[0].sg, used, aead_recvmsg()
494 ctx->iv); aead_recvmsg()
496 err = af_alg_wait_for_completion(ctx->enc ? aead_recvmsg()
497 crypto_aead_encrypt(&ctx->aead_req) : aead_recvmsg()
498 crypto_aead_decrypt(&ctx->aead_req), aead_recvmsg()
499 &ctx->completion); aead_recvmsg()
514 af_alg_free_sg(&ctx->rsgl[i]); aead_recvmsg()
527 struct aead_ctx *ctx = ask->private; aead_poll() local
533 if (!ctx->more) aead_poll()
587 struct aead_ctx *ctx = ask->private; aead_sock_destruct() local
589 crypto_aead_reqtfm(&ctx->aead_req)); aead_sock_destruct()
592 sock_kzfree_s(sk, ctx->iv, ivlen); aead_sock_destruct()
593 sock_kfree_s(sk, ctx, ctx->len); aead_sock_destruct()
599 struct aead_ctx *ctx; aead_accept_parent() local
601 unsigned int len = sizeof(*ctx) + crypto_aead_reqsize(private); aead_accept_parent()
604 ctx = sock_kmalloc(sk, len, GFP_KERNEL); aead_accept_parent()
605 if (!ctx) aead_accept_parent()
607 memset(ctx, 0, len); aead_accept_parent()
609 ctx->iv = sock_kmalloc(sk, ivlen, GFP_KERNEL); aead_accept_parent()
610 if (!ctx->iv) { aead_accept_parent()
611 sock_kfree_s(sk, ctx, len); aead_accept_parent()
614 memset(ctx->iv, 0, ivlen); aead_accept_parent()
616 ctx->len = len; aead_accept_parent()
617 ctx->used = 0; aead_accept_parent()
618 ctx->more = 0; aead_accept_parent()
619 ctx->merge = 0; aead_accept_parent()
620 ctx->enc = 0; aead_accept_parent()
621 ctx->tsgl.cur = 0; aead_accept_parent()
622 ctx->aead_assoclen = 0; aead_accept_parent()
623 af_alg_init_completion(&ctx->completion); aead_accept_parent()
624 sg_init_table(ctx->tsgl.sg, ALG_MAX_PAGES); aead_accept_parent()
626 ask->private = ctx; aead_accept_parent()
628 aead_request_set_tfm(&ctx->aead_req, private); aead_accept_parent()
629 aead_request_set_callback(&ctx->aead_req, CRYPTO_TFM_REQ_MAY_BACKLOG, aead_accept_parent()
630 af_alg_complete, &ctx->completion); aead_accept_parent()
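From userspace this surfaces as an AF_ALG socket of type "aead": bind() picks the algorithm, ALG_SET_KEY programs it on the parent, and each request travels over an accept()ed child with the operation, IV and AAD length passed as control messages. A minimal encryption sketch (error handling elided; needs <sys/socket.h> and <linux/if_alg.h>):

    struct sockaddr_alg sa = {
            .salg_family = AF_ALG,
            .salg_type   = "aead",
            .salg_name   = "gcm(aes)",
    };
    int tfm = socket(AF_ALG, SOCK_SEQPACKET, 0);

    bind(tfm, (struct sockaddr *)&sa, sizeof(sa));
    setsockopt(tfm, SOL_ALG, ALG_SET_KEY, key, 16);
    setsockopt(tfm, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);

    int req = accept(tfm, NULL, 0);
    /* sendmsg(req, ...) with cmsgs ALG_SET_OP = ALG_OP_ENCRYPT,
     * ALG_SET_IV and ALG_SET_AEAD_ASSOCLEN, then read() the result */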
H A Dchainiv.c51 struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); chainiv_givencrypt() local
64 spin_lock_bh(&ctx->lock); chainiv_givencrypt()
68 memcpy(req->giv, ctx->iv, ivsize); chainiv_givencrypt()
69 memcpy(subreq->info, ctx->iv, ivsize); chainiv_givencrypt()
75 memcpy(ctx->iv, subreq->info, ivsize); chainiv_givencrypt()
78 spin_unlock_bh(&ctx->lock); chainiv_givencrypt()
86 struct chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); chainiv_givencrypt_first() local
89 spin_lock_bh(&ctx->lock); chainiv_givencrypt_first()
95 err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv, chainiv_givencrypt_first()
99 spin_unlock_bh(&ctx->lock); chainiv_givencrypt_first()
116 struct chainiv_ctx *ctx = crypto_tfm_ctx(tfm); chainiv_init() local
118 spin_lock_init(&ctx->lock); chainiv_init()
123 static int async_chainiv_schedule_work(struct async_chainiv_ctx *ctx) async_chainiv_schedule_work() argument
126 int err = ctx->err; async_chainiv_schedule_work()
128 if (!ctx->queue.qlen) { async_chainiv_schedule_work()
130 clear_bit(CHAINIV_STATE_INUSE, &ctx->state); async_chainiv_schedule_work()
132 if (!ctx->queue.qlen || async_chainiv_schedule_work()
133 test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) async_chainiv_schedule_work()
137 queued = queue_work(kcrypto_wq, &ctx->postponed); async_chainiv_schedule_work()
147 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); async_chainiv_postpone_request() local
150 spin_lock_bh(&ctx->lock); async_chainiv_postpone_request()
151 err = skcipher_enqueue_givcrypt(&ctx->queue, req); async_chainiv_postpone_request()
152 spin_unlock_bh(&ctx->lock); async_chainiv_postpone_request()
154 if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) async_chainiv_postpone_request()
157 ctx->err = err; async_chainiv_postpone_request()
158 return async_chainiv_schedule_work(ctx); async_chainiv_postpone_request()
164 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); async_chainiv_givencrypt_tail() local
168 memcpy(req->giv, ctx->iv, ivsize); async_chainiv_givencrypt_tail()
169 memcpy(subreq->info, ctx->iv, ivsize); async_chainiv_givencrypt_tail()
171 ctx->err = crypto_ablkcipher_encrypt(subreq); async_chainiv_givencrypt_tail()
172 if (ctx->err) async_chainiv_givencrypt_tail()
175 memcpy(ctx->iv, subreq->info, ivsize); async_chainiv_givencrypt_tail()
178 return async_chainiv_schedule_work(ctx); async_chainiv_givencrypt_tail()
184 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); async_chainiv_givencrypt() local
194 if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) async_chainiv_givencrypt()
197 if (ctx->queue.qlen) { async_chainiv_givencrypt()
198 clear_bit(CHAINIV_STATE_INUSE, &ctx->state); async_chainiv_givencrypt()
211 struct async_chainiv_ctx *ctx = crypto_ablkcipher_ctx(geniv); async_chainiv_givencrypt_first() local
214 if (test_and_set_bit(CHAINIV_STATE_INUSE, &ctx->state)) async_chainiv_givencrypt_first()
222 err = crypto_rng_get_bytes(crypto_default_rng, ctx->iv, async_chainiv_givencrypt_first()
226 clear_bit(CHAINIV_STATE_INUSE, &ctx->state); async_chainiv_givencrypt_first()
237 struct async_chainiv_ctx *ctx = container_of(work, async_chainiv_do_postponed() local
245 spin_lock_bh(&ctx->lock); async_chainiv_do_postponed()
246 req = skcipher_dequeue_givcrypt(&ctx->queue); async_chainiv_do_postponed()
247 spin_unlock_bh(&ctx->lock); async_chainiv_do_postponed()
250 async_chainiv_schedule_work(ctx); async_chainiv_do_postponed()
266 struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); async_chainiv_init() local
268 spin_lock_init(&ctx->lock); async_chainiv_init()
270 crypto_init_queue(&ctx->queue, 100); async_chainiv_init()
271 INIT_WORK(&ctx->postponed, async_chainiv_do_postponed); async_chainiv_init()
278 struct async_chainiv_ctx *ctx = crypto_tfm_ctx(tfm); async_chainiv_exit() local
280 BUG_ON(test_bit(CHAINIV_STATE_INUSE, &ctx->state) || ctx->queue.qlen); async_chainiv_exit()
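chainiv is the classic "chained IV" generator: publish the current IV, run the encryption (a CBC walk leaves the last ciphertext block in the IV buffer), and keep that block as the next message's IV; the async variant above merely adds a request queue and an INUSE bit so the state update cannot race. A minimal userspace sketch of the idea, assuming a hypothetical cbc_encrypt() that, like the kernel's CBC code, writes the final ciphertext block back into its iv argument:

#include <stdint.h>
#include <string.h>

/* hypothetical: encrypts buf in place, leaves last ciphertext block in iv */
void cbc_encrypt(const void *key, uint8_t iv[16], uint8_t *buf, size_t len);

struct chainiv_state {
    uint8_t iv[16];                 /* seeded once from a strong RNG */
};

static void chain_givencrypt(struct chainiv_state *st, const void *key,
                             uint8_t *giv, uint8_t *buf, size_t len)
{
    memcpy(giv, st->iv, sizeof(st->iv));    /* hand out the current IV  */
    cbc_encrypt(key, giv, buf, len);        /* giv becomes last C block */
    memcpy(st->iv, giv, sizeof(st->iv));    /* chain it to next message */
}

Chained IVs are predictable to an observer, which is tolerable for some legacy uses but is exactly the property that motivated alternative generators such as eseqiv.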
H A Darc4.c32 struct arc4_ctx *ctx = crypto_tfm_ctx(tfm); arc4_set_key() local
35 ctx->x = 1; arc4_set_key()
36 ctx->y = 0; arc4_set_key()
39 ctx->S[i] = i; arc4_set_key()
42 u32 a = ctx->S[i]; arc4_set_key()
44 ctx->S[i] = ctx->S[j]; arc4_set_key()
45 ctx->S[j] = a; arc4_set_key()
53 static void arc4_crypt(struct arc4_ctx *ctx, u8 *out, const u8 *in, arc4_crypt() argument
56 u32 *const S = ctx->S; arc4_crypt()
63 x = ctx->x; arc4_crypt()
64 y = ctx->y; arc4_crypt()
86 ctx->x = x; arc4_crypt()
87 ctx->y = y; arc4_crypt()
98 struct arc4_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_arc4_crypt() local
110 arc4_crypt(ctx, wdst, wsrc, walk.nbytes); ecb_arc4_crypt()
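For reference, the whole cipher is a few lines. A self-contained sketch equivalent to the fragments above (the kernel stores x, y and S as u32 for speed, but u8 arithmetic is the same since every step is taken mod 256); ARC4 is obsolete and kept only for legacy protocols:

#include <stdint.h>
#include <stddef.h>

struct arc4 {
    uint8_t S[256];
    uint8_t x, y;
};

static void arc4_setkey(struct arc4 *c, const uint8_t *key, size_t klen)
{
    c->x = 1;                                /* matches arc4_set_key() */
    c->y = 0;
    for (int i = 0; i < 256; i++)
        c->S[i] = i;
    for (int i = 0, j = 0; i < 256; i++) {   /* key-scheduling pass */
        uint8_t a = c->S[i];
        j = (j + key[i % klen] + a) & 0xff;
        c->S[i] = c->S[j];
        c->S[j] = a;
    }
}

static void arc4_crypt(struct arc4 *c, uint8_t *out, const uint8_t *in,
                       size_t len)
{
    uint8_t x = c->x, y = c->y;

    while (len--) {
        uint8_t a = c->S[x];
        y += a;
        uint8_t b = c->S[y];
        c->S[x] = b;                         /* swap S[x] and S[y] */
        c->S[y] = a;
        *out++ = *in++ ^ c->S[(uint8_t)(a + b)];
        x++;
    }
    c->x = x;
    c->y = y;
}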
H A Dalgif_hash.c48 struct hash_ctx *ctx = ask->private; hash_sendmsg() local
56 if (!ctx->more) { hash_sendmsg()
57 err = af_alg_wait_for_completion(crypto_ahash_init(&ctx->req), hash_sendmsg()
58 &ctx->completion); hash_sendmsg()
63 ctx->more = 0; hash_sendmsg()
71 len = af_alg_make_sg(&ctx->sgl, &msg->msg_iter, len); hash_sendmsg()
77 ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, NULL, len); hash_sendmsg()
79 err = af_alg_wait_for_completion(crypto_ahash_update(&ctx->req), hash_sendmsg()
80 &ctx->completion); hash_sendmsg()
81 af_alg_free_sg(&ctx->sgl); hash_sendmsg()
91 ctx->more = msg->msg_flags & MSG_MORE; hash_sendmsg()
92 if (!ctx->more) { hash_sendmsg()
93 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); hash_sendmsg()
94 err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), hash_sendmsg()
95 &ctx->completion); hash_sendmsg()
109 struct hash_ctx *ctx = ask->private; hash_sendpage() local
116 sg_init_table(ctx->sgl.sg, 1); hash_sendpage()
117 sg_set_page(ctx->sgl.sg, page, size, offset); hash_sendpage()
119 ahash_request_set_crypt(&ctx->req, ctx->sgl.sg, ctx->result, size); hash_sendpage()
122 if (ctx->more) hash_sendpage()
123 err = crypto_ahash_finup(&ctx->req); hash_sendpage()
125 err = crypto_ahash_digest(&ctx->req); hash_sendpage()
127 if (!ctx->more) { hash_sendpage()
128 err = crypto_ahash_init(&ctx->req); hash_sendpage()
129 err = af_alg_wait_for_completion(err, &ctx->completion); hash_sendpage()
134 err = crypto_ahash_update(&ctx->req); hash_sendpage()
137 err = af_alg_wait_for_completion(err, &ctx->completion); hash_sendpage()
141 ctx->more = flags & MSG_MORE; hash_sendpage()
154 struct hash_ctx *ctx = ask->private; hash_recvmsg() local
155 unsigned ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req)); hash_recvmsg()
164 if (ctx->more) { hash_recvmsg()
165 ctx->more = 0; hash_recvmsg()
166 ahash_request_set_crypt(&ctx->req, NULL, ctx->result, 0); hash_recvmsg()
167 err = af_alg_wait_for_completion(crypto_ahash_final(&ctx->req), hash_recvmsg()
168 &ctx->completion); hash_recvmsg()
173 err = memcpy_to_msg(msg, ctx->result, len); hash_recvmsg()
185 struct hash_ctx *ctx = ask->private; hash_accept() local
186 struct ahash_request *req = &ctx->req; hash_accept()
387 struct hash_ctx *ctx = ask->private; hash_sock_destruct() local
389 sock_kzfree_s(sk, ctx->result, hash_sock_destruct()
390 crypto_ahash_digestsize(crypto_ahash_reqtfm(&ctx->req))); hash_sock_destruct()
391 sock_kfree_s(sk, ctx, ctx->len); hash_sock_destruct()
397 struct hash_ctx *ctx; hash_accept_parent_nokey() local
401 unsigned len = sizeof(*ctx) + crypto_ahash_reqsize(hash); hash_accept_parent_nokey()
404 ctx = sock_kmalloc(sk, len, GFP_KERNEL); hash_accept_parent_nokey()
405 if (!ctx) hash_accept_parent_nokey()
408 ctx->result = sock_kmalloc(sk, ds, GFP_KERNEL); hash_accept_parent_nokey()
409 if (!ctx->result) { hash_accept_parent_nokey()
410 sock_kfree_s(sk, ctx, len); hash_accept_parent_nokey()
414 memset(ctx->result, 0, ds); hash_accept_parent_nokey()
416 ctx->len = len; hash_accept_parent_nokey()
417 ctx->more = 0; hash_accept_parent_nokey()
418 af_alg_init_completion(&ctx->completion); hash_accept_parent_nokey()
420 ask->private = ctx; hash_accept_parent_nokey()
422 ahash_request_set_tfm(&ctx->req, hash); hash_accept_parent_nokey()
423 ahash_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, hash_accept_parent_nokey()
424 af_alg_complete, &ctx->completion); hash_accept_parent_nokey()
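hash_sendmsg() and hash_recvmsg() above are the kernel side of the AF_ALG "hash" socket type; MSG_MORE maps directly onto ctx->more. A minimal userspace counterpart (sha256 assumed present, error checking elided):

#include <stdio.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
    struct sockaddr_alg sa = {
        .salg_family = AF_ALG,
        .salg_type   = "hash",
        .salg_name   = "sha256",
    };
    unsigned char digest[32];
    int tfmfd, opfd;

    tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
    bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
    opfd = accept(tfmfd, NULL, 0);

    send(opfd, "hello ", 6, MSG_MORE);   /* keeps ctx->more set */
    send(opfd, "world", 5, 0);           /* final update */

    read(opfd, digest, sizeof(digest));  /* hash_recvmsg() finalizes */
    for (int i = 0; i < 32; i++)
        printf("%02x", digest[i]);
    printf("\n");
    close(opfd);
    close(tfmfd);
    return 0;
}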
H A Dsalsa20_generic.c107 static void salsa20_keysetup(struct salsa20_ctx *ctx, const u8 *k, u32 kbytes) salsa20_keysetup() argument
111 ctx->input[1] = U8TO32_LITTLE(k + 0); salsa20_keysetup()
112 ctx->input[2] = U8TO32_LITTLE(k + 4); salsa20_keysetup()
113 ctx->input[3] = U8TO32_LITTLE(k + 8); salsa20_keysetup()
114 ctx->input[4] = U8TO32_LITTLE(k + 12); salsa20_keysetup()
121 ctx->input[11] = U8TO32_LITTLE(k + 0); salsa20_keysetup()
122 ctx->input[12] = U8TO32_LITTLE(k + 4); salsa20_keysetup()
123 ctx->input[13] = U8TO32_LITTLE(k + 8); salsa20_keysetup()
124 ctx->input[14] = U8TO32_LITTLE(k + 12); salsa20_keysetup()
125 ctx->input[0] = U8TO32_LITTLE(constants + 0); salsa20_keysetup()
126 ctx->input[5] = U8TO32_LITTLE(constants + 4); salsa20_keysetup()
127 ctx->input[10] = U8TO32_LITTLE(constants + 8); salsa20_keysetup()
128 ctx->input[15] = U8TO32_LITTLE(constants + 12); salsa20_keysetup()
131 static void salsa20_ivsetup(struct salsa20_ctx *ctx, const u8 *iv) salsa20_ivsetup() argument
133 ctx->input[6] = U8TO32_LITTLE(iv + 0); salsa20_ivsetup()
134 ctx->input[7] = U8TO32_LITTLE(iv + 4); salsa20_ivsetup()
135 ctx->input[8] = 0; salsa20_ivsetup()
136 ctx->input[9] = 0; salsa20_ivsetup()
139 static void salsa20_encrypt_bytes(struct salsa20_ctx *ctx, u8 *dst, salsa20_encrypt_bytes() argument
148 salsa20_wordtobyte(buf, ctx->input); salsa20_encrypt_bytes()
150 ctx->input[8]++; salsa20_encrypt_bytes()
151 if (!ctx->input[8]) salsa20_encrypt_bytes()
152 ctx->input[9]++; salsa20_encrypt_bytes()
172 struct salsa20_ctx *ctx = crypto_tfm_ctx(tfm); setkey() local
173 salsa20_keysetup(ctx, key, keysize); setkey()
183 struct salsa20_ctx *ctx = crypto_blkcipher_ctx(tfm); encrypt() local
189 salsa20_ivsetup(ctx, walk.iv); encrypt()
193 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, encrypt()
199 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, encrypt()
206 salsa20_encrypt_bytes(ctx, walk.dst.virt.addr, encrypt()
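salsa20_keysetup()/salsa20_ivsetup() above only lay out the 16-word state: key words in input[1..4] (and [11..14] for 256-bit keys), the "expand ...-byte k" constants in [0, 5, 10, 15], the nonce in [6, 7] and the block counter in [8, 9], which salsa20_encrypt_bytes() increments per keystream block. The core that salsa20_wordtobyte() iterates ten times over that state is the double round, given here for reference:

#include <stdint.h>

#define ROTL32(v, n) (((v) << (n)) | ((v) >> (32 - (n))))

static void salsa20_doubleround(uint32_t x[16])
{
    /* column round: a quarter-round down each column */
    x[ 4] ^= ROTL32(x[ 0] + x[12],  7);
    x[ 8] ^= ROTL32(x[ 4] + x[ 0],  9);
    x[12] ^= ROTL32(x[ 8] + x[ 4], 13);
    x[ 0] ^= ROTL32(x[12] + x[ 8], 18);
    x[ 9] ^= ROTL32(x[ 5] + x[ 1],  7);
    x[13] ^= ROTL32(x[ 9] + x[ 5],  9);
    x[ 1] ^= ROTL32(x[13] + x[ 9], 13);
    x[ 5] ^= ROTL32(x[ 1] + x[13], 18);
    x[14] ^= ROTL32(x[10] + x[ 6],  7);
    x[ 2] ^= ROTL32(x[14] + x[10],  9);
    x[ 6] ^= ROTL32(x[ 2] + x[14], 13);
    x[10] ^= ROTL32(x[ 6] + x[ 2], 18);
    x[ 3] ^= ROTL32(x[15] + x[11],  7);
    x[ 7] ^= ROTL32(x[ 3] + x[15],  9);
    x[11] ^= ROTL32(x[ 7] + x[ 3], 13);
    x[15] ^= ROTL32(x[11] + x[ 7], 18);
    /* row round: a quarter-round across each row */
    x[ 1] ^= ROTL32(x[ 0] + x[ 3],  7);
    x[ 2] ^= ROTL32(x[ 1] + x[ 0],  9);
    x[ 3] ^= ROTL32(x[ 2] + x[ 1], 13);
    x[ 0] ^= ROTL32(x[ 3] + x[ 2], 18);
    x[ 6] ^= ROTL32(x[ 5] + x[ 4],  7);
    x[ 7] ^= ROTL32(x[ 6] + x[ 5],  9);
    x[ 4] ^= ROTL32(x[ 7] + x[ 6], 13);
    x[ 5] ^= ROTL32(x[ 4] + x[ 7], 18);
    x[11] ^= ROTL32(x[10] + x[ 9],  7);
    x[ 8] ^= ROTL32(x[11] + x[10],  9);
    x[ 9] ^= ROTL32(x[ 8] + x[11], 13);
    x[10] ^= ROTL32(x[ 9] + x[ 8], 18);
    x[12] ^= ROTL32(x[15] + x[14],  7);
    x[13] ^= ROTL32(x[12] + x[15],  9);
    x[14] ^= ROTL32(x[13] + x[12], 13);
    x[15] ^= ROTL32(x[14] + x[13], 18);
}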
H A Dalgif_skcipher.c71 #define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
72 crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req)))
74 #define GET_REQ_SIZE(ctx) \
75 crypto_ablkcipher_reqsize(crypto_ablkcipher_reqtfm(&ctx->req))
77 #define GET_IV_SIZE(ctx) \
78 crypto_ablkcipher_ivsize(crypto_ablkcipher_reqtfm(&ctx->req))
107 struct skcipher_ctx *ctx = ask->private; skcipher_async_cb() local
108 struct skcipher_async_req *sreq = GET_SREQ(req, ctx); skcipher_async_cb()
111 atomic_dec(&ctx->inflight); skcipher_async_cb()
120 struct skcipher_ctx *ctx = ask->private; skcipher_sndbuf() local
123 ctx->used, 0); skcipher_sndbuf()
134 struct skcipher_ctx *ctx = ask->private; skcipher_alloc_sgl() local
138 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); skcipher_alloc_sgl()
139 if (!list_empty(&ctx->tsgl)) skcipher_alloc_sgl()
155 list_add_tail(&sgl->list, &ctx->tsgl); skcipher_alloc_sgl()
164 struct skcipher_ctx *ctx = ask->private; skcipher_pull_sgl() local
169 while (!list_empty(&ctx->tsgl)) { skcipher_pull_sgl()
170 sgl = list_first_entry(&ctx->tsgl, struct skcipher_sg_list, skcipher_pull_sgl()
184 ctx->used -= plen; skcipher_pull_sgl()
199 if (!ctx->used) skcipher_pull_sgl()
200 ctx->merge = 0; skcipher_pull_sgl()
206 struct skcipher_ctx *ctx = ask->private; skcipher_free_sgl() local
208 skcipher_pull_sgl(sk, ctx->used, 1); skcipher_free_sgl()
257 struct skcipher_ctx *ctx = ask->private; skcipher_wait_for_data() local
273 if (sk_wait_event(sk, &timeout, ctx->used)) { skcipher_wait_for_data()
288 struct skcipher_ctx *ctx = ask->private; skcipher_data_wakeup() local
291 if (!ctx->used) skcipher_data_wakeup()
309 struct skcipher_ctx *ctx = ask->private; skcipher_sendmsg() local
310 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); skcipher_sendmsg()
344 if (!ctx->more && ctx->used) skcipher_sendmsg()
348 ctx->enc = enc; skcipher_sendmsg()
350 memcpy(ctx->iv, con.iv->iv, ivsize); skcipher_sendmsg()
358 if (ctx->merge) { skcipher_sendmsg()
359 sgl = list_entry(ctx->tsgl.prev, skcipher_sendmsg()
372 ctx->merge = (sg->offset + sg->length) & skcipher_sendmsg()
375 ctx->used += len; skcipher_sendmsg()
393 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); skcipher_sendmsg()
416 ctx->used += plen; skcipher_sendmsg()
425 ctx->merge = plen & (PAGE_SIZE - 1); skcipher_sendmsg()
430 ctx->more = msg->msg_flags & MSG_MORE; skcipher_sendmsg()
444 struct skcipher_ctx *ctx = ask->private; skcipher_sendpage() local
452 if (!ctx->more && ctx->used) skcipher_sendpage()
468 ctx->merge = 0; skcipher_sendpage()
469 sgl = list_entry(ctx->tsgl.prev, struct skcipher_sg_list, list); skcipher_sendpage()
478 ctx->used += size; skcipher_sendpage()
481 ctx->more = flags & MSG_MORE; skcipher_sendpage()
490 static int skcipher_all_sg_nents(struct skcipher_ctx *ctx) skcipher_all_sg_nents() argument
496 list_for_each_entry(sgl, &ctx->tsgl, list) { skcipher_all_sg_nents()
512 struct skcipher_ctx *ctx = ask->private; skcipher_recvmsg_async() local
518 unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx); skcipher_recvmsg_async()
520 GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx); skcipher_recvmsg_async()
529 sreq = GET_SREQ(req, ctx); skcipher_recvmsg_async()
539 memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx)); skcipher_recvmsg_async()
540 ablkcipher_request_set_tfm(req, crypto_ablkcipher_reqtfm(&ctx->req)); skcipher_recvmsg_async()
548 if (!ctx->used) { skcipher_recvmsg_async()
553 sgl = list_first_entry(&ctx->tsgl, skcipher_recvmsg_async()
560 used = min_t(unsigned long, ctx->used, skcipher_recvmsg_async()
584 /* Need to take over the tx sgl from ctx skcipher_recvmsg_async()
619 err = ctx->enc ? crypto_ablkcipher_encrypt(req) : skcipher_recvmsg_async()
622 atomic_inc(&ctx->inflight); skcipher_recvmsg_async()
640 struct skcipher_ctx *ctx = ask->private; skcipher_recvmsg_sync() local
642 &ctx->req)); skcipher_recvmsg_sync()
651 sgl = list_first_entry(&ctx->tsgl, skcipher_recvmsg_sync()
658 if (!ctx->used) { skcipher_recvmsg_sync()
664 used = min_t(unsigned long, ctx->used, msg_data_left(msg)); skcipher_recvmsg_sync()
666 used = af_alg_make_sg(&ctx->rsgl, &msg->msg_iter, used); skcipher_recvmsg_sync()
671 if (ctx->more || used < ctx->used) skcipher_recvmsg_sync()
678 ablkcipher_request_set_crypt(&ctx->req, sg, skcipher_recvmsg_sync()
679 ctx->rsgl.sg, used, skcipher_recvmsg_sync()
680 ctx->iv); skcipher_recvmsg_sync()
683 ctx->enc ? skcipher_recvmsg_sync()
684 crypto_ablkcipher_encrypt(&ctx->req) : skcipher_recvmsg_sync()
685 crypto_ablkcipher_decrypt(&ctx->req), skcipher_recvmsg_sync()
686 &ctx->completion); skcipher_recvmsg_sync()
689 af_alg_free_sg(&ctx->rsgl); skcipher_recvmsg_sync()
721 struct skcipher_ctx *ctx = ask->private; skcipher_poll() local
727 if (ctx->used) skcipher_poll()
896 struct skcipher_ctx *ctx = ask->private; skcipher_wait() local
899 while (atomic_read(&ctx->inflight) && ctr++ < 100) skcipher_wait()
906 struct skcipher_ctx *ctx = ask->private; skcipher_sock_destruct() local
907 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(&ctx->req); skcipher_sock_destruct()
909 if (atomic_read(&ctx->inflight)) skcipher_sock_destruct()
913 sock_kzfree_s(sk, ctx->iv, crypto_ablkcipher_ivsize(tfm)); skcipher_sock_destruct()
914 sock_kfree_s(sk, ctx, ctx->len); skcipher_sock_destruct()
920 struct skcipher_ctx *ctx; skcipher_accept_parent_nokey() local
924 unsigned int len = sizeof(*ctx) + crypto_ablkcipher_reqsize(skcipher); skcipher_accept_parent_nokey()
926 ctx = sock_kmalloc(sk, len, GFP_KERNEL); skcipher_accept_parent_nokey()
927 if (!ctx) skcipher_accept_parent_nokey()
930 ctx->iv = sock_kmalloc(sk, crypto_ablkcipher_ivsize(skcipher), skcipher_accept_parent_nokey()
932 if (!ctx->iv) { skcipher_accept_parent_nokey()
933 sock_kfree_s(sk, ctx, len); skcipher_accept_parent_nokey()
937 memset(ctx->iv, 0, crypto_ablkcipher_ivsize(skcipher)); skcipher_accept_parent_nokey()
939 INIT_LIST_HEAD(&ctx->tsgl); skcipher_accept_parent_nokey()
940 ctx->len = len; skcipher_accept_parent_nokey()
941 ctx->used = 0; skcipher_accept_parent_nokey()
942 ctx->more = 0; skcipher_accept_parent_nokey()
943 ctx->merge = 0; skcipher_accept_parent_nokey()
944 ctx->enc = 0; skcipher_accept_parent_nokey()
945 atomic_set(&ctx->inflight, 0); skcipher_accept_parent_nokey()
946 af_alg_init_completion(&ctx->completion); skcipher_accept_parent_nokey()
948 ask->private = ctx; skcipher_accept_parent_nokey()
950 ablkcipher_request_set_tfm(&ctx->req, skcipher); skcipher_accept_parent_nokey()
951 ablkcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG, skcipher_accept_parent_nokey()
952 af_alg_complete, &ctx->completion); skcipher_accept_parent_nokey()
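skcipher_accept_parent_nokey() above backs the AF_ALG "skcipher" socket type. A minimal userspace sketch follows; ecb(aes) is chosen purely because it needs no IV, so the control message reduces to the ALG_SET_OP word that lands in ctx->enc (a real application would use a proper mode and pass ALG_SET_IV as well). Error checking elided:

#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
    struct sockaddr_alg sa = {
        .salg_family = AF_ALG,
        .salg_type   = "skcipher",
        .salg_name   = "ecb(aes)",
    };
    unsigned char key[16] = { 0 }, buf[16] = "fill me with 16";
    char cbuf[CMSG_SPACE(sizeof(int))] = { 0 };
    struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
    struct msghdr msg = { .msg_control = cbuf, .msg_controllen = sizeof(cbuf),
                          .msg_iov = &iov, .msg_iovlen = 1 };
    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
    int tfmfd, opfd;

    tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
    bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
    setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
    opfd = accept(tfmfd, NULL, 0);

    cmsg->cmsg_level = SOL_ALG;
    cmsg->cmsg_type  = ALG_SET_OP;
    cmsg->cmsg_len   = CMSG_LEN(sizeof(int));
    *(int *)CMSG_DATA(cmsg) = ALG_OP_ENCRYPT;

    sendmsg(opfd, &msg, 0);        /* skcipher_sendmsg() queues one block */
    read(opfd, buf, sizeof(buf));  /* skcipher_recvmsg() returns ciphertext */

    close(opfd);
    close(tfmfd);
    return 0;
}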
H A Dghash-generic.c47 struct ghash_ctx *ctx = crypto_shash_ctx(tfm); ghash_setkey() local
54 if (ctx->gf128) ghash_setkey()
55 gf128mul_free_4k(ctx->gf128); ghash_setkey()
56 ctx->gf128 = gf128mul_init_4k_lle((be128 *)key); ghash_setkey()
57 if (!ctx->gf128) ghash_setkey()
67 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); ghash_update() local
70 if (!ctx->gf128) ghash_update()
84 gf128mul_4k_lle((be128 *)dst, ctx->gf128); ghash_update()
89 gf128mul_4k_lle((be128 *)dst, ctx->gf128); ghash_update()
103 static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) ghash_flush() argument
113 gf128mul_4k_lle((be128 *)dst, ctx->gf128); ghash_flush()
122 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); ghash_final() local
125 if (!ctx->gf128) ghash_final()
128 ghash_flush(ctx, dctx); ghash_final()
136 struct ghash_ctx *ctx = crypto_tfm_ctx(tfm); ghash_exit_tfm() local
137 if (ctx->gf128) ghash_exit_tfm()
138 gf128mul_free_4k(ctx->gf128); ghash_exit_tfm()
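gf128mul_4k_lle() multiplies by the fixed hash key H using a 4-kbyte table; the function being tabulated is plain GF(2^128) multiplication with the GHASH polynomial x^128 + x^7 + x^2 + x + 1 in the MSB-first ("lle") bit convention. A slow bitwise reference, following the NIST SP 800-38D description, with operands as big-endian byte arrays:

#include <stdint.h>

static void gf128_mul(uint8_t r[16], const uint8_t x[16], const uint8_t y[16])
{
    uint8_t z[16] = { 0 };
    uint8_t v[16];

    for (int i = 0; i < 16; i++)
        v[i] = y[i];

    for (int i = 0; i < 128; i++) {
        if (x[i / 8] & (0x80 >> (i % 8)))    /* bit i of x, MSB first */
            for (int j = 0; j < 16; j++)
                z[j] ^= v[j];                /* Z ^= V */

        int carry = v[15] & 1;               /* V = V * x (shift right) */
        for (int j = 15; j > 0; j--)
            v[j] = (v[j] >> 1) | (v[j - 1] << 7);
        v[0] >>= 1;
        if (carry)
            v[0] ^= 0xe1;                    /* reduce by 0xE1 || 0^120 */
    }
    for (int i = 0; i < 16; i++)
        r[i] = z[i];
}

Each ghash_update() step above is then dst ^= block; dst = gf128_mul(dst, H).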
H A Dxcbc.c42 u8 ctx[]; member in struct:xcbc_tfm_ctx
58 u8 ctx[]; member in struct:xcbc_desc_ctx
65 struct xcbc_tfm_ctx *ctx = crypto_shash_ctx(parent); crypto_xcbc_digest_setkey() local
67 u8 *consts = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); crypto_xcbc_digest_setkey()
71 if ((err = crypto_cipher_setkey(ctx->child, inkey, keylen))) crypto_xcbc_digest_setkey()
74 crypto_cipher_encrypt_one(ctx->child, consts, (u8 *)ks + bs); crypto_xcbc_digest_setkey()
75 crypto_cipher_encrypt_one(ctx->child, consts + bs, (u8 *)ks + bs * 2); crypto_xcbc_digest_setkey()
76 crypto_cipher_encrypt_one(ctx->child, key1, (u8 *)ks); crypto_xcbc_digest_setkey()
78 return crypto_cipher_setkey(ctx->child, key1, bs); crypto_xcbc_digest_setkey()
85 struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); crypto_xcbc_digest_init() local
87 u8 *prev = PTR_ALIGN(&ctx->ctx[0], alignmask + 1) + bs; crypto_xcbc_digest_init()
89 ctx->len = 0; crypto_xcbc_digest_init()
101 struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); crypto_xcbc_digest_update() local
104 u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); crypto_xcbc_digest_update()
108 if ((ctx->len + len) <= bs) { crypto_xcbc_digest_update()
109 memcpy(odds + ctx->len, p, len); crypto_xcbc_digest_update()
110 ctx->len += len; crypto_xcbc_digest_update()
115 memcpy(odds + ctx->len, p, bs - ctx->len); crypto_xcbc_digest_update()
116 len -= bs - ctx->len; crypto_xcbc_digest_update()
117 p += bs - ctx->len; crypto_xcbc_digest_update()
123 ctx->len = 0; crypto_xcbc_digest_update()
136 ctx->len = len; crypto_xcbc_digest_update()
147 struct xcbc_desc_ctx *ctx = shash_desc_ctx(pdesc); crypto_xcbc_digest_final() local
150 u8 *consts = PTR_ALIGN(&tctx->ctx[0], alignmask + 1); crypto_xcbc_digest_final()
151 u8 *odds = PTR_ALIGN(&ctx->ctx[0], alignmask + 1); crypto_xcbc_digest_final()
155 if (ctx->len != bs) { crypto_xcbc_digest_final()
157 u8 *p = odds + ctx->len; crypto_xcbc_digest_final()
162 rlen = bs - ctx->len - 1; crypto_xcbc_digest_final()
182 struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); xcbc_init_tfm() local
188 ctx->child = cipher; xcbc_init_tfm()
195 struct xcbc_tfm_ctx *ctx = crypto_tfm_ctx(tfm); xcbc_exit_tfm() local
196 crypto_free_cipher(ctx->child); xcbc_exit_tfm()
H A Dcmac.c35 u8 ctx[]; member in struct:cmac_tfm_ctx
51 u8 ctx[]; member in struct:cmac_desc_ctx
58 struct cmac_tfm_ctx *ctx = crypto_shash_ctx(parent); crypto_cmac_digest_setkey() local
60 __be64 *consts = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); crypto_cmac_digest_setkey()
65 err = crypto_cipher_setkey(ctx->child, inkey, keylen); crypto_cmac_digest_setkey()
71 crypto_cipher_encrypt_one(ctx->child, (u8 *)consts, (u8 *)consts); crypto_cmac_digest_setkey()
111 struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); crypto_cmac_digest_init() local
113 u8 *prev = PTR_ALIGN((void *)ctx->ctx, alignmask + 1) + bs; crypto_cmac_digest_init()
115 ctx->len = 0; crypto_cmac_digest_init()
127 struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); crypto_cmac_digest_update() local
130 u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); crypto_cmac_digest_update()
134 if ((ctx->len + len) <= bs) { crypto_cmac_digest_update()
135 memcpy(odds + ctx->len, p, len); crypto_cmac_digest_update()
136 ctx->len += len; crypto_cmac_digest_update()
141 memcpy(odds + ctx->len, p, bs - ctx->len); crypto_cmac_digest_update()
142 len -= bs - ctx->len; crypto_cmac_digest_update()
143 p += bs - ctx->len; crypto_cmac_digest_update()
149 ctx->len = 0; crypto_cmac_digest_update()
162 ctx->len = len; crypto_cmac_digest_update()
173 struct cmac_desc_ctx *ctx = shash_desc_ctx(pdesc); crypto_cmac_digest_final() local
176 u8 *consts = PTR_ALIGN((void *)tctx->ctx, alignmask + 1); crypto_cmac_digest_final()
177 u8 *odds = PTR_ALIGN((void *)ctx->ctx, alignmask + 1); crypto_cmac_digest_final()
181 if (ctx->len != bs) { crypto_cmac_digest_final()
183 u8 *p = odds + ctx->len; crypto_cmac_digest_final()
188 rlen = bs - ctx->len - 1; crypto_cmac_digest_final()
208 struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); cmac_init_tfm() local
214 ctx->child = cipher; cmac_init_tfm()
221 struct cmac_tfm_ctx *ctx = crypto_tfm_ctx(tfm); cmac_exit_tfm() local
222 crypto_free_cipher(ctx->child); cmac_exit_tfm()
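CMAC (NIST SP 800-38B) differs from XCBC only in how the final-block masks are made: instead of encrypting constants, it doubles L = E_K(0) in GF(2^128), which is the shift-and-conditional-XOR the setkey fragment performs on "consts". A reference for the 16-byte block size, again with a hypothetical aes_encrypt_block():

#include <stdint.h>

/* hypothetical single-block primitive */
void aes_encrypt_block(const uint8_t *key, uint8_t *out, const uint8_t *in);

static void gf_double(uint8_t out[16], const uint8_t in[16])
{
    uint8_t carry = in[0] >> 7;              /* bit shifted out on the left */

    for (int i = 0; i < 15; i++)
        out[i] = (in[i] << 1) | (in[i + 1] >> 7);
    out[15] = in[15] << 1;
    if (carry)
        out[15] ^= 0x87;                     /* reduce by x^128+x^7+x^2+x+1 */
}

static void cmac_subkeys(const uint8_t *key, uint8_t k1[16], uint8_t k2[16])
{
    uint8_t l[16] = { 0 };

    aes_encrypt_block(key, l, l);            /* L = E_K(0^16) */
    gf_double(k1, l);                        /* K1: full final block   */
    gf_double(k2, k1);                       /* K2: padded final block */
}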
H A Dlrw.c48 int lrw_init_table(struct lrw_table_ctx *ctx, const u8 *tweak) lrw_init_table() argument
53 if (ctx->table) lrw_init_table()
54 gf128mul_free_64k(ctx->table); lrw_init_table()
57 ctx->table = gf128mul_init_64k_bbe((be128 *)tweak); lrw_init_table()
58 if (!ctx->table) lrw_init_table()
64 ctx->mulinc[i] = tmp; lrw_init_table()
65 gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); lrw_init_table()
72 void lrw_free_table(struct lrw_table_ctx *ctx) lrw_free_table() argument
74 if (ctx->table) lrw_free_table()
75 gf128mul_free_64k(ctx->table); lrw_free_table()
82 struct priv *ctx = crypto_tfm_ctx(parent); setkey() local
83 struct crypto_cipher *child = ctx->child; setkey()
96 return lrw_init_table(&ctx->table, tweak); setkey()
139 struct blkcipher_walk *w, struct priv *ctx, crypt()
146 .tfm = crypto_cipher_tfm(ctx->child), crypt()
165 gf128mul_64k_bbe(&s.t, ctx->table.table); crypt()
174 &ctx->table.mulinc[get_index128(iv)]); crypt()
198 struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); encrypt() local
202 return crypt(desc, &w, ctx, encrypt()
203 crypto_cipher_alg(ctx->child)->cia_encrypt); encrypt()
209 struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); decrypt() local
213 return crypt(desc, &w, ctx, decrypt()
214 crypto_cipher_alg(ctx->child)->cia_decrypt); decrypt()
223 struct lrw_table_ctx *ctx = req->table_ctx; lrw_crypt() local
248 gf128mul_64k_bbe(&t_buf[0], ctx->table); lrw_crypt()
259 &ctx->mulinc[get_index128(iv)]); lrw_crypt()
301 struct priv *ctx = crypto_tfm_ctx(tfm); init_tfm() local
314 ctx->child = cipher; init_tfm()
320 struct priv *ctx = crypto_tfm_ctx(tfm); exit_tfm() local
322 lrw_free_table(&ctx->table); exit_tfm()
323 crypto_free_cipher(ctx->child); exit_tfm()
138 crypt(struct blkcipher_desc *d, struct blkcipher_walk *w, struct priv *ctx, void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) crypt() argument
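LRW encrypts block i as C_i = E_K1(P_i ^ T_i) ^ T_i with tweak T_i = K2 (x) i in GF(2^128); the mulinc[] table above lets the kernel step from T_i to T_(i+1) with a single table multiply instead of recomputing the product, and get_index128() selects which increment applies. A per-block sketch, with gf128_tweak() and aes_encrypt_block() as hypothetical helpers:

#include <stdint.h>

/* hypothetical helpers */
void gf128_tweak(uint8_t t[16], const uint8_t k2[16], uint64_t index);
void aes_encrypt_block(const uint8_t *key, uint8_t *out, const uint8_t *in);

static void lrw_encrypt_block(const uint8_t *k1, const uint8_t k2[16],
                              uint64_t index, uint8_t out[16],
                              const uint8_t in[16])
{
    uint8_t t[16], buf[16];

    gf128_tweak(t, k2, index);               /* T = K2 (x) block index */
    for (int i = 0; i < 16; i++)
        buf[i] = in[i] ^ t[i];               /* pre-whitening  */
    aes_encrypt_block(k1, buf, buf);
    for (int i = 0; i < 16; i++)
        out[i] = buf[i] ^ t[i];              /* post-whitening */
}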
H A Dtea.c50 struct tea_ctx *ctx = crypto_tfm_ctx(tfm); tea_setkey() local
53 ctx->KEY[0] = le32_to_cpu(key[0]); tea_setkey()
54 ctx->KEY[1] = le32_to_cpu(key[1]); tea_setkey()
55 ctx->KEY[2] = le32_to_cpu(key[2]); tea_setkey()
56 ctx->KEY[3] = le32_to_cpu(key[3]); tea_setkey()
66 struct tea_ctx *ctx = crypto_tfm_ctx(tfm); tea_encrypt() local
73 k0 = ctx->KEY[0]; tea_encrypt()
74 k1 = ctx->KEY[1]; tea_encrypt()
75 k2 = ctx->KEY[2]; tea_encrypt()
76 k3 = ctx->KEY[3]; tea_encrypt()
94 struct tea_ctx *ctx = crypto_tfm_ctx(tfm); tea_decrypt() local
101 k0 = ctx->KEY[0]; tea_decrypt()
102 k1 = ctx->KEY[1]; tea_decrypt()
103 k2 = ctx->KEY[2]; tea_decrypt()
104 k3 = ctx->KEY[3]; tea_decrypt()
123 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); xtea_setkey() local
126 ctx->KEY[0] = le32_to_cpu(key[0]); xtea_setkey()
127 ctx->KEY[1] = le32_to_cpu(key[1]); xtea_setkey()
128 ctx->KEY[2] = le32_to_cpu(key[2]); xtea_setkey()
129 ctx->KEY[3] = le32_to_cpu(key[3]); xtea_setkey()
139 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); xtea_encrypt() local
147 y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); xtea_encrypt()
149 z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 & 3]); xtea_encrypt()
159 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); xtea_decrypt() local
169 z -= ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 & 3]); xtea_decrypt()
171 y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); xtea_decrypt()
183 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); xeta_encrypt() local
191 y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; xeta_encrypt()
193 z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3]; xeta_encrypt()
203 struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); xeta_decrypt() local
213 z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3]; xeta_decrypt()
215 y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; xeta_decrypt()
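The tea_encrypt() hits above skip the round loop, so here is the canonical cipher in full; k[0..3] correspond to ctx->KEY[0..3] after the le32_to_cpu() loads in tea_setkey():

#include <stdint.h>

#define TEA_DELTA 0x9e3779b9u                /* 2^32 / golden ratio */

static void tea_encrypt_block(const uint32_t k[4], uint32_t v[2])
{
    uint32_t y = v[0], z = v[1], sum = 0;

    for (int i = 0; i < 32; i++) {           /* 32 cycles = 64 Feistel rounds */
        sum += TEA_DELTA;
        y += ((z << 4) + k[0]) ^ (z + sum) ^ ((z >> 5) + k[1]);
        z += ((y << 4) + k[2]) ^ (y + sum) ^ ((y >> 5) + k[3]);
    }
    v[0] = y;
    v[1] = z;
}

XTEA changes only how sum and the key words enter the round function; the xeta variant shown above preserves an earlier, slightly miscoded XTEA round for compatibility, and decryption in all three runs the same schedule backwards.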
H A Dvmac.c321 static void vhash_abort(struct vmac_ctx *ctx) vhash_abort() argument
323 ctx->polytmp[0] = ctx->polykey[0]; vhash_abort()
324 ctx->polytmp[1] = ctx->polykey[1]; vhash_abort()
325 ctx->first_block_processed = 0; vhash_abort()
369 struct vmac_ctx *ctx) vhash_update()
372 const u64 *kptr = (u64 *)ctx->nhkey; vhash_update()
375 u64 pkh = ctx->polykey[0]; vhash_update()
376 u64 pkl = ctx->polykey[1]; vhash_update()
386 ch = ctx->polytmp[0]; vhash_update()
387 cl = ctx->polytmp[1]; vhash_update()
389 if (!ctx->first_block_processed) { vhash_update()
390 ctx->first_block_processed = 1; vhash_update()
405 ctx->polytmp[0] = ch; vhash_update()
406 ctx->polytmp[1] = cl; vhash_update()
410 u64 *tagl, struct vmac_ctx *ctx) vhash()
413 const u64 *kptr = (u64 *)ctx->nhkey; vhash()
416 u64 pkh = ctx->polykey[0]; vhash()
417 u64 pkl = ctx->polykey[1]; vhash()
423 if (ctx->first_block_processed) { vhash()
424 ch = ctx->polytmp[0]; vhash()
425 cl = ctx->polytmp[1]; vhash()
456 vhash_abort(ctx); vhash()
458 return l3hash(ch, cl, ctx->l3key[0], ctx->l3key[1], remaining); vhash()
463 struct vmac_ctx_t *ctx) vmac()
469 in_n = ctx->__vmac_ctx.cached_nonce; vmac()
470 out_p = ctx->__vmac_ctx.cached_aes; vmac()
477 crypto_cipher_encrypt_one(ctx->child, vmac()
483 h = vhash(m, mbytes, (u64 *)0, &ctx->__vmac_ctx); vmac()
487 static int vmac_set_key(unsigned char user_key[], struct vmac_ctx_t *ctx) vmac_set_key() argument
493 err = crypto_cipher_setkey(ctx->child, user_key, VMAC_KEY_LEN); vmac_set_key()
499 for (i = 0; i < sizeof(ctx->__vmac_ctx.nhkey)/8; i += 2) { vmac_set_key()
500 crypto_cipher_encrypt_one(ctx->child, vmac_set_key()
502 ctx->__vmac_ctx.nhkey[i] = be64_to_cpup(out); vmac_set_key()
503 ctx->__vmac_ctx.nhkey[i+1] = be64_to_cpup(out+1); vmac_set_key()
510 for (i = 0; i < sizeof(ctx->__vmac_ctx.polykey)/8; i += 2) { vmac_set_key()
511 crypto_cipher_encrypt_one(ctx->child, vmac_set_key()
513 ctx->__vmac_ctx.polytmp[i] = vmac_set_key()
514 ctx->__vmac_ctx.polykey[i] = vmac_set_key()
516 ctx->__vmac_ctx.polytmp[i+1] = vmac_set_key()
517 ctx->__vmac_ctx.polykey[i+1] = vmac_set_key()
525 for (i = 0; i < sizeof(ctx->__vmac_ctx.l3key)/8; i += 2) { vmac_set_key()
527 crypto_cipher_encrypt_one(ctx->child, vmac_set_key()
529 ctx->__vmac_ctx.l3key[i] = be64_to_cpup(out); vmac_set_key()
530 ctx->__vmac_ctx.l3key[i+1] = be64_to_cpup(out+1); vmac_set_key()
532 } while (ctx->__vmac_ctx.l3key[i] >= p64 vmac_set_key()
533 || ctx->__vmac_ctx.l3key[i+1] >= p64); vmac_set_key()
537 ctx->__vmac_ctx.cached_nonce[0] = (u64)-1; /* Ensure illegal nonce */ vmac_set_key()
538 ctx->__vmac_ctx.cached_nonce[1] = (u64)0; /* Ensure illegal nonce */ vmac_set_key()
539 ctx->__vmac_ctx.first_block_processed = 0; vmac_set_key()
547 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); vmac_setkey() local
554 return vmac_set_key((u8 *)key, ctx); vmac_setkey()
566 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); vmac_update() local
570 expand = VMAC_NHBYTES - ctx->partial_size > 0 ? vmac_update()
571 VMAC_NHBYTES - ctx->partial_size : 0; vmac_update()
575 memcpy(ctx->partial + ctx->partial_size, p, min); vmac_update()
576 ctx->partial_size += min; vmac_update()
581 vhash_update(ctx->partial, VMAC_NHBYTES, &ctx->__vmac_ctx); vmac_update()
582 ctx->partial_size = 0; vmac_update()
588 memcpy(ctx->partial, p + len - (len % VMAC_NHBYTES), vmac_update()
590 ctx->partial_size = len % VMAC_NHBYTES; vmac_update()
593 vhash_update(p, len - len % VMAC_NHBYTES, &ctx->__vmac_ctx); vmac_update()
601 struct vmac_ctx_t *ctx = crypto_shash_ctx(parent); vmac_final() local
610 if (ctx->partial_size) { vmac_final()
611 memset(ctx->partial + ctx->partial_size, 0, vmac_final()
612 VMAC_NHBYTES - ctx->partial_size); vmac_final()
614 mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); vmac_final()
617 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); vmac_final()
618 ctx->partial_size = 0; vmac_final()
627 struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); vmac_init_tfm() local
633 ctx->child = cipher; vmac_init_tfm()
639 struct vmac_ctx_t *ctx = crypto_tfm_ctx(tfm); vmac_exit_tfm() local
640 crypto_free_cipher(ctx->child); vmac_exit_tfm()
367 vhash_update(const unsigned char *m, unsigned int mbytes, struct vmac_ctx *ctx) vhash_update() argument
409 vhash(unsigned char m[], unsigned int mbytes, u64 *tagl, struct vmac_ctx *ctx) vhash() argument
461 vmac(unsigned char m[], unsigned int mbytes, const unsigned char n[16], u64 *tagl, struct vmac_ctx_t *ctx) vmac() argument
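The vmac() fragment shows the outer construction: the tag is VHASH(m) plus (mod 2^64) a pad obtained by encrypting the nonce, and the cached_nonce/cached_aes pair lets back-to-back calls with the same nonce skip the block cipher. A heavily simplified sketch, with vhash64() and aes_encrypt_block() as hypothetical stand-ins (the real code also uses part of the nonce to select half of the AES output):

#include <stdint.h>
#include <stddef.h>

/* hypothetical stand-ins */
uint64_t vhash64(const uint8_t *m, size_t len);
void aes_encrypt_block(const uint8_t *key, uint8_t *out, const uint8_t *in);

static uint64_t load_be64(const uint8_t *p)
{
    uint64_t v = 0;

    for (int i = 0; i < 8; i++)
        v = (v << 8) | p[i];
    return v;
}

static uint64_t vmac_tag(const uint8_t *key, const uint8_t nonce[16],
                         const uint8_t *msg, size_t len)
{
    uint8_t pad[16];

    aes_encrypt_block(key, pad, nonce);          /* PRF(nonce) */
    return vhash64(msg, len) + load_be64(pad);   /* wrap at 2^64 is the mod */
}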
H A Dablk_helper.c40 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); ablk_set_key() local
41 struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base; ablk_set_key()
57 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); __ablk_encrypt() local
60 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); __ablk_encrypt()
72 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); ablk_encrypt() local
79 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); ablk_encrypt()
91 struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm); ablk_decrypt() local
98 ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); ablk_decrypt()
104 desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm); ablk_decrypt()
116 struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); ablk_exit() local
118 cryptd_free_ablkcipher(ctx->cryptd_tfm); ablk_exit()
124 struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm); ablk_init_common() local
132 ctx->cryptd_tfm = cryptd_tfm; ablk_init_common()
H A Dlzo.c32 struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); lzo_init() local
34 ctx->lzo_comp_mem = kmalloc(LZO1X_MEM_COMPRESS, lzo_init()
36 if (!ctx->lzo_comp_mem) lzo_init()
37 ctx->lzo_comp_mem = vmalloc(LZO1X_MEM_COMPRESS); lzo_init()
38 if (!ctx->lzo_comp_mem) lzo_init()
46 struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); lzo_exit() local
48 kvfree(ctx->lzo_comp_mem); lzo_exit()
54 struct lzo_ctx *ctx = crypto_tfm_ctx(tfm); lzo_compress() local
58 err = lzo1x_1_compress(src, slen, dst, &tmp_len, ctx->lzo_comp_mem); lzo_compress()
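lzo1x_1_compress() needs a caller-supplied scratch area of LZO1X_MEM_COMPRESS bytes, which is all lzo_init() above is allocating (kmalloc first, vmalloc as fallback). A kernel-style sketch of the call pattern; the destination must be sized for the worst case, lzo1x_worst_compress(slen):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/lzo.h>
#include <linux/vmalloc.h>

static int lzo_compress_buf(const u8 *src, size_t slen, u8 *dst, size_t *dlen)
{
    void *wrkmem = vmalloc(LZO1X_MEM_COMPRESS);  /* scratch, not output */
    int err;

    if (!wrkmem)
        return -ENOMEM;
    err = lzo1x_1_compress(src, slen, dst, dlen, wrkmem);
    vfree(wrkmem);
    return err == LZO_E_OK ? 0 : -EINVAL;
}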
H A Ddeflate.c45 static int deflate_comp_init(struct deflate_ctx *ctx) deflate_comp_init() argument
48 struct z_stream_s *stream = &ctx->comp_stream; deflate_comp_init()
70 static int deflate_decomp_init(struct deflate_ctx *ctx) deflate_decomp_init() argument
73 struct z_stream_s *stream = &ctx->decomp_stream; deflate_decomp_init()
92 static void deflate_comp_exit(struct deflate_ctx *ctx) deflate_comp_exit() argument
94 zlib_deflateEnd(&ctx->comp_stream); deflate_comp_exit()
95 vfree(ctx->comp_stream.workspace); deflate_comp_exit()
98 static void deflate_decomp_exit(struct deflate_ctx *ctx) deflate_decomp_exit() argument
100 zlib_inflateEnd(&ctx->decomp_stream); deflate_decomp_exit()
101 vfree(ctx->decomp_stream.workspace); deflate_decomp_exit()
106 struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); deflate_init() local
109 ret = deflate_comp_init(ctx); deflate_init()
112 ret = deflate_decomp_init(ctx); deflate_init()
114 deflate_comp_exit(ctx); deflate_init()
121 struct deflate_ctx *ctx = crypto_tfm_ctx(tfm); deflate_exit() local
123 deflate_comp_exit(ctx); deflate_exit()
124 deflate_decomp_exit(ctx); deflate_exit()
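The kernel's deflate keeps a persistent pair of z_stream states configured with a negative windowBits value, which selects raw deflate with no zlib header or checksum. A userspace one-shot sketch of the same configuration (zlib assumed; -15 is the common maximal raw window, while the kernel passes its own smaller DEFLATE_DEF_WINBITS):

#include <string.h>
#include <zlib.h>

static int raw_deflate(const unsigned char *src, size_t slen,
                       unsigned char *dst, size_t *dlen)
{
    z_stream s;
    int err;

    memset(&s, 0, sizeof(s));
    err = deflateInit2(&s, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                       -15 /* raw deflate */, 8, Z_DEFAULT_STRATEGY);
    if (err != Z_OK)
        return -1;

    s.next_in  = (unsigned char *)src;
    s.avail_in = slen;
    s.next_out  = dst;
    s.avail_out = *dlen;

    err = deflate(&s, Z_FINISH);          /* one-shot compress */
    *dlen = s.total_out;
    deflateEnd(&s);
    return err == Z_STREAM_END ? 0 : -1;
}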
H A Dfcrypt.c238 const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm); fcrypt_encrypt() local
245 F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); fcrypt_encrypt()
246 F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); fcrypt_encrypt()
247 F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); fcrypt_encrypt()
248 F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); fcrypt_encrypt()
249 F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); fcrypt_encrypt()
250 F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); fcrypt_encrypt()
251 F_ENCRYPT(X.r, X.l, ctx->sched[0x6]); fcrypt_encrypt()
252 F_ENCRYPT(X.l, X.r, ctx->sched[0x7]); fcrypt_encrypt()
253 F_ENCRYPT(X.r, X.l, ctx->sched[0x8]); fcrypt_encrypt()
254 F_ENCRYPT(X.l, X.r, ctx->sched[0x9]); fcrypt_encrypt()
255 F_ENCRYPT(X.r, X.l, ctx->sched[0xa]); fcrypt_encrypt()
256 F_ENCRYPT(X.l, X.r, ctx->sched[0xb]); fcrypt_encrypt()
257 F_ENCRYPT(X.r, X.l, ctx->sched[0xc]); fcrypt_encrypt()
258 F_ENCRYPT(X.l, X.r, ctx->sched[0xd]); fcrypt_encrypt()
259 F_ENCRYPT(X.r, X.l, ctx->sched[0xe]); fcrypt_encrypt()
260 F_ENCRYPT(X.l, X.r, ctx->sched[0xf]); fcrypt_encrypt()
270 const struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm); fcrypt_decrypt() local
277 F_ENCRYPT(X.l, X.r, ctx->sched[0xf]); fcrypt_decrypt()
278 F_ENCRYPT(X.r, X.l, ctx->sched[0xe]); fcrypt_decrypt()
279 F_ENCRYPT(X.l, X.r, ctx->sched[0xd]); fcrypt_decrypt()
280 F_ENCRYPT(X.r, X.l, ctx->sched[0xc]); fcrypt_decrypt()
281 F_ENCRYPT(X.l, X.r, ctx->sched[0xb]); fcrypt_decrypt()
282 F_ENCRYPT(X.r, X.l, ctx->sched[0xa]); fcrypt_decrypt()
283 F_ENCRYPT(X.l, X.r, ctx->sched[0x9]); fcrypt_decrypt()
284 F_ENCRYPT(X.r, X.l, ctx->sched[0x8]); fcrypt_decrypt()
285 F_ENCRYPT(X.l, X.r, ctx->sched[0x7]); fcrypt_decrypt()
286 F_ENCRYPT(X.r, X.l, ctx->sched[0x6]); fcrypt_decrypt()
287 F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); fcrypt_decrypt()
288 F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); fcrypt_decrypt()
289 F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); fcrypt_decrypt()
290 F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); fcrypt_decrypt()
291 F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); fcrypt_decrypt()
292 F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); fcrypt_decrypt()
305 struct fcrypt_ctx *ctx = crypto_tfm_ctx(tfm); fcrypt_setkey() local
331 ctx->sched[0x0] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
332 ctx->sched[0x1] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
333 ctx->sched[0x2] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
334 ctx->sched[0x3] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
335 ctx->sched[0x4] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
336 ctx->sched[0x5] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
337 ctx->sched[0x6] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
338 ctx->sched[0x7] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
339 ctx->sched[0x8] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
340 ctx->sched[0x9] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
341 ctx->sched[0xa] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
342 ctx->sched[0xb] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
343 ctx->sched[0xc] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
344 ctx->sched[0xd] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
345 ctx->sched[0xe] = cpu_to_be32(k); ror56_64(k, 11); fcrypt_setkey()
346 ctx->sched[0xf] = cpu_to_be32(k); fcrypt_setkey()
372 ctx->sched[0x0] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
373 ctx->sched[0x1] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
374 ctx->sched[0x2] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
375 ctx->sched[0x3] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
376 ctx->sched[0x4] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
377 ctx->sched[0x5] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
378 ctx->sched[0x6] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
379 ctx->sched[0x7] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
380 ctx->sched[0x8] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
381 ctx->sched[0x9] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
382 ctx->sched[0xa] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
383 ctx->sched[0xb] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
384 ctx->sched[0xc] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
385 ctx->sched[0xd] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
386 ctx->sched[0xe] = cpu_to_be32(lo); ror56(hi, lo, 11); fcrypt_setkey()
387 ctx->sched[0xf] = cpu_to_be32(lo); fcrypt_setkey()
H A Dcrct10dif_generic.c44 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_init() local
46 ctx->crc = 0; chksum_init()
54 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_update() local
56 ctx->crc = crc_t10dif_generic(ctx->crc, data, length); chksum_update()
62 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_final() local
64 *(__u16 *)out = ctx->crc; chksum_final()
78 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_finup() local
80 return __chksum_finup(&ctx->crc, data, len, out); chksum_finup()
86 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_digest() local
88 return __chksum_finup(&ctx->crc, data, length, out); chksum_digest()
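crc_t10dif_generic() is table-driven; the function it computes is CRC16 with polynomial 0x8BB7, zero initial value and no bit reflection. A bit-at-a-time reference:

#include <stdint.h>
#include <stddef.h>

static uint16_t crc_t10dif_bitwise(uint16_t crc, const uint8_t *p, size_t len)
{
    while (len--) {
        crc ^= (uint16_t)(*p++) << 8;        /* feed next byte, MSB first */
        for (int i = 0; i < 8; i++)
            crc = (crc & 0x8000) ? (crc << 1) ^ 0x8bb7 : crc << 1;
    }
    return crc;
}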
H A Dmd5.c44 static inline void md5_transform_helper(struct md5_state *ctx) md5_transform_helper() argument
46 le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(u32)); md5_transform_helper()
47 md5_transform(ctx->hash, ctx->block); md5_transform_helper()
125 struct md5_state *ctx = shash_desc_ctx(desc); md5_export() local
127 memcpy(out, ctx, sizeof(*ctx)); md5_export()
133 struct md5_state *ctx = shash_desc_ctx(desc); md5_import() local
135 memcpy(ctx, in, sizeof(*ctx)); md5_import()
/linux-4.1.27/net/sunrpc/auth_gss/
H A Dgss_krb5_mech.c220 struct krb5_ctx *ctx, struct crypto_blkcipher **res) get_key()
248 *res = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, get_key()
252 "crypto algorithm %s\n", ctx->gk5e->encrypt_name); get_key()
258 "crypto algorithm %s\n", ctx->gk5e->encrypt_name); get_key()
275 gss_import_v1_context(const void *p, const void *end, struct krb5_ctx *ctx) gss_import_v1_context() argument
279 p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate)); gss_import_v1_context()
284 ctx->enctype = ENCTYPE_DES_CBC_RAW; gss_import_v1_context()
286 ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); gss_import_v1_context()
287 if (ctx->gk5e == NULL) { gss_import_v1_context()
315 p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); gss_import_v1_context()
318 p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send)); gss_import_v1_context()
321 p = simple_get_netobj(p, end, &ctx->mech_used); gss_import_v1_context()
324 p = get_key(p, end, ctx, &ctx->enc); gss_import_v1_context()
327 p = get_key(p, end, ctx, &ctx->seq); gss_import_v1_context()
338 crypto_free_blkcipher(ctx->seq); gss_import_v1_context()
340 crypto_free_blkcipher(ctx->enc); gss_import_v1_context()
342 kfree(ctx->mech_used.data); gss_import_v1_context()
348 context_v2_alloc_cipher(struct krb5_ctx *ctx, const char *cname, u8 *key) context_v2_alloc_cipher() argument
358 if (crypto_blkcipher_setkey(cp, key, ctx->gk5e->keylength)) { context_v2_alloc_cipher()
378 context_derive_keys_des3(struct krb5_ctx *ctx, gfp_t gfp_mask) context_derive_keys_des3() argument
387 keyin.data = ctx->Ksess; context_derive_keys_des3()
388 keyin.len = ctx->gk5e->keylength; context_derive_keys_des3()
389 keyout.len = ctx->gk5e->keylength; context_derive_keys_des3()
392 ctx->seq = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, context_derive_keys_des3()
393 ctx->Ksess); context_derive_keys_des3()
394 if (ctx->seq == NULL) context_derive_keys_des3()
397 ctx->enc = context_v2_alloc_cipher(ctx, ctx->gk5e->encrypt_name, context_derive_keys_des3()
398 ctx->Ksess); context_derive_keys_des3()
399 if (ctx->enc == NULL) context_derive_keys_des3()
404 keyout.data = ctx->cksum; context_derive_keys_des3()
405 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); context_derive_keys_des3()
415 crypto_free_blkcipher(ctx->enc); context_derive_keys_des3()
417 crypto_free_blkcipher(ctx->seq); context_derive_keys_des3()
428 context_derive_keys_rc4(struct krb5_ctx *ctx) context_derive_keys_rc4() argument
441 hmac = crypto_alloc_hash(ctx->gk5e->cksum_name, 0, CRYPTO_ALG_ASYNC); context_derive_keys_rc4()
444 __func__, PTR_ERR(hmac), ctx->gk5e->cksum_name); context_derive_keys_rc4()
449 err = crypto_hash_setkey(hmac, ctx->Ksess, ctx->gk5e->keylength); context_derive_keys_rc4()
463 err = crypto_hash_digest(&desc, sg, slen, ctx->cksum); context_derive_keys_rc4()
469 ctx->enc = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, context_derive_keys_rc4()
471 if (IS_ERR(ctx->enc)) { context_derive_keys_rc4()
472 err = PTR_ERR(ctx->enc); context_derive_keys_rc4()
476 ctx->seq = crypto_alloc_blkcipher(ctx->gk5e->encrypt_name, 0, context_derive_keys_rc4()
478 if (IS_ERR(ctx->seq)) { context_derive_keys_rc4()
479 crypto_free_blkcipher(ctx->enc); context_derive_keys_rc4()
480 err = PTR_ERR(ctx->seq); context_derive_keys_rc4()
496 context_derive_keys_new(struct krb5_ctx *ctx, gfp_t gfp_mask) context_derive_keys_new() argument
505 keyin.data = ctx->Ksess; context_derive_keys_new()
506 keyin.len = ctx->gk5e->keylength; context_derive_keys_new()
507 keyout.len = ctx->gk5e->keylength; context_derive_keys_new()
511 keyout.data = ctx->initiator_seal; context_derive_keys_new()
512 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); context_derive_keys_new()
518 ctx->initiator_enc = context_v2_alloc_cipher(ctx, context_derive_keys_new()
519 ctx->gk5e->encrypt_name, context_derive_keys_new()
520 ctx->initiator_seal); context_derive_keys_new()
521 if (ctx->initiator_enc == NULL) context_derive_keys_new()
526 keyout.data = ctx->acceptor_seal; context_derive_keys_new()
527 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); context_derive_keys_new()
533 ctx->acceptor_enc = context_v2_alloc_cipher(ctx, context_derive_keys_new()
534 ctx->gk5e->encrypt_name, context_derive_keys_new()
535 ctx->acceptor_seal); context_derive_keys_new()
536 if (ctx->acceptor_enc == NULL) context_derive_keys_new()
541 keyout.data = ctx->initiator_sign; context_derive_keys_new()
542 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); context_derive_keys_new()
551 keyout.data = ctx->acceptor_sign; context_derive_keys_new()
552 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); context_derive_keys_new()
561 keyout.data = ctx->initiator_integ; context_derive_keys_new()
562 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); context_derive_keys_new()
571 keyout.data = ctx->acceptor_integ; context_derive_keys_new()
572 err = krb5_derive_key(ctx->gk5e, &keyin, &keyout, &c, gfp_mask); context_derive_keys_new()
579 switch (ctx->enctype) { context_derive_keys_new()
582 ctx->initiator_enc_aux = context_derive_keys_new()
583 context_v2_alloc_cipher(ctx, "cbc(aes)", context_derive_keys_new()
584 ctx->initiator_seal); context_derive_keys_new()
585 if (ctx->initiator_enc_aux == NULL) context_derive_keys_new()
587 ctx->acceptor_enc_aux = context_derive_keys_new()
588 context_v2_alloc_cipher(ctx, "cbc(aes)", context_derive_keys_new()
589 ctx->acceptor_seal); context_derive_keys_new()
590 if (ctx->acceptor_enc_aux == NULL) { context_derive_keys_new()
591 crypto_free_blkcipher(ctx->initiator_enc_aux); context_derive_keys_new()
599 crypto_free_blkcipher(ctx->acceptor_enc); context_derive_keys_new()
601 crypto_free_blkcipher(ctx->initiator_enc); context_derive_keys_new()
607 gss_import_v2_context(const void *p, const void *end, struct krb5_ctx *ctx, gss_import_v2_context() argument
612 p = simple_get_bytes(p, end, &ctx->flags, sizeof(ctx->flags)); gss_import_v2_context()
615 ctx->initiate = ctx->flags & KRB5_CTX_FLAG_INITIATOR; gss_import_v2_context()
617 p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime)); gss_import_v2_context()
620 p = simple_get_bytes(p, end, &ctx->seq_send64, sizeof(ctx->seq_send64)); gss_import_v2_context()
624 ctx->seq_send = ctx->seq_send64; gss_import_v2_context()
625 if (ctx->seq_send64 != ctx->seq_send) { gss_import_v2_context()
627 (unsigned long)ctx->seq_send64, ctx->seq_send); gss_import_v2_context()
631 p = simple_get_bytes(p, end, &ctx->enctype, sizeof(ctx->enctype)); gss_import_v2_context()
635 if (ctx->enctype == ENCTYPE_DES3_CBC_SHA1) gss_import_v2_context()
636 ctx->enctype = ENCTYPE_DES3_CBC_RAW; gss_import_v2_context()
637 ctx->gk5e = get_gss_krb5_enctype(ctx->enctype); gss_import_v2_context()
638 if (ctx->gk5e == NULL) { gss_import_v2_context()
640 ctx->enctype); gss_import_v2_context()
644 keylen = ctx->gk5e->keylength; gss_import_v2_context()
646 p = simple_get_bytes(p, end, ctx->Ksess, keylen); gss_import_v2_context()
655 ctx->mech_used.data = kmemdup(gss_kerberos_mech.gm_oid.data, gss_import_v2_context()
657 if (unlikely(ctx->mech_used.data == NULL)) { gss_import_v2_context()
661 ctx->mech_used.len = gss_kerberos_mech.gm_oid.len; gss_import_v2_context()
663 switch (ctx->enctype) { gss_import_v2_context()
665 return context_derive_keys_des3(ctx, gfp_mask); gss_import_v2_context()
667 return context_derive_keys_rc4(ctx); gss_import_v2_context()
670 return context_derive_keys_new(ctx, gfp_mask); gss_import_v2_context()
686 struct krb5_ctx *ctx; gss_import_sec_context_kerberos() local
689 ctx = kzalloc(sizeof(*ctx), gfp_mask); gss_import_sec_context_kerberos()
690 if (ctx == NULL) gss_import_sec_context_kerberos()
694 ret = gss_import_v1_context(p, end, ctx); gss_import_sec_context_kerberos()
696 ret = gss_import_v2_context(p, end, ctx, gfp_mask); gss_import_sec_context_kerberos()
699 ctx_id->internal_ctx_id = ctx; gss_import_sec_context_kerberos()
701 *endtime = ctx->endtime; gss_import_sec_context_kerberos()
703 kfree(ctx); gss_import_sec_context_kerberos()
219 get_key(const void *p, const void *end, struct krb5_ctx *ctx, struct crypto_blkcipher **res) get_key() argument
H A Dgss_krb5_seal.c74 setup_token(struct krb5_ctx *ctx, struct xdr_netobj *token) setup_token() argument
78 int body_size = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; setup_token()
80 token->len = g_token_size(&ctx->mech_used, body_size); setup_token()
83 g_make_token_header(&ctx->mech_used, body_size, (unsigned char **)&ptr); setup_token()
92 *ptr++ = (__force u16)cpu_to_le16(ctx->gk5e->signalg); setup_token()
100 setup_token_v2(struct krb5_ctx *ctx, struct xdr_netobj *token) setup_token_v2() argument
106 if ((ctx->flags & KRB5_CTX_FLAG_INITIATOR) == 0) setup_token_v2()
108 if (ctx->flags & KRB5_CTX_FLAG_ACCEPTOR_SUBKEY) setup_token_v2()
123 token->len = GSS_KRB5_TOK_HDR_LEN + ctx->gk5e->cksumlength; setup_token_v2()
128 gss_get_mic_v1(struct krb5_ctx *ctx, struct xdr_buf *text, gss_get_mic_v1() argument
140 BUG_ON(ctx == NULL); gss_get_mic_v1()
144 ptr = setup_token(ctx, token); gss_get_mic_v1()
146 if (ctx->gk5e->keyed_cksum) gss_get_mic_v1()
147 cksumkey = ctx->cksum; gss_get_mic_v1()
151 if (make_checksum(ctx, ptr, 8, text, 0, cksumkey, gss_get_mic_v1()
158 seq_send = ctx->seq_send++; gss_get_mic_v1()
161 if (krb5_make_seq_num(ctx, ctx->seq, ctx->initiate ? 0 : 0xff, gss_get_mic_v1()
165 return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; gss_get_mic_v1()
169 gss_get_mic_v2(struct krb5_ctx *ctx, struct xdr_buf *text, gss_get_mic_v2() argument
183 krb5_hdr = setup_token_v2(ctx, token); gss_get_mic_v2()
188 seq_send = ctx->seq_send64++; gss_get_mic_v2()
192 if (ctx->initiate) { gss_get_mic_v2()
193 cksumkey = ctx->initiator_sign; gss_get_mic_v2()
196 cksumkey = ctx->acceptor_sign; gss_get_mic_v2()
200 if (make_checksum_v2(ctx, krb5_hdr, GSS_KRB5_TOK_HDR_LEN, gss_get_mic_v2()
208 return (ctx->endtime < now) ? GSS_S_CONTEXT_EXPIRED : GSS_S_COMPLETE; gss_get_mic_v2()
215 struct krb5_ctx *ctx = gss_ctx->internal_ctx_id; gss_get_mic_kerberos() local
217 switch (ctx->enctype) { gss_get_mic_kerberos()
223 return gss_get_mic_v1(ctx, text, token); gss_get_mic_kerberos()
226 return gss_get_mic_v2(ctx, text, token); gss_get_mic_kerberos()
/linux-4.1.27/drivers/net/usb/
H A Dcdc_ncm.c65 static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx);
107 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_get_ethtool_stats() local
112 p = (char *)ctx + cdc_ncm_gstrings_stats[i].stat_offset; cdc_ncm_get_ethtool_stats()
149 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_check_rx_max() local
154 max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_RX, le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)); cdc_ncm_check_rx_max()
159 le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize), min); cdc_ncm_check_rx_max()
172 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_check_tx_max() local
176 min = ctx->max_datagram_size + ctx->max_ndp_size + sizeof(struct usb_cdc_ncm_nth16); cdc_ncm_check_tx_max()
177 max = min_t(u32, CDC_NCM_NTB_MAX_SIZE_TX, le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); cdc_ncm_check_tx_max()
192 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_show_min_tx_pkt() local
194 return sprintf(buf, "%u\n", ctx->min_tx_pkt); cdc_ncm_show_min_tx_pkt()
200 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_show_rx_max() local
202 return sprintf(buf, "%u\n", ctx->rx_max); cdc_ncm_show_rx_max()
208 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_show_tx_max() local
210 return sprintf(buf, "%u\n", ctx->tx_max); cdc_ncm_show_tx_max()
216 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_show_tx_timer_usecs() local
218 return sprintf(buf, "%u\n", ctx->timer_interval / (u32)NSEC_PER_USEC); cdc_ncm_show_tx_timer_usecs()
224 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_store_min_tx_pkt() local
231 ctx->min_tx_pkt = val; cdc_ncm_store_min_tx_pkt()
238 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_store_rx_max() local
244 cdc_ncm_update_rxtx_max(dev, val, ctx->tx_max); cdc_ncm_store_rx_max()
251 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_store_tx_max() local
257 cdc_ncm_update_rxtx_max(dev, ctx->rx_max, val); cdc_ncm_store_tx_max()
264 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_store_tx_timer_usecs() local
274 spin_lock_bh(&ctx->mtx); cdc_ncm_store_tx_timer_usecs()
275 ctx->timer_interval = val * NSEC_PER_USEC; cdc_ncm_store_tx_timer_usecs()
276 if (!ctx->timer_interval) cdc_ncm_store_tx_timer_usecs()
277 ctx->tx_timer_pending = 0; cdc_ncm_store_tx_timer_usecs()
278 spin_unlock_bh(&ctx->mtx); cdc_ncm_store_tx_timer_usecs()
291 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; \
292 return sprintf(buf, format "\n", tocpu(ctx->ncm_parm.name)); \
333 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_update_rxtx_max() local
334 u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; cdc_ncm_update_rxtx_max()
340 if (val != ctx->rx_max) { cdc_ncm_update_rxtx_max()
352 ctx->rx_max = val; cdc_ncm_update_rxtx_max()
356 if (dev->rx_urb_size != ctx->rx_max) { cdc_ncm_update_rxtx_max()
357 dev->rx_urb_size = ctx->rx_max; cdc_ncm_update_rxtx_max()
363 if (val != ctx->tx_max) cdc_ncm_update_rxtx_max()
373 if (val != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) && cdc_ncm_update_rxtx_max()
378 if (netif_running(dev->net) && val > ctx->tx_max) { cdc_ncm_update_rxtx_max()
382 if (ctx->tx_curr_skb) { cdc_ncm_update_rxtx_max()
383 dev_kfree_skb_any(ctx->tx_curr_skb); cdc_ncm_update_rxtx_max()
384 ctx->tx_curr_skb = NULL; cdc_ncm_update_rxtx_max()
386 ctx->tx_max = val; cdc_ncm_update_rxtx_max()
389 ctx->tx_max = val; cdc_ncm_update_rxtx_max()
392 dev->hard_mtu = ctx->tx_max; cdc_ncm_update_rxtx_max()
398 ctx->min_tx_pkt = clamp_t(u16, ctx->tx_max - 3 * usb_maxpacket(dev->udev, dev->out, 1), cdc_ncm_update_rxtx_max()
399 CDC_NCM_MIN_TX_PKT, ctx->tx_max); cdc_ncm_update_rxtx_max()
405 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_flags() local
407 if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc) cdc_ncm_flags()
408 return ctx->mbim_desc->bmNetworkCapabilities; cdc_ncm_flags()
409 if (ctx->func_desc) cdc_ncm_flags()
410 return ctx->func_desc->bmNetworkCapabilities; cdc_ncm_flags()
430 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_max_dgram_size() local
432 if (cdc_ncm_comm_intf_is_mbim(dev->intf->cur_altsetting) && ctx->mbim_desc) cdc_ncm_max_dgram_size()
433 return le16_to_cpu(ctx->mbim_desc->wMaxSegmentSize); cdc_ncm_max_dgram_size()
434 if (ctx->ether_desc) cdc_ncm_max_dgram_size()
435 return le16_to_cpu(ctx->ether_desc->wMaxSegmentSize); cdc_ncm_max_dgram_size()
444 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_init() local
445 u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; cdc_ncm_init()
451 0, iface_no, &ctx->ncm_parm, cdc_ncm_init()
452 sizeof(ctx->ncm_parm)); cdc_ncm_init()
475 if (le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported) & cdc_ncm_init()
488 ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize); cdc_ncm_init()
489 ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize); cdc_ncm_init()
490 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder); cdc_ncm_init()
491 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor); cdc_ncm_init()
492 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment); cdc_ncm_init()
494 ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams); cdc_ncm_init()
498 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus, cdc_ncm_init()
499 ctx->tx_ndp_modulus, ctx->tx_max_datagrams, cdc_ncm_flags(dev)); cdc_ncm_init()
502 if ((ctx->tx_max_datagrams == 0) || cdc_ncm_init()
503 (ctx->tx_max_datagrams > CDC_NCM_DPT_DATAGRAMS_MAX)) cdc_ncm_init()
504 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX; cdc_ncm_init()
507 ctx->max_ndp_size = sizeof(struct usb_cdc_ncm_ndp16) + (ctx->tx_max_datagrams + 1) * sizeof(struct usb_cdc_ncm_dpe16); cdc_ncm_init()
510 ctx->timer_interval = CDC_NCM_TIMER_INTERVAL_USEC * NSEC_PER_USEC; cdc_ncm_init()
518 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_set_dgram_size() local
519 u8 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber; cdc_ncm_set_dgram_size()
525 ctx->max_datagram_size = clamp_t(u32, new_size, cdc_ncm_set_dgram_size()
542 if (le16_to_cpu(max_datagram_size) == ctx->max_datagram_size) cdc_ncm_set_dgram_size()
545 max_datagram_size = cpu_to_le16(ctx->max_datagram_size); cdc_ncm_set_dgram_size()
554 dev->net->mtu = min_t(int, dev->net->mtu, ctx->max_datagram_size - cdc_ncm_eth_hlen(dev)); cdc_ncm_set_dgram_size()
557 if (ctx->mbim_extended_desc) { cdc_ncm_set_dgram_size()
558 mbim_mtu = le16_to_cpu(ctx->mbim_extended_desc->wMTU); cdc_ncm_set_dgram_size()
566 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_fix_modulus() local
575 val = ctx->tx_ndp_modulus; cdc_ncm_fix_modulus()
578 (val != ((-val) & val)) || (val >= ctx->tx_max)) { cdc_ncm_fix_modulus()
580 ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE; cdc_ncm_fix_modulus()
589 val = ctx->tx_modulus; cdc_ncm_fix_modulus()
592 (val != ((-val) & val)) || (val >= ctx->tx_max)) { cdc_ncm_fix_modulus()
594 ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE; cdc_ncm_fix_modulus()
598 if (ctx->tx_remainder >= ctx->tx_modulus) { cdc_ncm_fix_modulus()
600 ctx->tx_remainder = 0; cdc_ncm_fix_modulus()
604 ctx->tx_remainder = ((ctx->tx_remainder - cdc_ncm_eth_hlen(dev)) & cdc_ncm_fix_modulus()
605 (ctx->tx_modulus - 1)); cdc_ncm_fix_modulus()
610 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_setup() local
617 le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)); cdc_ncm_setup()
619 le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize)); cdc_ncm_setup()
673 static void cdc_ncm_free(struct cdc_ncm_ctx *ctx) cdc_ncm_free() argument
675 if (ctx == NULL) cdc_ncm_free()
678 if (ctx->tx_rem_skb != NULL) { cdc_ncm_free()
679 dev_kfree_skb_any(ctx->tx_rem_skb); cdc_ncm_free()
680 ctx->tx_rem_skb = NULL; cdc_ncm_free()
683 if (ctx->tx_curr_skb != NULL) { cdc_ncm_free()
684 dev_kfree_skb_any(ctx->tx_curr_skb); cdc_ncm_free()
685 ctx->tx_curr_skb = NULL; cdc_ncm_free()
688 kfree(ctx); cdc_ncm_free()
698 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_change_mtu() local
699 int maxmtu = ctx->max_datagram_size - cdc_ncm_eth_hlen(dev); cdc_ncm_change_mtu()
721 struct cdc_ncm_ctx *ctx; cdc_ncm_bind_common() local
728 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); cdc_ncm_bind_common()
729 if (!ctx) cdc_ncm_bind_common()
732 hrtimer_init(&ctx->tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cdc_ncm_bind_common()
733 ctx->tx_timer.function = &cdc_ncm_tx_timer_cb; cdc_ncm_bind_common()
734 ctx->bh.data = (unsigned long)dev; cdc_ncm_bind_common()
735 ctx->bh.func = cdc_ncm_txpath_bh; cdc_ncm_bind_common()
736 atomic_set(&ctx->stop, 0); cdc_ncm_bind_common()
737 spin_lock_init(&ctx->mtx); cdc_ncm_bind_common()
739 /* store ctx pointer in device data field */ cdc_ncm_bind_common()
740 dev->data[0] = (unsigned long)ctx; cdc_ncm_bind_common()
743 ctx->control = intf; cdc_ncm_bind_common()
768 ctx->data = usb_ifnum_to_if(dev->udev, cdc_ncm_bind_common()
773 if (buf[0] < sizeof(*(ctx->ether_desc))) cdc_ncm_bind_common()
776 ctx->ether_desc = cdc_ncm_bind_common()
781 if (buf[0] < sizeof(*(ctx->func_desc))) cdc_ncm_bind_common()
784 ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf; cdc_ncm_bind_common()
788 if (buf[0] < sizeof(*(ctx->mbim_desc))) cdc_ncm_bind_common()
791 ctx->mbim_desc = (const struct usb_cdc_mbim_desc *)buf; cdc_ncm_bind_common()
795 if (buf[0] < sizeof(*(ctx->mbim_extended_desc))) cdc_ncm_bind_common()
798 ctx->mbim_extended_desc = cdc_ncm_bind_common()
814 ctx->data = usb_ifnum_to_if(dev->udev, intf->cur_altsetting->desc.bInterfaceNumber + 1); cdc_ncm_bind_common()
819 if (!ctx->data) { cdc_ncm_bind_common()
824 if (!ctx->mbim_desc) { cdc_ncm_bind_common()
829 if (!ctx->ether_desc || !ctx->func_desc) { cdc_ncm_bind_common()
836 if (ctx->data != ctx->control) { cdc_ncm_bind_common()
837 temp = usb_driver_claim_interface(driver, ctx->data, dev); cdc_ncm_bind_common()
844 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber; cdc_ncm_bind_common()
864 cdc_ncm_find_endpoints(dev, ctx->data); cdc_ncm_bind_common()
865 cdc_ncm_find_endpoints(dev, ctx->control); cdc_ncm_bind_common()
871 usb_set_intfdata(ctx->data, dev); cdc_ncm_bind_common()
872 usb_set_intfdata(ctx->control, dev); cdc_ncm_bind_common()
874 if (ctx->ether_desc) { cdc_ncm_bind_common()
875 temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress); cdc_ncm_bind_common()
898 usb_set_intfdata(ctx->control, NULL); cdc_ncm_bind_common()
899 usb_set_intfdata(ctx->data, NULL); cdc_ncm_bind_common()
900 if (ctx->data != ctx->control) cdc_ncm_bind_common()
901 usb_driver_release_interface(driver, ctx->data); cdc_ncm_bind_common()
912 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_unbind() local
915 if (ctx == NULL) cdc_ncm_unbind()
918 atomic_set(&ctx->stop, 1); cdc_ncm_unbind()
920 if (hrtimer_active(&ctx->tx_timer)) cdc_ncm_unbind()
921 hrtimer_cancel(&ctx->tx_timer); cdc_ncm_unbind()
923 tasklet_kill(&ctx->bh); cdc_ncm_unbind()
926 if (ctx->control == ctx->data) cdc_ncm_unbind()
927 ctx->data = NULL; cdc_ncm_unbind()
930 if (intf == ctx->control && ctx->data) { cdc_ncm_unbind()
931 usb_set_intfdata(ctx->data, NULL); cdc_ncm_unbind()
932 usb_driver_release_interface(driver, ctx->data); cdc_ncm_unbind()
933 ctx->data = NULL; cdc_ncm_unbind()
935 } else if (intf == ctx->data && ctx->control) { cdc_ncm_unbind()
936 usb_set_intfdata(ctx->control, NULL); cdc_ncm_unbind()
937 usb_driver_release_interface(driver, ctx->control); cdc_ncm_unbind()
938 ctx->control = NULL; cdc_ncm_unbind()
942 cdc_ncm_free(ctx); cdc_ncm_unbind()
1014 static struct usb_cdc_ncm_ndp16 *cdc_ncm_ndp(struct cdc_ncm_ctx *ctx, struct sk_buff *skb, __le32 sign, size_t reserve) cdc_ncm_ndp() argument
1029 cdc_ncm_align_tail(skb, ctx->tx_ndp_modulus, 0, ctx->tx_max); cdc_ncm_ndp()
1032 if ((ctx->tx_max - skb->len - reserve) < ctx->max_ndp_size) cdc_ncm_ndp()
1042 ndp16 = (struct usb_cdc_ncm_ndp16 *)memset(skb_put(skb, ctx->max_ndp_size), 0, ctx->max_ndp_size); cdc_ncm_ndp()
1051 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_fill_tx_frame() local
1060 swap(skb, ctx->tx_rem_skb); cdc_ncm_fill_tx_frame()
1061 swap(sign, ctx->tx_rem_sign); cdc_ncm_fill_tx_frame()
1067 skb_out = ctx->tx_curr_skb; cdc_ncm_fill_tx_frame()
1071 skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC); cdc_ncm_fill_tx_frame()
1083 nth16->wSequence = cpu_to_le16(ctx->tx_seq++); cdc_ncm_fill_tx_frame()
1086 ctx->tx_curr_frame_num = 0; cdc_ncm_fill_tx_frame()
1089 ctx->tx_curr_frame_payload = 0; cdc_ncm_fill_tx_frame()
1092 for (n = ctx->tx_curr_frame_num; n < ctx->tx_max_datagrams; n++) { cdc_ncm_fill_tx_frame()
1095 skb = ctx->tx_rem_skb; cdc_ncm_fill_tx_frame()
1096 sign = ctx->tx_rem_sign; cdc_ncm_fill_tx_frame()
1097 ctx->tx_rem_skb = NULL; cdc_ncm_fill_tx_frame()
1105 ndp16 = cdc_ncm_ndp(ctx, skb_out, sign, skb->len + ctx->tx_modulus + ctx->tx_remainder); cdc_ncm_fill_tx_frame()
1108 cdc_ncm_align_tail(skb_out, ctx->tx_modulus, ctx->tx_remainder, ctx->tx_max); cdc_ncm_fill_tx_frame()
1111 if (!ndp16 || skb_out->len + skb->len > ctx->tx_max) { cdc_ncm_fill_tx_frame()
1119 if (ctx->tx_rem_skb != NULL) { cdc_ncm_fill_tx_frame()
1120 dev_kfree_skb_any(ctx->tx_rem_skb); cdc_ncm_fill_tx_frame()
1123 ctx->tx_rem_skb = skb; cdc_ncm_fill_tx_frame()
1124 ctx->tx_rem_sign = sign; cdc_ncm_fill_tx_frame()
1127 ctx->tx_reason_ntb_full++; /* count reason for transmitting */ cdc_ncm_fill_tx_frame()
1141 ctx->tx_curr_frame_payload += skb->len; /* count real tx payload data */ cdc_ncm_fill_tx_frame()
1148 ctx->tx_reason_ndp_full++; /* count reason for transmitting */ cdc_ncm_fill_tx_frame()
1160 ctx->tx_curr_frame_num = n; cdc_ncm_fill_tx_frame()
1165 ctx->tx_curr_skb = skb_out; cdc_ncm_fill_tx_frame()
1168 } else if ((n < ctx->tx_max_datagrams) && (ready2send == 0) && (ctx->timer_interval > 0)) { cdc_ncm_fill_tx_frame()
1171 ctx->tx_curr_skb = skb_out; cdc_ncm_fill_tx_frame()
1174 ctx->tx_timer_pending = CDC_NCM_TIMER_PENDING_CNT; cdc_ncm_fill_tx_frame()
1178 if (n == ctx->tx_max_datagrams) cdc_ncm_fill_tx_frame()
1179 ctx->tx_reason_max_datagram++; /* count reason for transmitting */ cdc_ncm_fill_tx_frame()
1184 /* If collected data size is less or equal ctx->min_tx_pkt cdc_ncm_fill_tx_frame()
1194 skb_out->len > ctx->min_tx_pkt) cdc_ncm_fill_tx_frame()
1195 memset(skb_put(skb_out, ctx->tx_max - skb_out->len), 0, cdc_ncm_fill_tx_frame()
1196 ctx->tx_max - skb_out->len); cdc_ncm_fill_tx_frame()
1197 else if (skb_out->len < ctx->tx_max && (skb_out->len % dev->maxpacket) == 0) cdc_ncm_fill_tx_frame()
1205 ctx->tx_curr_skb = NULL; cdc_ncm_fill_tx_frame()
1208 ctx->tx_overhead += skb_out->len - ctx->tx_curr_frame_payload; cdc_ncm_fill_tx_frame()
1209 ctx->tx_ntbs++; cdc_ncm_fill_tx_frame()
1216 (long)ctx->tx_curr_frame_payload - skb_out->len); cdc_ncm_fill_tx_frame()
1222 if (ctx->tx_curr_skb != NULL && n > 0) cdc_ncm_fill_tx_frame()
1223 cdc_ncm_tx_timeout_start(ctx); cdc_ncm_fill_tx_frame()
1228 static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx) cdc_ncm_tx_timeout_start() argument
1231 if (!(hrtimer_active(&ctx->tx_timer) || atomic_read(&ctx->stop))) cdc_ncm_tx_timeout_start()
1232 hrtimer_start(&ctx->tx_timer, cdc_ncm_tx_timeout_start()
1233 ktime_set(0, ctx->timer_interval), cdc_ncm_tx_timeout_start()
1239 struct cdc_ncm_ctx *ctx = cdc_ncm_tx_timer_cb() local
1242 if (!atomic_read(&ctx->stop)) cdc_ncm_tx_timer_cb()
1243 tasklet_schedule(&ctx->bh); cdc_ncm_tx_timer_cb()
1250 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_txpath_bh() local
1252 spin_lock_bh(&ctx->mtx); cdc_ncm_txpath_bh()
1253 if (ctx->tx_timer_pending != 0) { cdc_ncm_txpath_bh()
1254 ctx->tx_timer_pending--; cdc_ncm_txpath_bh()
1255 cdc_ncm_tx_timeout_start(ctx); cdc_ncm_txpath_bh()
1256 spin_unlock_bh(&ctx->mtx); cdc_ncm_txpath_bh()
1258 ctx->tx_reason_timeout++; /* count reason for transmitting */ cdc_ncm_txpath_bh()
1259 spin_unlock_bh(&ctx->mtx); cdc_ncm_txpath_bh()
1264 spin_unlock_bh(&ctx->mtx); cdc_ncm_txpath_bh()
1272 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_tx_fixup() local
1281 if (ctx == NULL) cdc_ncm_tx_fixup()
1284 spin_lock_bh(&ctx->mtx); cdc_ncm_tx_fixup()
1286 spin_unlock_bh(&ctx->mtx); cdc_ncm_tx_fixup()
1298 int cdc_ncm_rx_verify_nth16(struct cdc_ncm_ctx *ctx, struct sk_buff *skb_in) cdc_ncm_rx_verify_nth16() argument
1305 if (ctx == NULL) cdc_ncm_rx_verify_nth16()
1324 if (len > ctx->rx_max) { cdc_ncm_rx_verify_nth16()
1327 ctx->rx_max); cdc_ncm_rx_verify_nth16()
1331 if ((ctx->rx_seq + 1) != le16_to_cpu(nth16->wSequence) && cdc_ncm_rx_verify_nth16()
1332 (ctx->rx_seq || le16_to_cpu(nth16->wSequence)) && cdc_ncm_rx_verify_nth16()
1333 !((ctx->rx_seq == 0xffff) && !le16_to_cpu(nth16->wSequence))) { cdc_ncm_rx_verify_nth16()
1336 ctx->rx_seq, le16_to_cpu(nth16->wSequence)); cdc_ncm_rx_verify_nth16()
1338 ctx->rx_seq = le16_to_cpu(nth16->wSequence); cdc_ncm_rx_verify_nth16()
1385 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_rx_fixup() local
1396 ndpoffset = cdc_ncm_rx_verify_nth16(ctx, skb_in); cdc_ncm_rx_fixup()
1431 (len > ctx->rx_max) || (len < ETH_HLEN)) { cdc_ncm_rx_fixup()
1456 ctx->rx_overhead += skb_in->len - payload; cdc_ncm_rx_fixup()
1457 ctx->rx_ntbs++; cdc_ncm_rx_fixup()
1491 struct cdc_ncm_ctx *ctx; cdc_ncm_status() local
1494 ctx = (struct cdc_ncm_ctx *)dev->data[0]; cdc_ncm_status()
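
Note: the cdc_ncm.c hits above trace one context through its whole lifetime: allocated and wired up in cdc_ncm_bind_common() (timer, tasklet, spinlock), parked in dev->data[0], and torn down in cdc_ncm_unbind()/cdc_ncm_free(), which drops any half-built tx buffers before freeing. A minimal userspace sketch of that alloc/park/teardown shape — names are hypothetical, and plain calloc/free stand in for kzalloc/kfree and sk_buff handling:

    #include <stdlib.h>

    struct ncm_like_ctx {                 /* stand-in for cdc_ncm_ctx   */
        void *tx_rem_skb;                 /* deferred datagram, or NULL */
        void *tx_curr_skb;                /* NTB being built, or NULL   */
    };

    struct dev_like { void *priv; };      /* stand-in for dev->data[0]  */

    static int bind_common(struct dev_like *dev)
    {
        struct ncm_like_ctx *ctx = calloc(1, sizeof(*ctx));
        if (!ctx)
            return -1;
        dev->priv = ctx;                  /* park ctx in the device     */
        return 0;
    }

    static void ctx_free(struct ncm_like_ctx *ctx)
    {
        if (!ctx)                         /* mirror cdc_ncm_free()'s    */
            return;                       /* NULL check                 */
        free(ctx->tx_rem_skb);            /* drop pending tx state      */
        free(ctx->tx_curr_skb);
        free(ctx);
    }

    static void unbind(struct dev_like *dev)
    {
        ctx_free(dev->priv);
        dev->priv = NULL;
    }

The point of the shape is that teardown tolerates a partially built context: every member is either valid or NULL, so the free path never branches on how far bind got.
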
/linux-4.1.27/drivers/crypto/caam/
H A Dcaamrng.c88 static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx) rng_unmap_ctx() argument
90 struct device *jrdev = ctx->jrdev; rng_unmap_ctx()
92 if (ctx->sh_desc_dma) rng_unmap_ctx()
93 dma_unmap_single(jrdev, ctx->sh_desc_dma, rng_unmap_ctx()
94 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE); rng_unmap_ctx()
95 rng_unmap_buf(jrdev, &ctx->bufs[0]); rng_unmap_ctx()
96 rng_unmap_buf(jrdev, &ctx->bufs[1]); rng_unmap_ctx()
117 static inline int submit_job(struct caam_rng_ctx *ctx, int to_current) submit_job() argument
119 struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)]; submit_job()
120 struct device *jrdev = ctx->jrdev; submit_job()
124 dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf)); submit_job()
126 err = caam_jr_enqueue(jrdev, desc, rng_done, ctx); submit_job()
137 struct caam_rng_ctx *ctx = rng_ctx; caam_read() local
138 struct buf_data *bd = &ctx->bufs[ctx->current_buf]; caam_read()
145 err = submit_job(ctx, 1); caam_read()
159 next_buf_idx = ctx->cur_buf_idx + max; caam_read()
160 dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n", caam_read()
161 __func__, ctx->current_buf, ctx->cur_buf_idx); caam_read()
165 memcpy(data, bd->buf + ctx->cur_buf_idx, max); caam_read()
166 ctx->cur_buf_idx = next_buf_idx; caam_read()
171 copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx; caam_read()
172 memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx); caam_read()
173 ctx->cur_buf_idx = 0; caam_read()
177 submit_job(ctx, 1); caam_read()
180 ctx->current_buf = !ctx->current_buf; caam_read()
181 dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf); caam_read()
188 static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx) rng_create_sh_desc() argument
190 struct device *jrdev = ctx->jrdev; rng_create_sh_desc()
191 u32 *desc = ctx->sh_desc; rng_create_sh_desc()
204 ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc), rng_create_sh_desc()
206 if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) { rng_create_sh_desc()
217 static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id) rng_create_job_desc() argument
219 struct device *jrdev = ctx->jrdev; rng_create_job_desc()
220 struct buf_data *bd = &ctx->bufs[buf_id]; rng_create_job_desc()
222 int sh_len = desc_len(ctx->sh_desc); rng_create_job_desc()
224 init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER | rng_create_job_desc()
255 static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id) caam_init_buf() argument
257 struct buf_data *bd = &ctx->bufs[buf_id]; caam_init_buf()
260 err = rng_create_job_desc(ctx, buf_id); caam_init_buf()
265 submit_job(ctx, buf_id == ctx->current_buf); caam_init_buf()
271 static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev) caam_init_rng() argument
275 ctx->jrdev = jrdev; caam_init_rng()
277 err = rng_create_sh_desc(ctx); caam_init_rng()
281 ctx->current_buf = 0; caam_init_rng()
282 ctx->cur_buf_idx = 0; caam_init_rng()
284 err = caam_init_buf(ctx, 0); caam_init_rng()
288 err = caam_init_buf(ctx, 1); caam_init_rng()
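
Note: caam_read() above drains bufs[current_buf] at cur_buf_idx while a job refills the other buffer; when the current one runs dry it resubmits a fill job and flips current_buf. A hedged userspace analogue of that ping-pong (names hypothetical; a synchronous memset stands in for the asynchronous DMA job completion):

    #include <stddef.h>
    #include <string.h>

    #define BUF_SZ 256

    struct rng_like_ctx {
        unsigned char bufs[2][BUF_SZ];
        int cur;                          /* buffer we read from        */
        size_t idx;                       /* read offset into bufs[cur] */
    };

    /* stand-in for caam_jr_enqueue(): refill one buffer "in hardware" */
    static void refill(struct rng_like_ctx *ctx, int which)
    {
        memset(ctx->bufs[which], 0xa5, BUF_SZ);
    }

    /* copies at most one buffer's remainder; the driver loops on this */
    static size_t read_rng(struct rng_like_ctx *ctx, void *out, size_t want)
    {
        size_t avail = BUF_SZ - ctx->idx;
        size_t n = want < avail ? want : avail;

        memcpy(out, &ctx->bufs[ctx->cur][ctx->idx], n);
        ctx->idx += n;
        if (ctx->idx == BUF_SZ) {
            refill(ctx, ctx->cur);        /* queue refill of drained buf */
            ctx->cur = !ctx->cur;         /* switch to the full one      */
            ctx->idx = 0;
        }
        return n;
    }

In the real driver the refill runs asynchronously on the job ring, so the consumer only ever blocks when both buffers are behind.
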
H A Dcaamhash.c147 dev_err(jrdev, "unable to map ctx\n"); map_seq_out_ptr_ctx()
217 dev_err(jrdev, "unable to map ctx\n"); ctx_map_to_sec4_sg()
227 static inline void append_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) append_key_ahash() argument
229 append_key_as_imm(desc, ctx->key, ctx->split_key_pad_len, append_key_ahash()
230 ctx->split_key_len, CLASS_2 | append_key_ahash()
235 static inline void init_sh_desc_key_ahash(u32 *desc, struct caam_hash_ctx *ctx) init_sh_desc_key_ahash() argument
241 if (ctx->split_key_len) { init_sh_desc_key_ahash()
246 append_key_ahash(desc, ctx); init_sh_desc_key_ahash()
279 struct caam_hash_ctx *ctx) ahash_ctx_data_to_out()
281 init_sh_desc_key_ahash(desc, ctx); ahash_ctx_data_to_out()
285 LDST_CLASS_2_CCB | ctx->ctx_len); ahash_ctx_data_to_out()
298 int digestsize, struct caam_hash_ctx *ctx) ahash_data_to_out()
300 init_sh_desc_key_ahash(desc, ctx); ahash_data_to_out()
313 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_set_sh_desc() local
315 struct device *jrdev = ctx->jrdev; ahash_set_sh_desc()
319 if (ctx->split_key_len) ahash_set_sh_desc()
323 desc = ctx->sh_desc_update; ahash_set_sh_desc()
329 LDST_CLASS_2_CCB | ctx->ctx_len); ahash_set_sh_desc()
332 append_operation(desc, ctx->alg_type | OP_ALG_AS_UPDATE | ahash_set_sh_desc()
336 ahash_append_load_str(desc, ctx->ctx_len); ahash_set_sh_desc()
338 ctx->sh_desc_update_dma = dma_map_single(jrdev, desc, desc_bytes(desc), ahash_set_sh_desc()
340 if (dma_mapping_error(jrdev, ctx->sh_desc_update_dma)) { ahash_set_sh_desc()
351 desc = ctx->sh_desc_update_first; ahash_set_sh_desc()
353 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INIT, ahash_set_sh_desc()
354 ctx->ctx_len, ctx); ahash_set_sh_desc()
356 ctx->sh_desc_update_first_dma = dma_map_single(jrdev, desc, ahash_set_sh_desc()
359 if (dma_mapping_error(jrdev, ctx->sh_desc_update_first_dma)) { ahash_set_sh_desc()
370 desc = ctx->sh_desc_fin; ahash_set_sh_desc()
372 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, ahash_set_sh_desc()
373 OP_ALG_AS_FINALIZE, digestsize, ctx); ahash_set_sh_desc()
375 ctx->sh_desc_fin_dma = dma_map_single(jrdev, desc, desc_bytes(desc), ahash_set_sh_desc()
377 if (dma_mapping_error(jrdev, ctx->sh_desc_fin_dma)) { ahash_set_sh_desc()
388 desc = ctx->sh_desc_finup; ahash_set_sh_desc()
390 ahash_ctx_data_to_out(desc, have_key | ctx->alg_type, ahash_set_sh_desc()
391 OP_ALG_AS_FINALIZE, digestsize, ctx); ahash_set_sh_desc()
393 ctx->sh_desc_finup_dma = dma_map_single(jrdev, desc, desc_bytes(desc), ahash_set_sh_desc()
395 if (dma_mapping_error(jrdev, ctx->sh_desc_finup_dma)) { ahash_set_sh_desc()
406 desc = ctx->sh_desc_digest; ahash_set_sh_desc()
408 ahash_data_to_out(desc, have_key | ctx->alg_type, OP_ALG_AS_INITFINAL, ahash_set_sh_desc()
409 digestsize, ctx); ahash_set_sh_desc()
411 ctx->sh_desc_digest_dma = dma_map_single(jrdev, desc, ahash_set_sh_desc()
414 if (dma_mapping_error(jrdev, ctx->sh_desc_digest_dma)) { ahash_set_sh_desc()
428 static int gen_split_hash_key(struct caam_hash_ctx *ctx, const u8 *key_in, gen_split_hash_key() argument
431 return gen_split_key(ctx->jrdev, ctx->key, ctx->split_key_len, gen_split_hash_key()
432 ctx->split_key_pad_len, key_in, keylen, gen_split_hash_key()
433 ctx->alg_op); gen_split_hash_key()
437 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in, hash_digest_key() argument
440 struct device *jrdev = ctx->jrdev; hash_digest_key()
471 append_operation(desc, ctx->alg_type | OP_ALG_ENCRYPT | hash_digest_key()
517 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_setkey() local
518 struct device *jrdev = ctx->jrdev; ahash_setkey()
533 ret = hash_digest_key(ctx, key, &keylen, hashed_key, ahash_setkey()
541 ctx->split_key_len = mdpadlen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> ahash_setkey()
543 ctx->split_key_pad_len = ALIGN(ctx->split_key_len, 16); ahash_setkey()
547 ctx->split_key_len, ctx->split_key_pad_len); ahash_setkey()
552 ret = gen_split_hash_key(ctx, key, keylen); ahash_setkey()
556 ctx->key_dma = dma_map_single(jrdev, ctx->key, ctx->split_key_pad_len, ahash_setkey()
558 if (dma_mapping_error(jrdev, ctx->key_dma)) { ahash_setkey()
564 print_hex_dump(KERN_ERR, "ctx.key@"__stringify(__LINE__)": ", ahash_setkey()
565 DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, ahash_setkey()
566 ctx->split_key_pad_len, 1); ahash_setkey()
571 dma_unmap_single(jrdev, ctx->key_dma, ctx->split_key_pad_len, ahash_setkey()
624 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_unmap_ctx() local
628 dma_unmap_single(dev, state->ctx_dma, ctx->ctx_len, flag); ahash_unmap_ctx()
640 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_done() local
655 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", ahash_done()
657 ctx->ctx_len, 1); ahash_done()
673 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_done_bi() local
686 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL); ahash_done_bi()
690 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", ahash_done_bi()
692 ctx->ctx_len, 1); ahash_done_bi()
710 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_done_ctx_src() local
725 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", ahash_done_ctx_src()
727 ctx->ctx_len, 1); ahash_done_ctx_src()
743 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_done_ctx_dst() local
756 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_FROM_DEVICE); ahash_done_ctx_dst()
760 print_hex_dump(KERN_ERR, "ctx@"__stringify(__LINE__)": ", ahash_done_ctx_dst()
762 ctx->ctx_len, 1); ahash_done_ctx_dst()
776 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_update_ctx() local
778 struct device *jrdev = ctx->jrdev; ahash_update_ctx()
787 u32 *sh_desc = ctx->sh_desc_update, *desc; ahash_update_ctx()
788 dma_addr_t ptr = ctx->sh_desc_update_dma; ahash_update_ctx()
824 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ahash_update_ctx()
862 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + ahash_update_ctx()
865 append_seq_out_ptr(desc, state->ctx_dma, ctx->ctx_len, 0); ahash_update_ctx()
877 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, ahash_update_ctx()
901 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_final_ctx() local
903 struct device *jrdev = ctx->jrdev; ahash_final_ctx()
910 u32 *sh_desc = ctx->sh_desc_fin, *desc; ahash_final_ctx()
911 dma_addr_t ptr = ctx->sh_desc_fin_dma; ahash_final_ctx()
938 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ahash_final_ctx()
955 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + buflen, ahash_final_ctx()
984 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_finup_ctx() local
986 struct device *jrdev = ctx->jrdev; ahash_finup_ctx()
993 u32 *sh_desc = ctx->sh_desc_finup, *desc; ahash_finup_ctx()
994 dma_addr_t ptr = ctx->sh_desc_finup_dma; ahash_finup_ctx()
1026 ret = ctx_map_to_sec4_sg(desc, jrdev, state, ctx->ctx_len, ahash_finup_ctx()
1045 append_seq_in_ptr(desc, edesc->sec4_sg_dma, ctx->ctx_len + ahash_finup_ctx()
1074 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_digest() local
1075 struct device *jrdev = ctx->jrdev; ahash_digest()
1078 u32 *sh_desc = ctx->sh_desc_digest, *desc; ahash_digest()
1079 dma_addr_t ptr = ctx->sh_desc_digest_dma; ahash_digest()
1154 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_final_no_ctx() local
1156 struct device *jrdev = ctx->jrdev; ahash_final_no_ctx()
1161 u32 *sh_desc = ctx->sh_desc_digest, *desc; ahash_final_no_ctx()
1162 dma_addr_t ptr = ctx->sh_desc_digest_dma; ahash_final_no_ctx()
1217 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_update_no_ctx() local
1219 struct device *jrdev = ctx->jrdev; ahash_update_no_ctx()
1230 u32 *desc, *sh_desc = ctx->sh_desc_update_first; ahash_update_no_ctx()
1231 dma_addr_t ptr = ctx->sh_desc_update_first_dma; ahash_update_no_ctx()
1290 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); ahash_update_no_ctx()
1307 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, ahash_update_no_ctx()
1332 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_finup_no_ctx() local
1334 struct device *jrdev = ctx->jrdev; ahash_finup_no_ctx()
1341 u32 *sh_desc = ctx->sh_desc_digest, *desc; ahash_finup_no_ctx()
1342 dma_addr_t ptr = ctx->sh_desc_digest_dma; ahash_finup_no_ctx()
1417 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_update_first() local
1419 struct device *jrdev = ctx->jrdev; ahash_update_first()
1426 u32 *sh_desc = ctx->sh_desc_update_first, *desc; ahash_update_first()
1427 dma_addr_t ptr = ctx->sh_desc_update_first_dma; ahash_update_first()
1495 ret = map_seq_out_ptr_ctx(desc, jrdev, state, ctx->ctx_len); ahash_update_first()
1513 ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, ahash_update_first()
1578 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_export() local
1581 memcpy(out, ctx, sizeof(struct caam_hash_ctx)); ahash_export()
1590 struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash); ahash_import() local
1593 memcpy(ctx, in, sizeof(struct caam_hash_ctx)); ahash_import()
1758 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); caam_hash_cra_init() local
1772 ctx->jrdev = caam_jr_alloc(); caam_hash_cra_init()
1773 if (IS_ERR(ctx->jrdev)) { caam_hash_cra_init()
1775 return PTR_ERR(ctx->jrdev); caam_hash_cra_init()
1778 ctx->alg_type = OP_TYPE_CLASS2_ALG | caam_hash->alg_type; caam_hash_cra_init()
1779 ctx->alg_op = OP_TYPE_CLASS2_ALG | caam_hash->alg_op; caam_hash_cra_init()
1781 ctx->ctx_len = runninglen[(ctx->alg_op & OP_ALG_ALGSEL_SUBMASK) >> caam_hash_cra_init()
1794 struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm); caam_hash_cra_exit() local
1796 if (ctx->sh_desc_update_dma && caam_hash_cra_exit()
1797 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_dma)) caam_hash_cra_exit()
1798 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_dma, caam_hash_cra_exit()
1799 desc_bytes(ctx->sh_desc_update), caam_hash_cra_exit()
1801 if (ctx->sh_desc_update_first_dma && caam_hash_cra_exit()
1802 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_update_first_dma)) caam_hash_cra_exit()
1803 dma_unmap_single(ctx->jrdev, ctx->sh_desc_update_first_dma, caam_hash_cra_exit()
1804 desc_bytes(ctx->sh_desc_update_first), caam_hash_cra_exit()
1806 if (ctx->sh_desc_fin_dma && caam_hash_cra_exit()
1807 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_fin_dma)) caam_hash_cra_exit()
1808 dma_unmap_single(ctx->jrdev, ctx->sh_desc_fin_dma, caam_hash_cra_exit()
1809 desc_bytes(ctx->sh_desc_fin), DMA_TO_DEVICE); caam_hash_cra_exit()
1810 if (ctx->sh_desc_digest_dma && caam_hash_cra_exit()
1811 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_digest_dma)) caam_hash_cra_exit()
1812 dma_unmap_single(ctx->jrdev, ctx->sh_desc_digest_dma, caam_hash_cra_exit()
1813 desc_bytes(ctx->sh_desc_digest), caam_hash_cra_exit()
1815 if (ctx->sh_desc_finup_dma && caam_hash_cra_exit()
1816 !dma_mapping_error(ctx->jrdev, ctx->sh_desc_finup_dma)) caam_hash_cra_exit()
1817 dma_unmap_single(ctx->jrdev, ctx->sh_desc_finup_dma, caam_hash_cra_exit()
1818 desc_bytes(ctx->sh_desc_finup), DMA_TO_DEVICE); caam_hash_cra_exit()
1820 caam_jr_free(ctx->jrdev); caam_hash_cra_exit()
277 ahash_ctx_data_to_out(u32 *desc, u32 op, u32 state, int digestsize, struct caam_hash_ctx *ctx) ahash_ctx_data_to_out() argument
297 ahash_data_to_out(u32 *desc, u32 op, u32 state, int digestsize, struct caam_hash_ctx *ctx) ahash_data_to_out() argument
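
Note: caam_hash_cra_exit() above unmaps each shared descriptor only when its DMA handle was actually set and dma_mapping_error() reports success, so a context whose init failed halfway can still be torn down safely. A sketch of the same guarded-teardown idea in plain C — resources modeled as heap pointers, with is_valid() standing in for !dma_mapping_error():

    #include <stdbool.h>
    #include <stdlib.h>

    struct hash_like_ctx {                /* each handle may be unset  */
        void *desc_update;
        void *desc_fin;
        void *desc_digest;
    };

    static bool is_valid(void *h)         /* ~ !dma_mapping_error()    */
    {
        return h != NULL;
    }

    static void release(void **h)
    {
        /* double guard mirrors the driver: set, and validly mapped */
        if (*h && is_valid(*h)) {
            free(*h);
            *h = NULL;
        }
    }

    static void ctx_exit(struct hash_like_ctx *ctx)
    {
        release(&ctx->desc_update);
        release(&ctx->desc_fin);
        release(&ctx->desc_digest);
    }
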
/linux-4.1.27/drivers/media/platform/exynos4-is/
H A Dfimc-m2m.c43 void fimc_m2m_job_finish(struct fimc_ctx *ctx, int vb_state) fimc_m2m_job_finish() argument
47 if (!ctx || !ctx->fh.m2m_ctx) fimc_m2m_job_finish()
50 src_vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); fimc_m2m_job_finish()
51 dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); fimc_m2m_job_finish()
56 v4l2_m2m_job_finish(ctx->fimc_dev->m2m.m2m_dev, fimc_m2m_job_finish()
57 ctx->fh.m2m_ctx); fimc_m2m_job_finish()
62 static int fimc_m2m_shutdown(struct fimc_ctx *ctx) fimc_m2m_shutdown() argument
64 struct fimc_dev *fimc = ctx->fimc_dev; fimc_m2m_shutdown()
70 fimc_ctx_state_set(FIMC_CTX_SHUT, ctx); fimc_m2m_shutdown()
73 !fimc_ctx_state_is_set(FIMC_CTX_SHUT, ctx), fimc_m2m_shutdown()
81 struct fimc_ctx *ctx = q->drv_priv; start_streaming() local
84 ret = pm_runtime_get_sync(&ctx->fimc_dev->pdev->dev); start_streaming()
90 struct fimc_ctx *ctx = q->drv_priv; stop_streaming() local
93 ret = fimc_m2m_shutdown(ctx); stop_streaming()
95 fimc_m2m_job_finish(ctx, VB2_BUF_STATE_ERROR); stop_streaming()
97 pm_runtime_put(&ctx->fimc_dev->pdev->dev); stop_streaming()
103 struct fimc_ctx *ctx = priv; fimc_device_run() local
109 if (WARN(!ctx, "Null context\n")) fimc_device_run()
112 fimc = ctx->fimc_dev; fimc_device_run()
116 sf = &ctx->s_frame; fimc_device_run()
117 df = &ctx->d_frame; fimc_device_run()
119 if (ctx->state & FIMC_PARAMS) { fimc_device_run()
121 fimc_prepare_dma_offset(ctx, sf); fimc_device_run()
122 fimc_prepare_dma_offset(ctx, df); fimc_device_run()
125 src_vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); fimc_device_run()
126 ret = fimc_prepare_addr(ctx, src_vb, sf, &sf->paddr); fimc_device_run()
130 dst_vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); fimc_device_run()
131 ret = fimc_prepare_addr(ctx, dst_vb, df, &df->paddr); fimc_device_run()
141 if (fimc->m2m.ctx != ctx) { fimc_device_run()
142 ctx->state |= FIMC_PARAMS; fimc_device_run()
143 fimc->m2m.ctx = ctx; fimc_device_run()
146 if (ctx->state & FIMC_PARAMS) { fimc_device_run()
147 fimc_set_yuv_order(ctx); fimc_device_run()
148 fimc_hw_set_input_path(ctx); fimc_device_run()
149 fimc_hw_set_in_dma(ctx); fimc_device_run()
150 ret = fimc_set_scaler_info(ctx); fimc_device_run()
153 fimc_hw_set_prescaler(ctx); fimc_device_run()
154 fimc_hw_set_mainscaler(ctx); fimc_device_run()
155 fimc_hw_set_target_format(ctx); fimc_device_run()
156 fimc_hw_set_rotation(ctx); fimc_device_run()
157 fimc_hw_set_effect(ctx); fimc_device_run()
158 fimc_hw_set_out_dma(ctx); fimc_device_run()
160 fimc_hw_set_rgb_alpha(ctx); fimc_device_run()
161 fimc_hw_set_output_path(ctx); fimc_device_run()
166 fimc_activate_capture(ctx); fimc_device_run()
167 ctx->state &= (FIMC_CTX_M2M | FIMC_CTX_CAP); fimc_device_run()
183 struct fimc_ctx *ctx = vb2_get_drv_priv(vq); fimc_queue_setup() local
187 f = ctx_get_frame(ctx, vq->type); fimc_queue_setup()
200 allocators[i] = ctx->fimc_dev->alloc_ctx; fimc_queue_setup()
207 struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); fimc_buf_prepare() local
211 frame = ctx_get_frame(ctx, vb->vb2_queue->type); fimc_buf_prepare()
223 struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); fimc_buf_queue() local
224 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); fimc_buf_queue()
276 struct fimc_ctx *ctx = fh_to_ctx(fh); fimc_m2m_g_fmt_mplane() local
277 struct fimc_frame *frame = ctx_get_frame(ctx, f->type); fimc_m2m_g_fmt_mplane()
286 static int fimc_try_fmt_mplane(struct fimc_ctx *ctx, struct v4l2_format *f) fimc_try_fmt_mplane() argument
288 struct fimc_dev *fimc = ctx->fimc_dev; fimc_try_fmt_mplane()
335 struct fimc_ctx *ctx = fh_to_ctx(fh); fimc_m2m_try_fmt_mplane() local
336 return fimc_try_fmt_mplane(ctx, f); fimc_m2m_try_fmt_mplane()
363 struct fimc_ctx *ctx = fh_to_ctx(fh); fimc_m2m_s_fmt_mplane() local
364 struct fimc_dev *fimc = ctx->fimc_dev; fimc_m2m_s_fmt_mplane()
370 ret = fimc_try_fmt_mplane(ctx, f); fimc_m2m_s_fmt_mplane()
374 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); fimc_m2m_s_fmt_mplane()
382 frame = &ctx->s_frame; fimc_m2m_s_fmt_mplane()
384 frame = &ctx->d_frame; fimc_m2m_s_fmt_mplane()
394 fimc_alpha_ctrl_update(ctx); fimc_m2m_s_fmt_mplane()
402 struct fimc_ctx *ctx = fh_to_ctx(fh); fimc_m2m_cropcap() local
405 frame = ctx_get_frame(ctx, cr->type); fimc_m2m_cropcap()
420 struct fimc_ctx *ctx = fh_to_ctx(fh); fimc_m2m_g_crop() local
423 frame = ctx_get_frame(ctx, cr->type); fimc_m2m_g_crop()
435 static int fimc_m2m_try_crop(struct fimc_ctx *ctx, struct v4l2_crop *cr) fimc_m2m_try_crop() argument
437 struct fimc_dev *fimc = ctx->fimc_dev; fimc_m2m_try_crop()
448 f = &ctx->d_frame; fimc_m2m_try_crop()
450 f = &ctx->s_frame; fimc_m2m_try_crop()
454 min_size = (f == &ctx->s_frame) ? fimc_m2m_try_crop()
489 struct fimc_ctx *ctx = fh_to_ctx(fh); fimc_m2m_s_crop() local
490 struct fimc_dev *fimc = ctx->fimc_dev; fimc_m2m_s_crop()
495 ret = fimc_m2m_try_crop(ctx, &cr); fimc_m2m_s_crop()
500 &ctx->s_frame : &ctx->d_frame; fimc_m2m_s_crop()
504 ret = fimc_check_scaler_ratio(ctx, cr.c.width, fimc_m2m_s_crop()
505 cr.c.height, ctx->d_frame.width, fimc_m2m_s_crop()
506 ctx->d_frame.height, ctx->rotation); fimc_m2m_s_crop()
508 ret = fimc_check_scaler_ratio(ctx, ctx->s_frame.width, fimc_m2m_s_crop()
509 ctx->s_frame.height, cr.c.width, fimc_m2m_s_crop()
510 cr.c.height, ctx->rotation); fimc_m2m_s_crop()
522 fimc_ctx_state_set(FIMC_PARAMS, ctx); fimc_m2m_s_crop()
553 struct fimc_ctx *ctx = priv; queue_init() local
558 src_vq->drv_priv = ctx; queue_init()
563 src_vq->lock = &ctx->fimc_dev->lock; queue_init()
571 dst_vq->drv_priv = ctx; queue_init()
576 dst_vq->lock = &ctx->fimc_dev->lock; queue_init()
581 static int fimc_m2m_set_default_format(struct fimc_ctx *ctx) fimc_m2m_set_default_format() argument
598 __set_frame_format(&ctx->s_frame, fmt, &pixm); fimc_m2m_set_default_format()
599 __set_frame_format(&ctx->d_frame, fmt, &pixm); fimc_m2m_set_default_format()
607 struct fimc_ctx *ctx; fimc_m2m_open() local
621 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); fimc_m2m_open()
622 if (!ctx) { fimc_m2m_open()
626 v4l2_fh_init(&ctx->fh, &fimc->m2m.vfd); fimc_m2m_open()
627 ctx->fimc_dev = fimc; fimc_m2m_open()
630 ctx->s_frame.fmt = fimc_get_format(0); fimc_m2m_open()
631 ctx->d_frame.fmt = fimc_get_format(0); fimc_m2m_open()
633 ret = fimc_ctrls_create(ctx); fimc_m2m_open()
638 ctx->fh.ctrl_handler = &ctx->ctrls.handler; fimc_m2m_open()
639 file->private_data = &ctx->fh; fimc_m2m_open()
640 v4l2_fh_add(&ctx->fh); fimc_m2m_open()
643 ctx->state = FIMC_CTX_M2M; fimc_m2m_open()
644 ctx->flags = 0; fimc_m2m_open()
645 ctx->in_path = FIMC_IO_DMA; fimc_m2m_open()
646 ctx->out_path = FIMC_IO_DMA; fimc_m2m_open()
647 ctx->scaler.enabled = 1; fimc_m2m_open()
649 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(fimc->m2m.m2m_dev, ctx, queue_init); fimc_m2m_open()
650 if (IS_ERR(ctx->fh.m2m_ctx)) { fimc_m2m_open()
651 ret = PTR_ERR(ctx->fh.m2m_ctx); fimc_m2m_open()
658 ret = fimc_m2m_set_default_format(ctx); fimc_m2m_open()
666 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); fimc_m2m_open()
668 fimc_ctrls_delete(ctx); fimc_m2m_open()
670 v4l2_fh_del(&ctx->fh); fimc_m2m_open()
671 v4l2_fh_exit(&ctx->fh); fimc_m2m_open()
672 kfree(ctx); fimc_m2m_open()
680 struct fimc_ctx *ctx = fh_to_ctx(file->private_data); fimc_m2m_release() local
681 struct fimc_dev *fimc = ctx->fimc_dev; fimc_m2m_release()
688 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); fimc_m2m_release()
689 fimc_ctrls_delete(ctx); fimc_m2m_release()
690 v4l2_fh_del(&ctx->fh); fimc_m2m_release()
691 v4l2_fh_exit(&ctx->fh); fimc_m2m_release()
695 kfree(ctx); fimc_m2m_release()
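
Note: fimc_m2m_open() above constructs its context in stages (kzalloc, v4l2_fh_init, controls, m2m context, default format) and unwinds through labelled error paths in exact reverse order, which fimc_m2m_release() then mirrors. A compact sketch of that goto-unwind discipline, with hypothetical stage names:

    #include <stdlib.h>

    struct m2m_like_ctx { void *fh; void *ctrls; void *m2m; };

    static void *stage_init(void)    { return malloc(1); }
    static void  stage_exit(void *p) { free(p); }

    static struct m2m_like_ctx *ctx_open(void)
    {
        struct m2m_like_ctx *ctx = calloc(1, sizeof(*ctx));
        if (!ctx)
            return NULL;

        ctx->fh = stage_init();
        if (!ctx->fh)
            goto err_ctx;
        ctx->ctrls = stage_init();
        if (!ctx->ctrls)
            goto err_fh;
        ctx->m2m = stage_init();
        if (!ctx->m2m)
            goto err_ctrls;
        return ctx;                       /* fully constructed         */

    err_ctrls:                            /* unwind in reverse order   */
        stage_exit(ctx->ctrls);
    err_fh:
        stage_exit(ctx->fh);
    err_ctx:
        free(ctx);
        return NULL;
    }

Falling through the labels means each failure point releases exactly the stages that preceded it, and nothing else.
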
H A Dfimc-reg.c44 static u32 fimc_hw_get_in_flip(struct fimc_ctx *ctx) fimc_hw_get_in_flip() argument
48 if (ctx->hflip) fimc_hw_get_in_flip()
50 if (ctx->vflip) fimc_hw_get_in_flip()
53 if (ctx->rotation <= 90) fimc_hw_get_in_flip()
59 static u32 fimc_hw_get_target_flip(struct fimc_ctx *ctx) fimc_hw_get_target_flip() argument
63 if (ctx->hflip) fimc_hw_get_target_flip()
65 if (ctx->vflip) fimc_hw_get_target_flip()
68 if (ctx->rotation <= 90) fimc_hw_get_target_flip()
74 void fimc_hw_set_rotation(struct fimc_ctx *ctx) fimc_hw_set_rotation() argument
77 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_rotation()
88 if (ctx->rotation == 90 || ctx->rotation == 270) { fimc_hw_set_rotation()
89 if (ctx->out_path == FIMC_IO_LCDFIFO) fimc_hw_set_rotation()
95 if (ctx->out_path == FIMC_IO_DMA) { fimc_hw_set_rotation()
96 cfg |= fimc_hw_get_target_flip(ctx); fimc_hw_set_rotation()
102 flip |= fimc_hw_get_in_flip(ctx); fimc_hw_set_rotation()
107 void fimc_hw_set_target_format(struct fimc_ctx *ctx) fimc_hw_set_target_format() argument
110 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_target_format()
111 struct fimc_frame *frame = &ctx->d_frame; fimc_hw_set_target_format()
137 if (ctx->rotation == 90 || ctx->rotation == 270) fimc_hw_set_target_format()
150 static void fimc_hw_set_out_dma_size(struct fimc_ctx *ctx) fimc_hw_set_out_dma_size() argument
152 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_out_dma_size()
153 struct fimc_frame *frame = &ctx->d_frame; fimc_hw_set_out_dma_size()
169 void fimc_hw_set_out_dma(struct fimc_ctx *ctx) fimc_hw_set_out_dma() argument
171 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_out_dma()
172 struct fimc_frame *frame = &ctx->d_frame; fimc_hw_set_out_dma()
187 fimc_hw_set_out_dma_size(ctx); fimc_hw_set_out_dma()
198 cfg |= ctx->out_order_1p; fimc_hw_set_out_dma()
200 cfg |= ctx->out_order_2p | FIMC_REG_CIOCTRL_YCBCR_2PLANE; fimc_hw_set_out_dma()
234 void fimc_hw_set_prescaler(struct fimc_ctx *ctx) fimc_hw_set_prescaler() argument
236 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_prescaler()
237 struct fimc_scaler *sc = &ctx->scaler; fimc_hw_set_prescaler()
250 static void fimc_hw_set_scaler(struct fimc_ctx *ctx) fimc_hw_set_scaler() argument
252 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_scaler()
253 struct fimc_scaler *sc = &ctx->scaler; fimc_hw_set_scaler()
254 struct fimc_frame *src_frame = &ctx->s_frame; fimc_hw_set_scaler()
255 struct fimc_frame *dst_frame = &ctx->d_frame; fimc_hw_set_scaler()
265 if (!(ctx->flags & FIMC_COLOR_RANGE_NARROW)) fimc_hw_set_scaler()
281 if (ctx->in_path == FIMC_IO_DMA) { fimc_hw_set_scaler()
295 if (ctx->out_path == FIMC_IO_DMA) { fimc_hw_set_scaler()
307 if (ctx->flags & FIMC_SCAN_MODE_INTERLACED) fimc_hw_set_scaler()
314 void fimc_hw_set_mainscaler(struct fimc_ctx *ctx) fimc_hw_set_mainscaler() argument
316 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_mainscaler()
318 struct fimc_scaler *sc = &ctx->scaler; fimc_hw_set_mainscaler()
324 fimc_hw_set_scaler(ctx); fimc_hw_set_mainscaler()
349 void fimc_hw_enable_capture(struct fimc_ctx *ctx) fimc_hw_enable_capture() argument
351 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_enable_capture()
357 if (ctx->scaler.enabled) fimc_hw_enable_capture()
374 void fimc_hw_set_effect(struct fimc_ctx *ctx) fimc_hw_set_effect() argument
376 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_effect()
377 struct fimc_effect *effect = &ctx->effect; fimc_hw_set_effect()
391 void fimc_hw_set_rgb_alpha(struct fimc_ctx *ctx) fimc_hw_set_rgb_alpha() argument
393 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_rgb_alpha()
394 struct fimc_frame *frame = &ctx->d_frame; fimc_hw_set_rgb_alpha()
406 static void fimc_hw_set_in_dma_size(struct fimc_ctx *ctx) fimc_hw_set_in_dma_size() argument
408 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_in_dma_size()
409 struct fimc_frame *frame = &ctx->s_frame; fimc_hw_set_in_dma_size()
413 if (FIMC_IO_LCDFIFO == ctx->out_path) fimc_hw_set_in_dma_size()
423 void fimc_hw_set_in_dma(struct fimc_ctx *ctx) fimc_hw_set_in_dma() argument
425 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_in_dma()
426 struct fimc_frame *frame = &ctx->s_frame; fimc_hw_set_in_dma()
441 fimc_hw_set_in_dma_size(ctx); fimc_hw_set_in_dma()
444 fimc_hw_en_autoload(dev, ctx->out_path == FIMC_IO_LCDFIFO); fimc_hw_set_in_dma()
467 cfg |= ctx->in_order_2p | FIMC_REG_MSCTRL_C_INT_IN_2PLANE; fimc_hw_set_in_dma()
474 cfg |= ctx->in_order_1p fimc_hw_set_in_dma()
480 cfg |= ctx->in_order_2p fimc_hw_set_in_dma()
496 if (tiled_fmt(ctx->s_frame.fmt)) fimc_hw_set_in_dma()
499 if (tiled_fmt(ctx->d_frame.fmt)) fimc_hw_set_in_dma()
506 void fimc_hw_set_input_path(struct fimc_ctx *ctx) fimc_hw_set_input_path() argument
508 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_input_path()
513 if (ctx->in_path == FIMC_IO_DMA) fimc_hw_set_input_path()
521 void fimc_hw_set_output_path(struct fimc_ctx *ctx) fimc_hw_set_output_path() argument
523 struct fimc_dev *dev = ctx->fimc_dev; fimc_hw_set_output_path()
527 if (ctx->out_path == FIMC_IO_LCDFIFO) fimc_hw_set_output_path()
605 struct fimc_frame *f = &vc->ctx->s_frame; fimc_hw_set_camera_source()
790 void fimc_activate_capture(struct fimc_ctx *ctx) fimc_activate_capture() argument
792 fimc_hw_enable_scaler(ctx->fimc_dev, ctx->scaler.enabled); fimc_activate_capture()
793 fimc_hw_enable_capture(ctx); fimc_activate_capture()
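
Note: fimc_hw_get_in_flip()/fimc_hw_get_target_flip() above fold the context's hflip, vflip and rotation fields into a single register value, special-casing 90/270-degree rotation. A standalone sketch of that folding; the bit values and the axis swap are purely illustrative, not the FIMC register layout:

    #include <stdio.h>

    #define FLIP_X 0x1                    /* hypothetical register bits */
    #define FLIP_Y 0x2

    struct ctx_like { int hflip, vflip, rotation; };

    static unsigned int get_flip(const struct ctx_like *ctx)
    {
        unsigned int cfg = 0;

        if (ctx->hflip)
            cfg |= FLIP_X;
        if (ctx->vflip)
            cfg |= FLIP_Y;
        /* 90/270-degree rotation gets its own case, as in the driver;
         * swapping the axes here is only an illustration of why.     */
        if (ctx->rotation == 90 || ctx->rotation == 270)
            cfg = ((cfg & FLIP_X) ? FLIP_Y : 0) |
                  ((cfg & FLIP_Y) ? FLIP_X : 0);
        return cfg;
    }

    int main(void)
    {
        struct ctx_like c = { .hflip = 1, .vflip = 0, .rotation = 270 };
        printf("flip cfg: %#x\n", get_flip(&c));   /* -> 0x2 */
        return 0;
    }
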
H A Dfimc-capture.c38 struct fimc_ctx *ctx = fimc->vid_cap.ctx; fimc_capture_hw_init() local
42 if (ctx == NULL || ctx->s_frame.fmt == NULL) fimc_capture_hw_init()
52 fimc_prepare_dma_offset(ctx, &ctx->d_frame); fimc_capture_hw_init()
53 fimc_set_yuv_order(ctx); fimc_capture_hw_init()
58 fimc_hw_set_camera_offset(fimc, &ctx->s_frame); fimc_capture_hw_init()
60 ret = fimc_set_scaler_info(ctx); fimc_capture_hw_init()
62 fimc_hw_set_input_path(ctx); fimc_capture_hw_init()
63 fimc_hw_set_prescaler(ctx); fimc_capture_hw_init()
64 fimc_hw_set_mainscaler(ctx); fimc_capture_hw_init()
65 fimc_hw_set_target_format(ctx); fimc_capture_hw_init()
66 fimc_hw_set_rotation(ctx); fimc_capture_hw_init()
67 fimc_hw_set_effect(ctx); fimc_capture_hw_init()
68 fimc_hw_set_output_path(ctx); fimc_capture_hw_init()
69 fimc_hw_set_out_dma(ctx); fimc_capture_hw_init()
71 fimc_hw_set_rgb_alpha(ctx); fimc_capture_hw_init()
154 static int fimc_capture_config_update(struct fimc_ctx *ctx) fimc_capture_config_update() argument
156 struct fimc_dev *fimc = ctx->fimc_dev; fimc_capture_config_update()
159 fimc_hw_set_camera_offset(fimc, &ctx->s_frame); fimc_capture_config_update()
161 ret = fimc_set_scaler_info(ctx); fimc_capture_config_update()
165 fimc_hw_set_prescaler(ctx); fimc_capture_config_update()
166 fimc_hw_set_mainscaler(ctx); fimc_capture_config_update()
167 fimc_hw_set_target_format(ctx); fimc_capture_config_update()
168 fimc_hw_set_rotation(ctx); fimc_capture_config_update()
169 fimc_hw_set_effect(ctx); fimc_capture_config_update()
170 fimc_prepare_dma_offset(ctx, &ctx->d_frame); fimc_capture_config_update()
171 fimc_hw_set_out_dma(ctx); fimc_capture_config_update()
173 fimc_hw_set_rgb_alpha(ctx); fimc_capture_config_update()
184 struct fimc_frame *f = &cap->ctx->d_frame; fimc_capture_irq_handler()
254 fimc_capture_config_update(cap->ctx); fimc_capture_irq_handler()
268 struct fimc_ctx *ctx = q->drv_priv; start_streaming() local
269 struct fimc_dev *fimc = ctx->fimc_dev; start_streaming()
288 fimc_activate_capture(ctx); start_streaming()
299 struct fimc_ctx *ctx = q->drv_priv; stop_streaming() local
300 struct fimc_dev *fimc = ctx->fimc_dev; stop_streaming()
352 struct fimc_ctx *ctx = vq->drv_priv; queue_setup() local
353 struct fimc_frame *frame = &ctx->d_frame; queue_setup()
381 allocators[i] = ctx->fimc_dev->alloc_ctx; queue_setup()
390 struct fimc_ctx *ctx = vq->drv_priv; buffer_prepare() local
393 if (ctx->d_frame.fmt == NULL) buffer_prepare()
396 for (i = 0; i < ctx->d_frame.fmt->memplanes; i++) { buffer_prepare()
397 unsigned long size = ctx->d_frame.payload[i]; buffer_prepare()
400 v4l2_err(&ctx->fimc_dev->vid_cap.ve.vdev, buffer_prepare()
415 struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); buffer_queue() local
416 struct fimc_dev *fimc = ctx->fimc_dev; buffer_queue()
423 fimc_prepare_addr(ctx, &buf->vb, &ctx->d_frame, &buf->paddr); buffer_queue()
450 fimc_activate_capture(ctx); buffer_queue()
511 fimc_ctrls_delete(vc->ctx); fimc_capture_open()
513 ret = fimc_ctrls_create(vc->ctx); fimc_capture_open()
583 static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx, fimc_capture_try_format() argument
587 bool rotation = ctx->rotation == 90 || ctx->rotation == 270; fimc_capture_try_format()
588 struct fimc_dev *fimc = ctx->fimc_dev; fimc_capture_try_format()
591 struct fimc_frame *dst = &ctx->d_frame; fimc_capture_try_format()
597 if (code && ctx->s_frame.fmt && pad == FIMC_SD_PAD_SOURCE && fimc_capture_try_format()
598 fimc_fmt_is_user_defined(ctx->s_frame.fmt->color)) fimc_capture_try_format()
599 *code = ctx->s_frame.fmt->mbus_code; fimc_capture_try_format()
630 *width = ctx->s_frame.f_width; fimc_capture_try_format()
631 *height = ctx->s_frame.f_height; fimc_capture_try_format()
636 if (ctx->state & FIMC_COMPOSE) { fimc_capture_try_format()
660 static void fimc_capture_try_selection(struct fimc_ctx *ctx, fimc_capture_try_selection() argument
664 bool rotate = ctx->rotation == 90 || ctx->rotation == 270; fimc_capture_try_selection()
665 struct fimc_dev *fimc = ctx->fimc_dev; fimc_capture_try_selection()
668 struct fimc_frame *sink = &ctx->s_frame; fimc_capture_try_selection()
674 if (fimc_fmt_is_user_defined(ctx->d_frame.fmt->color)) { fimc_capture_try_selection()
681 if (ctx->rotation != 90 && ctx->rotation != 270) fimc_capture_try_selection()
775 * @ctx: FIMC capture context
780 static int fimc_pipeline_try_format(struct fimc_ctx *ctx, fimc_pipeline_try_format() argument
785 struct fimc_dev *fimc = ctx->fimc_dev; fimc_pipeline_try_format()
847 ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height, fimc_pipeline_try_format()
849 ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height, fimc_pipeline_try_format()
915 __fimc_get_format(&fimc->vid_cap.ctx->d_frame, f); fimc_cap_g_fmt_mplane()
932 struct fimc_ctx *ctx = vc->ctx; __video_try_or_set_format() local
938 fimc_capture_try_format(ctx, &pix->width, &pix->height, __video_try_or_set_format()
945 ctx->s_frame.f_width = pix->width; __video_try_or_set_format()
946 ctx->s_frame.f_height = pix->height; __video_try_or_set_format()
951 *out_fmt = fimc_capture_try_format(ctx, &pix->width, &pix->height, __video_try_or_set_format()
975 ret = fimc_pipeline_try_format(ctx, mf, inp_fmt, try); __video_try_or_set_format()
1014 static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx, fimc_capture_mark_jpeg_xfer() argument
1019 ctx->scaler.enabled = !jpeg; fimc_capture_mark_jpeg_xfer()
1020 fimc_ctrls_activate(ctx, !jpeg); fimc_capture_mark_jpeg_xfer()
1023 set_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state); fimc_capture_mark_jpeg_xfer()
1025 clear_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state); fimc_capture_mark_jpeg_xfer()
1032 struct fimc_ctx *ctx = vc->ctx; __fimc_capture_set_format() local
1034 struct fimc_frame *ff = &ctx->d_frame; __fimc_capture_set_format()
1046 fimc_alpha_ctrl_update(ctx); __fimc_capture_set_format()
1055 if (!(ctx->state & FIMC_COMPOSE)) __fimc_capture_set_format()
1058 fimc_capture_mark_jpeg_xfer(ctx, ff->fmt->color); __fimc_capture_set_format()
1062 ctx->s_frame.fmt = inp_fmt; __fimc_capture_set_format()
1063 set_frame_bounds(&ctx->s_frame, pix->width, pix->height); __fimc_capture_set_format()
1064 set_frame_crop(&ctx->s_frame, 0, 0, pix->width, pix->height); __fimc_capture_set_format()
1150 struct fimc_frame *ff = &vc->ctx->s_frame; fimc_pipeline_validate()
1178 struct fimc_frame *frame = &vc->ctx->d_frame; fimc_pipeline_validate()
1280 struct fimc_ctx *ctx = fimc->vid_cap.ctx; fimc_cap_g_selection() local
1281 struct fimc_frame *f = &ctx->s_frame; fimc_cap_g_selection()
1289 f = &ctx->d_frame; fimc_cap_g_selection()
1299 f = &ctx->d_frame; fimc_cap_g_selection()
1328 struct fimc_ctx *ctx = fimc->vid_cap.ctx; fimc_cap_s_selection() local
1337 f = &ctx->d_frame; fimc_cap_s_selection()
1339 f = &ctx->s_frame; fimc_cap_s_selection()
1343 fimc_capture_try_selection(ctx, &rect, s->target); fimc_cap_s_selection()
1428 return v4l2_ctrl_add_handler(&vc->ctx->ctrls.handler, fimc_link_setup()
1502 struct fimc_ctx *ctx = fimc->vid_cap.ctx; fimc_subdev_get_fmt() local
1503 struct fimc_frame *ff = &ctx->s_frame; fimc_subdev_get_fmt()
1545 struct fimc_ctx *ctx = vc->ctx; fimc_subdev_set_fmt() local
1556 ffmt = fimc_capture_try_format(ctx, &mf->width, &mf->height, fimc_subdev_set_fmt()
1571 fimc_alpha_ctrl_update(ctx); fimc_subdev_set_fmt()
1573 fimc_capture_mark_jpeg_xfer(ctx, ffmt->color); fimc_subdev_set_fmt()
1575 ff = &ctx->d_frame; fimc_subdev_set_fmt()
1577 mf->width = ctx->s_frame.width; fimc_subdev_set_fmt()
1578 mf->height = ctx->s_frame.height; fimc_subdev_set_fmt()
1580 ff = &ctx->s_frame; fimc_subdev_set_fmt()
1594 if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_COMPOSE))) fimc_subdev_set_fmt()
1598 ctx->state &= ~FIMC_COMPOSE; fimc_subdev_set_fmt()
1609 struct fimc_ctx *ctx = fimc->vid_cap.ctx; fimc_subdev_get_selection() local
1610 struct fimc_frame *f = &ctx->s_frame; fimc_subdev_get_selection()
1621 f = &ctx->d_frame; fimc_subdev_get_selection()
1635 f = &ctx->d_frame; fimc_subdev_get_selection()
1664 struct fimc_ctx *ctx = fimc->vid_cap.ctx; fimc_subdev_set_selection() local
1665 struct fimc_frame *f = &ctx->s_frame; fimc_subdev_set_selection()
1674 fimc_capture_try_selection(ctx, r, V4L2_SEL_TGT_CROP); fimc_subdev_set_selection()
1682 f = &ctx->d_frame; fimc_subdev_set_selection()
1696 ctx->state |= FIMC_COMPOSE; fimc_subdev_set_selection()
1742 struct fimc_ctx *ctx; fimc_register_capture_device() local
1747 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); fimc_register_capture_device()
1748 if (!ctx) fimc_register_capture_device()
1751 ctx->fimc_dev = fimc; fimc_register_capture_device()
1752 ctx->in_path = FIMC_IO_CAMERA; fimc_register_capture_device()
1753 ctx->out_path = FIMC_IO_DMA; fimc_register_capture_device()
1754 ctx->state = FIMC_CTX_CAP; fimc_register_capture_device()
1755 ctx->s_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0); fimc_register_capture_device()
1756 ctx->d_frame.fmt = ctx->s_frame.fmt; fimc_register_capture_device()
1773 vid_cap->ctx = ctx; fimc_register_capture_device()
1781 q->drv_priv = ctx; fimc_register_capture_device()
1798 ctx->s_frame.width = FIMC_DEFAULT_WIDTH; fimc_register_capture_device()
1799 ctx->s_frame.height = FIMC_DEFAULT_HEIGHT; fimc_register_capture_device()
1800 ctx->s_frame.fmt = fmt; fimc_register_capture_device()
1811 ret = fimc_ctrls_create(ctx); fimc_register_capture_device()
1822 vfd->ctrl_handler = &ctx->ctrls.handler; fimc_register_capture_device()
1826 fimc_ctrls_delete(ctx); fimc_register_capture_device()
1830 kfree(ctx); fimc_register_capture_device()
1873 fimc_ctrls_delete(fimc->vid_cap.ctx); fimc_capture_subdev_unregistered()
1876 kfree(fimc->vid_cap.ctx); fimc_capture_subdev_unregistered()
1877 fimc->vid_cap.ctx = NULL; fimc_capture_subdev_unregistered()
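
Note: fimc_device_run() and fimc_capture_config_update() above reprogram the scaler, offsets and DMA paths only when state bits such as FIMC_PARAMS or FIMC_COMPOSE are set on the ctx, then mask them off. A small sketch of that dirty-flag pattern, with hypothetical flag names:

    #include <stdio.h>

    #define F_PARAMS  (1u << 0)           /* hypothetical state bits   */
    #define F_COMPOSE (1u << 1)

    struct cap_like_ctx { unsigned int state; };

    static void hw_program(struct cap_like_ctx *ctx)
    {
        if (ctx->state & F_PARAMS) {
            puts("reprogram scaler/offsets");  /* expensive path       */
            ctx->state &= ~F_PARAMS;           /* consume the flag     */
        }
    }

    int main(void)
    {
        struct cap_like_ctx c = { .state = F_PARAMS | F_COMPOSE };
        hw_program(&c);                   /* reprograms once           */
        hw_program(&c);                   /* no-op: flag was consumed  */
        return 0;
    }
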
/linux-4.1.27/drivers/infiniband/core/
H A Ducma.c95 struct ucma_context *ctx; member in struct:ucma_multicast
105 struct ucma_context *ctx; member in struct:ucma_event
119 struct ucma_context *ctx; _ucma_find_context() local
121 ctx = idr_find(&ctx_idr, id); _ucma_find_context()
122 if (!ctx) _ucma_find_context()
123 ctx = ERR_PTR(-ENOENT); _ucma_find_context()
124 else if (ctx->file != file) _ucma_find_context()
125 ctx = ERR_PTR(-EINVAL); _ucma_find_context()
126 return ctx; _ucma_find_context()
131 struct ucma_context *ctx; ucma_get_ctx() local
134 ctx = _ucma_find_context(id, file); ucma_get_ctx()
135 if (!IS_ERR(ctx)) ucma_get_ctx()
136 atomic_inc(&ctx->ref); ucma_get_ctx()
138 return ctx; ucma_get_ctx()
141 static void ucma_put_ctx(struct ucma_context *ctx) ucma_put_ctx() argument
143 if (atomic_dec_and_test(&ctx->ref)) ucma_put_ctx()
144 complete(&ctx->comp); ucma_put_ctx()
149 struct ucma_context *ctx; ucma_alloc_ctx() local
151 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); ucma_alloc_ctx()
152 if (!ctx) ucma_alloc_ctx()
155 atomic_set(&ctx->ref, 1); ucma_alloc_ctx()
156 init_completion(&ctx->comp); ucma_alloc_ctx()
157 INIT_LIST_HEAD(&ctx->mc_list); ucma_alloc_ctx()
158 ctx->file = file; ucma_alloc_ctx()
161 ctx->id = idr_alloc(&ctx_idr, ctx, 0, 0, GFP_KERNEL); ucma_alloc_ctx()
163 if (ctx->id < 0) ucma_alloc_ctx()
166 list_add_tail(&ctx->list, &file->ctx_list); ucma_alloc_ctx()
167 return ctx; ucma_alloc_ctx()
170 kfree(ctx); ucma_alloc_ctx()
174 static struct ucma_multicast* ucma_alloc_multicast(struct ucma_context *ctx) ucma_alloc_multicast() argument
188 mc->ctx = ctx; ucma_alloc_multicast()
189 list_add_tail(&mc->list, &ctx->mc_list); ucma_alloc_multicast()
225 static void ucma_set_event_context(struct ucma_context *ctx, ucma_set_event_context() argument
229 uevent->ctx = ctx; ucma_set_event_context()
239 uevent->resp.uid = ctx->uid; ucma_set_event_context()
240 uevent->resp.id = ctx->id; ucma_set_event_context()
249 struct ucma_context *ctx = cm_id->context; ucma_event_handler() local
256 mutex_lock(&ctx->file->mut); ucma_event_handler()
258 ucma_set_event_context(ctx, event, uevent); ucma_event_handler()
268 if (!ctx->backlog) { ucma_event_handler()
273 ctx->backlog--; ucma_event_handler()
274 } else if (!ctx->uid || ctx->cm_id != cm_id) { ucma_event_handler()
285 list_add_tail(&uevent->list, &ctx->file->event_list); ucma_event_handler()
286 wake_up_interruptible(&ctx->file->poll_wait); ucma_event_handler()
288 mutex_unlock(&ctx->file->mut); ucma_event_handler()
295 struct ucma_context *ctx; ucma_get_event() local
323 ctx = ucma_alloc_ctx(file); ucma_get_event()
324 if (!ctx) { ucma_get_event()
328 uevent->ctx->backlog++; ucma_get_event()
329 ctx->cm_id = uevent->cm_id; ucma_get_event()
330 ctx->cm_id->context = ctx; ucma_get_event()
331 uevent->resp.id = ctx->id; ucma_get_event()
341 uevent->ctx->events_reported++; ucma_get_event()
373 struct ucma_context *ctx; ucma_create_id() local
388 ctx = ucma_alloc_ctx(file); ucma_create_id()
390 if (!ctx) ucma_create_id()
393 ctx->uid = cmd.uid; ucma_create_id()
394 ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type); ucma_create_id()
395 if (IS_ERR(ctx->cm_id)) { ucma_create_id()
396 ret = PTR_ERR(ctx->cm_id); ucma_create_id()
400 resp.id = ctx->id; ucma_create_id()
409 rdma_destroy_id(ctx->cm_id); ucma_create_id()
412 idr_remove(&ctx_idr, ctx->id); ucma_create_id()
414 kfree(ctx); ucma_create_id()
418 static void ucma_cleanup_multicast(struct ucma_context *ctx) ucma_cleanup_multicast() argument
423 list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { ucma_cleanup_multicast()
435 list_for_each_entry_safe(uevent, tmp, &mc->ctx->file->event_list, list) { ucma_cleanup_mc_events()
449 static int ucma_free_ctx(struct ucma_context *ctx) ucma_free_ctx() argument
456 rdma_destroy_id(ctx->cm_id); ucma_free_ctx()
458 ucma_cleanup_multicast(ctx); ucma_free_ctx()
461 mutex_lock(&ctx->file->mut); ucma_free_ctx()
462 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) { ucma_free_ctx()
463 if (uevent->ctx == ctx) ucma_free_ctx()
466 list_del(&ctx->list); ucma_free_ctx()
467 mutex_unlock(&ctx->file->mut); ucma_free_ctx()
476 events_reported = ctx->events_reported; ucma_free_ctx()
477 kfree(ctx); ucma_free_ctx()
486 struct ucma_context *ctx; ucma_destroy_id() local
496 ctx = _ucma_find_context(cmd.id, file); ucma_destroy_id()
497 if (!IS_ERR(ctx)) ucma_destroy_id()
498 idr_remove(&ctx_idr, ctx->id); ucma_destroy_id()
501 if (IS_ERR(ctx)) ucma_destroy_id()
502 return PTR_ERR(ctx); ucma_destroy_id()
504 ucma_put_ctx(ctx); ucma_destroy_id()
505 wait_for_completion(&ctx->comp); ucma_destroy_id()
506 resp.events_reported = ucma_free_ctx(ctx); ucma_destroy_id()
519 struct ucma_context *ctx; ucma_bind_ip() local
525 ctx = ucma_get_ctx(file, cmd.id); ucma_bind_ip()
526 if (IS_ERR(ctx)) ucma_bind_ip()
527 return PTR_ERR(ctx); ucma_bind_ip()
529 ret = rdma_bind_addr(ctx->cm_id, (struct sockaddr *) &cmd.addr); ucma_bind_ip()
530 ucma_put_ctx(ctx); ucma_bind_ip()
539 struct ucma_context *ctx; ucma_bind() local
549 ctx = ucma_get_ctx(file, cmd.id); ucma_bind()
550 if (IS_ERR(ctx)) ucma_bind()
551 return PTR_ERR(ctx); ucma_bind()
553 ret = rdma_bind_addr(ctx->cm_id, addr); ucma_bind()
554 ucma_put_ctx(ctx); ucma_bind()
563 struct ucma_context *ctx; ucma_resolve_ip() local
569 ctx = ucma_get_ctx(file, cmd.id); ucma_resolve_ip()
570 if (IS_ERR(ctx)) ucma_resolve_ip()
571 return PTR_ERR(ctx); ucma_resolve_ip()
573 ret = rdma_resolve_addr(ctx->cm_id, (struct sockaddr *) &cmd.src_addr, ucma_resolve_ip()
576 ucma_put_ctx(ctx); ucma_resolve_ip()
586 struct ucma_context *ctx; ucma_resolve_addr() local
598 ctx = ucma_get_ctx(file, cmd.id); ucma_resolve_addr()
599 if (IS_ERR(ctx)) ucma_resolve_addr()
600 return PTR_ERR(ctx); ucma_resolve_addr()
602 ret = rdma_resolve_addr(ctx->cm_id, src, dst, cmd.timeout_ms); ucma_resolve_addr()
603 ucma_put_ctx(ctx); ucma_resolve_addr()
612 struct ucma_context *ctx; ucma_resolve_route() local
618 ctx = ucma_get_ctx(file, cmd.id); ucma_resolve_route()
619 if (IS_ERR(ctx)) ucma_resolve_route()
620 return PTR_ERR(ctx); ucma_resolve_route()
622 ret = rdma_resolve_route(ctx->cm_id, cmd.timeout_ms); ucma_resolve_route()
623 ucma_put_ctx(ctx); ucma_resolve_route()
697 struct ucma_context *ctx; ucma_query_route() local
707 ctx = ucma_get_ctx(file, cmd.id); ucma_query_route()
708 if (IS_ERR(ctx)) ucma_query_route()
709 return PTR_ERR(ctx); ucma_query_route()
712 addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; ucma_query_route()
716 addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; ucma_query_route()
720 if (!ctx->cm_id->device) ucma_query_route()
723 resp.node_guid = (__force __u64) ctx->cm_id->device->node_guid; ucma_query_route()
724 resp.port_num = ctx->cm_id->port_num; ucma_query_route()
725 switch (rdma_node_get_transport(ctx->cm_id->device->node_type)) { ucma_query_route()
727 switch (rdma_port_get_link_layer(ctx->cm_id->device, ucma_query_route()
728 ctx->cm_id->port_num)) { ucma_query_route()
730 ucma_copy_ib_route(&resp, &ctx->cm_id->route); ucma_query_route()
733 ucma_copy_iboe_route(&resp, &ctx->cm_id->route); ucma_query_route()
740 ucma_copy_iw_route(&resp, &ctx->cm_id->route); ucma_query_route()
751 ucma_put_ctx(ctx); ucma_query_route()
767 static ssize_t ucma_query_addr(struct ucma_context *ctx, ucma_query_addr() argument
779 addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; ucma_query_addr()
783 addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; ucma_query_addr()
787 ucma_query_device_addr(ctx->cm_id, &resp); ucma_query_addr()
795 static ssize_t ucma_query_path(struct ucma_context *ctx, ucma_query_path() argument
808 resp->num_paths = ctx->cm_id->route.num_paths; ucma_query_path()
815 ib_sa_pack_path(&ctx->cm_id->route.path_rec[i], ucma_query_path()
827 static ssize_t ucma_query_gid(struct ucma_context *ctx, ucma_query_gid() argument
839 ucma_query_device_addr(ctx->cm_id, &resp); ucma_query_gid()
843 if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) { ucma_query_gid()
844 memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size); ucma_query_gid()
848 rdma_addr_get_sgid(&ctx->cm_id->route.addr.dev_addr, ucma_query_gid()
850 addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) ucma_query_gid()
851 &ctx->cm_id->route.addr.src_addr); ucma_query_gid()
856 if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) { ucma_query_gid()
857 memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); ucma_query_gid()
861 rdma_addr_get_dgid(&ctx->cm_id->route.addr.dev_addr, ucma_query_gid()
863 addr->sib_sid = rdma_get_service_id(ctx->cm_id, (struct sockaddr *) ucma_query_gid()
864 &ctx->cm_id->route.addr.dst_addr); ucma_query_gid()
878 struct ucma_context *ctx; ucma_query() local
886 ctx = ucma_get_ctx(file, cmd.id); ucma_query()
887 if (IS_ERR(ctx)) ucma_query()
888 return PTR_ERR(ctx); ucma_query()
892 ret = ucma_query_addr(ctx, response, out_len); ucma_query()
895 ret = ucma_query_path(ctx, response, out_len); ucma_query()
898 ret = ucma_query_gid(ctx, response, out_len); ucma_query()
905 ucma_put_ctx(ctx); ucma_query()
930 struct ucma_context *ctx; ucma_connect() local
939 ctx = ucma_get_ctx(file, cmd.id); ucma_connect()
940 if (IS_ERR(ctx)) ucma_connect()
941 return PTR_ERR(ctx); ucma_connect()
943 ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); ucma_connect()
944 ret = rdma_connect(ctx->cm_id, &conn_param); ucma_connect()
945 ucma_put_ctx(ctx); ucma_connect()
953 struct ucma_context *ctx; ucma_listen() local
959 ctx = ucma_get_ctx(file, cmd.id); ucma_listen()
960 if (IS_ERR(ctx)) ucma_listen()
961 return PTR_ERR(ctx); ucma_listen()
963 ctx->backlog = cmd.backlog > 0 && cmd.backlog < max_backlog ? ucma_listen()
965 ret = rdma_listen(ctx->cm_id, ctx->backlog); ucma_listen()
966 ucma_put_ctx(ctx); ucma_listen()
975 struct ucma_context *ctx; ucma_accept() local
981 ctx = ucma_get_ctx(file, cmd.id); ucma_accept()
982 if (IS_ERR(ctx)) ucma_accept()
983 return PTR_ERR(ctx); ucma_accept()
986 ucma_copy_conn_param(ctx->cm_id, &conn_param, &cmd.conn_param); ucma_accept()
988 ret = rdma_accept(ctx->cm_id, &conn_param); ucma_accept()
990 ctx->uid = cmd.uid; ucma_accept()
993 ret = rdma_accept(ctx->cm_id, NULL); ucma_accept()
995 ucma_put_ctx(ctx); ucma_accept()
1003 struct ucma_context *ctx; ucma_reject() local
1009 ctx = ucma_get_ctx(file, cmd.id); ucma_reject()
1010 if (IS_ERR(ctx)) ucma_reject()
1011 return PTR_ERR(ctx); ucma_reject()
1013 ret = rdma_reject(ctx->cm_id, cmd.private_data, cmd.private_data_len); ucma_reject()
1014 ucma_put_ctx(ctx); ucma_reject()
1022 struct ucma_context *ctx; ucma_disconnect() local
1028 ctx = ucma_get_ctx(file, cmd.id); ucma_disconnect()
1029 if (IS_ERR(ctx)) ucma_disconnect()
1030 return PTR_ERR(ctx); ucma_disconnect()
1032 ret = rdma_disconnect(ctx->cm_id); ucma_disconnect()
1033 ucma_put_ctx(ctx); ucma_disconnect()
1043 struct ucma_context *ctx; ucma_init_qp_attr() local
1053 ctx = ucma_get_ctx(file, cmd.id); ucma_init_qp_attr()
1054 if (IS_ERR(ctx)) ucma_init_qp_attr()
1055 return PTR_ERR(ctx); ucma_init_qp_attr()
1060 ret = rdma_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); ucma_init_qp_attr()
1070 ucma_put_ctx(ctx); ucma_init_qp_attr()
1074 static int ucma_set_option_id(struct ucma_context *ctx, int optname, ucma_set_option_id() argument
1085 rdma_set_service_type(ctx->cm_id, *((u8 *) optval)); ucma_set_option_id()
1092 ret = rdma_set_reuseaddr(ctx->cm_id, *((int *) optval) ? 1 : 0); ucma_set_option_id()
1099 ret = rdma_set_afonly(ctx->cm_id, *((int *) optval) ? 1 : 0); ucma_set_option_id()
1108 static int ucma_set_ib_path(struct ucma_context *ctx, ucma_set_ib_path() argument
1131 ret = rdma_set_ib_paths(ctx->cm_id, &sa_path, 1); ucma_set_ib_path()
1137 return ucma_event_handler(ctx->cm_id, &event); ucma_set_ib_path()
1140 static int ucma_set_option_ib(struct ucma_context *ctx, int optname, ucma_set_option_ib() argument
1147 ret = ucma_set_ib_path(ctx, optval, optlen); ucma_set_option_ib()
1156 static int ucma_set_option_level(struct ucma_context *ctx, int level, ucma_set_option_level() argument
1163 ret = ucma_set_option_id(ctx, optname, optval, optlen); ucma_set_option_level()
1166 ret = ucma_set_option_ib(ctx, optname, optval, optlen); ucma_set_option_level()
1179 struct ucma_context *ctx; ucma_set_option() local
1186 ctx = ucma_get_ctx(file, cmd.id); ucma_set_option()
1187 if (IS_ERR(ctx)) ucma_set_option()
1188 return PTR_ERR(ctx); ucma_set_option()
1197 ret = ucma_set_option_level(ctx, cmd.level, cmd.optname, optval, ucma_set_option()
1202 ucma_put_ctx(ctx); ucma_set_option()
1210 struct ucma_context *ctx; ucma_notify() local
1216 ctx = ucma_get_ctx(file, cmd.id); ucma_notify()
1217 if (IS_ERR(ctx)) ucma_notify()
1218 return PTR_ERR(ctx); ucma_notify()
1220 ret = rdma_notify(ctx->cm_id, (enum ib_event_type) cmd.event); ucma_notify()
1221 ucma_put_ctx(ctx); ucma_notify()
1229 struct ucma_context *ctx; ucma_process_join() local
1241 ctx = ucma_get_ctx(file, cmd->id); ucma_process_join()
1242 if (IS_ERR(ctx)) ucma_process_join()
1243 return PTR_ERR(ctx); ucma_process_join()
1246 mc = ucma_alloc_multicast(ctx); ucma_process_join()
1254 ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr, mc); ucma_process_join()
1266 ucma_put_ctx(ctx); ucma_process_join()
1270 rdma_leave_multicast(ctx->cm_id, (struct sockaddr *) &mc->addr); ucma_process_join()
1280 ucma_put_ctx(ctx); ucma_process_join()
1335 else if (mc->ctx->file != file) ucma_leave_multicast()
1339 atomic_inc(&mc->ctx->ref); ucma_leave_multicast()
1348 rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); ucma_leave_multicast()
1349 mutex_lock(&mc->ctx->file->mut); ucma_leave_multicast()
1352 mutex_unlock(&mc->ctx->file->mut); ucma_leave_multicast()
1354 ucma_put_ctx(mc->ctx); ucma_leave_multicast()
1388 static void ucma_move_events(struct ucma_context *ctx, struct ucma_file *file) ucma_move_events() argument
1392 list_for_each_entry_safe(uevent, tmp, &ctx->file->event_list, list) ucma_move_events()
1393 if (uevent->ctx == ctx) ucma_move_events()
1403 struct ucma_context *ctx; ucma_migrate_id() local
1417 ctx = ucma_get_ctx(f.file->private_data, cmd.id); ucma_migrate_id()
1418 if (IS_ERR(ctx)) { ucma_migrate_id()
1419 ret = PTR_ERR(ctx); ucma_migrate_id()
1423 cur_file = ctx->file; ucma_migrate_id()
1425 resp.events_reported = ctx->events_reported; ucma_migrate_id()
1436 list_move_tail(&ctx->list, &new_file->ctx_list); ucma_migrate_id()
1437 ucma_move_events(ctx, new_file); ucma_migrate_id()
1438 ctx->file = new_file; ucma_migrate_id()
1439 resp.events_reported = ctx->events_reported; ucma_migrate_id()
1449 ucma_put_ctx(ctx); ucma_migrate_id()
1558 struct ucma_context *ctx, *tmp; ucma_close() local
1561 list_for_each_entry_safe(ctx, tmp, &file->ctx_list, list) { ucma_close()
1565 idr_remove(&ctx_idr, ctx->id); ucma_close()
1568 ucma_free_ctx(ctx); ucma_close()
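Every ucma handler above repeats the same lookup-use-put shape: resolve the user-supplied id to a context, propagate failures encoded as error pointers, act on ctx->cm_id, then drop the reference. A minimal sketch of the error-pointer half of that pattern, with hypothetical my_ctx_find()/my_ctx_put() helpers standing in for the idr lookup and the put path (only ERR_PTR/IS_ERR/PTR_ERR from <linux/err.h> and the atomic op are the real API):

        #include <linux/atomic.h>
        #include <linux/err.h>

        struct my_ctx {
                atomic_t ref;
                /* ... driver state ... */
        };

        static struct my_ctx *my_ctx_find(int id);      /* assumed: table lookup */
        static void my_ctx_put(struct my_ctx *ctx);     /* assumed: drop reference */

        static struct my_ctx *my_ctx_get(int id)
        {
                struct my_ctx *ctx = my_ctx_find(id);

                if (!ctx)
                        return ERR_PTR(-ENOENT);        /* errno rides in the pointer */
                atomic_inc(&ctx->ref);
                return ctx;
        }

        static int my_handler(int id)
        {
                struct my_ctx *ctx = my_ctx_get(id);

                if (IS_ERR(ctx))
                        return PTR_ERR(ctx);            /* unpack the errno again */
                /* ... act on the context ... */
                my_ctx_put(ctx);
                return 0;
        }

Encoding the errno in the pointer lets one return value carry both the object and the failure reason, which is why each handler can open with the same two-line IS_ERR/PTR_ERR check.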
H A Ducm.c88 struct list_head file_list; /* member in file ctx list */
92 struct ib_ucm_context *ctx; member in struct:ib_ucm_event
94 struct list_head ctx_list; /* member in ctx event list */
127 struct ib_ucm_context *ctx; ib_ucm_ctx_get() local
130 ctx = idr_find(&ctx_id_table, id); ib_ucm_ctx_get()
131 if (!ctx) ib_ucm_ctx_get()
132 ctx = ERR_PTR(-ENOENT); ib_ucm_ctx_get()
133 else if (ctx->file != file) ib_ucm_ctx_get()
134 ctx = ERR_PTR(-EINVAL); ib_ucm_ctx_get()
136 atomic_inc(&ctx->ref); ib_ucm_ctx_get()
139 return ctx; ib_ucm_ctx_get()
142 static void ib_ucm_ctx_put(struct ib_ucm_context *ctx) ib_ucm_ctx_put() argument
144 if (atomic_dec_and_test(&ctx->ref)) ib_ucm_ctx_put()
145 complete(&ctx->comp); ib_ucm_ctx_put()
153 static void ib_ucm_cleanup_events(struct ib_ucm_context *ctx) ib_ucm_cleanup_events() argument
157 mutex_lock(&ctx->file->file_mutex); ib_ucm_cleanup_events()
158 list_del(&ctx->file_list); ib_ucm_cleanup_events()
159 while (!list_empty(&ctx->events)) { ib_ucm_cleanup_events()
161 uevent = list_entry(ctx->events.next, ib_ucm_cleanup_events()
165 mutex_unlock(&ctx->file->file_mutex); ib_ucm_cleanup_events()
172 mutex_lock(&ctx->file->file_mutex); ib_ucm_cleanup_events()
174 mutex_unlock(&ctx->file->file_mutex); ib_ucm_cleanup_events()
179 struct ib_ucm_context *ctx; ib_ucm_ctx_alloc() local
181 ctx = kzalloc(sizeof *ctx, GFP_KERNEL); ib_ucm_ctx_alloc()
182 if (!ctx) ib_ucm_ctx_alloc()
185 atomic_set(&ctx->ref, 1); ib_ucm_ctx_alloc()
186 init_completion(&ctx->comp); ib_ucm_ctx_alloc()
187 ctx->file = file; ib_ucm_ctx_alloc()
188 INIT_LIST_HEAD(&ctx->events); ib_ucm_ctx_alloc()
191 ctx->id = idr_alloc(&ctx_id_table, ctx, 0, 0, GFP_KERNEL); ib_ucm_ctx_alloc()
193 if (ctx->id < 0) ib_ucm_ctx_alloc()
196 list_add_tail(&ctx->file_list, &file->ctxs); ib_ucm_ctx_alloc()
197 return ctx; ib_ucm_ctx_alloc()
200 kfree(ctx); ib_ucm_ctx_alloc()
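ib_ucm_ctx_alloc() reserves each context's user-visible handle with idr_alloc(), which stores the ctx pointer under a fresh integer id (end = 0 meaning no upper bound) so ib_ucm_ctx_get() can later resolve it with idr_find(). A compact sketch of that allocation, assuming a my_ctx with an int id field and that the caller serializes table access as ucm.c does around ctx_id_table:

        #include <linux/idr.h>

        static DEFINE_IDR(my_ctx_idr);

        /* caller holds the table lock, as ucm.c does around ctx_id_table */
        static int my_ctx_alloc_id(struct my_ctx *ctx)
        {
                int id = idr_alloc(&my_ctx_idr, ctx, 0, 0, GFP_KERNEL);

                if (id < 0)
                        return id;      /* -ENOMEM or -ENOSPC */
                ctx->id = id;
                return 0;
        }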
352 struct ib_ucm_context *ctx; ib_ucm_event_handler() local
355 ctx = cm_id->context; ib_ucm_event_handler()
361 uevent->ctx = ctx; ib_ucm_event_handler()
363 uevent->resp.uid = ctx->uid; ib_ucm_event_handler()
364 uevent->resp.id = ctx->id; ib_ucm_event_handler()
371 mutex_lock(&ctx->file->file_mutex); ib_ucm_event_handler()
372 list_add_tail(&uevent->file_list, &ctx->file->events); ib_ucm_event_handler()
373 list_add_tail(&uevent->ctx_list, &ctx->events); ib_ucm_event_handler()
374 wake_up_interruptible(&ctx->file->poll_wait); ib_ucm_event_handler()
375 mutex_unlock(&ctx->file->file_mutex); ib_ucm_event_handler()
389 struct ib_ucm_context *ctx; ib_ucm_event() local
417 ctx = ib_ucm_ctx_alloc(file); ib_ucm_event()
418 if (!ctx) { ib_ucm_event()
423 ctx->cm_id = uevent->cm_id; ib_ucm_event()
424 ctx->cm_id->context = ctx; ib_ucm_event()
425 uevent->resp.id = ctx->id; ib_ucm_event()
460 uevent->ctx->events_reported++; ib_ucm_event()
476 struct ib_ucm_context *ctx; ib_ucm_create_id() local
486 ctx = ib_ucm_ctx_alloc(file); ib_ucm_create_id()
488 if (!ctx) ib_ucm_create_id()
491 ctx->uid = cmd.uid; ib_ucm_create_id()
492 ctx->cm_id = ib_create_cm_id(file->device->ib_dev, ib_ucm_create_id()
493 ib_ucm_event_handler, ctx); ib_ucm_create_id()
494 if (IS_ERR(ctx->cm_id)) { ib_ucm_create_id()
495 result = PTR_ERR(ctx->cm_id); ib_ucm_create_id()
499 resp.id = ctx->id; ib_ucm_create_id()
508 ib_destroy_cm_id(ctx->cm_id); ib_ucm_create_id()
511 idr_remove(&ctx_id_table, ctx->id); ib_ucm_create_id()
513 kfree(ctx); ib_ucm_create_id()
523 struct ib_ucm_context *ctx; ib_ucm_destroy_id() local
533 ctx = idr_find(&ctx_id_table, cmd.id); ib_ucm_destroy_id()
534 if (!ctx) ib_ucm_destroy_id()
535 ctx = ERR_PTR(-ENOENT); ib_ucm_destroy_id()
536 else if (ctx->file != file) ib_ucm_destroy_id()
537 ctx = ERR_PTR(-EINVAL); ib_ucm_destroy_id()
539 idr_remove(&ctx_id_table, ctx->id); ib_ucm_destroy_id()
542 if (IS_ERR(ctx)) ib_ucm_destroy_id()
543 return PTR_ERR(ctx); ib_ucm_destroy_id()
545 ib_ucm_ctx_put(ctx); ib_ucm_destroy_id()
546 wait_for_completion(&ctx->comp); ib_ucm_destroy_id()
549 ib_destroy_cm_id(ctx->cm_id); ib_ucm_destroy_id()
551 ib_ucm_cleanup_events(ctx); ib_ucm_destroy_id()
553 resp.events_reported = ctx->events_reported; ib_ucm_destroy_id()
558 kfree(ctx); ib_ucm_destroy_id()
568 struct ib_ucm_context *ctx; ib_ucm_attr_id() local
577 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_attr_id()
578 if (IS_ERR(ctx)) ib_ucm_attr_id()
579 return PTR_ERR(ctx); ib_ucm_attr_id()
581 resp.service_id = ctx->cm_id->service_id; ib_ucm_attr_id()
582 resp.service_mask = ctx->cm_id->service_mask; ib_ucm_attr_id()
583 resp.local_id = ctx->cm_id->local_id; ib_ucm_attr_id()
584 resp.remote_id = ctx->cm_id->remote_id; ib_ucm_attr_id()
590 ib_ucm_ctx_put(ctx); ib_ucm_attr_id()
600 struct ib_ucm_context *ctx; ib_ucm_init_qp_attr() local
610 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_init_qp_attr()
611 if (IS_ERR(ctx)) ib_ucm_init_qp_attr()
612 return PTR_ERR(ctx); ib_ucm_init_qp_attr()
617 result = ib_cm_init_qp_attr(ctx->cm_id, &qp_attr, &resp.qp_attr_mask); ib_ucm_init_qp_attr()
628 ib_ucm_ctx_put(ctx); ib_ucm_init_qp_attr()
648 struct ib_ucm_context *ctx; ib_ucm_listen() local
654 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_listen()
655 if (IS_ERR(ctx)) ib_ucm_listen()
656 return PTR_ERR(ctx); ib_ucm_listen()
662 result = ib_cm_listen(ctx->cm_id, cmd.service_id, cmd.service_mask, ib_ucm_listen()
665 ib_ucm_ctx_put(ctx); ib_ucm_listen()
674 struct ib_ucm_context *ctx; ib_ucm_notify() local
680 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_notify()
681 if (IS_ERR(ctx)) ib_ucm_notify()
682 return PTR_ERR(ctx); ib_ucm_notify()
684 result = ib_cm_notify(ctx->cm_id, (enum ib_event_type) cmd.event); ib_ucm_notify()
685 ib_ucm_ctx_put(ctx); ib_ucm_notify()
737 struct ib_ucm_context *ctx; ib_ucm_send_req() local
776 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_send_req()
777 if (!IS_ERR(ctx)) { ib_ucm_send_req()
778 result = ib_send_cm_req(ctx->cm_id, &param); ib_ucm_send_req()
779 ib_ucm_ctx_put(ctx); ib_ucm_send_req()
781 result = PTR_ERR(ctx); ib_ucm_send_req()
795 struct ib_ucm_context *ctx; ib_ucm_send_rep() local
818 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_send_rep()
819 if (!IS_ERR(ctx)) { ib_ucm_send_rep()
820 ctx->uid = cmd.uid; ib_ucm_send_rep()
821 result = ib_send_cm_rep(ctx->cm_id, &param); ib_ucm_send_rep()
822 ib_ucm_ctx_put(ctx); ib_ucm_send_rep()
824 result = PTR_ERR(ctx); ib_ucm_send_rep()
837 struct ib_ucm_context *ctx; ib_ucm_send_private_data() local
848 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_send_private_data()
849 if (!IS_ERR(ctx)) { ib_ucm_send_private_data()
850 result = func(ctx->cm_id, private_data, cmd.len); ib_ucm_send_private_data()
851 ib_ucm_ctx_put(ctx); ib_ucm_send_private_data()
853 result = PTR_ERR(ctx); ib_ucm_send_private_data()
889 struct ib_ucm_context *ctx; ib_ucm_send_info() local
906 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_send_info()
907 if (!IS_ERR(ctx)) { ib_ucm_send_info()
908 result = func(ctx->cm_id, cmd.status, info, cmd.info_len, ib_ucm_send_info()
910 ib_ucm_ctx_put(ctx); ib_ucm_send_info()
912 result = PTR_ERR(ctx); ib_ucm_send_info()
938 struct ib_ucm_context *ctx; ib_ucm_send_mra() local
950 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_send_mra()
951 if (!IS_ERR(ctx)) { ib_ucm_send_mra()
952 result = ib_send_cm_mra(ctx->cm_id, cmd.timeout, data, cmd.len); ib_ucm_send_mra()
953 ib_ucm_ctx_put(ctx); ib_ucm_send_mra()
955 result = PTR_ERR(ctx); ib_ucm_send_mra()
965 struct ib_ucm_context *ctx; ib_ucm_send_lap() local
982 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_send_lap()
983 if (!IS_ERR(ctx)) { ib_ucm_send_lap()
984 result = ib_send_cm_lap(ctx->cm_id, path, data, cmd.len); ib_ucm_send_lap()
985 ib_ucm_ctx_put(ctx); ib_ucm_send_lap()
987 result = PTR_ERR(ctx); ib_ucm_send_lap()
1000 struct ib_ucm_context *ctx; ib_ucm_send_sidr_req() local
1023 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_send_sidr_req()
1024 if (!IS_ERR(ctx)) { ib_ucm_send_sidr_req()
1025 result = ib_send_cm_sidr_req(ctx->cm_id, &param); ib_ucm_send_sidr_req()
1026 ib_ucm_ctx_put(ctx); ib_ucm_send_sidr_req()
1028 result = PTR_ERR(ctx); ib_ucm_send_sidr_req()
1042 struct ib_ucm_context *ctx; ib_ucm_send_sidr_rep() local
1065 ctx = ib_ucm_ctx_get(file, cmd.id); ib_ucm_send_sidr_rep()
1066 if (!IS_ERR(ctx)) { ib_ucm_send_sidr_rep()
1067 result = ib_send_cm_sidr_rep(ctx->cm_id, &param); ib_ucm_send_sidr_rep()
1068 ib_ucm_ctx_put(ctx); ib_ucm_send_sidr_rep()
1070 result = PTR_ERR(ctx); ib_ucm_send_sidr_rep()
1177 struct ib_ucm_context *ctx; ib_ucm_close() local
1181 ctx = list_entry(file->ctxs.next, ib_ucm_close()
1186 idr_remove(&ctx_id_table, ctx->id); ib_ucm_close()
1189 ib_destroy_cm_id(ctx->cm_id); ib_ucm_close()
1190 ib_ucm_cleanup_events(ctx); ib_ucm_close()
1191 kfree(ctx); ib_ucm_close()
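ib_ucm_destroy_id() shows the teardown idiom both ucm.c and ucma.c rely on: an atomic reference count paired with a completion. The destroyer removes the id from the table so no new lookups succeed, drops its own reference, then sleeps in wait_for_completion() until every concurrent ib_ucm_ctx_get() caller has done ib_ucm_ctx_put(). The mechanism reduced to a sketch (the struct is a hypothetical stand-in; atomic and completion calls are the real API, initialized at alloc time as in ib_ucm_ctx_alloc() above):

        #include <linux/atomic.h>
        #include <linux/completion.h>

        struct my_ctx {
                atomic_t ref;                   /* starts at 1 for the creator */
                struct completion comp;
        };

        static void my_ctx_put(struct my_ctx *ctx)
        {
                if (atomic_dec_and_test(&ctx->ref))
                        complete(&ctx->comp);   /* last reference gone: wake destroyer */
        }

        static void my_ctx_destroy(struct my_ctx *ctx)
        {
                my_ctx_put(ctx);                        /* drop the creator's reference */
                wait_for_completion(&ctx->comp);        /* block until in-flight users put */
                /* now nothing can reach ctx: tear down and kfree() */
        }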
/linux-4.1.27/drivers/gpu/drm/
H A Ddrm_modeset_lock.c35 * of extra utility/tracking out of our acquire-ctx. This is provided
42 * drm_modeset_acquire_init(&ctx)
45 * ret = drm_modeset_lock(lock, &ctx)
47 * drm_modeset_backoff(&ctx);
54 * drm_modeset_drop_locks(&ctx);
55 * drm_modeset_acquire_fini(&ctx);
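Assembled from the fragments quoted above, the acquire/backoff dance this comment describes looks roughly like the following (the drm_modeset_* calls are the real API from this file; `lock` stands for whichever modeset lock the caller needs):

        struct drm_modeset_acquire_ctx ctx;
        int ret;

        drm_modeset_acquire_init(&ctx, 0);
retry:
        ret = drm_modeset_lock(lock, &ctx);
        if (ret == -EDEADLK) {
                /* lost a ww-mutex ordering race: drop all held locks, retry */
                drm_modeset_backoff(&ctx);
                goto retry;
        }
        /* ... modeset work under the acquired locks ... */
        drm_modeset_drop_locks(&ctx);
        drm_modeset_acquire_fini(&ctx);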
74 struct drm_modeset_acquire_ctx *ctx; __drm_modeset_lock_all() local
77 ctx = kzalloc(sizeof(*ctx), __drm_modeset_lock_all()
79 if (!ctx) __drm_modeset_lock_all()
89 drm_modeset_acquire_init(ctx, 0); __drm_modeset_lock_all()
90 ctx->trylock_only = trylock; __drm_modeset_lock_all()
93 ret = drm_modeset_lock(&config->connection_mutex, ctx); __drm_modeset_lock_all()
96 ret = drm_modeset_lock_all_crtcs(dev, ctx); __drm_modeset_lock_all()
103 * ctx for drm_modeset_unlock_all(): __drm_modeset_lock_all()
105 config->acquire_ctx = ctx; __drm_modeset_lock_all()
113 drm_modeset_backoff(ctx); __drm_modeset_lock_all()
144 struct drm_modeset_acquire_ctx *ctx = config->acquire_ctx; drm_modeset_unlock_all() local
146 if (WARN_ON(!ctx)) drm_modeset_unlock_all()
150 drm_modeset_drop_locks(ctx); drm_modeset_unlock_all()
151 drm_modeset_acquire_fini(ctx); drm_modeset_unlock_all()
153 kfree(ctx); drm_modeset_unlock_all()
160 * drm_modeset_lock_crtc - lock crtc with hidden acquire ctx for a plane update
175 struct drm_modeset_acquire_ctx *ctx; drm_modeset_lock_crtc() local
178 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); drm_modeset_lock_crtc()
179 if (WARN_ON(!ctx)) drm_modeset_lock_crtc()
182 drm_modeset_acquire_init(ctx, 0); drm_modeset_lock_crtc()
185 ret = drm_modeset_lock(&crtc->mutex, ctx); drm_modeset_lock_crtc()
190 ret = drm_modeset_lock(&plane->mutex, ctx); drm_modeset_lock_crtc()
195 ret = drm_modeset_lock(&plane->crtc->mutex, ctx); drm_modeset_lock_crtc()
204 * ctx for drm_modeset_unlock_crtc(): drm_modeset_lock_crtc()
206 crtc->acquire_ctx = ctx; drm_modeset_lock_crtc()
212 drm_modeset_backoff(ctx); drm_modeset_lock_crtc()
219 * drm_modeset_legacy_acquire_ctx - find acquire ctx for legacy ioctls
223 * locking, and store the acquire ctx in the corresponding crtc. All other
248 struct drm_modeset_acquire_ctx *ctx = crtc->acquire_ctx; drm_modeset_unlock_crtc() local
250 if (WARN_ON(!ctx)) drm_modeset_unlock_crtc()
254 drm_modeset_drop_locks(ctx); drm_modeset_unlock_crtc()
255 drm_modeset_acquire_fini(ctx); drm_modeset_unlock_crtc()
257 kfree(ctx); drm_modeset_unlock_crtc()
285 * @ctx: the acquire context
288 void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx, drm_modeset_acquire_init() argument
291 memset(ctx, 0, sizeof(*ctx)); drm_modeset_acquire_init()
292 ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class); drm_modeset_acquire_init()
293 INIT_LIST_HEAD(&ctx->locked); drm_modeset_acquire_init()
299 * @ctx: the acquire context
301 void drm_modeset_acquire_fini(struct drm_modeset_acquire_ctx *ctx) drm_modeset_acquire_fini() argument
303 ww_acquire_fini(&ctx->ww_ctx); drm_modeset_acquire_fini()
309 * @ctx: the acquire context
313 void drm_modeset_drop_locks(struct drm_modeset_acquire_ctx *ctx) drm_modeset_drop_locks() argument
315 WARN_ON(ctx->contended); drm_modeset_drop_locks()
316 while (!list_empty(&ctx->locked)) { drm_modeset_drop_locks()
319 lock = list_first_entry(&ctx->locked, drm_modeset_drop_locks()
328 struct drm_modeset_acquire_ctx *ctx, modeset_lock()
333 WARN_ON(ctx->contended); modeset_lock()
335 if (ctx->trylock_only) { modeset_lock()
341 ret = ww_mutex_lock_slow_interruptible(&lock->mutex, &ctx->ww_ctx); modeset_lock()
343 ret = ww_mutex_lock_interruptible(&lock->mutex, &ctx->ww_ctx); modeset_lock()
345 ww_mutex_lock_slow(&lock->mutex, &ctx->ww_ctx); modeset_lock()
348 ret = ww_mutex_lock(&lock->mutex, &ctx->ww_ctx); modeset_lock()
352 list_add(&lock->head, &ctx->locked); modeset_lock()
361 ctx->contended = lock; modeset_lock()
367 static int modeset_backoff(struct drm_modeset_acquire_ctx *ctx, modeset_backoff() argument
370 struct drm_modeset_lock *contended = ctx->contended; modeset_backoff()
372 ctx->contended = NULL; modeset_backoff()
377 drm_modeset_drop_locks(ctx); modeset_backoff()
379 return modeset_lock(contended, ctx, interruptible, true); modeset_backoff()
384 * @ctx: the acquire context
390 void drm_modeset_backoff(struct drm_modeset_acquire_ctx *ctx) drm_modeset_backoff() argument
392 modeset_backoff(ctx, false); drm_modeset_backoff()
398 * @ctx: the acquire context
402 int drm_modeset_backoff_interruptible(struct drm_modeset_acquire_ctx *ctx) drm_modeset_backoff_interruptible() argument
404 return modeset_backoff(ctx, true); drm_modeset_backoff_interruptible()
411 * @ctx: acquire ctx
413 * If ctx is not NULL, then its ww acquire context is used and the
420 struct drm_modeset_acquire_ctx *ctx) drm_modeset_lock()
422 if (ctx) drm_modeset_lock()
423 return modeset_lock(lock, ctx, false, false); drm_modeset_lock()
433 * @ctx: acquire ctx
438 struct drm_modeset_acquire_ctx *ctx) drm_modeset_lock_interruptible()
440 if (ctx) drm_modeset_lock_interruptible()
441 return modeset_lock(lock, ctx, true, false); drm_modeset_lock_interruptible()
461 struct drm_modeset_acquire_ctx *ctx) drm_modeset_lock_all_crtcs()
469 ret = drm_modeset_lock(&crtc->mutex, ctx); drm_modeset_lock_all_crtcs()
475 ret = drm_modeset_lock(&plane->mutex, ctx); drm_modeset_lock_all_crtcs()
327 modeset_lock(struct drm_modeset_lock *lock, struct drm_modeset_acquire_ctx *ctx, bool interruptible, bool slow) modeset_lock() argument
419 drm_modeset_lock(struct drm_modeset_lock *lock, struct drm_modeset_acquire_ctx *ctx) drm_modeset_lock() argument
437 drm_modeset_lock_interruptible(struct drm_modeset_lock *lock, struct drm_modeset_acquire_ctx *ctx) drm_modeset_lock_interruptible() argument
460 drm_modeset_lock_all_crtcs(struct drm_device *dev, struct drm_modeset_acquire_ctx *ctx) drm_modeset_lock_all_crtcs() argument
/linux-4.1.27/drivers/media/platform/ti-vpe/
H A Dvpe.c409 static struct vpe_q_data *get_q_data(struct vpe_ctx *ctx, get_q_data() argument
415 return &ctx->q_data[Q_DATA_SRC]; get_q_data()
418 return &ctx->q_data[Q_DATA_DST]; get_q_data()
494 #define GET_OFFSET_TOP(ctx, obj, reg) \
495 ((obj)->res->start - ctx->dev->res->start + reg)
497 #define VPE_SET_MMR_ADB_HDR(ctx, hdr, regs, offset_a) \
498 VPDMA_SET_MMR_ADB_HDR(ctx->mmr_adb, vpe_mmr_adb, hdr, regs, offset_a)
502 static void init_adb_hdrs(struct vpe_ctx *ctx) init_adb_hdrs() argument
504 VPE_SET_MMR_ADB_HDR(ctx, out_fmt_hdr, out_fmt_reg, VPE_CLK_FORMAT_SELECT); init_adb_hdrs()
505 VPE_SET_MMR_ADB_HDR(ctx, us1_hdr, us1_regs, VPE_US1_R0); init_adb_hdrs()
506 VPE_SET_MMR_ADB_HDR(ctx, us2_hdr, us2_regs, VPE_US2_R0); init_adb_hdrs()
507 VPE_SET_MMR_ADB_HDR(ctx, us3_hdr, us3_regs, VPE_US3_R0); init_adb_hdrs()
508 VPE_SET_MMR_ADB_HDR(ctx, dei_hdr, dei_regs, VPE_DEI_FRAME_SIZE); init_adb_hdrs()
509 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr0, sc_regs0, init_adb_hdrs()
510 GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC0)); init_adb_hdrs()
511 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr8, sc_regs8, init_adb_hdrs()
512 GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC8)); init_adb_hdrs()
513 VPE_SET_MMR_ADB_HDR(ctx, sc_hdr17, sc_regs17, init_adb_hdrs()
514 GET_OFFSET_TOP(ctx, ctx->dev->sc, CFG_SC17)); init_adb_hdrs()
515 VPE_SET_MMR_ADB_HDR(ctx, csc_hdr, csc_regs, init_adb_hdrs()
516 GET_OFFSET_TOP(ctx, ctx->dev->csc, CSC_CSC00)); init_adb_hdrs()
526 static int realloc_mv_buffers(struct vpe_ctx *ctx, size_t size) realloc_mv_buffers() argument
528 struct device *dev = ctx->dev->v4l2_dev.dev; realloc_mv_buffers()
530 if (ctx->mv_buf_size == size) realloc_mv_buffers()
533 if (ctx->mv_buf[0]) realloc_mv_buffers()
534 dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[0], realloc_mv_buffers()
535 ctx->mv_buf_dma[0]); realloc_mv_buffers()
537 if (ctx->mv_buf[1]) realloc_mv_buffers()
538 dma_free_coherent(dev, ctx->mv_buf_size, ctx->mv_buf[1], realloc_mv_buffers()
539 ctx->mv_buf_dma[1]); realloc_mv_buffers()
544 ctx->mv_buf[0] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[0], realloc_mv_buffers()
546 if (!ctx->mv_buf[0]) { realloc_mv_buffers()
547 vpe_err(ctx->dev, "failed to allocate motion vector buffer\n"); realloc_mv_buffers()
551 ctx->mv_buf[1] = dma_alloc_coherent(dev, size, &ctx->mv_buf_dma[1], realloc_mv_buffers()
553 if (!ctx->mv_buf[1]) { realloc_mv_buffers()
554 vpe_err(ctx->dev, "failed to allocate motion vector buffer\n"); realloc_mv_buffers()
555 dma_free_coherent(dev, size, ctx->mv_buf[0], realloc_mv_buffers()
556 ctx->mv_buf_dma[0]); realloc_mv_buffers()
561 ctx->mv_buf_size = size; realloc_mv_buffers()
562 ctx->src_mv_buf_selector = 0; realloc_mv_buffers()
567 static void free_mv_buffers(struct vpe_ctx *ctx) free_mv_buffers() argument
569 realloc_mv_buffers(ctx, 0); free_mv_buffers()
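realloc_mv_buffers() keeps two DMA-coherent motion-vector buffers so one can be read back while the other is written (src_mv_buf_selector picks which is which), and free_mv_buffers() is simply a reallocation to size 0. The allocation half of that ping-pong, condensed into a sketch (the struct mirrors the ctx members above; dma_alloc_coherent/dma_free_coherent are the real API):

        #include <linux/dma-mapping.h>

        struct mv_bufs {
                void *buf[2];
                dma_addr_t dma[2];
                size_t size;
        };

        static int mv_bufs_alloc(struct device *dev, struct mv_bufs *mv, size_t size)
        {
                int i;

                for (i = 0; i < 2; i++) {
                        mv->buf[i] = dma_alloc_coherent(dev, size, &mv->dma[i],
                                                        GFP_KERNEL);
                        if (!mv->buf[i]) {
                                if (i)  /* unwind buffer 0 on a partial failure */
                                        dma_free_coherent(dev, size, mv->buf[0],
                                                          mv->dma[0]);
                                return -ENOMEM;
                        }
                }
                mv->size = size;
                return 0;
        }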
577 static void free_vbs(struct vpe_ctx *ctx) free_vbs() argument
579 struct vpe_dev *dev = ctx->dev; free_vbs()
582 if (ctx->src_vbs[2] == NULL) free_vbs()
586 if (ctx->src_vbs[2]) { free_vbs()
587 v4l2_m2m_buf_done(ctx->src_vbs[2], VB2_BUF_STATE_DONE); free_vbs()
588 v4l2_m2m_buf_done(ctx->src_vbs[1], VB2_BUF_STATE_DONE); free_vbs()
631 static void set_us_coefficients(struct vpe_ctx *ctx) set_us_coefficients() argument
633 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; set_us_coefficients()
634 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; set_us_coefficients()
653 ctx->load_mmrs = true; set_us_coefficients()
659 static void set_cfg_and_line_modes(struct vpe_ctx *ctx) set_cfg_and_line_modes() argument
661 struct vpe_fmt *fmt = ctx->q_data[Q_DATA_SRC].fmt; set_cfg_and_line_modes()
662 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; set_cfg_and_line_modes()
684 vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA1_IN); set_cfg_and_line_modes()
685 vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA2_IN); set_cfg_and_line_modes()
686 vpdma_set_line_mode(ctx->dev->vpdma, line_mode, VPE_CHAN_CHROMA3_IN); set_cfg_and_line_modes()
689 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, set_cfg_and_line_modes()
691 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, set_cfg_and_line_modes()
693 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, set_cfg_and_line_modes()
697 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, set_cfg_and_line_modes()
699 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, set_cfg_and_line_modes()
701 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, set_cfg_and_line_modes()
705 vpdma_set_frame_start_event(ctx->dev->vpdma, VPDMA_FSEVENT_CHANNEL_ACTIVE, set_cfg_and_line_modes()
708 ctx->load_mmrs = true; set_cfg_and_line_modes()
715 static void set_src_registers(struct vpe_ctx *ctx) set_src_registers() argument
717 set_us_coefficients(ctx); set_src_registers()
724 static void set_dst_registers(struct vpe_ctx *ctx) set_dst_registers() argument
726 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; set_dst_registers()
727 enum v4l2_colorspace clrspc = ctx->q_data[Q_DATA_DST].colorspace; set_dst_registers()
728 struct vpe_fmt *fmt = ctx->q_data[Q_DATA_DST].fmt; set_dst_registers()
747 ctx->load_mmrs = true; set_dst_registers()
753 static void set_dei_regs(struct vpe_ctx *ctx) set_dei_regs() argument
755 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; set_dei_regs()
756 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; set_dei_regs()
769 if ((!ctx->deinterlacing && (s_q_data->flags & Q_DATA_INTERLACED)) || set_dei_regs()
783 ctx->load_mmrs = true; set_dei_regs()
786 static void set_dei_shadow_registers(struct vpe_ctx *ctx) set_dei_shadow_registers() argument
788 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; set_dei_shadow_registers()
799 ctx->load_mmrs = true; set_dei_shadow_registers()
806 static int set_srcdst_params(struct vpe_ctx *ctx) set_srcdst_params() argument
808 struct vpe_q_data *s_q_data = &ctx->q_data[Q_DATA_SRC]; set_srcdst_params()
809 struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST]; set_srcdst_params()
810 struct vpe_mmr_adb *mmr_adb = ctx->mmr_adb.addr; set_srcdst_params()
818 ctx->sequence = 0; set_srcdst_params()
819 ctx->field = V4L2_FIELD_TOP; set_srcdst_params()
838 ctx->deinterlacing = true; set_srcdst_params()
841 ctx->deinterlacing = false; set_srcdst_params()
845 free_vbs(ctx); set_srcdst_params()
847 ret = realloc_mv_buffers(ctx, mv_buf_size); set_srcdst_params()
851 set_cfg_and_line_modes(ctx); set_srcdst_params()
852 set_dei_regs(ctx); set_srcdst_params()
854 csc_set_coeff(ctx->dev->csc, &mmr_adb->csc_regs[0], set_srcdst_params()
857 sc_set_hs_coeffs(ctx->dev->sc, ctx->sc_coeff_h.addr, src_w, dst_w); set_srcdst_params()
858 sc_set_vs_coeffs(ctx->dev->sc, ctx->sc_coeff_v.addr, src_h, dst_h); set_srcdst_params()
860 sc_config_scaler(ctx->dev->sc, &mmr_adb->sc_regs0[0], set_srcdst_params()
884 struct vpe_ctx *ctx = priv; job_ready() local
885 int needed = ctx->bufs_per_job; job_ready()
887 if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) job_ready()
890 if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < needed) job_ready()
893 if (v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < needed) job_ready()
901 struct vpe_ctx *ctx = priv; job_abort() local
904 ctx->aborting = 1; job_abort()
912 struct vpe_ctx *ctx = priv; vpe_lock() local
913 struct vpe_dev *dev = ctx->dev; vpe_lock()
919 struct vpe_ctx *ctx = priv; vpe_unlock() local
920 struct vpe_dev *dev = ctx->dev; vpe_unlock()
987 static void add_out_dtd(struct vpe_ctx *ctx, int port) add_out_dtd() argument
989 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_DST]; add_out_dtd()
991 struct vb2_buffer *vb = ctx->dst_vb; add_out_dtd()
994 int mv_buf_selector = !ctx->src_mv_buf_selector; add_out_dtd()
1000 dma_addr = ctx->mv_buf_dma[mv_buf_selector]; add_out_dtd()
1008 vpe_err(ctx->dev, add_out_dtd()
1020 vpdma_add_out_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect, add_out_dtd()
1024 static void add_in_dtd(struct vpe_ctx *ctx, int port) add_in_dtd() argument
1026 struct vpe_q_data *q_data = &ctx->q_data[Q_DATA_SRC]; add_in_dtd()
1028 struct vb2_buffer *vb = ctx->src_vbs[p_data->vb_index]; add_in_dtd()
1031 int mv_buf_selector = ctx->src_mv_buf_selector; add_in_dtd()
1039 dma_addr = ctx->mv_buf_dma[mv_buf_selector]; add_in_dtd()
1048 vpe_err(ctx->dev, add_in_dtd()
1066 vpdma_add_in_dtd(&ctx->desc_list, q_data->width, &q_data->c_rect, add_in_dtd()
1074 static void enable_irqs(struct vpe_ctx *ctx) enable_irqs() argument
1076 write_reg(ctx->dev, VPE_INT0_ENABLE0_SET, VPE_INT0_LIST0_COMPLETE); enable_irqs()
1077 write_reg(ctx->dev, VPE_INT0_ENABLE1_SET, VPE_DEI_ERROR_INT | enable_irqs()
1080 vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, true); enable_irqs()
1083 static void disable_irqs(struct vpe_ctx *ctx) disable_irqs() argument
1085 write_reg(ctx->dev, VPE_INT0_ENABLE0_CLR, 0xffffffff); disable_irqs()
1086 write_reg(ctx->dev, VPE_INT0_ENABLE1_CLR, 0xffffffff); disable_irqs()
1088 vpdma_enable_list_complete_irq(ctx->dev->vpdma, 0, false); disable_irqs()
1098 struct vpe_ctx *ctx = priv; device_run() local
1099 struct sc_data *sc = ctx->dev->sc; device_run()
1100 struct vpe_q_data *d_q_data = &ctx->q_data[Q_DATA_DST]; device_run()
1102 if (ctx->deinterlacing && ctx->src_vbs[2] == NULL) { device_run()
1103 ctx->src_vbs[2] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); device_run()
1104 WARN_ON(ctx->src_vbs[2] == NULL); device_run()
1105 ctx->src_vbs[1] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); device_run()
1106 WARN_ON(ctx->src_vbs[1] == NULL); device_run()
1109 ctx->src_vbs[0] = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); device_run()
1110 WARN_ON(ctx->src_vbs[0] == NULL); device_run()
1111 ctx->dst_vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); device_run()
1112 WARN_ON(ctx->dst_vb == NULL); device_run()
1115 if (ctx->dev->loaded_mmrs != ctx->mmr_adb.dma_addr || ctx->load_mmrs) { device_run()
1116 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->mmr_adb); device_run()
1117 vpdma_add_cfd_adb(&ctx->desc_list, CFD_MMR_CLIENT, &ctx->mmr_adb); device_run()
1118 ctx->dev->loaded_mmrs = ctx->mmr_adb.dma_addr; device_run()
1119 ctx->load_mmrs = false; device_run()
1122 if (sc->loaded_coeff_h != ctx->sc_coeff_h.dma_addr || device_run()
1124 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_h); device_run()
1125 vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT, device_run()
1126 &ctx->sc_coeff_h, 0); device_run()
1128 sc->loaded_coeff_h = ctx->sc_coeff_h.dma_addr; device_run()
1132 if (sc->loaded_coeff_v != ctx->sc_coeff_v.dma_addr || device_run()
1134 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->sc_coeff_v); device_run()
1135 vpdma_add_cfd_block(&ctx->desc_list, CFD_SC_CLIENT, device_run()
1136 &ctx->sc_coeff_v, SC_COEF_SRAM_SIZE >> 4); device_run()
1138 sc->loaded_coeff_v = ctx->sc_coeff_v.dma_addr; device_run()
1143 if (ctx->deinterlacing) device_run()
1144 add_out_dtd(ctx, VPE_PORT_MV_OUT); device_run()
1147 add_out_dtd(ctx, VPE_PORT_RGB_OUT); device_run()
1149 add_out_dtd(ctx, VPE_PORT_LUMA_OUT); device_run()
1151 add_out_dtd(ctx, VPE_PORT_CHROMA_OUT); device_run()
1155 if (ctx->deinterlacing) { device_run()
1156 add_in_dtd(ctx, VPE_PORT_LUMA3_IN); device_run()
1157 add_in_dtd(ctx, VPE_PORT_CHROMA3_IN); device_run()
1159 add_in_dtd(ctx, VPE_PORT_LUMA2_IN); device_run()
1160 add_in_dtd(ctx, VPE_PORT_CHROMA2_IN); device_run()
1163 add_in_dtd(ctx, VPE_PORT_LUMA1_IN); device_run()
1164 add_in_dtd(ctx, VPE_PORT_CHROMA1_IN); device_run()
1166 if (ctx->deinterlacing) device_run()
1167 add_in_dtd(ctx, VPE_PORT_MV_IN); device_run()
1170 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_LUMA1_IN); device_run()
1171 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_CHROMA1_IN); device_run()
1173 if (ctx->deinterlacing) { device_run()
1174 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, device_run()
1176 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, device_run()
1179 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, device_run()
1181 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, device_run()
1184 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_IN); device_run()
1189 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, device_run()
1192 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, device_run()
1195 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, device_run()
1199 if (ctx->deinterlacing) device_run()
1200 vpdma_add_sync_on_channel_ctd(&ctx->desc_list, VPE_CHAN_MV_OUT); device_run()
1202 enable_irqs(ctx); device_run()
1204 vpdma_map_desc_buf(ctx->dev->vpdma, &ctx->desc_list.buf); device_run()
1205 vpdma_submit_descs(ctx->dev->vpdma, &ctx->desc_list); device_run()
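device_run() only re-emits configuration descriptors when they may have gone stale: the MMR shadow block is queued if the context's load_mmrs flag is set or if another context's registers were loaded last (dev->loaded_mmrs no longer matches this context's DMA address), and the scaler coefficient uploads are gated the same way. The caching test in isolation, as a sketch in which every name is a hypothetical stand-in:

        /* assumed fields: ctx->load_mmrs (dirty), dev->loaded_mmrs (what HW holds) */
        static void emit_config_if_stale(struct my_dev *dev, struct my_ctx *ctx)
        {
                if (dev->loaded_mmrs == ctx->mmr_dma && !ctx->load_mmrs)
                        return;                 /* hardware already configured */

                emit_config_descriptor(ctx);    /* assumed: queue the config upload */
                dev->loaded_mmrs = ctx->mmr_dma;
                ctx->load_mmrs = false;         /* clean until s_fmt etc. dirties it */
        }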
1208 static void dei_error(struct vpe_ctx *ctx) dei_error() argument
1210 dev_warn(ctx->dev->v4l2_dev.dev, dei_error()
1214 static void ds1_uv_error(struct vpe_ctx *ctx) ds1_uv_error() argument
1216 dev_warn(ctx->dev->v4l2_dev.dev, ds1_uv_error()
1223 struct vpe_ctx *ctx; vpe_irq() local
1242 ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev); vpe_irq()
1243 if (!ctx) { vpe_irq()
1251 dei_error(ctx); vpe_irq()
1255 ds1_uv_error(ctx); vpe_irq()
1261 vpdma_clear_list_stat(ctx->dev->vpdma); vpe_irq()
1272 disable_irqs(ctx); vpe_irq()
1274 vpdma_unmap_desc_buf(dev->vpdma, &ctx->desc_list.buf); vpe_irq()
1275 vpdma_unmap_desc_buf(dev->vpdma, &ctx->mmr_adb); vpe_irq()
1276 vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_h); vpe_irq()
1277 vpdma_unmap_desc_buf(dev->vpdma, &ctx->sc_coeff_v); vpe_irq()
1279 vpdma_reset_desc_list(&ctx->desc_list); vpe_irq()
1282 ctx->src_mv_buf_selector = !ctx->src_mv_buf_selector; vpe_irq()
1284 if (ctx->aborting) vpe_irq()
1287 s_vb = ctx->src_vbs[0]; vpe_irq()
1288 d_vb = ctx->dst_vb; vpe_irq()
1298 d_buf->sequence = ctx->sequence; vpe_irq()
1300 d_q_data = &ctx->q_data[Q_DATA_DST]; vpe_irq()
1302 d_buf->field = ctx->field; vpe_irq()
1303 if (ctx->field == V4L2_FIELD_BOTTOM) { vpe_irq()
1304 ctx->sequence++; vpe_irq()
1305 ctx->field = V4L2_FIELD_TOP; vpe_irq()
1307 WARN_ON(ctx->field != V4L2_FIELD_TOP); vpe_irq()
1308 ctx->field = V4L2_FIELD_BOTTOM; vpe_irq()
1312 ctx->sequence++; vpe_irq()
1315 if (ctx->deinterlacing) vpe_irq()
1316 s_vb = ctx->src_vbs[2]; vpe_irq()
1323 if (ctx->deinterlacing) { vpe_irq()
1324 ctx->src_vbs[2] = ctx->src_vbs[1]; vpe_irq()
1325 ctx->src_vbs[1] = ctx->src_vbs[0]; vpe_irq()
1328 ctx->bufs_completed++; vpe_irq()
1329 if (ctx->bufs_completed < ctx->bufs_per_job) { vpe_irq()
1330 device_run(ctx); vpe_irq()
1335 vpe_dbg(ctx->dev, "finishing transaction\n"); vpe_irq()
1336 ctx->bufs_completed = 0; vpe_irq()
1337 v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx); vpe_irq()
1393 struct vpe_ctx *ctx = file2ctx(file); vpe_g_fmt() local
1398 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); vpe_g_fmt()
1402 q_data = get_q_data(ctx, f->type); vpe_g_fmt()
1415 s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE); vpe_g_fmt()
1430 static int __vpe_try_fmt(struct vpe_ctx *ctx, struct v4l2_format *f, __vpe_try_fmt() argument
1439 vpe_err(ctx->dev, "Fourcc format (0x%08x) invalid.\n", __vpe_try_fmt()
1513 struct vpe_ctx *ctx = file2ctx(file); vpe_try_fmt() local
1517 return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_OUTPUT); vpe_try_fmt()
1519 return __vpe_try_fmt(ctx, f, fmt, VPE_FMT_TYPE_CAPTURE); vpe_try_fmt()
1522 static int __vpe_s_fmt(struct vpe_ctx *ctx, struct v4l2_format *f) __vpe_s_fmt() argument
1530 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); __vpe_s_fmt()
1535 vpe_err(ctx->dev, "queue busy\n"); __vpe_s_fmt()
1539 q_data = get_q_data(ctx, f->type); __vpe_s_fmt()
1566 vpe_dbg(ctx->dev, "Setting format for type %d, wxh: %dx%d, fmt: %d bpl_y %d", __vpe_s_fmt()
1570 vpe_dbg(ctx->dev, " bpl_uv %d\n", __vpe_s_fmt()
1579 struct vpe_ctx *ctx = file2ctx(file); vpe_s_fmt() local
1585 ret = __vpe_s_fmt(ctx, f); vpe_s_fmt()
1590 set_src_registers(ctx); vpe_s_fmt()
1592 set_dst_registers(ctx); vpe_s_fmt()
1594 return set_srcdst_params(ctx); vpe_s_fmt()
1597 static int __vpe_try_selection(struct vpe_ctx *ctx, struct v4l2_selection *s) __vpe_try_selection() argument
1605 q_data = get_q_data(ctx, s->type); __vpe_try_selection()
1635 vpe_err(ctx->dev, "negative values for top and left\n"); __vpe_try_selection()
1654 struct vpe_ctx *ctx = file2ctx(file); vpe_g_selection() local
1662 q_data = get_q_data(ctx, s->type); vpe_g_selection()
1715 struct vpe_ctx *ctx = file2ctx(file); vpe_s_selection() local
1720 ret = __vpe_try_selection(ctx, &sel); vpe_s_selection()
1724 q_data = get_q_data(ctx, sel.type); vpe_s_selection()
1732 vpe_dbg(ctx->dev, vpe_s_selection()
1739 return set_srcdst_params(ctx); vpe_s_selection()
1750 struct vpe_ctx *ctx = vpe_s_ctrl() local
1755 ctx->bufs_per_job = ctrl->val; vpe_s_ctrl()
1759 vpe_err(ctx->dev, "Invalid control\n"); vpe_s_ctrl()
1806 struct vpe_ctx *ctx = vb2_get_drv_priv(vq); vpe_queue_setup() local
1809 q_data = get_q_data(ctx, vq->type); vpe_queue_setup()
1815 alloc_ctxs[i] = ctx->dev->alloc_ctx; vpe_queue_setup()
1818 vpe_dbg(ctx->dev, "get %d buffer(s) of size %d", *nbuffers, vpe_queue_setup()
1821 vpe_dbg(ctx->dev, " and %d\n", sizes[VPE_CHROMA]); vpe_queue_setup()
1828 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); vpe_buf_prepare() local
1832 vpe_dbg(ctx->dev, "type: %d\n", vb->vb2_queue->type); vpe_buf_prepare()
1834 q_data = get_q_data(ctx, vb->vb2_queue->type); vpe_buf_prepare()
1849 vpe_err(ctx->dev, vpe_buf_prepare()
1865 struct vpe_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); vpe_buf_queue() local
1867 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); vpe_buf_queue()
1879 struct vpe_ctx *ctx = vb2_get_drv_priv(q); vpe_stop_streaming() local
1881 vpe_dump_regs(ctx->dev); vpe_stop_streaming()
1882 vpdma_dump_regs(ctx->dev->vpdma); vpe_stop_streaming()
1898 struct vpe_ctx *ctx = priv; queue_init() local
1899 struct vpe_dev *dev = ctx->dev; queue_init()
1905 src_vq->drv_priv = ctx; queue_init()
1919 dst_vq->drv_priv = ctx; queue_init()
1948 struct vpe_ctx *ctx; vpe_open() local
1953 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); vpe_open()
1954 if (!ctx) vpe_open()
1957 ctx->dev = dev; vpe_open()
1964 ret = vpdma_create_desc_list(&ctx->desc_list, VPE_DESC_LIST_SIZE, vpe_open()
1969 ret = vpdma_alloc_desc_buf(&ctx->mmr_adb, sizeof(struct vpe_mmr_adb)); vpe_open()
1973 ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_h, SC_COEF_SRAM_SIZE); vpe_open()
1977 ret = vpdma_alloc_desc_buf(&ctx->sc_coeff_v, SC_COEF_SRAM_SIZE); vpe_open()
1981 init_adb_hdrs(ctx); vpe_open()
1983 v4l2_fh_init(&ctx->fh, video_devdata(file)); vpe_open()
1984 file->private_data = &ctx->fh; vpe_open()
1986 hdl = &ctx->hdl; vpe_open()
1993 ctx->fh.ctrl_handler = hdl; vpe_open()
1996 s_q_data = &ctx->q_data[Q_DATA_SRC]; vpe_open()
2012 ctx->q_data[Q_DATA_DST] = *s_q_data; vpe_open()
2014 set_dei_shadow_registers(ctx); vpe_open()
2015 set_src_registers(ctx); vpe_open()
2016 set_dst_registers(ctx); vpe_open()
2017 ret = set_srcdst_params(ctx); vpe_open()
2021 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); vpe_open()
2023 if (IS_ERR(ctx->fh.m2m_ctx)) { vpe_open()
2024 ret = PTR_ERR(ctx->fh.m2m_ctx); vpe_open()
2028 v4l2_fh_add(&ctx->fh); vpe_open()
2038 ctx->bufs_per_job = VPE_DEF_BUFS_PER_JOB; vpe_open()
2040 ctx->load_mmrs = true; vpe_open()
2043 ctx, ctx->fh.m2m_ctx); vpe_open()
2050 v4l2_fh_exit(&ctx->fh); vpe_open()
2051 vpdma_free_desc_buf(&ctx->sc_coeff_v); vpe_open()
2053 vpdma_free_desc_buf(&ctx->sc_coeff_h); vpe_open()
2055 vpdma_free_desc_buf(&ctx->mmr_adb); vpe_open()
2057 vpdma_free_desc_list(&ctx->desc_list); vpe_open()
2061 kfree(ctx); vpe_open()
2068 struct vpe_ctx *ctx = file2ctx(file); vpe_release() local
2070 vpe_dbg(dev, "releasing instance %p\n", ctx); vpe_release()
2073 free_vbs(ctx); vpe_release()
2074 free_mv_buffers(ctx); vpe_release()
2075 vpdma_free_desc_list(&ctx->desc_list); vpe_release()
2076 vpdma_free_desc_buf(&ctx->mmr_adb); vpe_release()
2078 v4l2_fh_del(&ctx->fh); vpe_release()
2079 v4l2_fh_exit(&ctx->fh); vpe_release()
2080 v4l2_ctrl_handler_free(&ctx->hdl); vpe_release()
2081 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); vpe_release()
2083 kfree(ctx); vpe_release()
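vpe_open() recovers from a partial setup with a goto ladder that frees resources in exact reverse order of allocation, and vpe_release() repeats the same sequence for a fully constructed context. The idiom in miniature (the allocation helpers are assumed; kzalloc/kfree are real):

        #include <linux/slab.h>

        static int my_open(struct my_dev *dev)
        {
                struct my_ctx *ctx;
                int ret;

                ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
                if (!ctx)
                        return -ENOMEM;

                ret = alloc_desc_list(ctx);     /* assumed helper */
                if (ret)
                        goto free_ctx;

                ret = alloc_shadow_regs(ctx);   /* assumed helper */
                if (ret)
                        goto free_desc_list;

                return 0;

        free_desc_list:
                release_desc_list(ctx);         /* undo alloc_desc_list() */
        free_ctx:
                kfree(ctx);
                return ret;
        }

Each label frees exactly what was set up before its goto, so a failure at any step leaves nothing leaked.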
/linux-4.1.27/drivers/net/wireless/iwlwifi/dvm/
H A Drxon.c38 struct iwl_rxon_context *ctx) iwl_connection_init_rx_config()
40 memset(&ctx->staging, 0, sizeof(ctx->staging)); iwl_connection_init_rx_config()
42 if (!ctx->vif) { iwl_connection_init_rx_config()
43 ctx->staging.dev_type = ctx->unused_devtype; iwl_connection_init_rx_config()
45 switch (ctx->vif->type) { iwl_connection_init_rx_config()
47 ctx->staging.dev_type = ctx->ap_devtype; iwl_connection_init_rx_config()
51 ctx->staging.dev_type = ctx->station_devtype; iwl_connection_init_rx_config()
52 ctx->staging.filter_flags = RXON_FILTER_ACCEPT_GRP_MSK; iwl_connection_init_rx_config()
56 ctx->staging.dev_type = ctx->ibss_devtype; iwl_connection_init_rx_config()
57 ctx->staging.flags = RXON_FLG_SHORT_PREAMBLE_MSK; iwl_connection_init_rx_config()
58 ctx->staging.filter_flags = RXON_FILTER_BCON_AWARE_MSK | iwl_connection_init_rx_config()
63 ctx->staging.dev_type = RXON_DEV_TYPE_SNIFFER; iwl_connection_init_rx_config()
68 ctx->vif->type); iwl_connection_init_rx_config()
76 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; iwl_connection_init_rx_config()
78 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; iwl_connection_init_rx_config()
81 ctx->staging.channel = iwl_connection_init_rx_config()
85 iwl_set_flags_for_band(priv, ctx, priv->band, ctx->vif); iwl_connection_init_rx_config()
88 ctx->staging.flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED | iwl_connection_init_rx_config()
90 if (ctx->vif) iwl_connection_init_rx_config()
91 memcpy(ctx->staging.node_addr, ctx->vif->addr, ETH_ALEN); iwl_connection_init_rx_config()
93 ctx->staging.ofdm_ht_single_stream_basic_rates = 0xff; iwl_connection_init_rx_config()
94 ctx->staging.ofdm_ht_dual_stream_basic_rates = 0xff; iwl_connection_init_rx_config()
95 ctx->staging.ofdm_ht_triple_stream_basic_rates = 0xff; iwl_connection_init_rx_config()
99 struct iwl_rxon_context *ctx, iwlagn_disable_bss()
106 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, iwlagn_disable_bss()
119 struct iwl_rxon_context *ctx, iwlagn_disable_pan()
136 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, iwlagn_disable_pan()
156 struct iwl_rxon_context *ctx, iwlagn_disconn_pan()
163 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0, iwlagn_disconn_pan()
172 struct iwl_rxon_context *ctx) iwlagn_update_qos()
176 if (!ctx->is_active) iwlagn_update_qos()
179 ctx->qos_data.def_qos_parm.qos_flags = 0; iwlagn_update_qos()
181 if (ctx->qos_data.qos_active) iwlagn_update_qos()
182 ctx->qos_data.def_qos_parm.qos_flags |= iwlagn_update_qos()
185 if (ctx->ht.enabled) iwlagn_update_qos()
186 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK; iwlagn_update_qos()
189 ctx->qos_data.qos_active, iwlagn_update_qos()
190 ctx->qos_data.def_qos_parm.qos_flags); iwlagn_update_qos()
192 ret = iwl_dvm_send_cmd_pdu(priv, ctx->qos_cmd, 0, iwlagn_update_qos()
194 &ctx->qos_data.def_qos_parm); iwlagn_update_qos()
212 struct iwl_rxon_context *ctx) iwlagn_send_rxon_assoc()
216 const struct iwl_rxon_cmd *rxon1 = &ctx->staging; iwlagn_send_rxon_assoc()
217 const struct iwl_rxon_cmd *rxon2 = &ctx->active; iwlagn_send_rxon_assoc()
235 rxon_assoc.flags = ctx->staging.flags; iwlagn_send_rxon_assoc()
236 rxon_assoc.filter_flags = ctx->staging.filter_flags; iwlagn_send_rxon_assoc()
237 rxon_assoc.ofdm_basic_rates = ctx->staging.ofdm_basic_rates; iwlagn_send_rxon_assoc()
238 rxon_assoc.cck_basic_rates = ctx->staging.cck_basic_rates; iwlagn_send_rxon_assoc()
243 ctx->staging.ofdm_ht_single_stream_basic_rates; iwlagn_send_rxon_assoc()
245 ctx->staging.ofdm_ht_dual_stream_basic_rates; iwlagn_send_rxon_assoc()
246 rxon_assoc.rx_chain_select_flags = ctx->staging.rx_chain; iwlagn_send_rxon_assoc()
248 ctx->staging.ofdm_ht_triple_stream_basic_rates; iwlagn_send_rxon_assoc()
249 rxon_assoc.acquisition_data = ctx->staging.acquisition_data; iwlagn_send_rxon_assoc()
251 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_assoc_cmd, iwlagn_send_rxon_assoc()
292 struct iwl_rxon_context *ctx) iwl_send_rxon_timing()
298 struct ieee80211_vif *vif = ctx->vif; iwl_send_rxon_timing()
304 memset(&ctx->timing, 0, sizeof(struct iwl_rxon_time_cmd)); iwl_send_rxon_timing()
306 ctx->timing.timestamp = cpu_to_le64(priv->timestamp); iwl_send_rxon_timing()
307 ctx->timing.listen_interval = cpu_to_le16(conf->listen_interval); iwl_send_rxon_timing()
315 ctx->timing.atim_window = 0; iwl_send_rxon_timing()
317 if (ctx->ctxid == IWL_RXON_CTX_PAN && iwl_send_rxon_timing()
318 (!ctx->vif || ctx->vif->type != NL80211_IFTYPE_STATION) && iwl_send_rxon_timing()
322 ctx->timing.beacon_interval = iwl_send_rxon_timing()
324 beacon_int = le16_to_cpu(ctx->timing.beacon_interval); iwl_send_rxon_timing()
325 } else if (ctx->ctxid == IWL_RXON_CTX_BSS && iwl_send_rxon_timing()
329 (!iwl_is_associated_ctx(ctx) || !ctx->vif || iwl_send_rxon_timing()
330 !ctx->vif->bss_conf.beacon_int)) { iwl_send_rxon_timing()
331 ctx->timing.beacon_interval = iwl_send_rxon_timing()
333 beacon_int = le16_to_cpu(ctx->timing.beacon_interval); iwl_send_rxon_timing()
337 ctx->timing.beacon_interval = cpu_to_le16(beacon_int); iwl_send_rxon_timing()
340 ctx->beacon_int = beacon_int; iwl_send_rxon_timing()
345 ctx->timing.beacon_init_val = cpu_to_le32(interval_tm - rem); iwl_send_rxon_timing()
347 ctx->timing.dtim_period = vif ? (vif->bss_conf.dtim_period ?: 1) : 1; iwl_send_rxon_timing()
351 le16_to_cpu(ctx->timing.beacon_interval), iwl_send_rxon_timing()
352 le32_to_cpu(ctx->timing.beacon_init_val), iwl_send_rxon_timing()
353 le16_to_cpu(ctx->timing.atim_window)); iwl_send_rxon_timing()
355 return iwl_dvm_send_cmd_pdu(priv, ctx->rxon_timing_cmd, iwl_send_rxon_timing()
356 0, sizeof(ctx->timing), &ctx->timing); iwl_send_rxon_timing()
360 struct iwl_rxon_context *ctx) iwlagn_rxon_disconn()
363 struct iwl_rxon_cmd *active = (void *)&ctx->active; iwlagn_rxon_disconn()
365 if (ctx->ctxid == IWL_RXON_CTX_BSS) { iwlagn_rxon_disconn()
366 ret = iwlagn_disable_bss(priv, ctx, &ctx->staging); iwlagn_rxon_disconn()
368 ret = iwlagn_disable_pan(priv, ctx, &ctx->staging); iwlagn_rxon_disconn()
371 if (ctx->vif) { iwlagn_rxon_disconn()
372 ret = iwl_send_rxon_timing(priv, ctx); iwlagn_rxon_disconn()
377 ret = iwlagn_disconn_pan(priv, ctx, &ctx->staging); iwlagn_rxon_disconn()
387 iwl_clear_ucode_stations(priv, ctx); iwlagn_rxon_disconn()
389 iwl_update_bcast_station(priv, ctx); iwlagn_rxon_disconn()
390 iwl_restore_stations(priv, ctx); iwlagn_rxon_disconn()
391 ret = iwl_restore_default_wep_keys(priv, ctx); iwlagn_rxon_disconn()
397 memcpy(active, &ctx->staging, sizeof(*active)); iwlagn_rxon_disconn()
406 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; iwl_set_tx_power() local
440 memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)); iwl_set_tx_power()
460 struct iwl_rxon_context *ctx) iwlagn_rxon_connect()
463 struct iwl_rxon_cmd *active = (void *)&ctx->active; iwlagn_rxon_connect()
466 if (ctx->ctxid == IWL_RXON_CTX_BSS) { iwlagn_rxon_connect()
467 ret = iwl_send_rxon_timing(priv, ctx); iwlagn_rxon_connect()
474 iwlagn_update_qos(priv, ctx); iwlagn_rxon_connect()
481 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) { iwlagn_rxon_connect()
482 ret = iwlagn_update_beacon(priv, ctx->vif); iwlagn_rxon_connect()
498 ret = iwl_dvm_send_cmd_pdu(priv, ctx->rxon_cmd, 0, iwlagn_rxon_connect()
499 sizeof(struct iwl_rxon_cmd), &ctx->staging); iwlagn_rxon_connect()
504 memcpy(active, &ctx->staging, sizeof(*active)); iwlagn_rxon_connect()
507 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC)) iwlagn_rxon_connect()
508 if (iwlagn_update_beacon(priv, ctx->vif)) iwlagn_rxon_connect()
525 if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION && iwlagn_rxon_connect()
527 ieee80211_request_smps(ctx->vif, iwlagn_rxon_connect()
623 struct iwl_rxon_context *ctx) _iwl_set_rxon_ht()
625 struct iwl_rxon_cmd *rxon = &ctx->staging; _iwl_set_rxon_ht()
627 if (!ctx->ht.enabled) { _iwl_set_rxon_ht()
638 rxon->flags |= cpu_to_le32(ctx->ht.protection << _iwl_set_rxon_ht()
646 if (iwl_is_ht40_tx_allowed(priv, ctx, NULL)) { _iwl_set_rxon_ht()
648 if (ctx->ht.protection == _iwl_set_rxon_ht()
655 switch (ctx->ht.extension_chan_offset) { _iwl_set_rxon_ht()
670 switch (ctx->ht.extension_chan_offset) { _iwl_set_rxon_ht()
695 iwlagn_set_rxon_chain(priv, ctx); _iwl_set_rxon_ht()
699 le32_to_cpu(rxon->flags), ctx->ht.protection, _iwl_set_rxon_ht()
700 ctx->ht.extension_chan_offset); _iwl_set_rxon_ht()
705 struct iwl_rxon_context *ctx; iwl_set_rxon_ht() local
707 for_each_context(priv, ctx) iwl_set_rxon_ht()
708 _iwl_set_rxon_ht(priv, ht_conf, ctx); iwl_set_rxon_ht()
719 struct iwl_rxon_context *ctx) iwl_set_rxon_channel()
724 if ((le16_to_cpu(ctx->staging.channel) == channel) && iwl_set_rxon_channel()
728 ctx->staging.channel = cpu_to_le16(channel); iwl_set_rxon_channel()
730 ctx->staging.flags &= ~RXON_FLG_BAND_24G_MSK; iwl_set_rxon_channel()
732 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; iwl_set_rxon_channel()
741 struct iwl_rxon_context *ctx, iwl_set_flags_for_band()
746 ctx->staging.flags &= iwl_set_flags_for_band()
749 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; iwl_set_flags_for_band()
753 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; iwl_set_flags_for_band()
755 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; iwl_set_flags_for_band()
757 ctx->staging.flags |= RXON_FLG_BAND_24G_MSK; iwl_set_flags_for_band()
758 ctx->staging.flags |= RXON_FLG_AUTO_DETECT_MSK; iwl_set_flags_for_band()
759 ctx->staging.flags &= ~RXON_FLG_CCK_MSK; iwl_set_flags_for_band()
764 struct iwl_rxon_context *ctx, int hw_decrypt) iwl_set_rxon_hwcrypto()
766 struct iwl_rxon_cmd *rxon = &ctx->staging; iwl_set_rxon_hwcrypto()
777 struct iwl_rxon_context *ctx) iwl_check_rxon_cmd()
779 struct iwl_rxon_cmd *rxon = &ctx->staging; iwl_check_rxon_cmd()
857 struct iwl_rxon_context *ctx) iwl_full_rxon_required()
859 const struct iwl_rxon_cmd *staging = &ctx->staging; iwl_full_rxon_required()
860 const struct iwl_rxon_cmd *active = &ctx->active; iwl_full_rxon_required()
877 CHK(!iwl_is_associated_ctx(ctx)); iwl_full_rxon_required()
915 struct iwl_rxon_context *ctx = &priv->contexts[ctxid]; iwl_print_rx_config_cmd() local
916 struct iwl_rxon_cmd *rxon = &ctx->staging; iwl_print_rx_config_cmd()
939 struct iwl_rxon_context *ctx) iwl_calc_basic_rates()
946 if (ctx->vif) { iwl_calc_basic_rates()
948 unsigned long basic = ctx->vif->bss_conf.basic_rates; iwl_calc_basic_rates()
1025 ctx->staging.cck_basic_rates = cck; iwl_calc_basic_rates()
1026 ctx->staging.ofdm_basic_rates = ofdm; iwl_calc_basic_rates()
1047 int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx) iwlagn_commit_rxon() argument
1050 struct iwl_rxon_cmd *active = (void *)&ctx->active; iwlagn_commit_rxon()
1051 bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK); iwlagn_commit_rxon()
1062 if (!ctx->is_active) iwlagn_commit_rxon()
1066 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK; iwlagn_commit_rxon()
1069 iwl_calc_basic_rates(priv, ctx); iwlagn_commit_rxon()
1076 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; iwlagn_commit_rxon()
1078 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || iwlagn_commit_rxon()
1079 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) iwlagn_commit_rxon()
1080 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; iwlagn_commit_rxon()
1082 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; iwlagn_commit_rxon()
1084 iwl_print_rx_config_cmd(priv, ctx->ctxid); iwlagn_commit_rxon()
1085 ret = iwl_check_rxon_cmd(priv, ctx); iwlagn_commit_rxon()
1096 (priv->switch_channel != ctx->staging.channel)) { iwlagn_commit_rxon()
1107 if (!iwl_full_rxon_required(priv, ctx)) { iwlagn_commit_rxon()
1108 ret = iwlagn_send_rxon_assoc(priv, ctx); iwlagn_commit_rxon()
1114 memcpy(active, &ctx->staging, sizeof(*active)); iwlagn_commit_rxon()
1127 iwl_set_rxon_hwcrypto(priv, ctx, !iwlwifi_mod_params.sw_crypto); iwlagn_commit_rxon()
1135 le16_to_cpu(ctx->staging.channel), iwlagn_commit_rxon()
1136 ctx->staging.bssid_addr); iwlagn_commit_rxon()
1144 ret = iwlagn_rxon_disconn(priv, ctx); iwlagn_commit_rxon()
1153 return iwlagn_rxon_connect(priv, ctx); iwlagn_commit_rxon()
1159 struct iwl_rxon_context *ctx) iwlagn_config_ht40()
1162 ctx->ht.extension_chan_offset = iwlagn_config_ht40()
1164 ctx->ht.is_40mhz = true; iwlagn_config_ht40()
1166 ctx->ht.extension_chan_offset = iwlagn_config_ht40()
1168 ctx->ht.is_40mhz = true; iwlagn_config_ht40()
1170 ctx->ht.extension_chan_offset = iwlagn_config_ht40()
1172 ctx->ht.is_40mhz = false; iwlagn_config_ht40()
1179 struct iwl_rxon_context *ctx; iwlagn_mac_config() local
1210 for_each_context(priv, ctx) iwlagn_mac_config()
1211 iwlagn_set_rxon_chain(priv, ctx); iwlagn_mac_config()
1215 for_each_context(priv, ctx) { for_each_context()
1217 if (ctx->ht.enabled != conf_is_ht(conf)) for_each_context()
1218 ctx->ht.enabled = conf_is_ht(conf); for_each_context()
1220 if (ctx->ht.enabled) { for_each_context()
1223 if (!ctx->ht.is_40mhz || for_each_context()
1224 !iwl_is_associated_ctx(ctx)) for_each_context()
1225 iwlagn_config_ht40(conf, ctx); for_each_context()
1227 ctx->ht.is_40mhz = false; for_each_context()
1233 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; for_each_context()
1238 if (le16_to_cpu(ctx->staging.channel) != for_each_context()
1240 ctx->staging.flags = 0; for_each_context()
1242 iwl_set_rxon_channel(priv, channel, ctx); for_each_context()
1245 iwl_set_flags_for_band(priv, ctx, channel->band, for_each_context()
1246 ctx->vif); for_each_context()
1266 for_each_context(priv, ctx) { for_each_context()
1267 if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) for_each_context()
1269 iwlagn_commit_rxon(priv, ctx); for_each_context()
1279 struct iwl_rxon_context *ctx, iwlagn_check_needed_chains()
1282 struct ieee80211_vif *vif = ctx->vif; iwlagn_check_needed_chains()
1355 ctx->ht_need_multiple_chains = need_multiple; iwlagn_check_needed_chains()
1413 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); iwlagn_bss_info_changed() local
1433 if (unlikely(!ctx->vif)) { iwlagn_bss_info_changed()
1443 ctx->qos_data.qos_active = bss_conf->qos; iwlagn_bss_info_changed()
1444 iwlagn_update_qos(priv, ctx); iwlagn_bss_info_changed()
1447 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); iwlagn_bss_info_changed()
1449 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; iwlagn_bss_info_changed()
1451 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; iwlagn_bss_info_changed()
1456 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; iwlagn_bss_info_changed()
1458 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; iwlagn_bss_info_changed()
1460 if (ctx->ctxid == IWL_RXON_CTX_BSS) iwlagn_bss_info_changed()
1467 if (ctx->ht.enabled) { iwlagn_bss_info_changed()
1468 ctx->ht.protection = bss_conf->ht_operation_mode & iwlagn_bss_info_changed()
1470 ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode & iwlagn_bss_info_changed()
1472 iwlagn_check_needed_chains(priv, ctx, bss_conf); iwlagn_bss_info_changed()
1476 iwlagn_set_rxon_chain(priv, ctx); iwlagn_bss_info_changed()
1479 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK; iwlagn_bss_info_changed()
1481 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; iwlagn_bss_info_changed()
1484 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN; iwlagn_bss_info_changed()
1486 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN; iwlagn_bss_info_changed()
1488 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); iwlagn_bss_info_changed()
1493 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; iwlagn_bss_info_changed()
1494 priv->beacon_ctx = ctx; iwlagn_bss_info_changed()
1496 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; iwlagn_bss_info_changed()
1511 ctx->staging.filter_flags |= RXON_FILTER_BCON_AWARE_MSK; iwlagn_bss_info_changed()
1513 ctx->staging.filter_flags &= iwlagn_bss_info_changed()
1517 if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) iwlagn_bss_info_changed()
1518 iwlagn_commit_rxon(priv, ctx); iwlagn_bss_info_changed()
1543 if (changes & BSS_CHANGED_BEACON && priv->beacon_ctx == ctx) { iwlagn_bss_info_changed()
1553 struct iwl_rxon_context *ctx; iwlagn_post_scan() local
1566 for_each_context(priv, ctx) iwlagn_post_scan()
1567 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging))) iwlagn_post_scan()
1568 iwlagn_commit_rxon(priv, ctx); iwlagn_post_scan()
37 iwl_connection_init_rx_config(struct iwl_priv *priv, struct iwl_rxon_context *ctx) iwl_connection_init_rx_config() argument
98 iwlagn_disable_bss(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_rxon_cmd *send) iwlagn_disable_bss() argument
118 iwlagn_disable_pan(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_rxon_cmd *send) iwlagn_disable_pan() argument
155 iwlagn_disconn_pan(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct iwl_rxon_cmd *send) iwlagn_disconn_pan() argument
171 iwlagn_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx) iwlagn_update_qos() argument
211 iwlagn_send_rxon_assoc(struct iwl_priv *priv, struct iwl_rxon_context *ctx) iwlagn_send_rxon_assoc() argument
291 iwl_send_rxon_timing(struct iwl_priv *priv, struct iwl_rxon_context *ctx) iwl_send_rxon_timing() argument
359 iwlagn_rxon_disconn(struct iwl_priv *priv, struct iwl_rxon_context *ctx) iwlagn_rxon_disconn() argument
459 iwlagn_rxon_connect(struct iwl_priv *priv, struct iwl_rxon_context *ctx) iwlagn_rxon_connect() argument
621 _iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf, struct iwl_rxon_context *ctx) _iwl_set_rxon_ht() argument
718 iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch, struct iwl_rxon_context *ctx) iwl_set_rxon_channel() argument
740 iwl_set_flags_for_band(struct iwl_priv *priv, struct iwl_rxon_context *ctx, enum ieee80211_band band, struct ieee80211_vif *vif) iwl_set_flags_for_band() argument
763 iwl_set_rxon_hwcrypto(struct iwl_priv *priv, struct iwl_rxon_context *ctx, int hw_decrypt) iwl_set_rxon_hwcrypto() argument
776 iwl_check_rxon_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx) iwl_check_rxon_cmd() argument
856 iwl_full_rxon_required(struct iwl_priv *priv, struct iwl_rxon_context *ctx) iwl_full_rxon_required() argument
938 iwl_calc_basic_rates(struct iwl_priv *priv, struct iwl_rxon_context *ctx) iwl_calc_basic_rates() argument
1158 iwlagn_config_ht40(struct ieee80211_conf *conf, struct iwl_rxon_context *ctx) iwlagn_config_ht40() argument
1278 iwlagn_check_needed_chains(struct iwl_priv *priv, struct iwl_rxon_context *ctx, struct ieee80211_bss_conf *bss_conf) iwlagn_check_needed_chains() argument
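Throughout rxon.c, handlers edit only ctx->staging; a commit is attempted when staging differs from ctx->active (the memcmp checks in iwlagn_mac_config() and iwlagn_post_scan()), and on success staging is copied over active so the driver's view matches the firmware's. That double-buffered configuration idiom, reduced to a sketch (the struct layout and send path are hypothetical):

        #include <linux/string.h>

        static int commit_if_changed(struct my_ctx *ctx)
        {
                int ret;

                if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
                        return 0;               /* device already matches */

                ret = send_config(ctx, &ctx->staging);  /* assumed helper */
                if (ret)
                        return ret;             /* active still mirrors the HW */

                memcpy(&ctx->active, &ctx->staging, sizeof(ctx->active));
                return 0;
        }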
/linux-4.1.27/drivers/power/reset/
H A Dsyscon-reboot.c37 struct syscon_reboot_context *ctx = syscon_restart_handle() local
42 regmap_write(ctx->map, ctx->offset, ctx->mask); syscon_restart_handle()
52 struct syscon_reboot_context *ctx; syscon_reboot_probe() local
56 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); syscon_reboot_probe()
57 if (!ctx) syscon_reboot_probe()
60 ctx->map = syscon_regmap_lookup_by_phandle(dev->of_node, "regmap"); syscon_reboot_probe()
61 if (IS_ERR(ctx->map)) syscon_reboot_probe()
62 return PTR_ERR(ctx->map); syscon_reboot_probe()
64 if (of_property_read_u32(pdev->dev.of_node, "offset", &ctx->offset)) syscon_reboot_probe()
67 if (of_property_read_u32(pdev->dev.of_node, "mask", &ctx->mask)) syscon_reboot_probe()
70 ctx->restart_handler.notifier_call = syscon_restart_handle; syscon_reboot_probe()
71 ctx->restart_handler.priority = 192; syscon_reboot_probe()
72 err = register_restart_handler(&ctx->restart_handler); syscon_reboot_probe()
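
Both reboot drivers in this directory reduce to the same skeleton: allocate a context with devm_kzalloc(), embed a notifier_block in it, and hand that to register_restart_handler(); when the system reboots, the callback receives the notifier_block and uses container_of() to reach the rest of the context. A hedged kernel-style sketch of that skeleton (the myboard_* names and the register layout are made up; only the notifier and restart-handler APIs are real):

#include <linux/io.h>
#include <linux/notifier.h>
#include <linux/platform_device.h>
#include <linux/reboot.h>

/* Hypothetical context: the notifier_block is embedded so the handler
 * can recover the surrounding state with container_of(), exactly as
 * syscon_restart_handle() does above. */
struct myboard_reboot_ctx {
	struct notifier_block restart_handler;
	void __iomem *reg;
	u32 magic;
};

static int myboard_restart_handle(struct notifier_block *nb,
				  unsigned long action, void *data)
{
	struct myboard_reboot_ctx *ctx =
		container_of(nb, struct myboard_reboot_ctx, restart_handler);

	writel(ctx->magic, ctx->reg);	/* poke the reset register */
	return NOTIFY_DONE;
}

static int myboard_reboot_probe(struct platform_device *pdev)
{
	struct myboard_reboot_ctx *ctx;

	ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	/* Mapping ctx->reg and choosing ctx->magic are elided here; see
	 * syscon_regmap_lookup_by_phandle() above and of_iomap() below. */
	ctx->restart_handler.notifier_call = myboard_restart_handle;
	ctx->restart_handler.priority = 192;
	return register_restart_handler(&ctx->restart_handler);
}
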
H A Dxgene-reboot.c47 struct xgene_reboot_context *ctx = xgene_restart_handler() local
52 writel(ctx->mask, ctx->csr); xgene_restart_handler()
56 dev_emerg(ctx->dev, "Unable to restart system\n"); xgene_restart_handler()
63 struct xgene_reboot_context *ctx; xgene_reboot_probe() local
67 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); xgene_reboot_probe()
68 if (!ctx) xgene_reboot_probe()
71 ctx->csr = of_iomap(dev->of_node, 0); xgene_reboot_probe()
72 if (!ctx->csr) { xgene_reboot_probe()
77 if (of_property_read_u32(dev->of_node, "mask", &ctx->mask)) xgene_reboot_probe()
78 ctx->mask = 0xFFFFFFFF; xgene_reboot_probe()
80 ctx->dev = dev; xgene_reboot_probe()
81 ctx->restart_handler.notifier_call = xgene_restart_handler; xgene_reboot_probe()
82 ctx->restart_handler.priority = 128; xgene_reboot_probe()
83 err = register_restart_handler(&ctx->restart_handler); xgene_reboot_probe()
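
One detail separating the two probes: syscon-reboot registers its handler at priority 192, xgene at 128. Higher-priority restart handlers are tried first, and the register_restart_handler() kerneldoc suggests rough conventions, so on a system with both drivers bound the syscon handler would run before xgene's; xgene's dev_emerg() above only fires if its register write fails to actually reset the system. A short annotated snippet of the convention (values as documented in kernel/reboot.c):

/* Restart-handler priorities: higher runs first.
 *   0   - handler of last resort, limited capabilities
 *   128 - default handler (xgene above)
 *   255 - must preempt all other handlers
 * syscon-reboot's 192 therefore wins over xgene's 128. */
ctx->restart_handler.priority = 128;
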
/linux-4.1.27/drivers/mmc/core/
H A Dslot-gpio.c47 struct mmc_gpio *ctx = devm_kzalloc(host->parent, mmc_gpio_alloc() local
48 sizeof(*ctx) + 2 * len, GFP_KERNEL); mmc_gpio_alloc()
50 if (ctx) { mmc_gpio_alloc()
51 ctx->ro_label = ctx->cd_label + len; mmc_gpio_alloc()
52 snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent)); mmc_gpio_alloc()
53 snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent)); mmc_gpio_alloc()
54 host->slot.handler_priv = ctx; mmc_gpio_alloc()
58 return ctx ? 0 : -ENOMEM; mmc_gpio_alloc()
63 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpio_get_ro() local
65 if (!ctx || !ctx->ro_gpio) mmc_gpio_get_ro()
68 if (ctx->override_ro_active_level) mmc_gpio_get_ro()
69 return !gpiod_get_raw_value_cansleep(ctx->ro_gpio) ^ mmc_gpio_get_ro()
72 return gpiod_get_value_cansleep(ctx->ro_gpio); mmc_gpio_get_ro()
78 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpio_get_cd() local
80 if (!ctx || !ctx->cd_gpio) mmc_gpio_get_cd()
83 if (ctx->override_cd_active_level) mmc_gpio_get_cd()
84 return !gpiod_get_raw_value_cansleep(ctx->cd_gpio) ^ mmc_gpio_get_cd()
87 return gpiod_get_value_cansleep(ctx->cd_gpio); mmc_gpio_get_cd()
103 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpio_request_ro() local
110 ctx->ro_label); mmc_gpio_request_ro()
114 ctx->override_ro_active_level = true; mmc_gpio_request_ro()
115 ctx->ro_gpio = gpio_to_desc(gpio); mmc_gpio_request_ro()
123 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpiod_request_cd_irq() local
126 if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio) mmc_gpiod_request_cd_irq()
129 irq = gpiod_to_irq(ctx->cd_gpio); mmc_gpiod_request_cd_irq()
140 if (!ctx->cd_gpio_isr) mmc_gpiod_request_cd_irq()
141 ctx->cd_gpio_isr = mmc_gpio_cd_irqt; mmc_gpiod_request_cd_irq()
143 NULL, ctx->cd_gpio_isr, mmc_gpiod_request_cd_irq()
145 ctx->cd_label, host); mmc_gpiod_request_cd_irq()
163 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpio_set_cd_isr() local
165 WARN_ON(ctx->cd_gpio_isr); mmc_gpio_set_cd_isr()
166 ctx->cd_gpio_isr = isr; mmc_gpio_set_cd_isr()
188 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpio_request_cd() local
192 ctx->cd_label); mmc_gpio_request_cd()
207 ctx->override_cd_active_level = true; mmc_gpio_request_cd()
208 ctx->cd_gpio = gpio_to_desc(gpio); mmc_gpio_request_cd()
234 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpiod_request_cd() local
239 con_id = ctx->cd_label; mmc_gpiod_request_cd()
254 ctx->override_cd_active_level = override_active_level; mmc_gpiod_request_cd()
255 ctx->cd_gpio = desc; mmc_gpiod_request_cd()
280 struct mmc_gpio *ctx = host->slot.handler_priv; mmc_gpiod_request_ro() local
285 con_id = ctx->ro_label; mmc_gpiod_request_ro()
300 ctx->override_ro_active_level = override_active_level; mmc_gpiod_request_ro()
301 ctx->ro_gpio = desc; mmc_gpiod_request_ro()
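
mmc_gpio_alloc() above packs the context struct and both label strings into one devm_kzalloc() of sizeof(*ctx) + 2 * len: the struct ends in a flexible array holding the card-detect label, and the read-only label pointer is aimed len bytes further into the same tail. A runnable userspace analog, with calloc() standing in for devm_kzalloc() and slot_ctx for struct mmc_gpio:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct slot_ctx {
	char *ro_label;
	char cd_label[];	/* flexible array: tail storage for both labels */
};

static struct slot_ctx *slot_ctx_alloc(const char *devname)
{
	size_t len = strlen(devname) + 4;	/* " cd"/" ro" plus NUL */
	struct slot_ctx *ctx;

	/* One allocation: the struct, then 2*len bytes for two labels. */
	ctx = calloc(1, sizeof(*ctx) + 2 * len);
	if (!ctx)
		return NULL;

	ctx->ro_label = ctx->cd_label + len;	/* second label after first */
	snprintf(ctx->cd_label, len, "%s cd", devname);
	snprintf(ctx->ro_label, len, "%s ro", devname);
	return ctx;
}

int main(void)
{
	struct slot_ctx *ctx = slot_ctx_alloc("mmc0");

	if (!ctx)
		return 1;
	printf("cd='%s' ro='%s'\n", ctx->cd_label, ctx->ro_label);
	free(ctx);
	return 0;
}
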
/linux-4.1.27/drivers/gpu/drm/panel/
H A Dpanel-ld9040.c118 static int ld9040_clear_error(struct ld9040 *ctx) ld9040_clear_error() argument
120 int ret = ctx->error; ld9040_clear_error()
122 ctx->error = 0; ld9040_clear_error()
126 static int ld9040_spi_write_word(struct ld9040 *ctx, u16 data) ld9040_spi_write_word() argument
128 struct spi_device *spi = to_spi_device(ctx->dev); ld9040_spi_write_word()
141 static void ld9040_dcs_write(struct ld9040 *ctx, const u8 *data, size_t len) ld9040_dcs_write() argument
145 if (ctx->error < 0 || len == 0) ld9040_dcs_write()
148 dev_dbg(ctx->dev, "writing dcs seq: %*ph\n", (int)len, data); ld9040_dcs_write()
149 ret = ld9040_spi_write_word(ctx, *data); ld9040_dcs_write()
153 ret = ld9040_spi_write_word(ctx, *data | 0x100); ld9040_dcs_write()
157 dev_err(ctx->dev, "error %d writing dcs seq: %*ph\n", ret, ld9040_dcs_write()
159 ctx->error = ret; ld9040_dcs_write()
165 #define ld9040_dcs_write_seq_static(ctx, seq...) \
168 ld9040_dcs_write(ctx, d, ARRAY_SIZE(d));\
171 static void ld9040_brightness_set(struct ld9040 *ctx) ld9040_brightness_set() argument
173 ld9040_dcs_write(ctx, ld9040_gammas[ctx->brightness], ld9040_brightness_set()
174 ARRAY_SIZE(ld9040_gammas[ctx->brightness])); ld9040_brightness_set()
176 ld9040_dcs_write_seq_static(ctx, MCS_GAMMA_CTRL, 0x02, 0x5a); ld9040_brightness_set()
179 static void ld9040_init(struct ld9040 *ctx) ld9040_init() argument
181 ld9040_dcs_write_seq_static(ctx, MCS_USER_SETTING, 0x5a, 0x5a); ld9040_init()
182 ld9040_dcs_write_seq_static(ctx, MCS_PANEL_CONDITION, ld9040_init()
186 ld9040_dcs_write_seq_static(ctx, MCS_DISPCTL, ld9040_init()
188 ld9040_dcs_write_seq_static(ctx, MCS_MANPWR, 0x04); ld9040_init()
189 ld9040_dcs_write_seq_static(ctx, MCS_POWER_CTRL, ld9040_init()
191 ld9040_dcs_write_seq_static(ctx, MCS_ELVSS_ON, 0x0d, 0x00, 0x16); ld9040_init()
192 ld9040_dcs_write_seq_static(ctx, MCS_GTCON, 0x09, 0x00, 0x00); ld9040_init()
193 ld9040_brightness_set(ctx); ld9040_init()
194 ld9040_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE); ld9040_init()
195 ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON); ld9040_init()
198 static int ld9040_power_on(struct ld9040 *ctx) ld9040_power_on() argument
202 ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); ld9040_power_on()
206 msleep(ctx->power_on_delay); ld9040_power_on()
207 gpiod_set_value(ctx->reset_gpio, 0); ld9040_power_on()
208 msleep(ctx->reset_delay); ld9040_power_on()
209 gpiod_set_value(ctx->reset_gpio, 1); ld9040_power_on()
210 msleep(ctx->reset_delay); ld9040_power_on()
215 static int ld9040_power_off(struct ld9040 *ctx) ld9040_power_off() argument
217 return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); ld9040_power_off()
227 struct ld9040 *ctx = panel_to_ld9040(panel); ld9040_unprepare() local
230 ld9040_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF); ld9040_unprepare()
231 ld9040_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE); ld9040_unprepare()
234 ld9040_clear_error(ctx); ld9040_unprepare()
236 return ld9040_power_off(ctx); ld9040_unprepare()
241 struct ld9040 *ctx = panel_to_ld9040(panel); ld9040_prepare() local
244 ret = ld9040_power_on(ctx); ld9040_prepare()
248 ld9040_init(ctx); ld9040_prepare()
250 ret = ld9040_clear_error(ctx); ld9040_prepare()
266 struct ld9040 *ctx = panel_to_ld9040(panel); ld9040_get_modes() local
275 drm_display_mode_from_videomode(&ctx->vm, mode); ld9040_get_modes()
276 mode->width_mm = ctx->width_mm; ld9040_get_modes()
277 mode->height_mm = ctx->height_mm; ld9040_get_modes()
295 static int ld9040_parse_dt(struct ld9040 *ctx) ld9040_parse_dt() argument
297 struct device *dev = ctx->dev; ld9040_parse_dt()
301 ret = of_get_videomode(np, &ctx->vm, 0); ld9040_parse_dt()
305 of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay); ld9040_parse_dt()
306 of_property_read_u32(np, "reset-delay", &ctx->reset_delay); ld9040_parse_dt()
307 of_property_read_u32(np, "panel-width-mm", &ctx->width_mm); ld9040_parse_dt()
308 of_property_read_u32(np, "panel-height-mm", &ctx->height_mm); ld9040_parse_dt()
316 struct ld9040 *ctx; ld9040_probe() local
319 ctx = devm_kzalloc(dev, sizeof(struct ld9040), GFP_KERNEL); ld9040_probe()
320 if (!ctx) ld9040_probe()
323 spi_set_drvdata(spi, ctx); ld9040_probe()
325 ctx->dev = dev; ld9040_probe()
326 ctx->brightness = ARRAY_SIZE(ld9040_gammas) - 1; ld9040_probe()
328 ret = ld9040_parse_dt(ctx); ld9040_probe()
332 ctx->supplies[0].supply = "vdd3"; ld9040_probe()
333 ctx->supplies[1].supply = "vci"; ld9040_probe()
334 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), ld9040_probe()
335 ctx->supplies); ld9040_probe()
339 ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); ld9040_probe()
340 if (IS_ERR(ctx->reset_gpio)) { ld9040_probe()
342 PTR_ERR(ctx->reset_gpio)); ld9040_probe()
343 return PTR_ERR(ctx->reset_gpio); ld9040_probe()
353 drm_panel_init(&ctx->panel); ld9040_probe()
354 ctx->panel.dev = dev; ld9040_probe()
355 ctx->panel.funcs = &ld9040_drm_funcs; ld9040_probe()
357 return drm_panel_add(&ctx->panel); ld9040_probe()
362 struct ld9040 *ctx = spi_get_drvdata(spi); ld9040_remove() local
364 ld9040_power_off(ctx); ld9040_remove()
365 drm_panel_remove(&ctx->panel); ld9040_remove()
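
Both panel drivers in this directory use a "sticky error" context: once ctx->error goes negative, every later DCS write is a silent no-op, so a long init sequence can run straight through with no per-call checks, which is why ld9040_prepare() above calls ld9040_init() unconditionally and then inspects ld9040_clear_error() once. A runnable stand-alone analog; bus_send() is a made-up transport that fails on one command to show the latching:

#include <stdio.h>

struct panel_ctx {
	int error;	/* 0, or the first negative errno-style code seen */
};

static int panel_clear_error(struct panel_ctx *ctx)
{
	int ret = ctx->error;

	ctx->error = 0;
	return ret;
}

/* Hypothetical transport; fails on command 0x13 for the demo. */
static int bus_send(unsigned char cmd)
{
	return cmd == 0x13 ? -5 /* -EIO */ : 0;
}

static void panel_write(struct panel_ctx *ctx, unsigned char cmd)
{
	int ret;

	if (ctx->error < 0)	/* sticky: skip everything after a failure */
		return;

	ret = bus_send(cmd);
	if (ret < 0)
		ctx->error = ret;	/* latch the first error only */
}

int main(void)
{
	struct panel_ctx ctx = { 0 };

	panel_write(&ctx, 0x11);	/* ok */
	panel_write(&ctx, 0x13);	/* fails, latches -EIO */
	panel_write(&ctx, 0x29);	/* skipped: error already latched */
	printf("init result: %d\n", panel_clear_error(&ctx));
	return 0;
}
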
H A Dpanel-s6e8aa0.c128 static int s6e8aa0_clear_error(struct s6e8aa0 *ctx) s6e8aa0_clear_error() argument
130 int ret = ctx->error; s6e8aa0_clear_error()
132 ctx->error = 0; s6e8aa0_clear_error()
136 static void s6e8aa0_dcs_write(struct s6e8aa0 *ctx, const void *data, size_t len) s6e8aa0_dcs_write() argument
138 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); s6e8aa0_dcs_write()
141 if (ctx->error < 0) s6e8aa0_dcs_write()
146 dev_err(ctx->dev, "error %zd writing dcs seq: %*ph\n", ret, s6e8aa0_dcs_write()
148 ctx->error = ret; s6e8aa0_dcs_write()
152 static int s6e8aa0_dcs_read(struct s6e8aa0 *ctx, u8 cmd, void *data, size_t len) s6e8aa0_dcs_read() argument
154 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); s6e8aa0_dcs_read()
157 if (ctx->error < 0) s6e8aa0_dcs_read()
158 return ctx->error; s6e8aa0_dcs_read()
162 dev_err(ctx->dev, "error %d reading dcs seq(%#x)\n", ret, cmd); s6e8aa0_dcs_read()
163 ctx->error = ret; s6e8aa0_dcs_read()
169 #define s6e8aa0_dcs_write_seq(ctx, seq...) \
173 s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
176 #define s6e8aa0_dcs_write_seq_static(ctx, seq...) \
179 s6e8aa0_dcs_write(ctx, d, ARRAY_SIZE(d));\
182 static void s6e8aa0_apply_level_1_key(struct s6e8aa0 *ctx) s6e8aa0_apply_level_1_key() argument
184 s6e8aa0_dcs_write_seq_static(ctx, 0xf0, 0x5a, 0x5a); s6e8aa0_apply_level_1_key()
187 static void s6e8aa0_panel_cond_set_v142(struct s6e8aa0 *ctx) s6e8aa0_panel_cond_set_v142() argument
192 u8 aid = aids[ctx->id >> 5]; s6e8aa0_panel_cond_set_v142()
201 if (ctx->flip_vertical) { s6e8aa0_panel_cond_set_v142()
207 if (ctx->flip_horizontal) { s6e8aa0_panel_cond_set_v142()
213 if (ctx->flip_horizontal || ctx->flip_vertical) { s6e8aa0_panel_cond_set_v142()
249 s6e8aa0_dcs_write_seq(ctx, s6e8aa0_panel_cond_set_v142()
258 static void s6e8aa0_panel_cond_set(struct s6e8aa0 *ctx) s6e8aa0_panel_cond_set() argument
260 if (ctx->version < 142) s6e8aa0_panel_cond_set()
261 s6e8aa0_dcs_write_seq_static(ctx, s6e8aa0_panel_cond_set()
269 s6e8aa0_panel_cond_set_v142(ctx); s6e8aa0_panel_cond_set()
272 static void s6e8aa0_display_condition_set(struct s6e8aa0 *ctx) s6e8aa0_display_condition_set() argument
274 s6e8aa0_dcs_write_seq_static(ctx, 0xf2, 0x80, 0x03, 0x0d); s6e8aa0_display_condition_set()
277 static void s6e8aa0_etc_source_control(struct s6e8aa0 *ctx) s6e8aa0_etc_source_control() argument
279 s6e8aa0_dcs_write_seq_static(ctx, 0xf6, 0x00, 0x02, 0x00); s6e8aa0_etc_source_control()
282 static void s6e8aa0_etc_pentile_control(struct s6e8aa0 *ctx) s6e8aa0_etc_pentile_control() argument
292 if (ctx->version < 142) s6e8aa0_etc_pentile_control()
293 s6e8aa0_dcs_write(ctx, pent32, ARRAY_SIZE(pent32)); s6e8aa0_etc_pentile_control()
295 s6e8aa0_dcs_write(ctx, pent142, ARRAY_SIZE(pent142)); s6e8aa0_etc_pentile_control()
298 static void s6e8aa0_etc_power_control(struct s6e8aa0 *ctx) s6e8aa0_etc_power_control() argument
308 if (ctx->version < 142) s6e8aa0_etc_power_control()
309 s6e8aa0_dcs_write(ctx, pwr32, ARRAY_SIZE(pwr32)); s6e8aa0_etc_power_control()
311 s6e8aa0_dcs_write(ctx, pwr142, ARRAY_SIZE(pwr142)); s6e8aa0_etc_power_control()
314 static void s6e8aa0_etc_elvss_control(struct s6e8aa0 *ctx) s6e8aa0_etc_elvss_control() argument
316 u8 id = ctx->id ? 0 : 0x95; s6e8aa0_etc_elvss_control()
318 s6e8aa0_dcs_write_seq(ctx, 0xb1, 0x04, id); s6e8aa0_etc_elvss_control()
321 static void s6e8aa0_elvss_nvm_set_v142(struct s6e8aa0 *ctx) s6e8aa0_elvss_nvm_set_v142() argument
325 switch (ctx->brightness) { s6e8aa0_elvss_nvm_set_v142()
341 s6e8aa0_dcs_write_seq(ctx, 0xd9, 0x14, 0x40, 0x0c, 0xcb, 0xce, 0x6e, s6e8aa0_elvss_nvm_set_v142()
345 static void s6e8aa0_elvss_nvm_set(struct s6e8aa0 *ctx) s6e8aa0_elvss_nvm_set() argument
347 if (ctx->version < 142) s6e8aa0_elvss_nvm_set()
348 s6e8aa0_dcs_write_seq_static(ctx, s6e8aa0_elvss_nvm_set()
352 s6e8aa0_elvss_nvm_set_v142(ctx); s6e8aa0_elvss_nvm_set()
355 static void s6e8aa0_apply_level_2_key(struct s6e8aa0 *ctx) s6e8aa0_apply_level_2_key() argument
357 s6e8aa0_dcs_write_seq_static(ctx, 0xfc, 0x5a, 0x5a); s6e8aa0_apply_level_2_key()
764 static void s6e8aa0_brightness_set(struct s6e8aa0 *ctx) s6e8aa0_brightness_set() argument
768 if (ctx->error) s6e8aa0_brightness_set()
771 gamma = ctx->variant->gamma_tables[ctx->brightness]; s6e8aa0_brightness_set()
773 if (ctx->version >= 142) s6e8aa0_brightness_set()
774 s6e8aa0_elvss_nvm_set(ctx); s6e8aa0_brightness_set()
776 s6e8aa0_dcs_write(ctx, gamma, GAMMA_TABLE_LEN); s6e8aa0_brightness_set()
779 s6e8aa0_dcs_write_seq_static(ctx, 0xf7, 0x03); s6e8aa0_brightness_set()
782 static void s6e8aa0_panel_init(struct s6e8aa0 *ctx) s6e8aa0_panel_init() argument
784 s6e8aa0_apply_level_1_key(ctx); s6e8aa0_panel_init()
785 s6e8aa0_apply_level_2_key(ctx); s6e8aa0_panel_init()
788 s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_EXIT_SLEEP_MODE); s6e8aa0_panel_init()
791 s6e8aa0_panel_cond_set(ctx); s6e8aa0_panel_init()
792 s6e8aa0_display_condition_set(ctx); s6e8aa0_panel_init()
793 s6e8aa0_brightness_set(ctx); s6e8aa0_panel_init()
794 s6e8aa0_etc_source_control(ctx); s6e8aa0_panel_init()
795 s6e8aa0_etc_pentile_control(ctx); s6e8aa0_panel_init()
796 s6e8aa0_elvss_nvm_set(ctx); s6e8aa0_panel_init()
797 s6e8aa0_etc_power_control(ctx); s6e8aa0_panel_init()
798 s6e8aa0_etc_elvss_control(ctx); s6e8aa0_panel_init()
799 msleep(ctx->init_delay); s6e8aa0_panel_init()
802 static void s6e8aa0_set_maximum_return_packet_size(struct s6e8aa0 *ctx, s6e8aa0_set_maximum_return_packet_size() argument
805 struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev); s6e8aa0_set_maximum_return_packet_size()
808 if (ctx->error < 0) s6e8aa0_set_maximum_return_packet_size()
813 dev_err(ctx->dev, s6e8aa0_set_maximum_return_packet_size()
816 ctx->error = ret; s6e8aa0_set_maximum_return_packet_size()
820 static void s6e8aa0_read_mtp_id(struct s6e8aa0 *ctx) s6e8aa0_read_mtp_id() argument
825 ret = s6e8aa0_dcs_read(ctx, 0xd1, id, ARRAY_SIZE(id)); s6e8aa0_read_mtp_id()
827 dev_err(ctx->dev, "read id failed\n"); s6e8aa0_read_mtp_id()
828 ctx->error = -EIO; s6e8aa0_read_mtp_id()
832 dev_info(ctx->dev, "ID: 0x%2x, 0x%2x, 0x%2x\n", id[0], id[1], id[2]); s6e8aa0_read_mtp_id()
839 dev_err(ctx->dev, "unsupported display version %d\n", id[1]); s6e8aa0_read_mtp_id()
840 ctx->error = -EINVAL; s6e8aa0_read_mtp_id()
844 ctx->variant = &s6e8aa0_variants[i]; s6e8aa0_read_mtp_id()
845 ctx->version = id[1]; s6e8aa0_read_mtp_id()
846 ctx->id = id[2]; s6e8aa0_read_mtp_id()
849 static void s6e8aa0_set_sequence(struct s6e8aa0 *ctx) s6e8aa0_set_sequence() argument
851 s6e8aa0_set_maximum_return_packet_size(ctx, 3); s6e8aa0_set_sequence()
852 s6e8aa0_read_mtp_id(ctx); s6e8aa0_set_sequence()
853 s6e8aa0_panel_init(ctx); s6e8aa0_set_sequence()
854 s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_ON); s6e8aa0_set_sequence()
857 static int s6e8aa0_power_on(struct s6e8aa0 *ctx) s6e8aa0_power_on() argument
861 ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies); s6e8aa0_power_on()
865 msleep(ctx->power_on_delay); s6e8aa0_power_on()
867 gpiod_set_value(ctx->reset_gpio, 0); s6e8aa0_power_on()
869 gpiod_set_value(ctx->reset_gpio, 1); s6e8aa0_power_on()
871 msleep(ctx->reset_delay); s6e8aa0_power_on()
876 static int s6e8aa0_power_off(struct s6e8aa0 *ctx) s6e8aa0_power_off() argument
878 return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies); s6e8aa0_power_off()
888 struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel); s6e8aa0_unprepare() local
890 s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_ENTER_SLEEP_MODE); s6e8aa0_unprepare()
891 s6e8aa0_dcs_write_seq_static(ctx, MIPI_DCS_SET_DISPLAY_OFF); s6e8aa0_unprepare()
894 s6e8aa0_clear_error(ctx); s6e8aa0_unprepare()
896 return s6e8aa0_power_off(ctx); s6e8aa0_unprepare()
901 struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel); s6e8aa0_prepare() local
904 ret = s6e8aa0_power_on(ctx); s6e8aa0_prepare()
908 s6e8aa0_set_sequence(ctx); s6e8aa0_prepare()
909 ret = ctx->error; s6e8aa0_prepare()
925 struct s6e8aa0 *ctx = panel_to_s6e8aa0(panel); s6e8aa0_get_modes() local
934 drm_display_mode_from_videomode(&ctx->vm, mode); s6e8aa0_get_modes()
935 mode->width_mm = ctx->width_mm; s6e8aa0_get_modes()
936 mode->height_mm = ctx->height_mm; s6e8aa0_get_modes()
954 static int s6e8aa0_parse_dt(struct s6e8aa0 *ctx) s6e8aa0_parse_dt() argument
956 struct device *dev = ctx->dev; s6e8aa0_parse_dt()
960 ret = of_get_videomode(np, &ctx->vm, 0); s6e8aa0_parse_dt()
964 of_property_read_u32(np, "power-on-delay", &ctx->power_on_delay); s6e8aa0_parse_dt()
965 of_property_read_u32(np, "reset-delay", &ctx->reset_delay); s6e8aa0_parse_dt()
966 of_property_read_u32(np, "init-delay", &ctx->init_delay); s6e8aa0_parse_dt()
967 of_property_read_u32(np, "panel-width-mm", &ctx->width_mm); s6e8aa0_parse_dt()
968 of_property_read_u32(np, "panel-height-mm", &ctx->height_mm); s6e8aa0_parse_dt()
970 ctx->flip_horizontal = of_property_read_bool(np, "flip-horizontal"); s6e8aa0_parse_dt()
971 ctx->flip_vertical = of_property_read_bool(np, "flip-vertical"); s6e8aa0_parse_dt()
979 struct s6e8aa0 *ctx; s6e8aa0_probe() local
982 ctx = devm_kzalloc(dev, sizeof(struct s6e8aa0), GFP_KERNEL); s6e8aa0_probe()
983 if (!ctx) s6e8aa0_probe()
986 mipi_dsi_set_drvdata(dsi, ctx); s6e8aa0_probe()
988 ctx->dev = dev; s6e8aa0_probe()
997 ret = s6e8aa0_parse_dt(ctx); s6e8aa0_probe()
1001 ctx->supplies[0].supply = "vdd3"; s6e8aa0_probe()
1002 ctx->supplies[1].supply = "vci"; s6e8aa0_probe()
1003 ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies), s6e8aa0_probe()
1004 ctx->supplies); s6e8aa0_probe()
1010 ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); s6e8aa0_probe()
1011 if (IS_ERR(ctx->reset_gpio)) { s6e8aa0_probe()
1013 PTR_ERR(ctx->reset_gpio)); s6e8aa0_probe()
1014 return PTR_ERR(ctx->reset_gpio); s6e8aa0_probe()
1017 ctx->brightness = GAMMA_LEVEL_NUM - 1; s6e8aa0_probe()
1019 drm_panel_init(&ctx->panel); s6e8aa0_probe()
1020 ctx->panel.dev = dev; s6e8aa0_probe()
1021 ctx->panel.funcs = &s6e8aa0_drm_funcs; s6e8aa0_probe()
1023 ret = drm_panel_add(&ctx->panel); s6e8aa0_probe()
1029 drm_panel_remove(&ctx->panel); s6e8aa0_probe()
1036 struct s6e8aa0 *ctx = mipi_dsi_get_drvdata(dsi); s6e8aa0_remove() local
1039 drm_panel_remove(&ctx->panel); s6e8aa0_remove()
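
The dcs_write_seq_static() macros in both panels lean on two GNU C extensions, a named variadic macro argument (seq...) and a statement expression, to drop each command sequence into a function-scope static const array, so the bytes live in .rodata instead of being rebuilt on the stack per call. A compilable stand-alone version under those assumptions, with a printf stand-in for the DCS transport:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct panel_ctx { const char *name; };

static void dcs_write(struct panel_ctx *ctx, const unsigned char *d,
		      size_t len)
{
	printf("%s: %zu-byte seq, first byte %#x\n", ctx->name, len, d[0]);
}

/* GNU extensions: 'seq...' names the variadic args, '({ })' makes the
 * whole thing an expression; the array is static so each sequence is
 * materialized once, at compile time. */
#define dcs_write_seq_static(ctx, seq...) \
({ \
	static const unsigned char d[] = { seq }; \
	dcs_write(ctx, d, ARRAY_SIZE(d)); \
})

int main(void)
{
	struct panel_ctx ctx = { "panel" };

	dcs_write_seq_static(&ctx, 0xf0, 0x5a, 0x5a);	/* level-1 key */
	dcs_write_seq_static(&ctx, 0x11);		/* exit sleep */
	return 0;
}
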
/linux-4.1.27/net/mac80211/
H A Dchan.c13 struct ieee80211_chanctx *ctx) ieee80211_chanctx_num_assigned()
20 list_for_each_entry(sdata, &ctx->assigned_vifs, assigned_chanctx_list) ieee80211_chanctx_num_assigned()
27 struct ieee80211_chanctx *ctx) ieee80211_chanctx_num_reserved()
34 list_for_each_entry(sdata, &ctx->reserved_vifs, reserved_chanctx_list) ieee80211_chanctx_num_reserved()
41 struct ieee80211_chanctx *ctx) ieee80211_chanctx_refcount()
43 return ieee80211_chanctx_num_assigned(local, ctx) + ieee80211_chanctx_refcount()
44 ieee80211_chanctx_num_reserved(local, ctx); ieee80211_chanctx_refcount()
49 struct ieee80211_chanctx *ctx; ieee80211_num_chanctx() local
54 list_for_each_entry(ctx, &local->chanctx_list, list) ieee80211_num_chanctx()
82 struct ieee80211_chanctx *ctx, ieee80211_chanctx_reserved_chandef()
89 list_for_each_entry(sdata, &ctx->reserved_vifs, ieee80211_chanctx_reserved_chandef()
105 struct ieee80211_chanctx *ctx, ieee80211_chanctx_non_reserved_chandef()
112 list_for_each_entry(sdata, &ctx->assigned_vifs, ieee80211_chanctx_non_reserved_chandef()
131 struct ieee80211_chanctx *ctx, ieee80211_chanctx_combined_chandef()
136 compat = ieee80211_chanctx_reserved_chandef(local, ctx, compat); ieee80211_chanctx_combined_chandef()
140 compat = ieee80211_chanctx_non_reserved_chandef(local, ctx, compat); ieee80211_chanctx_combined_chandef()
149 struct ieee80211_chanctx *ctx, ieee80211_chanctx_can_reserve_chandef()
154 if (ieee80211_chanctx_combined_chandef(local, ctx, def)) ieee80211_chanctx_can_reserve_chandef()
157 if (!list_empty(&ctx->reserved_vifs) && ieee80211_chanctx_can_reserve_chandef()
158 ieee80211_chanctx_reserved_chandef(local, ctx, def)) ieee80211_chanctx_can_reserve_chandef()
169 struct ieee80211_chanctx *ctx; ieee80211_find_reservation_chanctx() local
176 list_for_each_entry(ctx, &local->chanctx_list, list) { ieee80211_find_reservation_chanctx()
177 if (ctx->replace_state == IEEE80211_CHANCTX_WILL_BE_REPLACED) ieee80211_find_reservation_chanctx()
180 if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) ieee80211_find_reservation_chanctx()
183 if (!ieee80211_chanctx_can_reserve_chandef(local, ctx, ieee80211_find_reservation_chanctx()
187 return ctx; ieee80211_find_reservation_chanctx()
209 * ctx->conf.min_def, we have to make sure to take ieee80211_get_sta_bw()
302 struct ieee80211_chanctx *ctx) ieee80211_recalc_chanctx_min_def()
310 if (ctx->conf.def.width == NL80211_CHAN_WIDTH_5 || ieee80211_recalc_chanctx_min_def()
311 ctx->conf.def.width == NL80211_CHAN_WIDTH_10 || ieee80211_recalc_chanctx_min_def()
312 ctx->conf.radar_enabled) { ieee80211_recalc_chanctx_min_def()
313 ctx->conf.min_def = ctx->conf.def; ieee80211_recalc_chanctx_min_def()
317 max_bw = ieee80211_get_chanctx_max_required_bw(local, &ctx->conf); ieee80211_recalc_chanctx_min_def()
320 min_def = ctx->conf.def; ieee80211_recalc_chanctx_min_def()
324 if (cfg80211_chandef_identical(&ctx->conf.min_def, &min_def)) ieee80211_recalc_chanctx_min_def()
327 ctx->conf.min_def = min_def; ieee80211_recalc_chanctx_min_def()
328 if (!ctx->driver_present) ieee80211_recalc_chanctx_min_def()
331 drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_MIN_WIDTH); ieee80211_recalc_chanctx_min_def()
335 struct ieee80211_chanctx *ctx, ieee80211_change_chanctx()
338 if (cfg80211_chandef_identical(&ctx->conf.def, chandef)) ieee80211_change_chanctx()
341 WARN_ON(!cfg80211_chandef_compatible(&ctx->conf.def, chandef)); ieee80211_change_chanctx()
343 ctx->conf.def = *chandef; ieee80211_change_chanctx()
344 drv_change_chanctx(local, ctx, IEEE80211_CHANCTX_CHANGE_WIDTH); ieee80211_change_chanctx()
345 ieee80211_recalc_chanctx_min_def(local, ctx); ieee80211_change_chanctx()
358 struct ieee80211_chanctx *ctx; ieee80211_find_chanctx() local
365 list_for_each_entry(ctx, &local->chanctx_list, list) { ieee80211_find_chanctx()
368 if (ctx->replace_state != IEEE80211_CHANCTX_REPLACE_NONE) ieee80211_find_chanctx()
371 if (ctx->mode == IEEE80211_CHANCTX_EXCLUSIVE) ieee80211_find_chanctx()
374 compat = cfg80211_chandef_compatible(&ctx->conf.def, chandef); ieee80211_find_chanctx()
378 compat = ieee80211_chanctx_reserved_chandef(local, ctx, ieee80211_find_chanctx()
383 ieee80211_change_chanctx(local, ctx, compat); ieee80211_find_chanctx()
385 return ctx; ieee80211_find_chanctx()
411 struct ieee80211_chanctx *ctx) ieee80211_chanctx_radar_required()
413 struct ieee80211_chanctx_conf *conf = &ctx->conf; ieee80211_chanctx_radar_required()
442 struct ieee80211_chanctx *ctx; ieee80211_alloc_chanctx() local
446 ctx = kzalloc(sizeof(*ctx) + local->hw.chanctx_data_size, GFP_KERNEL); ieee80211_alloc_chanctx()
447 if (!ctx) ieee80211_alloc_chanctx()
450 INIT_LIST_HEAD(&ctx->assigned_vifs); ieee80211_alloc_chanctx()
451 INIT_LIST_HEAD(&ctx->reserved_vifs); ieee80211_alloc_chanctx()
452 ctx->conf.def = *chandef; ieee80211_alloc_chanctx()
453 ctx->conf.rx_chains_static = 1; ieee80211_alloc_chanctx()
454 ctx->conf.rx_chains_dynamic = 1; ieee80211_alloc_chanctx()
455 ctx->mode = mode; ieee80211_alloc_chanctx()
456 ctx->conf.radar_enabled = false; ieee80211_alloc_chanctx()
457 ieee80211_recalc_chanctx_min_def(local, ctx); ieee80211_alloc_chanctx()
459 return ctx; ieee80211_alloc_chanctx()
463 struct ieee80211_chanctx *ctx) ieee80211_add_chanctx()
472 local->hw.conf.radar_enabled = ctx->conf.radar_enabled; ieee80211_add_chanctx()
480 local->_oper_chandef = ctx->conf.def; ieee80211_add_chanctx()
483 err = drv_add_chanctx(local, ctx); ieee80211_add_chanctx()
498 struct ieee80211_chanctx *ctx; ieee80211_new_chanctx() local
504 ctx = ieee80211_alloc_chanctx(local, chandef, mode); ieee80211_new_chanctx()
505 if (!ctx) ieee80211_new_chanctx()
508 err = ieee80211_add_chanctx(local, ctx); ieee80211_new_chanctx()
510 kfree(ctx); ieee80211_new_chanctx()
514 list_add_rcu(&ctx->list, &local->chanctx_list); ieee80211_new_chanctx()
515 return ctx; ieee80211_new_chanctx()
519 struct ieee80211_chanctx *ctx) ieee80211_del_chanctx()
539 drv_remove_chanctx(local, ctx); ieee80211_del_chanctx()
546 struct ieee80211_chanctx *ctx) ieee80211_free_chanctx()
550 WARN_ON_ONCE(ieee80211_chanctx_refcount(local, ctx) != 0); ieee80211_free_chanctx()
552 list_del_rcu(&ctx->list); ieee80211_free_chanctx()
553 ieee80211_del_chanctx(local, ctx); ieee80211_free_chanctx()
554 kfree_rcu(ctx, rcu_head); ieee80211_free_chanctx()
558 struct ieee80211_chanctx *ctx) ieee80211_recalc_chanctx_chantype()
560 struct ieee80211_chanctx_conf *conf = &ctx->conf; ieee80211_recalc_chanctx_chantype()
589 ieee80211_change_chanctx(local, ctx, compat); ieee80211_recalc_chanctx_chantype()
805 struct ieee80211_chanctx *ctx = sdata->reserved_chanctx; ieee80211_vif_unreserve_chanctx() local
809 if (WARN_ON(!ctx)) ieee80211_vif_unreserve_chanctx()
815 if (ieee80211_chanctx_refcount(sdata->local, ctx) == 0) { ieee80211_vif_unreserve_chanctx()
816 if (ctx->replace_state == IEEE80211_CHANCTX_REPLACES_OTHER) { ieee80211_vif_unreserve_chanctx()
817 if (WARN_ON(!ctx->replace_ctx)) ieee80211_vif_unreserve_chanctx()
820 WARN_ON(ctx->replace_ctx->replace_state != ieee80211_vif_unreserve_chanctx()
822 WARN_ON(ctx->replace_ctx->replace_ctx != ctx); ieee80211_vif_unreserve_chanctx()
824 ctx->replace_ctx->replace_ctx = NULL; ieee80211_vif_unreserve_chanctx()
825 ctx->replace_ctx->replace_state = ieee80211_vif_unreserve_chanctx()
828 list_del_rcu(&ctx->list); ieee80211_vif_unreserve_chanctx()
829 kfree_rcu(ctx, rcu_head); ieee80211_vif_unreserve_chanctx()
831 ieee80211_free_chanctx(sdata->local, ctx); ieee80211_vif_unreserve_chanctx()
844 struct ieee80211_chanctx *new_ctx, *curr_ctx, *ctx; ieee80211_vif_reserve_chanctx() local
875 * Consider ctx1..3, vif1..6, each ctx has 2 ieee80211_vif_reserve_chanctx()
885 list_for_each_entry(ctx, &local->chanctx_list, ieee80211_vif_reserve_chanctx()
887 if (ctx->replace_state != ieee80211_vif_reserve_chanctx()
891 if (!list_empty(&ctx->reserved_vifs)) ieee80211_vif_reserve_chanctx()
894 curr_ctx = ctx; ieee80211_vif_reserve_chanctx()
1147 struct ieee80211_chanctx *ctx, *old_ctx; ieee80211_chsw_switch_vifs() local
1158 list_for_each_entry(ctx, &local->chanctx_list, list) { ieee80211_chsw_switch_vifs()
1159 if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) ieee80211_chsw_switch_vifs()
1162 if (WARN_ON(!ctx->replace_ctx)) { ieee80211_chsw_switch_vifs()
1167 list_for_each_entry(sdata, &ctx->reserved_vifs, ieee80211_chsw_switch_vifs()
1176 vif_chsw[i].new_ctx = &ctx->conf; ieee80211_chsw_switch_vifs()
1192 struct ieee80211_chanctx *ctx; ieee80211_chsw_switch_ctxs() local
1198 list_for_each_entry(ctx, &local->chanctx_list, list) { ieee80211_chsw_switch_ctxs()
1199 if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) ieee80211_chsw_switch_ctxs()
1202 if (!list_empty(&ctx->replace_ctx->assigned_vifs)) ieee80211_chsw_switch_ctxs()
1205 ieee80211_del_chanctx(local, ctx->replace_ctx); ieee80211_chsw_switch_ctxs()
1206 err = ieee80211_add_chanctx(local, ctx); ieee80211_chsw_switch_ctxs()
1214 WARN_ON(ieee80211_add_chanctx(local, ctx)); ieee80211_chsw_switch_ctxs()
1215 list_for_each_entry_continue_reverse(ctx, &local->chanctx_list, list) { ieee80211_chsw_switch_ctxs()
1216 if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) ieee80211_chsw_switch_ctxs()
1219 if (!list_empty(&ctx->replace_ctx->assigned_vifs)) ieee80211_chsw_switch_ctxs()
1222 ieee80211_del_chanctx(local, ctx); ieee80211_chsw_switch_ctxs()
1223 WARN_ON(ieee80211_add_chanctx(local, ctx->replace_ctx)); ieee80211_chsw_switch_ctxs()
1232 struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx; ieee80211_vif_use_reserved_switch() local
1256 list_for_each_entry(ctx, &local->chanctx_list, list) { ieee80211_vif_use_reserved_switch()
1257 if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) ieee80211_vif_use_reserved_switch()
1260 if (WARN_ON(!ctx->replace_ctx)) { ieee80211_vif_use_reserved_switch()
1266 new_ctx = ctx; ieee80211_vif_use_reserved_switch()
1274 list_for_each_entry(sdata, &ctx->replace_ctx->assigned_vifs, ieee80211_vif_use_reserved_switch()
1295 ctx->conf.radar_enabled = false; ieee80211_vif_use_reserved_switch()
1296 list_for_each_entry(sdata, &ctx->reserved_vifs, ieee80211_vif_use_reserved_switch()
1314 ctx->conf.radar_enabled = true; ieee80211_vif_use_reserved_switch()
1357 list_for_each_entry(ctx, &local->chanctx_list, list) { ieee80211_vif_use_reserved_switch()
1358 if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) ieee80211_vif_use_reserved_switch()
1361 if (WARN_ON(!ctx->replace_ctx)) { ieee80211_vif_use_reserved_switch()
1366 list_for_each_entry(sdata, &ctx->reserved_vifs, ieee80211_vif_use_reserved_switch()
1373 rcu_assign_pointer(sdata->vif.chanctx_conf, &ctx->conf); ieee80211_vif_use_reserved_switch()
1393 ieee80211_recalc_chanctx_chantype(local, ctx); ieee80211_vif_use_reserved_switch()
1394 ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_vif_use_reserved_switch()
1395 ieee80211_recalc_radar_chanctx(local, ctx); ieee80211_vif_use_reserved_switch()
1396 ieee80211_recalc_chanctx_min_def(local, ctx); ieee80211_vif_use_reserved_switch()
1398 list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs, ieee80211_vif_use_reserved_switch()
1400 if (ieee80211_vif_get_chanctx(sdata) != ctx) ieee80211_vif_use_reserved_switch()
1405 &ctx->assigned_vifs); ieee80211_vif_use_reserved_switch()
1418 list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs, ieee80211_vif_use_reserved_switch()
1424 if (WARN_ON(sdata->reserved_chanctx != ctx)) ieee80211_vif_use_reserved_switch()
1452 list_for_each_entry_safe(ctx, ctx_tmp, &local->chanctx_list, list) { ieee80211_vif_use_reserved_switch()
1453 if (ctx->replace_state != IEEE80211_CHANCTX_WILL_BE_REPLACED) ieee80211_vif_use_reserved_switch()
1456 ctx->replace_ctx->replace_ctx = NULL; ieee80211_vif_use_reserved_switch()
1457 ctx->replace_ctx->replace_state = ieee80211_vif_use_reserved_switch()
1460 list_del_rcu(&ctx->list); ieee80211_vif_use_reserved_switch()
1461 kfree_rcu(ctx, rcu_head); ieee80211_vif_use_reserved_switch()
1467 list_for_each_entry(ctx, &local->chanctx_list, list) { ieee80211_vif_use_reserved_switch()
1468 if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER) ieee80211_vif_use_reserved_switch()
1471 list_for_each_entry_safe(sdata, sdata_tmp, &ctx->reserved_vifs, ieee80211_vif_use_reserved_switch()
1485 struct ieee80211_chanctx *ctx; __ieee80211_vif_release_channel() local
1495 ctx = container_of(conf, struct ieee80211_chanctx, conf); __ieee80211_vif_release_channel()
1508 if (ieee80211_chanctx_refcount(local, ctx) == 0) __ieee80211_vif_release_channel()
1509 ieee80211_free_chanctx(local, ctx); __ieee80211_vif_release_channel()
1523 struct ieee80211_chanctx *ctx; ieee80211_vif_use_channel() local
1550 ctx = ieee80211_find_chanctx(local, chandef, mode); ieee80211_vif_use_channel()
1551 if (!ctx) ieee80211_vif_use_channel()
1552 ctx = ieee80211_new_chanctx(local, chandef, mode); ieee80211_vif_use_channel()
1553 if (IS_ERR(ctx)) { ieee80211_vif_use_channel()
1554 ret = PTR_ERR(ctx); ieee80211_vif_use_channel()
1560 ret = ieee80211_assign_vif_chanctx(sdata, ctx); ieee80211_vif_use_channel()
1563 if (ieee80211_chanctx_refcount(local, ctx) == 0) ieee80211_vif_use_channel()
1564 ieee80211_free_chanctx(local, ctx); ieee80211_vif_use_channel()
1568 ieee80211_recalc_smps_chanctx(local, ctx); ieee80211_vif_use_channel()
1569 ieee80211_recalc_radar_chanctx(local, ctx); ieee80211_vif_use_channel()
1648 struct ieee80211_chanctx *ctx; ieee80211_vif_change_bandwidth() local
1675 ctx = container_of(conf, struct ieee80211_chanctx, conf); ieee80211_vif_change_bandwidth()
1683 switch (ctx->replace_state) { ieee80211_vif_change_bandwidth()
1685 if (!ieee80211_chanctx_reserved_chandef(local, ctx, compat)) { ieee80211_vif_change_bandwidth()
1705 ieee80211_recalc_chanctx_chantype(local, ctx); ieee80211_vif_change_bandwidth()
1752 struct ieee80211_chanctx *ctx; ieee80211_iter_chan_contexts_atomic() local
1755 list_for_each_entry_rcu(ctx, &local->chanctx_list, list) ieee80211_iter_chan_contexts_atomic()
1756 if (ctx->driver_present) ieee80211_iter_chan_contexts_atomic()
1757 iter(hw, &ctx->conf, iter_data); ieee80211_iter_chan_contexts_atomic()
12 ieee80211_chanctx_num_assigned(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) ieee80211_chanctx_num_assigned() argument
26 ieee80211_chanctx_num_reserved(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) ieee80211_chanctx_num_reserved() argument
40 ieee80211_chanctx_refcount(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) ieee80211_chanctx_refcount() argument
81 ieee80211_chanctx_reserved_chandef(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *compat) ieee80211_chanctx_reserved_chandef() argument
104 ieee80211_chanctx_non_reserved_chandef(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *compat) ieee80211_chanctx_non_reserved_chandef() argument
130 ieee80211_chanctx_combined_chandef(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *compat) ieee80211_chanctx_combined_chandef() argument
148 ieee80211_chanctx_can_reserve_chandef(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *def) ieee80211_chanctx_can_reserve_chandef() argument
301 ieee80211_recalc_chanctx_min_def(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) ieee80211_recalc_chanctx_min_def() argument
334 ieee80211_change_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx, const struct cfg80211_chan_def *chandef) ieee80211_change_chanctx() argument
410 ieee80211_chanctx_radar_required(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) ieee80211_chanctx_radar_required() argument
462 ieee80211_add_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) ieee80211_add_chanctx() argument
518 ieee80211_del_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) ieee80211_del_chanctx() argument
545 ieee80211_free_chanctx(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) ieee80211_free_chanctx() argument
557 ieee80211_recalc_chanctx_chantype(struct ieee80211_local *local, struct ieee80211_chanctx *ctx) ieee80211_recalc_chanctx_chantype() argument
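
ieee80211_vif_use_channel() above is a find-or-create flow: ieee80211_find_chanctx() scans chanctx_list for a non-exclusive context whose chandef is compatible (possibly widening it via ieee80211_change_chanctx()), and only when nothing matches does ieee80211_new_chanctx() allocate; note too that a context's refcount is recomputed by counting its assigned and reserved vif lists rather than kept in a counter that could drift. A runnable toy of the find-or-create half, with "same channel number" standing in for cfg80211's compatibility test and an array standing in for the list:

#include <stdio.h>

#define MAX_CTX 4

struct chanctx {
	int in_use;
	int channel;
	int exclusive;	/* exclusive contexts are never shared */
	int users;
};

static struct chanctx ctx_list[MAX_CTX];

static struct chanctx *find_chanctx(int channel)
{
	for (int i = 0; i < MAX_CTX; i++) {
		struct chanctx *ctx = &ctx_list[i];

		if (ctx->in_use && !ctx->exclusive && ctx->channel == channel)
			return ctx;	/* compatible: share it */
	}
	return NULL;
}

static struct chanctx *new_chanctx(int channel)
{
	for (int i = 0; i < MAX_CTX; i++) {
		if (!ctx_list[i].in_use) {
			ctx_list[i] = (struct chanctx){ 1, channel, 0, 0 };
			return &ctx_list[i];
		}
	}
	return NULL;	/* no free slot */
}

static struct chanctx *use_channel(int channel)
{
	struct chanctx *ctx = find_chanctx(channel);

	if (!ctx)
		ctx = new_chanctx(channel);	/* only allocate as last resort */
	if (ctx)
		ctx->users++;
	return ctx;
}

int main(void)
{
	struct chanctx *a = use_channel(36);
	struct chanctx *b = use_channel(36);	/* reuses a's context */
	struct chanctx *c = use_channel(11);	/* genuinely new context */

	printf("a==b: %d, users on 36: %d, c channel: %d\n",
	       a == b, a->users, c->channel);
	return 0;
}
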
/linux-4.1.27/drivers/crypto/qat/qat_common/
H A Dqat_algs.c154 struct qat_alg_aead_ctx *ctx, qat_alg_do_precomputes()
158 SHASH_DESC_ON_STACK(shash, ctx->hash_tfm); qat_alg_do_precomputes()
162 int block_size = crypto_shash_blocksize(ctx->hash_tfm); qat_alg_do_precomputes()
163 int digest_size = crypto_shash_digestsize(ctx->hash_tfm); qat_alg_do_precomputes()
172 shash->tfm = ctx->hash_tfm; qat_alg_do_precomputes()
203 switch (ctx->qat_hash_alg) { qat_alg_do_precomputes()
232 offset = round_up(qat_get_inter_state_size(ctx->qat_hash_alg), 8); qat_alg_do_precomputes()
236 switch (ctx->qat_hash_alg) { qat_alg_do_precomputes()
281 static int qat_alg_aead_init_enc_session(struct qat_alg_aead_ctx *ctx, qat_alg_aead_init_enc_session() argument
285 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); qat_alg_aead_init_enc_session()
287 struct qat_enc *enc_ctx = &ctx->enc_cd->qat_enc_cd; qat_alg_aead_init_enc_session()
292 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->enc_fw_req; qat_alg_aead_init_enc_session()
304 ctx->qat_hash_alg, digestsize); qat_alg_aead_init_enc_session()
306 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); qat_alg_aead_init_enc_session()
308 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen)) qat_alg_aead_init_enc_session()
320 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; qat_alg_aead_init_enc_session()
335 switch (ctx->qat_hash_alg) { qat_alg_aead_init_enc_session()
361 static int qat_alg_aead_init_dec_session(struct qat_alg_aead_ctx *ctx, qat_alg_aead_init_dec_session() argument
365 struct crypto_aead *aead_tfm = __crypto_aead_cast(ctx->tfm); qat_alg_aead_init_dec_session()
367 struct qat_dec *dec_ctx = &ctx->dec_cd->qat_dec_cd; qat_alg_aead_init_dec_session()
372 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2); qat_alg_aead_init_dec_session()
373 struct icp_qat_fw_la_bulk_req *req_tmpl = &ctx->dec_fw_req; qat_alg_aead_init_dec_session()
389 ctx->qat_hash_alg, qat_alg_aead_init_dec_session()
392 cpu_to_be32(crypto_shash_blocksize(ctx->hash_tfm)); qat_alg_aead_init_dec_session()
394 if (qat_alg_do_precomputes(hash, ctx, keys->authkey, keys->authkeylen)) qat_alg_aead_init_dec_session()
406 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; qat_alg_aead_init_dec_session()
414 roundup(crypto_shash_digestsize(ctx->hash_tfm), 8) * 2) >> 3; qat_alg_aead_init_dec_session()
424 switch (ctx->qat_hash_alg) { qat_alg_aead_init_dec_session()
452 static void qat_alg_ablkcipher_init_com(struct qat_alg_ablkcipher_ctx *ctx, qat_alg_ablkcipher_init_com() argument
474 static void qat_alg_ablkcipher_init_enc(struct qat_alg_ablkcipher_ctx *ctx, qat_alg_ablkcipher_init_enc() argument
478 struct icp_qat_hw_cipher_algo_blk *enc_cd = ctx->enc_cd; qat_alg_ablkcipher_init_enc()
479 struct icp_qat_fw_la_bulk_req *req = &ctx->enc_fw_req; qat_alg_ablkcipher_init_enc()
482 qat_alg_ablkcipher_init_com(ctx, req, enc_cd, key, keylen); qat_alg_ablkcipher_init_enc()
483 cd_pars->u.s.content_desc_addr = ctx->enc_cd_paddr; qat_alg_ablkcipher_init_enc()
487 static void qat_alg_ablkcipher_init_dec(struct qat_alg_ablkcipher_ctx *ctx, qat_alg_ablkcipher_init_dec() argument
491 struct icp_qat_hw_cipher_algo_blk *dec_cd = ctx->dec_cd; qat_alg_ablkcipher_init_dec()
492 struct icp_qat_fw_la_bulk_req *req = &ctx->dec_fw_req; qat_alg_ablkcipher_init_dec()
495 qat_alg_ablkcipher_init_com(ctx, req, dec_cd, key, keylen); qat_alg_ablkcipher_init_dec()
496 cd_pars->u.s.content_desc_addr = ctx->dec_cd_paddr; qat_alg_ablkcipher_init_dec()
518 static int qat_alg_aead_init_sessions(struct qat_alg_aead_ctx *ctx, qat_alg_aead_init_sessions() argument
524 if (crypto_rng_get_bytes(crypto_default_rng, ctx->salt, AES_BLOCK_SIZE)) qat_alg_aead_init_sessions()
533 if (qat_alg_aead_init_enc_session(ctx, alg, &keys)) qat_alg_aead_init_sessions()
536 if (qat_alg_aead_init_dec_session(ctx, alg, &keys)) qat_alg_aead_init_sessions()
541 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); qat_alg_aead_init_sessions()
547 static int qat_alg_ablkcipher_init_sessions(struct qat_alg_ablkcipher_ctx *ctx, qat_alg_ablkcipher_init_sessions() argument
556 qat_alg_ablkcipher_init_enc(ctx, alg, key, keylen); qat_alg_ablkcipher_init_sessions()
557 qat_alg_ablkcipher_init_dec(ctx, alg, key, keylen); qat_alg_ablkcipher_init_sessions()
560 crypto_tfm_set_flags(ctx->tfm, CRYPTO_TFM_RES_BAD_KEY_LEN); qat_alg_ablkcipher_init_sessions()
567 struct qat_alg_aead_ctx *ctx = crypto_aead_ctx(tfm); qat_alg_aead_setkey() local
570 spin_lock(&ctx->lock); qat_alg_aead_setkey()
571 if (ctx->enc_cd) { qat_alg_aead_setkey()
573 dev = &GET_DEV(ctx->inst->accel_dev); qat_alg_aead_setkey()
574 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); qat_alg_aead_setkey()
575 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); qat_alg_aead_setkey()
576 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); qat_alg_aead_setkey()
577 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); qat_alg_aead_setkey()
584 spin_unlock(&ctx->lock); qat_alg_aead_setkey()
589 ctx->inst = inst; qat_alg_aead_setkey()
590 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), qat_alg_aead_setkey()
591 &ctx->enc_cd_paddr, qat_alg_aead_setkey()
593 if (!ctx->enc_cd) { qat_alg_aead_setkey()
594 spin_unlock(&ctx->lock); qat_alg_aead_setkey()
597 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), qat_alg_aead_setkey()
598 &ctx->dec_cd_paddr, qat_alg_aead_setkey()
600 if (!ctx->dec_cd) { qat_alg_aead_setkey()
601 spin_unlock(&ctx->lock); qat_alg_aead_setkey()
605 spin_unlock(&ctx->lock); qat_alg_aead_setkey()
606 if (qat_alg_aead_init_sessions(ctx, key, keylen)) qat_alg_aead_setkey()
612 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); qat_alg_aead_setkey()
614 ctx->dec_cd, ctx->dec_cd_paddr); qat_alg_aead_setkey()
615 ctx->dec_cd = NULL; qat_alg_aead_setkey()
617 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); qat_alg_aead_setkey()
619 ctx->enc_cd, ctx->enc_cd_paddr); qat_alg_aead_setkey()
620 ctx->enc_cd = NULL; qat_alg_aead_setkey()
801 struct qat_alg_aead_ctx *ctx = qat_req->aead_ctx; qat_aead_alg_callback() local
802 struct qat_crypto_instance *inst = ctx->inst; qat_aead_alg_callback()
816 struct qat_alg_ablkcipher_ctx *ctx = qat_req->ablkcipher_ctx; qat_ablkcipher_alg_callback() local
817 struct qat_crypto_instance *inst = ctx->inst; qat_ablkcipher_alg_callback()
841 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); qat_alg_aead_dec() local
849 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, qat_alg_aead_dec()
855 *msg = ctx->dec_fw_req; qat_alg_aead_dec()
856 qat_req->aead_ctx = ctx; qat_alg_aead_dec()
871 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); qat_alg_aead_dec()
875 qat_alg_free_bufl(ctx->inst, qat_req); qat_alg_aead_dec()
886 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); qat_alg_aead_enc_internal() local
893 ret = qat_alg_sgl_to_bufl(ctx->inst, areq->assoc, areq->src, areq->dst, qat_alg_aead_enc_internal()
899 *msg = ctx->enc_fw_req; qat_alg_aead_enc_internal()
900 qat_req->aead_ctx = ctx; qat_alg_aead_enc_internal()
921 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); qat_alg_aead_enc_internal()
925 qat_alg_free_bufl(ctx->inst, qat_req); qat_alg_aead_enc_internal()
940 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); qat_alg_aead_genivenc() local
943 memcpy(req->giv, ctx->salt, AES_BLOCK_SIZE); qat_alg_aead_genivenc()
954 struct qat_alg_ablkcipher_ctx *ctx = crypto_ablkcipher_ctx(tfm); qat_alg_ablkcipher_setkey() local
957 spin_lock(&ctx->lock); qat_alg_ablkcipher_setkey()
958 if (ctx->enc_cd) { qat_alg_ablkcipher_setkey()
960 dev = &GET_DEV(ctx->inst->accel_dev); qat_alg_ablkcipher_setkey()
961 memset(ctx->enc_cd, 0, sizeof(*ctx->enc_cd)); qat_alg_ablkcipher_setkey()
962 memset(ctx->dec_cd, 0, sizeof(*ctx->dec_cd)); qat_alg_ablkcipher_setkey()
963 memset(&ctx->enc_fw_req, 0, sizeof(ctx->enc_fw_req)); qat_alg_ablkcipher_setkey()
964 memset(&ctx->dec_fw_req, 0, sizeof(ctx->dec_fw_req)); qat_alg_ablkcipher_setkey()
971 spin_unlock(&ctx->lock); qat_alg_ablkcipher_setkey()
976 ctx->inst = inst; qat_alg_ablkcipher_setkey()
977 ctx->enc_cd = dma_zalloc_coherent(dev, sizeof(*ctx->enc_cd), qat_alg_ablkcipher_setkey()
978 &ctx->enc_cd_paddr, qat_alg_ablkcipher_setkey()
980 if (!ctx->enc_cd) { qat_alg_ablkcipher_setkey()
981 spin_unlock(&ctx->lock); qat_alg_ablkcipher_setkey()
984 ctx->dec_cd = dma_zalloc_coherent(dev, sizeof(*ctx->dec_cd), qat_alg_ablkcipher_setkey()
985 &ctx->dec_cd_paddr, qat_alg_ablkcipher_setkey()
987 if (!ctx->dec_cd) { qat_alg_ablkcipher_setkey()
988 spin_unlock(&ctx->lock); qat_alg_ablkcipher_setkey()
992 spin_unlock(&ctx->lock); qat_alg_ablkcipher_setkey()
993 if (qat_alg_ablkcipher_init_sessions(ctx, key, keylen)) qat_alg_ablkcipher_setkey()
999 memset(ctx->dec_cd, 0, sizeof(*ctx->enc_cd)); qat_alg_ablkcipher_setkey()
1000 dma_free_coherent(dev, sizeof(*ctx->enc_cd), qat_alg_ablkcipher_setkey()
1001 ctx->dec_cd, ctx->dec_cd_paddr); qat_alg_ablkcipher_setkey()
1002 ctx->dec_cd = NULL; qat_alg_ablkcipher_setkey()
1004 memset(ctx->enc_cd, 0, sizeof(*ctx->dec_cd)); qat_alg_ablkcipher_setkey()
1005 dma_free_coherent(dev, sizeof(*ctx->dec_cd), qat_alg_ablkcipher_setkey()
1006 ctx->enc_cd, ctx->enc_cd_paddr); qat_alg_ablkcipher_setkey()
1007 ctx->enc_cd = NULL; qat_alg_ablkcipher_setkey()
1015 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); qat_alg_ablkcipher_encrypt() local
1021 ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, qat_alg_ablkcipher_encrypt()
1027 *msg = ctx->enc_fw_req; qat_alg_ablkcipher_encrypt()
1028 qat_req->ablkcipher_ctx = ctx; qat_alg_ablkcipher_encrypt()
1039 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); qat_alg_ablkcipher_encrypt()
1043 qat_alg_free_bufl(ctx->inst, qat_req); qat_alg_ablkcipher_encrypt()
1053 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); qat_alg_ablkcipher_decrypt() local
1059 ret = qat_alg_sgl_to_bufl(ctx->inst, NULL, req->src, req->dst, qat_alg_ablkcipher_decrypt()
1065 *msg = ctx->dec_fw_req; qat_alg_ablkcipher_decrypt()
1066 qat_req->ablkcipher_ctx = ctx; qat_alg_ablkcipher_decrypt()
1077 ret = adf_send_message(ctx->inst->sym_tx, (uint32_t *)msg); qat_alg_ablkcipher_decrypt()
1081 qat_alg_free_bufl(ctx->inst, qat_req); qat_alg_ablkcipher_decrypt()
1091 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); qat_alg_aead_init() local
1093 ctx->hash_tfm = crypto_alloc_shash(hash_name, 0, 0); qat_alg_aead_init()
1094 if (IS_ERR(ctx->hash_tfm)) qat_alg_aead_init()
1096 spin_lock_init(&ctx->lock); qat_alg_aead_init()
1097 ctx->qat_hash_alg = hash; qat_alg_aead_init()
1100 ctx->tfm = tfm; qat_alg_aead_init()
1121 struct qat_alg_aead_ctx *ctx = crypto_tfm_ctx(tfm); qat_alg_aead_exit() local
1122 struct qat_crypto_instance *inst = ctx->inst; qat_alg_aead_exit()
1125 if (!IS_ERR(ctx->hash_tfm)) qat_alg_aead_exit()
1126 crypto_free_shash(ctx->hash_tfm); qat_alg_aead_exit()
1132 if (ctx->enc_cd) { qat_alg_aead_exit()
1133 memset(ctx->enc_cd, 0, sizeof(struct qat_alg_cd)); qat_alg_aead_exit()
1135 ctx->enc_cd, ctx->enc_cd_paddr); qat_alg_aead_exit()
1137 if (ctx->dec_cd) { qat_alg_aead_exit()
1138 memset(ctx->dec_cd, 0, sizeof(struct qat_alg_cd)); qat_alg_aead_exit()
1140 ctx->dec_cd, ctx->dec_cd_paddr); qat_alg_aead_exit()
1147 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); qat_alg_ablkcipher_init() local
1149 spin_lock_init(&ctx->lock); qat_alg_ablkcipher_init()
1152 ctx->tfm = tfm; qat_alg_ablkcipher_init()
1158 struct qat_alg_ablkcipher_ctx *ctx = crypto_tfm_ctx(tfm); qat_alg_ablkcipher_exit() local
1159 struct qat_crypto_instance *inst = ctx->inst; qat_alg_ablkcipher_exit()
1166 if (ctx->enc_cd) { qat_alg_ablkcipher_exit()
1167 memset(ctx->enc_cd, 0, qat_alg_ablkcipher_exit()
1171 ctx->enc_cd, ctx->enc_cd_paddr); qat_alg_ablkcipher_exit()
1173 if (ctx->dec_cd) { qat_alg_ablkcipher_exit()
1174 memset(ctx->dec_cd, 0, qat_alg_ablkcipher_exit()
1178 ctx->dec_cd, ctx->dec_cd_paddr); qat_alg_ablkcipher_exit()
153 qat_alg_do_precomputes(struct icp_qat_hw_auth_algo_blk *hash, struct qat_alg_aead_ctx *ctx, const uint8_t *auth_key, unsigned int auth_keylen) qat_alg_do_precomputes() argument
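
qat_alg_aead_setkey() above unwinds failures with the classic goto ladder and, because the content descriptors hold key material, zeroes each buffer before handing it back (memset, then dma_free_coherent, then NULL the pointer). A runnable userspace analog of that shape; one caveat is that in ordinary userspace code a plain memset before free() may be removed as a dead store, so explicit_bzero()/memzero_explicit() would be the safer scrub.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct alg_ctx {
	unsigned char *enc_cd;
	unsigned char *dec_cd;
};

#define CD_SIZE 64

static int hypothetical_init_sessions(struct alg_ctx *ctx)
{
	return -1;	/* force the failure path for the demo */
}

static int alg_setkey(struct alg_ctx *ctx)
{
	ctx->enc_cd = calloc(1, CD_SIZE);
	if (!ctx->enc_cd)
		goto out;
	ctx->dec_cd = calloc(1, CD_SIZE);
	if (!ctx->dec_cd)
		goto out_free_enc;
	if (hypothetical_init_sessions(ctx))
		goto out_free_all;
	return 0;

out_free_all:
	memset(ctx->dec_cd, 0, CD_SIZE);	/* scrub key material first */
	free(ctx->dec_cd);
	ctx->dec_cd = NULL;
out_free_enc:
	memset(ctx->enc_cd, 0, CD_SIZE);
	free(ctx->enc_cd);
	ctx->enc_cd = NULL;
out:
	return -1;
}

int main(void)
{
	struct alg_ctx ctx = { 0 };

	printf("setkey: %d (enc_cd=%p dec_cd=%p)\n",
	       alg_setkey(&ctx), (void *)ctx.enc_cd, (void *)ctx.dec_cd);
	return 0;
}
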
/linux-4.1.27/fs/cifs/
H A Dasn1.c109 asn1_open(struct asn1_ctx *ctx, unsigned char *buf, unsigned int len) asn1_open() argument
111 ctx->begin = buf; asn1_open()
112 ctx->end = buf + len; asn1_open()
113 ctx->pointer = buf; asn1_open()
114 ctx->error = ASN1_ERR_NOERROR; asn1_open()
118 asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) asn1_octet_decode() argument
120 if (ctx->pointer >= ctx->end) { asn1_octet_decode()
121 ctx->error = ASN1_ERR_DEC_EMPTY; asn1_octet_decode()
124 *ch = *(ctx->pointer)++; asn1_octet_decode()
130 asn1_enum_decode(struct asn1_ctx *ctx, __le32 *val)
134 if (ctx->pointer >= ctx->end) {
135 ctx->error = ASN1_ERR_DEC_EMPTY;
139 ch = *(ctx->pointer)++; /* ch has 0xa, ptr points to length octet */
141 *val = *(++(ctx->pointer)); /* value has enum value */
145 ctx->pointer++;
151 asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) asn1_tag_decode() argument
158 if (!asn1_octet_decode(ctx, &ch)) asn1_tag_decode()
167 asn1_id_decode(struct asn1_ctx *ctx, asn1_id_decode() argument
172 if (!asn1_octet_decode(ctx, &ch)) asn1_id_decode()
180 if (!asn1_tag_decode(ctx, tag)) asn1_id_decode()
187 asn1_length_decode(struct asn1_ctx *ctx, unsigned int *def, unsigned int *len) asn1_length_decode() argument
191 if (!asn1_octet_decode(ctx, &ch)) asn1_length_decode()
206 if (!asn1_octet_decode(ctx, &ch)) asn1_length_decode()
215 /* don't trust len bigger than ctx buffer */ asn1_length_decode()
216 if (*len > ctx->end - ctx->pointer) asn1_length_decode()
223 asn1_header_decode(struct asn1_ctx *ctx, asn1_header_decode() argument
230 if (!asn1_id_decode(ctx, cls, con, tag)) asn1_header_decode()
233 if (!asn1_length_decode(ctx, &def, &len)) asn1_header_decode()
241 *eoc = ctx->pointer + len; asn1_header_decode()
248 asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc) asn1_eoc_decode() argument
253 if (!asn1_octet_decode(ctx, &ch)) asn1_eoc_decode()
257 ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; asn1_eoc_decode()
261 if (!asn1_octet_decode(ctx, &ch)) asn1_eoc_decode()
265 ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; asn1_eoc_decode()
270 if (ctx->pointer != eoc) { asn1_eoc_decode()
271 ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH; asn1_eoc_decode()
278 /* static unsigned char asn1_null_decode(struct asn1_ctx *ctx,
281 ctx->pointer = eoc;
285 static unsigned char asn1_long_decode(struct asn1_ctx *ctx,
291 if (!asn1_octet_decode(ctx, &ch))
297 while (ctx->pointer < eoc) {
299 ctx->error = ASN1_ERR_DEC_BADVALUE;
303 if (!asn1_octet_decode(ctx, &ch))
312 static unsigned char asn1_uint_decode(struct asn1_ctx *ctx,
319 if (!asn1_octet_decode(ctx, &ch))
328 while (ctx->pointer < eoc) {
330 ctx->error = ASN1_ERR_DEC_BADVALUE;
334 if (!asn1_octet_decode(ctx, &ch))
343 static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx,
350 if (!asn1_octet_decode(ctx, &ch))
359 while (ctx->pointer < eoc) {
361 ctx->error = ASN1_ERR_DEC_BADVALUE;
365 if (!asn1_octet_decode(ctx, &ch))
375 asn1_octets_decode(struct asn1_ctx *ctx,
383 *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
389 while (ctx->pointer < eoc) {
390 if (!asn1_octet_decode(ctx, (unsigned char *) ptr++)) {
401 asn1_subid_decode(struct asn1_ctx *ctx, unsigned long *subid) asn1_subid_decode() argument
408 if (!asn1_octet_decode(ctx, &ch)) asn1_subid_decode()
418 asn1_oid_decode(struct asn1_ctx *ctx, asn1_oid_decode() argument
425 size = eoc - ctx->pointer + 1; asn1_oid_decode()
437 if (!asn1_subid_decode(ctx, &subid)) { asn1_oid_decode()
457 while (ctx->pointer < eoc) { asn1_oid_decode()
459 ctx->error = ASN1_ERR_DEC_BADVALUE; asn1_oid_decode()
465 if (!asn1_subid_decode(ctx, optr++)) { asn1_oid_decode()
497 struct asn1_ctx ctx; decode_negTokenInit() local
505 asn1_open(&ctx, security_blob, length); decode_negTokenInit()
508 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { decode_negTokenInit()
518 rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag); decode_negTokenInit()
522 rc = asn1_oid_decode(&ctx, end, &oid, &oidlen); decode_negTokenInit()
539 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { decode_negTokenInit()
550 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { decode_negTokenInit()
561 if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) { decode_negTokenInit()
573 (&ctx, &sequence_end, &cls, &con, &tag) == 0) { decode_negTokenInit()
584 while (!asn1_eoc_decode(&ctx, sequence_end)) { decode_negTokenInit()
585 rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag); decode_negTokenInit()
591 if (asn1_oid_decode(&ctx, end, &oid, &oidlen)) { decode_negTokenInit()
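
asn1_length_decode() above implements BER's length encodings: a single octet below 0x80 is the length itself (short form); 0x80 alone marks an indefinite length; anything else is long form, where the low 7 bits say how many big-endian length octets follow. The final guard ("don't trust len bigger than ctx buffer") is what stops a hostile SPNEGO blob from declaring a length past the end of the buffer. A runnable stand-alone reconstruction under those assumptions, with the helper names simplified:

#include <stdio.h>

struct asn1_ctx {
	const unsigned char *pointer, *end;
};

static int octet_decode(struct asn1_ctx *ctx, unsigned char *ch)
{
	if (ctx->pointer >= ctx->end)
		return 0;	/* ran off the buffer */
	*ch = *ctx->pointer++;
	return 1;
}

/* Returns 1 on success; *def = 0 means indefinite length. */
static int length_decode(struct asn1_ctx *ctx, unsigned int *def,
			 unsigned int *len)
{
	unsigned char ch;

	if (!octet_decode(ctx, &ch))
		return 0;
	if (ch == 0x80) {
		*def = 0;			/* indefinite form */
		*len = 0;
	} else {
		*def = 1;
		if (ch < 0x80) {
			*len = ch;		/* short form */
		} else {
			unsigned int cnt = ch & 0x7f;	/* long form */

			*len = 0;
			while (cnt-- > 0) {
				if (!octet_decode(ctx, &ch))
					return 0;
				*len = (*len << 8) | ch;
			}
		}
		/* don't trust len bigger than the remaining buffer */
		if (*len > (unsigned int)(ctx->end - ctx->pointer))
			return 0;
	}
	return 1;
}

int main(void)
{
	/* Short form: 0x05 means five content bytes, which do follow. */
	const unsigned char ok[] = { 0x05, 1, 2, 3, 4, 5 };
	/* Long form claiming 0x190 = 400 bytes with none following. */
	const unsigned char bad[] = { 0x82, 0x01, 0x90 };
	struct asn1_ctx c1 = { ok + 1, ok + sizeof(ok) };
	struct asn1_ctx c2 = { bad + 1, bad + sizeof(bad) };
	unsigned int def, len;

	c1.pointer = ok; c2.pointer = bad;
	printf("ok:  %d", length_decode(&c1, &def, &len));
	printf(" (len=%u)\n", len);
	printf("bad: %d (length overran buffer)\n",
	       length_decode(&c2, &def, &len));
	return 0;
}
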
/linux-4.1.27/drivers/media/platform/
H A Dm2m-deinterlace.c158 struct deinterlace_ctx *ctx = priv; deinterlace_job_ready() local
159 struct deinterlace_dev *pcdev = ctx->dev; deinterlace_job_ready()
161 if ((v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) > 0) deinterlace_job_ready()
162 && (v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) > 0) deinterlace_job_ready()
163 && (atomic_read(&ctx->dev->busy) == 0)) { deinterlace_job_ready()
175 struct deinterlace_ctx *ctx = priv; deinterlace_job_abort() local
176 struct deinterlace_dev *pcdev = ctx->dev; deinterlace_job_abort()
178 ctx->aborting = 1; deinterlace_job_abort()
182 v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx); deinterlace_job_abort()
187 struct deinterlace_ctx *ctx = priv; deinterlace_lock() local
188 struct deinterlace_dev *pcdev = ctx->dev; deinterlace_lock()
194 struct deinterlace_ctx *ctx = priv; deinterlace_unlock() local
195 struct deinterlace_dev *pcdev = ctx->dev; deinterlace_unlock()
224 static void deinterlace_issue_dma(struct deinterlace_ctx *ctx, int op, deinterlace_issue_dma() argument
229 struct deinterlace_dev *pcdev = ctx->dev; deinterlace_issue_dma()
238 src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); deinterlace_issue_dma()
239 dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); deinterlace_issue_dma()
256 ctx->xt->numf = s_height / 2; deinterlace_issue_dma()
257 ctx->xt->sgl[0].size = s_width; deinterlace_issue_dma()
258 ctx->xt->sgl[0].icg = s_width; deinterlace_issue_dma()
259 ctx->xt->src_start = p_in; deinterlace_issue_dma()
260 ctx->xt->dst_start = p_out; deinterlace_issue_dma()
263 ctx->xt->numf = s_height / 2; deinterlace_issue_dma()
264 ctx->xt->sgl[0].size = s_width; deinterlace_issue_dma()
265 ctx->xt->sgl[0].icg = s_width; deinterlace_issue_dma()
266 ctx->xt->src_start = p_in + s_size / 2; deinterlace_issue_dma()
267 ctx->xt->dst_start = p_out + s_width; deinterlace_issue_dma()
270 ctx->xt->numf = s_height / 4; deinterlace_issue_dma()
271 ctx->xt->sgl[0].size = s_width / 2; deinterlace_issue_dma()
272 ctx->xt->sgl[0].icg = s_width / 2; deinterlace_issue_dma()
273 ctx->xt->src_start = p_in + s_size; deinterlace_issue_dma()
274 ctx->xt->dst_start = p_out + s_size; deinterlace_issue_dma()
277 ctx->xt->numf = s_height / 4; deinterlace_issue_dma()
278 ctx->xt->sgl[0].size = s_width / 2; deinterlace_issue_dma()
279 ctx->xt->sgl[0].icg = s_width / 2; deinterlace_issue_dma()
280 ctx->xt->src_start = p_in + (9 * s_size) / 8; deinterlace_issue_dma()
281 ctx->xt->dst_start = p_out + s_size + s_width / 2; deinterlace_issue_dma()
284 ctx->xt->numf = s_height / 4; deinterlace_issue_dma()
285 ctx->xt->sgl[0].size = s_width / 2; deinterlace_issue_dma()
286 ctx->xt->sgl[0].icg = s_width / 2; deinterlace_issue_dma()
287 ctx->xt->src_start = p_in + (5 * s_size) / 4; deinterlace_issue_dma()
288 ctx->xt->dst_start = p_out + (5 * s_size) / 4; deinterlace_issue_dma()
291 ctx->xt->numf = s_height / 4; deinterlace_issue_dma()
292 ctx->xt->sgl[0].size = s_width / 2; deinterlace_issue_dma()
293 ctx->xt->sgl[0].icg = s_width / 2; deinterlace_issue_dma()
294 ctx->xt->src_start = p_in + (11 * s_size) / 8; deinterlace_issue_dma()
295 ctx->xt->dst_start = p_out + (5 * s_size) / 4 + s_width / 2; deinterlace_issue_dma()
298 ctx->xt->numf = s_height / 2; deinterlace_issue_dma()
299 ctx->xt->sgl[0].size = s_width; deinterlace_issue_dma()
300 ctx->xt->sgl[0].icg = s_width; deinterlace_issue_dma()
301 ctx->xt->src_start = p_in; deinterlace_issue_dma()
302 ctx->xt->dst_start = p_out + s_width; deinterlace_issue_dma()
305 ctx->xt->numf = s_height / 4; deinterlace_issue_dma()
306 ctx->xt->sgl[0].size = s_width / 2; deinterlace_issue_dma()
307 ctx->xt->sgl[0].icg = s_width / 2; deinterlace_issue_dma()
308 ctx->xt->src_start = p_in + s_size; deinterlace_issue_dma()
309 ctx->xt->dst_start = p_out + s_size + s_width / 2; deinterlace_issue_dma()
312 ctx->xt->numf = s_height / 4; deinterlace_issue_dma()
313 ctx->xt->sgl[0].size = s_width / 2; deinterlace_issue_dma()
314 ctx->xt->sgl[0].icg = s_width / 2; deinterlace_issue_dma()
315 ctx->xt->src_start = p_in + (5 * s_size) / 4; deinterlace_issue_dma()
316 ctx->xt->dst_start = p_out + (5 * s_size) / 4 + s_width / 2; deinterlace_issue_dma()
319 ctx->xt->numf = s_height / 2; deinterlace_issue_dma()
320 ctx->xt->sgl[0].size = s_width * 2; deinterlace_issue_dma()
321 ctx->xt->sgl[0].icg = s_width * 2; deinterlace_issue_dma()
322 ctx->xt->src_start = p_in; deinterlace_issue_dma()
323 ctx->xt->dst_start = p_out; deinterlace_issue_dma()
326 ctx->xt->numf = s_height / 2; deinterlace_issue_dma()
327 ctx->xt->sgl[0].size = s_width * 2; deinterlace_issue_dma()
328 ctx->xt->sgl[0].icg = s_width * 2; deinterlace_issue_dma()
329 ctx->xt->src_start = p_in + s_size; deinterlace_issue_dma()
330 ctx->xt->dst_start = p_out + s_width * 2; deinterlace_issue_dma()
334 ctx->xt->numf = s_height / 2; deinterlace_issue_dma()
335 ctx->xt->sgl[0].size = s_width * 2; deinterlace_issue_dma()
336 ctx->xt->sgl[0].icg = s_width * 2; deinterlace_issue_dma()
337 ctx->xt->src_start = p_in; deinterlace_issue_dma()
338 ctx->xt->dst_start = p_out + s_width * 2; deinterlace_issue_dma()
343 ctx->xt->frame_size = 1; deinterlace_issue_dma()
344 ctx->xt->dir = DMA_MEM_TO_MEM; deinterlace_issue_dma()
345 ctx->xt->src_sgl = false; deinterlace_issue_dma()
346 ctx->xt->dst_sgl = true; deinterlace_issue_dma()
349 tx = dmadev->device_prep_interleaved_dma(chan, ctx->xt, flags); deinterlace_issue_dma()
357 tx->callback_param = ctx; deinterlace_issue_dma()
360 ctx->cookie = dmaengine_submit(tx); deinterlace_issue_dma()
361 if (dma_submit_error(ctx->cookie)) { deinterlace_issue_dma()
364 ctx->cookie, (unsigned)p_in, (unsigned)p_out, deinterlace_issue_dma()
374 struct deinterlace_ctx *ctx = priv; deinterlace_device_run() local
377 atomic_set(&ctx->dev->busy, 1); deinterlace_device_run()
379 dprintk(ctx->dev, "%s: DMA try issue.\n", __func__); deinterlace_device_run()
403 dprintk(ctx->dev, "%s: yuv420 interlaced tb.\n", deinterlace_device_run()
405 deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD, 0); deinterlace_device_run()
406 deinterlace_issue_dma(ctx, YUV420_DMA_Y_EVEN, 0); deinterlace_device_run()
407 deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD, 0); deinterlace_device_run()
408 deinterlace_issue_dma(ctx, YUV420_DMA_U_EVEN, 0); deinterlace_device_run()
409 deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD, 0); deinterlace_device_run()
410 deinterlace_issue_dma(ctx, YUV420_DMA_V_EVEN, 1); deinterlace_device_run()
414 dprintk(ctx->dev, "%s: yuv420 interlaced line doubling.\n", deinterlace_device_run()
416 deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD, 0); deinterlace_device_run()
417 deinterlace_issue_dma(ctx, YUV420_DMA_Y_ODD_DOUBLING, 0); deinterlace_device_run()
418 deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD, 0); deinterlace_device_run()
419 deinterlace_issue_dma(ctx, YUV420_DMA_U_ODD_DOUBLING, 0); deinterlace_device_run()
420 deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD, 0); deinterlace_device_run()
421 deinterlace_issue_dma(ctx, YUV420_DMA_V_ODD_DOUBLING, 1); deinterlace_device_run()
430 dprintk(ctx->dev, "%s: yuyv interlaced_tb.\n", deinterlace_device_run()
432 deinterlace_issue_dma(ctx, YUYV_DMA_ODD, 0); deinterlace_device_run()
433 deinterlace_issue_dma(ctx, YUYV_DMA_EVEN, 1); deinterlace_device_run()
437 dprintk(ctx->dev, "%s: yuyv interlaced line doubling.\n", deinterlace_device_run()
439 deinterlace_issue_dma(ctx, YUYV_DMA_ODD, 0); deinterlace_device_run()
440 deinterlace_issue_dma(ctx, YUYV_DMA_EVEN_DOUBLING, 1); deinterlace_device_run()
446 dprintk(ctx->dev, "%s: DMA issue done.\n", __func__); deinterlace_device_run()
512 static int vidioc_g_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f) vidioc_g_fmt() argument
517 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); vidioc_g_fmt()
538 f->fmt.pix.colorspace = ctx->colorspace; vidioc_g_fmt()
574 struct deinterlace_ctx *ctx = priv; vidioc_try_fmt_vid_cap() local
580 f->fmt.pix.colorspace = ctx->colorspace; vidioc_try_fmt_vid_cap()
609 static int vidioc_s_fmt(struct deinterlace_ctx *ctx, struct v4l2_format *f) vidioc_s_fmt() argument
614 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); vidioc_s_fmt()
623 v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__); vidioc_s_fmt()
629 v4l2_err(&ctx->dev->v4l2_dev, vidioc_s_fmt()
651 dprintk(ctx->dev, vidioc_s_fmt()
673 struct deinterlace_ctx *ctx = priv; vidioc_s_fmt_vid_out() local
682 ctx->colorspace = f->fmt.pix.colorspace; vidioc_s_fmt_vid_out()
690 struct deinterlace_ctx *ctx = priv; vidioc_reqbufs() local
692 return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); vidioc_reqbufs()
698 struct deinterlace_ctx *ctx = priv; vidioc_querybuf() local
700 return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); vidioc_querybuf()
705 struct deinterlace_ctx *ctx = priv; vidioc_qbuf() local
707 return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); vidioc_qbuf()
712 struct deinterlace_ctx *ctx = priv; vidioc_dqbuf() local
714 return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); vidioc_dqbuf()
721 struct deinterlace_ctx *ctx = priv; vidioc_streamon() local
728 v4l2_err(&ctx->dev->v4l2_dev, vidioc_streamon()
738 v4l2_err(&ctx->dev->v4l2_dev, vidioc_streamon()
747 v4l2_err(&ctx->dev->v4l2_dev, vidioc_streamon()
757 return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); vidioc_streamon()
763 struct deinterlace_ctx *ctx = priv; vidioc_streamoff() local
765 return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); vidioc_streamoff()
804 struct deinterlace_ctx *ctx = vb2_get_drv_priv(vq); deinterlace_queue_setup() local
823 alloc_ctxs[0] = ctx->dev->alloc_ctx; deinterlace_queue_setup()
825 dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size); deinterlace_queue_setup()
832 struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); deinterlace_buf_prepare() local
835 dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type); deinterlace_buf_prepare()
840 dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n", deinterlace_buf_prepare()
852 struct deinterlace_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); deinterlace_buf_queue() local
853 v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); deinterlace_buf_queue()
865 struct deinterlace_ctx *ctx = priv; queue_init() local
870 src_vq->drv_priv = ctx; queue_init()
887 dst_vq->drv_priv = ctx; queue_init()
907 struct deinterlace_ctx *ctx = NULL; deinterlace_open() local
909 ctx = kzalloc(sizeof *ctx, GFP_KERNEL); deinterlace_open()
910 if (!ctx) deinterlace_open()
913 file->private_data = ctx; deinterlace_open()
914 ctx->dev = pcdev; deinterlace_open()
916 ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init); deinterlace_open()
917 if (IS_ERR(ctx->m2m_ctx)) { deinterlace_open()
918 int ret = PTR_ERR(ctx->m2m_ctx); deinterlace_open()
920 kfree(ctx); deinterlace_open()
924 ctx->xt = kzalloc(sizeof(struct dma_interleaved_template) + deinterlace_open()
926 if (!ctx->xt) { deinterlace_open()
927 kfree(ctx); deinterlace_open()
931 ctx->colorspace = V4L2_COLORSPACE_REC709; deinterlace_open()
933 dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx); deinterlace_open()
941 struct deinterlace_ctx *ctx = file->private_data; deinterlace_release() local
943 dprintk(pcdev, "Releasing instance %p\n", ctx); deinterlace_release()
945 v4l2_m2m_ctx_release(ctx->m2m_ctx); deinterlace_release()
946 kfree(ctx->xt); deinterlace_release()
947 kfree(ctx); deinterlace_release()
955 struct deinterlace_ctx *ctx = file->private_data; deinterlace_poll() local
958 deinterlace_lock(ctx); deinterlace_poll()
959 ret = v4l2_m2m_poll(file, ctx->m2m_ctx, wait); deinterlace_poll()
960 deinterlace_unlock(ctx); deinterlace_poll()
967 struct deinterlace_ctx *ctx = file->private_data; deinterlace_mmap() local
969 return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); deinterlace_mmap()
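
The m2m-deinterlace.c hits above all refill a single struct dma_interleaved_template per field (numf, sgl[0].size, sgl[0].icg, src_start, dst_start) and hand it to the dmaengine. A minimal sketch of that prepare/submit path, assuming a template allocated with room for one data_chunk as deinterlace_open() does; the wrapper name issue_one_field() and the error codes are illustrative:

#include <linux/dmaengine.h>

/* Move 'lines' lines of 'line_bytes' each, skipping 'icg_bytes' after
 * every line on the scattered side -- the same shape each case of
 * deinterlace_issue_dma() programs above.
 */
static int issue_one_field(struct dma_chan *chan,
			   struct dma_interleaved_template *xt,
			   dma_addr_t src, dma_addr_t dst,
			   size_t line_bytes, size_t icg_bytes, size_t lines,
			   dma_async_tx_callback done, void *param)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	xt->src_start = src;
	xt->dst_start = dst;
	xt->numf = lines;		/* one "frame" per line... */
	xt->frame_size = 1;		/* ...made of a single chunk */
	xt->sgl[0].size = line_bytes;	/* bytes moved per line */
	xt->sgl[0].icg = icg_bytes;	/* gap skipped after each line */
	xt->dir = DMA_MEM_TO_MEM;
	xt->src_sgl = false;		/* source is read contiguously */
	xt->dst_sgl = true;		/* destination honours the icg */

	tx = dmaengine_prep_interleaved_dma(chan, xt,
					    DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!tx)
		return -EBUSY;

	tx->callback = done;		/* runs from the dmaengine tasklet */
	tx->callback_param = param;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EINVAL;

	dma_async_issue_pending(chan);	/* actually start the transfer */
	return 0;
}
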
H A Dvim2m.c187 static struct vim2m_q_data *get_q_data(struct vim2m_ctx *ctx, get_q_data() argument
192 return &ctx->q_data[V4L2_M2M_SRC]; get_q_data()
194 return &ctx->q_data[V4L2_M2M_DST]; get_q_data()
202 static int device_process(struct vim2m_ctx *ctx, device_process() argument
206 struct vim2m_dev *dev = ctx->dev; device_process()
213 q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); device_process()
237 out_vb->v4l2_buf.sequence = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE)->sequence++; device_process()
253 switch (ctx->mode) { device_process()
351 struct vim2m_ctx *ctx = priv; job_ready() local
353 if (v4l2_m2m_num_src_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen job_ready()
354 || v4l2_m2m_num_dst_bufs_ready(ctx->fh.m2m_ctx) < ctx->translen) { job_ready()
355 dprintk(ctx->dev, "Not enough buffers available\n"); job_ready()
364 struct vim2m_ctx *ctx = priv; job_abort() local
367 ctx->aborting = 1; job_abort()
378 struct vim2m_ctx *ctx = priv; device_run() local
379 struct vim2m_dev *dev = ctx->dev; device_run()
382 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); device_run()
383 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); device_run()
385 device_process(ctx, src_buf, dst_buf); device_run()
388 schedule_irq(dev, ctx->transtime); device_run()
482 static int vidioc_g_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f) vidioc_g_fmt() argument
487 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); vidioc_g_fmt()
491 q_data = get_q_data(ctx, f->type); vidioc_g_fmt()
499 f->fmt.pix.colorspace = ctx->colorspace; vidioc_g_fmt()
542 struct vim2m_ctx *ctx = file2ctx(file); vidioc_try_fmt_vid_cap() local
550 v4l2_err(&ctx->dev->v4l2_dev, vidioc_try_fmt_vid_cap()
555 f->fmt.pix.colorspace = ctx->colorspace; vidioc_try_fmt_vid_cap()
564 struct vim2m_ctx *ctx = file2ctx(file); vidioc_try_fmt_vid_out() local
572 v4l2_err(&ctx->dev->v4l2_dev, vidioc_try_fmt_vid_out()
583 static int vidioc_s_fmt(struct vim2m_ctx *ctx, struct v4l2_format *f) vidioc_s_fmt() argument
588 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); vidioc_s_fmt()
592 q_data = get_q_data(ctx, f->type); vidioc_s_fmt()
597 v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__); vidioc_s_fmt()
607 dprintk(ctx->dev, vidioc_s_fmt()
629 struct vim2m_ctx *ctx = file2ctx(file); vidioc_s_fmt_vid_out() local
638 ctx->colorspace = f->fmt.pix.colorspace; vidioc_s_fmt_vid_out()
644 struct vim2m_ctx *ctx = vim2m_s_ctrl() local
650 ctx->mode |= MEM2MEM_HFLIP; vim2m_s_ctrl()
652 ctx->mode &= ~MEM2MEM_HFLIP; vim2m_s_ctrl()
657 ctx->mode |= MEM2MEM_VFLIP; vim2m_s_ctrl()
659 ctx->mode &= ~MEM2MEM_VFLIP; vim2m_s_ctrl()
663 ctx->transtime = ctrl->val; vim2m_s_ctrl()
667 ctx->translen = ctrl->val; vim2m_s_ctrl()
671 v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n"); vim2m_s_ctrl()
719 struct vim2m_ctx *ctx = vb2_get_drv_priv(vq); vim2m_queue_setup() local
723 q_data = get_q_data(ctx, vq->type); vim2m_queue_setup()
739 dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size); vim2m_queue_setup()
746 struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); vim2m_buf_prepare() local
749 dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type); vim2m_buf_prepare()
751 q_data = get_q_data(ctx, vb->vb2_queue->type); vim2m_buf_prepare()
756 dprintk(ctx->dev, "%s field isn't supported\n", vim2m_buf_prepare()
763 dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n", vim2m_buf_prepare()
775 struct vim2m_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); vim2m_buf_queue() local
777 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); vim2m_buf_queue()
782 struct vim2m_ctx *ctx = vb2_get_drv_priv(q); vim2m_start_streaming() local
783 struct vim2m_q_data *q_data = get_q_data(ctx, q->type); vim2m_start_streaming()
791 struct vim2m_ctx *ctx = vb2_get_drv_priv(q); vim2m_stop_streaming() local
797 vb = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); vim2m_stop_streaming()
799 vb = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); vim2m_stop_streaming()
802 spin_lock_irqsave(&ctx->dev->irqlock, flags); vim2m_stop_streaming()
804 spin_unlock_irqrestore(&ctx->dev->irqlock, flags); vim2m_stop_streaming()
820 struct vim2m_ctx *ctx = priv; queue_init() local
825 src_vq->drv_priv = ctx; queue_init()
830 src_vq->lock = &ctx->dev->dev_mutex; queue_init()
838 dst_vq->drv_priv = ctx; queue_init()
843 dst_vq->lock = &ctx->dev->dev_mutex; queue_init()
876 struct vim2m_ctx *ctx = NULL; vim2m_open() local
882 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); vim2m_open()
883 if (!ctx) { vim2m_open()
888 v4l2_fh_init(&ctx->fh, video_devdata(file)); vim2m_open()
889 file->private_data = &ctx->fh; vim2m_open()
890 ctx->dev = dev; vim2m_open()
891 hdl = &ctx->hdl; vim2m_open()
902 ctx->fh.ctrl_handler = hdl; vim2m_open()
905 ctx->q_data[V4L2_M2M_SRC].fmt = &formats[0]; vim2m_open()
906 ctx->q_data[V4L2_M2M_SRC].width = 640; vim2m_open()
907 ctx->q_data[V4L2_M2M_SRC].height = 480; vim2m_open()
908 ctx->q_data[V4L2_M2M_SRC].sizeimage = vim2m_open()
909 ctx->q_data[V4L2_M2M_SRC].width * vim2m_open()
910 ctx->q_data[V4L2_M2M_SRC].height * vim2m_open()
911 (ctx->q_data[V4L2_M2M_SRC].fmt->depth >> 3); vim2m_open()
912 ctx->q_data[V4L2_M2M_DST] = ctx->q_data[V4L2_M2M_SRC]; vim2m_open()
913 ctx->colorspace = V4L2_COLORSPACE_REC709; vim2m_open()
915 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); vim2m_open()
917 if (IS_ERR(ctx->fh.m2m_ctx)) { vim2m_open()
918 rc = PTR_ERR(ctx->fh.m2m_ctx); vim2m_open()
921 kfree(ctx); vim2m_open()
925 v4l2_fh_add(&ctx->fh); vim2m_open()
929 ctx, ctx->fh.m2m_ctx); vim2m_open()
939 struct vim2m_ctx *ctx = file2ctx(file); vim2m_release() local
941 dprintk(dev, "Releasing instance %p\n", ctx); vim2m_release()
943 v4l2_fh_del(&ctx->fh); vim2m_release()
944 v4l2_fh_exit(&ctx->fh); vim2m_release()
945 v4l2_ctrl_handler_free(&ctx->hdl); vim2m_release()
947 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); vim2m_release()
949 kfree(ctx); vim2m_release()
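
Unlike the deinterlace driver, vim2m.c stores &ctx->fh in file->private_data (see vim2m_open() above) and recovers the context with container_of(). That round trip in isolation, a sketch with the non-essential fields elided, consistent with the file2ctx() calls in the hits:

#include <linux/kernel.h>
#include <linux/fs.h>
#include <media/v4l2-fh.h>

struct vim2m_ctx {
	struct v4l2_fh fh;	/* embedded file handle; &ctx->fh is what
				 * vim2m_open() puts in file->private_data */
	/* ...ctrl handler, queue data, mode, timings elided... */
};

static inline struct vim2m_ctx *file2ctx(struct file *file)
{
	/* private_data points at the embedded fh, so walk back
	 * to the enclosing context */
	return container_of(file->private_data, struct vim2m_ctx, fh);
}
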
H A Dmx2_emmaprp.c225 static struct emmaprp_q_data *get_q_data(struct emmaprp_ctx *ctx, get_q_data() argument
230 return &(ctx->q_data[V4L2_M2M_SRC]); get_q_data()
232 return &(ctx->q_data[V4L2_M2M_DST]); get_q_data()
244 struct emmaprp_ctx *ctx = priv; emmaprp_job_abort() local
245 struct emmaprp_dev *pcdev = ctx->dev; emmaprp_job_abort()
247 ctx->aborting = 1; emmaprp_job_abort()
251 v4l2_m2m_job_finish(pcdev->m2m_dev, ctx->m2m_ctx); emmaprp_job_abort()
256 struct emmaprp_ctx *ctx = priv; emmaprp_lock() local
257 struct emmaprp_dev *pcdev = ctx->dev; emmaprp_lock()
263 struct emmaprp_ctx *ctx = priv; emmaprp_unlock() local
264 struct emmaprp_dev *pcdev = ctx->dev; emmaprp_unlock()
290 struct emmaprp_ctx *ctx = priv; emmaprp_device_run() local
293 struct emmaprp_dev *pcdev = ctx->dev; emmaprp_device_run()
300 src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx); emmaprp_device_run()
301 dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); emmaprp_device_run()
303 s_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_OUTPUT); emmaprp_device_run()
307 d_q_data = get_q_data(ctx, V4L2_BUF_TYPE_VIDEO_CAPTURE); emmaprp_device_run()
452 static int vidioc_g_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f) vidioc_g_fmt() argument
457 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); vidioc_g_fmt()
461 q_data = get_q_data(ctx, f->type); vidioc_g_fmt()
526 struct emmaprp_ctx *ctx = priv; vidioc_try_fmt_vid_cap() local
530 v4l2_err(&ctx->dev->v4l2_dev, vidioc_try_fmt_vid_cap()
543 struct emmaprp_ctx *ctx = priv; vidioc_try_fmt_vid_out() local
547 v4l2_err(&ctx->dev->v4l2_dev, vidioc_try_fmt_vid_out()
556 static int vidioc_s_fmt(struct emmaprp_ctx *ctx, struct v4l2_format *f) vidioc_s_fmt() argument
562 vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type); vidioc_s_fmt()
566 q_data = get_q_data(ctx, f->type); vidioc_s_fmt()
571 v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__); vidioc_s_fmt()
587 dprintk(ctx->dev, vidioc_s_fmt()
621 struct emmaprp_ctx *ctx = priv; vidioc_reqbufs() local
623 return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs); vidioc_reqbufs()
629 struct emmaprp_ctx *ctx = priv; vidioc_querybuf() local
631 return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf); vidioc_querybuf()
636 struct emmaprp_ctx *ctx = priv; vidioc_qbuf() local
638 return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf); vidioc_qbuf()
643 struct emmaprp_ctx *ctx = priv; vidioc_dqbuf() local
645 return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf); vidioc_dqbuf()
651 struct emmaprp_ctx *ctx = priv; vidioc_streamon() local
653 return v4l2_m2m_streamon(file, ctx->m2m_ctx, type); vidioc_streamon()
659 struct emmaprp_ctx *ctx = priv; vidioc_streamoff() local
661 return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type); vidioc_streamoff()
696 struct emmaprp_ctx *ctx = vb2_get_drv_priv(vq); emmaprp_queue_setup() local
700 q_data = get_q_data(ctx, vq->type); emmaprp_queue_setup()
714 alloc_ctxs[0] = ctx->dev->alloc_ctx; emmaprp_queue_setup()
716 dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size); emmaprp_queue_setup()
723 struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); emmaprp_buf_prepare() local
726 dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type); emmaprp_buf_prepare()
728 q_data = get_q_data(ctx, vb->vb2_queue->type); emmaprp_buf_prepare()
731 dprintk(ctx->dev, "%s data will not fit into plane" emmaprp_buf_prepare()
745 struct emmaprp_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); emmaprp_buf_queue() local
746 v4l2_m2m_buf_queue(ctx->m2m_ctx, vb); emmaprp_buf_queue()
758 struct emmaprp_ctx *ctx = priv; queue_init() local
763 src_vq->drv_priv = ctx; queue_init()
775 dst_vq->drv_priv = ctx; queue_init()
790 struct emmaprp_ctx *ctx; emmaprp_open() local
792 ctx = kzalloc(sizeof *ctx, GFP_KERNEL); emmaprp_open()
793 if (!ctx) emmaprp_open()
796 file->private_data = ctx; emmaprp_open()
797 ctx->dev = pcdev; emmaprp_open()
800 kfree(ctx); emmaprp_open()
804 ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx, &queue_init); emmaprp_open()
806 if (IS_ERR(ctx->m2m_ctx)) { emmaprp_open()
807 int ret = PTR_ERR(ctx->m2m_ctx); emmaprp_open()
810 kfree(ctx); emmaprp_open()
816 ctx->q_data[V4L2_M2M_SRC].fmt = &formats[1]; emmaprp_open()
817 ctx->q_data[V4L2_M2M_DST].fmt = &formats[0]; emmaprp_open()
820 dprintk(pcdev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx); emmaprp_open()
828 struct emmaprp_ctx *ctx = file->private_data; emmaprp_release() local
830 dprintk(pcdev, "Releasing instance %p\n", ctx); emmaprp_release()
835 v4l2_m2m_ctx_release(ctx->m2m_ctx); emmaprp_release()
837 kfree(ctx); emmaprp_release()
846 struct emmaprp_ctx *ctx = file->private_data; emmaprp_poll() local
850 res = v4l2_m2m_poll(file, ctx->m2m_ctx, wait); emmaprp_poll()
858 struct emmaprp_ctx *ctx = file->private_data; emmaprp_mmap() local
863 ret = v4l2_m2m_mmap(file, ctx->m2m_ctx, vma); emmaprp_mmap()
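
The mx2_emmaprp.c hits show the four mem2mem scheduler callbacks (device_run, job_abort and the lock/unlock pair) but not where they are registered. A condensed sketch of that wiring, assuming the usual probe/open split; the callback bodies live in the hits above:

#include <media/v4l2-mem2mem.h>

static void emmaprp_device_run(void *priv);	/* programs the eMMa-PrP block */
static void emmaprp_job_abort(void *priv);	/* sets ctx->aborting, finishes job */
static void emmaprp_lock(void *priv);		/* takes the device mutex */
static void emmaprp_unlock(void *priv);

static struct v4l2_m2m_ops m2m_ops = {
	.device_run	= emmaprp_device_run,
	.job_abort	= emmaprp_job_abort,
	.lock		= emmaprp_lock,
	.unlock		= emmaprp_unlock,
};

/* probe-time: pcdev->m2m_dev = v4l2_m2m_init(&m2m_ops);
 * open-time:  ctx->m2m_ctx = v4l2_m2m_ctx_init(pcdev->m2m_dev, ctx,
 *                                              &queue_init);
 * the second call is visible in emmaprp_open() above.
 */
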
/linux-4.1.27/drivers/staging/unisys/visorchipset/
H A Dparser.c49 struct parser_context *ctx = NULL; parser_init_guts() local
67 ctx = kzalloc(allocbytes, GFP_KERNEL|__GFP_NORETRY); parser_init_guts()
68 if (!ctx) { parser_init_guts()
75 ctx->allocbytes = allocbytes; parser_init_guts()
76 ctx->param_bytes = bytes; parser_init_guts()
77 ctx->curr = NULL; parser_init_guts()
78 ctx->bytes_remaining = 0; parser_init_guts()
79 ctx->byte_stream = FALSE; parser_init_guts()
88 memcpy(ctx->data, p, bytes); parser_init_guts()
95 if (visor_memregion_read(rgn, 0, ctx->data, bytes) < 0) { parser_init_guts()
101 ctx->byte_stream = TRUE; parser_init_guts()
102 rc = ctx; parser_init_guts()
105 phdr = (struct spar_controlvm_parameters_header *)(ctx->data); parser_init_guts()
120 rc = ctx; parser_init_guts()
127 controlvm_payload_bytes_buffered += ctx->param_bytes; parser_init_guts()
129 if (ctx) { parser_init_guts()
130 parser_done(ctx); parser_init_guts()
131 ctx = NULL; parser_init_guts()
157 parser_simpleString_get(struct parser_context *ctx) parser_simpleString_get() argument
159 if (!ctx->byte_stream) parser_simpleString_get()
161 return ctx->data; /* note this IS '\0'-terminated, because of parser_simpleString_get()
168 void *parser_byte_stream_get(struct parser_context *ctx, ulong *nbytes) parser_byte_stream_get() argument
170 if (!ctx->byte_stream) parser_byte_stream_get()
173 *nbytes = ctx->param_bytes; parser_byte_stream_get()
174 return (void *)ctx->data; parser_byte_stream_get()
178 parser_id_get(struct parser_context *ctx) parser_id_get() argument
182 if (ctx == NULL) parser_id_get()
184 phdr = (struct spar_controlvm_parameters_header *)(ctx->data); parser_id_get()
189 parser_param_start(struct parser_context *ctx, PARSER_WHICH_STRING which_string) parser_param_start() argument
193 if (ctx == NULL) parser_param_start()
195 phdr = (struct spar_controlvm_parameters_header *)(ctx->data); parser_param_start()
198 ctx->curr = ctx->data + phdr->initiator_offset; parser_param_start()
199 ctx->bytes_remaining = phdr->initiator_length; parser_param_start()
202 ctx->curr = ctx->data + phdr->target_offset; parser_param_start()
203 ctx->bytes_remaining = phdr->target_length; parser_param_start()
206 ctx->curr = ctx->data + phdr->connection_offset; parser_param_start()
207 ctx->bytes_remaining = phdr->connection_length; parser_param_start()
210 ctx->curr = ctx->data + phdr->name_offset; parser_param_start()
211 ctx->bytes_remaining = phdr->name_length; parser_param_start()
222 parser_done(struct parser_context *ctx) parser_done() argument
224 if (!ctx) parser_done()
226 controlvm_payload_bytes_buffered -= ctx->param_bytes; parser_done()
227 kfree(ctx); parser_done()
265 parser_param_get(struct parser_context *ctx, char *nam, int namesize) parser_param_get() argument
274 if (!ctx)
276 pscan = ctx->curr;
277 nscan = ctx->bytes_remaining;
394 ctx->curr = pscan;
395 ctx->bytes_remaining = nscan;
400 parser_string_get(struct parser_context *ctx) parser_string_get() argument
408 if (!ctx) parser_string_get()
410 pscan = ctx->curr; parser_string_get()
411 nscan = ctx->bytes_remaining; parser_string_get()
H A Dparser.h37 void parser_param_start(struct parser_context *ctx,
39 void *parser_param_get(struct parser_context *ctx, char *nam, int namesize);
40 void *parser_string_get(struct parser_context *ctx);
41 uuid_le parser_id_get(struct parser_context *ctx);
42 char *parser_simpleString_get(struct parser_context *ctx);
43 void *parser_byte_stream_get(struct parser_context *ctx, ulong *nbytes);
44 void parser_done(struct parser_context *ctx);
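
parser.h above is the whole lifetime contract: a context is built once (parser_init_guts() copies or reads the payload into ctx->data and charges controlvm_payload_bytes_buffered), consumed through the get helpers, and retired with parser_done(), which reverses the accounting and kfrees the context. A caller-side sketch using only the accessors declared above; how the context was created is outside this excerpt, so it is taken as given, and the helper name dump_payload() is illustrative:

#include <linux/printk.h>
#include "parser.h"

static void dump_payload(struct parser_context *ctx)
{
	ulong nbytes;
	void *bytes = parser_byte_stream_get(ctx, &nbytes);

	/* both getters return NULL unless ctx->byte_stream was set
	 * when the context was initialised */
	if (bytes)
		pr_info("payload: %lu bytes, string form: %s\n",
			nbytes, parser_simpleString_get(ctx));

	parser_done(ctx);	/* undo the buffered-bytes accounting, kfree ctx */
}
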
/linux-4.1.27/drivers/video/fbdev/omap2/
H A Dvrfb.c83 static void omap2_sms_write_rot_control(u32 val, unsigned ctx) omap2_sms_write_rot_control() argument
85 __raw_writel(val, vrfb_base + SMS_ROT_CONTROL(ctx)); omap2_sms_write_rot_control()
88 static void omap2_sms_write_rot_size(u32 val, unsigned ctx) omap2_sms_write_rot_size() argument
90 __raw_writel(val, vrfb_base + SMS_ROT_SIZE(ctx)); omap2_sms_write_rot_size()
93 static void omap2_sms_write_rot_physical_ba(u32 val, unsigned ctx) omap2_sms_write_rot_physical_ba() argument
95 __raw_writel(val, vrfb_base + SMS_ROT_PHYSICAL_BA(ctx)); omap2_sms_write_rot_physical_ba()
98 static inline void restore_hw_context(int ctx) restore_hw_context() argument
100 omap2_sms_write_rot_control(ctxs[ctx].control, ctx); restore_hw_context()
101 omap2_sms_write_rot_size(ctxs[ctx].size, ctx); restore_hw_context()
102 omap2_sms_write_rot_physical_ba(ctxs[ctx].physical_ba, ctx); restore_hw_context()
188 u8 ctx = vrfb->context; omap_vrfb_setup() local
192 DBG("omapfb_set_vrfb(%d, %lx, %dx%d, %d, %d)\n", ctx, paddr, omap_vrfb_setup()
223 ctxs[ctx].physical_ba = paddr; omap_vrfb_setup()
224 ctxs[ctx].size = size; omap_vrfb_setup()
225 ctxs[ctx].control = control; omap_vrfb_setup()
227 omap2_sms_write_rot_physical_ba(paddr, ctx); omap_vrfb_setup()
228 omap2_sms_write_rot_size(size, ctx); omap_vrfb_setup()
229 omap2_sms_write_rot_control(control, ctx); omap_vrfb_setup()
264 int ctx = vrfb->context; omap_vrfb_release_ctx() local
266 if (ctx == 0xff) omap_vrfb_release_ctx()
269 DBG("release ctx %d\n", ctx); omap_vrfb_release_ctx()
273 BUG_ON(!(ctx_map & (1 << ctx))); omap_vrfb_release_ctx()
275 clear_bit(ctx, &ctx_map); omap_vrfb_release_ctx()
294 u8 ctx; omap_vrfb_request_ctx() local
297 DBG("request ctx\n"); omap_vrfb_request_ctx()
301 for (ctx = 0; ctx < num_ctxs; ++ctx) omap_vrfb_request_ctx()
302 if ((ctx_map & (1 << ctx)) == 0) omap_vrfb_request_ctx()
305 if (ctx == num_ctxs) { omap_vrfb_request_ctx()
311 DBG("found free ctx %d\n", ctx); omap_vrfb_request_ctx()
313 set_bit(ctx, &ctx_map); omap_vrfb_request_ctx()
317 vrfb->context = ctx; omap_vrfb_request_ctx()
320 paddr = ctxs[ctx].base + SMS_ROT_VIRT_BASE(rot); omap_vrfb_request_ctx()
323 "area for ctx %d, rotation %d\n", omap_vrfb_request_ctx()
324 ctx, rot * 90); omap_vrfb_request_ctx()
332 DBG("VRFB %d/%d: %lx\n", ctx, rot*90, vrfb->paddr[rot]); omap_vrfb_request_ctx()
372 dev_err(&pdev->dev, "can't get vrfb ctx %d address\n", vrfb_probe()
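
vrfb.c hands out its rotation contexts from a single unsigned-long bitmap: omap_vrfb_request_ctx() scans ctx_map for a clear bit and set_bit()s it, omap_vrfb_release_ctx() clear_bit()s it again (the driver serialises both with a lock the hits do not show). The allocator idiom on its own, a sketch with illustrative helper names:

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/errno.h>

static unsigned long ctx_map;	/* one bit per hardware rotation context */
static unsigned num_ctxs;

static int claim_ctx(void)
{
	unsigned ctx;

	for (ctx = 0; ctx < num_ctxs; ++ctx)	/* first free slot wins */
		if ((ctx_map & (1 << ctx)) == 0)
			break;
	if (ctx == num_ctxs)
		return -EBUSY;			/* all contexts taken */
	set_bit(ctx, &ctx_map);
	return ctx;
}

static void release_ctx(unsigned ctx)
{
	BUG_ON(!(ctx_map & (1 << ctx)));	/* catch double release */
	clear_bit(ctx, &ctx_map);
}
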
/linux-4.1.27/drivers/acpi/apei/
H A Dapei-internal.h14 typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
37 void apei_exec_ctx_init(struct apei_exec_context *ctx,
43 static inline void apei_exec_ctx_set_input(struct apei_exec_context *ctx, apei_exec_ctx_set_input() argument
46 ctx->value = input; apei_exec_ctx_set_input()
49 static inline u64 apei_exec_ctx_get_output(struct apei_exec_context *ctx) apei_exec_ctx_get_output() argument
51 return ctx->value; apei_exec_ctx_get_output()
54 int __apei_exec_run(struct apei_exec_context *ctx, u8 action, bool optional);
56 static inline int apei_exec_run(struct apei_exec_context *ctx, u8 action) apei_exec_run() argument
58 return __apei_exec_run(ctx, action, 0); apei_exec_run()
62 static inline int apei_exec_run_optional(struct apei_exec_context *ctx, u8 action) apei_exec_run_optional() argument
64 return __apei_exec_run(ctx, action, 1); apei_exec_run_optional()
84 int apei_exec_read_register(struct apei_exec_context *ctx,
86 int apei_exec_read_register_value(struct apei_exec_context *ctx,
88 int apei_exec_write_register(struct apei_exec_context *ctx,
90 int apei_exec_write_register_value(struct apei_exec_context *ctx,
92 int apei_exec_noop(struct apei_exec_context *ctx,
94 int apei_exec_pre_map_gars(struct apei_exec_context *ctx);
95 int apei_exec_post_unmap_gars(struct apei_exec_context *ctx);
117 int apei_exec_collect_resources(struct apei_exec_context *ctx,
H A Derst.c122 static int erst_exec_load_var1(struct apei_exec_context *ctx, erst_exec_load_var1() argument
125 return __apei_exec_read_register(entry, &ctx->var1); erst_exec_load_var1()
128 static int erst_exec_load_var2(struct apei_exec_context *ctx, erst_exec_load_var2() argument
131 return __apei_exec_read_register(entry, &ctx->var2); erst_exec_load_var2()
134 static int erst_exec_store_var1(struct apei_exec_context *ctx, erst_exec_store_var1() argument
137 return __apei_exec_write_register(entry, ctx->var1); erst_exec_store_var1()
140 static int erst_exec_add(struct apei_exec_context *ctx, erst_exec_add() argument
143 ctx->var1 += ctx->var2; erst_exec_add()
147 static int erst_exec_subtract(struct apei_exec_context *ctx, erst_exec_subtract() argument
150 ctx->var1 -= ctx->var2; erst_exec_subtract()
154 static int erst_exec_add_value(struct apei_exec_context *ctx, erst_exec_add_value() argument
163 val += ctx->value; erst_exec_add_value()
168 static int erst_exec_subtract_value(struct apei_exec_context *ctx, erst_exec_subtract_value() argument
177 val -= ctx->value; erst_exec_subtract_value()
182 static int erst_exec_stall(struct apei_exec_context *ctx, erst_exec_stall() argument
187 if (ctx->value > FIRMWARE_MAX_STALL) { erst_exec_stall()
191 ctx->value); erst_exec_stall()
194 stall_time = ctx->value; erst_exec_stall()
199 static int erst_exec_stall_while_true(struct apei_exec_context *ctx, erst_exec_stall_while_true() argument
207 if (ctx->var1 > FIRMWARE_MAX_STALL) { erst_exec_stall_while_true()
211 ctx->var1); erst_exec_stall_while_true()
214 stall_time = ctx->var1; erst_exec_stall_while_true()
220 if (val != ctx->value) erst_exec_stall_while_true()
229 struct apei_exec_context *ctx, erst_exec_skip_next_instruction_if_true()
238 if (val == ctx->value) { erst_exec_skip_next_instruction_if_true()
239 ctx->ip += 2; erst_exec_skip_next_instruction_if_true()
246 static int erst_exec_goto(struct apei_exec_context *ctx, erst_exec_goto() argument
249 ctx->ip = ctx->value; erst_exec_goto()
253 static int erst_exec_set_src_address_base(struct apei_exec_context *ctx, erst_exec_set_src_address_base() argument
256 return __apei_exec_read_register(entry, &ctx->src_base); erst_exec_set_src_address_base()
259 static int erst_exec_set_dst_address_base(struct apei_exec_context *ctx, erst_exec_set_dst_address_base() argument
262 return __apei_exec_read_register(entry, &ctx->dst_base); erst_exec_set_dst_address_base()
265 static int erst_exec_move_data(struct apei_exec_context *ctx, erst_exec_move_data() argument
282 src = ioremap(ctx->src_base + offset, ctx->var2); erst_exec_move_data()
285 dst = ioremap(ctx->dst_base + offset, ctx->var2); erst_exec_move_data()
291 memmove(dst, src, ctx->var2); erst_exec_move_data()
378 static inline void erst_exec_ctx_init(struct apei_exec_context *ctx) erst_exec_ctx_init() argument
380 apei_exec_ctx_init(ctx, erst_ins_type, ARRAY_SIZE(erst_ins_type), erst_exec_ctx_init()
386 struct apei_exec_context ctx; erst_get_erange() local
389 erst_exec_ctx_init(&ctx); erst_get_erange()
390 rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_RANGE); erst_get_erange()
393 range->base = apei_exec_ctx_get_output(&ctx); erst_get_erange()
394 rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_LENGTH); erst_get_erange()
397 range->size = apei_exec_ctx_get_output(&ctx); erst_get_erange()
398 rc = apei_exec_run(&ctx, ACPI_ERST_GET_ERROR_ATTRIBUTES); erst_get_erange()
401 range->attr = apei_exec_ctx_get_output(&ctx); erst_get_erange()
408 struct apei_exec_context ctx; __erst_get_record_count() local
411 erst_exec_ctx_init(&ctx); __erst_get_record_count()
412 rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT); __erst_get_record_count()
415 return apei_exec_ctx_get_output(&ctx); __erst_get_record_count()
452 struct apei_exec_context ctx; __erst_get_next_record_id() local
455 erst_exec_ctx_init(&ctx); __erst_get_next_record_id()
456 rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_ID); __erst_get_next_record_id()
459 *record_id = apei_exec_ctx_get_output(&ctx); __erst_get_next_record_id()
639 struct apei_exec_context ctx; __erst_write_to_storage() local
644 erst_exec_ctx_init(&ctx); __erst_write_to_storage()
645 rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_WRITE); __erst_write_to_storage()
648 apei_exec_ctx_set_input(&ctx, offset); __erst_write_to_storage()
649 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); __erst_write_to_storage()
652 rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); __erst_write_to_storage()
656 rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); __erst_write_to_storage()
659 val = apei_exec_ctx_get_output(&ctx); __erst_write_to_storage()
665 rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); __erst_write_to_storage()
668 val = apei_exec_ctx_get_output(&ctx); __erst_write_to_storage()
669 rc = apei_exec_run_optional(&ctx, ACPI_ERST_END); __erst_write_to_storage()
678 struct apei_exec_context ctx; __erst_read_from_storage() local
683 erst_exec_ctx_init(&ctx); __erst_read_from_storage()
684 rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_READ); __erst_read_from_storage()
687 apei_exec_ctx_set_input(&ctx, offset); __erst_read_from_storage()
688 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_OFFSET); __erst_read_from_storage()
691 apei_exec_ctx_set_input(&ctx, record_id); __erst_read_from_storage()
692 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); __erst_read_from_storage()
695 rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); __erst_read_from_storage()
699 rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); __erst_read_from_storage()
702 val = apei_exec_ctx_get_output(&ctx); __erst_read_from_storage()
708 rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); __erst_read_from_storage()
711 val = apei_exec_ctx_get_output(&ctx); __erst_read_from_storage()
712 rc = apei_exec_run_optional(&ctx, ACPI_ERST_END); __erst_read_from_storage()
721 struct apei_exec_context ctx; __erst_clear_from_storage() local
726 erst_exec_ctx_init(&ctx); __erst_clear_from_storage()
727 rc = apei_exec_run_optional(&ctx, ACPI_ERST_BEGIN_CLEAR); __erst_clear_from_storage()
730 apei_exec_ctx_set_input(&ctx, record_id); __erst_clear_from_storage()
731 rc = apei_exec_run(&ctx, ACPI_ERST_SET_RECORD_ID); __erst_clear_from_storage()
734 rc = apei_exec_run(&ctx, ACPI_ERST_EXECUTE_OPERATION); __erst_clear_from_storage()
738 rc = apei_exec_run(&ctx, ACPI_ERST_CHECK_BUSY_STATUS); __erst_clear_from_storage()
741 val = apei_exec_ctx_get_output(&ctx); __erst_clear_from_storage()
747 rc = apei_exec_run(&ctx, ACPI_ERST_GET_COMMAND_STATUS); __erst_clear_from_storage()
750 val = apei_exec_ctx_get_output(&ctx); __erst_clear_from_storage()
751 rc = apei_exec_run_optional(&ctx, ACPI_ERST_END); __erst_clear_from_storage()
1125 struct apei_exec_context ctx; erst_init() local
1157 erst_exec_ctx_init(&ctx); erst_init()
1158 rc = apei_exec_collect_resources(&ctx, &erst_resources); erst_init()
1164 rc = apei_exec_pre_map_gars(&ctx); erst_init()
1220 apei_exec_post_unmap_gars(&ctx); erst_init()
228 erst_exec_skip_next_instruction_if_true( struct apei_exec_context *ctx, struct acpi_whea_header *entry) erst_exec_skip_next_instruction_if_true() argument
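
Every erst.c helper above runs the same script: bind the ERST instruction table to a stack-allocated apei_exec_context, run one or more actions, then read the result back through apei_exec_ctx_get_output(). A condensed variant of __erst_get_record_count() showing just that skeleton (the int-returning signature here is illustrative; the real helper returns the count directly):

static int read_record_count(u64 *count)
{
	struct apei_exec_context ctx;
	int rc;

	erst_exec_ctx_init(&ctx);	/* binds erst_ins_type[], see above */
	rc = apei_exec_run(&ctx, ACPI_ERST_GET_RECORD_COUNT);
	if (rc)
		return rc;
	*count = apei_exec_ctx_get_output(&ctx);	/* i.e. ctx.value */
	return 0;
}
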
/linux-4.1.27/drivers/staging/ozwpan/
H A Dozcdev.c50 struct oz_serial_ctx *ctx; oz_cdev_claim_ctx() local
53 ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL]; oz_cdev_claim_ctx()
54 if (ctx) oz_cdev_claim_ctx()
55 atomic_inc(&ctx->ref_count); oz_cdev_claim_ctx()
57 return ctx; oz_cdev_claim_ctx()
63 static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx) oz_cdev_release_ctx() argument
65 if (atomic_dec_and_test(&ctx->ref_count)) { oz_cdev_release_ctx()
67 kfree(ctx); oz_cdev_release_ctx()
102 struct oz_serial_ctx *ctx; oz_cdev_read() local
111 ctx = oz_cdev_claim_ctx(pd); oz_cdev_read()
112 if (ctx == NULL) oz_cdev_read()
114 n = ctx->rd_in - ctx->rd_out; oz_cdev_read()
119 ix = ctx->rd_out; oz_cdev_read()
123 if (copy_to_user(buf, &ctx->rd_buf[ix], n)) { oz_cdev_read()
131 if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) { oz_cdev_read()
137 ctx->rd_out = ix; oz_cdev_read()
139 oz_cdev_release_ctx(ctx); oz_cdev_read()
156 struct oz_serial_ctx *ctx; oz_cdev_write() local
186 ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL]; oz_cdev_write()
187 if (ctx) { oz_cdev_write()
188 app_hdr->elt_seq_num = ctx->tx_seq_num++; oz_cdev_write()
189 if (ctx->tx_seq_num == 0) oz_cdev_write()
190 ctx->tx_seq_num = 1; oz_cdev_write()
326 struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd); oz_cdev_poll() local
328 if (ctx) { oz_cdev_poll()
329 if (ctx->rd_in != ctx->rd_out) oz_cdev_poll()
331 oz_cdev_release_ctx(ctx); oz_cdev_poll()
432 struct oz_serial_ctx *ctx; oz_cdev_start() local
439 ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC); oz_cdev_start()
440 if (ctx == NULL) oz_cdev_start()
442 atomic_set(&ctx->ref_count, 1); oz_cdev_start()
443 ctx->tx_seq_num = 1; oz_cdev_start()
448 kfree(ctx); oz_cdev_start()
450 pd->app_ctx[OZ_APPID_SERIAL] = ctx; oz_cdev_start()
470 struct oz_serial_ctx *ctx; oz_cdev_stop() local
477 ctx = (struct oz_serial_ctx *) pd->app_ctx[OZ_APPID_SERIAL]; oz_cdev_stop()
480 if (ctx) oz_cdev_stop()
481 oz_cdev_release_ctx(ctx); oz_cdev_stop()
500 struct oz_serial_ctx *ctx; oz_cdev_rx() local
508 ctx = oz_cdev_claim_ctx(pd); oz_cdev_rx()
509 if (ctx == NULL) { oz_cdev_rx()
518 if (((ctx->rx_seq_num - app_hdr->elt_seq_num) & 0x80) == 0) { oz_cdev_rx()
521 app_hdr->elt_seq_num, ctx->rx_seq_num); oz_cdev_rx()
525 ctx->rx_seq_num = app_hdr->elt_seq_num; oz_cdev_rx()
530 space = ctx->rd_out - ctx->rd_in - 1; oz_cdev_rx()
537 ix = ctx->rd_in; oz_cdev_rx()
541 memcpy(&ctx->rd_buf[ix], data, copy_sz); oz_cdev_rx()
547 memcpy(ctx->rd_buf, data+copy_sz, len); oz_cdev_rx()
550 ctx->rd_in = ix; oz_cdev_rx()
553 oz_cdev_release_ctx(ctx); oz_cdev_rx()
H A Dozhcd.h10 struct oz_port *oz_hcd_pd_arrived(void *ctx);
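
The ozcdev.c context is kept alive across concurrent readers with a bare atomic_t rather than a kref: oz_cdev_claim_ctx() takes the PD lock, bumps ref_count and returns the pointer, while oz_cdev_release_ctx() frees on the final decrement. The same get/put idiom in isolation (a sketch; the struct and helper names are illustrative, and the claim-side locking is elided):

#include <linux/atomic.h>
#include <linux/slab.h>

struct demo_ctx {
	atomic_t ref_count;
	/* ...rd_buf, sequence numbers elided... */
};

static struct demo_ctx *ctx_get(struct demo_ctx *ctx)
{
	if (ctx)
		atomic_inc(&ctx->ref_count);	/* caller now holds a ref */
	return ctx;
}

static void ctx_put(struct demo_ctx *ctx)
{
	if (atomic_dec_and_test(&ctx->ref_count))
		kfree(ctx);			/* last reference dropped */
}
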
/linux-4.1.27/drivers/phy/
H A Dphy-xgene.c605 static void cmu_wr(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, cmu_wr() argument
608 void __iomem *sds_base = ctx->sds_base; cmu_wr()
622 static void cmu_rd(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, cmu_rd() argument
625 void __iomem *sds_base = ctx->sds_base; cmu_rd()
636 static void cmu_toggle1to0(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, cmu_toggle1to0() argument
641 cmu_rd(ctx, cmu_type, reg, &val); cmu_toggle1to0()
643 cmu_wr(ctx, cmu_type, reg, val); cmu_toggle1to0()
644 cmu_rd(ctx, cmu_type, reg, &val); cmu_toggle1to0()
646 cmu_wr(ctx, cmu_type, reg, val); cmu_toggle1to0()
649 static void cmu_clrbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, cmu_clrbits() argument
654 cmu_rd(ctx, cmu_type, reg, &val); cmu_clrbits()
656 cmu_wr(ctx, cmu_type, reg, val); cmu_clrbits()
659 static void cmu_setbits(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type, cmu_setbits() argument
664 cmu_rd(ctx, cmu_type, reg, &val); cmu_setbits()
666 cmu_wr(ctx, cmu_type, reg, val); cmu_setbits()
669 static void serdes_wr(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 data) serdes_wr() argument
671 void __iomem *sds_base = ctx->sds_base; serdes_wr()
684 static void serdes_rd(struct xgene_phy_ctx *ctx, int lane, u32 reg, u32 *data) serdes_rd() argument
686 void __iomem *sds_base = ctx->sds_base; serdes_rd()
695 static void serdes_clrbits(struct xgene_phy_ctx *ctx, int lane, u32 reg, serdes_clrbits() argument
700 serdes_rd(ctx, lane, reg, &val); serdes_clrbits()
702 serdes_wr(ctx, lane, reg, val); serdes_clrbits()
705 static void serdes_setbits(struct xgene_phy_ctx *ctx, int lane, u32 reg, serdes_setbits() argument
710 serdes_rd(ctx, lane, reg, &val); serdes_setbits()
712 serdes_wr(ctx, lane, reg, val); serdes_setbits()
715 static void xgene_phy_cfg_cmu_clk_type(struct xgene_phy_ctx *ctx, xgene_phy_cfg_cmu_clk_type() argument
722 cmu_rd(ctx, cmu_type, CMU_REG12, &val); xgene_phy_cfg_cmu_clk_type()
724 cmu_wr(ctx, cmu_type, CMU_REG12, val); xgene_phy_cfg_cmu_clk_type()
726 cmu_wr(ctx, cmu_type, CMU_REG13, 0x0222); xgene_phy_cfg_cmu_clk_type()
727 cmu_wr(ctx, cmu_type, CMU_REG14, 0x2225); xgene_phy_cfg_cmu_clk_type()
732 cmu_rd(ctx, cmu_type, CMU_REG0, &val); xgene_phy_cfg_cmu_clk_type()
734 cmu_wr(ctx, cmu_type, CMU_REG0, val); xgene_phy_cfg_cmu_clk_type()
736 cmu_rd(ctx, cmu_type, CMU_REG1, &val); xgene_phy_cfg_cmu_clk_type()
738 cmu_wr(ctx, cmu_type, CMU_REG1, val); xgene_phy_cfg_cmu_clk_type()
739 dev_dbg(ctx->dev, "Set external reference clock\n"); xgene_phy_cfg_cmu_clk_type()
742 cmu_rd(ctx, cmu_type, CMU_REG0, &val); xgene_phy_cfg_cmu_clk_type()
744 cmu_wr(ctx, cmu_type, CMU_REG0, val); xgene_phy_cfg_cmu_clk_type()
746 cmu_rd(ctx, cmu_type, CMU_REG1, &val); xgene_phy_cfg_cmu_clk_type()
748 cmu_wr(ctx, cmu_type, CMU_REG1, val); xgene_phy_cfg_cmu_clk_type()
749 dev_dbg(ctx->dev, "Set internal reference clock\n"); xgene_phy_cfg_cmu_clk_type()
757 cmu_rd(ctx, cmu_type, CMU_REG1, &val); xgene_phy_cfg_cmu_clk_type()
759 cmu_wr(ctx, cmu_type, CMU_REG1, val); xgene_phy_cfg_cmu_clk_type()
761 cmu_rd(ctx, cmu_type, CMU_REG1, &val); xgene_phy_cfg_cmu_clk_type()
763 cmu_wr(ctx, cmu_type, CMU_REG1, val); xgene_phy_cfg_cmu_clk_type()
764 dev_dbg(ctx->dev, xgene_phy_cfg_cmu_clk_type()
769 static void xgene_phy_sata_cfg_cmu_core(struct xgene_phy_ctx *ctx, xgene_phy_sata_cfg_cmu_core() argument
778 cmu_rd(ctx, cmu_type, CMU_REG34, &val); xgene_phy_sata_cfg_cmu_core()
783 cmu_wr(ctx, cmu_type, CMU_REG34, val); xgene_phy_sata_cfg_cmu_core()
787 cmu_rd(ctx, cmu_type, CMU_REG0, &val); xgene_phy_sata_cfg_cmu_core()
792 cmu_wr(ctx, cmu_type, CMU_REG0, val); xgene_phy_sata_cfg_cmu_core()
795 cmu_rd(ctx, cmu_type, CMU_REG1, &val); xgene_phy_sata_cfg_cmu_core()
805 cmu_wr(ctx, cmu_type, CMU_REG1, val); xgene_phy_sata_cfg_cmu_core()
808 cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK); xgene_phy_sata_cfg_cmu_core()
811 cmu_rd(ctx, cmu_type, CMU_REG2, &val); xgene_phy_sata_cfg_cmu_core()
829 cmu_wr(ctx, cmu_type, CMU_REG2, val); xgene_phy_sata_cfg_cmu_core()
832 cmu_rd(ctx, cmu_type, CMU_REG3, &val); xgene_phy_sata_cfg_cmu_core()
844 cmu_wr(ctx, cmu_type, CMU_REG3, val); xgene_phy_sata_cfg_cmu_core()
847 cmu_rd(ctx, cmu_type, CMU_REG26, &val); xgene_phy_sata_cfg_cmu_core()
849 cmu_wr(ctx, cmu_type, CMU_REG26, val); xgene_phy_sata_cfg_cmu_core()
852 cmu_rd(ctx, cmu_type, CMU_REG5, &val); xgene_phy_sata_cfg_cmu_core()
859 cmu_wr(ctx, cmu_type, CMU_REG5, val); xgene_phy_sata_cfg_cmu_core()
862 cmu_rd(ctx, cmu_type, CMU_REG6, &val); xgene_phy_sata_cfg_cmu_core()
865 cmu_wr(ctx, cmu_type, CMU_REG6, val); xgene_phy_sata_cfg_cmu_core()
869 cmu_rd(ctx, cmu_type, CMU_REG9, &val); xgene_phy_sata_cfg_cmu_core()
879 cmu_wr(ctx, cmu_type, CMU_REG9, val); xgene_phy_sata_cfg_cmu_core()
882 cmu_rd(ctx, cmu_type, CMU_REG10, &val); xgene_phy_sata_cfg_cmu_core()
884 cmu_wr(ctx, cmu_type, CMU_REG10, val); xgene_phy_sata_cfg_cmu_core()
888 cmu_rd(ctx, cmu_type, CMU_REG16, &val); xgene_phy_sata_cfg_cmu_core()
895 cmu_wr(ctx, cmu_type, CMU_REG16, val); xgene_phy_sata_cfg_cmu_core()
898 cmu_rd(ctx, cmu_type, CMU_REG30, &val); xgene_phy_sata_cfg_cmu_core()
901 cmu_wr(ctx, cmu_type, CMU_REG30, val); xgene_phy_sata_cfg_cmu_core()
904 cmu_wr(ctx, cmu_type, CMU_REG31, 0xF); xgene_phy_sata_cfg_cmu_core()
906 cmu_rd(ctx, cmu_type, CMU_REG32, &val); xgene_phy_sata_cfg_cmu_core()
912 cmu_wr(ctx, cmu_type, CMU_REG32, val); xgene_phy_sata_cfg_cmu_core()
916 cmu_wr(ctx, cmu_type, CMU_REG34, 0x8d27); xgene_phy_sata_cfg_cmu_core()
918 cmu_wr(ctx, cmu_type, CMU_REG34, 0x873c); xgene_phy_sata_cfg_cmu_core()
921 cmu_wr(ctx, cmu_type, CMU_REG37, 0xF00F); xgene_phy_sata_cfg_cmu_core()
924 static void xgene_phy_ssc_enable(struct xgene_phy_ctx *ctx, xgene_phy_ssc_enable() argument
930 cmu_rd(ctx, cmu_type, CMU_REG35, &val); xgene_phy_ssc_enable()
932 cmu_wr(ctx, cmu_type, CMU_REG35, val); xgene_phy_ssc_enable()
935 cmu_rd(ctx, cmu_type, CMU_REG36, &val); xgene_phy_ssc_enable()
939 cmu_wr(ctx, cmu_type, CMU_REG36, val); xgene_phy_ssc_enable()
942 cmu_clrbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK); xgene_phy_ssc_enable()
943 cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK); xgene_phy_ssc_enable()
946 cmu_toggle1to0(ctx, cmu_type, CMU_REG32, xgene_phy_ssc_enable()
950 static void xgene_phy_sata_cfg_lanes(struct xgene_phy_ctx *ctx) xgene_phy_sata_cfg_lanes() argument
958 serdes_wr(ctx, lane, RXTX_REG147, 0x6); xgene_phy_sata_cfg_lanes()
961 serdes_rd(ctx, lane, RXTX_REG0, &val); xgene_phy_sata_cfg_lanes()
965 serdes_wr(ctx, lane, RXTX_REG0, val); xgene_phy_sata_cfg_lanes()
968 serdes_rd(ctx, lane, RXTX_REG1, &val); xgene_phy_sata_cfg_lanes()
971 ctx->sata_param.txboostgain[lane * 3 + xgene_phy_sata_cfg_lanes()
972 ctx->sata_param.speed[lane]]); xgene_phy_sata_cfg_lanes()
973 serdes_wr(ctx, lane, RXTX_REG1, val); xgene_phy_sata_cfg_lanes()
977 serdes_rd(ctx, lane, RXTX_REG2, &val); xgene_phy_sata_cfg_lanes()
981 serdes_wr(ctx, lane, RXTX_REG2, val); xgene_phy_sata_cfg_lanes()
984 serdes_rd(ctx, lane, RXTX_REG4, &val); xgene_phy_sata_cfg_lanes()
986 serdes_wr(ctx, lane, RXTX_REG4, val); xgene_phy_sata_cfg_lanes()
989 serdes_rd(ctx, lane, RXTX_REG1, &val); xgene_phy_sata_cfg_lanes()
992 serdes_wr(ctx, lane, RXTX_REG1, val); xgene_phy_sata_cfg_lanes()
996 serdes_rd(ctx, lane, RXTX_REG5, &val); xgene_phy_sata_cfg_lanes()
998 ctx->sata_param.txprecursor_cn1[lane * 3 + xgene_phy_sata_cfg_lanes()
999 ctx->sata_param.speed[lane]]); xgene_phy_sata_cfg_lanes()
1001 ctx->sata_param.txpostcursor_cp1[lane * 3 + xgene_phy_sata_cfg_lanes()
1002 ctx->sata_param.speed[lane]]); xgene_phy_sata_cfg_lanes()
1004 ctx->sata_param.txprecursor_cn2[lane * 3 + xgene_phy_sata_cfg_lanes()
1005 ctx->sata_param.speed[lane]]); xgene_phy_sata_cfg_lanes()
1006 serdes_wr(ctx, lane, RXTX_REG5, val); xgene_phy_sata_cfg_lanes()
1009 serdes_rd(ctx, lane, RXTX_REG6, &val); xgene_phy_sata_cfg_lanes()
1011 ctx->sata_param.txamplitude[lane * 3 + xgene_phy_sata_cfg_lanes()
1012 ctx->sata_param.speed[lane]]); xgene_phy_sata_cfg_lanes()
1017 serdes_wr(ctx, lane, RXTX_REG6, val); xgene_phy_sata_cfg_lanes()
1020 serdes_rd(ctx, lane, RXTX_REG7, &val); xgene_phy_sata_cfg_lanes()
1023 serdes_wr(ctx, lane, RXTX_REG7, val); xgene_phy_sata_cfg_lanes()
1026 serdes_rd(ctx, lane, RXTX_REG8, &val); xgene_phy_sata_cfg_lanes()
1032 serdes_wr(ctx, lane, RXTX_REG8, val); xgene_phy_sata_cfg_lanes()
1035 serdes_rd(ctx, lane, RXTX_REG11, &val); xgene_phy_sata_cfg_lanes()
1037 serdes_wr(ctx, lane, RXTX_REG11, val); xgene_phy_sata_cfg_lanes()
1040 serdes_rd(ctx, lane, RXTX_REG12, &val); xgene_phy_sata_cfg_lanes()
1044 serdes_wr(ctx, lane, RXTX_REG12, val); xgene_phy_sata_cfg_lanes()
1047 serdes_rd(ctx, lane, RXTX_REG26, &val); xgene_phy_sata_cfg_lanes()
1050 serdes_wr(ctx, lane, RXTX_REG26, val); xgene_phy_sata_cfg_lanes()
1052 serdes_wr(ctx, lane, RXTX_REG28, 0x0); xgene_phy_sata_cfg_lanes()
1055 serdes_wr(ctx, lane, RXTX_REG31, 0x0); xgene_phy_sata_cfg_lanes()
1058 serdes_rd(ctx, lane, RXTX_REG61, &val); xgene_phy_sata_cfg_lanes()
1062 serdes_wr(ctx, lane, RXTX_REG61, val); xgene_phy_sata_cfg_lanes()
1064 serdes_rd(ctx, lane, RXTX_REG62, &val); xgene_phy_sata_cfg_lanes()
1066 serdes_wr(ctx, lane, RXTX_REG62, val); xgene_phy_sata_cfg_lanes()
1071 serdes_rd(ctx, lane, reg, &val); xgene_phy_sata_cfg_lanes()
1075 serdes_wr(ctx, lane, reg, val); xgene_phy_sata_cfg_lanes()
1081 serdes_rd(ctx, lane, reg, &val); xgene_phy_sata_cfg_lanes()
1085 serdes_wr(ctx, lane, reg, val); xgene_phy_sata_cfg_lanes()
1091 serdes_rd(ctx, lane, reg, &val); xgene_phy_sata_cfg_lanes()
1095 serdes_wr(ctx, lane, reg, val); xgene_phy_sata_cfg_lanes()
1098 serdes_rd(ctx, lane, RXTX_REG102, &val); xgene_phy_sata_cfg_lanes()
1100 serdes_wr(ctx, lane, RXTX_REG102, val); xgene_phy_sata_cfg_lanes()
1102 serdes_wr(ctx, lane, RXTX_REG114, 0xffe0); xgene_phy_sata_cfg_lanes()
1104 serdes_rd(ctx, lane, RXTX_REG125, &val); xgene_phy_sata_cfg_lanes()
1106 ctx->sata_param.txeyedirection[lane * 3 + xgene_phy_sata_cfg_lanes()
1107 ctx->sata_param.speed[lane]]); xgene_phy_sata_cfg_lanes()
1109 ctx->sata_param.txeyetuning[lane * 3 + xgene_phy_sata_cfg_lanes()
1110 ctx->sata_param.speed[lane]]); xgene_phy_sata_cfg_lanes()
1112 serdes_wr(ctx, lane, RXTX_REG125, val); xgene_phy_sata_cfg_lanes()
1114 serdes_rd(ctx, lane, RXTX_REG127, &val); xgene_phy_sata_cfg_lanes()
1116 serdes_wr(ctx, lane, RXTX_REG127, val); xgene_phy_sata_cfg_lanes()
1118 serdes_rd(ctx, lane, RXTX_REG128, &val); xgene_phy_sata_cfg_lanes()
1120 serdes_wr(ctx, lane, RXTX_REG128, val); xgene_phy_sata_cfg_lanes()
1122 serdes_rd(ctx, lane, RXTX_REG145, &val); xgene_phy_sata_cfg_lanes()
1132 serdes_wr(ctx, lane, RXTX_REG145, val); xgene_phy_sata_cfg_lanes()
1140 serdes_wr(ctx, lane, reg, 0xFFFF); xgene_phy_sata_cfg_lanes()
1145 static int xgene_phy_cal_rdy_chk(struct xgene_phy_ctx *ctx, xgene_phy_cal_rdy_chk() argument
1149 void __iomem *csr_serdes = ctx->sds_base; xgene_phy_cal_rdy_chk()
1158 cmu_setbits(ctx, cmu_type, CMU_REG5, CMU_REG5_PLL_RESETB_MASK); xgene_phy_cal_rdy_chk()
1165 cmu_rd(ctx, cmu_type, CMU_REG1, &val); xgene_phy_cal_rdy_chk()
1167 cmu_wr(ctx, cmu_type, CMU_REG1, val); xgene_phy_cal_rdy_chk()
1174 cmu_toggle1to0(ctx, cmu_type, CMU_REG32, xgene_phy_cal_rdy_chk()
1191 cmu_rd(ctx, cmu_type, CMU_REG17, &val); xgene_phy_cal_rdy_chk()
1194 cmu_wr(ctx, cmu_type, CMU_REG17, val); xgene_phy_cal_rdy_chk()
1195 cmu_toggle1to0(ctx, cmu_type, CMU_REG17, xgene_phy_cal_rdy_chk()
1202 cmu_rd(ctx, cmu_type, CMU_REG17, &val); xgene_phy_cal_rdy_chk()
1205 cmu_wr(ctx, cmu_type, CMU_REG17, val); xgene_phy_cal_rdy_chk()
1206 cmu_toggle1to0(ctx, cmu_type, CMU_REG16, xgene_phy_cal_rdy_chk()
1209 cmu_rd(ctx, cmu_type, CMU_REG17, &val); xgene_phy_cal_rdy_chk()
1212 cmu_wr(ctx, cmu_type, CMU_REG17, val); xgene_phy_cal_rdy_chk()
1213 cmu_toggle1to0(ctx, cmu_type, CMU_REG16, xgene_phy_cal_rdy_chk()
1220 cmu_rd(ctx, cmu_type, CMU_REG7, &val); xgene_phy_cal_rdy_chk()
1230 cmu_rd(ctx, cmu_type, CMU_REG7, &val); xgene_phy_cal_rdy_chk()
1231 dev_dbg(ctx->dev, "PLL calibration %s\n", xgene_phy_cal_rdy_chk()
1234 dev_err(ctx->dev, xgene_phy_cal_rdy_chk()
1238 dev_dbg(ctx->dev, "PLL calibration successful\n"); xgene_phy_cal_rdy_chk()
1240 cmu_rd(ctx, cmu_type, CMU_REG15, &val); xgene_phy_cal_rdy_chk()
1241 dev_dbg(ctx->dev, "PHY Tx is %sready\n", val & 0x300 ? "" : "not "); xgene_phy_cal_rdy_chk()
1245 static void xgene_phy_pdwn_force_vco(struct xgene_phy_ctx *ctx, xgene_phy_pdwn_force_vco() argument
1251 dev_dbg(ctx->dev, "Reset VCO and re-start again\n"); xgene_phy_pdwn_force_vco()
1253 cmu_rd(ctx, cmu_type, CMU_REG16, &val); xgene_phy_pdwn_force_vco()
1255 cmu_wr(ctx, cmu_type, CMU_REG16, val); xgene_phy_pdwn_force_vco()
1258 cmu_toggle1to0(ctx, cmu_type, CMU_REG0, CMU_REG0_PDOWN_MASK); xgene_phy_pdwn_force_vco()
1259 cmu_toggle1to0(ctx, cmu_type, CMU_REG32, xgene_phy_pdwn_force_vco()
1263 static int xgene_phy_hw_init_sata(struct xgene_phy_ctx *ctx, xgene_phy_hw_init_sata() argument
1266 void __iomem *sds_base = ctx->sds_base; xgene_phy_hw_init_sata()
1271 dev_dbg(ctx->dev, "Reset PHY\n"); xgene_phy_hw_init_sata()
1285 ctx->sata_param.txspeed[ctx->sata_param.speed[0]]); xgene_phy_hw_init_sata()
1288 dev_dbg(ctx->dev, "Set the customer pin mode to SATA\n"); xgene_phy_hw_init_sata()
1294 xgene_phy_cfg_cmu_clk_type(ctx, PHY_CMU, clk_type); xgene_phy_hw_init_sata()
1297 xgene_phy_sata_cfg_cmu_core(ctx, PHY_CMU, clk_type); xgene_phy_hw_init_sata()
1301 xgene_phy_ssc_enable(ctx, PHY_CMU); xgene_phy_hw_init_sata()
1304 xgene_phy_sata_cfg_lanes(ctx); xgene_phy_hw_init_sata()
1315 if (!xgene_phy_cal_rdy_chk(ctx, PHY_CMU, clk_type)) xgene_phy_hw_init_sata()
1318 xgene_phy_pdwn_force_vco(ctx, PHY_CMU, clk_type); xgene_phy_hw_init_sata()
1322 dev_err(ctx->dev, "PLL calibration failed\n"); xgene_phy_hw_init_sata()
1327 static int xgene_phy_hw_initialize(struct xgene_phy_ctx *ctx, xgene_phy_hw_initialize() argument
1333 dev_dbg(ctx->dev, "PHY init clk type %d\n", clk_type); xgene_phy_hw_initialize()
1335 if (ctx->mode == MODE_SATA) { xgene_phy_hw_initialize()
1336 rc = xgene_phy_hw_init_sata(ctx, clk_type, ssc_enable); xgene_phy_hw_initialize()
1340 dev_err(ctx->dev, "Un-supported customer pin mode %d\n", xgene_phy_hw_initialize()
1341 ctx->mode); xgene_phy_hw_initialize()
1354 static void xgene_phy_force_lat_summer_cal(struct xgene_phy_ctx *ctx, int lane) xgene_phy_force_lat_summer_cal() argument
1382 serdes_setbits(ctx, lane, RXTX_REG127, xgene_phy_force_lat_summer_cal()
1389 serdes_clrbits(ctx, lane, RXTX_REG127, xgene_phy_force_lat_summer_cal()
1398 serdes_setbits(ctx, lane, RXTX_REG127, xgene_phy_force_lat_summer_cal()
1405 serdes_clrbits(ctx, lane, RXTX_REG127, xgene_phy_force_lat_summer_cal()
1409 serdes_wr(ctx, lane, RXTX_REG28, 0x7); xgene_phy_force_lat_summer_cal()
1410 serdes_wr(ctx, lane, RXTX_REG31, 0x7e00); xgene_phy_force_lat_summer_cal()
1411 serdes_clrbits(ctx, lane, RXTX_REG4, xgene_phy_force_lat_summer_cal()
1413 serdes_clrbits(ctx, lane, RXTX_REG7, xgene_phy_force_lat_summer_cal()
1416 serdes_wr(ctx, lane, serdes_reg[i].reg, xgene_phy_force_lat_summer_cal()
1420 static void xgene_phy_reset_rxd(struct xgene_phy_ctx *ctx, int lane) xgene_phy_reset_rxd() argument
1423 serdes_clrbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK); xgene_phy_reset_rxd()
1426 serdes_setbits(ctx, lane, RXTX_REG7, RXTX_REG7_RESETB_RXD_MASK); xgene_phy_reset_rxd()
1434 static void xgene_phy_gen_avg_val(struct xgene_phy_ctx *ctx, int lane) xgene_phy_gen_avg_val() argument
1448 dev_dbg(ctx->dev, "Generating avg calibration value for lane %d\n", xgene_phy_gen_avg_val()
1452 serdes_setbits(ctx, lane, RXTX_REG12, xgene_phy_gen_avg_val()
1455 serdes_wr(ctx, lane, RXTX_REG28, 0x0000); xgene_phy_gen_avg_val()
1457 serdes_wr(ctx, lane, RXTX_REG31, 0x0000); xgene_phy_gen_avg_val()
1468 xgene_phy_force_lat_summer_cal(ctx, lane); xgene_phy_gen_avg_val()
1470 serdes_rd(ctx, lane, RXTX_REG21, &val); xgene_phy_gen_avg_val()
1475 serdes_rd(ctx, lane, RXTX_REG22, &val); xgene_phy_gen_avg_val()
1480 serdes_rd(ctx, lane, RXTX_REG23, &val); xgene_phy_gen_avg_val()
1484 serdes_rd(ctx, lane, RXTX_REG24, &val); xgene_phy_gen_avg_val()
1488 serdes_rd(ctx, lane, RXTX_REG121, &val); xgene_phy_gen_avg_val()
1504 dev_dbg(ctx->dev, "Iteration %d:\n", avg_loop); xgene_phy_gen_avg_val()
1505 dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n", xgene_phy_gen_avg_val()
1508 dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n", xgene_phy_gen_avg_val()
1511 dev_dbg(ctx->dev, "SUM 0x%x\n", sum_cal_itr); xgene_phy_gen_avg_val()
1514 dev_err(ctx->dev, xgene_phy_gen_avg_val()
1518 xgene_phy_reset_rxd(ctx, lane); xgene_phy_gen_avg_val()
1522 serdes_rd(ctx, lane, RXTX_REG127, &val); xgene_phy_gen_avg_val()
1527 serdes_wr(ctx, lane, RXTX_REG127, val); xgene_phy_gen_avg_val()
1529 serdes_rd(ctx, lane, RXTX_REG128, &val); xgene_phy_gen_avg_val()
1534 serdes_wr(ctx, lane, RXTX_REG128, val); xgene_phy_gen_avg_val()
1536 serdes_rd(ctx, lane, RXTX_REG129, &val); xgene_phy_gen_avg_val()
1541 serdes_wr(ctx, lane, RXTX_REG129, val); xgene_phy_gen_avg_val()
1543 serdes_rd(ctx, lane, RXTX_REG130, &val); xgene_phy_gen_avg_val()
1548 serdes_wr(ctx, lane, RXTX_REG130, val); xgene_phy_gen_avg_val()
1551 serdes_rd(ctx, lane, RXTX_REG14, &val); xgene_phy_gen_avg_val()
1554 serdes_wr(ctx, lane, RXTX_REG14, val); xgene_phy_gen_avg_val()
1556 dev_dbg(ctx->dev, "Average Value:\n"); xgene_phy_gen_avg_val()
1557 dev_dbg(ctx->dev, "DO 0x%x XO 0x%x EO 0x%x SO 0x%x\n", xgene_phy_gen_avg_val()
1562 dev_dbg(ctx->dev, "DE 0x%x XE 0x%x EE 0x%x SE 0x%x\n", xgene_phy_gen_avg_val()
1567 dev_dbg(ctx->dev, "SUM 0x%x\n", xgene_phy_gen_avg_val()
1570 serdes_rd(ctx, lane, RXTX_REG14, &val); xgene_phy_gen_avg_val()
1572 serdes_wr(ctx, lane, RXTX_REG14, val); xgene_phy_gen_avg_val()
1573 dev_dbg(ctx->dev, "Enable Manual Summer calibration\n"); xgene_phy_gen_avg_val()
1575 serdes_rd(ctx, lane, RXTX_REG127, &val); xgene_phy_gen_avg_val()
1577 dev_dbg(ctx->dev, "Enable Manual Latch calibration\n"); xgene_phy_gen_avg_val()
1578 serdes_wr(ctx, lane, RXTX_REG127, val); xgene_phy_gen_avg_val()
1581 serdes_rd(ctx, lane, RXTX_REG12, &val); xgene_phy_gen_avg_val()
1583 serdes_wr(ctx, lane, RXTX_REG12, val); xgene_phy_gen_avg_val()
1585 serdes_wr(ctx, lane, RXTX_REG28, 0x0007); xgene_phy_gen_avg_val()
1587 serdes_wr(ctx, lane, RXTX_REG31, 0x7e00); xgene_phy_gen_avg_val()
1592 struct xgene_phy_ctx *ctx = phy_get_drvdata(phy); xgene_phy_hw_init() local
1596 rc = xgene_phy_hw_initialize(ctx, CLK_EXT_DIFF, SSC_DISABLE); xgene_phy_hw_init()
1598 dev_err(ctx->dev, "PHY initialize failed %d\n", rc); xgene_phy_hw_init()
1603 if (!IS_ERR(ctx->clk)) { xgene_phy_hw_init()
1605 clk_prepare_enable(ctx->clk); xgene_phy_hw_init()
1606 clk_disable_unprepare(ctx->clk); xgene_phy_hw_init()
1607 clk_prepare_enable(ctx->clk); xgene_phy_hw_init()
1612 xgene_phy_gen_avg_val(ctx, i); xgene_phy_hw_init()
1614 dev_dbg(ctx->dev, "PHY initialized\n"); xgene_phy_hw_init()
1626 struct xgene_phy_ctx *ctx = dev_get_drvdata(dev); xgene_phy_xlate() local
1633 ctx->mode = args->args[0]; xgene_phy_xlate()
1634 return ctx->phy; xgene_phy_xlate()
1658 struct xgene_phy_ctx *ctx; xgene_phy_probe() local
1670 ctx = devm_kzalloc(&pdev->dev, sizeof(*ctx), GFP_KERNEL); xgene_phy_probe()
1671 if (!ctx) xgene_phy_probe()
1674 ctx->dev = &pdev->dev; xgene_phy_probe()
1677 ctx->sds_base = devm_ioremap_resource(&pdev->dev, res); xgene_phy_probe()
1678 if (IS_ERR(ctx->sds_base)) xgene_phy_probe()
1679 return PTR_ERR(ctx->sds_base); xgene_phy_probe()
1682 ctx->clk = clk_get(&pdev->dev, NULL); xgene_phy_probe()
1686 ctx->sata_param.txeyetuning, 6, default_txeye_tuning, 1); xgene_phy_probe()
1688 ctx->sata_param.txeyedirection, 6, default_txeye_direction, 1); xgene_phy_probe()
1690 ctx->sata_param.txboostgain, 6, default_txboost_gain, 1); xgene_phy_probe()
1692 ctx->sata_param.txamplitude, 6, default_txamp, 13300); xgene_phy_probe()
1694 ctx->sata_param.txprecursor_cn1, 6, default_txcn1, 18200); xgene_phy_probe()
1696 ctx->sata_param.txprecursor_cn2, 6, default_txcn2, 18200); xgene_phy_probe()
1698 ctx->sata_param.txpostcursor_cp1, 6, default_txcp1, 18200); xgene_phy_probe()
1700 ctx->sata_param.txspeed, 3, default_spd, 1); xgene_phy_probe()
1702 ctx->sata_param.speed[i] = 2; /* Default to Gen3 */ xgene_phy_probe()
1704 platform_set_drvdata(pdev, ctx); xgene_phy_probe()
1706 ctx->phy = devm_phy_create(ctx->dev, NULL, &xgene_phy_ops); xgene_phy_probe()
1707 if (IS_ERR(ctx->phy)) { xgene_phy_probe()
1709 return PTR_ERR(ctx->phy); xgene_phy_probe()
1711 phy_set_drvdata(ctx->phy, ctx); xgene_phy_probe()
1713 phy_provider = devm_of_phy_provider_register(ctx->dev, xgene_phy_xlate); xgene_phy_probe()
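
Nearly every phy-xgene.c hit is one read-modify-write against an indirect SerDes register: cmu_rd() latches the value, a CMU_REGx_*_SET() macro splices in the field, and cmu_wr() writes it back, with cmu_setbits()/cmu_clrbits() covering the single-mask cases. The idiom factored out, a sketch reusing the accessors shown above (the helper name cmu_update() is illustrative):

static void cmu_update(struct xgene_phy_ctx *ctx, enum cmu_type_t cmu_type,
		       u32 reg, u32 clr_mask, u32 set_bits)
{
	u32 val;

	cmu_rd(ctx, cmu_type, reg, &val);	/* latch current contents */
	val &= ~clr_mask;			/* clear the target field */
	val |= set_bits;			/* splice in the new value */
	cmu_wr(ctx, cmu_type, reg, val);	/* write back over the bus */
}
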
/linux-4.1.27/drivers/media/platform/s5p-jpeg/
H A Djpeg-core.c552 struct s5p_jpeg_ctx *ctx) s5p_jpeg_adjust_fourcc_to_subsampling()
556 if (ctx->subsampling != V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY) { s5p_jpeg_adjust_fourcc_to_subsampling()
563 switch (ctx->subsampling) { s5p_jpeg_adjust_fourcc_to_subsampling()
614 static int s5p_jpeg_to_user_subsampling(struct s5p_jpeg_ctx *ctx) s5p_jpeg_to_user_subsampling() argument
616 WARN_ON(ctx->subsampling > 3); s5p_jpeg_to_user_subsampling()
618 switch (ctx->jpeg->variant->version) { s5p_jpeg_to_user_subsampling()
620 if (ctx->subsampling > 2) s5p_jpeg_to_user_subsampling()
622 return ctx->subsampling; s5p_jpeg_to_user_subsampling()
625 if (ctx->subsampling > 3) s5p_jpeg_to_user_subsampling()
627 return exynos3250_decoded_subsampling[ctx->subsampling]; s5p_jpeg_to_user_subsampling()
629 if (ctx->subsampling > 2) s5p_jpeg_to_user_subsampling()
631 return exynos4x12_decoded_subsampling[ctx->subsampling]; s5p_jpeg_to_user_subsampling()
761 static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx,
763 static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx);
769 struct s5p_jpeg_ctx *ctx; s5p_jpeg_open() local
773 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); s5p_jpeg_open()
774 if (!ctx) s5p_jpeg_open()
782 v4l2_fh_init(&ctx->fh, vfd); s5p_jpeg_open()
784 ctx->fh.ctrl_handler = &ctx->ctrl_handler; s5p_jpeg_open()
785 file->private_data = &ctx->fh; s5p_jpeg_open()
786 v4l2_fh_add(&ctx->fh); s5p_jpeg_open()
788 ctx->jpeg = jpeg; s5p_jpeg_open()
790 ctx->mode = S5P_JPEG_ENCODE; s5p_jpeg_open()
791 out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_RGB565, s5p_jpeg_open()
793 cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG, s5p_jpeg_open()
796 ctx->mode = S5P_JPEG_DECODE; s5p_jpeg_open()
797 out_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_JPEG, s5p_jpeg_open()
799 cap_fmt = s5p_jpeg_find_format(ctx, V4L2_PIX_FMT_YUYV, s5p_jpeg_open()
801 ctx->scale_factor = EXYNOS3250_DEC_SCALE_FACTOR_8_8; s5p_jpeg_open()
804 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(jpeg->m2m_dev, ctx, queue_init); s5p_jpeg_open()
805 if (IS_ERR(ctx->fh.m2m_ctx)) { s5p_jpeg_open()
806 ret = PTR_ERR(ctx->fh.m2m_ctx); s5p_jpeg_open()
810 ctx->out_q.fmt = out_fmt; s5p_jpeg_open()
811 ctx->cap_q.fmt = cap_fmt; s5p_jpeg_open()
813 ret = s5p_jpeg_controls_create(ctx); s5p_jpeg_open()
821 v4l2_fh_del(&ctx->fh); s5p_jpeg_open()
822 v4l2_fh_exit(&ctx->fh); s5p_jpeg_open()
825 kfree(ctx); s5p_jpeg_open()
832 struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data); s5p_jpeg_release() local
835 v4l2_m2m_ctx_release(ctx->fh.m2m_ctx); s5p_jpeg_release()
836 v4l2_ctrl_handler_free(&ctx->ctrl_handler); s5p_jpeg_release()
837 v4l2_fh_del(&ctx->fh); s5p_jpeg_release()
838 v4l2_fh_exit(&ctx->fh); s5p_jpeg_release()
839 kfree(ctx); s5p_jpeg_release()
895 struct s5p_jpeg_ctx *ctx) s5p_jpeg_parse_hdr()
970 ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_444; s5p_jpeg_parse_hdr()
973 ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_422; s5p_jpeg_parse_hdr()
976 ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_420; s5p_jpeg_parse_hdr()
979 ctx->subsampling = V4L2_JPEG_CHROMA_SUBSAMPLING_GRAY; s5p_jpeg_parse_hdr()
991 struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); s5p_jpeg_querycap() local
993 if (ctx->mode == S5P_JPEG_ENCODE) { s5p_jpeg_querycap()
1039 struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); s5p_jpeg_enum_fmt_vid_cap() local
1041 if (ctx->mode == S5P_JPEG_ENCODE) s5p_jpeg_enum_fmt_vid_cap()
1052 struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); s5p_jpeg_enum_fmt_vid_out() local
1054 if (ctx->mode == S5P_JPEG_ENCODE) s5p_jpeg_enum_fmt_vid_out()
1062 static struct s5p_jpeg_q_data *get_q_data(struct s5p_jpeg_ctx *ctx, get_q_data() argument
1066 return &ctx->out_q; get_q_data()
1068 return &ctx->cap_q; get_q_data()
1106 static struct s5p_jpeg_fmt *s5p_jpeg_find_format(struct s5p_jpeg_ctx *ctx, s5p_jpeg_find_format() argument
1111 if (ctx->mode == S5P_JPEG_ENCODE) s5p_jpeg_find_format()
1124 fmt->flags & ctx->jpeg->variant->fmt_ver_flag) { s5p_jpeg_find_format()
1132 static void jpeg_bound_align_image(struct s5p_jpeg_ctx *ctx, jpeg_bound_align_image() argument
1146 if (ctx->jpeg->variant->hw3250_compat) { jpeg_bound_align_image()
1169 struct s5p_jpeg_ctx *ctx, int q_type) vidioc_try_fmt()
1181 jpeg_bound_align_image(ctx, &pix->width, S5P_JPEG_MIN_WIDTH, vidioc_try_fmt()
1186 jpeg_bound_align_image(ctx, &pix->width, S5P_JPEG_MIN_WIDTH, vidioc_try_fmt()
1215 struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); s5p_jpeg_try_fmt_vid_cap() local
1220 fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat, s5p_jpeg_try_fmt_vid_cap()
1223 v4l2_err(&ctx->jpeg->v4l2_dev, s5p_jpeg_try_fmt_vid_cap()
1229 if ((ctx->jpeg->variant->version != SJPEG_EXYNOS4) || s5p_jpeg_try_fmt_vid_cap()
1230 (ctx->mode != S5P_JPEG_DECODE)) s5p_jpeg_try_fmt_vid_cap()
1240 (fmt->subsampling < ctx->subsampling)) { s5p_jpeg_try_fmt_vid_cap()
1241 ret = s5p_jpeg_adjust_fourcc_to_subsampling(ctx->subsampling, s5p_jpeg_try_fmt_vid_cap()
1244 ctx); s5p_jpeg_try_fmt_vid_cap()
1248 fmt = s5p_jpeg_find_format(ctx, pix->pixelformat, s5p_jpeg_try_fmt_vid_cap()
1258 if (ctx->subsampling == V4L2_JPEG_CHROMA_SUBSAMPLING_420 && s5p_jpeg_try_fmt_vid_cap()
1259 (ctx->out_q.w & 1) && s5p_jpeg_try_fmt_vid_cap()
1264 fmt = s5p_jpeg_find_format(ctx, pix->pixelformat, s5p_jpeg_try_fmt_vid_cap()
1269 return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_CAPTURE); s5p_jpeg_try_fmt_vid_cap()
1275 struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); s5p_jpeg_try_fmt_vid_out() local
1278 fmt = s5p_jpeg_find_format(ctx, f->fmt.pix.pixelformat, s5p_jpeg_try_fmt_vid_out()
1281 v4l2_err(&ctx->jpeg->v4l2_dev, s5p_jpeg_try_fmt_vid_out()
1287 return vidioc_try_fmt(f, fmt, ctx, FMT_TYPE_OUTPUT); s5p_jpeg_try_fmt_vid_out()
1290 static int exynos4_jpeg_get_output_buffer_size(struct s5p_jpeg_ctx *ctx, exynos4_jpeg_get_output_buffer_size() argument
1308 jpeg_bound_align_image(ctx, &w, S5P_JPEG_MIN_WIDTH, exynos4_jpeg_get_output_buffer_size()
1316 static int exynos3250_jpeg_try_downscale(struct s5p_jpeg_ctx *ctx,
1428 static int exynos3250_jpeg_try_downscale(struct s5p_jpeg_ctx *ctx, exynos3250_jpeg_try_downscale() argument
1433 w_ratio = ctx->out_q.w / r->width; exynos3250_jpeg_try_downscale()
1434 h_ratio = ctx->out_q.h / r->height; exynos3250_jpeg_try_downscale()
1443 ctx->scale_factor = cur_ratio; exynos3250_jpeg_try_downscale()
1448 r->width = round_down(ctx->out_q.w / ctx->scale_factor, 2); exynos3250_jpeg_try_downscale()
1449 r->height = round_down(ctx->out_q.h / ctx->scale_factor, 2); exynos3250_jpeg_try_downscale()
1451 ctx->crop_rect.width = r->width; exynos3250_jpeg_try_downscale()
1452 ctx->crop_rect.height = r->height; exynos3250_jpeg_try_downscale()
1453 ctx->crop_rect.left = 0; exynos3250_jpeg_try_downscale()
1454 ctx->crop_rect.top = 0; exynos3250_jpeg_try_downscale()
1456 ctx->crop_altered = true; exynos3250_jpeg_try_downscale()
1474 static int exynos3250_jpeg_try_crop(struct s5p_jpeg_ctx *ctx, exynos3250_jpeg_try_crop() argument
1480 switch (ctx->cap_q.fmt->fourcc) { exynos3250_jpeg_try_crop()
1498 base_rect.width = ctx->out_q.w; exynos3250_jpeg_try_crop()
1499 base_rect.height = ctx->out_q.h; exynos3250_jpeg_try_crop()
1509 ctx->crop_rect.left = r->left; exynos3250_jpeg_try_crop()
1510 ctx->crop_rect.top = r->top; exynos3250_jpeg_try_crop()
1511 ctx->crop_rect.width = r->width; exynos3250_jpeg_try_crop()
1512 ctx->crop_rect.height = r->height; exynos3250_jpeg_try_crop()
1514 ctx->crop_altered = true; exynos3250_jpeg_try_crop()
1526 struct s5p_jpeg_ctx *ctx = fh_to_ctx(priv); s5p_jpeg_g_selection() local
1538 s->r.width = ctx->out_q.w; s5p_jpeg_g_selection()
1539 s->r.height = ctx->out_q.h; s5p_jpeg_g_selection()
1546 s->r.width = ctx->crop_rect.width; s5p_jpeg_g_selection()
1547 s->r.height = ctx->crop_rect.height; s5p_jpeg_g_selection()
1548 s->r.left = ctx->crop_rect.left; s5p_jpeg_g_selection()
1549 s->r.top = ctx->crop_rect.top; s5p_jpeg_g_selection()
1563 struct s5p_jpeg_ctx *ctx = fh_to_ctx(file->private_data); s5p_jpeg_s_selection() local
1571 if (ctx->mode != S5P_JPEG_DECODE) s5p_jpeg_s_selection()
1573 if (ctx->jpeg->variant->hw3250_compat) s5p_jpeg_s_selection()
1574 ret = exynos3250_jpeg_try_downscale(ctx, rect); s5p_jpeg_s_selection()
1576 if (ctx->mode != S5P_JPEG_ENCODE) s5p_jpeg_s_selection()
1578 if (ctx->jpeg->variant->hw3250_compat) s5p_jpeg_s_selection()
1579 ret = exynos3250_jpeg_try_crop(ctx, rect); s5p_jpeg_s_selection()
1587 struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl); s5p_jpeg_g_volatile_ctrl() local
1588 struct s5p_jpeg *jpeg = ctx->jpeg; s5p_jpeg_g_volatile_ctrl()
1594 ctrl->val = s5p_jpeg_to_user_subsampling(ctx); s5p_jpeg_g_volatile_ctrl()
1602 static int s5p_jpeg_adjust_subs_ctrl(struct s5p_jpeg_ctx *ctx, int *ctrl_val) s5p_jpeg_adjust_subs_ctrl() argument
1604 switch (ctx->jpeg->variant->version) { s5p_jpeg_adjust_subs_ctrl()
1613 if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB32) s5p_jpeg_adjust_subs_ctrl()
1622 if (ctx->out_q.fmt->fourcc != V4L2_PIX_FMT_GREY && s5p_jpeg_adjust_subs_ctrl()
1633 if (ctx->out_q.fmt->subsampling > *ctrl_val) s5p_jpeg_adjust_subs_ctrl()
1634 *ctrl_val = ctx->out_q.fmt->subsampling; s5p_jpeg_adjust_subs_ctrl()
1641 struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl); s5p_jpeg_try_ctrl() local
1645 spin_lock_irqsave(&ctx->jpeg->slock, flags); s5p_jpeg_try_ctrl()
1648 ret = s5p_jpeg_adjust_subs_ctrl(ctx, &ctrl->val); s5p_jpeg_try_ctrl()
1650 spin_unlock_irqrestore(&ctx->jpeg->slock, flags); s5p_jpeg_try_ctrl()
1656 struct s5p_jpeg_ctx *ctx = ctrl_to_ctx(ctrl); s5p_jpeg_s_ctrl() local
1659 spin_lock_irqsave(&ctx->jpeg->slock, flags); s5p_jpeg_s_ctrl()
1663 ctx->compr_quality = ctrl->val; s5p_jpeg_s_ctrl()
1666 ctx->restart_interval = ctrl->val; s5p_jpeg_s_ctrl()
1669 ctx->subsampling = ctrl->val; s5p_jpeg_s_ctrl()
1673 spin_unlock_irqrestore(&ctx->jpeg->slock, flags); s5p_jpeg_s_ctrl()
1683 static int s5p_jpeg_controls_create(struct s5p_jpeg_ctx *ctx) s5p_jpeg_controls_create() argument
1689 v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3); s5p_jpeg_controls_create()
1691 if (ctx->mode == S5P_JPEG_ENCODE) { s5p_jpeg_controls_create()
1692 v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops, s5p_jpeg_controls_create()
1696 v4l2_ctrl_new_std(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops, s5p_jpeg_controls_create()
1699 if (ctx->jpeg->variant->version == SJPEG_S5P) s5p_jpeg_controls_create()
1703 ctrl = v4l2_ctrl_new_std_menu(&ctx->ctrl_handler, &s5p_jpeg_ctrl_ops, s5p_jpeg_controls_create()
1708 if (ctx->ctrl_handler.error) { s5p_jpeg_controls_create()
1709 ret = ctx->ctrl_handler.error; s5p_jpeg_controls_create()
1713 if (ctx->mode == S5P_JPEG_DECODE) s5p_jpeg_controls_create()
1717 ret = v4l2_ctrl_handler_setup(&ctx->ctrl_handler); s5p_jpeg_controls_create()
1724 v4l2_ctrl_handler_free(&ctx->ctrl_handler); s5p_jpeg_controls_create()
1763 struct s5p_jpeg_ctx *ctx = priv; s5p_jpeg_device_run() local
1764 struct s5p_jpeg *jpeg = ctx->jpeg; s5p_jpeg_device_run()
1768 spin_lock_irqsave(&ctx->jpeg->slock, flags); s5p_jpeg_device_run()
1770 src_buf = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); s5p_jpeg_device_run()
1771 dst_buf = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); s5p_jpeg_device_run()
1777 s5p_jpeg_proc_mode(jpeg->regs, ctx->mode); s5p_jpeg_device_run()
1778 if (ctx->mode == S5P_JPEG_ENCODE) { s5p_jpeg_device_run()
1779 if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565) s5p_jpeg_device_run()
1785 s5p_jpeg_subsampling_mode(jpeg->regs, ctx->subsampling); s5p_jpeg_device_run()
1786 s5p_jpeg_dri(jpeg->regs, ctx->restart_interval); s5p_jpeg_device_run()
1787 s5p_jpeg_x(jpeg->regs, ctx->out_q.w); s5p_jpeg_device_run()
1788 s5p_jpeg_y(jpeg->regs, ctx->out_q.h); s5p_jpeg_device_run()
1793 s5p_jpeg_enc_stream_int(jpeg->regs, ctx->cap_q.size); s5p_jpeg_device_run()
1810 s5p_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality); s5p_jpeg_device_run()
1811 s5p_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality); s5p_jpeg_device_run()
1829 if (ctx->cap_q.fmt->fourcc == V4L2_PIX_FMT_YUYV) s5p_jpeg_device_run()
1839 spin_unlock_irqrestore(&ctx->jpeg->slock, flags); s5p_jpeg_device_run()
1842 static void exynos4_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) exynos4_jpeg_set_img_addr() argument
1844 struct s5p_jpeg *jpeg = ctx->jpeg; exynos4_jpeg_set_img_addr()
1853 pix_size = ctx->cap_q.w * ctx->cap_q.h; exynos4_jpeg_set_img_addr()
1855 if (ctx->mode == S5P_JPEG_ENCODE) { exynos4_jpeg_set_img_addr()
1856 vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); exynos4_jpeg_set_img_addr()
1857 fmt = ctx->out_q.fmt; exynos4_jpeg_set_img_addr()
1858 if (ctx->out_q.w % 2 && fmt->h_align > 0) exynos4_jpeg_set_img_addr()
1859 padding_bytes = ctx->out_q.h; exynos4_jpeg_set_img_addr()
1861 fmt = ctx->cap_q.fmt; exynos4_jpeg_set_img_addr()
1862 vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); exynos4_jpeg_set_img_addr()
1880 static void exynos4_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) exynos4_jpeg_set_jpeg_addr() argument
1882 struct s5p_jpeg *jpeg = ctx->jpeg; exynos4_jpeg_set_jpeg_addr()
1886 if (ctx->mode == S5P_JPEG_ENCODE) exynos4_jpeg_set_jpeg_addr()
1887 vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); exynos4_jpeg_set_jpeg_addr()
1889 vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); exynos4_jpeg_set_jpeg_addr()
1897 struct s5p_jpeg_ctx *ctx = priv; exynos4_jpeg_device_run() local
1898 struct s5p_jpeg *jpeg = ctx->jpeg; exynos4_jpeg_device_run()
1902 spin_lock_irqsave(&ctx->jpeg->slock, flags); exynos4_jpeg_device_run()
1904 if (ctx->mode == S5P_JPEG_ENCODE) { exynos4_jpeg_device_run()
1915 exynos4_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality); exynos4_jpeg_device_run()
1916 exynos4_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality); exynos4_jpeg_device_run()
1919 ctx->compr_quality); exynos4_jpeg_device_run()
1920 exynos4_jpeg_set_stream_size(jpeg->regs, ctx->cap_q.w, exynos4_jpeg_device_run()
1921 ctx->cap_q.h); exynos4_jpeg_device_run()
1923 exynos4_jpeg_set_enc_out_fmt(jpeg->regs, ctx->subsampling); exynos4_jpeg_device_run()
1924 exynos4_jpeg_set_img_fmt(jpeg->regs, ctx->out_q.fmt->fourcc); exynos4_jpeg_device_run()
1925 exynos4_jpeg_set_img_addr(ctx); exynos4_jpeg_device_run()
1926 exynos4_jpeg_set_jpeg_addr(ctx); exynos4_jpeg_device_run()
1928 ctx->out_q.fmt->fourcc); exynos4_jpeg_device_run()
1932 exynos4_jpeg_set_img_addr(ctx); exynos4_jpeg_device_run()
1933 exynos4_jpeg_set_jpeg_addr(ctx); exynos4_jpeg_device_run()
1934 exynos4_jpeg_set_img_fmt(jpeg->regs, ctx->cap_q.fmt->fourcc); exynos4_jpeg_device_run()
1936 bitstream_size = DIV_ROUND_UP(ctx->out_q.size, 32); exynos4_jpeg_device_run()
1941 exynos4_jpeg_set_enc_dec_mode(jpeg->regs, ctx->mode); exynos4_jpeg_device_run()
1943 spin_unlock_irqrestore(&ctx->jpeg->slock, flags); exynos4_jpeg_device_run()
1946 static void exynos3250_jpeg_set_img_addr(struct s5p_jpeg_ctx *ctx) exynos3250_jpeg_set_img_addr() argument
1948 struct s5p_jpeg *jpeg = ctx->jpeg; exynos3250_jpeg_set_img_addr()
1954 pix_size = ctx->cap_q.w * ctx->cap_q.h; exynos3250_jpeg_set_img_addr()
1956 if (ctx->mode == S5P_JPEG_ENCODE) { exynos3250_jpeg_set_img_addr()
1957 vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); exynos3250_jpeg_set_img_addr()
1958 fmt = ctx->out_q.fmt; exynos3250_jpeg_set_img_addr()
1960 vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); exynos3250_jpeg_set_img_addr()
1961 fmt = ctx->cap_q.fmt; exynos3250_jpeg_set_img_addr()
1979 static void exynos3250_jpeg_set_jpeg_addr(struct s5p_jpeg_ctx *ctx) exynos3250_jpeg_set_jpeg_addr() argument
1981 struct s5p_jpeg *jpeg = ctx->jpeg; exynos3250_jpeg_set_jpeg_addr()
1985 if (ctx->mode == S5P_JPEG_ENCODE) exynos3250_jpeg_set_jpeg_addr()
1986 vb = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); exynos3250_jpeg_set_jpeg_addr()
1988 vb = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); exynos3250_jpeg_set_jpeg_addr()
1996 struct s5p_jpeg_ctx *ctx = priv; exynos3250_jpeg_device_run() local
1997 struct s5p_jpeg *jpeg = ctx->jpeg; exynos3250_jpeg_device_run()
2000 spin_lock_irqsave(&ctx->jpeg->slock, flags); exynos3250_jpeg_device_run()
2006 exynos3250_jpeg_proc_mode(jpeg->regs, ctx->mode); exynos3250_jpeg_device_run()
2008 if (ctx->mode == S5P_JPEG_ENCODE) { exynos3250_jpeg_device_run()
2010 ctx->out_q.fmt->fourcc); exynos3250_jpeg_device_run()
2011 exynos3250_jpeg_dri(jpeg->regs, ctx->restart_interval); exynos3250_jpeg_device_run()
2017 s5p_jpeg_set_qtbl_lum(jpeg->regs, ctx->compr_quality); exynos3250_jpeg_device_run()
2018 s5p_jpeg_set_qtbl_chr(jpeg->regs, ctx->compr_quality); exynos3250_jpeg_device_run()
2043 exynos3250_jpeg_set_x(jpeg->regs, ctx->crop_rect.width); exynos3250_jpeg_device_run()
2044 exynos3250_jpeg_set_y(jpeg->regs, ctx->crop_rect.height); exynos3250_jpeg_device_run()
2045 exynos3250_jpeg_stride(jpeg->regs, ctx->out_q.fmt->fourcc, exynos3250_jpeg_device_run()
2046 ctx->out_q.w); exynos3250_jpeg_device_run()
2047 exynos3250_jpeg_offset(jpeg->regs, ctx->crop_rect.left, exynos3250_jpeg_device_run()
2048 ctx->crop_rect.top); exynos3250_jpeg_device_run()
2049 exynos3250_jpeg_set_img_addr(ctx); exynos3250_jpeg_device_run()
2050 exynos3250_jpeg_set_jpeg_addr(ctx); exynos3250_jpeg_device_run()
2051 exynos3250_jpeg_subsampling_mode(jpeg->regs, ctx->subsampling); exynos3250_jpeg_device_run()
2054 exynos3250_jpeg_enc_stream_bound(jpeg->regs, ctx->cap_q.size); exynos3250_jpeg_device_run()
2056 if (ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565 || exynos3250_jpeg_device_run()
2057 ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB565X || exynos3250_jpeg_device_run()
2058 ctx->out_q.fmt->fourcc == V4L2_PIX_FMT_RGB32) exynos3250_jpeg_device_run()
2061 exynos3250_jpeg_set_img_addr(ctx); exynos3250_jpeg_device_run()
2062 exynos3250_jpeg_set_jpeg_addr(ctx); exynos3250_jpeg_device_run()
2063 exynos3250_jpeg_stride(jpeg->regs, ctx->cap_q.fmt->fourcc, exynos3250_jpeg_device_run()
2064 ctx->cap_q.w); exynos3250_jpeg_device_run()
2067 ctx->scale_factor); exynos3250_jpeg_device_run()
2068 exynos3250_jpeg_dec_stream_size(jpeg->regs, ctx->out_q.size); exynos3250_jpeg_device_run()
2070 ctx->cap_q.fmt->fourcc); exynos3250_jpeg_device_run()
2076 exynos3250_jpeg_coef(jpeg->regs, ctx->mode); exynos3250_jpeg_device_run()
2082 spin_unlock_irqrestore(&ctx->jpeg->slock, flags); exynos3250_jpeg_device_run()
2087 struct s5p_jpeg_ctx *ctx = priv; s5p_jpeg_job_ready() local
2089 if (ctx->mode == S5P_JPEG_DECODE) s5p_jpeg_job_ready()
2090 return ctx->hdr_parsed; s5p_jpeg_job_ready()
2127 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vq); s5p_jpeg_queue_setup() local
2131 q_data = get_q_data(ctx, vq->type); s5p_jpeg_queue_setup()
2140 if (ctx->mode == S5P_JPEG_DECODE) s5p_jpeg_queue_setup()
2146 alloc_ctxs[0] = ctx->jpeg->alloc_ctx; s5p_jpeg_queue_setup()
2153 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); s5p_jpeg_buf_prepare() local
2156 q_data = get_q_data(ctx, vb->vb2_queue->type); s5p_jpeg_buf_prepare()
2173 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); s5p_jpeg_buf_queue() local
2175 if (ctx->mode == S5P_JPEG_DECODE && s5p_jpeg_buf_queue()
2178 ctx->hdr_parsed = s5p_jpeg_parse_hdr(&tmp, s5p_jpeg_buf_queue()
2180 min((unsigned long)ctx->out_q.size, s5p_jpeg_buf_queue()
2181 vb2_get_plane_payload(vb, 0)), ctx); s5p_jpeg_buf_queue()
2182 if (!ctx->hdr_parsed) { s5p_jpeg_buf_queue()
2187 q_data = &ctx->out_q; s5p_jpeg_buf_queue()
2191 q_data = &ctx->cap_q; s5p_jpeg_buf_queue()
2196 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); s5p_jpeg_buf_queue()
2201 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q); s5p_jpeg_start_streaming() local
2204 ret = pm_runtime_get_sync(ctx->jpeg->dev); s5p_jpeg_start_streaming()
2211 struct s5p_jpeg_ctx *ctx = vb2_get_drv_priv(q); s5p_jpeg_stop_streaming() local
2213 pm_runtime_put(ctx->jpeg->dev); s5p_jpeg_stop_streaming()
2229 struct s5p_jpeg_ctx *ctx = priv; queue_init() local
2234 src_vq->drv_priv = ctx; queue_init()
2239 src_vq->lock = &ctx->jpeg->lock; queue_init()
2247 dst_vq->drv_priv = ctx; queue_init()
2252 dst_vq->lock = &ctx->jpeg->lock; queue_init()
548 s5p_jpeg_adjust_fourcc_to_subsampling( enum v4l2_jpeg_chroma_subsampling subs, u32 in_fourcc, u32 *out_fourcc, struct s5p_jpeg_ctx *ctx) s5p_jpeg_adjust_fourcc_to_subsampling() argument
893 s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result, unsigned long buffer, unsigned long size, struct s5p_jpeg_ctx *ctx) s5p_jpeg_parse_hdr() argument
1168 vidioc_try_fmt(struct v4l2_format *f, struct s5p_jpeg_fmt *fmt, struct s5p_jpeg_ctx *ctx, int q_type) vidioc_try_fmt() argument
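Throughout this excerpt, fh_to_ctx() recovers the driver context from the v4l2_fh that s5p_jpeg_open() stored in file->private_data. That works because the v4l2_fh is embedded inside the ctx, so it reduces to a plain container_of; a sketch of how such a helper is typically defined (struct layout assumed from the open() path above):

	static inline struct s5p_jpeg_ctx *fh_to_ctx(struct v4l2_fh *fh)
	{
		/* v4l2_fh_init(&ctx->fh, ...) embedded the fh inside the ctx */
		return container_of(fh, struct s5p_jpeg_ctx, fh);
	}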
/linux-4.1.27/drivers/media/platform/s5p-g2d/
g2d.c 91 static struct g2d_frame *get_frame(struct g2d_ctx *ctx, get_frame() argument
96 return &ctx->in; get_frame()
98 return &ctx->out; get_frame()
108 struct g2d_ctx *ctx = vb2_get_drv_priv(vq); g2d_queue_setup() local
109 struct g2d_frame *f = get_frame(ctx, vq->type); g2d_queue_setup()
116 alloc_ctxs[0] = ctx->dev->alloc_ctx; g2d_queue_setup()
126 struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); g2d_buf_prepare() local
127 struct g2d_frame *f = get_frame(ctx, vb->vb2_queue->type); g2d_buf_prepare()
137 struct g2d_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); g2d_buf_queue() local
138 v4l2_m2m_buf_queue(ctx->fh.m2m_ctx, vb); g2d_buf_queue()
150 struct g2d_ctx *ctx = priv; queue_init() local
155 src_vq->drv_priv = ctx; queue_init()
160 src_vq->lock = &ctx->dev->mutex; queue_init()
168 dst_vq->drv_priv = ctx; queue_init()
173 dst_vq->lock = &ctx->dev->mutex; queue_init()
180 struct g2d_ctx *ctx = container_of(ctrl->handler, struct g2d_ctx, g2d_s_ctrl() local
184 spin_lock_irqsave(&ctx->dev->ctrl_lock, flags); g2d_s_ctrl()
188 ctx->rop = ROP4_INVERT; g2d_s_ctrl()
190 ctx->rop = ROP4_COPY; g2d_s_ctrl()
194 ctx->flip = ctx->ctrl_hflip->val | (ctx->ctrl_vflip->val << 1); g2d_s_ctrl()
198 spin_unlock_irqrestore(&ctx->dev->ctrl_lock, flags); g2d_s_ctrl()
206 static int g2d_setup_ctrls(struct g2d_ctx *ctx) g2d_setup_ctrls() argument
208 struct g2d_dev *dev = ctx->dev; g2d_setup_ctrls()
210 v4l2_ctrl_handler_init(&ctx->ctrl_handler, 3); g2d_setup_ctrls()
212 ctx->ctrl_hflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &g2d_ctrl_ops, g2d_setup_ctrls()
215 ctx->ctrl_vflip = v4l2_ctrl_new_std(&ctx->ctrl_handler, &g2d_ctrl_ops, g2d_setup_ctrls()
219 &ctx->ctrl_handler, g2d_setup_ctrls()
226 if (ctx->ctrl_handler.error) { g2d_setup_ctrls()
227 int err = ctx->ctrl_handler.error; g2d_setup_ctrls()
229 v4l2_ctrl_handler_free(&ctx->ctrl_handler); g2d_setup_ctrls()
233 v4l2_ctrl_cluster(2, &ctx->ctrl_hflip); g2d_setup_ctrls()
241 struct g2d_ctx *ctx = NULL; g2d_open() local
244 ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); g2d_open()
245 if (!ctx) g2d_open()
247 ctx->dev = dev; g2d_open()
249 ctx->in = def_frame; g2d_open()
250 ctx->out = def_frame; g2d_open()
253 kfree(ctx); g2d_open()
256 ctx->fh.m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init); g2d_open()
257 if (IS_ERR(ctx->fh.m2m_ctx)) { g2d_open()
258 ret = PTR_ERR(ctx->fh.m2m_ctx); g2d_open()
260 kfree(ctx); g2d_open()
263 v4l2_fh_init(&ctx->fh, video_devdata(file)); g2d_open()
264 file->private_data = &ctx->fh; g2d_open()
265 v4l2_fh_add(&ctx->fh); g2d_open()
267 g2d_setup_ctrls(ctx); g2d_open()
269 /* Write the default values to the ctx struct */ g2d_open()
270 v4l2_ctrl_handler_setup(&ctx->ctrl_handler); g2d_open()
272 ctx->fh.ctrl_handler = &ctx->ctrl_handler; g2d_open()
282 struct g2d_ctx *ctx = fh2ctx(file->private_data); g2d_release() local
284 v4l2_ctrl_handler_free(&ctx->ctrl_handler); g2d_release()
285 v4l2_fh_del(&ctx->fh); g2d_release()
286 v4l2_fh_exit(&ctx->fh); g2d_release()
287 kfree(ctx); g2d_release()
317 struct g2d_ctx *ctx = prv; vidioc_g_fmt() local
321 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); vidioc_g_fmt()
324 frm = get_frame(ctx, f->type); vidioc_g_fmt()
369 struct g2d_ctx *ctx = prv; vidioc_s_fmt() local
370 struct g2d_dev *dev = ctx->dev; vidioc_s_fmt()
381 vq = v4l2_m2m_get_vq(ctx->fh.m2m_ctx, f->type); vidioc_s_fmt()
386 frm = get_frame(ctx, f->type); vidioc_s_fmt()
410 struct g2d_ctx *ctx = priv; vidioc_cropcap() local
413 f = get_frame(ctx, cr->type); vidioc_cropcap()
427 struct g2d_ctx *ctx = prv; vidioc_g_crop() local
430 f = get_frame(ctx, cr->type); vidioc_g_crop()
443 struct g2d_ctx *ctx = prv; vidioc_try_crop() local
444 struct g2d_dev *dev = ctx->dev; vidioc_try_crop()
447 f = get_frame(ctx, cr->type); vidioc_try_crop()
462 struct g2d_ctx *ctx = prv; vidioc_s_crop() local
469 f = get_frame(ctx, cr->type); vidioc_s_crop()
484 struct g2d_ctx *ctx = prv; job_abort() local
485 struct g2d_dev *dev = ctx->dev; job_abort()
497 struct g2d_ctx *ctx = prv; device_run() local
498 struct g2d_dev *dev = ctx->dev; device_run()
503 dev->curr = ctx; device_run()
505 src = v4l2_m2m_next_src_buf(ctx->fh.m2m_ctx); device_run()
506 dst = v4l2_m2m_next_dst_buf(ctx->fh.m2m_ctx); device_run()
513 g2d_set_src_size(dev, &ctx->in); device_run()
516 g2d_set_dst_size(dev, &ctx->out); device_run()
519 g2d_set_rop4(dev, ctx->rop); device_run()
520 g2d_set_flip(dev, ctx->flip); device_run()
522 if (ctx->in.c_width != ctx->out.c_width || device_run()
523 ctx->in.c_height != ctx->out.c_height) { device_run()
527 g2d_set_v41_stretch(dev, &ctx->in, &ctx->out); device_run()
539 struct g2d_ctx *ctx = dev->curr; g2d_isr() local
545 BUG_ON(ctx == NULL); g2d_isr()
547 src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx); g2d_isr()
548 dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx); g2d_isr()
561 v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx); g2d_isr()
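g2d_isr() shows the canonical mem2mem completion path: pull the in-flight source and destination buffers off the ctx queues, mark them done, and tell the m2m core the job finished so the next one can be scheduled. Condensed into a sketch (register acknowledgement and error states omitted; assumes the dev/ctx layout above):

	static irqreturn_t my_g2d_isr(int irq, void *prv)
	{
		struct g2d_dev *dev = prv;
		struct g2d_ctx *ctx = dev->curr;
		struct vb2_buffer *src, *dst;

		src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
		dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);
		vb2_buffer_done(src, VB2_BUF_STATE_DONE);
		vb2_buffer_done(dst, VB2_BUF_STATE_DONE);
		v4l2_m2m_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx);
		dev->curr = NULL;
		return IRQ_HANDLED;
	}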
/linux-4.1.27/drivers/iommu/
msm_iommu.c 126 static void __reset_context(void __iomem *base, int ctx) __reset_context() argument
128 SET_BPRCOSH(base, ctx, 0); __reset_context()
129 SET_BPRCISH(base, ctx, 0); __reset_context()
130 SET_BPRCNSH(base, ctx, 0); __reset_context()
131 SET_BPSHCFG(base, ctx, 0); __reset_context()
132 SET_BPMTCFG(base, ctx, 0); __reset_context()
133 SET_ACTLR(base, ctx, 0); __reset_context()
134 SET_SCTLR(base, ctx, 0); __reset_context()
135 SET_FSRRESTORE(base, ctx, 0); __reset_context()
136 SET_TTBR0(base, ctx, 0); __reset_context()
137 SET_TTBR1(base, ctx, 0); __reset_context()
138 SET_TTBCR(base, ctx, 0); __reset_context()
139 SET_BFBCR(base, ctx, 0); __reset_context()
140 SET_PAR(base, ctx, 0); __reset_context()
141 SET_FAR(base, ctx, 0); __reset_context()
142 SET_CTX_TLBIALL(base, ctx, 0); __reset_context()
143 SET_TLBFLPTER(base, ctx, 0); __reset_context()
144 SET_TLBSLPTER(base, ctx, 0); __reset_context()
145 SET_TLBLKCR(base, ctx, 0); __reset_context()
146 SET_PRRR(base, ctx, 0); __reset_context()
147 SET_NMRR(base, ctx, 0); __reset_context()
150 static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) __program_context() argument
153 __reset_context(base, ctx); __program_context()
157 SET_TLBMCFG(base, ctx, 0x3); __program_context()
160 SET_V2PCFG(base, ctx, 0x3); __program_context()
162 SET_TTBCR(base, ctx, 0); __program_context()
163 SET_TTBR0_PA(base, ctx, (pgtable >> 14)); __program_context()
166 SET_CTX_TLBIALL(base, ctx, 0); __program_context()
169 SET_IRPTNDX(base, ctx, 0); __program_context()
172 SET_CFEIE(base, ctx, 1); __program_context()
175 SET_CFCFG(base, ctx, 1); __program_context()
178 SET_RCISH(base, ctx, 1); __program_context()
179 SET_RCOSH(base, ctx, 1); __program_context()
180 SET_RCNSH(base, ctx, 1); __program_context()
183 SET_TRE(base, ctx, 1); __program_context()
188 SET_PRRR(base, ctx, prrr); __program_context()
189 SET_NMRR(base, ctx, nmrr); __program_context()
192 SET_BFBDFE(base, ctx, 1); __program_context()
198 SET_TTBR0_SH(base, ctx, 1); __program_context()
199 SET_TTBR1_SH(base, ctx, 1); __program_context()
201 SET_TTBR0_NOS(base, ctx, 1); __program_context()
202 SET_TTBR1_NOS(base, ctx, 1); __program_context()
204 SET_TTBR0_IRGNH(base, ctx, 0); /* WB, WA */ __program_context()
205 SET_TTBR0_IRGNL(base, ctx, 1); __program_context()
207 SET_TTBR1_IRGNH(base, ctx, 0); /* WB, WA */ __program_context()
208 SET_TTBR1_IRGNL(base, ctx, 1); __program_context()
210 SET_TTBR0_ORGN(base, ctx, 1); /* WB, WA */ __program_context()
211 SET_TTBR1_ORGN(base, ctx, 1); /* WB, WA */ __program_context()
215 SET_M(base, ctx, 1); __program_context()
564 int ctx; msm_iommu_iova_to_phys() local
577 ctx = ctx_drvdata->num; msm_iommu_iova_to_phys()
584 SET_CTX_TLBIALL(base, ctx, 0); msm_iommu_iova_to_phys()
585 SET_V2PPR(base, ctx, va & V2Pxx_VA); msm_iommu_iova_to_phys()
587 par = GET_PAR(base, ctx); msm_iommu_iova_to_phys()
590 if (GET_NOFAULT_SS(base, ctx)) msm_iommu_iova_to_phys()
595 if (GET_FAULT(base, ctx)) msm_iommu_iova_to_phys()
609 static void print_ctx_regs(void __iomem *base, int ctx) print_ctx_regs() argument
611 unsigned int fsr = GET_FSR(base, ctx); print_ctx_regs()
613 GET_FAR(base, ctx), GET_PAR(base, ctx)); print_ctx_regs()
627 GET_FSYNR0(base, ctx), GET_FSYNR1(base, ctx)); print_ctx_regs()
629 GET_TTBR0(base, ctx), GET_TTBR1(base, ctx)); print_ctx_regs()
631 GET_SCTLR(base, ctx), GET_ACTLR(base, ctx)); print_ctx_regs()
633 GET_PRRR(base, ctx), GET_NMRR(base, ctx)); print_ctx_regs()
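msm_iommu_iova_to_phys() shown above drives the hardware's VA-to-PA probe: invalidate the context bank's TLB, write the virtual address into V2PPR, read the result back from PAR, and check the fault bit. The same steps annotated, using the SET_*/GET_* register accessors this driver already relies on:

	/* sketch: hardware address-translation probe on context bank 'ctx' */
	SET_CTX_TLBIALL(base, ctx, 0);		/* invalidate this ctx's TLB */
	SET_V2PPR(base, ctx, va & V2Pxx_VA);	/* start the V2P probe for 'va' */
	par = GET_PAR(base, ctx);		/* physical address result */
	if (GET_FAULT(base, ctx))
		return 0;			/* the walk faulted: no mapping */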
msm_iommu_dev.c 89 int ctx; msm_iommu_reset() local
103 for (ctx = 0; ctx < ncb; ctx++) { msm_iommu_reset()
104 SET_BPRCOSH(base, ctx, 0); msm_iommu_reset()
105 SET_BPRCISH(base, ctx, 0); msm_iommu_reset()
106 SET_BPRCNSH(base, ctx, 0); msm_iommu_reset()
107 SET_BPSHCFG(base, ctx, 0); msm_iommu_reset()
108 SET_BPMTCFG(base, ctx, 0); msm_iommu_reset()
109 SET_ACTLR(base, ctx, 0); msm_iommu_reset()
110 SET_SCTLR(base, ctx, 0); msm_iommu_reset()
111 SET_FSRRESTORE(base, ctx, 0); msm_iommu_reset()
112 SET_TTBR0(base, ctx, 0); msm_iommu_reset()
113 SET_TTBR1(base, ctx, 0); msm_iommu_reset()
114 SET_TTBCR(base, ctx, 0); msm_iommu_reset()
115 SET_BFBCR(base, ctx, 0); msm_iommu_reset()
116 SET_PAR(base, ctx, 0); msm_iommu_reset()
117 SET_FAR(base, ctx, 0); msm_iommu_reset()
118 SET_CTX_TLBIALL(base, ctx, 0); msm_iommu_reset()
119 SET_TLBFLPTER(base, ctx, 0); msm_iommu_reset()
120 SET_TLBSLPTER(base, ctx, 0); msm_iommu_reset()
121 SET_TLBLKCR(base, ctx, 0); msm_iommu_reset()
122 SET_PRRR(base, ctx, 0); msm_iommu_reset()
123 SET_NMRR(base, ctx, 0); msm_iommu_reset()
124 SET_CONTEXTIDR(base, ctx, 0); msm_iommu_reset()
222 pr_info("device %s mapped at %p, irq %d with %d ctx banks\n", msm_iommu_probe()
/linux-4.1.27/arch/arm64/crypto/
ghash-ce-glue.c 41 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_init() local
43 *ctx = (struct ghash_desc_ctx){}; ghash_init()
50 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_update() local
51 unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; ghash_update()
53 ctx->count += len; ghash_update()
62 memcpy(ctx->buf + partial, src, p); ghash_update()
71 pmull_ghash_update(blocks, ctx->digest, src, key, ghash_update()
72 partial ? ctx->buf : NULL); ghash_update()
78 memcpy(ctx->buf + partial, src, len); ghash_update()
84 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_final() local
85 unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; ghash_final()
90 memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); ghash_final()
93 pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL); ghash_final()
96 put_unaligned_be64(ctx->digest[1], dst); ghash_final()
97 put_unaligned_be64(ctx->digest[0], dst + 8); ghash_final()
99 *ctx = (struct ghash_desc_ctx){}; ghash_final()
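ghash_update()/ghash_final() implement the usual partial-block buffering for a 16-byte block hash: stash the tail of each update in ctx->buf, run full blocks through the PMULL helper, and zero-pad the remainder at finalization. From the caller's side such a shash is driven through the generic hash API; a minimal sketch, assuming a 16-byte hash subkey in key[]:

	static int ghash_digest_once(const u8 *key, const u8 *data,
				     unsigned int len, u8 *out)
	{
		struct crypto_shash *tfm;
		int err;

		tfm = crypto_alloc_shash("ghash", 0, 0); /* fastest provider wins */
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);
		{
			SHASH_DESC_ON_STACK(desc, tfm);	/* sized from tfm */

			desc->tfm = tfm;
			desc->flags = 0;
			err = crypto_shash_setkey(tfm, key, 16); /* H subkey */
			if (!err)
				err = crypto_shash_digest(desc, data, len, out);
		}
		crypto_free_shash(tfm);
		return err;
	}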
aes-ce-cipher.c 27 static int num_rounds(struct crypto_aes_ctx *ctx) num_rounds() argument
36 return 6 + ctx->key_length / 4; num_rounds()
41 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); aes_cipher_encrypt() local
77 "1"(ctx->key_enc), aes_cipher_encrypt()
78 "2"(num_rounds(ctx) - 2) aes_cipher_encrypt()
86 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); aes_cipher_decrypt() local
122 "1"(ctx->key_dec), aes_cipher_decrypt()
123 "2"(num_rounds(ctx) - 2) aes_cipher_decrypt()
149 int ce_aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key, ce_aes_expandkey() argument
168 memcpy(ctx->key_enc, in_key, key_len); ce_aes_expandkey()
169 ctx->key_length = key_len; ce_aes_expandkey()
173 u32 *rki = ctx->key_enc + (i * kwords); ce_aes_expandkey()
202 key_enc = (struct aes_block *)ctx->key_enc; ce_aes_expandkey()
203 key_dec = (struct aes_block *)ctx->key_dec; ce_aes_expandkey()
204 j = num_rounds(ctx); ce_aes_expandkey()
225 struct crypto_aes_ctx *ctx = crypto_tfm_ctx(tfm); ce_aes_setkey() local
228 ret = ce_aes_expandkey(ctx, in_key, key_len); ce_aes_setkey()
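num_rounds() encodes the AES spec directly: the round count is 6 plus the key length in 32-bit words (ctx->key_length is in bytes). Worked out for the three standard key sizes:

	AES-128: key_length = 16  ->  6 + 16/4 = 10 rounds
	AES-192: key_length = 24  ->  6 + 24/4 = 12 rounds
	AES-256: key_length = 32  ->  6 + 32/4 = 14 rounds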
/linux-4.1.27/drivers/clk/samsung/
clk.c 58 struct samsung_clk_provider *ctx; samsung_clk_init() local
62 ctx = kzalloc(sizeof(struct samsung_clk_provider), GFP_KERNEL); samsung_clk_init()
63 if (!ctx) samsung_clk_init()
73 ctx->reg_base = base; samsung_clk_init()
74 ctx->clk_data.clks = clk_table; samsung_clk_init()
75 ctx->clk_data.clk_num = nr_clks; samsung_clk_init()
76 spin_lock_init(&ctx->lock); samsung_clk_init()
78 return ctx; samsung_clk_init()
82 struct samsung_clk_provider *ctx) samsung_clk_of_add_provider()
86 &ctx->clk_data)) samsung_clk_of_add_provider()
92 void samsung_clk_add_lookup(struct samsung_clk_provider *ctx, struct clk *clk, samsung_clk_add_lookup() argument
95 if (ctx->clk_data.clks && id) samsung_clk_add_lookup()
96 ctx->clk_data.clks[id] = clk; samsung_clk_add_lookup()
100 void __init samsung_clk_register_alias(struct samsung_clk_provider *ctx, samsung_clk_register_alias() argument
107 if (!ctx->clk_data.clks) { samsung_clk_register_alias()
119 clk = ctx->clk_data.clks[list->id]; samsung_clk_register_alias()
134 void __init samsung_clk_register_fixed_rate(struct samsung_clk_provider *ctx, samsung_clk_register_fixed_rate() argument
149 samsung_clk_add_lookup(ctx, clk, list->id); samsung_clk_register_fixed_rate()
163 void __init samsung_clk_register_fixed_factor(struct samsung_clk_provider *ctx, samsung_clk_register_fixed_factor() argument
178 samsung_clk_add_lookup(ctx, clk, list->id); samsung_clk_register_fixed_factor()
183 void __init samsung_clk_register_mux(struct samsung_clk_provider *ctx, samsung_clk_register_mux() argument
193 ctx->reg_base + list->offset, samsung_clk_register_mux()
194 list->shift, list->width, list->mux_flags, &ctx->lock); samsung_clk_register_mux()
201 samsung_clk_add_lookup(ctx, clk, list->id); samsung_clk_register_mux()
215 void __init samsung_clk_register_div(struct samsung_clk_provider *ctx, samsung_clk_register_div() argument
226 ctx->reg_base + list->offset, samsung_clk_register_div()
228 list->table, &ctx->lock); samsung_clk_register_div()
232 ctx->reg_base + list->offset, list->shift, samsung_clk_register_div()
233 list->width, list->div_flags, &ctx->lock); samsung_clk_register_div()
240 samsung_clk_add_lookup(ctx, clk, list->id); samsung_clk_register_div()
254 void __init samsung_clk_register_gate(struct samsung_clk_provider *ctx, samsung_clk_register_gate() argument
263 list->flags, ctx->reg_base + list->offset, samsung_clk_register_gate()
264 list->bit_idx, list->gate_flags, &ctx->lock); samsung_clk_register_gate()
280 samsung_clk_add_lookup(ctx, clk, list->id); samsung_clk_register_gate()
288 void __init samsung_clk_of_register_fixed_ext(struct samsung_clk_provider *ctx, samsung_clk_of_register_fixed_ext() argument
302 samsung_clk_register_fixed_rate(ctx, fixed_rate_clk, nr_fixed_rate_clk); samsung_clk_of_register_fixed_ext()
382 struct samsung_clk_provider *ctx; samsung_cmu_register_one() local
390 ctx = samsung_clk_init(np, reg_base, cmu->nr_clk_ids); samsung_cmu_register_one()
391 if (!ctx) { samsung_cmu_register_one()
392 panic("%s: unable to allocate ctx\n", __func__); samsung_cmu_register_one()
393 return ctx; samsung_cmu_register_one()
397 samsung_clk_register_pll(ctx, cmu->pll_clks, cmu->nr_pll_clks, samsung_cmu_register_one()
400 samsung_clk_register_mux(ctx, cmu->mux_clks, samsung_cmu_register_one()
403 samsung_clk_register_div(ctx, cmu->div_clks, cmu->nr_div_clks); samsung_cmu_register_one()
405 samsung_clk_register_gate(ctx, cmu->gate_clks, samsung_cmu_register_one()
408 samsung_clk_register_fixed_rate(ctx, cmu->fixed_clks, samsung_cmu_register_one()
411 samsung_clk_register_fixed_factor(ctx, cmu->fixed_factor_clks, samsung_cmu_register_one()
417 samsung_clk_of_add_provider(np, ctx); samsung_cmu_register_one()
419 return ctx; samsung_cmu_register_one()
81 samsung_clk_of_add_provider(struct device_node *np, struct samsung_clk_provider *ctx) samsung_clk_of_add_provider() argument
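samsung_cmu_register_one() bundles the whole sequence above: allocate the provider ctx, register PLLs, muxes, dividers, gates and fixed clocks against it, then publish it with samsung_clk_of_add_provider(). A Samsung clock platform file typically calls it from an early OF hook; a sketch with a hypothetical CMU table and compatible string:

	static struct samsung_cmu_info my_cmu_info __initdata = {
		/* .pll_clks/.mux_clks/.div_clks/.gate_clks + counts, .nr_clk_ids */
	};

	static void __init my_cmu_init(struct device_node *np)
	{
		samsung_cmu_register_one(np, &my_cmu_info);
	}
	CLK_OF_DECLARE(my_cmu, "samsung,my-cmu-clock", my_cmu_init);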
/linux-4.1.27/drivers/crypto/ccp/
ccp-crypto-aes-cmac.c 62 struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); ccp_do_cmac_update() local
72 if (!ctx->u.aes.key_len) ccp_do_cmac_update()
143 cmac_key_sg = (need_pad) ? &ctx->u.aes.k2_sg ccp_do_cmac_update()
144 : &ctx->u.aes.k1_sg; ccp_do_cmac_update()
149 rctx->cmd.u.aes.type = ctx->u.aes.type; ccp_do_cmac_update()
150 rctx->cmd.u.aes.mode = ctx->u.aes.mode; ccp_do_cmac_update()
152 rctx->cmd.u.aes.key = &ctx->u.aes.key_sg; ccp_do_cmac_update()
153 rctx->cmd.u.aes.key_len = ctx->u.aes.key_len; ccp_do_cmac_update()
160 rctx->cmd.u.aes.cmac_key_len = ctx->u.aes.kn_len; ccp_do_cmac_update()
244 struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); ccp_aes_cmac_setkey() local
254 ctx->u.aes.type = CCP_AES_TYPE_128; ccp_aes_cmac_setkey()
257 ctx->u.aes.type = CCP_AES_TYPE_192; ccp_aes_cmac_setkey()
260 ctx->u.aes.type = CCP_AES_TYPE_256; ccp_aes_cmac_setkey()
266 ctx->u.aes.mode = alg->mode; ccp_aes_cmac_setkey()
269 ctx->u.aes.key_len = 0; ccp_aes_cmac_setkey()
272 ret = crypto_cipher_setkey(ctx->u.aes.tfm_cipher, key, key_len); ccp_aes_cmac_setkey()
277 memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key)); ccp_aes_cmac_setkey()
278 crypto_cipher_encrypt_one(ctx->u.aes.tfm_cipher, ctx->u.aes.key, ccp_aes_cmac_setkey()
279 ctx->u.aes.key); ccp_aes_cmac_setkey()
282 k0_hi = be64_to_cpu(*((__be64 *)ctx->u.aes.key)); ccp_aes_cmac_setkey()
283 k0_lo = be64_to_cpu(*((__be64 *)ctx->u.aes.key + 1)); ccp_aes_cmac_setkey()
287 if (ctx->u.aes.key[0] & 0x80) { ccp_aes_cmac_setkey()
291 gk = (__be64 *)ctx->u.aes.k1; ccp_aes_cmac_setkey()
298 if (ctx->u.aes.k1[0] & 0x80) { ccp_aes_cmac_setkey()
302 gk = (__be64 *)ctx->u.aes.k2; ccp_aes_cmac_setkey()
307 ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1); ccp_aes_cmac_setkey()
308 sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1)); ccp_aes_cmac_setkey()
309 sg_init_one(&ctx->u.aes.k2_sg, ctx->u.aes.k2, sizeof(ctx->u.aes.k2)); ccp_aes_cmac_setkey()
312 memset(ctx->u.aes.key, 0, sizeof(ctx->u.aes.key)); ccp_aes_cmac_setkey()
313 memcpy(ctx->u.aes.key, key, key_len); ccp_aes_cmac_setkey()
314 ctx->u.aes.key_len = key_len; ccp_aes_cmac_setkey()
315 sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); ccp_aes_cmac_setkey()
322 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); ccp_aes_cmac_cra_init() local
326 ctx->complete = ccp_aes_cmac_complete; ccp_aes_cmac_cra_init()
327 ctx->u.aes.key_len = 0; ccp_aes_cmac_cra_init()
338 ctx->u.aes.tfm_cipher = cipher_tfm; ccp_aes_cmac_cra_init()
345 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); ccp_aes_cmac_cra_exit() local
347 if (ctx->u.aes.tfm_cipher) ccp_aes_cmac_cra_exit()
348 crypto_free_cipher(ctx->u.aes.tfm_cipher); ccp_aes_cmac_cra_exit()
349 ctx->u.aes.tfm_cipher = NULL; ccp_aes_cmac_cra_exit()
ccp-crypto-sha.c 49 memcpy(req->result, rctx->ctx, digest_size); ccp_sha_complete()
61 struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); ccp_do_sha_update() local
94 sg_init_one(&rctx->ctx_sg, rctx->ctx, sizeof(rctx->ctx)); ccp_do_sha_update()
128 rctx->cmd.u.sha.ctx = &rctx->ctx_sg; ccp_do_sha_update()
129 rctx->cmd.u.sha.ctx_len = sizeof(rctx->ctx); ccp_do_sha_update()
132 rctx->cmd.u.sha.opad = ctx->u.sha.key_len ? ccp_do_sha_update()
133 &ctx->u.sha.opad_sg : NULL; ccp_do_sha_update()
134 rctx->cmd.u.sha.opad_len = ctx->u.sha.key_len ? ccp_do_sha_update()
135 ctx->u.sha.opad_count : 0; ccp_do_sha_update()
150 struct ccp_ctx *ctx = crypto_ahash_ctx(tfm); ccp_sha_init() local
162 if (ctx->u.sha.key_len) { ccp_sha_init()
164 memcpy(rctx->buf, ctx->u.sha.ipad, block_size); ccp_sha_init()
208 memcpy(state.ctx, rctx->ctx, sizeof(state.ctx)); ccp_sha_export()
230 memcpy(rctx->ctx, state.ctx, sizeof(rctx->ctx)); ccp_sha_import()
240 struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ahash_tfm(tfm)); ccp_sha_setkey() local
241 struct crypto_shash *shash = ctx->u.sha.hmac_tfm; ccp_sha_setkey()
250 ctx->u.sha.key_len = 0; ccp_sha_setkey()
255 memset(ctx->u.sha.key, 0, sizeof(ctx->u.sha.key)); ccp_sha_setkey()
264 ctx->u.sha.key); ccp_sha_setkey()
272 memcpy(ctx->u.sha.key, key, key_len); ccp_sha_setkey()
276 ctx->u.sha.ipad[i] = ctx->u.sha.key[i] ^ 0x36; ccp_sha_setkey()
277 ctx->u.sha.opad[i] = ctx->u.sha.key[i] ^ 0x5c; ccp_sha_setkey()
280 sg_init_one(&ctx->u.sha.opad_sg, ctx->u.sha.opad, block_size); ccp_sha_setkey()
281 ctx->u.sha.opad_count = block_size; ccp_sha_setkey()
283 ctx->u.sha.key_len = key_len; ccp_sha_setkey()
290 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); ccp_sha_cra_init() local
293 ctx->complete = ccp_sha_complete; ccp_sha_cra_init()
294 ctx->u.sha.key_len = 0; ccp_sha_cra_init()
307 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); ccp_hmac_sha_cra_init() local
318 ctx->u.sha.hmac_tfm = hmac_tfm; ccp_hmac_sha_cra_init()
325 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); ccp_hmac_sha_cra_exit() local
327 if (ctx->u.sha.hmac_tfm) ccp_hmac_sha_cra_exit()
328 crypto_free_shash(ctx->u.sha.hmac_tfm); ccp_hmac_sha_cra_exit()
ccp-crypto-aes.c 28 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ccp_aes_complete() local
34 if (ctx->u.aes.mode != CCP_AES_MODE_ECB) ccp_aes_complete()
43 struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); ccp_aes_setkey() local
49 ctx->u.aes.type = CCP_AES_TYPE_128; ccp_aes_setkey()
52 ctx->u.aes.type = CCP_AES_TYPE_192; ccp_aes_setkey()
55 ctx->u.aes.type = CCP_AES_TYPE_256; ccp_aes_setkey()
61 ctx->u.aes.mode = alg->mode; ccp_aes_setkey()
62 ctx->u.aes.key_len = key_len; ccp_aes_setkey()
64 memcpy(ctx->u.aes.key, key, key_len); ccp_aes_setkey()
65 sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len); ccp_aes_setkey()
72 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ccp_aes_crypt() local
78 if (!ctx->u.aes.key_len) ccp_aes_crypt()
81 if (((ctx->u.aes.mode == CCP_AES_MODE_ECB) || ccp_aes_crypt()
82 (ctx->u.aes.mode == CCP_AES_MODE_CBC) || ccp_aes_crypt()
83 (ctx->u.aes.mode == CCP_AES_MODE_CFB)) && ccp_aes_crypt()
87 if (ctx->u.aes.mode != CCP_AES_MODE_ECB) { ccp_aes_crypt()
100 rctx->cmd.u.aes.type = ctx->u.aes.type; ccp_aes_crypt()
101 rctx->cmd.u.aes.mode = ctx->u.aes.mode; ccp_aes_crypt()
104 rctx->cmd.u.aes.key = &ctx->u.aes.key_sg; ccp_aes_crypt()
105 rctx->cmd.u.aes.key_len = ctx->u.aes.key_len; ccp_aes_crypt()
129 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); ccp_aes_cra_init() local
131 ctx->complete = ccp_aes_complete; ccp_aes_cra_init()
132 ctx->u.aes.key_len = 0; ccp_aes_cra_init()
158 struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm)); ccp_aes_rfc3686_setkey() local
164 memcpy(ctx->u.aes.nonce, key + key_len, CTR_RFC3686_NONCE_SIZE); ccp_aes_rfc3686_setkey()
171 struct ccp_ctx *ctx = crypto_tfm_ctx(req->base.tfm); ccp_aes_rfc3686_crypt() local
177 memcpy(iv, ctx->u.aes.nonce, CTR_RFC3686_NONCE_SIZE); ccp_aes_rfc3686_crypt()
204 struct ccp_ctx *ctx = crypto_tfm_ctx(tfm); ccp_aes_rfc3686_cra_init() local
206 ctx->complete = ccp_aes_rfc3686_complete; ccp_aes_rfc3686_cra_init()
207 ctx->u.aes.key_len = 0; ccp_aes_rfc3686_cra_init()
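The k1/k2 computation in ccp_aes_cmac_setkey() is the standard CMAC subkey schedule: encrypt an all-zero block to get L, then K1 = 2·L and K2 = 2·K1 in GF(2^128), where doubling is a 1-bit left shift plus a conditional XOR of the reduction constant 0x87. The driver does the shift with two 64-bit halves; the same doubling written byte-wise for comparison:

	/* sketch: GF(2^128) doubling behind the CMAC subkeys K1 and K2 */
	static void gf128_double(u8 out[16], const u8 in[16])
	{
		int i;
		int carry = in[0] & 0x80;	/* msb that falls off the shift */

		for (i = 0; i < 15; i++)
			out[i] = (in[i] << 1) | (in[i + 1] >> 7);
		out[15] = in[15] << 1;
		if (carry)
			out[15] ^= 0x87;	/* x^128 = x^7 + x^2 + x + 1 */
	}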
/linux-4.1.27/include/crypto/
cast6.h 17 int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key,
21 void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
22 void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
serpent.h 20 int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
24 void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
25 void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
cast5.h 20 void __cast5_encrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
21 void __cast5_decrypt(struct cast5_ctx *ctx, u8 *dst, const u8 *src);
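These headers export the low-level, context-based primitives that the per-arch assembler glue builds on: expand the key into a ctx once, then run single-block operations against it, without any crypto-API framing. A usage sketch for the serpent variant (key assumed to be SERPENT_MAX_KEY_SIZE bytes):

	struct serpent_ctx ctx;
	u8 out[SERPENT_BLOCK_SIZE];

	__serpent_setkey(&ctx, key, SERPENT_MAX_KEY_SIZE);	/* key schedule */
	__serpent_encrypt(&ctx, out, in);			/* one 16-byte block */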
/linux-4.1.27/drivers/crypto/vmx/
aes.c 42 struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); p8_aes_init() local
60 ctx->fallback = fallback; p8_aes_init()
67 struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); p8_aes_exit() local
69 if (ctx->fallback) { p8_aes_exit()
70 crypto_free_cipher(ctx->fallback); p8_aes_exit()
71 ctx->fallback = NULL; p8_aes_exit()
79 struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); p8_aes_setkey() local
84 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); p8_aes_setkey()
85 ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); p8_aes_setkey()
88 ret += crypto_cipher_setkey(ctx->fallback, key, keylen); p8_aes_setkey()
94 struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); p8_aes_encrypt() local
97 crypto_cipher_encrypt_one(ctx->fallback, dst, src); p8_aes_encrypt()
102 aes_p8_encrypt(src, dst, &ctx->enc_key); p8_aes_encrypt()
109 struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm); p8_aes_decrypt() local
112 crypto_cipher_decrypt_one(ctx->fallback, dst, src); p8_aes_decrypt()
117 aes_p8_decrypt(src, dst, &ctx->dec_key); p8_aes_decrypt()
aes_cbc.c 43 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); p8_aes_cbc_init() local
61 ctx->fallback = fallback; p8_aes_cbc_init()
68 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); p8_aes_cbc_exit() local
70 if (ctx->fallback) { p8_aes_cbc_exit()
71 crypto_free_blkcipher(ctx->fallback); p8_aes_cbc_exit()
72 ctx->fallback = NULL; p8_aes_cbc_exit()
80 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm); p8_aes_cbc_setkey() local
85 ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key); p8_aes_cbc_setkey()
86 ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key); p8_aes_cbc_setkey()
89 ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen); p8_aes_cbc_setkey()
99 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx( p8_aes_cbc_encrypt() local
102 .tfm = ctx->fallback, p8_aes_cbc_encrypt()
118 nbytes & AES_BLOCK_MASK, &ctx->enc_key, walk.iv, 1); p8_aes_cbc_encrypt()
135 struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx( p8_aes_cbc_decrypt() local
138 .tfm = ctx->fallback, p8_aes_cbc_decrypt()
154 nbytes & AES_BLOCK_MASK, &ctx->dec_key, walk.iv, 0); p8_aes_cbc_decrypt()
ghash.c 61 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); p8_ghash_init_tfm() local
79 ctx->fallback = fallback; p8_ghash_init_tfm()
89 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(tfm); p8_ghash_exit_tfm() local
91 if (ctx->fallback) { p8_ghash_exit_tfm()
92 crypto_free_shash(ctx->fallback); p8_ghash_exit_tfm()
93 ctx->fallback = NULL; p8_ghash_exit_tfm()
99 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); p8_ghash_init() local
104 dctx->fallback_desc.tfm = ctx->fallback; p8_ghash_init()
112 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(tfm)); p8_ghash_setkey() local
121 gcm_init_p8(ctx->htable, (const u64 *) key); p8_ghash_setkey()
123 return crypto_shash_setkey(ctx->fallback, key, keylen); p8_ghash_setkey()
130 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); p8_ghash_update() local
148 gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, p8_ghash_update()
161 gcm_ghash_p8(dctx->shash, ctx->htable, src, len); p8_ghash_update()
177 struct p8_ghash_ctx *ctx = crypto_tfm_ctx(crypto_shash_tfm(desc->tfm)); p8_ghash_final() local
190 gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer, p8_ghash_final()
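All three VMX files share one structure: keep a software fallback tfm inside the ctx, program the key into both the P8 key schedule and the fallback at setkey time, and take the vector path at run time only when AltiVec state may safely be touched. The dispatch, condensed into a sketch of the single-block encrypt case (mirrors p8_aes_encrypt above; the exact enable/disable calls vary by kernel version):

	static void p8_encrypt_sketch(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
	{
		struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);

		if (in_interrupt()) {
			/* vector registers are off limits: use the fallback */
			crypto_cipher_encrypt_one(ctx->fallback, dst, src);
		} else {
			pagefault_disable();
			enable_kernel_altivec();
			aes_p8_encrypt(src, dst, &ctx->enc_key);
			pagefault_enable();
		}
	}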
/linux-4.1.27/arch/arm/crypto/
aes_glue.c 18 struct AES_CTX *ctx = crypto_tfm_ctx(tfm); aes_encrypt() local
19 AES_encrypt(src, dst, &ctx->enc_key); aes_encrypt()
24 struct AES_CTX *ctx = crypto_tfm_ctx(tfm); aes_decrypt() local
25 AES_decrypt(src, dst, &ctx->dec_key); aes_decrypt()
31 struct AES_CTX *ctx = crypto_tfm_ctx(tfm); aes_set_key() local
48 if (private_AES_set_encrypt_key(in_key, key_len, &ctx->enc_key) == -1) { aes_set_key()
53 ctx->dec_key = ctx->enc_key; aes_set_key()
54 if (private_AES_set_decrypt_key(in_key, key_len, &ctx->dec_key) == -1) { aes_set_key()
ghash-ce-glue.c 48 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_init() local
50 *ctx = (struct ghash_desc_ctx){}; ghash_init()
57 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_update() local
58 unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; ghash_update()
60 ctx->count += len; ghash_update()
69 memcpy(ctx->buf + partial, src, p); ghash_update()
78 pmull_ghash_update(blocks, ctx->digest, src, key, ghash_update()
79 partial ? ctx->buf : NULL); ghash_update()
85 memcpy(ctx->buf + partial, src, len); ghash_update()
91 struct ghash_desc_ctx *ctx = shash_desc_ctx(desc); ghash_final() local
92 unsigned int partial = ctx->count % GHASH_BLOCK_SIZE; ghash_final()
97 memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial); ghash_final()
99 pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL); ghash_final()
102 put_unaligned_be64(ctx->digest[1], dst); ghash_final()
103 put_unaligned_be64(ctx->digest[0], dst + 8); ghash_final()
105 *ctx = (struct ghash_desc_ctx){}; ghash_final()
154 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); ghash_async_init() local
156 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; ghash_async_init()
178 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); ghash_async_update() local
179 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; ghash_async_update()
196 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); ghash_async_final() local
197 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; ghash_async_final()
211 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); ghash_async_digest() local
213 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; ghash_async_digest()
232 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); ghash_async_setkey() local
233 struct crypto_ahash *child = &ctx->cryptd_tfm->base; ghash_async_setkey()
249 struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); ghash_async_init_tfm() local
256 ctx->cryptd_tfm = cryptd_tfm; ghash_async_init_tfm()
266 struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); ghash_async_exit_tfm() local
268 cryptd_free_ahash(ctx->cryptd_tfm); ghash_async_exit_tfm()
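The ghash_async_* layer exists because the NEON/PMULL code cannot run in every context; requests are bounced through cryptd, which queues them to process context where the synchronous shash above may use the vector unit. The tfm setup step, sketched (the internal algorithm name and the CRYPTO_ALG_INTERNAL flags are assumptions based on how such drivers register their sync implementation):

	static int my_async_init_tfm(struct crypto_tfm *tfm)
	{
		struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm);
		struct cryptd_ahash *cryptd_tfm;

		cryptd_tfm = cryptd_alloc_ahash("__driver-ghash-ce",
						CRYPTO_ALG_INTERNAL,
						CRYPTO_ALG_INTERNAL);
		if (IS_ERR(cryptd_tfm))
			return PTR_ERR(cryptd_tfm);
		ctx->cryptd_tfm = cryptd_tfm;	/* released in exit_tfm above */
		return 0;
	}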
/linux-4.1.27/include/linux/
eventfd.h 33 struct eventfd_ctx *eventfd_ctx_get(struct eventfd_ctx *ctx);
34 void eventfd_ctx_put(struct eventfd_ctx *ctx);
38 __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
39 ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
40 int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
59 static inline int eventfd_signal(struct eventfd_ctx *ctx, int n) eventfd_signal() argument
64 static inline void eventfd_ctx_put(struct eventfd_ctx *ctx) eventfd_ctx_put() argument
69 static inline ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, eventfd_ctx_read() argument
75 static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, eventfd_ctx_remove_wait_queue() argument
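The eventfd ctx API lets kernel code keep a reference to an eventfd that outlives the file descriptor and signal it later; the inline stubs above make the !CONFIG_EVENTFD case degrade to errors. Typical in-kernel usage, given an fd handed in from userspace:

	struct eventfd_ctx *evt;

	evt = eventfd_ctx_fdget(fd);	/* resolve fd -> ctx, takes a reference */
	if (IS_ERR(evt))
		return PTR_ERR(evt);

	eventfd_signal(evt, 1);		/* add 1 to the counter, wake readers */
	eventfd_ctx_put(evt);		/* drop our reference */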
ww_mutex.h 47 struct ww_acquire_ctx *ctx; member in struct:ww_mutex
89 lock->ctx = NULL; ww_mutex_init()
97 * @ctx: w/w acquire context to initialize
119 static inline void ww_acquire_init(struct ww_acquire_ctx *ctx, ww_acquire_init() argument
122 ctx->task = current; ww_acquire_init()
123 ctx->stamp = atomic_long_inc_return(&ww_class->stamp); ww_acquire_init()
124 ctx->acquired = 0; ww_acquire_init()
126 ctx->ww_class = ww_class; ww_acquire_init()
127 ctx->done_acquire = 0; ww_acquire_init()
128 ctx->contending_lock = NULL; ww_acquire_init()
131 debug_check_no_locks_freed((void *)ctx, sizeof(*ctx)); ww_acquire_init()
132 lockdep_init_map(&ctx->dep_map, ww_class->acquire_name, ww_acquire_init()
134 mutex_acquire(&ctx->dep_map, 0, 0, _RET_IP_); ww_acquire_init()
137 ctx->deadlock_inject_interval = 1; ww_acquire_init()
138 ctx->deadlock_inject_countdown = ctx->stamp & 0xf; ww_acquire_init()
144 * @ctx: the acquire context
153 static inline void ww_acquire_done(struct ww_acquire_ctx *ctx) ww_acquire_done() argument
156 lockdep_assert_held(ctx); ww_acquire_done()
158 DEBUG_LOCKS_WARN_ON(ctx->done_acquire); ww_acquire_done()
159 ctx->done_acquire = 1; ww_acquire_done()
165 * @ctx: the acquire context to free
170 static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx) ww_acquire_fini() argument
173 mutex_release(&ctx->dep_map, 0, _THIS_IP_); ww_acquire_fini()
175 DEBUG_LOCKS_WARN_ON(ctx->acquired); ww_acquire_fini()
181 ctx->done_acquire = 1; ww_acquire_fini()
185 ctx->acquired = ~0U; ww_acquire_fini()
190 struct ww_acquire_ctx *ctx);
192 struct ww_acquire_ctx *ctx);
197 * @ctx: w/w acquire context, or NULL to acquire only a single lock.
223 static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ww_mutex_lock() argument
225 if (ctx) ww_mutex_lock()
226 return __ww_mutex_lock(lock, ctx); ww_mutex_lock()
235 * @ctx: w/w acquire context
263 struct ww_acquire_ctx *ctx) ww_mutex_lock_interruptible()
265 if (ctx) ww_mutex_lock_interruptible()
266 return __ww_mutex_lock_interruptible(lock, ctx); ww_mutex_lock_interruptible()
274 * @ctx: w/w acquire context
295 ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ww_mutex_lock_slow() argument
299 DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); ww_mutex_lock_slow()
301 ret = ww_mutex_lock(lock, ctx); ww_mutex_lock_slow()
308 * @ctx: w/w acquire context
332 struct ww_acquire_ctx *ctx) ww_mutex_lock_slow_interruptible()
335 DEBUG_LOCKS_WARN_ON(!ctx->contending_lock); ww_mutex_lock_slow_interruptible()
337 return ww_mutex_lock_interruptible(lock, ctx); ww_mutex_lock_slow_interruptible()
262 ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ww_mutex_lock_interruptible() argument
331 ww_mutex_lock_slow_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ww_mutex_lock_slow_interruptible() argument
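Put together, the header's API implies the following acquire/backoff dance for taking two locks of the same ww_class: on -EDEADLK the losing context releases everything it holds and re-takes the contended lock with the _slow variant before retrying. A simplified two-lock sketch (a full implementation loops until every lock is held):

	struct ww_acquire_ctx ctx;
	int ret;

	ww_acquire_init(&ctx, &my_ww_class); /* DEFINE_WW_CLASS(my_ww_class) elsewhere */

	ret = ww_mutex_lock(&a->lock, &ctx);	/* first lock cannot deadlock */
	ret = ww_mutex_lock(&b->lock, &ctx);
	if (ret == -EDEADLK) {
		/* we lost against an older ctx holding b: back off fully */
		ww_mutex_unlock(&a->lock);
		ww_mutex_lock_slow(&b->lock, &ctx); /* sleeps until b is ours */
		ret = ww_mutex_lock(&a->lock, &ctx); /* retry a (simplified) */
	}
	ww_acquire_done(&ctx);			/* no more locks will be taken */

	/* ... critical section over a and b ... */

	ww_mutex_unlock(&a->lock);
	ww_mutex_unlock(&b->lock);
	ww_acquire_fini(&ctx);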
/linux-4.1.27/security/selinux/
xfrm.c 61 static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx) selinux_authorizable_ctx() argument
63 return (ctx && selinux_authorizable_ctx()
64 (ctx->ctx_doi == XFRM_SC_DOI_LSM) && selinux_authorizable_ctx()
65 (ctx->ctx_alg == XFRM_SC_ALG_SELINUX)); selinux_authorizable_ctx()
86 struct xfrm_sec_ctx *ctx = NULL; selinux_xfrm_alloc_user() local
98 ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp); selinux_xfrm_alloc_user()
99 if (!ctx) selinux_xfrm_alloc_user()
102 ctx->ctx_doi = XFRM_SC_DOI_LSM; selinux_xfrm_alloc_user()
103 ctx->ctx_alg = XFRM_SC_ALG_SELINUX; selinux_xfrm_alloc_user()
104 ctx->ctx_len = str_len; selinux_xfrm_alloc_user()
105 memcpy(ctx->ctx_str, &uctx[1], str_len); selinux_xfrm_alloc_user()
106 ctx->ctx_str[str_len] = '\0'; selinux_xfrm_alloc_user()
107 rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp); selinux_xfrm_alloc_user()
111 rc = avc_has_perm(tsec->sid, ctx->ctx_sid, selinux_xfrm_alloc_user()
116 *ctxp = ctx; selinux_xfrm_alloc_user()
121 kfree(ctx); selinux_xfrm_alloc_user()
128 static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx) selinux_xfrm_free() argument
130 if (!ctx) selinux_xfrm_free()
134 kfree(ctx); selinux_xfrm_free()
140 static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx) selinux_xfrm_delete() argument
144 if (!ctx) selinux_xfrm_delete()
147 return avc_has_perm(tsec->sid, ctx->ctx_sid, selinux_xfrm_delete()
156 int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) selinux_xfrm_policy_lookup() argument
162 if (!ctx) selinux_xfrm_policy_lookup()
166 if (!selinux_authorizable_ctx(ctx)) selinux_xfrm_policy_lookup()
169 rc = avc_has_perm(fl_secid, ctx->ctx_sid, selinux_xfrm_policy_lookup()
239 struct xfrm_sec_ctx *ctx = x->security; selinux_xfrm_skb_sid_ingress() local
242 sid_session = ctx->ctx_sid; selinux_xfrm_skb_sid_ingress()
245 } else if (sid_session != ctx->ctx_sid) { selinux_xfrm_skb_sid_ingress()
317 void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) selinux_xfrm_policy_free() argument
319 selinux_xfrm_free(ctx); selinux_xfrm_policy_free()
325 int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) selinux_xfrm_policy_delete() argument
327 return selinux_xfrm_delete(ctx); selinux_xfrm_policy_delete()
348 struct xfrm_sec_ctx *ctx; selinux_xfrm_state_alloc_acquire() local
362 ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC); selinux_xfrm_state_alloc_acquire()
363 if (!ctx) { selinux_xfrm_state_alloc_acquire()
368 ctx->ctx_doi = XFRM_SC_DOI_LSM; selinux_xfrm_state_alloc_acquire()
369 ctx->ctx_alg = XFRM_SC_ALG_SELINUX; selinux_xfrm_state_alloc_acquire()
370 ctx->ctx_sid = secid; selinux_xfrm_state_alloc_acquire()
371 ctx->ctx_len = str_len; selinux_xfrm_state_alloc_acquire()
372 memcpy(ctx->ctx_str, ctx_str, str_len); selinux_xfrm_state_alloc_acquire()
374 x->security = ctx; selinux_xfrm_state_alloc_acquire()
416 struct xfrm_sec_ctx *ctx = x->security; selinux_xfrm_sock_rcv_skb() local
417 peer_sid = ctx->ctx_sid; selinux_xfrm_sock_rcv_skb()
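Both allocation sites here size the buffer as sizeof(*ctx) plus the security context string, relying on ctx_str being a flexible array member at the end of xfrm_sec_ctx so the header and its payload live in one allocation. The idiom in isolation:

	struct xfrm_sec_ctx *ctx;

	/* one kmalloc covers the struct and the trailing NUL-terminated string */
	ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	ctx->ctx_len = str_len;
	memcpy(ctx->ctx_str, str, str_len);
	ctx->ctx_str[str_len] = '\0';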
/linux-4.1.27/arch/ia64/kernel/
perfmon.c 99 #define PMC_OVFL_NOTIFY(ctx, i) ((ctx)->ctx_pmds[i].flags & PFM_REGFL_OVFL_NOTIFY)
126 #define CTX_USED_PMD(ctx, mask) (ctx)->ctx_used_pmds[0] |= (mask)
127 #define CTX_IS_USED_PMD(ctx, c) (((ctx)->ctx_used_pmds[0] & (1UL << (c))) != 0UL)
129 #define CTX_USED_MONITOR(ctx, mask) (ctx)->ctx_used_monitors[0] |= (mask)
131 #define CTX_USED_IBR(ctx,n) (ctx)->ctx_used_ibrs[(n)>>6] |= 1UL<< ((n) % 64)
132 #define CTX_USED_DBR(ctx,n) (ctx)->ctx_used_dbrs[(n)>>6] |= 1UL<< ((n) % 64)
133 #define CTX_USES_DBREGS(ctx) (((pfm_context_t *)(ctx))->ctx_fl_using_dbreg==1)
163 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
165 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
170 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
348 #define SET_LAST_CPU(ctx, v) (ctx)->ctx_last_cpu = (v)
349 #define GET_LAST_CPU(ctx) (ctx)->ctx_last_cpu
351 #define SET_LAST_CPU(ctx, v) do {} while(0)
352 #define GET_LAST_CPU(ctx) do {} while(0)
388 typedef int (*pfm_reg_check_t)(struct task_struct *task, pfm_context_t *ctx, unsigned int cnum, unsigned long *val, struct pt_regs *regs);
469 int (*cmd_func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
573 static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
643 static int pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
659 static int pfm_end_notify_user(pfm_context_t *ctx);
747 pfm_read_soft_counter(pfm_context_t *ctx, int i) pfm_read_soft_counter() argument
749 return ctx->ctx_pmds[i].val + (ia64_get_pmd(i) & pmu_conf->ovfl_val); pfm_read_soft_counter()
756 pfm_write_soft_counter(pfm_context_t *ctx, int i, unsigned long val) pfm_write_soft_counter() argument
760 ctx->ctx_pmds[i].val = val & ~ovfl_val; pfm_write_soft_counter()
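Aside: pfm_read_soft_counter()/pfm_write_soft_counter() virtualize a wide counter on top of a narrower hardware register: the bits covered by ovfl_val stay in the PMD itself, while the upper bits accumulate in ctx_pmds[i].val. A hedged sketch of the same split, with a fake 32-bit register standing in for ia64_get_pmd()/ia64_set_pmd():

	static u64 soft_val;			/* software-held upper bits */
	static u32 hw_reg;			/* stand-in for the hardware PMD */
	#define DEMO_OVFL_VAL 0xffffffffUL	/* mask of hardware-held bits */

	static u64 demo_read_counter(void)
	{
		return soft_val + (hw_reg & DEMO_OVFL_VAL);
	}

	static void demo_write_counter(u64 val)
	{
		soft_val = val & ~DEMO_OVFL_VAL;	/* software keeps the rest */
		hw_reg   = val & DEMO_OVFL_VAL;		/* hardware keeps low bits */
	}

On overflow the handler folds the wrapped low bits back into the software value, which is what the ctx->ctx_pmds[i].val += (val & ovfl_mask) lines elsewhere in this file are doing.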
769 pfm_get_new_msg(pfm_context_t *ctx) pfm_get_new_msg() argument
773 next = (ctx->ctx_msgq_tail+1) % PFM_MAX_MSGS; pfm_get_new_msg()
775 DPRINT(("ctx_fd=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); pfm_get_new_msg()
776 if (next == ctx->ctx_msgq_head) return NULL; pfm_get_new_msg()
778 idx = ctx->ctx_msgq_tail; pfm_get_new_msg()
779 ctx->ctx_msgq_tail = next; pfm_get_new_msg()
781 DPRINT(("ctx=%p head=%d tail=%d msg=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, idx)); pfm_get_new_msg()
783 return ctx->ctx_msgq+idx; pfm_get_new_msg()
787 pfm_get_next_msg(pfm_context_t *ctx) pfm_get_next_msg() argument
791 DPRINT(("ctx=%p head=%d tail=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); pfm_get_next_msg()
793 if (PFM_CTXQ_EMPTY(ctx)) return NULL; pfm_get_next_msg()
798 msg = ctx->ctx_msgq+ctx->ctx_msgq_head; pfm_get_next_msg()
803 ctx->ctx_msgq_head = (ctx->ctx_msgq_head+1) % PFM_MAX_MSGS; pfm_get_next_msg()
805 DPRINT(("ctx=%p head=%d tail=%d type=%d\n", ctx, ctx->ctx_msgq_head, ctx->ctx_msgq_tail, msg->pfm_gen_msg.msg_type)); pfm_get_next_msg()
811 pfm_reset_msgq(pfm_context_t *ctx) pfm_reset_msgq() argument
813 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; pfm_reset_msgq()
814 DPRINT(("ctx=%p msgq reset\n", ctx)); pfm_reset_msgq()
858 pfm_context_t *ctx; pfm_context_alloc() local
864 ctx = kzalloc(sizeof(pfm_context_t), GFP_KERNEL); pfm_context_alloc()
865 if (ctx) { pfm_context_alloc()
866 DPRINT(("alloc ctx @%p\n", ctx)); pfm_context_alloc()
871 spin_lock_init(&ctx->ctx_lock); pfm_context_alloc()
876 ctx->ctx_state = PFM_CTX_UNLOADED; pfm_context_alloc()
881 ctx->ctx_fl_block = (ctx_flags & PFM_FL_NOTIFY_BLOCK) ? 1 : 0; pfm_context_alloc()
882 ctx->ctx_fl_system = (ctx_flags & PFM_FL_SYSTEM_WIDE) ? 1: 0; pfm_context_alloc()
883 ctx->ctx_fl_no_msg = (ctx_flags & PFM_FL_OVFL_NO_MSG) ? 1: 0; pfm_context_alloc()
886 * ctx->ctx_fl_excl_idle = (ctx_flags & PFM_FL_EXCL_IDLE) ? 1: 0; pfm_context_alloc()
892 init_completion(&ctx->ctx_restart_done); pfm_context_alloc()
897 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; pfm_context_alloc()
898 SET_LAST_CPU(ctx, -1); pfm_context_alloc()
903 ctx->ctx_msgq_head = ctx->ctx_msgq_tail = 0; pfm_context_alloc()
904 init_waitqueue_head(&ctx->ctx_msgq_wait); pfm_context_alloc()
905 init_waitqueue_head(&ctx->ctx_zombieq); pfm_context_alloc()
908 return ctx; pfm_context_alloc()
912 pfm_context_free(pfm_context_t *ctx) pfm_context_free() argument
914 if (ctx) { pfm_context_free()
915 DPRINT(("free ctx @%p\n", ctx)); pfm_context_free()
916 kfree(ctx); pfm_context_free()
923 pfm_context_t *ctx = PFM_GET_CTX(task); pfm_mask_monitoring() local
944 * As a consequence to this call, the ctx->th_pmds[] array pfm_mask_monitoring()
949 mask = ctx->ctx_used_pmds[0]; pfm_mask_monitoring()
959 ctx->ctx_pmds[i].val += (val & ovfl_mask); pfm_mask_monitoring()
961 ctx->ctx_pmds[i].val = val; pfm_mask_monitoring()
965 ctx->ctx_pmds[i].val, pfm_mask_monitoring()
976 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; pfm_mask_monitoring()
979 ia64_set_pmc(i, ctx->th_pmcs[i] & ~0xfUL); pfm_mask_monitoring()
980 ctx->th_pmcs[i] &= ~0xfUL; pfm_mask_monitoring()
981 DPRINT_ovfl(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i])); pfm_mask_monitoring()
997 pfm_context_t *ctx = PFM_GET_CTX(task); pfm_restore_monitoring() local
1002 is_system = ctx->ctx_fl_system; pfm_restore_monitoring()
1009 if (ctx->ctx_state != PFM_CTX_MASKED) { pfm_restore_monitoring()
1011 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state); pfm_restore_monitoring()
1035 mask = ctx->ctx_used_pmds[0]; pfm_restore_monitoring()
1045 val = ctx->ctx_pmds[i].val & ovfl_mask; pfm_restore_monitoring()
1046 ctx->ctx_pmds[i].val &= ~ovfl_mask; pfm_restore_monitoring()
1048 val = ctx->ctx_pmds[i].val; pfm_restore_monitoring()
1054 ctx->ctx_pmds[i].val, pfm_restore_monitoring()
1060 mask = ctx->ctx_used_monitors[0] >> PMU_FIRST_COUNTER; pfm_restore_monitoring()
1063 ctx->th_pmcs[i] = ctx->ctx_pmcs[i]; pfm_restore_monitoring()
1064 ia64_set_pmc(i, ctx->th_pmcs[i]); pfm_restore_monitoring()
1066 task_pid_nr(task), i, ctx->th_pmcs[i])); pfm_restore_monitoring()
1074 if (ctx->ctx_fl_using_dbreg) { pfm_restore_monitoring()
1075 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); pfm_restore_monitoring()
1076 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); pfm_restore_monitoring()
1123 pfm_copy_pmds(struct task_struct *task, pfm_context_t *ctx) pfm_copy_pmds() argument
1126 unsigned long mask = ctx->ctx_all_pmds[0]; pfm_copy_pmds()
1134 val = ctx->ctx_pmds[i].val; pfm_copy_pmds()
1143 ctx->ctx_pmds[i].val = val & ~ovfl_val; pfm_copy_pmds()
1146 ctx->th_pmds[i] = val; pfm_copy_pmds()
1150 ctx->th_pmds[i], pfm_copy_pmds()
1151 ctx->ctx_pmds[i].val)); pfm_copy_pmds()
1159 pfm_copy_pmcs(struct task_struct *task, pfm_context_t *ctx) pfm_copy_pmcs() argument
1161 unsigned long mask = ctx->ctx_all_pmcs[0]; pfm_copy_pmcs()
1168 ctx->th_pmcs[i] = ctx->ctx_pmcs[i]; pfm_copy_pmcs()
1169 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->th_pmcs[i])); pfm_copy_pmcs()
1393 pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu) pfm_unreserve_session() argument
1414 if (ctx && ctx->ctx_fl_using_dbreg) { pfm_unreserve_session()
1416 printk(KERN_ERR "perfmon: invalid release for ctx %p sys_use_dbregs=0\n", ctx); pfm_unreserve_session()
1478 pfm_free_smpl_buffer(pfm_context_t *ctx)
1482 if (ctx->ctx_smpl_hdr == NULL) goto invalid_free;
1487 fmt = ctx->ctx_buf_fmt;
1490 ctx->ctx_smpl_hdr,
1491 ctx->ctx_smpl_size,
1492 ctx->ctx_smpl_vaddr));
1499 pfm_rvfree(ctx->ctx_smpl_hdr, ctx->ctx_smpl_size);
1501 ctx->ctx_smpl_hdr = NULL;
1502 ctx->ctx_smpl_size = 0UL;
1547 pfm_context_t *ctx; pfm_read() local
1557 ctx = filp->private_data; pfm_read()
1558 if (ctx == NULL) { pfm_read()
1559 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current)); pfm_read()
1567 DPRINT(("message is too small ctx=%p (>=%ld)\n", ctx, sizeof(pfm_msg_t))); pfm_read()
1571 PROTECT_CTX(ctx, flags); pfm_read()
1576 add_wait_queue(&ctx->ctx_msgq_wait, &wait); pfm_read()
1586 DPRINT(("head=%d tail=%d\n", ctx->ctx_msgq_head, ctx->ctx_msgq_tail)); pfm_read()
1589 if(PFM_CTXQ_EMPTY(ctx) == 0) break; pfm_read()
1591 UNPROTECT_CTX(ctx, flags); pfm_read()
1611 PROTECT_CTX(ctx, flags); pfm_read()
1615 remove_wait_queue(&ctx->ctx_msgq_wait, &wait); pfm_read()
1620 msg = pfm_get_next_msg(ctx); pfm_read()
1622 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current)); pfm_read()
1632 UNPROTECT_CTX(ctx, flags); pfm_read()
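Aside: pfm_read() uses the classic open-coded wait loop: join the wait queue, mark the task interruptible, re-check the condition, drop the ctx lock, schedule, retake the lock, and repeat until a message shows up. A condensed sketch of that shape (signal handling and the lock/queue types are simplified; queue_empty() is a hypothetical stand-in for PFM_CTXQ_EMPTY()):

	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&ctx_msgq_wait, &wait);
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (!queue_empty())
			break;			/* message arrived */
		spin_unlock_irqrestore(&ctx_lock, flags);
		schedule();			/* sleep until wake_up */
		spin_lock_irqsave(&ctx_lock, flags);
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx_msgq_wait, &wait);

Dropping the lock around schedule() is essential: the producer (the overflow handler) must be able to take the same lock to enqueue the message that ends the wait.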
1648 pfm_context_t *ctx; pfm_poll() local
1657 ctx = filp->private_data; pfm_poll()
1658 if (ctx == NULL) { pfm_poll()
1659 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current)); pfm_poll()
1664 DPRINT(("pfm_poll ctx_fd=%d before poll_wait\n", ctx->ctx_fd)); pfm_poll()
1666 poll_wait(filp, &ctx->ctx_msgq_wait, wait); pfm_poll()
1668 PROTECT_CTX(ctx, flags); pfm_poll()
1670 if (PFM_CTXQ_EMPTY(ctx) == 0) pfm_poll()
1673 UNPROTECT_CTX(ctx, flags); pfm_poll()
1675 DPRINT(("pfm_poll ctx_fd=%d mask=0x%x\n", ctx->ctx_fd, mask)); pfm_poll()
1691 pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on) pfm_do_fasync() argument
1695 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue); pfm_do_fasync()
1701 ctx->ctx_async_queue, ret)); pfm_do_fasync()
1709 pfm_context_t *ctx; pfm_fasync() local
1717 ctx = filp->private_data; pfm_fasync()
1718 if (ctx == NULL) { pfm_fasync()
1719 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current)); pfm_fasync()
1729 ret = pfm_do_fasync(fd, filp, ctx, on); pfm_fasync()
1735 ctx->ctx_async_queue, ret)); pfm_fasync()
1749 pfm_context_t *ctx = (pfm_context_t *)info; pfm_syswide_force_stop() local
1755 if (ctx->ctx_cpu != smp_processor_id()) { pfm_syswide_force_stop()
1757 ctx->ctx_cpu, pfm_syswide_force_stop()
1762 if (owner != ctx->ctx_task) { pfm_syswide_force_stop()
1765 task_pid_nr(owner), task_pid_nr(ctx->ctx_task)); pfm_syswide_force_stop()
1768 if (GET_PMU_CTX() != ctx) { pfm_syswide_force_stop()
1769 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected ctx %p instead of %p\n", pfm_syswide_force_stop()
1771 GET_PMU_CTX(), ctx); pfm_syswide_force_stop()
1775 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task))); pfm_syswide_force_stop()
1783 ret = pfm_context_unload(ctx, NULL, 0, regs); pfm_syswide_force_stop()
1795 pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx) pfm_syswide_cleanup_other_cpu() argument
1799 DPRINT(("calling CPU%d for cleanup\n", ctx->ctx_cpu)); pfm_syswide_cleanup_other_cpu()
1800 ret = smp_call_function_single(ctx->ctx_cpu, pfm_syswide_force_stop, ctx, 1); pfm_syswide_cleanup_other_cpu()
1801 DPRINT(("called CPU%d for cleanup ret=%d\n", ctx->ctx_cpu, ret)); pfm_syswide_cleanup_other_cpu()
1812 pfm_context_t *ctx; pfm_flush() local
1825 ctx = filp->private_data; pfm_flush()
1826 if (ctx == NULL) { pfm_flush()
1827 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current)); pfm_flush()
1844 PROTECT_CTX(ctx, flags); pfm_flush()
1846 state = ctx->ctx_state; pfm_flush()
1847 is_system = ctx->ctx_fl_system; pfm_flush()
1849 task = PFM_CTX_TASK(ctx); pfm_flush()
1872 if (is_system && ctx->ctx_cpu != smp_processor_id()) { pfm_flush()
1874 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); pfm_flush()
1880 pfm_syswide_cleanup_other_cpu(ctx); pfm_flush()
1899 pfm_context_unload(ctx, NULL, 0, regs); pfm_flush()
1901 DPRINT(("ctx_state=%d\n", ctx->ctx_state)); pfm_flush()
1907 * cannot reset ctx field until last user is calling close(). pfm_flush()
1916 if (ctx->ctx_smpl_vaddr && current->mm) { pfm_flush()
1917 smpl_buf_vaddr = ctx->ctx_smpl_vaddr; pfm_flush()
1918 smpl_buf_size = ctx->ctx_smpl_size; pfm_flush()
1921 UNPROTECT_CTX(ctx, flags); pfm_flush()
1951 pfm_context_t *ctx; pfm_close() local
1968 ctx = filp->private_data; pfm_close()
1969 if (ctx == NULL) { pfm_close()
1970 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current)); pfm_close()
1974 PROTECT_CTX(ctx, flags); pfm_close()
1976 state = ctx->ctx_state; pfm_close()
1977 is_system = ctx->ctx_fl_system; pfm_close()
1979 task = PFM_CTX_TASK(ctx); pfm_close()
2003 if (state == PFM_CTX_MASKED && CTX_OVFL_NOBLOCK(ctx) == 0) { pfm_close()
2019 ctx->ctx_fl_going_zombie = 1; pfm_close()
2024 complete(&ctx->ctx_restart_done); pfm_close()
2037 add_wait_queue(&ctx->ctx_zombieq, &wait); pfm_close()
2039 UNPROTECT_CTX(ctx, flags); pfm_close()
2049 PROTECT_CTX(ctx, flags); pfm_close()
2052 remove_wait_queue(&ctx->ctx_zombieq, &wait); pfm_close()
2065 ctx->ctx_state = PFM_CTX_ZOMBIE; pfm_close()
2067 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task))); pfm_close()
2074 pfm_context_unload(ctx, NULL, 0, regs); pfm_close()
2080 state = ctx->ctx_state; pfm_close()
2096 if (ctx->ctx_smpl_hdr) { pfm_close()
2097 smpl_buf_addr = ctx->ctx_smpl_hdr; pfm_close()
2098 smpl_buf_size = ctx->ctx_smpl_size; pfm_close()
2100 ctx->ctx_smpl_hdr = NULL; pfm_close()
2101 ctx->ctx_fl_is_sampling = 0; pfm_close()
2110 if (smpl_buf_addr) pfm_exit_smpl_buffer(ctx->ctx_buf_fmt); pfm_close()
2116 pfm_unreserve_session(ctx, ctx->ctx_fl_system , ctx->ctx_cpu); pfm_close()
2132 UNPROTECT_CTX(ctx, flags); pfm_close()
2143 if (free_possible) pfm_context_free(ctx); pfm_close()
2172 pfm_alloc_file(pfm_context_t *ctx) pfm_alloc_file() argument
2211 file->private_data = ctx; pfm_alloc_file()
2239 pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned long rsize, void **user_vaddr) pfm_smpl_buffer_alloc() argument
2299 ctx->ctx_smpl_hdr = smpl_buf; pfm_smpl_buffer_alloc()
2300 ctx->ctx_smpl_size = size; /* aligned size */ pfm_smpl_buffer_alloc()
2320 DPRINT(("aligned size=%ld, hdr=%p mapped @0x%lx\n", size, ctx->ctx_smpl_hdr, vma->vm_start)); pfm_smpl_buffer_alloc()
2342 ctx->ctx_smpl_vaddr = (void *)vma->vm_start; pfm_smpl_buffer_alloc()
2416 pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t *ctx, unsigned int ctx_flags, pfm_setup_buffer_fmt() argument
2445 ctx->ctx_buf_fmt = fmt; pfm_setup_buffer_fmt()
2446 ctx->ctx_fl_is_sampling = 1; /* assume record() is defined */ pfm_setup_buffer_fmt()
2458 ret = pfm_smpl_buffer_alloc(current, filp, ctx, size, &uaddr); pfm_setup_buffer_fmt()
2464 ret = pfm_buf_fmt_init(fmt, task, ctx->ctx_smpl_hdr, ctx_flags, cpu, fmt_arg); pfm_setup_buffer_fmt()
2471 pfm_reset_pmu_state(pfm_context_t *ctx) pfm_reset_pmu_state() argument
2480 ctx->ctx_pmcs[i] = PMC_DFL_VAL(i); pfm_reset_pmu_state()
2481 DPRINT(("pmc[%d]=0x%lx\n", i, ctx->ctx_pmcs[i])); pfm_reset_pmu_state()
2510 ctx->ctx_all_pmcs[0] = pmu_conf->impl_pmcs[0] & ~0x1; pfm_reset_pmu_state()
2515 ctx->ctx_all_pmds[0] = pmu_conf->impl_pmds[0]; pfm_reset_pmu_state()
2517 DPRINT(("<%d> all_pmcs=0x%lx all_pmds=0x%lx\n", ctx->ctx_fd, ctx->ctx_all_pmcs[0],ctx->ctx_all_pmds[0])); pfm_reset_pmu_state()
2522 ctx->ctx_used_ibrs[0] = 0UL; pfm_reset_pmu_state()
2523 ctx->ctx_used_dbrs[0] = 0UL; pfm_reset_pmu_state()
2557 pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task) pfm_task_incompatible() argument
2573 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) { pfm_task_incompatible()
2603 pfm_get_task(pfm_context_t *ctx, pid_t pid, struct task_struct **task) pfm_get_task() argument
2625 ret = pfm_task_incompatible(ctx, p); pfm_get_task()
2637 pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_context_create() argument
2659 ctx = pfm_context_alloc(ctx_flags); pfm_context_create()
2660 if (!ctx) pfm_context_create()
2663 filp = pfm_alloc_file(ctx); pfm_context_create()
2669 req->ctx_fd = ctx->ctx_fd = fd; pfm_context_create()
2675 ret = pfm_setup_buffer_fmt(current, filp, ctx, ctx_flags, 0, req); pfm_context_create()
2680 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n", pfm_context_create()
2681 ctx, pfm_context_create()
2683 ctx->ctx_fl_system, pfm_context_create()
2684 ctx->ctx_fl_block, pfm_context_create()
2685 ctx->ctx_fl_excl_idle, pfm_context_create()
2686 ctx->ctx_fl_no_msg, pfm_context_create()
2687 ctx->ctx_fd)); pfm_context_create()
2692 pfm_reset_pmu_state(ctx); pfm_context_create()
2703 if (ctx->ctx_buf_fmt) { pfm_context_create()
2704 pfm_buf_fmt_exit(ctx->ctx_buf_fmt, current, NULL, regs); pfm_context_create()
2707 pfm_context_free(ctx); pfm_context_create()
2734 pfm_reset_regs_masked(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset) pfm_reset_regs_masked() argument
2749 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); pfm_reset_regs_masked()
2750 reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; pfm_reset_regs_masked()
2762 ctx->ctx_pmds[i].val = val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset); pfm_reset_regs_masked()
2770 pfm_reset_regs(pfm_context_t *ctx, unsigned long *ovfl_regs, int is_long_reset) pfm_reset_regs() argument
2779 if (ctx->ctx_state == PFM_CTX_MASKED) { pfm_reset_regs()
2780 pfm_reset_regs_masked(ctx, ovfl_regs, is_long_reset); pfm_reset_regs()
2792 val = pfm_new_counter_value(ctx->ctx_pmds+ i, is_long_reset); pfm_reset_regs()
2793 reset_others |= ctx->ctx_pmds[i].reset_pmds[0]; pfm_reset_regs()
2797 pfm_write_soft_counter(ctx, i, val); pfm_reset_regs()
2807 val = pfm_new_counter_value(ctx->ctx_pmds + i, is_long_reset); pfm_reset_regs()
2810 pfm_write_soft_counter(ctx, i, val); pfm_reset_regs()
2821 pfm_write_pmcs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_write_pmcs() argument
2834 state = ctx->ctx_state; pfm_write_pmcs()
2836 is_system = ctx->ctx_fl_system; pfm_write_pmcs()
2837 task = ctx->ctx_task; pfm_write_pmcs()
2848 if (is_system && ctx->ctx_cpu != smp_processor_id()) { pfm_write_pmcs()
2849 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); pfm_write_pmcs()
2935 ret = (*wr_func)(task, ctx, cnum, &value, regs); pfm_write_pmcs()
2956 ctx->ctx_pmds[cnum].flags = flags; pfm_write_pmcs()
2958 ctx->ctx_pmds[cnum].reset_pmds[0] = reset_pmds; pfm_write_pmcs()
2959 ctx->ctx_pmds[cnum].smpl_pmds[0] = smpl_pmds; pfm_write_pmcs()
2960 ctx->ctx_pmds[cnum].eventid = req->reg_smpl_eventid; pfm_write_pmcs()
2973 CTX_USED_PMD(ctx, reset_pmds); pfm_write_pmcs()
2974 CTX_USED_PMD(ctx, smpl_pmds); pfm_write_pmcs()
2979 if (state == PFM_CTX_MASKED) ctx->ctx_ovfl_regs[0] &= ~1UL << cnum; pfm_write_pmcs()
2986 CTX_USED_PMD(ctx, pmu_conf->pmc_desc[cnum].dep_pmd[0]); pfm_write_pmcs()
3000 if (is_monitor) CTX_USED_MONITOR(ctx, 1UL << cnum); pfm_write_pmcs()
3005 ctx->ctx_pmcs[cnum] = value; pfm_write_pmcs()
3011 if (is_system == 0) ctx->th_pmcs[cnum] = value; pfm_write_pmcs()
3028 ctx->ctx_reload_pmcs[0] |= 1UL << cnum; pfm_write_pmcs()
3039 ctx->ctx_all_pmcs[0], pfm_write_pmcs()
3040 ctx->ctx_used_pmds[0], pfm_write_pmcs()
3041 ctx->ctx_pmds[cnum].eventid, pfm_write_pmcs()
3044 ctx->ctx_reload_pmcs[0], pfm_write_pmcs()
3045 ctx->ctx_used_monitors[0], pfm_write_pmcs()
3046 ctx->ctx_ovfl_regs[0])); pfm_write_pmcs()
3061 pfm_write_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_write_pmds() argument
3073 state = ctx->ctx_state; pfm_write_pmds()
3075 is_system = ctx->ctx_fl_system; pfm_write_pmds()
3077 task = ctx->ctx_task; pfm_write_pmds()
3091 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { pfm_write_pmds()
3092 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); pfm_write_pmds()
3117 ret = (*wr_func)(task, ctx, cnum, &v, regs); pfm_write_pmds()
3141 ctx->ctx_pmds[cnum].lval = value; pfm_write_pmds()
3154 ctx->ctx_pmds[cnum].long_reset = req->reg_long_reset; pfm_write_pmds()
3155 ctx->ctx_pmds[cnum].short_reset = req->reg_short_reset; pfm_write_pmds()
3160 ctx->ctx_pmds[cnum].seed = req->reg_random_seed; pfm_write_pmds()
3161 ctx->ctx_pmds[cnum].mask = req->reg_random_mask; pfm_write_pmds()
3166 ctx->ctx_pmds[cnum].val = value; pfm_write_pmds()
3174 CTX_USED_PMD(ctx, PMD_PMD_DEP(cnum)); pfm_write_pmds()
3179 CTX_USED_PMD(ctx, RDEP(cnum)); pfm_write_pmds()
3186 ctx->ctx_ovfl_regs[0] &= ~1UL << cnum; pfm_write_pmds()
3193 if (is_system == 0) ctx->th_pmds[cnum] = hw_value; pfm_write_pmds()
3207 ctx->ctx_reload_pmds[0] |= 1UL << cnum; pfm_write_pmds()
3219 ctx->ctx_pmds[cnum].val, pfm_write_pmds()
3220 ctx->ctx_pmds[cnum].short_reset, pfm_write_pmds()
3221 ctx->ctx_pmds[cnum].long_reset, pfm_write_pmds()
3222 PMC_OVFL_NOTIFY(ctx, cnum) ? 'Y':'N', pfm_write_pmds()
3223 ctx->ctx_pmds[cnum].seed, pfm_write_pmds()
3224 ctx->ctx_pmds[cnum].mask, pfm_write_pmds()
3225 ctx->ctx_used_pmds[0], pfm_write_pmds()
3226 ctx->ctx_pmds[cnum].reset_pmds[0], pfm_write_pmds()
3227 ctx->ctx_reload_pmds[0], pfm_write_pmds()
3228 ctx->ctx_all_pmds[0], pfm_write_pmds()
3229 ctx->ctx_ovfl_regs[0])); pfm_write_pmds()
3257 pfm_read_pmds(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_read_pmds() argument
3273 state = ctx->ctx_state; pfm_read_pmds()
3275 is_system = ctx->ctx_fl_system; pfm_read_pmds()
3277 task = ctx->ctx_task; pfm_read_pmds()
3287 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { pfm_read_pmds()
3288 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); pfm_read_pmds()
3324 if (unlikely(!CTX_IS_USED_PMD(ctx, cnum))) goto error; pfm_read_pmds()
3326 sval = ctx->ctx_pmds[cnum].val; pfm_read_pmds()
3327 lval = ctx->ctx_pmds[cnum].lval; pfm_read_pmds()
3343 val = is_loaded ? ctx->th_pmds[cnum] : 0UL; pfm_read_pmds()
3360 ret = (*rd_func)(ctx->ctx_task, ctx, cnum, &v, regs); pfm_read_pmds()
3390 pfm_context_t *ctx; pfm_mod_write_pmcs() local
3394 ctx = GET_PMU_CTX(); pfm_mod_write_pmcs()
3396 if (ctx == NULL) return -EINVAL; pfm_mod_write_pmcs()
3402 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; pfm_mod_write_pmcs()
3404 return pfm_write_pmcs(ctx, req, nreq, regs); pfm_mod_write_pmcs()
3411 pfm_context_t *ctx; pfm_mod_read_pmds() local
3415 ctx = GET_PMU_CTX(); pfm_mod_read_pmds()
3417 if (ctx == NULL) return -EINVAL; pfm_mod_read_pmds()
3423 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; pfm_mod_read_pmds()
3425 return pfm_read_pmds(ctx, req, nreq, regs); pfm_mod_read_pmds()
3436 pfm_context_t *ctx = task->thread.pfm_context; pfm_use_debug_registers() local
3457 if (ctx && ctx->ctx_fl_using_dbreg == 1) return -1; pfm_use_debug_registers()
3510 pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_restart() argument
3518 state = ctx->ctx_state; pfm_restart()
3519 fmt = ctx->ctx_buf_fmt; pfm_restart()
3520 is_system = ctx->ctx_fl_system; pfm_restart()
3521 task = PFM_CTX_TASK(ctx); pfm_restart()
3527 if (CTX_HAS_SMPL(ctx) && fmt->fmt_restart_active) break; pfm_restart()
3543 if (is_system && ctx->ctx_cpu != smp_processor_id()) { pfm_restart()
3544 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); pfm_restart()
3556 fmt = ctx->ctx_buf_fmt; pfm_restart()
3560 ctx->ctx_ovfl_regs[0])); pfm_restart()
3562 if (CTX_HAS_SMPL(ctx)) { pfm_restart()
3564 prefetch(ctx->ctx_smpl_hdr); pfm_restart()
3570 ret = pfm_buf_fmt_restart_active(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); pfm_restart()
3572 ret = pfm_buf_fmt_restart(fmt, task, &rst_ctrl, ctx->ctx_smpl_hdr, regs); pfm_restart()
3580 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET); pfm_restart()
3595 ctx->ctx_ovfl_regs[0] = 0UL; pfm_restart()
3600 ctx->ctx_state = PFM_CTX_LOADED; pfm_restart()
3605 ctx->ctx_fl_can_restart = 0; pfm_restart()
3619 if (ctx->ctx_fl_can_restart == 0) return -EINVAL; pfm_restart()
3624 ctx->ctx_fl_can_restart = 0; pfm_restart()
3643 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { pfm_restart()
3645 complete(&ctx->ctx_restart_done); pfm_restart()
3649 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; pfm_restart()
3663 pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_debug() argument
3682 pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_write_ibr_dbr() argument
3697 state = ctx->ctx_state; pfm_write_ibr_dbr()
3699 is_system = ctx->ctx_fl_system; pfm_write_ibr_dbr()
3700 task = ctx->ctx_task; pfm_write_ibr_dbr()
3715 if (unlikely(is_system && ctx->ctx_cpu != smp_processor_id())) { pfm_write_ibr_dbr()
3716 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); pfm_write_ibr_dbr()
3729 first_time = ctx->ctx_fl_using_dbreg == 0; pfm_write_ibr_dbr()
3764 ctx->ctx_fl_using_dbreg = 1; pfm_write_ibr_dbr()
3829 CTX_USED_IBR(ctx, rnum); pfm_write_ibr_dbr()
3836 ctx->ctx_ibrs[rnum] = dbreg.val; pfm_write_ibr_dbr()
3839 rnum, dbreg.val, ctx->ctx_used_ibrs[0], is_loaded, can_access_pmu)); pfm_write_ibr_dbr()
3841 CTX_USED_DBR(ctx, rnum); pfm_write_ibr_dbr()
3847 ctx->ctx_dbrs[rnum] = dbreg.val; pfm_write_ibr_dbr()
3850 rnum, dbreg.val, ctx->ctx_used_dbrs[0], is_loaded, can_access_pmu)); pfm_write_ibr_dbr()
3862 if (ctx->ctx_fl_system) { pfm_write_ibr_dbr()
3866 ctx->ctx_fl_using_dbreg = 0; pfm_write_ibr_dbr()
3877 pfm_write_ibrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_write_ibrs() argument
3879 return pfm_write_ibr_dbr(PFM_CODE_RR, ctx, arg, count, regs); pfm_write_ibrs()
3883 pfm_write_dbrs(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_write_dbrs() argument
3885 return pfm_write_ibr_dbr(PFM_DATA_RR, ctx, arg, count, regs); pfm_write_dbrs()
3891 pfm_context_t *ctx; pfm_mod_write_ibrs() local
3895 ctx = GET_PMU_CTX(); pfm_mod_write_ibrs()
3897 if (ctx == NULL) return -EINVAL; pfm_mod_write_ibrs()
3903 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; pfm_mod_write_ibrs()
3905 return pfm_write_ibrs(ctx, req, nreq, regs); pfm_mod_write_ibrs()
3912 pfm_context_t *ctx; pfm_mod_write_dbrs() local
3916 ctx = GET_PMU_CTX(); pfm_mod_write_dbrs()
3918 if (ctx == NULL) return -EINVAL; pfm_mod_write_dbrs()
3924 if (task != current && ctx->ctx_fl_system == 0) return -EBUSY; pfm_mod_write_dbrs()
3926 return pfm_write_dbrs(ctx, req, nreq, regs); pfm_mod_write_dbrs()
3932 pfm_get_features(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_get_features() argument
3941 pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_stop() argument
3944 struct task_struct *task = PFM_CTX_TASK(ctx); pfm_stop()
3947 state = ctx->ctx_state; pfm_stop()
3948 is_system = ctx->ctx_fl_system; pfm_stop()
3960 if (is_system && ctx->ctx_cpu != smp_processor_id()) { pfm_stop()
3961 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); pfm_stop()
3965 task_pid_nr(PFM_CTX_TASK(ctx)), pfm_stop()
4022 ctx->ctx_saved_psr_up = 0; pfm_stop()
4030 pfm_start(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_start() argument
4035 state = ctx->ctx_state; pfm_start()
4036 is_system = ctx->ctx_fl_system; pfm_start()
4045 if (is_system && ctx->ctx_cpu != smp_processor_id()) { pfm_start()
4046 DPRINT(("should be running on CPU%d\n", ctx->ctx_cpu)); pfm_start()
4083 if (ctx->ctx_task == current) { pfm_start()
4094 tregs = task_pt_regs(ctx->ctx_task); pfm_start()
4100 ctx->ctx_saved_psr_up = IA64_PSR_UP; pfm_start()
4111 pfm_get_pmc_reset(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_get_pmc_reset() argument
4138 pfm_check_task_exist(pfm_context_t *ctx) pfm_check_task_exist() argument
4146 if (t->thread.pfm_context == ctx) { do_each_thread()
4154 DPRINT(("pfm_check_task_exist: ret=%d ctx=%p\n", ret, ctx));
4160 pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_context_load() argument
4175 state = ctx->ctx_state; pfm_context_load()
4176 is_system = ctx->ctx_fl_system; pfm_context_load()
4183 ctx->ctx_state)); pfm_context_load()
4187 DPRINT(("load_pid [%d] using_dbreg=%d\n", req->load_pid, ctx->ctx_fl_using_dbreg)); pfm_context_load()
4189 if (CTX_OVFL_NOBLOCK(ctx) == 0 && req->load_pid == current->pid) { pfm_context_load()
4194 ret = pfm_get_task(ctx, req->load_pid, &task); pfm_context_load()
4218 if (ctx->ctx_fl_using_dbreg) { pfm_context_load()
4258 the_cpu = ctx->ctx_cpu = smp_processor_id(); pfm_context_load()
4277 thread->pfm_context, ctx)); pfm_context_load()
4280 old = ia64_cmpxchg(acq, &thread->pfm_context, NULL, ctx, sizeof(pfm_context_t *)); pfm_context_load()
4286 pfm_reset_msgq(ctx); pfm_context_load()
4288 ctx->ctx_state = PFM_CTX_LOADED; pfm_context_load()
4293 ctx->ctx_task = task; pfm_context_load()
4302 if (ctx->ctx_fl_excl_idle) PFM_CPUINFO_SET(PFM_CPUINFO_EXCL_IDLE); pfm_context_load()
4310 pfm_copy_pmds(task, ctx); pfm_context_load()
4311 pfm_copy_pmcs(task, ctx); pfm_context_load()
4313 pmcs_source = ctx->th_pmcs; pfm_context_load()
4314 pmds_source = ctx->th_pmds; pfm_context_load()
4327 SET_LAST_CPU(ctx, smp_processor_id()); pfm_context_load()
4329 SET_ACTIVATION(ctx); pfm_context_load()
4339 * load all PMD from ctx to PMU (as opposed to thread state) pfm_context_load()
4340 * restore all PMC from ctx to PMU pfm_context_load()
4342 pfm_restore_pmds(pmds_source, ctx->ctx_all_pmds[0]); pfm_context_load()
4343 pfm_restore_pmcs(pmcs_source, ctx->ctx_all_pmcs[0]); pfm_context_load()
4345 ctx->ctx_reload_pmcs[0] = 0UL; pfm_context_load()
4346 ctx->ctx_reload_pmds[0] = 0UL; pfm_context_load()
4351 if (ctx->ctx_fl_using_dbreg) { pfm_context_load()
4352 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); pfm_context_load()
4353 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); pfm_context_load()
4358 SET_PMU_OWNER(task, ctx); pfm_context_load()
4368 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; pfm_context_load()
4369 SET_LAST_CPU(ctx, -1); pfm_context_load()
4372 ctx->ctx_saved_psr_up = 0UL; pfm_context_load()
4379 if (ret) pfm_unreserve_session(ctx, ctx->ctx_fl_system, the_cpu); pfm_context_load()
4396 ret = pfm_check_task_exist(ctx); pfm_context_load()
4398 ctx->ctx_state = PFM_CTX_UNLOADED; pfm_context_load()
4399 ctx->ctx_task = NULL; pfm_context_load()
4414 static void pfm_flush_pmds(struct task_struct *, pfm_context_t *ctx);
4417 pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs) pfm_context_unload() argument
4419 struct task_struct *task = PFM_CTX_TASK(ctx); pfm_context_unload()
4424 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1)); pfm_context_unload()
4426 prev_state = ctx->ctx_state; pfm_context_unload()
4427 is_system = ctx->ctx_fl_system; pfm_context_unload()
4440 ret = pfm_stop(ctx, NULL, 0, regs); pfm_context_unload()
4443 ctx->ctx_state = PFM_CTX_UNLOADED; pfm_context_unload()
4464 pfm_flush_pmds(current, ctx); pfm_context_unload()
4471 pfm_unreserve_session(ctx, 1 , ctx->ctx_cpu); pfm_context_unload()
4480 ctx->ctx_task = NULL; pfm_context_unload()
4505 pfm_flush_pmds(task, ctx); pfm_context_unload()
4514 pfm_unreserve_session(ctx, 0 , ctx->ctx_cpu); pfm_context_unload()
4519 ctx->ctx_last_activation = PFM_INVALID_ACTIVATION; pfm_context_unload()
4520 SET_LAST_CPU(ctx, -1); pfm_context_unload()
4531 ctx->ctx_task = NULL; pfm_context_unload()
4535 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; pfm_context_unload()
4536 ctx->ctx_fl_can_restart = 0; pfm_context_unload()
4537 ctx->ctx_fl_going_zombie = 0; pfm_context_unload()
4552 pfm_context_t *ctx; pfm_exit_thread() local
4558 ctx = PFM_GET_CTX(task); pfm_exit_thread()
4560 PROTECT_CTX(ctx, flags); pfm_exit_thread()
4562 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task))); pfm_exit_thread()
4564 state = ctx->ctx_state; pfm_exit_thread()
4571 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task)); pfm_exit_thread()
4575 ret = pfm_context_unload(ctx, NULL, 0, regs); pfm_exit_thread()
4579 DPRINT(("ctx unloaded for current state was %d\n", state)); pfm_exit_thread()
4581 pfm_end_notify_user(ctx); pfm_exit_thread()
4584 ret = pfm_context_unload(ctx, NULL, 0, regs); pfm_exit_thread()
4594 UNPROTECT_CTX(ctx, flags); pfm_exit_thread()
4607 if (free_ok) pfm_context_free(ctx); pfm_exit_thread()
4658 pfm_check_task_state(pfm_context_t *ctx, int cmd, unsigned long flags) pfm_check_task_state() argument
4664 state = ctx->ctx_state; pfm_check_task_state()
4665 task = ctx->ctx_task; pfm_check_task_state()
4668 DPRINT(("context %d no task, state=%d\n", ctx->ctx_fd, state)); pfm_check_task_state()
4673 ctx->ctx_fd, pfm_check_task_state()
4685 if (task == current || ctx->ctx_fl_system) return 0; pfm_check_task_state()
4741 UNPROTECT_CTX(ctx, flags); pfm_check_task_state()
4745 PROTECT_CTX(ctx, flags); pfm_check_task_state()
4750 if (ctx->ctx_state != old_state) { pfm_check_task_state()
4751 DPRINT(("old_state=%d new_state=%d\n", old_state, ctx->ctx_state)); pfm_check_task_state()
4765 pfm_context_t *ctx = NULL; sys_perfmonctl() local
4771 int (*func)(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs); sys_perfmonctl()
4871 ctx = f.file->private_data; sys_perfmonctl()
4872 if (unlikely(ctx == NULL)) { sys_perfmonctl()
4876 prefetch(&ctx->ctx_state); sys_perfmonctl()
4878 PROTECT_CTX(ctx, flags); sys_perfmonctl()
4883 ret = pfm_check_task_state(ctx, cmd, flags); sys_perfmonctl()
4887 ret = (*func)(ctx, args_k, count, task_pt_regs(current)); sys_perfmonctl()
4892 if (likely(ctx)) { sys_perfmonctl()
4894 UNPROTECT_CTX(ctx, flags); sys_perfmonctl()
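Aside: sys_perfmonctl() dispatches through a table of handlers that all share the cmd_func signature shown earlier in this file. A condensed sketch of that dispatch style with a hypothetical two-entry table:

	typedef int (*demo_cmd_func_t)(pfm_context_t *ctx, void *arg,
				       int count, struct pt_regs *regs);

	static int demo_start(pfm_context_t *ctx, void *arg, int count,
			      struct pt_regs *regs) { return 0; }
	static int demo_stop(pfm_context_t *ctx, void *arg, int count,
			     struct pt_regs *regs) { return 0; }

	static demo_cmd_func_t demo_cmd_tab[] = { demo_start, demo_stop };

	/* validate, then indirect call -- mirrors the (*func)(...) above */
	if (cmd < ARRAY_SIZE(demo_cmd_tab))
		ret = (*demo_cmd_tab[cmd])(ctx, args_k, count,
					   task_pt_regs(current));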
4912 pfm_resume_after_ovfl(pfm_context_t *ctx, unsigned long ovfl_regs, struct pt_regs *regs) pfm_resume_after_ovfl() argument
4914 pfm_buffer_fmt_t *fmt = ctx->ctx_buf_fmt; pfm_resume_after_ovfl()
4919 state = ctx->ctx_state; pfm_resume_after_ovfl()
4924 if (CTX_HAS_SMPL(ctx)) { pfm_resume_after_ovfl()
4930 ret = pfm_buf_fmt_restart_active(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); pfm_resume_after_ovfl()
4932 ret = pfm_buf_fmt_restart(fmt, current, &rst_ctrl, ctx->ctx_smpl_hdr, regs); pfm_resume_after_ovfl()
4940 pfm_reset_regs(ctx, &ovfl_regs, PFM_PMD_LONG_RESET); pfm_resume_after_ovfl()
4944 if (ctx->ctx_state == PFM_CTX_MASKED) pfm_restore_monitoring(current); pfm_resume_after_ovfl()
4949 ctx->ctx_state = PFM_CTX_LOADED; pfm_resume_after_ovfl()
4958 pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs) pfm_context_force_terminate() argument
4964 ret = pfm_context_unload(ctx, NULL, 0, regs); pfm_context_force_terminate()
4972 wake_up_interruptible(&ctx->ctx_zombieq); pfm_context_force_terminate()
4981 static int pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds);
4995 pfm_context_t *ctx; pfm_handle_work() local
5002 ctx = PFM_GET_CTX(current); pfm_handle_work()
5003 if (ctx == NULL) { pfm_handle_work()
5009 PROTECT_CTX(ctx, flags); pfm_handle_work()
5018 reason = ctx->ctx_fl_trap_reason; pfm_handle_work()
5019 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_NONE; pfm_handle_work()
5020 ovfl_regs = ctx->ctx_ovfl_regs[0]; pfm_handle_work()
5022 DPRINT(("reason=%d state=%d\n", reason, ctx->ctx_state)); pfm_handle_work()
5027 if (ctx->ctx_fl_going_zombie || ctx->ctx_state == PFM_CTX_ZOMBIE) pfm_handle_work()
5030 //if (CTX_OVFL_NOBLOCK(ctx)) goto skip_blocking; pfm_handle_work()
5038 UNPROTECT_CTX(ctx, flags); pfm_handle_work()
5051 ret = wait_for_completion_interruptible(&ctx->ctx_restart_done); pfm_handle_work()
5061 PROTECT_CTX(ctx, dummy_flags); pfm_handle_work()
5069 ovfl_regs = ctx->ctx_ovfl_regs[0]; pfm_handle_work()
5071 if (ctx->ctx_fl_going_zombie) { pfm_handle_work()
5074 pfm_context_force_terminate(ctx, regs); pfm_handle_work()
5084 pfm_resume_after_ovfl(ctx, ovfl_regs, regs); pfm_handle_work()
5085 ctx->ctx_ovfl_regs[0] = 0UL; pfm_handle_work()
5091 UNPROTECT_CTX(ctx, flags); pfm_handle_work()
5095 pfm_notify_user(pfm_context_t *ctx, pfm_msg_t *msg) pfm_notify_user() argument
5097 if (ctx->ctx_state == PFM_CTX_ZOMBIE) { pfm_notify_user()
5104 if (msg) wake_up_interruptible(&ctx->ctx_msgq_wait); pfm_notify_user()
5110 kill_fasync (&ctx->ctx_async_queue, SIGIO, POLL_IN); pfm_notify_user()
5116 pfm_ovfl_notify_user(pfm_context_t *ctx, unsigned long ovfl_pmds) pfm_ovfl_notify_user() argument
5120 if (ctx->ctx_fl_no_msg == 0) { pfm_ovfl_notify_user()
5121 msg = pfm_get_new_msg(ctx); pfm_ovfl_notify_user()
5128 msg->pfm_ovfl_msg.msg_ctx_fd = ctx->ctx_fd; pfm_ovfl_notify_user()
5139 ctx->ctx_fl_no_msg, pfm_ovfl_notify_user()
5140 ctx->ctx_fd, pfm_ovfl_notify_user()
5143 return pfm_notify_user(ctx, msg); pfm_ovfl_notify_user()
5147 pfm_end_notify_user(pfm_context_t *ctx) pfm_end_notify_user() argument
5151 msg = pfm_get_new_msg(ctx); pfm_end_notify_user()
5160 msg->pfm_end_msg.msg_ctx_fd = ctx->ctx_fd; pfm_end_notify_user()
5165 ctx->ctx_fl_no_msg, pfm_end_notify_user()
5166 ctx->ctx_fd)); pfm_end_notify_user()
5168 return pfm_notify_user(ctx, msg); pfm_end_notify_user()
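Aside: pfm_notify_user() pairs the two user-space wake-up channels visible above: wake_up_interruptible() for tasks blocked in read()/poll(), and kill_fasync() for SIGIO subscribers who requested async notification. In isolation:

	/* wake anyone sleeping in read()/poll() on the message queue */
	wake_up_interruptible(&ctx->ctx_msgq_wait);

	/* and raise SIGIO for O_ASYNC subscribers (fcntl F_SETOWN/F_SETFL) */
	kill_fasync(&ctx->ctx_async_queue, SIGIO, POLL_IN);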
5175 static void pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, pfm_overflow_handler() argument
5187 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) goto stop_monitoring; pfm_overflow_handler()
5197 has_smpl = CTX_HAS_SMPL(ctx); pfm_overflow_handler()
5204 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking", pfm_overflow_handler()
5205 ctx->ctx_used_pmds[0])); pfm_overflow_handler()
5223 old_val = new_val = ctx->ctx_pmds[i].val; pfm_overflow_handler()
5225 ctx->ctx_pmds[i].val = new_val; pfm_overflow_handler()
5232 if (PMC_OVFL_NOTIFY(ctx, i)) ovfl_notify |= 1UL << i; pfm_overflow_handler()
5266 ovfl_arg = &ctx->ctx_ovfl_arg; pfm_overflow_handler()
5268 prefetch(ctx->ctx_smpl_hdr); pfm_overflow_handler()
5280 ovfl_arg->smpl_pmds[0] = smpl_pmds = ctx->ctx_pmds[i].smpl_pmds[0]; pfm_overflow_handler()
5282 ovfl_arg->pmd_value = ctx->ctx_pmds[i].val; pfm_overflow_handler()
5283 ovfl_arg->pmd_last_reset = ctx->ctx_pmds[i].lval; pfm_overflow_handler()
5284 ovfl_arg->pmd_eventid = ctx->ctx_pmds[i].eventid; pfm_overflow_handler()
5293 ovfl_arg->smpl_pmds_values[k++] = PMD_IS_COUNTING(j) ? pfm_read_soft_counter(ctx, j) : ia64_get_pmd(j); pfm_overflow_handler()
5305 ret = (*ctx->ctx_buf_fmt->fmt_handler)(task, ctx->ctx_smpl_hdr, ovfl_arg, regs, tstamp); pfm_overflow_handler()
5356 pfm_reset_regs(ctx, &bm, PFM_PMD_SHORT_RESET); pfm_overflow_handler()
5363 ctx->ctx_ovfl_regs[0] = ovfl_pmds; pfm_overflow_handler()
5368 if (CTX_OVFL_NOBLOCK(ctx) == 0 && ovfl_ctrl.bits.block_task) { pfm_overflow_handler()
5370 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_BLOCK; pfm_overflow_handler()
5393 ctx->ctx_fl_trap_reason, pfm_overflow_handler()
5402 ctx->ctx_state = PFM_CTX_MASKED; pfm_overflow_handler()
5403 ctx->ctx_fl_can_restart = 1; pfm_overflow_handler()
5409 if (must_notify) pfm_ovfl_notify_user(ctx, ovfl_notify); pfm_overflow_handler()
5449 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1)); pfm_overflow_handler()
5460 pfm_context_t *ctx; pfm_do_interrupt_handler() local
5474 ctx = GET_PMU_CTX(); pfm_do_interrupt_handler()
5486 if (!ctx) goto report_spurious1; pfm_do_interrupt_handler()
5488 if (ctx->ctx_fl_system == 0 && (task->thread.flags & IA64_THREAD_PM_VALID) == 0) pfm_do_interrupt_handler()
5491 PROTECT_CTX_NOPRINT(ctx, flags); pfm_do_interrupt_handler()
5493 pfm_overflow_handler(task, ctx, pmc0, regs); pfm_do_interrupt_handler()
5495 UNPROTECT_CTX_NOPRINT(ctx, flags); pfm_do_interrupt_handler()
5779 pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs) pfm_force_cleanup() argument
5781 struct task_struct *task = ctx->ctx_task; pfm_force_cleanup()
5788 task_pid_nr(ctx->ctx_task))); pfm_force_cleanup()
5810 pfm_context_t *ctx; pfm_save_regs() local
5815 ctx = PFM_GET_CTX(task); pfm_save_regs()
5816 if (ctx == NULL) return; pfm_save_regs()
5823 flags = pfm_protect_ctx_ctxsw(ctx); pfm_save_regs()
5825 if (ctx->ctx_state == PFM_CTX_ZOMBIE) { pfm_save_regs()
5830 pfm_force_cleanup(ctx, regs); pfm_save_regs()
5832 BUG_ON(ctx->ctx_smpl_hdr); pfm_save_regs()
5834 pfm_unprotect_ctx_ctxsw(ctx, flags); pfm_save_regs()
5836 pfm_context_free(ctx); pfm_save_regs()
5860 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP; pfm_save_regs()
5874 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]); pfm_save_regs()
5881 ctx->th_pmcs[0] = ia64_get_pmc(0); pfm_save_regs()
5886 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); pfm_save_regs()
5892 pfm_unprotect_ctx_ctxsw(ctx, flags); pfm_save_regs()
5899 pfm_context_t *ctx; pfm_save_regs() local
5902 ctx = PFM_GET_CTX(task); pfm_save_regs()
5903 if (ctx == NULL) return; pfm_save_regs()
5924 ctx->ctx_saved_psr_up = psr & IA64_PSR_UP; pfm_save_regs()
5930 pfm_context_t *ctx; pfm_lazy_save_regs() local
5937 ctx = PFM_GET_CTX(task); pfm_lazy_save_regs()
5948 PROTECT_CTX(ctx,flags); pfm_lazy_save_regs()
5962 pfm_save_pmds(ctx->th_pmds, ctx->ctx_used_pmds[0]); pfm_lazy_save_regs()
5969 ctx->th_pmcs[0] = ia64_get_pmc(0); pfm_lazy_save_regs()
5974 if (ctx->th_pmcs[0] & ~0x1UL) pfm_unfreeze_pmu(); pfm_lazy_save_regs()
5981 UNPROTECT_CTX(ctx,flags); pfm_lazy_save_regs()
5992 pfm_context_t *ctx; pfm_load_regs() local
5998 ctx = PFM_GET_CTX(task); pfm_load_regs()
5999 if (unlikely(ctx == NULL)) return; pfm_load_regs()
6013 flags = pfm_protect_ctx_ctxsw(ctx); pfm_load_regs()
6021 if (unlikely(ctx->ctx_state == PFM_CTX_ZOMBIE)) { pfm_load_regs()
6024 BUG_ON(ctx->ctx_smpl_hdr); pfm_load_regs()
6026 pfm_force_cleanup(ctx, regs); pfm_load_regs()
6028 pfm_unprotect_ctx_ctxsw(ctx, flags); pfm_load_regs()
6033 pfm_context_free(ctx); pfm_load_regs()
6042 if (ctx->ctx_fl_using_dbreg) { pfm_load_regs()
6043 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); pfm_load_regs()
6044 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); pfm_load_regs()
6049 psr_up = ctx->ctx_saved_psr_up; pfm_load_regs()
6055 if (GET_LAST_CPU(ctx) == smp_processor_id() && ctx->ctx_last_activation == GET_ACTIVATION()) { pfm_load_regs()
6060 pmc_mask = ctx->ctx_reload_pmcs[0]; pfm_load_regs()
6061 pmd_mask = ctx->ctx_reload_pmds[0]; pfm_load_regs()
6070 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0]; pfm_load_regs()
6079 pmc_mask = ctx->ctx_all_pmcs[0]; pfm_load_regs()
6088 if (pmd_mask) pfm_restore_pmds(ctx->th_pmds, pmd_mask); pfm_load_regs()
6089 if (pmc_mask) pfm_restore_pmcs(ctx->th_pmcs, pmc_mask); pfm_load_regs()
6095 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) { pfm_load_regs()
6100 ia64_set_pmc(0, ctx->th_pmcs[0]); pfm_load_regs()
6102 ctx->th_pmcs[0] = 0UL; pfm_load_regs()
6115 ctx->ctx_reload_pmcs[0] = 0UL; pfm_load_regs()
6116 ctx->ctx_reload_pmds[0] = 0UL; pfm_load_regs()
6118 SET_LAST_CPU(ctx, smp_processor_id()); pfm_load_regs()
6127 SET_ACTIVATION(ctx); pfm_load_regs()
6132 SET_PMU_OWNER(task, ctx); pfm_load_regs()
6145 pfm_unprotect_ctx_ctxsw(ctx, flags); pfm_load_regs()
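Aside: the SMP pfm_load_regs() above is a lazy-restore scheme: if this context was the last one active on this CPU and the per-CPU activation number is unchanged, only the registers explicitly marked dirty (ctx_reload_pmcs/ctx_reload_pmds) are rewritten; otherwise everything is reloaded. The guard, isolated from the code above:

	/* a match means no other context touched this CPU's PMU since we ran */
	if (GET_LAST_CPU(ctx) == smp_processor_id() &&
	    ctx->ctx_last_activation == GET_ACTIVATION()) {
		pmc_mask = ctx->ctx_reload_pmcs[0];	/* cheap path */
		pmd_mask = ctx->ctx_reload_pmds[0];
	} else {
		pmc_mask = ctx->ctx_all_pmcs[0];	/* full reload */
		pmd_mask = ctx->ctx_all_pmds[0];
	}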
6155 pfm_context_t *ctx; pfm_load_regs() local
6162 ctx = PFM_GET_CTX(task); pfm_load_regs()
6176 if (ctx->ctx_fl_using_dbreg) { pfm_load_regs()
6177 pfm_restore_ibrs(ctx->ctx_ibrs, pmu_conf->num_ibrs); pfm_load_regs()
6178 pfm_restore_dbrs(ctx->ctx_dbrs, pmu_conf->num_dbrs); pfm_load_regs()
6184 psr_up = ctx->ctx_saved_psr_up; pfm_load_regs()
6214 pmd_mask = pfm_sysctl.fastctxsw ? ctx->ctx_used_pmds[0] : ctx->ctx_all_pmds[0]; pfm_load_regs()
6223 pmc_mask = ctx->ctx_all_pmcs[0]; pfm_load_regs()
6225 pfm_restore_pmds(ctx->th_pmds, pmd_mask); pfm_load_regs()
6226 pfm_restore_pmcs(ctx->th_pmcs, pmc_mask); pfm_load_regs()
6232 if (unlikely(PMC0_HAS_OVFL(ctx->th_pmcs[0]))) { pfm_load_regs()
6237 ia64_set_pmc(0, ctx->th_pmcs[0]); pfm_load_regs()
6240 ctx->th_pmcs[0] = 0UL; pfm_load_regs()
6253 SET_PMU_OWNER(task, ctx); pfm_load_regs()
6269 pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx) pfm_flush_pmds() argument
6280 is_self = ctx->ctx_task == task ? 1 : 0; pfm_flush_pmds()
6289 can_access_pmu = (GET_PMU_OWNER() == task) || (ctx->ctx_fl_system && ctx->ctx_cpu == smp_processor_id()); pfm_flush_pmds()
6315 pmc0 = ctx->th_pmcs[0]; pfm_flush_pmds()
6319 ctx->th_pmcs[0] = 0; pfm_flush_pmds()
6328 mask2 = ctx->ctx_used_pmds[0]; pfm_flush_pmds()
6340 val = pmd_val = can_access_pmu ? ia64_get_pmd(i) : ctx->th_pmds[i]; pfm_flush_pmds()
6346 ctx->ctx_pmds[i].val, pfm_flush_pmds()
6352 val = ctx->ctx_pmds[i].val + (val & ovfl_val); pfm_flush_pmds()
6372 if (is_self) ctx->th_pmds[i] = pmd_val; pfm_flush_pmds()
6374 ctx->ctx_pmds[i].val = val; pfm_flush_pmds()
6687 pfm_context_t *ctx; dump_pmu_state() local
6711 ctx = GET_PMU_CTX(); dump_pmu_state()
6713 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx); dump_pmu_state()
6732 printk("->CPU%d pmc[%d]=0x%lx thread_pmc[%d]=0x%lx\n", this_cpu, i, ia64_get_pmc(i), i, ctx->th_pmcs[i]); dump_pmu_state()
6737 printk("->CPU%d pmd[%d]=0x%lx thread_pmd[%d]=0x%lx\n", this_cpu, i, ia64_get_pmd(i), i, ctx->th_pmds[i]); dump_pmu_state()
6740 if (ctx) { dump_pmu_state()
6743 ctx->ctx_state, dump_pmu_state()
6744 ctx->ctx_smpl_vaddr, dump_pmu_state()
6745 ctx->ctx_smpl_hdr, dump_pmu_state()
6746 ctx->ctx_msgq_head, dump_pmu_state()
6747 ctx->ctx_msgq_tail, dump_pmu_state()
6748 ctx->ctx_saved_psr_up); dump_pmu_state()
/linux-4.1.27/drivers/vfio/pci/
H A Dvfio_pci_intrs.c36 eventfd_signal(vdev->ctx[0].trigger, 1); vfio_send_intx_eventfd()
55 } else if (!vdev->ctx[0].masked) { vfio_pci_intx_mask()
65 vdev->ctx[0].masked = true; vfio_pci_intx_mask()
93 } else if (vdev->ctx[0].masked && !vdev->virq_disabled) { vfio_pci_intx_unmask_handler()
105 vdev->ctx[0].masked = (ret > 0); vfio_pci_intx_unmask_handler()
129 vdev->ctx[0].masked = true; vfio_intx_handler()
131 } else if (!vdev->ctx[0].masked && /* may be shared */ vfio_intx_handler()
133 vdev->ctx[0].masked = true; vfio_intx_handler()
153 vdev->ctx = kzalloc(sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); vfio_intx_enable()
154 if (!vdev->ctx) vfio_intx_enable()
165 vdev->ctx[0].masked = vdev->virq_disabled; vfio_intx_enable()
167 pci_intx(vdev->pdev, !vdev->ctx[0].masked); vfio_intx_enable()
182 if (vdev->ctx[0].trigger) { vfio_intx_set_signal()
184 kfree(vdev->ctx[0].name); vfio_intx_set_signal()
185 eventfd_ctx_put(vdev->ctx[0].trigger); vfio_intx_set_signal()
186 vdev->ctx[0].trigger = NULL; vfio_intx_set_signal()
192 vdev->ctx[0].name = kasprintf(GFP_KERNEL, "vfio-intx(%s)", vfio_intx_set_signal()
194 if (!vdev->ctx[0].name) vfio_intx_set_signal()
199 kfree(vdev->ctx[0].name); vfio_intx_set_signal()
203 vdev->ctx[0].trigger = trigger; vfio_intx_set_signal()
209 irqflags, vdev->ctx[0].name, vdev); vfio_intx_set_signal()
211 vdev->ctx[0].trigger = NULL; vfio_intx_set_signal()
212 kfree(vdev->ctx[0].name); vfio_intx_set_signal()
222 if (!vdev->pci_2_3 && vdev->ctx[0].masked) vfio_intx_set_signal()
232 vfio_virqfd_disable(&vdev->ctx[0].unmask); vfio_intx_disable()
233 vfio_virqfd_disable(&vdev->ctx[0].mask); vfio_intx_disable()
236 kfree(vdev->ctx); vfio_intx_disable()
258 vdev->ctx = kzalloc(nvec * sizeof(struct vfio_pci_irq_ctx), GFP_KERNEL); vfio_msi_enable()
259 if (!vdev->ctx) vfio_msi_enable()
268 kfree(vdev->ctx); vfio_msi_enable()
280 kfree(vdev->ctx); vfio_msi_enable()
288 kfree(vdev->ctx); vfio_msi_enable()
320 if (vdev->ctx[vector].trigger) { vfio_msi_set_vector_signal()
321 free_irq(irq, vdev->ctx[vector].trigger); vfio_msi_set_vector_signal()
322 kfree(vdev->ctx[vector].name); vfio_msi_set_vector_signal()
323 eventfd_ctx_put(vdev->ctx[vector].trigger); vfio_msi_set_vector_signal()
324 vdev->ctx[vector].trigger = NULL; vfio_msi_set_vector_signal()
330 vdev->ctx[vector].name = kasprintf(GFP_KERNEL, "%s[%d](%s)", vfio_msi_set_vector_signal()
332 if (!vdev->ctx[vector].name) vfio_msi_set_vector_signal()
337 kfree(vdev->ctx[vector].name); vfio_msi_set_vector_signal()
356 vdev->ctx[vector].name, trigger); vfio_msi_set_vector_signal()
358 kfree(vdev->ctx[vector].name); vfio_msi_set_vector_signal()
363 vdev->ctx[vector].trigger = trigger; vfio_msi_set_vector_signal()
397 vfio_virqfd_disable(&vdev->ctx[i].unmask); vfio_msi_disable()
398 vfio_virqfd_disable(&vdev->ctx[i].mask); vfio_msi_disable()
409 kfree(vdev->ctx); vfio_msi_disable()
434 &vdev->ctx[0].unmask, fd); vfio_pci_set_intx_unmask()
436 vfio_virqfd_disable(&vdev->ctx[0].unmask); vfio_pci_set_intx_unmask()
543 if (!vdev->ctx[i].trigger) vfio_pci_set_msi_trigger()
546 eventfd_signal(vdev->ctx[i].trigger, 1); vfio_pci_set_msi_trigger()
550 eventfd_signal(vdev->ctx[i].trigger, 1); vfio_pci_set_msi_trigger()
556 static int vfio_pci_set_ctx_trigger_single(struct eventfd_ctx **ctx, vfio_pci_set_ctx_trigger_single() argument
566 if (*ctx) vfio_pci_set_ctx_trigger_single()
567 eventfd_signal(*ctx, 1); vfio_pci_set_ctx_trigger_single()
571 if (trigger && *ctx) vfio_pci_set_ctx_trigger_single()
572 eventfd_signal(*ctx, 1); vfio_pci_set_ctx_trigger_single()
578 if (*ctx) vfio_pci_set_ctx_trigger_single()
579 eventfd_ctx_put(*ctx); vfio_pci_set_ctx_trigger_single()
580 *ctx = NULL; vfio_pci_set_ctx_trigger_single()
587 if (*ctx) vfio_pci_set_ctx_trigger_single()
588 eventfd_ctx_put(*ctx); vfio_pci_set_ctx_trigger_single()
589 *ctx = efdctx; vfio_pci_set_ctx_trigger_single()
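Aside: vfio_pci_set_ctx_trigger_single() swaps an eventfd trigger in place: resolve the user-supplied fd to an eventfd context, drop the reference on the old one, install the new one. A hedged sketch of the install path (error handling trimmed to the essentials):

	struct eventfd_ctx *efdctx;

	efdctx = eventfd_ctx_fdget(fd);		/* resolve fd -> eventfd */
	if (IS_ERR(efdctx))
		return PTR_ERR(efdctx);

	if (*ctx)
		eventfd_ctx_put(*ctx);		/* release the old trigger */
	*ctx = efdctx;
	return 0;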
/linux-4.1.27/drivers/net/wireless/brcm80211/brcmfmac/
H A Dcommonring.h28 int (*cr_ring_bell)(void *ctx);
29 int (*cr_update_rptr)(void *ctx);
30 int (*cr_update_wptr)(void *ctx);
31 int (*cr_write_rptr)(void *ctx);
32 int (*cr_write_wptr)(void *ctx);
46 int (*cr_ring_bell)(void *ctx),
47 int (*cr_update_rptr)(void *ctx),
48 int (*cr_update_wptr)(void *ctx),
49 int (*cr_write_rptr)(void *ctx),
50 int (*cr_write_wptr)(void *ctx), void *ctx);
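Aside: commonring.h is a clean example of the C callback-plus-context idiom: every function pointer takes an opaque void *ctx that the ring code stores once and passes back verbatim, so each backend can recover its own typed state. A tiny illustration (all names invented):

	struct demo_dev {
		int doorbell_count;
	};

	struct demo_ring {
		int (*ring_bell)(void *ctx);
		void *ctx;
	};

	static int demo_bell(void *ctx)
	{
		struct demo_dev *dev = ctx;	/* recover the typed state */

		return dev->doorbell_count++;
	}

	static void demo_attach(struct demo_ring *r, struct demo_dev *dev)
	{
		r->ring_bell = demo_bell;
		r->ctx = dev;			/* stored, passed back later */
	}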
/linux-4.1.27/arch/x86/crypto/sha-mb/
H A Dsha1_mb.c91 static inline struct ahash_request *cast_mcryptd_ctx_to_req(struct mcryptd_hash_request_ctx *ctx) cast_mcryptd_ctx_to_req() argument
93 return container_of((void *) ctx, struct ahash_request, __ctx); cast_mcryptd_ctx_to_req()
137 static struct sha1_hash_ctx *sha1_ctx_mgr_resubmit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx) sha1_ctx_mgr_resubmit() argument
139 while (ctx) { sha1_ctx_mgr_resubmit()
140 if (ctx->status & HASH_CTX_STS_COMPLETE) { sha1_ctx_mgr_resubmit()
142 ctx->status = HASH_CTX_STS_COMPLETE; sha1_ctx_mgr_resubmit()
143 return ctx; sha1_ctx_mgr_resubmit()
150 if (ctx->partial_block_buffer_length == 0 && sha1_ctx_mgr_resubmit()
151 ctx->incoming_buffer_length) { sha1_ctx_mgr_resubmit()
153 const void *buffer = ctx->incoming_buffer; sha1_ctx_mgr_resubmit()
154 uint32_t len = ctx->incoming_buffer_length; sha1_ctx_mgr_resubmit()
165 memcpy(ctx->partial_block_buffer, sha1_ctx_mgr_resubmit()
168 ctx->partial_block_buffer_length = copy_len; sha1_ctx_mgr_resubmit()
171 ctx->incoming_buffer_length = 0; sha1_ctx_mgr_resubmit()
181 ctx->job.buffer = (uint8_t *) buffer; sha1_ctx_mgr_resubmit()
182 ctx->job.len = len; sha1_ctx_mgr_resubmit()
183 ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, sha1_ctx_mgr_resubmit()
184 &ctx->job); sha1_ctx_mgr_resubmit()
194 if (ctx->status & HASH_CTX_STS_LAST) { sha1_ctx_mgr_resubmit()
196 uint8_t *buf = ctx->partial_block_buffer; sha1_ctx_mgr_resubmit()
197 uint32_t n_extra_blocks = sha1_pad(buf, ctx->total_length); sha1_ctx_mgr_resubmit()
199 ctx->status = (HASH_CTX_STS_PROCESSING | sha1_ctx_mgr_resubmit()
201 ctx->job.buffer = buf; sha1_ctx_mgr_resubmit()
202 ctx->job.len = (uint32_t) n_extra_blocks; sha1_ctx_mgr_resubmit()
203 ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job); sha1_ctx_mgr_resubmit()
207 ctx->status = HASH_CTX_STS_IDLE; sha1_ctx_mgr_resubmit()
208 return ctx; sha1_ctx_mgr_resubmit()
223 struct sha1_hash_ctx *ctx; sha1_ctx_mgr_get_comp_ctx() local
225 ctx = (struct sha1_hash_ctx *) sha1_job_mgr_get_comp_job(&mgr->mgr); sha1_ctx_mgr_get_comp_ctx()
226 return sha1_ctx_mgr_resubmit(mgr, ctx); sha1_ctx_mgr_get_comp_ctx()
235 struct sha1_hash_ctx *ctx, sha1_ctx_mgr_submit()
242 ctx->error = HASH_CTX_ERROR_INVALID_FLAGS; sha1_ctx_mgr_submit()
243 return ctx; sha1_ctx_mgr_submit()
246 if (ctx->status & HASH_CTX_STS_PROCESSING) { sha1_ctx_mgr_submit()
248 ctx->error = HASH_CTX_ERROR_ALREADY_PROCESSING; sha1_ctx_mgr_submit()
249 return ctx; sha1_ctx_mgr_submit()
252 if ((ctx->status & HASH_CTX_STS_COMPLETE) && !(flags & HASH_FIRST)) { sha1_ctx_mgr_submit()
254 ctx->error = HASH_CTX_ERROR_ALREADY_COMPLETED; sha1_ctx_mgr_submit()
255 return ctx; sha1_ctx_mgr_submit()
261 sha1_init_digest(ctx->job.result_digest); sha1_ctx_mgr_submit()
264 ctx->total_length = 0; sha1_ctx_mgr_submit()
267 ctx->partial_block_buffer_length = 0; sha1_ctx_mgr_submit()
271 ctx->error = HASH_CTX_ERROR_NONE; sha1_ctx_mgr_submit()
274 ctx->incoming_buffer = buffer; sha1_ctx_mgr_submit()
275 ctx->incoming_buffer_length = len; sha1_ctx_mgr_submit()
277 /* Store the user's request flags and mark this ctx as currently being processed. */ sha1_ctx_mgr_submit()
278 ctx->status = (flags & HASH_LAST) ? sha1_ctx_mgr_submit()
283 ctx->total_length += len; sha1_ctx_mgr_submit()
291 if ((ctx->partial_block_buffer_length) | (len < SHA1_BLOCK_SIZE)) { sha1_ctx_mgr_submit()
293 uint32_t copy_len = SHA1_BLOCK_SIZE - ctx->partial_block_buffer_length; sha1_ctx_mgr_submit()
299 memcpy(&ctx->partial_block_buffer[ctx->partial_block_buffer_length], sha1_ctx_mgr_submit()
302 ctx->partial_block_buffer_length += copy_len; sha1_ctx_mgr_submit()
303 ctx->incoming_buffer = (const void *)((const char *)buffer + copy_len); sha1_ctx_mgr_submit()
304 ctx->incoming_buffer_length = len - copy_len; sha1_ctx_mgr_submit()
308 assert(ctx->partial_block_buffer_length <= SHA1_BLOCK_SIZE); sha1_ctx_mgr_submit()
311 if (ctx->partial_block_buffer_length >= SHA1_BLOCK_SIZE) { sha1_ctx_mgr_submit()
312 ctx->partial_block_buffer_length = 0; sha1_ctx_mgr_submit()
314 ctx->job.buffer = ctx->partial_block_buffer; sha1_ctx_mgr_submit()
315 ctx->job.len = 1; sha1_ctx_mgr_submit()
316 ctx = (struct sha1_hash_ctx *) sha1_job_mgr_submit(&mgr->mgr, &ctx->job); sha1_ctx_mgr_submit()
320 return sha1_ctx_mgr_resubmit(mgr, ctx); sha1_ctx_mgr_submit()
325 struct sha1_hash_ctx *ctx; sha1_ctx_mgr_flush() local
328 ctx = (struct sha1_hash_ctx *) sha1_job_mgr_flush(&mgr->mgr); sha1_ctx_mgr_flush()
331 if (!ctx) sha1_ctx_mgr_flush()
337 ctx = sha1_ctx_mgr_resubmit(mgr, ctx); sha1_ctx_mgr_flush()
344 if (ctx) sha1_ctx_mgr_flush()
345 return ctx; sha1_ctx_mgr_flush()
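Aside: sha1_ctx_mgr_submit() buffers sub-block input: bytes accumulate in partial_block_buffer until a full SHA1_BLOCK_SIZE (64 bytes) is available, and only whole blocks are handed to the job manager. A simplified sketch of just that buffering step (names shortened):

	#define DEMO_BLOCK_SIZE 64

	static u8  partial[DEMO_BLOCK_SIZE];
	static u32 partial_len;

	/* returns the number of input bytes absorbed into the partial buffer */
	static u32 demo_buffer_input(const u8 *buf, u32 len)
	{
		u32 copy_len = DEMO_BLOCK_SIZE - partial_len;

		if (copy_len > len)
			copy_len = len;
		memcpy(&partial[partial_len], buf, copy_len);
		partial_len += copy_len;

		if (partial_len == DEMO_BLOCK_SIZE) {
			/* full block: submit partial[] as a one-block job */
			partial_len = 0;
		}
		return copy_len;
	}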
708 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); sha1_mb_async_init() local
710 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; sha1_mb_async_init()
722 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); sha1_mb_async_update() local
723 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; sha1_mb_async_update()
735 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); sha1_mb_async_finup() local
736 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; sha1_mb_async_finup()
748 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); sha1_mb_async_final() local
749 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; sha1_mb_async_final()
759 struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm); sha1_mb_async_digest() local
761 struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm; sha1_mb_async_digest()
771 struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); sha1_mb_async_init_tfm() local
781 ctx->mcryptd_tfm = mcryptd_tfm; sha1_mb_async_init_tfm()
791 struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm); sha1_mb_async_exit_tfm() local
793 mcryptd_free_ahash(ctx->mcryptd_tfm); sha1_mb_async_exit_tfm()
234 sha1_ctx_mgr_submit(struct sha1_ctx_mgr *mgr, struct sha1_hash_ctx *ctx, const void *buffer, uint32_t len, int flags) sha1_ctx_mgr_submit() argument
/linux-4.1.27/drivers/crypto/ux500/cryp/
H A Dcryp_core.c178 static void add_session_id(struct cryp_ctx *ctx) add_session_id() argument
187 ctx->session_id = atomic_read(&session_id); add_session_id()
192 struct cryp_ctx *ctx; cryp_interrupt_handler() local
204 ctx = device_data->current_ctx; cryp_interrupt_handler()
206 if (ctx == NULL) { cryp_interrupt_handler()
207 BUG_ON(!ctx); cryp_interrupt_handler()
211 dev_dbg(ctx->device->dev, "[%s] (len: %d) %s, ", __func__, ctx->outlen, cryp_interrupt_handler()
217 if (ctx->outlen / ctx->blocksize > 0) { cryp_interrupt_handler()
218 count = ctx->blocksize / 4; cryp_interrupt_handler()
220 readsl(&device_data->base->dout, ctx->outdata, count); cryp_interrupt_handler()
221 ctx->outdata += count; cryp_interrupt_handler()
222 ctx->outlen -= count; cryp_interrupt_handler()
224 if (ctx->outlen == 0) { cryp_interrupt_handler()
231 if (ctx->datalen / ctx->blocksize > 0) { cryp_interrupt_handler()
232 count = ctx->blocksize / 4; cryp_interrupt_handler()
234 writesl(&device_data->base->din, ctx->indata, count); cryp_interrupt_handler()
236 ctx->indata += count; cryp_interrupt_handler()
237 ctx->datalen -= count; cryp_interrupt_handler()
239 if (ctx->datalen == 0) cryp_interrupt_handler()
243 if (ctx->config.algomode == CRYP_ALGO_AES_XTS) { cryp_interrupt_handler()
280 static int cfg_ivs(struct cryp_device_data *device_data, struct cryp_ctx *ctx) cfg_ivs() argument
284 int num_of_regs = ctx->blocksize / 8; cfg_ivs()
296 __func__, ctx->blocksize); cfg_ivs()
300 for (i = 0; i < ctx->blocksize / 4; i++) cfg_ivs()
301 iv[i] = uint8p_to_uint32_be(ctx->iv + i*4); cfg_ivs()
335 static int cfg_keys(struct cryp_ctx *ctx) cfg_keys() argument
338 int num_of_regs = ctx->keylen / 8; cfg_keys()
342 dev_dbg(ctx->device->dev, "[%s]", __func__); cfg_keys()
344 if (mode_is_aes(ctx->config.algomode)) { cfg_keys()
345 swap_words_in_key_and_bits_in_byte((u8 *)ctx->key, cfg_keys()
347 ctx->keylen); cfg_keys()
349 for (i = 0; i < ctx->keylen / 4; i++) cfg_keys()
350 swapped_key[i] = uint8p_to_uint32_be(ctx->key + i*4); cfg_keys()
354 cryp_error = set_key(ctx->device, cfg_keys()
360 dev_err(ctx->device->dev, "[%s]: set_key() failed!", cfg_keys()
368 static int cryp_setup_context(struct cryp_ctx *ctx, cryp_setup_context() argument
386 if (ctx->updated == 0) { cryp_setup_context()
388 if (cfg_keys(ctx) != 0) { cryp_setup_context()
389 dev_err(ctx->device->dev, "[%s]: cfg_keys failed!", cryp_setup_context()
394 if (ctx->iv && cryp_setup_context()
395 CRYP_ALGO_AES_ECB != ctx->config.algomode && cryp_setup_context()
396 CRYP_ALGO_DES_ECB != ctx->config.algomode && cryp_setup_context()
397 CRYP_ALGO_TDES_ECB != ctx->config.algomode) { cryp_setup_context()
398 if (cfg_ivs(device_data, ctx) != 0) cryp_setup_context()
402 cryp_set_configuration(device_data, &ctx->config, cryp_setup_context()
404 add_session_id(ctx); cryp_setup_context()
405 } else if (ctx->updated == 1 && cryp_setup_context()
406 ctx->session_id != atomic_read(&session_id)) { cryp_setup_context()
408 cryp_restore_device_context(device_data, &ctx->dev_ctx); cryp_setup_context()
410 add_session_id(ctx); cryp_setup_context()
411 control_register = ctx->dev_ctx.cr; cryp_setup_context()
413 control_register = ctx->dev_ctx.cr; cryp_setup_context()
422 static int cryp_get_device_data(struct cryp_ctx *ctx, cryp_get_device_data() argument
448 local_device_data->current_ctx = ctx; cryp_get_device_data()
449 ctx->device = local_device_data; cryp_get_device_data()
513 struct cryp_ctx *ctx = (struct cryp_ctx *) data; cryp_dma_out_callback() local
514 dev_dbg(ctx->device->dev, "[%s]: ", __func__); cryp_dma_out_callback()
516 complete(&ctx->device->dma.cryp_dma_complete); cryp_dma_out_callback()
519 static int cryp_set_dma_transfer(struct cryp_ctx *ctx, cryp_set_dma_transfer() argument
528 dev_dbg(ctx->device->dev, "[%s]: ", __func__); cryp_set_dma_transfer()
531 dev_err(ctx->device->dev, "[%s]: Data in sg list isn't " cryp_set_dma_transfer()
538 channel = ctx->device->dma.chan_mem2cryp; cryp_set_dma_transfer()
539 ctx->device->dma.sg_src = sg; cryp_set_dma_transfer()
540 ctx->device->dma.sg_src_len = dma_map_sg(channel->device->dev, cryp_set_dma_transfer()
541 ctx->device->dma.sg_src, cryp_set_dma_transfer()
542 ctx->device->dma.nents_src, cryp_set_dma_transfer()
545 if (!ctx->device->dma.sg_src_len) { cryp_set_dma_transfer()
546 dev_dbg(ctx->device->dev, cryp_set_dma_transfer()
552 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " cryp_set_dma_transfer()
556 ctx->device->dma.sg_src, cryp_set_dma_transfer()
557 ctx->device->dma.sg_src_len, cryp_set_dma_transfer()
562 channel = ctx->device->dma.chan_cryp2mem; cryp_set_dma_transfer()
563 ctx->device->dma.sg_dst = sg; cryp_set_dma_transfer()
564 ctx->device->dma.sg_dst_len = dma_map_sg(channel->device->dev, cryp_set_dma_transfer()
565 ctx->device->dma.sg_dst, cryp_set_dma_transfer()
566 ctx->device->dma.nents_dst, cryp_set_dma_transfer()
569 if (!ctx->device->dma.sg_dst_len) { cryp_set_dma_transfer()
570 dev_dbg(ctx->device->dev, cryp_set_dma_transfer()
576 dev_dbg(ctx->device->dev, "[%s]: Setting up DMA for buffer " cryp_set_dma_transfer()
580 ctx->device->dma.sg_dst, cryp_set_dma_transfer()
581 ctx->device->dma.sg_dst_len, cryp_set_dma_transfer()
587 desc->callback_param = ctx; cryp_set_dma_transfer()
591 dev_dbg(ctx->device->dev, "[%s]: Invalid DMA direction", cryp_set_dma_transfer()
602 static void cryp_dma_done(struct cryp_ctx *ctx) cryp_dma_done() argument
606 dev_dbg(ctx->device->dev, "[%s]: ", __func__); cryp_dma_done()
608 chan = ctx->device->dma.chan_mem2cryp; cryp_dma_done()
610 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_src, cryp_dma_done()
611 ctx->device->dma.sg_src_len, DMA_TO_DEVICE); cryp_dma_done()
613 chan = ctx->device->dma.chan_cryp2mem; cryp_dma_done()
615 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg_dst, cryp_dma_done()
616 ctx->device->dma.sg_dst_len, DMA_FROM_DEVICE); cryp_dma_done()
619 static int cryp_dma_write(struct cryp_ctx *ctx, struct scatterlist *sg, cryp_dma_write() argument
622 int error = cryp_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); cryp_dma_write()
623 dev_dbg(ctx->device->dev, "[%s]: ", __func__); cryp_dma_write()
626 dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " cryp_dma_write()
634 static int cryp_dma_read(struct cryp_ctx *ctx, struct scatterlist *sg, int len) cryp_dma_read() argument
636 int error = cryp_set_dma_transfer(ctx, sg, len, DMA_FROM_DEVICE); cryp_dma_read()
638 dev_dbg(ctx->device->dev, "[%s]: cryp_set_dma_transfer() " cryp_dma_read()
646 static void cryp_polling_mode(struct cryp_ctx *ctx, cryp_polling_mode() argument
649 int len = ctx->blocksize / BYTES_PER_WORD; cryp_polling_mode()
650 int remaining_length = ctx->datalen; cryp_polling_mode()
651 u32 *indata = (u32 *)ctx->indata; cryp_polling_mode()
652 u32 *outdata = (u32 *)ctx->outdata; cryp_polling_mode()
745 static int hw_crypt_noxts(struct cryp_ctx *ctx, hw_crypt_noxts() argument
750 const u8 *indata = ctx->indata; hw_crypt_noxts()
751 u8 *outdata = ctx->outdata; hw_crypt_noxts()
752 u32 datalen = ctx->datalen; hw_crypt_noxts()
757 ctx->outlen = ctx->datalen; hw_crypt_noxts()
765 ret = cryp_setup_context(ctx, device_data); hw_crypt_noxts()
775 * ctx->outlen is decremented in the cryp_interrupt_handler hw_crypt_noxts()
779 while (ctx->outlen > 0) hw_crypt_noxts()
791 cryp_polling_mode(ctx, device_data); hw_crypt_noxts()
793 dev_err(ctx->device->dev, "[%s]: Invalid operation mode!", hw_crypt_noxts()
799 cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode); hw_crypt_noxts()
800 ctx->updated = 1; hw_crypt_noxts()
803 ctx->indata = indata; hw_crypt_noxts()
804 ctx->outdata = outdata; hw_crypt_noxts()
805 ctx->datalen = datalen; hw_crypt_noxts()
806 ctx->outlen = outlen; hw_crypt_noxts()
827 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); ablk_dma_crypt() local
836 ctx->datalen = areq->nbytes; ablk_dma_crypt()
837 ctx->outlen = areq->nbytes; ablk_dma_crypt()
839 ret = cryp_get_device_data(ctx, &device_data); ablk_dma_crypt()
843 ret = cryp_setup_context(ctx, device_data); ablk_dma_crypt()
848 ctx->device->dma.nents_src = get_nents(areq->src, ctx->datalen); ablk_dma_crypt()
849 ctx->device->dma.nents_dst = get_nents(areq->dst, ctx->outlen); ablk_dma_crypt()
854 bytes_written = cryp_dma_write(ctx, areq->src, ctx->datalen); ablk_dma_crypt()
855 bytes_read = cryp_dma_read(ctx, areq->dst, bytes_written); ablk_dma_crypt()
857 wait_for_completion(&ctx->device->dma.cryp_dma_complete); ablk_dma_crypt()
858 cryp_dma_done(ctx); ablk_dma_crypt()
860 cryp_save_device_context(device_data, &ctx->dev_ctx, cryp_mode); ablk_dma_crypt()
861 ctx->updated = 1; ablk_dma_crypt()
866 ctx->device = NULL; ablk_dma_crypt()
885 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); ablk_crypt() local
894 ret = cryp_get_device_data(ctx, &device_data); ablk_crypt()
908 ctx->iv = walk.iv; ablk_crypt()
910 ctx->indata = phys_to_virt(src_paddr); ablk_crypt()
913 ctx->outdata = phys_to_virt(dst_paddr); ablk_crypt()
915 ctx->datalen = nbytes - (nbytes % ctx->blocksize); ablk_crypt()
917 ret = hw_crypt_noxts(ctx, device_data); ablk_crypt()
921 nbytes -= ctx->datalen; ablk_crypt()
932 ctx->device = NULL; ablk_crypt()
947 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); aes_ablkcipher_setkey() local
954 ctx->config.keysize = CRYP_KEY_SIZE_128; aes_ablkcipher_setkey()
958 ctx->config.keysize = CRYP_KEY_SIZE_192; aes_ablkcipher_setkey()
962 ctx->config.keysize = CRYP_KEY_SIZE_256; aes_ablkcipher_setkey()
971 memcpy(ctx->key, key, keylen); aes_ablkcipher_setkey()
972 ctx->keylen = keylen; aes_ablkcipher_setkey()
974 ctx->updated = 0; aes_ablkcipher_setkey()
982 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); des_ablkcipher_setkey() local
1003 memcpy(ctx->key, key, keylen); des_ablkcipher_setkey()
1004 ctx->keylen = keylen; des_ablkcipher_setkey()
1006 ctx->updated = 0; des_ablkcipher_setkey()
1013 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); des3_ablkcipher_setkey() local
1046 memcpy(ctx->key, key, keylen); des3_ablkcipher_setkey()
1047 ctx->keylen = keylen; des3_ablkcipher_setkey()
1049 ctx->updated = 0; des3_ablkcipher_setkey()
1056 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); cryp_blk_encrypt() local
1060 ctx->config.algodir = CRYP_ALGORITHM_ENCRYPT; cryp_blk_encrypt()
1064 if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode)) cryp_blk_encrypt()
1074 struct cryp_ctx *ctx = crypto_ablkcipher_ctx(cipher); cryp_blk_decrypt() local
1078 ctx->config.algodir = CRYP_ALGORITHM_DECRYPT; cryp_blk_decrypt()
1081 if (cryp_mode == CRYP_MODE_DMA && mode_is_aes(ctx->config.algomode)) cryp_blk_decrypt()
1095 struct cryp_ctx *ctx = crypto_tfm_ctx(tfm); cryp_cra_init() local
1101 ctx->config.algomode = cryp_alg->algomode; cryp_cra_init()
1102 ctx->blocksize = crypto_tfm_alg_blocksize(tfm); cryp_cra_init()
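Taken together, the cryp_core.c hits above revolve around one pattern: a per-transform context caches the key, IV and configuration along with an updated flag and a session_id, and cryp_setup_context() compares that saved session_id against a global counter to decide whether the hardware must be reprogrammed before each transfer. Below is a minimal standalone sketch of that decision logic; the struct and helper names (hw_ctx, hw_load_full_config, hw_restore_saved_state) are hypothetical stand-ins, not the driver's own.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Global counter bumped each time any context programs the hardware. */
    static atomic_uint session_id;

    struct hw_ctx {                 /* hypothetical stand-in for struct cryp_ctx */
        bool     updated;           /* context has been saved to hardware before */
        unsigned session;           /* session_id value at the time of last save */
    };

    static void hw_load_full_config(struct hw_ctx *c)    { (void)c; puts("full reconfig"); }
    static void hw_restore_saved_state(struct hw_ctx *c) { (void)c; puts("restore saved"); }

    /* Mirrors the cryp_setup_context() decision: a fresh context gets a full
     * configuration; a previously saved context is restored only if some
     * other context has touched the hardware since it was saved. */
    static void setup_context(struct hw_ctx *c)
    {
        if (!c->updated) {
            hw_load_full_config(c);
            c->session = atomic_fetch_add(&session_id, 1) + 1;
        } else if (c->session != atomic_load(&session_id)) {
            hw_restore_saved_state(c);
            c->session = atomic_fetch_add(&session_id, 1) + 1;
        } /* else: hardware still holds our state, nothing to do */
        c->updated = true;
    }

    int main(void)
    {
        struct hw_ctx a = {0}, b = {0};
        setup_context(&a);   /* full reconfig */
        setup_context(&a);   /* no-op: hardware still holds a's state */
        setup_context(&b);   /* full reconfig, bumps session_id */
        setup_context(&a);   /* session mismatch: restore saved state */
        return 0;
    }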
/linux-4.1.27/drivers/net/wireless/orinoco/
orinoco_usb.c 287 static void ezusb_ctx_complete(struct request_context *ctx);
299 static void ezusb_request_context_put(struct request_context *ctx) ezusb_request_context_put() argument
301 if (!atomic_dec_and_test(&ctx->refcount)) ezusb_request_context_put()
304 WARN_ON(!ctx->done.done); ezusb_request_context_put()
305 BUG_ON(ctx->outurb->status == -EINPROGRESS); ezusb_request_context_put()
306 BUG_ON(timer_pending(&ctx->timer)); ezusb_request_context_put()
307 usb_free_urb(ctx->outurb); ezusb_request_context_put()
308 kfree(ctx->buf); ezusb_request_context_put()
309 kfree(ctx); ezusb_request_context_put()
323 struct request_context *ctx = (void *) _ctx; ezusb_request_timerfn() local
325 ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK; ezusb_request_timerfn()
326 if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) { ezusb_request_timerfn()
327 ctx->state = EZUSB_CTX_REQ_TIMEOUT; ezusb_request_timerfn()
329 ctx->state = EZUSB_CTX_RESP_TIMEOUT; ezusb_request_timerfn()
330 dev_dbg(&ctx->outurb->dev->dev, "couldn't unlink\n"); ezusb_request_timerfn()
331 atomic_inc(&ctx->refcount); ezusb_request_timerfn()
332 ctx->killed = 1; ezusb_request_timerfn()
333 ezusb_ctx_complete(ctx); ezusb_request_timerfn()
334 ezusb_request_context_put(ctx); ezusb_request_timerfn()
341 struct request_context *ctx; ezusb_alloc_ctx() local
343 ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); ezusb_alloc_ctx()
344 if (!ctx) ezusb_alloc_ctx()
347 ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC); ezusb_alloc_ctx()
348 if (!ctx->buf) { ezusb_alloc_ctx()
349 kfree(ctx); ezusb_alloc_ctx()
352 ctx->outurb = usb_alloc_urb(0, GFP_ATOMIC); ezusb_alloc_ctx()
353 if (!ctx->outurb) { ezusb_alloc_ctx()
354 kfree(ctx->buf); ezusb_alloc_ctx()
355 kfree(ctx); ezusb_alloc_ctx()
359 ctx->upriv = upriv; ezusb_alloc_ctx()
360 ctx->state = EZUSB_CTX_START; ezusb_alloc_ctx()
361 ctx->out_rid = out_rid; ezusb_alloc_ctx()
362 ctx->in_rid = in_rid; ezusb_alloc_ctx()
364 atomic_set(&ctx->refcount, 1); ezusb_alloc_ctx()
365 init_completion(&ctx->done); ezusb_alloc_ctx()
367 setup_timer(&ctx->timer, ezusb_request_timerfn, (u_long)ctx); ezusb_alloc_ctx()
368 return ctx; ezusb_alloc_ctx()
382 static void ezusb_ctx_complete(struct request_context *ctx) ezusb_ctx_complete() argument
384 struct ezusb_priv *upriv = ctx->upriv; ezusb_ctx_complete()
389 list_del_init(&ctx->list); ezusb_ctx_complete()
396 switch (ctx->state) { ezusb_ctx_complete()
404 if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) { ezusb_ctx_complete()
409 if (ctx->state != EZUSB_CTX_COMPLETE) ezusb_ctx_complete()
416 ezusb_complete_all(&ctx->done); ezusb_ctx_complete()
417 ezusb_request_context_put(ctx); ezusb_ctx_complete()
426 ezusb_complete_all(&ctx->done); ezusb_ctx_complete()
427 ezusb_request_context_put(ctx); ezusb_ctx_complete()
451 struct request_context *ctx; ezusb_req_queue_run() local
462 ctx = ezusb_req_queue_run()
466 if (!ctx->upriv->udev) ezusb_req_queue_run()
470 list_move_tail(&ctx->list, &upriv->req_active); ezusb_req_queue_run()
472 if (ctx->state == EZUSB_CTX_QUEUED) { ezusb_req_queue_run()
473 atomic_inc(&ctx->refcount); ezusb_req_queue_run()
474 result = usb_submit_urb(ctx->outurb, GFP_ATOMIC); ezusb_req_queue_run()
476 ctx->state = EZUSB_CTX_REQSUBMIT_FAIL; ezusb_req_queue_run()
483 ezusb_ctx_complete(ctx); ezusb_req_queue_run()
484 ezusb_request_context_put(ctx); ezusb_req_queue_run()
488 ctx->state = EZUSB_CTX_REQ_SUBMITTED; ezusb_req_queue_run()
489 ezusb_mod_timer(ctx->upriv, &ctx->timer, ezusb_req_queue_run()
501 struct request_context *ctx) ezusb_req_enqueue_run()
507 if (!ctx->upriv->udev) { ezusb_req_enqueue_run()
511 atomic_inc(&ctx->refcount); ezusb_req_enqueue_run()
512 list_add_tail(&ctx->list, &upriv->req_pending); ezusb_req_enqueue_run()
515 ctx->state = EZUSB_CTX_QUEUED; ezusb_req_enqueue_run()
526 struct request_context *ctx = urb->context; ezusb_request_out_callback() local
527 struct ezusb_priv *upriv = ctx->upriv; ezusb_request_out_callback()
531 del_timer(&ctx->timer); ezusb_request_out_callback()
533 if (ctx->killed) { ezusb_request_out_callback()
535 pr_warn("interrupt called with dead ctx\n"); ezusb_request_out_callback()
539 state = ctx->state; ezusb_request_out_callback()
544 if (ctx->in_rid) { ezusb_request_out_callback()
545 ctx->state = EZUSB_CTX_REQ_COMPLETE; ezusb_request_out_callback()
547 ezusb_mod_timer(upriv, &ctx->timer, ezusb_request_out_callback()
556 ctx->state = EZUSB_CTX_COMPLETE; ezusb_request_out_callback()
558 ezusb_ctx_complete(ctx); ezusb_request_out_callback()
574 ctx->state = EZUSB_CTX_REQ_FAILED; ezusb_request_out_callback()
581 ezusb_ctx_complete(ctx); ezusb_request_out_callback()
593 ezusb_request_context_put(ctx); ezusb_request_out_callback()
600 struct request_context *ctx = NULL; ezusb_request_in_callback() local
618 ctx = c; ezusb_request_in_callback()
627 if (ctx == NULL) { ezusb_request_in_callback()
636 urb->transfer_buffer = ctx->buf; ezusb_request_in_callback()
637 ctx->buf = (void *) ans; ezusb_request_in_callback()
638 ctx->buf_length = urb->actual_length; ezusb_request_in_callback()
640 state = ctx->state; ezusb_request_in_callback()
648 ctx->state = EZUSB_CTX_RESP_RECEIVED; ezusb_request_in_callback()
659 ctx->state = EZUSB_CTX_COMPLETE; ezusb_request_in_callback()
662 del_timer(&ctx->timer); ezusb_request_in_callback()
666 ezusb_ctx_complete(ctx); ezusb_request_in_callback()
675 del_timer(&ctx->timer); ezusb_request_in_callback()
676 ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK; ezusb_request_in_callback()
677 usb_unlink_urb(ctx->outurb); ezusb_request_in_callback()
685 struct request_context *ctx) ezusb_req_ctx_wait()
687 switch (ctx->state) { ezusb_req_ctx_wait()
697 while (!ctx->done.done && msecs--) ezusb_req_ctx_wait()
700 wait_event_interruptible(ctx->done.wait, ezusb_req_ctx_wait()
701 ctx->done.done); ezusb_req_ctx_wait()
856 struct request_context *ctx, ezusb_access_ltv()
876 req_size = ezusb_fill_req(ctx->buf, length, ctx->out_rid, data, ezusb_access_ltv()
878 usb_fill_bulk_urb(ctx->outurb, upriv->udev, upriv->write_pipe, ezusb_access_ltv()
879 ctx->buf, req_size, ezusb_access_ltv()
880 ezusb_request_out_callback, ctx); ezusb_access_ltv()
882 if (ctx->in_rid) ezusb_access_ltv()
885 ezusb_req_enqueue_run(upriv, ctx); ezusb_access_ltv()
889 if (ctx->in_rid) ezusb_access_ltv()
890 ezusb_req_ctx_wait(upriv, ctx); ezusb_access_ltv()
892 state = ctx->state; ezusb_access_ltv()
895 retval = ctx->outurb->status; ezusb_access_ltv()
900 if (!ctx->in_rid) ezusb_access_ltv()
915 printk(KERN_ERR PFX "ctx timed out\n"); ezusb_access_ltv()
918 printk(KERN_ERR PFX "ctx failed\n"); ezusb_access_ltv()
923 if (ctx->in_rid) { ezusb_access_ltv()
924 struct ezusb_packet *ans = ctx->buf; ezusb_access_ltv()
932 if (exp_len != ctx->buf_length) { ezusb_access_ltv()
935 ctx->in_rid, exp_len, ctx->buf_length); ezusb_access_ltv()
946 ezusb_request_context_put(ctx); ezusb_access_ltv()
955 struct request_context *ctx; ezusb_write_ltv() local
967 ctx = ezusb_alloc_ctx(upriv, rid, EZUSB_RID_ACK); ezusb_write_ltv()
968 if (!ctx) ezusb_write_ltv()
976 return ezusb_access_ltv(upriv, ctx, length, data, frame_type, ezusb_write_ltv()
984 struct request_context *ctx; ezusb_read_ltv() local
989 ctx = ezusb_alloc_ctx(upriv, rid, rid); ezusb_read_ltv()
990 if (!ctx) ezusb_read_ltv()
993 return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL, ezusb_read_ltv()
1001 struct request_context *ctx; ezusb_doicmd_wait() local
1012 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK); ezusb_doicmd_wait()
1013 if (!ctx) ezusb_doicmd_wait()
1016 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, ezusb_doicmd_wait()
1024 struct request_context *ctx; ezusb_docmd_wait() local
1033 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK); ezusb_docmd_wait()
1034 if (!ctx) ezusb_docmd_wait()
1037 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, ezusb_docmd_wait()
1078 struct request_context *ctx; ezusb_read_pda() local
1083 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_READ_PDA, EZUSB_RID_READ_PDA); ezusb_read_pda()
1084 if (!ctx) ezusb_read_pda()
1094 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, ezusb_read_pda()
1102 struct request_context *ctx; ezusb_program_init() local
1105 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_INIT, EZUSB_RID_ACK); ezusb_program_init()
1106 if (!ctx) ezusb_program_init()
1109 return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, ezusb_program_init()
1116 struct request_context *ctx; ezusb_program_end() local
1118 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_END, EZUSB_RID_ACK); ezusb_program_end()
1119 if (!ctx) ezusb_program_end()
1122 return ezusb_access_ltv(upriv, ctx, 0, NULL, ezusb_program_end()
1130 struct request_context *ctx; ezusb_program_bytes() local
1134 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_SET_ADDR, EZUSB_RID_ACK); ezusb_program_bytes()
1135 if (!ctx) ezusb_program_bytes()
1138 err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data, ezusb_program_bytes()
1143 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_BYTES, EZUSB_RID_ACK); ezusb_program_bytes()
1144 if (!ctx) ezusb_program_bytes()
1147 return ezusb_access_ltv(upriv, ctx, len, buf, ezusb_program_bytes()
1192 struct request_context *ctx; ezusb_xmit() local
1227 ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); ezusb_xmit()
1228 if (!ctx) ezusb_xmit()
1231 memset(ctx->buf, 0, BULK_BUF_SIZE); ezusb_xmit()
1232 buf = ctx->buf->data; ezusb_xmit()
1265 tx_size = ALIGN(buf - ctx->buf->data, 2); ezusb_xmit()
1267 err = ezusb_access_ltv(upriv, ctx, tx_size, NULL, ezusb_xmit()
1464 struct request_context *ctx; ezusb_delete() local
1467 ctx = list_entry(item, struct request_context, list); ezusb_delete()
1468 atomic_inc(&ctx->refcount); ezusb_delete()
1470 ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK; ezusb_delete()
1471 err = usb_unlink_urb(ctx->outurb); ezusb_delete()
1475 wait_for_completion(&ctx->done); ezusb_delete()
1477 del_timer_sync(&ctx->timer); ezusb_delete()
1480 if (!list_empty(&ctx->list)) ezusb_delete()
1481 ezusb_ctx_complete(ctx); ezusb_delete()
1483 ezusb_request_context_put(ctx); ezusb_delete()
500 ezusb_req_enqueue_run(struct ezusb_priv *upriv, struct request_context *ctx) ezusb_req_enqueue_run() argument
684 ezusb_req_ctx_wait(struct ezusb_priv *upriv, struct request_context *ctx) ezusb_req_ctx_wait() argument
855 ezusb_access_ltv(struct ezusb_priv *upriv, struct request_context *ctx, u16 length, const void *data, u16 frame_type, void *ans_buff, unsigned ans_size, u16 *ans_length) ezusb_access_ltv() argument
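The orinoco_usb.c results show request_context lifetime managed by a bare atomic refcount: ezusb_alloc_ctx() starts the count at one, every queue insertion, URB submission and timer arm takes an extra reference (the atomic_inc calls above), and ezusb_request_context_put() frees the URB and buffers only once the count drops to zero. A stripped-down model of that get/put discipline, in standalone C rather than the driver's kernel APIs:

    #include <stdatomic.h>
    #include <stdlib.h>

    struct req_ctx {                    /* hypothetical stand-in */
        atomic_int refcount;
        void      *buf;
    };

    static struct req_ctx *req_ctx_alloc(size_t buflen)
    {
        struct req_ctx *c = calloc(1, sizeof(*c));
        if (!c)
            return NULL;
        c->buf = malloc(buflen);
        if (!c->buf) {
            free(c);
            return NULL;
        }
        atomic_init(&c->refcount, 1);   /* caller holds the initial reference */
        return c;
    }

    static void req_ctx_get(struct req_ctx *c)
    {
        atomic_fetch_add(&c->refcount, 1);
    }

    /* Last put releases everything, like ezusb_request_context_put(). */
    static void req_ctx_put(struct req_ctx *c)
    {
        if (atomic_fetch_sub(&c->refcount, 1) != 1)
            return;
        free(c->buf);
        free(c);
    }

    int main(void)
    {
        struct req_ctx *c = req_ctx_alloc(64);
        if (!c)
            return 1;
        req_ctx_get(c);   /* e.g. reference held for a submitted URB */
        req_ctx_put(c);   /* completion path */
        req_ctx_put(c);   /* original owner: count hits zero, frees */
        return 0;
    }

Whichever path finishes last (completion handler, timer, or waiter) performs the final put, so the free needs no extra locking.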
/linux-4.1.27/drivers/media/firewire/
firedtv-fw.c 84 static int queue_iso(struct fdtv_ir_context *ctx, int index) queue_iso() argument
89 p.interrupt = !(++ctx->interrupt_packet & (IRQ_INTERVAL - 1)); queue_iso()
93 return fw_iso_context_queue(ctx->context, &p, &ctx->buffer, queue_iso()
101 struct fdtv_ir_context *ctx = fdtv->ir_context; handle_iso() local
103 int length, err, i = ctx->current_packet; handle_iso()
113 p = ctx->pages[i / PACKETS_PER_PAGE] handle_iso()
121 err = queue_iso(ctx, i); handle_iso()
127 fw_iso_context_queue_flush(ctx->context); handle_iso()
128 ctx->current_packet = i; handle_iso()
133 struct fdtv_ir_context *ctx; fdtv_start_iso() local
137 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL); fdtv_start_iso()
138 if (!ctx) fdtv_start_iso()
141 ctx->context = fw_iso_context_create(device->card, fdtv_start_iso()
144 if (IS_ERR(ctx->context)) { fdtv_start_iso()
145 err = PTR_ERR(ctx->context); fdtv_start_iso()
149 err = fw_iso_buffer_init(&ctx->buffer, device->card, fdtv_start_iso()
154 ctx->interrupt_packet = 0; fdtv_start_iso()
155 ctx->current_packet = 0; fdtv_start_iso()
158 ctx->pages[i] = page_address(ctx->buffer.pages[i]); fdtv_start_iso()
161 err = queue_iso(ctx, i); fdtv_start_iso()
166 err = fw_iso_context_start(ctx->context, -1, 0, fdtv_start_iso()
171 fdtv->ir_context = ctx; fdtv_start_iso()
175 fw_iso_buffer_destroy(&ctx->buffer, device->card); fdtv_start_iso()
177 fw_iso_context_destroy(ctx->context); fdtv_start_iso()
179 kfree(ctx); fdtv_start_iso()
186 struct fdtv_ir_context *ctx = fdtv->ir_context; fdtv_stop_iso() local
188 fw_iso_context_stop(ctx->context); fdtv_stop_iso()
189 fw_iso_buffer_destroy(&ctx->buffer, device_of(fdtv)->card); fdtv_stop_iso()
190 fw_iso_context_destroy(ctx->context); fdtv_stop_iso()
191 kfree(ctx); fdtv_stop_iso()
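One detail worth calling out in queue_iso() (line 89 above): completion interrupts are requested only every IRQ_INTERVAL packets, using a power-of-two mask rather than a modulus. A tiny standalone illustration of that trick; the interval value below is assumed, chosen only to be a power of two.

    #include <stdbool.h>
    #include <stdio.h>

    #define IRQ_INTERVAL 16   /* must be a power of two for the mask trick */

    int main(void)
    {
        unsigned packet_counter = 0;

        for (int i = 0; i < 64; i++) {
            /* Nonzero low bits mean "not a multiple yet", so the
             * expression is true once every IRQ_INTERVAL packets. */
            bool want_irq = !(++packet_counter & (IRQ_INTERVAL - 1));
            if (want_irq)
                printf("request interrupt at packet %u\n", packet_counter);
        }
        return 0;
    }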
/linux-4.1.27/arch/x86/crypto/
camellia_aesni_avx_glue.c 30 asmlinkage void camellia_ecb_enc_16way(struct camellia_ctx *ctx, u8 *dst,
34 asmlinkage void camellia_ecb_dec_16way(struct camellia_ctx *ctx, u8 *dst,
38 asmlinkage void camellia_cbc_dec_16way(struct camellia_ctx *ctx, u8 *dst,
42 asmlinkage void camellia_ctr_16way(struct camellia_ctx *ctx, u8 *dst,
46 asmlinkage void camellia_xts_enc_16way(struct camellia_ctx *ctx, u8 *dst,
50 asmlinkage void camellia_xts_dec_16way(struct camellia_ctx *ctx, u8 *dst,
54 void camellia_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) camellia_xts_enc() argument
56 glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_xts_enc()
61 void camellia_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) camellia_xts_dec() argument
63 glue_xts_crypt_128bit_one(ctx, dst, src, iv, camellia_xts_dec()
210 struct camellia_ctx *ctx; member in struct:crypt_priv
217 struct crypt_priv *ctx = priv; encrypt_callback() local
220 ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes); encrypt_callback()
223 camellia_ecb_enc_16way(ctx->ctx, srcdst, srcdst); encrypt_callback()
229 camellia_enc_blk_2way(ctx->ctx, srcdst, srcdst); encrypt_callback()
235 camellia_enc_blk(ctx->ctx, srcdst, srcdst); encrypt_callback()
241 struct crypt_priv *ctx = priv; decrypt_callback() local
244 ctx->fpu_enabled = camellia_fpu_begin(ctx->fpu_enabled, nbytes); decrypt_callback()
247 camellia_ecb_dec_16way(ctx->ctx, srcdst, srcdst); decrypt_callback()
253 camellia_dec_blk_2way(ctx->ctx, srcdst, srcdst); decrypt_callback()
259 camellia_dec_blk(ctx->ctx, srcdst, srcdst); decrypt_callback()
265 struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_encrypt() local
268 .ctx = &ctx->camellia_ctx, lrw_encrypt()
275 .table_ctx = &ctx->lrw_table, lrw_encrypt()
291 struct camellia_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_decrypt() local
294 .ctx = &ctx->camellia_ctx, lrw_decrypt()
301 .table_ctx = &ctx->lrw_table, lrw_decrypt()
317 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt() local
321 &ctx->tweak_ctx, &ctx->crypt_ctx); xts_encrypt()
327 struct camellia_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt() local
331 &ctx->tweak_ctx, &ctx->crypt_ctx); xts_decrypt()
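camellia_aesni_avx_glue.c — like the serpent, cast6 and twofish glue files further down — wraps the real cipher context in a crypt_priv that also tracks fpu_enabled, and its encrypt/decrypt callbacks consume the buffer in the widest SIMD batch available (16- or 8-way) before falling back to smaller units for the tail. Schematically, with placeholder block sizes and helper names:

    #include <stddef.h>
    #include <stdio.h>

    #define BLOCK_SIZE 16            /* one cipher block, in bytes */
    #define WIDE_WAY   8             /* blocks per SIMD call (assumed) */

    static void enc_wide(unsigned char *buf) { (void)buf; puts("8-block SIMD path"); }
    static void enc_one(unsigned char *buf)  { (void)buf; puts("1-block scalar path"); }

    /* Mirrors encrypt_callback(): consume the buffer in the largest
     * batches the SIMD routine supports, then finish block by block. */
    static void encrypt_callback(unsigned char *srcdst, size_t nbytes)
    {
        while (nbytes >= WIDE_WAY * BLOCK_SIZE) {
            enc_wide(srcdst);
            srcdst += WIDE_WAY * BLOCK_SIZE;
            nbytes -= WIDE_WAY * BLOCK_SIZE;
        }
        for (; nbytes >= BLOCK_SIZE; srcdst += BLOCK_SIZE, nbytes -= BLOCK_SIZE)
            enc_one(srcdst);
    }

    int main(void)
    {
        unsigned char buf[9 * BLOCK_SIZE] = {0};
        encrypt_callback(buf, sizeof(buf));  /* one wide call + one single */
        return 0;
    }

The fpu_enabled flag in the real crypt_priv serves a similar batching purpose at another level: the FPU-save cost is paid once for a run of calls rather than per block.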
serpent_avx2_glue.c 31 asmlinkage void serpent_ecb_enc_16way(struct serpent_ctx *ctx, u8 *dst,
33 asmlinkage void serpent_ecb_dec_16way(struct serpent_ctx *ctx, u8 *dst,
35 asmlinkage void serpent_cbc_dec_16way(void *ctx, u128 *dst, const u128 *src);
37 asmlinkage void serpent_ctr_16way(void *ctx, u128 *dst, const u128 *src,
39 asmlinkage void serpent_xts_enc_16way(struct serpent_ctx *ctx, u8 *dst,
41 asmlinkage void serpent_xts_dec_16way(struct serpent_ctx *ctx, u8 *dst,
184 struct serpent_ctx *ctx; member in struct:crypt_priv
191 struct crypt_priv *ctx = priv; encrypt_callback() local
194 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); encrypt_callback()
197 serpent_ecb_enc_16way(ctx->ctx, srcdst, srcdst); encrypt_callback()
203 serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst); encrypt_callback()
209 __serpent_encrypt(ctx->ctx, srcdst, srcdst); encrypt_callback()
215 struct crypt_priv *ctx = priv; decrypt_callback() local
218 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); decrypt_callback()
221 serpent_ecb_dec_16way(ctx->ctx, srcdst, srcdst); decrypt_callback()
227 serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst); decrypt_callback()
233 __serpent_decrypt(ctx->ctx, srcdst, srcdst); decrypt_callback()
239 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_encrypt() local
242 .ctx = &ctx->serpent_ctx, lrw_encrypt()
249 .table_ctx = &ctx->lrw_table, lrw_encrypt()
265 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_decrypt() local
268 .ctx = &ctx->serpent_ctx, lrw_decrypt()
275 .table_ctx = &ctx->lrw_table, lrw_decrypt()
291 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt() local
295 &ctx->tweak_ctx, &ctx->crypt_ctx); xts_encrypt()
301 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt() local
305 &ctx->tweak_ctx, &ctx->crypt_ctx); xts_decrypt()
aesni-intel_glue.c 83 asmlinkage int aesni_set_key(struct crypto_aes_ctx *ctx, const u8 *in_key,
85 asmlinkage void aesni_enc(struct crypto_aes_ctx *ctx, u8 *out,
87 asmlinkage void aesni_dec(struct crypto_aes_ctx *ctx, u8 *out,
89 asmlinkage void aesni_ecb_enc(struct crypto_aes_ctx *ctx, u8 *out,
91 asmlinkage void aesni_ecb_dec(struct crypto_aes_ctx *ctx, u8 *out,
93 asmlinkage void aesni_cbc_enc(struct crypto_aes_ctx *ctx, u8 *out,
95 asmlinkage void aesni_cbc_dec(struct crypto_aes_ctx *ctx, u8 *out,
106 static void (*aesni_ctr_enc_tfm)(struct crypto_aes_ctx *ctx, u8 *out,
108 asmlinkage void aesni_ctr_enc(struct crypto_aes_ctx *ctx, u8 *out,
111 asmlinkage void aesni_xts_crypt8(struct crypto_aes_ctx *ctx, u8 *out,
115 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
130 asmlinkage void aesni_gcm_enc(void *ctx, u8 *out,
136 * void *ctx, AES Key schedule. Starts on a 16 byte boundary.
151 asmlinkage void aesni_gcm_dec(void *ctx, u8 *out,
171 asmlinkage void aesni_gcm_enc_avx_gen2(void *ctx, u8 *out,
176 asmlinkage void aesni_gcm_dec_avx_gen2(void *ctx, u8 *out,
181 static void aesni_gcm_enc_avx(void *ctx, u8 *out, aesni_gcm_enc_avx() argument
186 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; aesni_gcm_enc_avx()
188 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, aesni_gcm_enc_avx()
191 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); aesni_gcm_enc_avx()
192 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad, aesni_gcm_enc_avx()
197 static void aesni_gcm_dec_avx(void *ctx, u8 *out, aesni_gcm_dec_avx() argument
202 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; aesni_gcm_dec_avx()
204 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aad, aesni_gcm_dec_avx()
207 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); aesni_gcm_dec_avx()
208 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, aesni_gcm_dec_avx()
222 asmlinkage void aesni_gcm_enc_avx_gen4(void *ctx, u8 *out,
227 asmlinkage void aesni_gcm_dec_avx_gen4(void *ctx, u8 *out,
232 static void aesni_gcm_enc_avx2(void *ctx, u8 *out, aesni_gcm_enc_avx2() argument
237 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; aesni_gcm_enc_avx2()
239 aesni_gcm_enc(ctx, out, in, plaintext_len, iv, hash_subkey, aad, aesni_gcm_enc_avx2()
242 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); aesni_gcm_enc_avx2()
243 aesni_gcm_enc_avx_gen2(ctx, out, in, plaintext_len, iv, aad, aesni_gcm_enc_avx2()
246 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey); aesni_gcm_enc_avx2()
247 aesni_gcm_enc_avx_gen4(ctx, out, in, plaintext_len, iv, aad, aesni_gcm_enc_avx2()
252 static void aesni_gcm_dec_avx2(void *ctx, u8 *out, aesni_gcm_dec_avx2() argument
257 struct crypto_aes_ctx *aes_ctx = (struct crypto_aes_ctx*)ctx; aesni_gcm_dec_avx2()
259 aesni_gcm_dec(ctx, out, in, ciphertext_len, iv, hash_subkey, aesni_gcm_dec_avx2()
262 aesni_gcm_precomp_avx_gen2(ctx, hash_subkey); aesni_gcm_dec_avx2()
263 aesni_gcm_dec_avx_gen2(ctx, out, in, ciphertext_len, iv, aad, aesni_gcm_dec_avx2()
266 aesni_gcm_precomp_avx_gen4(ctx, hash_subkey); aesni_gcm_dec_avx2()
267 aesni_gcm_dec_avx_gen4(ctx, out, in, ciphertext_len, iv, aad, aesni_gcm_dec_avx2()
273 static void (*aesni_gcm_enc_tfm)(void *ctx, u8 *out,
278 static void (*aesni_gcm_dec_tfm)(void *ctx, u8 *out,
306 struct crypto_aes_ctx *ctx = aes_ctx(raw_ctx); aes_set_key_common() local
317 err = crypto_aes_expand_key(ctx, in_key, key_len); aes_set_key_common()
320 err = aesni_set_key(ctx, in_key, key_len); aes_set_key_common()
335 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); aes_encrypt() local
338 crypto_aes_encrypt_x86(ctx, dst, src); aes_encrypt()
341 aesni_enc(ctx, dst, src); aes_encrypt()
348 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); aes_decrypt() local
351 crypto_aes_decrypt_x86(ctx, dst, src); aes_decrypt()
354 aesni_dec(ctx, dst, src); aes_decrypt()
361 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); __aes_encrypt() local
363 aesni_enc(ctx, dst, src); __aes_encrypt()
368 struct crypto_aes_ctx *ctx = aes_ctx(crypto_tfm_ctx(tfm)); __aes_decrypt() local
370 aesni_dec(ctx, dst, src); __aes_decrypt()
377 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); ecb_encrypt() local
387 aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, ecb_encrypt()
401 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); ecb_decrypt() local
411 aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, ecb_decrypt()
425 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); cbc_encrypt() local
435 aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, cbc_encrypt()
449 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); cbc_decrypt() local
459 aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, cbc_decrypt()
470 static void ctr_crypt_final(struct crypto_aes_ctx *ctx, ctr_crypt_final() argument
479 aesni_enc(ctx, keystream, ctrblk); ctr_crypt_final()
486 static void aesni_ctr_enc_avx_tfm(struct crypto_aes_ctx *ctx, u8 *out, aesni_ctr_enc_avx_tfm() argument
495 if (ctx->key_length == AES_KEYSIZE_128) aesni_ctr_enc_avx_tfm()
496 aes_ctr_enc_128_avx_by8(in, iv, (void *)ctx, out, len); aesni_ctr_enc_avx_tfm()
497 else if (ctx->key_length == AES_KEYSIZE_192) aesni_ctr_enc_avx_tfm()
498 aes_ctr_enc_192_avx_by8(in, iv, (void *)ctx, out, len); aesni_ctr_enc_avx_tfm()
500 aes_ctr_enc_256_avx_by8(in, iv, (void *)ctx, out, len); aesni_ctr_enc_avx_tfm()
508 struct crypto_aes_ctx *ctx = aes_ctx(crypto_blkcipher_ctx(desc->tfm)); ctr_crypt() local
518 aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, ctr_crypt()
524 ctr_crypt_final(ctx, &walk); ctr_crypt()
558 static void lrw_xts_encrypt_callback(void *ctx, u8 *blks, unsigned int nbytes) lrw_xts_encrypt_callback() argument
560 aesni_ecb_enc(ctx, blks, blks, nbytes); lrw_xts_encrypt_callback()
563 static void lrw_xts_decrypt_callback(void *ctx, u8 *blks, unsigned int nbytes) lrw_xts_decrypt_callback() argument
565 aesni_ecb_dec(ctx, blks, blks, nbytes); lrw_xts_decrypt_callback()
571 struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_aesni_setkey() local
574 err = aes_set_key_common(tfm, ctx->raw_aes_ctx, key, lrw_aesni_setkey()
579 return lrw_init_table(&ctx->lrw_table, key + keylen - AES_BLOCK_SIZE); lrw_aesni_setkey()
584 struct aesni_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_aesni_exit_tfm() local
586 lrw_free_table(&ctx->lrw_table); lrw_aesni_exit_tfm()
592 struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_encrypt() local
598 .table_ctx = &ctx->lrw_table, lrw_encrypt()
599 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx), lrw_encrypt()
616 struct aesni_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_decrypt() local
622 .table_ctx = &ctx->lrw_table, lrw_decrypt()
623 .crypt_ctx = aes_ctx(ctx->raw_aes_ctx), lrw_decrypt()
640 struct aesni_xts_ctx *ctx = crypto_tfm_ctx(tfm); xts_aesni_setkey() local
653 err = aes_set_key_common(tfm, ctx->raw_crypt_ctx, key, keylen / 2); xts_aesni_setkey()
658 return aes_set_key_common(tfm, ctx->raw_tweak_ctx, key + keylen / 2, xts_aesni_setkey()
663 static void aesni_xts_tweak(void *ctx, u8 *out, const u8 *in) aesni_xts_tweak() argument
665 aesni_enc(ctx, out, in); aesni_xts_tweak()
670 static void aesni_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) aesni_xts_enc() argument
672 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_enc)); aesni_xts_enc()
675 static void aesni_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) aesni_xts_dec() argument
677 glue_xts_crypt_128bit_one(ctx, dst, src, iv, GLUE_FUNC_CAST(aesni_dec)); aesni_xts_dec()
680 static void aesni_xts_enc8(void *ctx, u128 *dst, const u128 *src, le128 *iv) aesni_xts_enc8() argument
682 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, true, (u8 *)iv); aesni_xts_enc8()
685 static void aesni_xts_dec8(void *ctx, u128 *dst, const u128 *src, le128 *iv) aesni_xts_dec8() argument
687 aesni_xts_crypt8(ctx, (u8 *)dst, (const u8 *)src, false, (u8 *)iv); aesni_xts_dec8()
719 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt() local
723 aes_ctx(ctx->raw_tweak_ctx), xts_encrypt()
724 aes_ctx(ctx->raw_crypt_ctx)); xts_encrypt()
730 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt() local
734 aes_ctx(ctx->raw_tweak_ctx), xts_decrypt()
735 aes_ctx(ctx->raw_crypt_ctx)); xts_decrypt()
743 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt() local
749 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), xts_encrypt()
751 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), xts_encrypt()
768 struct aesni_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt() local
774 .tweak_ctx = aes_ctx(ctx->raw_tweak_ctx), xts_decrypt()
776 .crypt_ctx = aes_ctx(ctx->raw_crypt_ctx), xts_decrypt()
796 struct aesni_rfc4106_gcm_ctx *ctx = (struct aesni_rfc4106_gcm_ctx *) rfc4106_init() local
808 memcpy(child_ctx, ctx, sizeof(*ctx)); rfc4106_init()
809 ctx->cryptd_tfm = cryptd_tfm; rfc4106_init()
817 struct aesni_rfc4106_gcm_ctx *ctx = rfc4106_exit() local
820 if (!IS_ERR(ctx->cryptd_tfm)) rfc4106_exit()
821 cryptd_free_aead(ctx->cryptd_tfm); rfc4106_exit()
900 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(aead); common_rfc4106_set_key() local
915 memcpy(ctx->nonce, key + key_len, sizeof(ctx->nonce)); common_rfc4106_set_key()
917 if ((unsigned long)(&(ctx->aes_key_expanded.key_enc[0])) % AESNI_ALIGN) common_rfc4106_set_key()
932 ret = crypto_aes_expand_key(&(ctx->aes_key_expanded), common_rfc4106_set_key()
936 ret = aesni_set_key(&(ctx->aes_key_expanded), key, key_len); common_rfc4106_set_key()
940 if ((unsigned long)(&(ctx->hash_subkey[0])) % AESNI_ALIGN) { common_rfc4106_set_key()
944 ret = rfc4106_set_hash_subkey(ctx->hash_subkey, key, key_len); common_rfc4106_set_key()
953 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); rfc4106_set_key() local
954 struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm); rfc4106_set_key()
956 struct cryptd_aead *cryptd_tfm = ctx->cryptd_tfm; rfc4106_set_key()
961 memcpy(ctx, c_ctx, sizeof(*ctx)); rfc4106_set_key()
962 ctx->cryptd_tfm = cryptd_tfm; rfc4106_set_key()
987 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(parent); rfc4106_set_authsize() local
988 struct crypto_aead *child = cryptd_aead_child(ctx->cryptd_tfm); rfc4106_set_authsize()
1003 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); __driver_rfc4106_encrypt() local
1004 u32 key_len = ctx->aes_key_expanded.key_length; __driver_rfc4106_encrypt()
1005 void *aes_ctx = &(ctx->aes_key_expanded); __driver_rfc4106_encrypt()
1028 *(iv+i) = ctx->nonce[i]; __driver_rfc4106_encrypt()
1059 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, dst __driver_rfc4106_encrypt()
1089 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); __driver_rfc4106_decrypt() local
1090 u32 key_len = ctx->aes_key_expanded.key_length; __driver_rfc4106_decrypt()
1091 void *aes_ctx = &(ctx->aes_key_expanded); __driver_rfc4106_decrypt()
1118 *(iv+i) = ctx->nonce[i]; __driver_rfc4106_decrypt()
1148 ctx->hash_subkey, assoc, (unsigned long)req->assoclen, __driver_rfc4106_decrypt()
1175 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); rfc4106_encrypt() local
1182 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); rfc4106_encrypt()
1196 struct aesni_rfc4106_gcm_ctx *ctx = aesni_rfc4106_gcm_ctx_get(tfm); rfc4106_decrypt() local
1203 aead_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base); rfc4106_decrypt()
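The ecb/cbc/ctr helpers in aesni-intel_glue.c all share the blkcipher-walk shape: map a scatterlist segment, cipher only the whole AES blocks inside it, and hand the sub-block remainder back to the walker for the next iteration. A simplified standalone model of that loop follows; the walk is faked here with one flat buffer.

    #include <stddef.h>
    #include <stdio.h>

    #define AES_BLOCK_SIZE 16

    static void fpu_begin(void) { puts("kernel_fpu_begin"); }
    static void fpu_end(void)   { puts("kernel_fpu_end"); }

    static void ecb_enc(unsigned char *dst, const unsigned char *src, size_t n)
    {
        (void)dst; (void)src;
        printf("encrypt %zu bytes\n", n);
    }

    /* Models ecb_encrypt(): each walk step yields a mapped segment; only
     * whole blocks are ciphered, the tail stays for the next walk step. */
    static void ecb_encrypt(unsigned char *buf, size_t nbytes)
    {
        fpu_begin();
        while (nbytes >= AES_BLOCK_SIZE) {
            size_t chunk = nbytes - nbytes % AES_BLOCK_SIZE;
            ecb_enc(buf, buf, chunk);
            buf += chunk;
            nbytes -= chunk;  /* a real walk would map the next segment here */
        }
        fpu_end();
    }

    int main(void)
    {
        unsigned char buf[40] = {0};
        ecb_encrypt(buf, sizeof(buf));  /* encrypts 32 bytes, leaves 8 */
        return 0;
    }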
crct10dif-pclmul_glue.c 50 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_init() local
52 ctx->crc = 0; chksum_init()
60 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_update() local
64 ctx->crc = crc_t10dif_pcl(ctx->crc, data, length); chksum_update()
67 ctx->crc = crc_t10dif_generic(ctx->crc, data, length); chksum_update()
73 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_final() local
75 *(__u16 *)out = ctx->crc; chksum_final()
94 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_finup() local
96 return __chksum_finup(&ctx->crc, data, len, out); chksum_finup()
102 struct chksum_desc_ctx *ctx = shash_desc_ctx(desc); chksum_digest() local
104 return __chksum_finup(&ctx->crc, data, length, out); chksum_digest()
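crct10dif-pclmul_glue.c keeps nothing in its per-request context but the running 16-bit CRC: chksum_init() zeroes it, chksum_update() folds data in (the PCLMULQDQ routine when the input is long enough to pay for the SIMD setup, the generic one otherwise), and chksum_final() copies it out. The same init/update/final shape, standalone, with a trivial placeholder fold in place of the real CRC-T10DIF:

    #include <stdint.h>
    #include <stddef.h>
    #include <stdio.h>

    struct chksum_desc_ctx { uint16_t crc; };   /* entire per-request state */

    static void chksum_init(struct chksum_desc_ctx *ctx)
    {
        ctx->crc = 0;
    }

    /* Placeholder fold: the real driver picks crc_t10dif_pcl() or
     * crc_t10dif_generic() depending on the input length. */
    static void chksum_update(struct chksum_desc_ctx *ctx,
                              const uint8_t *data, size_t len)
    {
        for (size_t i = 0; i < len; i++)
            ctx->crc = (uint16_t)(ctx->crc * 31 + data[i]);
    }

    static uint16_t chksum_final(const struct chksum_desc_ctx *ctx)
    {
        return ctx->crc;
    }

    int main(void)
    {
        struct chksum_desc_ctx c;
        chksum_init(&c);
        chksum_update(&c, (const uint8_t *)"abc", 3);
        printf("%04x\n", chksum_final(&c));
        return 0;
    }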
cast6_avx_glue.c 45 asmlinkage void cast6_ecb_enc_8way(struct cast6_ctx *ctx, u8 *dst,
47 asmlinkage void cast6_ecb_dec_8way(struct cast6_ctx *ctx, u8 *dst,
50 asmlinkage void cast6_cbc_dec_8way(struct cast6_ctx *ctx, u8 *dst,
52 asmlinkage void cast6_ctr_8way(struct cast6_ctx *ctx, u8 *dst, const u8 *src,
55 asmlinkage void cast6_xts_enc_8way(struct cast6_ctx *ctx, u8 *dst,
57 asmlinkage void cast6_xts_dec_8way(struct cast6_ctx *ctx, u8 *dst,
60 static void cast6_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) cast6_xts_enc() argument
62 glue_xts_crypt_128bit_one(ctx, dst, src, iv, cast6_xts_enc()
66 static void cast6_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) cast6_xts_dec() argument
68 glue_xts_crypt_128bit_one(ctx, dst, src, iv, cast6_xts_dec()
72 static void cast6_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) cast6_crypt_ctr() argument
79 __cast6_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); cast6_crypt_ctr()
205 struct cast6_ctx *ctx; member in struct:crypt_priv
212 struct crypt_priv *ctx = priv; encrypt_callback() local
215 ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes); encrypt_callback()
218 cast6_ecb_enc_8way(ctx->ctx, srcdst, srcdst); encrypt_callback()
223 __cast6_encrypt(ctx->ctx, srcdst, srcdst); encrypt_callback()
229 struct crypt_priv *ctx = priv; decrypt_callback() local
232 ctx->fpu_enabled = cast6_fpu_begin(ctx->fpu_enabled, nbytes); decrypt_callback()
235 cast6_ecb_dec_8way(ctx->ctx, srcdst, srcdst); decrypt_callback()
240 __cast6_decrypt(ctx->ctx, srcdst, srcdst); decrypt_callback()
251 struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_cast6_setkey() local
254 err = __cast6_setkey(&ctx->cast6_ctx, key, keylen - CAST6_BLOCK_SIZE, lrw_cast6_setkey()
259 return lrw_init_table(&ctx->lrw_table, key + keylen - CAST6_BLOCK_SIZE); lrw_cast6_setkey()
265 struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_encrypt() local
268 .ctx = &ctx->cast6_ctx, lrw_encrypt()
275 .table_ctx = &ctx->lrw_table, lrw_encrypt()
291 struct cast6_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_decrypt() local
294 .ctx = &ctx->cast6_ctx, lrw_decrypt()
301 .table_ctx = &ctx->lrw_table, lrw_decrypt()
316 struct cast6_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_exit_tfm() local
318 lrw_free_table(&ctx->lrw_table); lrw_exit_tfm()
329 struct cast6_xts_ctx *ctx = crypto_tfm_ctx(tfm); xts_cast6_setkey() local
342 err = __cast6_setkey(&ctx->crypt_ctx, key, keylen / 2, flags); xts_cast6_setkey()
347 return __cast6_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2, xts_cast6_setkey()
354 struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt() local
358 &ctx->tweak_ctx, &ctx->crypt_ctx); xts_encrypt()
364 struct cast6_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt() local
368 &ctx->tweak_ctx, &ctx->crypt_ctx); xts_decrypt()
serpent_avx_glue.c 45 asmlinkage void serpent_ecb_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
49 asmlinkage void serpent_ecb_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
53 asmlinkage void serpent_cbc_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
57 asmlinkage void serpent_ctr_8way_avx(struct serpent_ctx *ctx, u8 *dst,
61 asmlinkage void serpent_xts_enc_8way_avx(struct serpent_ctx *ctx, u8 *dst,
65 asmlinkage void serpent_xts_dec_8way_avx(struct serpent_ctx *ctx, u8 *dst,
69 void __serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) __serpent_crypt_ctr() argument
76 __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); __serpent_crypt_ctr()
81 void serpent_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) serpent_xts_enc() argument
83 glue_xts_crypt_128bit_one(ctx, dst, src, iv, serpent_xts_enc()
88 void serpent_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) serpent_xts_dec() argument
90 glue_xts_crypt_128bit_one(ctx, dst, src, iv, serpent_xts_dec()
218 struct serpent_ctx *ctx; member in struct:crypt_priv
225 struct crypt_priv *ctx = priv; encrypt_callback() local
228 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); encrypt_callback()
231 serpent_ecb_enc_8way_avx(ctx->ctx, srcdst, srcdst); encrypt_callback()
236 __serpent_encrypt(ctx->ctx, srcdst, srcdst); encrypt_callback()
242 struct crypt_priv *ctx = priv; decrypt_callback() local
245 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); decrypt_callback()
248 serpent_ecb_dec_8way_avx(ctx->ctx, srcdst, srcdst); decrypt_callback()
253 __serpent_decrypt(ctx->ctx, srcdst, srcdst); decrypt_callback()
259 struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_serpent_setkey() local
262 err = __serpent_setkey(&ctx->serpent_ctx, key, keylen - lrw_serpent_setkey()
267 return lrw_init_table(&ctx->lrw_table, key + keylen - lrw_serpent_setkey()
275 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_encrypt() local
278 .ctx = &ctx->serpent_ctx, lrw_encrypt()
285 .table_ctx = &ctx->lrw_table, lrw_encrypt()
301 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_decrypt() local
304 .ctx = &ctx->serpent_ctx, lrw_decrypt()
311 .table_ctx = &ctx->lrw_table, lrw_decrypt()
326 struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_serpent_exit_tfm() local
328 lrw_free_table(&ctx->lrw_table); lrw_serpent_exit_tfm()
335 struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm); xts_serpent_setkey() local
348 err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2); xts_serpent_setkey()
353 return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); xts_serpent_setkey()
360 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt() local
364 &ctx->tweak_ctx, &ctx->crypt_ctx); xts_encrypt()
370 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt() local
374 &ctx->tweak_ctx, &ctx->crypt_ctx); xts_decrypt()
twofish_glue_3way.c 39 static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, twofish_enc_blk_3way() argument
42 __twofish_enc_blk_3way(ctx, dst, src, false); twofish_enc_blk_3way()
45 static inline void twofish_enc_blk_xor_3way(struct twofish_ctx *ctx, u8 *dst, twofish_enc_blk_xor_3way() argument
48 __twofish_enc_blk_3way(ctx, dst, src, true); twofish_enc_blk_xor_3way()
51 void twofish_dec_blk_cbc_3way(void *ctx, u128 *dst, const u128 *src) twofish_dec_blk_cbc_3way() argument
58 twofish_dec_blk_3way(ctx, (u8 *)dst, (u8 *)src); twofish_dec_blk_cbc_3way()
65 void twofish_enc_blk_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) twofish_enc_blk_ctr() argument
75 twofish_enc_blk(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); twofish_enc_blk_ctr()
80 void twofish_enc_blk_ctr_3way(void *ctx, u128 *dst, const u128 *src, twofish_enc_blk_ctr_3way() argument
98 twofish_enc_blk_xor_3way(ctx, (u8 *)dst, (u8 *)ctrblks); twofish_enc_blk_ctr_3way()
189 struct twofish_ctx *ctx = priv; encrypt_callback() local
193 twofish_enc_blk_3way(ctx, srcdst, srcdst); encrypt_callback()
198 twofish_enc_blk(ctx, srcdst, srcdst); encrypt_callback()
204 struct twofish_ctx *ctx = priv; decrypt_callback() local
208 twofish_dec_blk_3way(ctx, srcdst, srcdst); decrypt_callback()
213 twofish_dec_blk(ctx, srcdst, srcdst); decrypt_callback()
219 struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_twofish_setkey() local
222 err = __twofish_setkey(&ctx->twofish_ctx, key, keylen - TF_BLOCK_SIZE, lrw_twofish_setkey()
227 return lrw_init_table(&ctx->lrw_table, key + keylen - TF_BLOCK_SIZE); lrw_twofish_setkey()
234 struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_encrypt() local
240 .table_ctx = &ctx->lrw_table, lrw_encrypt()
241 .crypt_ctx = &ctx->twofish_ctx, lrw_encrypt()
251 struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_decrypt() local
257 .table_ctx = &ctx->lrw_table, lrw_decrypt()
258 .crypt_ctx = &ctx->twofish_ctx, lrw_decrypt()
267 struct twofish_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_twofish_exit_tfm() local
269 lrw_free_table(&ctx->lrw_table); lrw_twofish_exit_tfm()
276 struct twofish_xts_ctx *ctx = crypto_tfm_ctx(tfm); xts_twofish_setkey() local
289 err = __twofish_setkey(&ctx->crypt_ctx, key, keylen / 2, flags); xts_twofish_setkey()
294 return __twofish_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2, xts_twofish_setkey()
302 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt() local
308 .tweak_ctx = &ctx->tweak_ctx, xts_encrypt()
310 .crypt_ctx = &ctx->crypt_ctx, xts_encrypt()
320 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt() local
326 .tweak_ctx = &ctx->tweak_ctx, xts_decrypt()
328 .crypt_ctx = &ctx->crypt_ctx, xts_decrypt()
serpent_sse2_glue.c 48 static void serpent_decrypt_cbc_xway(void *ctx, u128 *dst, const u128 *src) serpent_decrypt_cbc_xway() argument
56 serpent_dec_blk_xway(ctx, (u8 *)dst, (u8 *)src); serpent_decrypt_cbc_xway()
62 static void serpent_crypt_ctr(void *ctx, u128 *dst, const u128 *src, le128 *iv) serpent_crypt_ctr() argument
69 __serpent_encrypt(ctx, (u8 *)&ctrblk, (u8 *)&ctrblk); serpent_crypt_ctr()
73 static void serpent_crypt_ctr_xway(void *ctx, u128 *dst, const u128 *src, serpent_crypt_ctr_xway() argument
87 serpent_enc_blk_xway_xor(ctx, (u8 *)dst, (u8 *)ctrblks); serpent_crypt_ctr_xway()
186 struct serpent_ctx *ctx; member in struct:crypt_priv
193 struct crypt_priv *ctx = priv; encrypt_callback() local
196 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); encrypt_callback()
199 serpent_enc_blk_xway(ctx->ctx, srcdst, srcdst); encrypt_callback()
204 __serpent_encrypt(ctx->ctx, srcdst, srcdst); encrypt_callback()
210 struct crypt_priv *ctx = priv; decrypt_callback() local
213 ctx->fpu_enabled = serpent_fpu_begin(ctx->fpu_enabled, nbytes); decrypt_callback()
216 serpent_dec_blk_xway(ctx->ctx, srcdst, srcdst); decrypt_callback()
221 __serpent_decrypt(ctx->ctx, srcdst, srcdst); decrypt_callback()
232 struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_serpent_setkey() local
235 err = __serpent_setkey(&ctx->serpent_ctx, key, keylen - lrw_serpent_setkey()
240 return lrw_init_table(&ctx->lrw_table, key + keylen - lrw_serpent_setkey()
247 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_encrypt() local
250 .ctx = &ctx->serpent_ctx, lrw_encrypt()
257 .table_ctx = &ctx->lrw_table, lrw_encrypt()
273 struct serpent_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_decrypt() local
276 .ctx = &ctx->serpent_ctx, lrw_decrypt()
283 .table_ctx = &ctx->lrw_table, lrw_decrypt()
298 struct serpent_lrw_ctx *ctx = crypto_tfm_ctx(tfm); lrw_exit_tfm() local
300 lrw_free_table(&ctx->lrw_table); lrw_exit_tfm()
311 struct serpent_xts_ctx *ctx = crypto_tfm_ctx(tfm); xts_serpent_setkey() local
324 err = __serpent_setkey(&ctx->crypt_ctx, key, keylen / 2); xts_serpent_setkey()
329 return __serpent_setkey(&ctx->tweak_ctx, key + keylen / 2, keylen / 2); xts_serpent_setkey()
335 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt() local
338 .ctx = &ctx->crypt_ctx, xts_encrypt()
345 .tweak_ctx = &ctx->tweak_ctx, xts_encrypt()
362 struct serpent_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt() local
365 .ctx = &ctx->crypt_ctx, xts_decrypt()
372 .tweak_ctx = &ctx->tweak_ctx, xts_decrypt()
twofish_avx_glue.c 51 asmlinkage void twofish_ecb_enc_8way(struct twofish_ctx *ctx, u8 *dst,
53 asmlinkage void twofish_ecb_dec_8way(struct twofish_ctx *ctx, u8 *dst,
56 asmlinkage void twofish_cbc_dec_8way(struct twofish_ctx *ctx, u8 *dst,
58 asmlinkage void twofish_ctr_8way(struct twofish_ctx *ctx, u8 *dst,
61 asmlinkage void twofish_xts_enc_8way(struct twofish_ctx *ctx, u8 *dst,
63 asmlinkage void twofish_xts_dec_8way(struct twofish_ctx *ctx, u8 *dst,
66 static inline void twofish_enc_blk_3way(struct twofish_ctx *ctx, u8 *dst, twofish_enc_blk_3way() argument
69 __twofish_enc_blk_3way(ctx, dst, src, false); twofish_enc_blk_3way()
72 static void twofish_xts_enc(void *ctx, u128 *dst, const u128 *src, le128 *iv) twofish_xts_enc() argument
74 glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_xts_enc()
78 static void twofish_xts_dec(void *ctx, u128 *dst, const u128 *src, le128 *iv) twofish_xts_dec() argument
80 glue_xts_crypt_128bit_one(ctx, dst, src, iv, twofish_xts_dec()
219 struct twofish_ctx *ctx; member in struct:crypt_priv
226 struct crypt_priv *ctx = priv; encrypt_callback() local
229 ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes); encrypt_callback()
232 twofish_ecb_enc_8way(ctx->ctx, srcdst, srcdst); encrypt_callback()
237 twofish_enc_blk_3way(ctx->ctx, srcdst, srcdst); encrypt_callback()
242 twofish_enc_blk(ctx->ctx, srcdst, srcdst); encrypt_callback()
248 struct crypt_priv *ctx = priv; decrypt_callback() local
251 ctx->fpu_enabled = twofish_fpu_begin(ctx->fpu_enabled, nbytes); decrypt_callback()
254 twofish_ecb_dec_8way(ctx->ctx, srcdst, srcdst); decrypt_callback()
259 twofish_dec_blk_3way(ctx->ctx, srcdst, srcdst); decrypt_callback()
264 twofish_dec_blk(ctx->ctx, srcdst, srcdst); decrypt_callback()
270 struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_encrypt() local
273 .ctx = &ctx->twofish_ctx, lrw_encrypt()
280 .table_ctx = &ctx->lrw_table, lrw_encrypt()
296 struct twofish_lrw_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); lrw_decrypt() local
299 .ctx = &ctx->twofish_ctx, lrw_decrypt()
306 .table_ctx = &ctx->lrw_table, lrw_decrypt()
322 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_encrypt() local
326 &ctx->tweak_ctx, &ctx->crypt_ctx); xts_encrypt()
332 struct twofish_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); xts_decrypt() local
336 &ctx->tweak_ctx, &ctx->crypt_ctx); xts_decrypt()
ghash-clmulni-intel_glue.c 58 struct ghash_ctx *ctx = crypto_shash_ctx(tfm); ghash_setkey() local
71 ctx->shash.a = (b << 1) | (a >> 63); ghash_setkey()
72 ctx->shash.b = (a << 1) | (b >> 63); ghash_setkey()
75 ctx->shash.b ^= ((u64)0xc2) << 56; ghash_setkey()
84 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); ghash_update() local
99 clmul_ghash_mul(dst, &ctx->shash); ghash_update()
102 clmul_ghash_update(dst, src, srclen, &ctx->shash); ghash_update()
116 static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx) ghash_flush() argument
127 clmul_ghash_mul(dst, &ctx->shash); ghash_flush()
137 struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm); ghash_final() local
140 ghash_flush(ctx, dctx); ghash_final()
168 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); ghash_async_init() local
170 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; ghash_async_init()
192 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); ghash_async_update() local
193 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; ghash_async_update()
210 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); ghash_async_final() local
211 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; ghash_async_final()
225 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); ghash_async_digest() local
227 struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm; ghash_async_digest()
246 struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm); ghash_async_setkey() local
247 struct crypto_ahash *child = &ctx->cryptd_tfm->base; ghash_async_setkey()
263 struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); ghash_async_init_tfm() local
270 ctx->cryptd_tfm = cryptd_tfm; ghash_async_init_tfm()
280 struct ghash_async_ctx *ctx = crypto_tfm_ctx(tfm); ghash_async_exit_tfm() local
282 cryptd_free_ahash(ctx->cryptd_tfm); ghash_async_exit_tfm()
/linux-4.1.27/arch/frv/mm/
mmu-context.c 44 static unsigned get_cxn(mm_context_t *ctx) get_cxn() argument
50 if (!list_empty(&ctx->id_link)) { get_cxn()
51 list_move_tail(&ctx->id_link, &cxn_owners_lru); get_cxn()
78 ctx->id = cxn; get_cxn()
79 list_add_tail(&ctx->id_link, &cxn_owners_lru); get_cxn()
82 return ctx->id; get_cxn()
90 void change_mm_context(mm_context_t *old, mm_context_t *ctx, pgd_t *pgd) change_mm_context() argument
106 get_cxn(ctx); change_mm_context()
107 ctx->id_busy = 1; change_mm_context()
110 asm volatile("movgs %0,cxnr" : : "r"(ctx->id)); change_mm_context()
113 asm volatile("movgs %0,scr0" : : "r"(ctx->itlb_cached_pge)); change_mm_context()
114 asm volatile("movgs %0,dampr4" : : "r"(ctx->itlb_ptd_mapping)); change_mm_context()
115 asm volatile("movgs %0,scr1" : : "r"(ctx->dtlb_cached_pge)); change_mm_context()
116 asm volatile("movgs %0,dampr5" : : "r"(ctx->dtlb_ptd_mapping)); change_mm_context()
132 mm_context_t *ctx = &mm->context; destroy_context() local
136 if (!list_empty(&ctx->id_link)) { destroy_context()
137 if (ctx->id == cxn_pinned) destroy_context()
140 list_del_init(&ctx->id_link); destroy_context()
141 clear_bit(ctx->id, cxn_bitmap); destroy_context()
142 __flush_tlb_mm(ctx->id); destroy_context()
143 ctx->id = 0; destroy_context()
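mmu-context.c implements a small address-space-ID allocator: each mm's context sits on an LRU list, get_cxn() either refreshes the context's LRU position or claims a free ID from a bitmap, and destroy_context() clears the bit and flushes the matching TLB entries. A compact model of the reuse-or-allocate step; the bitmap width and the reserved ID 0 below are assumptions for illustration.

    #include <stdbool.h>
    #include <stdio.h>

    #define NR_IDS 64

    static unsigned long long id_bitmap;   /* bit n set = ID n in use */

    struct mm_ctx {
        int  id;
        bool on_lru;     /* stands in for !list_empty(&ctx->id_link) */
    };

    /* Mirrors get_cxn(): a context already holding an ID just gets its
     * LRU position refreshed; otherwise the first free ID is claimed. */
    static int get_ctx_id(struct mm_ctx *c)
    {
        if (c->on_lru)
            return c->id;               /* real code: list_move_tail() */

        for (int id = 1; id < NR_IDS; id++) {   /* ID 0 assumed reserved */
            if (!(id_bitmap & (1ULL << id))) {
                id_bitmap |= 1ULL << id;
                c->id = id;
                c->on_lru = true;
                return id;
            }
        }
        return -1;   /* real code evicts the LRU owner instead of failing */
    }

    int main(void)
    {
        struct mm_ctx m = { .id = 0, .on_lru = false };
        printf("allocated ID %d\n", get_ctx_id(&m));
        printf("reused    ID %d\n", get_ctx_id(&m));
        return 0;
    }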
/linux-4.1.27/drivers/firewire/
ohci.c 114 typedef int (*descriptor_callback_t)(struct context *ctx,
665 static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i) ar_buffer_bus() argument
667 return page_private(ctx->pages[i]); ar_buffer_bus()
670 static void ar_context_link_page(struct ar_context *ctx, unsigned int index) ar_context_link_page() argument
674 d = &ctx->descriptors[index]; ar_context_link_page()
680 d = &ctx->descriptors[ctx->last_buffer_index]; ar_context_link_page()
683 ctx->last_buffer_index = index; ar_context_link_page()
685 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); ar_context_link_page()
688 static void ar_context_release(struct ar_context *ctx) ar_context_release() argument
692 vunmap(ctx->buffer); ar_context_release()
695 if (ctx->pages[i]) { ar_context_release()
696 dma_unmap_page(ctx->ohci->card.device, ar_context_release()
697 ar_buffer_bus(ctx, i), ar_context_release()
699 __free_page(ctx->pages[i]); ar_context_release()
703 static void ar_context_abort(struct ar_context *ctx, const char *error_msg) ar_context_abort() argument
705 struct fw_ohci *ohci = ctx->ohci; ar_context_abort()
707 if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) { ar_context_abort()
708 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); ar_context_abort()
721 static inline unsigned int ar_first_buffer_index(struct ar_context *ctx) ar_first_buffer_index() argument
723 return ar_next_buffer_index(ctx->last_buffer_index); ar_first_buffer_index()
730 static unsigned int ar_search_last_active_buffer(struct ar_context *ctx, ar_search_last_active_buffer() argument
733 unsigned int i, next_i, last = ctx->last_buffer_index; ar_search_last_active_buffer()
736 i = ar_first_buffer_index(ctx); ar_search_last_active_buffer()
737 res_count = ACCESS_ONCE(ctx->descriptors[i].res_count); ar_search_last_active_buffer()
746 ctx->descriptors[next_i].res_count); ar_search_last_active_buffer()
763 ctx->descriptors[next_i].res_count); ar_search_last_active_buffer()
781 ar_context_abort(ctx, "corrupted descriptor"); ar_search_last_active_buffer()
787 static void ar_sync_buffers_for_cpu(struct ar_context *ctx, ar_sync_buffers_for_cpu() argument
793 i = ar_first_buffer_index(ctx); ar_sync_buffers_for_cpu()
795 dma_sync_single_for_cpu(ctx->ohci->card.device, ar_sync_buffers_for_cpu()
796 ar_buffer_bus(ctx, i), ar_sync_buffers_for_cpu()
801 dma_sync_single_for_cpu(ctx->ohci->card.device, ar_sync_buffers_for_cpu()
802 ar_buffer_bus(ctx, i), ar_sync_buffers_for_cpu()
813 static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer) handle_ar_packet() argument
815 struct fw_ohci *ohci = ctx->ohci; handle_ar_packet()
847 ar_context_abort(ctx, "invalid packet length"); handle_ar_packet()
860 ar_context_abort(ctx, "invalid tcode"); handle_ar_packet()
902 } else if (ctx == &ohci->ar_request_ctx) { handle_ar_packet()
911 static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end) handle_ar_packets() argument
916 next = handle_ar_packet(ctx, p); handle_ar_packets()
925 static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer) ar_recycle_buffers() argument
929 i = ar_first_buffer_index(ctx); ar_recycle_buffers()
931 dma_sync_single_for_device(ctx->ohci->card.device, ar_recycle_buffers()
932 ar_buffer_bus(ctx, i), ar_recycle_buffers()
934 ar_context_link_page(ctx, i); ar_recycle_buffers()
941 struct ar_context *ctx = (struct ar_context *)data; ar_context_tasklet() local
945 p = ctx->pointer; ar_context_tasklet()
949 end_buffer_index = ar_search_last_active_buffer(ctx, ar_context_tasklet()
951 ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset); ar_context_tasklet()
952 end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset; ar_context_tasklet()
954 if (end_buffer_index < ar_first_buffer_index(ctx)) { ar_context_tasklet()
961 void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE; ar_context_tasklet()
962 p = handle_ar_packets(ctx, p, buffer_end); ar_context_tasklet()
969 p = handle_ar_packets(ctx, p, end); ar_context_tasklet()
972 ar_context_abort(ctx, "inconsistent descriptor"); ar_context_tasklet()
976 ctx->pointer = p; ar_context_tasklet()
977 ar_recycle_buffers(ctx, end_buffer_index); ar_context_tasklet()
982 ctx->pointer = NULL; ar_context_tasklet()
985 static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci, ar_context_init() argument
993 ctx->regs = regs; ar_context_init()
994 ctx->ohci = ohci; ar_context_init()
995 tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx); ar_context_init()
998 ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32); ar_context_init()
999 if (!ctx->pages[i]) ar_context_init()
1001 dma_addr = dma_map_page(ohci->card.device, ctx->pages[i], ar_context_init()
1004 __free_page(ctx->pages[i]); ar_context_init()
1005 ctx->pages[i] = NULL; ar_context_init()
1008 set_page_private(ctx->pages[i], dma_addr); ar_context_init()
1012 pages[i] = ctx->pages[i]; ar_context_init()
1014 pages[AR_BUFFERS + i] = ctx->pages[i]; ar_context_init()
1015 ctx->buffer = vmap(pages, ARRAY_SIZE(pages), VM_MAP, PAGE_KERNEL); ar_context_init()
1016 if (!ctx->buffer) ar_context_init()
1019 ctx->descriptors = ohci->misc_buffer + descriptors_offset; ar_context_init()
1020 ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset; ar_context_init()
1023 d = &ctx->descriptors[i]; ar_context_init()
1028 d->data_address = cpu_to_le32(ar_buffer_bus(ctx, i)); ar_context_init()
1029 d->branch_address = cpu_to_le32(ctx->descriptors_bus + ar_context_init()
1036 ar_context_release(ctx); ar_context_init()
1041 static void ar_context_run(struct ar_context *ctx) ar_context_run() argument
1046 ar_context_link_page(ctx, i); ar_context_run()
1048 ctx->pointer = ctx->buffer; ar_context_run()
1050 reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1); ar_context_run()
1051 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN); ar_context_run()
1069 struct context *ctx = (struct context *) data; context_tasklet() local
1075 desc = list_entry(ctx->buffer_list.next, context_tasklet()
1077 last = ctx->last; context_tasklet()
1083 ctx->current_bus = address; context_tasklet()
1094 if (!ctx->callback(ctx, d, last)) context_tasklet()
1102 spin_lock_irqsave(&ctx->ohci->lock, flags); context_tasklet()
1103 list_move_tail(&old_desc->list, &ctx->buffer_list); context_tasklet()
1104 spin_unlock_irqrestore(&ctx->ohci->lock, flags); context_tasklet()
1106 ctx->last = last; context_tasklet()
1114 static int context_add_buffer(struct context *ctx) context_add_buffer() argument
1124 if (ctx->total_allocation >= 16*1024*1024) context_add_buffer()
1127 desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE, context_add_buffer()
1137 list_add_tail(&desc->list, &ctx->buffer_list); context_add_buffer()
1138 ctx->total_allocation += PAGE_SIZE; context_add_buffer()
1143 static int context_init(struct context *ctx, struct fw_ohci *ohci, context_init() argument
1146 ctx->ohci = ohci; context_init()
1147 ctx->regs = regs; context_init()
1148 ctx->total_allocation = 0; context_init()
1150 INIT_LIST_HEAD(&ctx->buffer_list); context_init()
1151 if (context_add_buffer(ctx) < 0) context_init()
1154 ctx->buffer_tail = list_entry(ctx->buffer_list.next, context_init()
1157 tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx); context_init()
1158 ctx->callback = callback; context_init()
1165 memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer)); context_init()
1166 ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST); context_init()
1167 ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011); context_init()
1168 ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer); context_init()
1169 ctx->last = ctx->buffer_tail->buffer; context_init()
1170 ctx->prev = ctx->buffer_tail->buffer; context_init()
1171 ctx->prev_z = 1; context_init()
1176 static void context_release(struct context *ctx) context_release() argument
1178 struct fw_card *card = &ctx->ohci->card; context_release()
1181 list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list) context_release()
1188 static struct descriptor *context_get_descriptors(struct context *ctx, context_get_descriptors() argument
1192 struct descriptor_buffer *desc = ctx->buffer_tail; context_get_descriptors()
1201 if (desc->list.next == &ctx->buffer_list) { context_get_descriptors()
1204 if (context_add_buffer(ctx) < 0) context_get_descriptors()
1209 ctx->buffer_tail = desc; context_get_descriptors()
1219 static void context_run(struct context *ctx, u32 extra) context_run() argument
1221 struct fw_ohci *ohci = ctx->ohci; context_run()
1223 reg_write(ohci, COMMAND_PTR(ctx->regs), context_run()
1224 le32_to_cpu(ctx->last->branch_address)); context_run()
1225 reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0); context_run()
1226 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra); context_run()
1227 ctx->running = true; context_run()
1231 static void context_append(struct context *ctx, context_append() argument
1235 struct descriptor_buffer *desc = ctx->buffer_tail; context_append()
1244 d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z); context_append()
1256 if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) && context_append()
1257 d_branch != ctx->prev && context_append()
1258 (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) == context_append()
1260 ctx->prev->branch_address = cpu_to_le32(d_bus | z); context_append()
1263 ctx->prev = d; context_append()
1264 ctx->prev_z = z; context_append()
1267 static void context_stop(struct context *ctx) context_stop() argument
1269 struct fw_ohci *ohci = ctx->ohci; context_stop()
1273 reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN); context_stop()
1274 ctx->running = false; context_stop()
1277 reg = reg_read(ohci, CONTROL_SET(ctx->regs)); context_stop()
1297 static int at_context_queue_packet(struct context *ctx, at_context_queue_packet() argument
1300 struct fw_ohci *ohci = ctx->ohci; at_context_queue_packet()
1307 d = context_get_descriptors(ctx, 4, &d_bus); at_context_queue_packet()
1417 context_append(ctx, d, z, 4 - z); at_context_queue_packet()
1419 if (ctx->running) at_context_queue_packet()
1420 reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); at_context_queue_packet()
1422 context_run(ctx, 0); at_context_queue_packet()
1427 static void at_context_flush(struct context *ctx) at_context_flush() argument
1429 tasklet_disable(&ctx->tasklet); at_context_flush()
1431 ctx->flushing = true; at_context_flush()
1432 context_tasklet((unsigned long)ctx); at_context_flush()
1433 ctx->flushing = false; at_context_flush()
1435 tasklet_enable(&ctx->tasklet); at_context_flush()
1600 static void handle_local_request(struct context *ctx, struct fw_packet *packet) handle_local_request() argument
1604 if (ctx == &ctx->ohci->at_request_ctx) { handle_local_request()
1606 packet->callback(packet, &ctx->ohci->card, packet->ack); handle_local_request()
1617 handle_local_rom(ctx->ohci, packet, csr); handle_local_request()
1623 handle_local_lock(ctx->ohci, packet, csr); handle_local_request()
1626 if (ctx == &ctx->ohci->at_request_ctx) handle_local_request()
1627 fw_core_handle_request(&ctx->ohci->card, packet); handle_local_request()
1629 fw_core_handle_response(&ctx->ohci->card, packet); handle_local_request()
1633 if (ctx == &ctx->ohci->at_response_ctx) { handle_local_request()
1635 packet->callback(packet, &ctx->ohci->card, packet->ack); handle_local_request()
1639 static void at_context_transmit(struct context *ctx, struct fw_packet *packet) at_context_transmit() argument
1644 spin_lock_irqsave(&ctx->ohci->lock, flags); at_context_transmit()
1646 if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id && at_context_transmit()
1647 ctx->ohci->generation == packet->generation) { at_context_transmit()
1648 spin_unlock_irqrestore(&ctx->ohci->lock, flags); at_context_transmit()
1649 handle_local_request(ctx, packet); at_context_transmit()
1653 ret = at_context_queue_packet(ctx, packet); at_context_transmit()
1654 spin_unlock_irqrestore(&ctx->ohci->lock, flags); at_context_transmit()
1657 packet->callback(packet, &ctx->ohci->card, packet->ack); at_context_transmit()
2556 struct context *ctx = &ohci->at_request_ctx; ohci_cancel_packet() local
2560 tasklet_disable(&ctx->tasklet); ohci_cancel_packet()
2575 tasklet_enable(&ctx->tasklet); ohci_cancel_packet()
2734 static void flush_iso_completions(struct iso_context *ctx) flush_iso_completions() argument
2736 ctx->base.callback.sc(&ctx->base, ctx->last_timestamp, flush_iso_completions()
2737 ctx->header_length, ctx->header, flush_iso_completions()
2738 ctx->base.callback_data); flush_iso_completions()
2739 ctx->header_length = 0; flush_iso_completions()
2742 static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr) copy_iso_headers() argument
2746 if (ctx->header_length + ctx->base.header_size > PAGE_SIZE) { copy_iso_headers()
2747 if (ctx->base.drop_overflow_headers) copy_iso_headers()
2749 flush_iso_completions(ctx); copy_iso_headers()
2752 ctx_hdr = ctx->header + ctx->header_length; copy_iso_headers()
2753 ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]); copy_iso_headers()
2760 if (ctx->base.header_size > 0) copy_iso_headers()
2762 if (ctx->base.header_size > 4) copy_iso_headers()
2764 if (ctx->base.header_size > 8) copy_iso_headers()
2765 memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8); copy_iso_headers()
2766 ctx->header_length += ctx->base.header_size; copy_iso_headers()
2773 struct iso_context *ctx = handle_ir_packet_per_buffer() local
2795 copy_iso_headers(ctx, (u32 *) (last + 1)); handle_ir_packet_per_buffer()
2798 flush_iso_completions(ctx); handle_ir_packet_per_buffer()
2808 struct iso_context *ctx = handle_ir_buffer_fill() local
2819 ctx->mc_buffer_bus = buffer_dma; handle_ir_buffer_fill()
2820 ctx->mc_completed = completed; handle_ir_buffer_fill()
2833 ctx->base.callback.mc(&ctx->base, handle_ir_buffer_fill()
2835 ctx->base.callback_data); handle_ir_buffer_fill()
2836 ctx->mc_completed = 0; handle_ir_buffer_fill()
2842 static void flush_ir_buffer_fill(struct iso_context *ctx) flush_ir_buffer_fill() argument
2844 dma_sync_single_range_for_cpu(ctx->context.ohci->card.device, flush_ir_buffer_fill()
2845 ctx->mc_buffer_bus & PAGE_MASK, flush_ir_buffer_fill()
2846 ctx->mc_buffer_bus & ~PAGE_MASK, flush_ir_buffer_fill()
2847 ctx->mc_completed, DMA_FROM_DEVICE); flush_ir_buffer_fill()
2849 ctx->base.callback.mc(&ctx->base, flush_ir_buffer_fill()
2850 ctx->mc_buffer_bus + ctx->mc_completed, flush_ir_buffer_fill()
2851 ctx->base.callback_data); flush_ir_buffer_fill()
2852 ctx->mc_completed = 0; flush_ir_buffer_fill()
2896 struct iso_context *ctx = handle_it_packet() local
2910 if (ctx->header_length + 4 > PAGE_SIZE) { handle_it_packet()
2911 if (ctx->base.drop_overflow_headers) handle_it_packet()
2913 flush_iso_completions(ctx); handle_it_packet()
2916 ctx_hdr = ctx->header + ctx->header_length; handle_it_packet()
2917 ctx->last_timestamp = le16_to_cpu(last->res_count); handle_it_packet()
2921 ctx->header_length += 4; handle_it_packet()
2924 flush_iso_completions(ctx); handle_it_packet()
2945 struct iso_context *uninitialized_var(ctx); ohci_allocate_iso_context()
2961 ctx = &ohci->it_context_list[index]; ohci_allocate_iso_context()
2974 ctx = &ohci->ir_context_list[index]; ohci_allocate_iso_context()
2986 ctx = &ohci->ir_context_list[index]; ohci_allocate_iso_context()
3000 memset(ctx, 0, sizeof(*ctx)); ohci_allocate_iso_context()
3001 ctx->header_length = 0; ohci_allocate_iso_context()
3002 ctx->header = (void *) __get_free_page(GFP_KERNEL); ohci_allocate_iso_context()
3003 if (ctx->header == NULL) { ohci_allocate_iso_context()
3007 ret = context_init(&ctx->context, ohci, regs, callback); ohci_allocate_iso_context()
3013 ctx->mc_completed = 0; ohci_allocate_iso_context()
3016 return &ctx->base; ohci_allocate_iso_context()
3019 free_page((unsigned long)ctx->header); ohci_allocate_iso_context()
3042 struct iso_context *ctx = container_of(base, struct iso_context, base); ohci_start_iso() local
3043 struct fw_ohci *ohci = ctx->context.ohci; ohci_start_iso()
3048 if (ctx->context.last->branch_address == 0) ohci_start_iso()
3051 switch (ctx->base.type) { ohci_start_iso()
3053 index = ctx - ohci->it_context_list; ohci_start_iso()
3061 context_run(&ctx->context, match); ohci_start_iso()
3068 index = ctx - ohci->ir_context_list; ohci_start_iso()
3069 match = (tags << 28) | (sync << 8) | ctx->base.channel; ohci_start_iso()
3077 reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match); ohci_start_iso()
3078 context_run(&ctx->context, control); ohci_start_iso()
3080 ctx->sync = sync; ohci_start_iso()
3081 ctx->tags = tags; ohci_start_iso()
3092 struct iso_context *ctx = container_of(base, struct iso_context, base); ohci_stop_iso() local
3095 switch (ctx->base.type) { ohci_stop_iso()
3097 index = ctx - ohci->it_context_list; ohci_stop_iso()
3103 index = ctx - ohci->ir_context_list; ohci_stop_iso()
3108 context_stop(&ctx->context); ohci_stop_iso()
3109 tasklet_kill(&ctx->context.tasklet); ohci_stop_iso()
3117 struct iso_context *ctx = container_of(base, struct iso_context, base); ohci_free_iso_context() local
3122 context_release(&ctx->context); ohci_free_iso_context()
3123 free_page((unsigned long)ctx->header); ohci_free_iso_context()
3129 index = ctx - ohci->it_context_list; ohci_free_iso_context()
3134 index = ctx - ohci->ir_context_list; ohci_free_iso_context()
3140 index = ctx - ohci->ir_context_list; ohci_free_iso_context()
3185 struct iso_context *ctx; ohci_resume_iso_dma() local
3188 ctx = &ohci->ir_context_list[i]; ohci_resume_iso_dma()
3189 if (ctx->context.running) ohci_resume_iso_dma()
3190 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); ohci_resume_iso_dma()
3194 ctx = &ohci->it_context_list[i]; ohci_resume_iso_dma()
3195 if (ctx->context.running) ohci_resume_iso_dma()
3196 ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags); ohci_resume_iso_dma()
3201 static int queue_iso_transmit(struct iso_context *ctx, queue_iso_transmit() argument
3236 d = context_get_descriptors(&ctx->context, z + header_z, &d_bus); queue_iso_transmit()
3256 IT_HEADER_CHANNEL(ctx->base.channel) | queue_iso_transmit()
3257 IT_HEADER_SPEED(ctx->base.speed)); queue_iso_transmit()
3282 dma_sync_single_range_for_device(ctx->context.ohci->card.device, queue_iso_transmit()
3300 context_append(&ctx->context, d, z, header_z); queue_iso_transmit()
3305 static int queue_iso_packet_per_buffer(struct iso_context *ctx, queue_iso_packet_per_buffer() argument
3310 struct device *device = ctx->context.ohci->card.device; queue_iso_packet_per_buffer()
3321 packet_count = packet->header_length / ctx->base.header_size; queue_iso_packet_per_buffer()
3322 header_size = max(ctx->base.header_size, (size_t)8); queue_iso_packet_per_buffer()
3333 d = context_get_descriptors(&ctx->context, queue_iso_packet_per_buffer()
3380 context_append(&ctx->context, d, z, header_z); queue_iso_packet_per_buffer()
3386 static int queue_iso_buffer_fill(struct iso_context *ctx, queue_iso_buffer_fill() argument
3406 d = context_get_descriptors(&ctx->context, 1, &d_bus); queue_iso_buffer_fill()
3428 dma_sync_single_range_for_device(ctx->context.ohci->card.device, queue_iso_buffer_fill()
3436 context_append(&ctx->context, d, 1, 0); queue_iso_buffer_fill()
3447 struct iso_context *ctx = container_of(base, struct iso_context, base); ohci_queue_iso() local
3451 spin_lock_irqsave(&ctx->context.ohci->lock, flags); ohci_queue_iso()
3454 ret = queue_iso_transmit(ctx, packet, buffer, payload); ohci_queue_iso()
3457 ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload); ohci_queue_iso()
3460 ret = queue_iso_buffer_fill(ctx, packet, buffer, payload); ohci_queue_iso()
3463 spin_unlock_irqrestore(&ctx->context.ohci->lock, flags); ohci_queue_iso()
3470 struct context *ctx = ohci_flush_queue_iso() local
3473 reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE); ohci_flush_queue_iso()
3478 struct iso_context *ctx = container_of(base, struct iso_context, base); ohci_flush_iso_completions() local
3481 tasklet_disable(&ctx->context.tasklet); ohci_flush_iso_completions()
3483 if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) { ohci_flush_iso_completions()
3484 context_tasklet((unsigned long)&ctx->context); ohci_flush_iso_completions()
3489 if (ctx->header_length != 0) ohci_flush_iso_completions()
3490 flush_iso_completions(ctx); ohci_flush_iso_completions()
3493 if (ctx->mc_completed != 0) ohci_flush_iso_completions()
3494 flush_ir_buffer_fill(ctx); ohci_flush_iso_completions()
3500 clear_bit_unlock(0, &ctx->flushing_completions); ohci_flush_iso_completions()
3504 tasklet_enable(&ctx->context.tasklet); ohci_flush_iso_completions()
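The fw-ohci AR context above vmaps every ring page a second time directly after the ring (pages[AR_BUFFERS + i] = ctx->pages[i], line 1014), so a packet that wraps past the last buffer stays contiguous in virtual memory and handle_ar_packets() never needs a split copy. A minimal user-space sketch of the same trick, assuming nothing beyond libc; mirrored writes stand in for the double vmap, and all names are invented:

    #include <stdio.h>

    #define RING_SIZE 8

    static char ring[2 * RING_SIZE];        /* second half mirrors the first */

    static void ring_put(unsigned int pos, char c)
    {
            pos %= RING_SIZE;
            ring[pos] = c;
            ring[pos + RING_SIZE] = c;      /* keep the mirror in sync */
    }

    int main(void)
    {
            const char *msg = "wrap";
            unsigned int start = RING_SIZE - 2;     /* record crosses the wrap */

            for (unsigned int i = 0; msg[i]; i++)
                    ring_put(start + i, msg[i]);

            /* The consumer reads across the boundary with one plain pointer. */
            printf("%.4s\n", ring + start);         /* prints "wrap" */
            return 0;
    }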
H A Dcore-iso.c167 struct fw_iso_context *ctx; fw_iso_context_create() local
169 ctx = card->driver->allocate_iso_context(card, fw_iso_context_create()
171 if (IS_ERR(ctx)) fw_iso_context_create()
172 return ctx; fw_iso_context_create()
174 ctx->card = card; fw_iso_context_create()
175 ctx->type = type; fw_iso_context_create()
176 ctx->channel = channel; fw_iso_context_create()
177 ctx->speed = speed; fw_iso_context_create()
178 ctx->header_size = header_size; fw_iso_context_create()
179 ctx->callback.sc = callback; fw_iso_context_create()
180 ctx->callback_data = callback_data; fw_iso_context_create()
182 return ctx; fw_iso_context_create()
186 void fw_iso_context_destroy(struct fw_iso_context *ctx) fw_iso_context_destroy() argument
188 ctx->card->driver->free_iso_context(ctx); fw_iso_context_destroy()
192 int fw_iso_context_start(struct fw_iso_context *ctx, fw_iso_context_start() argument
195 return ctx->card->driver->start_iso(ctx, cycle, sync, tags); fw_iso_context_start()
199 int fw_iso_context_set_channels(struct fw_iso_context *ctx, u64 *channels) fw_iso_context_set_channels() argument
201 return ctx->card->driver->set_iso_channels(ctx, channels); fw_iso_context_set_channels()
204 int fw_iso_context_queue(struct fw_iso_context *ctx, fw_iso_context_queue() argument
209 return ctx->card->driver->queue_iso(ctx, packet, buffer, payload); fw_iso_context_queue()
213 void fw_iso_context_queue_flush(struct fw_iso_context *ctx) fw_iso_context_queue_flush() argument
215 ctx->card->driver->flush_queue_iso(ctx); fw_iso_context_queue_flush()
219 int fw_iso_context_flush_completions(struct fw_iso_context *ctx) fw_iso_context_flush_completions() argument
221 return ctx->card->driver->flush_iso_completions(ctx); fw_iso_context_flush_completions()
225 int fw_iso_context_stop(struct fw_iso_context *ctx) fw_iso_context_stop() argument
227 return ctx->card->driver->stop_iso(ctx); fw_iso_context_stop()
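Every fw_iso_context_* entry point above is a one-line delegation through ctx->card->driver, the classic ops-table dispatch. A self-contained sketch of that pattern; the struct and function names here are invented for illustration, not taken from the firewire API:

    #include <stdio.h>

    struct iso_ctx;

    struct iso_driver_ops {
            int (*start)(struct iso_ctx *ctx, int cycle);
            int (*stop)(struct iso_ctx *ctx);
    };

    struct iso_ctx {
            const struct iso_driver_ops *ops;
    };

    /* one-line delegation, like fw_iso_context_start() above */
    static int iso_ctx_start(struct iso_ctx *ctx, int cycle)
    {
            return ctx->ops->start(ctx, cycle);
    }

    static int demo_start(struct iso_ctx *ctx, int cycle)
    {
            (void)ctx;
            printf("started at cycle %d\n", cycle);
            return 0;
    }

    static int demo_stop(struct iso_ctx *ctx)
    {
            (void)ctx;
            return 0;
    }

    static const struct iso_driver_ops demo_ops = { demo_start, demo_stop };

    int main(void)
    {
            struct iso_ctx ctx = { .ops = &demo_ops };

            return iso_ctx_start(&ctx, -1);
    }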
/linux-4.1.27/include/net/netfilter/
H A Dnft_masq.h10 int nft_masq_init(const struct nft_ctx *ctx,
16 int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
H A Dnft_redir.h12 int nft_redir_init(const struct nft_ctx *ctx,
18 int nft_redir_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
/linux-4.1.27/arch/powerpc/platforms/cell/
H A Dspu_notify.c31 void spu_switch_notify(struct spu *spu, struct spu_context *ctx) spu_switch_notify() argument
34 ctx ? ctx->object_id : 0, spu); spu_switch_notify()
54 void spu_set_profile_private_kref(struct spu_context *ctx, spu_set_profile_private_kref() argument
58 ctx->prof_priv_kref = prof_info_kref; spu_set_profile_private_kref()
59 ctx->prof_priv_release = prof_info_release; spu_set_profile_private_kref()
63 void *spu_get_profile_private_kref(struct spu_context *ctx) spu_get_profile_private_kref() argument
65 return ctx->prof_priv_kref; spu_get_profile_private_kref()
/linux-4.1.27/tools/perf/ui/gtk/
H A Dutil.c12 struct perf_gtk_context *ctx; perf_gtk__activate_context() local
14 ctx = malloc(sizeof(*pgctx)); perf_gtk__activate_context()
15 if (ctx) perf_gtk__activate_context()
16 ctx->main_window = window; perf_gtk__activate_context()
18 return ctx; perf_gtk__activate_context()
21 int perf_gtk__deactivate_context(struct perf_gtk_context **ctx) perf_gtk__deactivate_context() argument
23 if (!perf_gtk__is_active_context(*ctx)) perf_gtk__deactivate_context()
26 zfree(ctx); perf_gtk__deactivate_context()
H A Dgtk.h28 static inline bool perf_gtk__is_active_context(struct perf_gtk_context *ctx) perf_gtk__is_active_context() argument
30 return ctx && ctx->main_window; perf_gtk__is_active_context()
34 int perf_gtk__deactivate_context(struct perf_gtk_context **ctx);
/linux-4.1.27/samples/bpf/
H A Dtracex4_kern.c28 int bpf_prog1(struct pt_regs *ctx) bpf_prog1() argument
30 long ptr = ctx->si; bpf_prog1()
37 int bpf_prog2(struct pt_regs *ctx) bpf_prog2() argument
39 long ptr = ctx->ax; bpf_prog2()
43 bpf_probe_read(&ip, sizeof(ip), (void *)(ctx->bp + sizeof(ip))); bpf_prog2()
/linux-4.1.27/drivers/gpu/drm/nouveau/nvkm/subdev/mxm/
H A Dnv50.c43 struct context *ctx = info; mxm_match_tmds_partner() local
48 desc.dig_conn == ctx->desc.dig_conn) mxm_match_tmds_partner()
57 struct context *ctx = info; mxm_match_dcb() local
60 mxms_output_device(mxm, data, &ctx->desc); mxm_match_dcb()
63 if ((ctx->outp[0] & 0x0000000f) != ctx->desc.outp_type) mxm_match_dcb()
72 u8 link = mxm_sor_map(bios, ctx->desc.dig_conn); mxm_match_dcb()
73 if ((ctx->outp[0] & 0x0f000000) != (link & 0x0f) << 24) mxm_match_dcb()
78 if ((link & ((ctx->outp[1] & 0x00000030) >> 4)) != link) mxm_match_dcb()
88 if (ctx->desc.outp_type == 6 && ctx->desc.conn_type == 6 && mxm_match_dcb()
89 mxms_foreach(mxm, 0x01, mxm_match_tmds_partner, ctx)) { mxm_match_dcb()
102 struct context ctx = { .outp = (u32 *)(bios->data + pdcb) }; mxm_dcb_sanitise_entry() local
109 if (mxms_foreach(mxm, 0x01, mxm_match_dcb, &ctx)) { mxm_dcb_sanitise_entry()
111 idx, ctx.outp[0], ctx.outp[1]); mxm_dcb_sanitise_entry()
112 ctx.outp[0] |= 0x0000000f; mxm_dcb_sanitise_entry()
120 i2cidx = mxm_ddc_map(bios, ctx.desc.ddc_port); mxm_dcb_sanitise_entry()
121 if ((ctx.outp[0] & 0x0000000f) != DCB_OUTPUT_DP) mxm_dcb_sanitise_entry()
127 ctx.outp[0] &= ~0x000000f0; mxm_dcb_sanitise_entry()
128 ctx.outp[0] |= i2cidx; mxm_dcb_sanitise_entry()
132 switch (ctx.desc.outp_type) { mxm_dcb_sanitise_entry()
137 link = mxm_sor_map(bios, ctx.desc.dig_conn) & 0x30; mxm_dcb_sanitise_entry()
138 ctx.outp[1] &= ~0x00000030; mxm_dcb_sanitise_entry()
139 ctx.outp[1] |= link; mxm_dcb_sanitise_entry()
151 conn += nvbios_connEe(bios, (ctx.outp[0] & 0x0000f000) >> 12, &ver, &len); mxm_dcb_sanitise_entry()
153 switch (ctx.desc.conn_type) { mxm_dcb_sanitise_entry()
155 ctx.outp[1] |= 0x00000004; /* use_power_scripts */ mxm_dcb_sanitise_entry()
165 ctx.outp[1] |= 0x00010000; mxm_dcb_sanitise_entry()
167 ctx.outp[1] |= 0x00000004; /* use_power_scripts? */ mxm_dcb_sanitise_entry()
/linux-4.1.27/lib/mpi/
H A Dmpih-mul.c337 struct karatsuba_ctx *ctx) mpihelp_mul_karatsuba_case()
341 if (!ctx->tspace || ctx->tspace_size < vsize) { mpihelp_mul_karatsuba_case()
342 if (ctx->tspace) mpihelp_mul_karatsuba_case()
343 mpi_free_limb_space(ctx->tspace); mpihelp_mul_karatsuba_case()
344 ctx->tspace = mpi_alloc_limb_space(2 * vsize); mpihelp_mul_karatsuba_case()
345 if (!ctx->tspace) mpihelp_mul_karatsuba_case()
347 ctx->tspace_size = vsize; mpihelp_mul_karatsuba_case()
350 MPN_MUL_N_RECURSE(prodp, up, vp, vsize, ctx->tspace); mpihelp_mul_karatsuba_case()
356 if (!ctx->tp || ctx->tp_size < vsize) { mpihelp_mul_karatsuba_case()
357 if (ctx->tp) mpihelp_mul_karatsuba_case()
358 mpi_free_limb_space(ctx->tp); mpihelp_mul_karatsuba_case()
359 ctx->tp = mpi_alloc_limb_space(2 * vsize); mpihelp_mul_karatsuba_case()
360 if (!ctx->tp) { mpihelp_mul_karatsuba_case()
361 if (ctx->tspace) mpihelp_mul_karatsuba_case()
362 mpi_free_limb_space(ctx->tspace); mpihelp_mul_karatsuba_case()
363 ctx->tspace = NULL; mpihelp_mul_karatsuba_case()
366 ctx->tp_size = vsize; mpihelp_mul_karatsuba_case()
370 MPN_MUL_N_RECURSE(ctx->tp, up, vp, vsize, ctx->tspace); mpihelp_mul_karatsuba_case()
371 cy = mpihelp_add_n(prodp, prodp, ctx->tp, vsize); mpihelp_mul_karatsuba_case()
372 mpihelp_add_1(prodp + vsize, ctx->tp + vsize, vsize, mpihelp_mul_karatsuba_case()
383 if (mpihelp_mul(ctx->tspace, vp, vsize, up, usize, &tmp) mpihelp_mul_karatsuba_case()
387 if (!ctx->next) { mpihelp_mul_karatsuba_case()
388 ctx->next = kzalloc(sizeof *ctx, GFP_KERNEL); mpihelp_mul_karatsuba_case()
389 if (!ctx->next) mpihelp_mul_karatsuba_case()
392 if (mpihelp_mul_karatsuba_case(ctx->tspace, mpihelp_mul_karatsuba_case()
395 ctx->next) < 0) mpihelp_mul_karatsuba_case()
399 cy = mpihelp_add_n(prodp, prodp, ctx->tspace, vsize); mpihelp_mul_karatsuba_case()
400 mpihelp_add_1(prodp + vsize, ctx->tspace + vsize, usize, cy); mpihelp_mul_karatsuba_case()
406 void mpihelp_release_karatsuba_ctx(struct karatsuba_ctx *ctx) mpihelp_release_karatsuba_ctx() argument
410 if (ctx->tp) mpihelp_release_karatsuba_ctx()
411 mpi_free_limb_space(ctx->tp); mpihelp_release_karatsuba_ctx()
412 if (ctx->tspace) mpihelp_release_karatsuba_ctx()
413 mpi_free_limb_space(ctx->tspace); mpihelp_release_karatsuba_ctx()
414 for (ctx = ctx->next; ctx; ctx = ctx2) { mpihelp_release_karatsuba_ctx()
415 ctx2 = ctx->next; mpihelp_release_karatsuba_ctx()
416 if (ctx->tp) mpihelp_release_karatsuba_ctx()
417 mpi_free_limb_space(ctx->tp); mpihelp_release_karatsuba_ctx()
418 if (ctx->tspace) mpihelp_release_karatsuba_ctx()
419 mpi_free_limb_space(ctx->tspace); mpihelp_release_karatsuba_ctx()
420 kfree(ctx); mpihelp_release_karatsuba_ctx()
445 struct karatsuba_ctx ctx; mpihelp_mul() local
491 memset(&ctx, 0, sizeof ctx); mpihelp_mul()
492 if (mpihelp_mul_karatsuba_case(prodp, up, usize, vp, vsize, &ctx) < 0) mpihelp_mul()
494 mpihelp_release_karatsuba_ctx(&ctx); mpihelp_mul()
334 mpihelp_mul_karatsuba_case(mpi_ptr_t prodp, mpi_ptr_t up, mpi_size_t usize, mpi_ptr_t vp, mpi_size_t vsize, struct karatsuba_ctx *ctx) mpihelp_mul_karatsuba_case() argument
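The karatsuba_ctx above keeps its tspace scratch buffer alive across calls and reallocates only when the requested size grows. A user-space sketch of that grow-only reuse policy, with plain malloc standing in for mpi_alloc_limb_space() and the struct reduced to the relevant fields:

    #include <stdlib.h>

    struct scratch_ctx {
            unsigned long *tspace;
            size_t tspace_size;             /* capacity in limbs */
    };

    /* Reallocate only on growth; the common path reuses the old buffer. */
    static int scratch_reserve(struct scratch_ctx *ctx, size_t limbs)
    {
            if (!ctx->tspace || ctx->tspace_size < limbs) {
                    free(ctx->tspace);
                    ctx->tspace = malloc(2 * limbs * sizeof(*ctx->tspace));
                    if (!ctx->tspace)
                            return -1;
                    ctx->tspace_size = limbs;
            }
            return 0;
    }

    static void scratch_release(struct scratch_ctx *ctx)
    {
            free(ctx->tspace);
            ctx->tspace = NULL;
            ctx->tspace_size = 0;
    }

    int main(void)
    {
            struct scratch_ctx ctx = { 0 };
            int ret = scratch_reserve(&ctx, 16);    /* allocates */

            if (!ret)
                    ret = scratch_reserve(&ctx, 8); /* reuses, no realloc */
            scratch_release(&ctx);
            return ret;
    }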
/linux-4.1.27/arch/powerpc/crypto/
H A Daes-spe-glue.c95 struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm); ppc_aes_setkey() local
106 ctx->rounds = 4; ppc_aes_setkey()
107 ppc_expand_key_128(ctx->key_enc, in_key); ppc_aes_setkey()
110 ctx->rounds = 5; ppc_aes_setkey()
111 ppc_expand_key_192(ctx->key_enc, in_key); ppc_aes_setkey()
114 ctx->rounds = 6; ppc_aes_setkey()
115 ppc_expand_key_256(ctx->key_enc, in_key); ppc_aes_setkey()
119 ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len); ppc_aes_setkey()
127 struct ppc_xts_ctx *ctx = crypto_tfm_ctx(tfm); ppc_xts_setkey() local
140 ctx->rounds = 4; ppc_xts_setkey()
141 ppc_expand_key_128(ctx->key_enc, in_key); ppc_xts_setkey()
142 ppc_expand_key_128(ctx->key_twk, in_key + AES_KEYSIZE_128); ppc_xts_setkey()
145 ctx->rounds = 5; ppc_xts_setkey()
146 ppc_expand_key_192(ctx->key_enc, in_key); ppc_xts_setkey()
147 ppc_expand_key_192(ctx->key_twk, in_key + AES_KEYSIZE_192); ppc_xts_setkey()
150 ctx->rounds = 6; ppc_xts_setkey()
151 ppc_expand_key_256(ctx->key_enc, in_key); ppc_xts_setkey()
152 ppc_expand_key_256(ctx->key_twk, in_key + AES_KEYSIZE_256); ppc_xts_setkey()
156 ppc_generate_decrypt_key(ctx->key_dec, ctx->key_enc, key_len); ppc_xts_setkey()
163 struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm); ppc_aes_encrypt() local
166 ppc_encrypt_aes(out, in, ctx->key_enc, ctx->rounds); ppc_aes_encrypt()
172 struct ppc_aes_ctx *ctx = crypto_tfm_ctx(tfm); ppc_aes_decrypt() local
175 ppc_decrypt_aes(out, in, ctx->key_dec, ctx->rounds); ppc_aes_decrypt()
182 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_ecb_encrypt() local
198 ctx->key_enc, ctx->rounds, nbytes); ppc_ecb_encrypt()
210 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_ecb_decrypt() local
226 ctx->key_dec, ctx->rounds, nbytes); ppc_ecb_decrypt()
238 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_cbc_encrypt() local
254 ctx->key_enc, ctx->rounds, nbytes, walk.iv); ppc_cbc_encrypt()
266 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_cbc_decrypt() local
282 ctx->key_dec, ctx->rounds, nbytes, walk.iv); ppc_cbc_decrypt()
294 struct ppc_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_ctr_crypt() local
311 ctx->key_enc, ctx->rounds, pbytes , walk.iv); ppc_ctr_crypt()
324 struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_xts_encrypt() local
333 twk = ctx->key_twk; ppc_xts_encrypt()
342 ctx->key_enc, ctx->rounds, nbytes, walk.iv, twk); ppc_xts_encrypt()
355 struct ppc_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ppc_xts_decrypt() local
364 twk = ctx->key_twk; ppc_xts_decrypt()
373 ctx->key_dec, ctx->rounds, nbytes, walk.iv, twk); ppc_xts_decrypt()
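ppc_aes_setkey() above dispatches on key length to pick the expansion routine and a rounds value; the 4/5/6 it stores is a driver-private encoding consumed by the SPE assembly core. A sketch of the same dispatch using the textbook AES round counts instead, so the macro names match the kernel's but the values deliberately do not match the driver's private ones:

    #include <stdio.h>

    #define AES_KEYSIZE_128 16
    #define AES_KEYSIZE_192 24
    #define AES_KEYSIZE_256 32

    static int aes_rounds_for_keylen(unsigned int key_len)
    {
            switch (key_len) {
            case AES_KEYSIZE_128: return 10;
            case AES_KEYSIZE_192: return 12;
            case AES_KEYSIZE_256: return 14;
            default:              return -1;        /* reject, as with -EINVAL */
            }
    }

    int main(void)
    {
            printf("%d\n", aes_rounds_for_keylen(AES_KEYSIZE_192)); /* 12 */
            return 0;
    }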
/linux-4.1.27/fs/qnx4/
H A Ddir.c17 static int qnx4_readdir(struct file *file, struct dir_context *ctx) qnx4_readdir() argument
29 QNX4DEBUG((KERN_INFO "pos = %ld\n", (long) ctx->pos)); qnx4_readdir()
31 while (ctx->pos < inode->i_size) { qnx4_readdir()
32 blknum = qnx4_block_map(inode, ctx->pos >> QNX4_BLOCK_SIZE_BITS); qnx4_readdir()
38 ix = (ctx->pos >> QNX4_DIR_ENTRY_SIZE_BITS) % QNX4_INODES_PER_BLOCK; qnx4_readdir()
39 for (; ix < QNX4_INODES_PER_BLOCK; ix++, ctx->pos += QNX4_DIR_ENTRY_SIZE) { qnx4_readdir()
60 if (!dir_emit(ctx, de->di_fname, size, ino, DT_UNKNOWN)) { qnx4_readdir()
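qnx4_readdir() above splits the byte position ctx->pos into a block number and an entry index by shifting with QNX4_BLOCK_SIZE_BITS and QNX4_DIR_ENTRY_SIZE_BITS. A standalone sketch of that arithmetic using the QNX4 sizes (512-byte blocks, 64-byte fixed-size entries):

    #include <stdio.h>

    #define BLOCK_SIZE_BITS   9             /* 512-byte blocks */
    #define DIR_ENTRY_BITS    6             /* 64-byte entries */
    #define ENTRIES_PER_BLOCK (1 << (BLOCK_SIZE_BITS - DIR_ENTRY_BITS))

    int main(void)
    {
            unsigned long pos = 1234;       /* byte offset into the directory */
            unsigned long blk = pos >> BLOCK_SIZE_BITS;
            unsigned int ix = (pos >> DIR_ENTRY_BITS) % ENTRIES_PER_BLOCK;

            printf("block %lu, entry %u\n", blk, ix);   /* block 2, entry 3 */
            return 0;
    }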
/linux-4.1.27/arch/metag/include/asm/
H A Dsyscall.h32 if (get_user(insn, (unsigned long *)(regs->ctx.CurrPC - 4))) syscall_get_nr()
36 return regs->ctx.DX[0].U1; syscall_get_nr()
50 unsigned long error = regs->ctx.DX[0].U0; syscall_get_error()
57 return regs->ctx.DX[0].U0; syscall_get_return_value()
64 regs->ctx.DX[0].U0 = (long) error ?: val; syscall_set_return_value()
77 args[j] = regs->ctx.DX[(reg + 1) / 2].U0; syscall_get_arguments()
79 args[j] = regs->ctx.DX[reg / 2].U1; syscall_get_arguments()
93 regs->ctx.DX[(reg + 1) / 2].U0 = args[i]; syscall_set_arguments()
95 regs->ctx.DX[reg / 2].U1 = args[i]; syscall_set_arguments()
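The metag syscall helpers above pack arguments into the 32-bit halves of 64-bit D-register pairs: odd argument indices read the next pair's U0, even ones the current pair's U1. A sketch of just that index arithmetic with a toy register file; the ABI's base offset to the first argument register is elided here:

    #include <stdio.h>

    struct dx_reg { unsigned int U0, U1; };

    /* Mirrors the indexing quoted above. */
    static unsigned int get_syscall_arg(const struct dx_reg *DX, unsigned int reg)
    {
            if (reg % 2)
                    return DX[(reg + 1) / 2].U0;    /* odd -> next pair's U0 */
            return DX[reg / 2].U1;                  /* even -> this pair's U1 */
    }

    int main(void)
    {
            struct dx_reg DX[4] = {
                    { 0, 100 }, { 101, 102 }, { 103, 104 }, { 105, 0 },
            };

            for (unsigned int reg = 0; reg < 6; reg++)      /* 100..105 */
                    printf("arg%u = %u\n", reg, get_syscall_arg(DX, reg));
            return 0;
    }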
/linux-4.1.27/net/ipv4/netfilter/
H A Dnf_nat_snmp_basic.c152 static void asn1_open(struct asn1_ctx *ctx, asn1_open() argument
156 ctx->begin = buf; asn1_open()
157 ctx->end = buf + len; asn1_open()
158 ctx->pointer = buf; asn1_open()
159 ctx->error = ASN1_ERR_NOERROR; asn1_open()
162 static unsigned char asn1_octet_decode(struct asn1_ctx *ctx, unsigned char *ch) asn1_octet_decode() argument
164 if (ctx->pointer >= ctx->end) { asn1_octet_decode()
165 ctx->error = ASN1_ERR_DEC_EMPTY; asn1_octet_decode()
168 *ch = *(ctx->pointer)++; asn1_octet_decode()
172 static unsigned char asn1_tag_decode(struct asn1_ctx *ctx, unsigned int *tag) asn1_tag_decode() argument
180 if (!asn1_octet_decode(ctx, &ch)) asn1_tag_decode()
188 static unsigned char asn1_id_decode(struct asn1_ctx *ctx, asn1_id_decode() argument
195 if (!asn1_octet_decode(ctx, &ch)) asn1_id_decode()
203 if (!asn1_tag_decode(ctx, tag)) asn1_id_decode()
209 static unsigned char asn1_length_decode(struct asn1_ctx *ctx, asn1_length_decode() argument
215 if (!asn1_octet_decode(ctx, &ch)) asn1_length_decode()
230 if (!asn1_octet_decode(ctx, &ch)) asn1_length_decode()
239 /* don't trust len bigger than ctx buffer */ asn1_length_decode()
240 if (*len > ctx->end - ctx->pointer) asn1_length_decode()
246 static unsigned char asn1_header_decode(struct asn1_ctx *ctx, asn1_header_decode() argument
254 if (!asn1_id_decode(ctx, cls, con, tag)) asn1_header_decode()
258 if (!asn1_length_decode(ctx, &def, &len)) asn1_header_decode()
266 *eoc = ctx->pointer + len; asn1_header_decode()
272 static unsigned char asn1_eoc_decode(struct asn1_ctx *ctx, unsigned char *eoc) asn1_eoc_decode() argument
277 if (!asn1_octet_decode(ctx, &ch)) asn1_eoc_decode()
281 ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; asn1_eoc_decode()
285 if (!asn1_octet_decode(ctx, &ch)) asn1_eoc_decode()
289 ctx->error = ASN1_ERR_DEC_EOC_MISMATCH; asn1_eoc_decode()
294 if (ctx->pointer != eoc) { asn1_eoc_decode()
295 ctx->error = ASN1_ERR_DEC_LENGTH_MISMATCH; asn1_eoc_decode()
302 static unsigned char asn1_null_decode(struct asn1_ctx *ctx, unsigned char *eoc) asn1_null_decode() argument
304 ctx->pointer = eoc; asn1_null_decode()
308 static unsigned char asn1_long_decode(struct asn1_ctx *ctx, asn1_long_decode() argument
315 if (!asn1_octet_decode(ctx, &ch)) asn1_long_decode()
321 while (ctx->pointer < eoc) { asn1_long_decode()
323 ctx->error = ASN1_ERR_DEC_BADVALUE; asn1_long_decode()
327 if (!asn1_octet_decode(ctx, &ch)) asn1_long_decode()
336 static unsigned char asn1_uint_decode(struct asn1_ctx *ctx, asn1_uint_decode() argument
343 if (!asn1_octet_decode(ctx, &ch)) asn1_uint_decode()
350 while (ctx->pointer < eoc) { asn1_uint_decode()
352 ctx->error = ASN1_ERR_DEC_BADVALUE; asn1_uint_decode()
356 if (!asn1_octet_decode(ctx, &ch)) asn1_uint_decode()
365 static unsigned char asn1_ulong_decode(struct asn1_ctx *ctx, asn1_ulong_decode() argument
372 if (!asn1_octet_decode(ctx, &ch)) asn1_ulong_decode()
379 while (ctx->pointer < eoc) { asn1_ulong_decode()
381 ctx->error = ASN1_ERR_DEC_BADVALUE; asn1_ulong_decode()
385 if (!asn1_octet_decode(ctx, &ch)) asn1_ulong_decode()
394 static unsigned char asn1_octets_decode(struct asn1_ctx *ctx, asn1_octets_decode() argument
403 *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); asn1_octets_decode()
408 while (ctx->pointer < eoc) { asn1_octets_decode()
409 if (!asn1_octet_decode(ctx, ptr++)) { asn1_octets_decode()
419 static unsigned char asn1_subid_decode(struct asn1_ctx *ctx, asn1_subid_decode() argument
427 if (!asn1_octet_decode(ctx, &ch)) asn1_subid_decode()
436 static unsigned char asn1_oid_decode(struct asn1_ctx *ctx, asn1_oid_decode() argument
445 size = eoc - ctx->pointer + 1; asn1_oid_decode()
457 if (!asn1_subid_decode(ctx, &subid)) { asn1_oid_decode()
477 while (ctx->pointer < eoc) { asn1_oid_decode()
479 ctx->error = ASN1_ERR_DEC_BADVALUE; asn1_oid_decode()
485 if (!asn1_subid_decode(ctx, optr++)) { asn1_oid_decode()
671 static unsigned char snmp_object_decode(struct asn1_ctx *ctx, snmp_object_decode() argument
684 if (!asn1_header_decode(ctx, &eoc, &cls, &con, &tag)) snmp_object_decode()
690 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) snmp_object_decode()
696 if (!asn1_oid_decode(ctx, end, &id, &idlen)) snmp_object_decode()
699 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) { snmp_object_decode()
719 if (!asn1_long_decode(ctx, end, &l)) { snmp_object_decode()
732 if (!asn1_octets_decode(ctx, end, &p, &len)) { snmp_object_decode()
755 if (!asn1_null_decode(ctx, end)) { snmp_object_decode()
763 if (!asn1_oid_decode(ctx, end, &lp, &len)) { snmp_object_decode()
778 if (!asn1_octets_decode(ctx, end, &p, &len)) { snmp_object_decode()
800 if (!asn1_ulong_decode(ctx, end, &ul)) { snmp_object_decode()
821 if (!asn1_eoc_decode(ctx, eoc)) { snmp_object_decode()
830 static unsigned char snmp_request_decode(struct asn1_ctx *ctx, snmp_request_decode() argument
836 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) snmp_request_decode()
842 if (!asn1_ulong_decode(ctx, end, &request->id)) snmp_request_decode()
845 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) snmp_request_decode()
851 if (!asn1_uint_decode(ctx, end, &request->error_status)) snmp_request_decode()
854 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) snmp_request_decode()
860 if (!asn1_uint_decode(ctx, end, &request->error_index)) snmp_request_decode()
923 static unsigned char snmp_trap_decode(struct asn1_ctx *ctx, snmp_trap_decode() argument
931 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) snmp_trap_decode()
937 if (!asn1_oid_decode(ctx, end, &trap->id, &trap->id_len)) snmp_trap_decode()
940 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) snmp_trap_decode()
947 if (!asn1_octets_decode(ctx, end, (unsigned char **)&trap->ip_address, &len)) snmp_trap_decode()
954 mangle_address(ctx->begin, ctx->pointer - 4, map, check); snmp_trap_decode()
956 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) snmp_trap_decode()
962 if (!asn1_uint_decode(ctx, end, &trap->general)) snmp_trap_decode()
965 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) snmp_trap_decode()
971 if (!asn1_uint_decode(ctx, end, &trap->specific)) snmp_trap_decode()
974 if (!asn1_header_decode(ctx, &end, &cls, &con, &tag)) snmp_trap_decode()
981 if (!asn1_ulong_decode(ctx, end, &trap->time)) snmp_trap_decode()
1024 struct asn1_ctx ctx; snmp_parse_mangle() local
1031 asn1_open(&ctx, msg, len); snmp_parse_mangle()
1036 if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) snmp_parse_mangle()
1044 if (!asn1_header_decode(&ctx, &end, &cls, &con, &tag)) snmp_parse_mangle()
1048 if (!asn1_uint_decode (&ctx, end, &vers)) snmp_parse_mangle()
1058 if (!asn1_header_decode (&ctx, &end, &cls, &con, &tag)) snmp_parse_mangle()
1062 if (!asn1_octets_decode(&ctx, end, &comm.data, &comm.len)) snmp_parse_mangle()
1077 if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &pdutype)) snmp_parse_mangle()
1107 unsigned char ret = snmp_trap_decode(&ctx, &trap, map, check); snmp_parse_mangle()
1118 if (!snmp_request_decode(&ctx, &req)) snmp_parse_mangle()
1130 if (!asn1_header_decode(&ctx, &eoc, &cls, &con, &tag)) snmp_parse_mangle()
1136 while (!asn1_eoc_decode(&ctx, eoc)) { snmp_parse_mangle()
1139 if (!snmp_object_decode(&ctx, &obj)) { snmp_parse_mangle()
1159 mangle_address(ctx.begin, ctx.pointer - 4 , map, check); snmp_parse_mangle()
1165 if (!asn1_eoc_decode(&ctx, eoc)) snmp_parse_mangle()
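asn1_length_decode() above implements standard BER length decoding: a short form fits in one octet, a long form uses the first octet's low bits as a count of following big-endian length octets, and a count of zero flags indefinite length (terminated later by an end-of-contents marker, cf. asn1_eoc_decode()). A self-contained sketch of that decoding with the bounds checks reduced to the essentials:

    #include <stdio.h>

    /* Returns bytes consumed, 0 on error; *def = 0 flags indefinite length. */
    static int ber_length_decode(const unsigned char *p, int avail,
                                 int *def, unsigned int *len)
    {
            int i, n;

            if (avail < 1)
                    return 0;
            if (!(p[0] & 0x80)) {           /* short form */
                    *def = 1;
                    *len = p[0];
                    return 1;
            }
            n = p[0] & 0x7f;
            if (n == 0) {                   /* indefinite form */
                    *def = 0;
                    *len = 0;
                    return 1;
            }
            if (n > avail - 1 || n > (int)sizeof(*len))
                    return 0;               /* don't trust oversized lengths */
            *def = 1;
            *len = 0;
            for (i = 1; i <= n; i++)        /* big-endian length octets */
                    *len = (*len << 8) | p[i];
            return 1 + n;
    }

    int main(void)
    {
            const unsigned char buf[] = { 0x82, 0x01, 0x0f };   /* long form */
            int def;
            unsigned int len;

            if (ber_length_decode(buf, sizeof(buf), &def, &len))
                    printf("definite=%d len=%u\n", def, len);   /* 1, 271 */
            return 0;
    }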
/linux-4.1.27/arch/powerpc/mm/
H A Dmmu_context_hash32.c57 * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
66 unsigned long ctx = next_mmu_context; __init_new_context() local
68 while (test_and_set_bit(ctx, context_map)) { __init_new_context()
69 ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx); __init_new_context()
70 if (ctx > LAST_CONTEXT) __init_new_context()
71 ctx = 0; __init_new_context()
73 next_mmu_context = (ctx + 1) & LAST_CONTEXT; __init_new_context()
75 return ctx; __init_new_context()
92 void __destroy_context(unsigned long ctx) __destroy_context() argument
94 clear_bit(ctx, context_map); __destroy_context()
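__init_new_context() above hands out MMU context numbers from a bitmap, scanning forward from a rotating hint and wrapping at LAST_CONTEXT. A user-space sketch of the same allocator; a plain flag array replaces the kernel's atomic bitops, so unlike the original this version is not SMP-safe, and like the quoted loop it assumes a free slot exists:

    #include <stdio.h>

    #define LAST_CONTEXT 32767              /* power of two minus one */

    static unsigned char context_map[LAST_CONTEXT + 1];
    static unsigned long next_mmu_context;

    static unsigned long init_new_context(void)
    {
            unsigned long ctx = next_mmu_context;

            while (context_map[ctx]) {      /* slot taken, scan forward */
                    if (++ctx > LAST_CONTEXT)
                            ctx = 0;
            }
            context_map[ctx] = 1;
            next_mmu_context = (ctx + 1) & LAST_CONTEXT;
            return ctx;
    }

    static void destroy_context(unsigned long ctx)
    {
            context_map[ctx] = 0;
    }

    int main(void)
    {
            unsigned long a = init_new_context();
            unsigned long b = init_new_context();

            printf("%lu %lu\n", a, b);      /* 0 1 */
            destroy_context(a);
            destroy_context(b);
            return 0;
    }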
/linux-4.1.27/arch/metag/kernel/
H A Dprocess.c135 pr_info(" SaveMask = 0x%04hx\n", regs->ctx.SaveMask); show_regs()
136 pr_info(" Flags = 0x%04hx (%c%c%c%c)\n", regs->ctx.Flags, show_regs()
137 regs->ctx.Flags & FLAG_Z ? 'Z' : 'z', show_regs()
138 regs->ctx.Flags & FLAG_N ? 'N' : 'n', show_regs()
139 regs->ctx.Flags & FLAG_O ? 'O' : 'o', show_regs()
140 regs->ctx.Flags & FLAG_C ? 'C' : 'c'); show_regs()
141 pr_info(" TXRPT = 0x%08x\n", regs->ctx.CurrRPT); show_regs()
142 pr_info(" PC = 0x%08x\n", regs->ctx.CurrPC); show_regs()
148 regs->ctx.AX[i].U0); show_regs()
151 regs->ctx.AX[i].U1); show_regs()
154 if (regs->ctx.SaveMask & TBICTX_XEXT_BIT) show_regs()
159 regs->ctx.Ext.AX2.U0); show_regs()
161 regs->ctx.Ext.AX2.U1); show_regs()
165 pr_info(" A0.%d = 0x%08x ", i + 3, regs->ctx.AX3[i].U0); show_regs()
166 printk(" A1.%d = 0x%08x\n", i + 3, regs->ctx.AX3[i].U1); show_regs()
170 pr_info(" %s = 0x%08x ", DX0_names[i], regs->ctx.DX[i].U0); show_regs()
171 printk(" %s = 0x%08x\n", DX1_names[i], regs->ctx.DX[i].U1); show_regs()
174 show_trace(NULL, (unsigned long *)regs->ctx.AX[0].U0, regs); show_regs()
206 childregs->ctx.AX[0].U1 = (unsigned long) global_base; copy_thread()
207 childregs->ctx.AX[0].U0 = (unsigned long) kernel_context; copy_thread()
209 childregs->ctx.DX[4].U1 = usp; copy_thread()
210 childregs->ctx.DX[3].U1 = kthread_arg; copy_thread()
224 childregs->ctx.AX[0].U0 = ALIGN(usp, 8); copy_thread()
228 childregs->ctx.DX[0].U0 = 0; copy_thread()
233 (__force void __user *)childregs->ctx.DX[1].U1; copy_thread()
237 struct meta_fpu_context *ctx; copy_thread() local
239 ctx = kmemdup(tsk->thread.fpu_context, copy_thread()
241 tsk->thread.fpu_context = ctx; copy_thread()
247 struct meta_ext_context *ctx; copy_thread() local
250 ctx = kmemdup(tsk->thread.dsp_context, copy_thread()
253 ctx->ram[i] = kmemdup(ctx->ram[i], ctx->ram_sz[i], copy_thread()
255 tsk->thread.dsp_context = ctx; copy_thread()
315 state.Sig.pCtx = &regs->ctx; __switch_to()
/linux-4.1.27/arch/sparc/crypto/
H A Daes_glue.c169 struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm); aes_set_key() local
174 ctx->expanded_key_length = 0xb0; aes_set_key()
175 ctx->ops = &aes128_ops; aes_set_key()
179 ctx->expanded_key_length = 0xd0; aes_set_key()
180 ctx->ops = &aes192_ops; aes_set_key()
184 ctx->expanded_key_length = 0xf0; aes_set_key()
185 ctx->ops = &aes256_ops; aes_set_key()
193 aes_sparc64_key_expand((const u32 *)in_key, &ctx->key[0], key_len); aes_set_key()
194 ctx->key_length = key_len; aes_set_key()
201 struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm); aes_encrypt() local
203 ctx->ops->encrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst); aes_encrypt()
208 struct crypto_sparc64_aes_ctx *ctx = crypto_tfm_ctx(tfm); aes_decrypt() local
210 ctx->ops->decrypt(&ctx->key[0], (const u32 *) src, (u32 *) dst); aes_decrypt()
219 struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_encrypt() local
227 ctx->ops->load_encrypt_keys(&ctx->key[0]); ecb_encrypt()
232 ctx->ops->ecb_encrypt(&ctx->key[0], ecb_encrypt()
248 struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ecb_decrypt() local
257 ctx->ops->load_decrypt_keys(&ctx->key[0]); ecb_decrypt()
258 key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)]; ecb_decrypt()
263 ctx->ops->ecb_decrypt(key_end, ecb_decrypt()
279 struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_encrypt() local
287 ctx->ops->load_encrypt_keys(&ctx->key[0]); cbc_encrypt()
292 ctx->ops->cbc_encrypt(&ctx->key[0], cbc_encrypt()
308 struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); cbc_decrypt() local
317 ctx->ops->load_decrypt_keys(&ctx->key[0]); cbc_decrypt()
318 key_end = &ctx->key[ctx->expanded_key_length / sizeof(u64)]; cbc_decrypt()
323 ctx->ops->cbc_decrypt(key_end, cbc_decrypt()
336 static void ctr_crypt_final(struct crypto_sparc64_aes_ctx *ctx, ctr_crypt_final() argument
345 ctx->ops->ecb_encrypt(&ctx->key[0], (const u64 *)ctrblk, ctr_crypt_final()
356 struct crypto_sparc64_aes_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); ctr_crypt() local
364 ctx->ops->load_encrypt_keys(&ctx->key[0]); ctr_crypt()
369 ctx->ops->ctr_crypt(&ctx->key[0], ctr_crypt()
378 ctr_crypt_final(ctx, &walk); ctr_crypt()
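ctr_crypt_final() above handles the tail of a CTR stream: the counter block is encrypted into a keystream block and only the remaining bytes are XORed, so no padding is needed. A sketch of that tail handling, with a dummy keystream function standing in for the AES core:

    #include <stdio.h>

    #define BLOCK_SIZE 16

    /* Placeholder for "encrypt ctrblk with the expanded key"; NOT real AES. */
    static void keystream_block(const unsigned char *ctrblk, unsigned char *out)
    {
            for (int i = 0; i < BLOCK_SIZE; i++)
                    out[i] = ctrblk[i] ^ 0xAA;
    }

    static void ctr_final(const unsigned char *ctrblk,
                          const unsigned char *src, unsigned char *dst,
                          unsigned int nbytes)          /* < BLOCK_SIZE */
    {
            unsigned char ks[BLOCK_SIZE];

            keystream_block(ctrblk, ks);
            for (unsigned int i = 0; i < nbytes; i++)
                    dst[i] = src[i] ^ ks[i];            /* tail XOR only */
    }

    int main(void)
    {
            unsigned char ctr[BLOCK_SIZE] = { 0 }, out[5];

            ctr_final(ctr, (const unsigned char *)"tail!", out, 5);
            printf("%02x\n", out[0]);
            return 0;
    }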
/linux-4.1.27/block/
H A Dblk-mq-sysfs.c36 struct blk_mq_ctx *ctx; blk_mq_sysfs_show() local
41 ctx = container_of(kobj, struct blk_mq_ctx, kobj); blk_mq_sysfs_show()
42 q = ctx->queue; blk_mq_sysfs_show()
50 res = entry->show(ctx, page); blk_mq_sysfs_show()
59 struct blk_mq_ctx *ctx; blk_mq_sysfs_store() local
64 ctx = container_of(kobj, struct blk_mq_ctx, kobj); blk_mq_sysfs_store()
65 q = ctx->queue; blk_mq_sysfs_store()
73 res = entry->store(ctx, page, length); blk_mq_sysfs_store()
125 static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page) blk_mq_sysfs_dispatched_show() argument
127 return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1], blk_mq_sysfs_dispatched_show()
128 ctx->rq_dispatched[0]); blk_mq_sysfs_dispatched_show()
131 static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page) blk_mq_sysfs_merged_show() argument
133 return sprintf(page, "%lu\n", ctx->rq_merged); blk_mq_sysfs_merged_show()
136 static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page) blk_mq_sysfs_completed_show() argument
138 return sprintf(page, "%lu %lu\n", ctx->rq_completed[1], blk_mq_sysfs_completed_show()
139 ctx->rq_completed[0]); blk_mq_sysfs_completed_show()
166 static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page) blk_mq_sysfs_rq_list_show() argument
170 spin_lock(&ctx->lock); blk_mq_sysfs_rq_list_show()
171 ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending"); blk_mq_sysfs_rq_list_show()
172 spin_unlock(&ctx->lock); blk_mq_sysfs_rq_list_show()
343 struct blk_mq_ctx *ctx; blk_mq_unregister_hctx() local
349 hctx_for_each_ctx(hctx, ctx, i) blk_mq_unregister_hctx()
350 kobject_del(&ctx->kobj); blk_mq_unregister_hctx()
358 struct blk_mq_ctx *ctx; blk_mq_register_hctx() local
368 hctx_for_each_ctx(hctx, ctx, i) { hctx_for_each_ctx()
369 ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu); hctx_for_each_ctx()
381 struct blk_mq_ctx *ctx; blk_mq_unregister_disk() local
387 hctx_for_each_ctx(hctx, ctx, j) queue_for_each_hw_ctx()
388 kobject_put(&ctx->kobj); queue_for_each_hw_ctx()
403 struct blk_mq_ctx *ctx; blk_mq_sysfs_init() local
411 queue_for_each_ctx(q, ctx, i) blk_mq_sysfs_init()
412 kobject_init(&ctx->kobj, &blk_mq_ctx_ktype); blk_mq_sysfs_init()
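blk_mq_sysfs_show() above recovers the enclosing blk_mq_ctx from the embedded kobject with container_of(). A self-contained sketch of that offset arithmetic; the struct names here are invented, and only the macro mirrors the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kobj { const char *name; };

    struct mq_ctx {
            unsigned int cpu;
            struct kobj kobj;       /* embedded, not a pointer */
    };

    static void show(struct kobj *kobj)
    {
            struct mq_ctx *ctx = container_of(kobj, struct mq_ctx, kobj);

            printf("ctx for cpu %u\n", ctx->cpu);
    }

    int main(void)
    {
            struct mq_ctx ctx = { .cpu = 3, .kobj = { "cpu3" } };

            show(&ctx.kobj);        /* prints "ctx for cpu 3" */
            return 0;
    }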
/linux-4.1.27/drivers/infiniband/hw/mlx4/
H A Dmad.c107 __be64 mlx4_ib_get_new_demux_tid(struct mlx4_ib_demux_ctx *ctx) mlx4_ib_get_new_demux_tid() argument
109 return cpu_to_be64(atomic_inc_return(&ctx->tid)) | mlx4_ib_get_new_demux_tid()
1109 struct mlx4_ib_demux_pv_ctx *ctx = cq->cq_context; mlx4_ib_tunnel_comp_handler() local
1110 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); mlx4_ib_tunnel_comp_handler()
1112 if (!dev->sriov.is_going_down && ctx->state == DEMUX_PV_STATE_ACTIVE) mlx4_ib_tunnel_comp_handler()
1113 queue_work(ctx->wq, &ctx->work); mlx4_ib_tunnel_comp_handler()
1117 static int mlx4_ib_post_pv_qp_buf(struct mlx4_ib_demux_pv_ctx *ctx, mlx4_ib_post_pv_qp_buf() argument
1130 sg_list.lkey = ctx->mr->lkey; mlx4_ib_post_pv_qp_buf()
1137 ib_dma_sync_single_for_device(ctx->ib_dev, tun_qp->ring[index].map, mlx4_ib_post_pv_qp_buf()
1281 static void mlx4_ib_multiplex_mad(struct mlx4_ib_demux_pv_ctx *ctx, struct ib_wc *wc) mlx4_ib_multiplex_mad() argument
1283 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); mlx4_ib_multiplex_mad()
1284 struct mlx4_ib_demux_pv_qp *tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc->wr_id)]; mlx4_ib_multiplex_mad()
1296 (wc->src_qp & 0x1) != ctx->port - 1 || mlx4_ib_multiplex_mad()
1298 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d\n", wc->src_qp); mlx4_ib_multiplex_mad()
1302 if (slave != ctx->slave) { mlx4_ib_multiplex_mad()
1303 mlx4_ib_warn(ctx->ib_dev, "can't multiplex bad sqp:%d: " mlx4_ib_multiplex_mad()
1309 ib_dma_sync_single_for_cpu(ctx->ib_dev, tun_qp->ring[wr_ix].map, mlx4_ib_multiplex_mad()
1322 mlx4_ib_warn(ctx->ib_dev, "egress mad has non-null tid msb:%d " mlx4_ib_multiplex_mad()
1337 !mlx4_vf_smi_enabled(dev->dev, slave, ctx->port)) mlx4_ib_multiplex_mad()
1341 if (mlx4_ib_multiplex_sa_handler(ctx->ib_dev, ctx->port, slave, mlx4_ib_multiplex_mad()
1346 if (mlx4_ib_multiplex_cm_handler(ctx->ib_dev, ctx->port, slave, mlx4_ib_multiplex_mad()
1358 mlx4_ib_warn(ctx->ib_dev, "dropping unsupported egress mad from class:%d " mlx4_ib_multiplex_mad()
1367 ah.ibah.device = ctx->ib_dev; mlx4_ib_multiplex_mad()
1370 fill_in_real_sgid_index(dev, slave, ctx->port, &ah_attr); mlx4_ib_multiplex_mad()
1379 mlx4_get_slave_default_vlan(dev->dev, ctx->port, slave, mlx4_ib_multiplex_mad()
1382 mlx4_ib_send_to_wire(dev, slave, ctx->port, mlx4_ib_multiplex_mad()
1391 static int mlx4_ib_alloc_pv_bufs(struct mlx4_ib_demux_pv_ctx *ctx, mlx4_ib_alloc_pv_bufs() argument
1401 tun_qp = &ctx->qp[qp_type]; mlx4_ib_alloc_pv_bufs()
1429 tun_qp->ring[i].map = ib_dma_map_single(ctx->ib_dev, mlx4_ib_alloc_pv_bufs()
1433 if (ib_dma_mapping_error(ctx->ib_dev, tun_qp->ring[i].map)) { mlx4_ib_alloc_pv_bufs()
1445 ib_dma_map_single(ctx->ib_dev, mlx4_ib_alloc_pv_bufs()
1449 if (ib_dma_mapping_error(ctx->ib_dev, mlx4_ib_alloc_pv_bufs()
1466 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, mlx4_ib_alloc_pv_bufs()
1476 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, mlx4_ib_alloc_pv_bufs()
1485 static void mlx4_ib_free_pv_qp_bufs(struct mlx4_ib_demux_pv_ctx *ctx, mlx4_ib_free_pv_qp_bufs() argument
1495 tun_qp = &ctx->qp[qp_type]; mlx4_ib_free_pv_qp_bufs()
1506 ib_dma_unmap_single(ctx->ib_dev, tun_qp->ring[i].map, mlx4_ib_free_pv_qp_bufs()
1512 ib_dma_unmap_single(ctx->ib_dev, tun_qp->tx_ring[i].buf.map, mlx4_ib_free_pv_qp_bufs()
1524 struct mlx4_ib_demux_pv_ctx *ctx; mlx4_ib_tunnel_comp_worker() local
1528 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); mlx4_ib_tunnel_comp_worker()
1529 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); mlx4_ib_tunnel_comp_worker()
1531 while (ib_poll_cq(ctx->cq, 1, &wc) == 1) { mlx4_ib_tunnel_comp_worker()
1532 tun_qp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; mlx4_ib_tunnel_comp_worker()
1536 mlx4_ib_multiplex_mad(ctx, &wc); mlx4_ib_tunnel_comp_worker()
1537 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, mlx4_ib_tunnel_comp_worker()
1563 ctx->slave, wc.status, wc.wr_id); mlx4_ib_tunnel_comp_worker()
1586 static int create_pv_sqp(struct mlx4_ib_demux_pv_ctx *ctx, create_pv_sqp() argument
1598 tun_qp = &ctx->qp[qp_type]; create_pv_sqp()
1601 qp_init_attr.init_attr.send_cq = ctx->cq; create_pv_sqp()
1602 qp_init_attr.init_attr.recv_cq = ctx->cq; create_pv_sqp()
1611 qp_init_attr.port = ctx->port; create_pv_sqp()
1612 qp_init_attr.slave = ctx->slave; create_pv_sqp()
1621 qp_init_attr.init_attr.port_num = ctx->port; create_pv_sqp()
1622 qp_init_attr.init_attr.qp_context = ctx; create_pv_sqp()
1624 tun_qp->qp = ib_create_qp(ctx->pd, &qp_init_attr.init_attr); create_pv_sqp()
1637 ret = find_slave_port_pkey_ix(to_mdev(ctx->ib_dev), ctx->slave, create_pv_sqp()
1638 ctx->port, IB_DEFAULT_PKEY_FULL, create_pv_sqp()
1642 to_mdev(ctx->ib_dev)->pkeys.virt2phys_pkey[ctx->slave][ctx->port - 1][0]; create_pv_sqp()
1644 attr.port_num = ctx->port; create_pv_sqp()
1668 ret = mlx4_ib_post_pv_qp_buf(ctx, tun_qp, i); create_pv_sqp()
1688 struct mlx4_ib_demux_pv_ctx *ctx; mlx4_ib_sqp_comp_worker() local
1694 ctx = container_of(work, struct mlx4_ib_demux_pv_ctx, work); mlx4_ib_sqp_comp_worker()
1695 ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); mlx4_ib_sqp_comp_worker()
1697 while (mlx4_ib_poll_cq(ctx->cq, 1, &wc) == 1) { mlx4_ib_sqp_comp_worker()
1698 sqp = &ctx->qp[MLX4_TUN_WRID_QPN(wc.wr_id)]; mlx4_ib_sqp_comp_worker()
1717 mlx4_ib_demux_mad(ctx->ib_dev, ctx->port, &wc, grh, mad); mlx4_ib_sqp_comp_worker()
1718 if (mlx4_ib_post_pv_qp_buf(ctx, sqp, wc.wr_id & mlx4_ib_sqp_comp_worker()
1730 ctx->slave, wc.status, wc.wr_id); mlx4_ib_sqp_comp_worker()
1747 struct mlx4_ib_demux_pv_ctx *ctx; alloc_pv_object() local
1750 ctx = kzalloc(sizeof (struct mlx4_ib_demux_pv_ctx), GFP_KERNEL); alloc_pv_object()
1751 if (!ctx) { alloc_pv_object()
1757 ctx->ib_dev = &dev->ib_dev; alloc_pv_object()
1758 ctx->port = port; alloc_pv_object()
1759 ctx->slave = slave; alloc_pv_object()
1760 *ret_ctx = ctx; alloc_pv_object()
1773 int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) create_pv_resources()
1777 if (ctx->state != DEMUX_PV_STATE_DOWN) create_pv_resources()
1780 ctx->state = DEMUX_PV_STATE_STARTING; create_pv_resources()
1782 if (rdma_port_get_link_layer(ibdev, ctx->port) == create_pv_resources()
1784 ctx->has_smi = 1; create_pv_resources()
1786 if (ctx->has_smi) { create_pv_resources()
1787 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_SMI, create_tun); create_pv_resources()
1794 ret = mlx4_ib_alloc_pv_bufs(ctx, IB_QPT_GSI, create_tun); create_pv_resources()
1801 if (ctx->has_smi) create_pv_resources()
1804 ctx->cq = ib_create_cq(ctx->ib_dev, mlx4_ib_tunnel_comp_handler, create_pv_resources()
1805 NULL, ctx, cq_size, 0); create_pv_resources()
1806 if (IS_ERR(ctx->cq)) { create_pv_resources()
1807 ret = PTR_ERR(ctx->cq); create_pv_resources()
1812 ctx->pd = ib_alloc_pd(ctx->ib_dev); create_pv_resources()
1813 if (IS_ERR(ctx->pd)) { create_pv_resources()
1814 ret = PTR_ERR(ctx->pd); create_pv_resources()
1819 ctx->mr = ib_get_dma_mr(ctx->pd, IB_ACCESS_LOCAL_WRITE); create_pv_resources()
1820 if (IS_ERR(ctx->mr)) { create_pv_resources()
1821 ret = PTR_ERR(ctx->mr); create_pv_resources()
1826 if (ctx->has_smi) { create_pv_resources()
1827 ret = create_pv_sqp(ctx, IB_QPT_SMI, create_tun); create_pv_resources()
1835 ret = create_pv_sqp(ctx, IB_QPT_GSI, create_tun); create_pv_resources()
1843 INIT_WORK(&ctx->work, mlx4_ib_tunnel_comp_worker); create_pv_resources()
1845 INIT_WORK(&ctx->work, mlx4_ib_sqp_comp_worker); create_pv_resources()
1847 ctx->wq = to_mdev(ibdev)->sriov.demux[port - 1].wq; create_pv_resources()
1849 ret = ib_req_notify_cq(ctx->cq, IB_CQ_NEXT_COMP); create_pv_resources()
1854 ctx->state = DEMUX_PV_STATE_ACTIVE; create_pv_resources()
1858 ctx->wq = NULL; create_pv_resources()
1859 ib_destroy_qp(ctx->qp[1].qp); create_pv_resources()
1860 ctx->qp[1].qp = NULL; create_pv_resources()
1864 if (ctx->has_smi) create_pv_resources()
1865 ib_destroy_qp(ctx->qp[0].qp); create_pv_resources()
1866 ctx->qp[0].qp = NULL; create_pv_resources()
1869 ib_dereg_mr(ctx->mr); create_pv_resources()
1870 ctx->mr = NULL; create_pv_resources()
1873 ib_dealloc_pd(ctx->pd); create_pv_resources()
1874 ctx->pd = NULL; create_pv_resources()
1877 ib_destroy_cq(ctx->cq); create_pv_resources()
1878 ctx->cq = NULL; create_pv_resources()
1881 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, create_tun); create_pv_resources()
1884 if (ctx->has_smi) create_pv_resources()
1885 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, create_tun); create_pv_resources()
1887 ctx->state = DEMUX_PV_STATE_DOWN; create_pv_resources()
1892 struct mlx4_ib_demux_pv_ctx *ctx, int flush) destroy_pv_resources()
1894 if (!ctx) destroy_pv_resources()
1896 if (ctx->state > DEMUX_PV_STATE_DOWN) { destroy_pv_resources()
1897 ctx->state = DEMUX_PV_STATE_DOWNING; destroy_pv_resources()
1899 flush_workqueue(ctx->wq); destroy_pv_resources()
1900 if (ctx->has_smi) { destroy_pv_resources()
1901 ib_destroy_qp(ctx->qp[0].qp); destroy_pv_resources()
1902 ctx->qp[0].qp = NULL; destroy_pv_resources()
1903 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_SMI, 1); destroy_pv_resources()
1905 ib_destroy_qp(ctx->qp[1].qp); destroy_pv_resources()
1906 ctx->qp[1].qp = NULL; destroy_pv_resources()
1907 mlx4_ib_free_pv_qp_bufs(ctx, IB_QPT_GSI, 1); destroy_pv_resources()
1908 ib_dereg_mr(ctx->mr); destroy_pv_resources()
1909 ctx->mr = NULL; destroy_pv_resources()
1910 ib_dealloc_pd(ctx->pd); destroy_pv_resources()
1911 ctx->pd = NULL; destroy_pv_resources()
1912 ib_destroy_cq(ctx->cq); destroy_pv_resources()
1913 ctx->cq = NULL; destroy_pv_resources()
1914 ctx->state = DEMUX_PV_STATE_DOWN; destroy_pv_resources()
1958 struct mlx4_ib_demux_ctx *ctx, mlx4_ib_alloc_demux_ctx()
1965 ctx->tun = kcalloc(dev->dev->caps.sqp_demux, mlx4_ib_alloc_demux_ctx()
1967 if (!ctx->tun) mlx4_ib_alloc_demux_ctx()
1970 ctx->dev = dev; mlx4_ib_alloc_demux_ctx()
1971 ctx->port = port; mlx4_ib_alloc_demux_ctx()
1972 ctx->ib_dev = &dev->ib_dev; mlx4_ib_alloc_demux_ctx()
1984 ret = alloc_pv_object(dev, i, port, &ctx->tun[i]); mlx4_ib_alloc_demux_ctx()
1991 ret = mlx4_ib_mcg_port_init(ctx); mlx4_ib_alloc_demux_ctx()
1998 ctx->wq = create_singlethread_workqueue(name); mlx4_ib_alloc_demux_ctx()
1999 if (!ctx->wq) { mlx4_ib_alloc_demux_ctx()
2006 ctx->ud_wq = create_singlethread_workqueue(name); mlx4_ib_alloc_demux_ctx()
2007 if (!ctx->ud_wq) { mlx4_ib_alloc_demux_ctx()
2016 destroy_workqueue(ctx->wq); mlx4_ib_alloc_demux_ctx()
2017 ctx->wq = NULL; mlx4_ib_alloc_demux_ctx()
2020 mlx4_ib_mcg_port_cleanup(ctx, 1); mlx4_ib_alloc_demux_ctx()
2024 kfree(ctx->tun); mlx4_ib_alloc_demux_ctx()
2025 ctx->tun = NULL; mlx4_ib_alloc_demux_ctx()
2052 static void mlx4_ib_free_demux_ctx(struct mlx4_ib_demux_ctx *ctx) mlx4_ib_free_demux_ctx() argument
2055 if (ctx) { mlx4_ib_free_demux_ctx()
2056 struct mlx4_ib_dev *dev = to_mdev(ctx->ib_dev); mlx4_ib_free_demux_ctx()
2057 mlx4_ib_mcg_port_cleanup(ctx, 1); mlx4_ib_free_demux_ctx()
2059 if (!ctx->tun[i]) mlx4_ib_free_demux_ctx()
2061 if (ctx->tun[i]->state > DEMUX_PV_STATE_DOWN) mlx4_ib_free_demux_ctx()
2062 ctx->tun[i]->state = DEMUX_PV_STATE_DOWNING; mlx4_ib_free_demux_ctx()
2064 flush_workqueue(ctx->wq); mlx4_ib_free_demux_ctx()
2066 destroy_pv_resources(dev, i, ctx->port, ctx->tun[i], 0); mlx4_ib_free_demux_ctx()
2067 free_pv_object(dev, i, ctx->port); mlx4_ib_free_demux_ctx()
2069 kfree(ctx->tun); mlx4_ib_free_demux_ctx()
2070 destroy_workqueue(ctx->ud_wq); mlx4_ib_free_demux_ctx()
2071 destroy_workqueue(ctx->wq); mlx4_ib_free_demux_ctx()
1772 create_pv_resources(struct ib_device *ibdev, int slave, int port, int create_tun, struct mlx4_ib_demux_pv_ctx *ctx) create_pv_resources() argument
1891 destroy_pv_resources(struct mlx4_ib_dev *dev, int slave, int port, struct mlx4_ib_demux_pv_ctx *ctx, int flush) destroy_pv_resources() argument
1957 mlx4_ib_alloc_demux_ctx(struct mlx4_ib_dev *dev, struct mlx4_ib_demux_ctx *ctx, int port) mlx4_ib_alloc_demux_ctx() argument
/linux-4.1.27/arch/metag/mm/
H A Dextable.c12 regs->ctx.CurrPC = fixup->fixup; fixup_exception()
/linux-4.1.27/drivers/video/fbdev/omap2/dss/
H A Ddpi.c158 struct dpi_clk_calc_ctx *ctx = data; dpi_calc_dispc_cb() local
165 if (ctx->pck_min >= 100000000) { dpi_calc_dispc_cb()
173 ctx->dispc_cinfo.lck_div = lckd; dpi_calc_dispc_cb()
174 ctx->dispc_cinfo.pck_div = pckd; dpi_calc_dispc_cb()
175 ctx->dispc_cinfo.lck = lck; dpi_calc_dispc_cb()
176 ctx->dispc_cinfo.pck = pck; dpi_calc_dispc_cb()
185 struct dpi_clk_calc_ctx *ctx = data; dpi_calc_hsdiv_cb() local
192 if (m_dispc > 1 && m_dispc % 2 != 0 && ctx->pck_min >= 100000000) dpi_calc_hsdiv_cb()
195 ctx->dsi_cinfo.mX[HSDIV_DISPC] = m_dispc; dpi_calc_hsdiv_cb()
196 ctx->dsi_cinfo.clkout[HSDIV_DISPC] = dispc; dpi_calc_hsdiv_cb()
198 return dispc_div_calc(dispc, ctx->pck_min, ctx->pck_max, dpi_calc_hsdiv_cb()
199 dpi_calc_dispc_cb, ctx); dpi_calc_hsdiv_cb()
207 struct dpi_clk_calc_ctx *ctx = data; dpi_calc_pll_cb() local
209 ctx->dsi_cinfo.n = n; dpi_calc_pll_cb()
210 ctx->dsi_cinfo.m = m; dpi_calc_pll_cb()
211 ctx->dsi_cinfo.fint = fint; dpi_calc_pll_cb()
212 ctx->dsi_cinfo.clkdco = clkdco; dpi_calc_pll_cb()
214 return dss_pll_hsdiv_calc(ctx->pll, clkdco, dpi_calc_pll_cb()
215 ctx->pck_min, dss_feat_get_param_max(FEAT_PARAM_DSS_FCK), dpi_calc_pll_cb()
216 dpi_calc_hsdiv_cb, ctx); dpi_calc_pll_cb()
221 struct dpi_clk_calc_ctx *ctx = data; dpi_calc_dss_cb() local
223 ctx->fck = fck; dpi_calc_dss_cb()
225 return dispc_div_calc(fck, ctx->pck_min, ctx->pck_max, dpi_calc_dss_cb()
226 dpi_calc_dispc_cb, ctx); dpi_calc_dss_cb()
230 struct dpi_clk_calc_ctx *ctx) dpi_dsi_clk_calc()
235 memset(ctx, 0, sizeof(*ctx)); dpi_dsi_clk_calc()
236 ctx->pll = dpi->pll; dpi_dsi_clk_calc()
237 ctx->pck_min = pck - 1000; dpi_dsi_clk_calc()
238 ctx->pck_max = pck + 1000; dpi_dsi_clk_calc()
243 clkin = clk_get_rate(ctx->pll->clkin); dpi_dsi_clk_calc()
245 return dss_pll_calc(ctx->pll, clkin, dpi_dsi_clk_calc()
247 dpi_calc_pll_cb, ctx); dpi_dsi_clk_calc()
250 static bool dpi_dss_clk_calc(unsigned long pck, struct dpi_clk_calc_ctx *ctx) dpi_dss_clk_calc() argument
264 memset(ctx, 0, sizeof(*ctx)); dpi_dss_clk_calc()
266 ctx->pck_min = max(pck - 1000 * i * i * i, 0lu); dpi_dss_clk_calc()
268 ctx->pck_min = 0; dpi_dss_clk_calc()
269 ctx->pck_max = pck + 1000 * i * i * i; dpi_dss_clk_calc()
271 ok = dss_div_calc(pck, ctx->pck_min, dpi_calc_dss_cb, ctx); dpi_dss_clk_calc()
285 struct dpi_clk_calc_ctx ctx; dpi_set_dsi_clk() local
289 ok = dpi_dsi_clk_calc(dpi, pck_req, &ctx); dpi_set_dsi_clk()
293 r = dss_pll_set_config(dpi->pll, &ctx.dsi_cinfo); dpi_set_dsi_clk()
300 dpi->mgr_config.clock_info = ctx.dispc_cinfo; dpi_set_dsi_clk()
302 *fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; dpi_set_dsi_clk()
303 *lck_div = ctx.dispc_cinfo.lck_div; dpi_set_dsi_clk()
304 *pck_div = ctx.dispc_cinfo.pck_div; dpi_set_dsi_clk()
312 struct dpi_clk_calc_ctx ctx; dpi_set_dispc_clk() local
316 ok = dpi_dss_clk_calc(pck_req, &ctx); dpi_set_dispc_clk()
320 r = dss_set_fck_rate(ctx.fck); dpi_set_dispc_clk()
324 dpi->mgr_config.clock_info = ctx.dispc_cinfo; dpi_set_dispc_clk()
326 *fck = ctx.fck; dpi_set_dispc_clk()
327 *lck_div = ctx.dispc_cinfo.lck_div; dpi_set_dispc_clk()
328 *pck_div = ctx.dispc_cinfo.pck_div; dpi_set_dispc_clk()
512 struct dpi_clk_calc_ctx ctx; dpi_check_timings() local
522 ok = dpi_dsi_clk_calc(dpi, timings->pixelclock, &ctx); dpi_check_timings()
526 fck = ctx.dsi_cinfo.clkout[HSDIV_DISPC]; dpi_check_timings()
528 ok = dpi_dss_clk_calc(timings->pixelclock, &ctx); dpi_check_timings()
532 fck = ctx.fck; dpi_check_timings()
535 lck_div = ctx.dispc_cinfo.lck_div; dpi_check_timings()
536 pck_div = ctx.dispc_cinfo.pck_div; dpi_check_timings()
229 dpi_dsi_clk_calc(struct dpi_data *dpi, unsigned long pck, struct dpi_clk_calc_ctx *ctx) dpi_dsi_clk_calc() argument
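
The dpi.c hits implement a nested divider search threaded through one dpi_clk_calc_ctx: dss_pll_calc() enumerates PLL settings into dpi_calc_pll_cb(), which enumerates HSDIV values into dpi_calc_hsdiv_cb(), which enumerates DISPC dividers into dpi_calc_dispc_cb(); each level records its candidate in *ctx, and the first callback to return true unwinds the whole search with ctx fully populated. A self-contained distillation of that callback-chaining idiom (plain C, with simple loops standing in for the dss enumerators):

    #include <stdbool.h>

    struct clk_calc_ctx {
            unsigned long pck_min, pck_max;   /* acceptance window */
            unsigned long fck, pck;           /* results, filled on success */
            int div;
    };

    /* Innermost level: accept a candidate only if it lands in the window.
     * Returning true stops every enclosing loop, leaving ctx populated. */
    static bool pck_cb(unsigned long pck, int div, struct clk_calc_ctx *ctx)
    {
            if (pck < ctx->pck_min || pck > ctx->pck_max)
                    return false;
            ctx->pck = pck;
            ctx->div = div;
            return true;
    }

    /* Middle level: record this candidate, then try all dividers of it. */
    static bool fck_cb(unsigned long fck, struct clk_calc_ctx *ctx)
    {
            int div;

            ctx->fck = fck;
            for (div = 1; div <= 255; div++)
                    if (pck_cb(fck / div, div, ctx))
                            return true;
            return false;
    }

    /* Outermost level: enumerate fck candidates, as dss_div_calc() does. */
    static bool clk_calc(const unsigned long *candidates, int n,
                         struct clk_calc_ctx *ctx)
    {
            int i;

            for (i = 0; i < n; i++)
                    if (fck_cb(candidates[i], ctx))
                            return true;
            return false;
    }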
/linux-4.1.27/fs/hpfs/
H A Ddir.c60 static int hpfs_readdir(struct file *file, struct dir_context *ctx) hpfs_readdir() argument
108 if (ctx->pos == 12) { /* diff -r requires this (note, that diff -r */ hpfs_readdir()
109 ctx->pos = 13; /* also fails on msdos filesystem in 2.0) */ hpfs_readdir()
112 if (ctx->pos == 13) { hpfs_readdir()
123 if (hpfs_stop_cycles(inode->i_sb, ctx->pos, &c1, &c2, "hpfs_readdir")) { hpfs_readdir()
127 if (ctx->pos == 12) hpfs_readdir()
129 if (ctx->pos == 3 || ctx->pos == 4 || ctx->pos == 5) { hpfs_readdir()
130 pr_err("pos==%d\n", (int)ctx->pos); hpfs_readdir()
133 if (ctx->pos == 0) { hpfs_readdir()
134 if (!dir_emit_dot(file, ctx)) hpfs_readdir()
136 ctx->pos = 11; hpfs_readdir()
138 if (ctx->pos == 11) { hpfs_readdir()
139 if (!dir_emit(ctx, "..", 2, hpfs_inode->i_parent_dir, DT_DIR)) hpfs_readdir()
141 ctx->pos = 1; hpfs_readdir()
143 if (ctx->pos == 1) { hpfs_readdir()
144 ctx->pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1; hpfs_readdir()
148 next_pos = ctx->pos; hpfs_readdir()
150 ctx->pos = next_pos; hpfs_readdir()
158 hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A entry; pos = %08lx", (unsigned long)ctx->pos); hpfs_readdir()
160 hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", (unsigned long)ctx->pos); hpfs_readdir()
163 ctx->pos = next_pos; hpfs_readdir()
167 if (!dir_emit(ctx, tempname, de->namelen, le32_to_cpu(de->fnode), DT_UNKNOWN)) { hpfs_readdir()
172 ctx->pos = next_pos; hpfs_readdir()
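
hpfs_readdir() drives everything from ctx->pos: positions 0 and 11 emit "." and "..", 12/13 absorb the historical diff -r quirk noted in the comment, and from 1 onward the value encodes an on-disk dnode cursor, re-synchronized via next_pos after every emit. The minimal shape of such an ->iterate handler, as a hedged sketch (a one-entry directory rather than the dnode walk; names and the inode number are illustrative):

    #include <linux/fs.h>

    /* Hedged sketch of a dir_context-driven readdir: ctx->pos is the only
     * cursor, and dir_emit() returning false means the user buffer is
     * full, so we stop and resume from the same position next call. */
    static int demo_readdir(struct file *file, struct dir_context *ctx)
    {
            if (!dir_emit_dots(file, ctx))   /* emits "." and "..", pos 0 and 1 */
                    return 0;
            if (ctx->pos == 2) {
                    if (!dir_emit(ctx, "hello", 5, 100 /* ino */, DT_REG))
                            return 0;
                    ctx->pos++;              /* advance only after a successful emit */
            }
            return 0;                        /* pos > 2: end of directory */
    }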
/linux-4.1.27/fs/affs/
H A Ddir.c43 affs_readdir(struct file *file, struct dir_context *ctx) affs_readdir() argument
57 pr_debug("%s(ino=%lu,f_pos=%llx)\n", __func__, inode->i_ino, ctx->pos); affs_readdir()
59 if (ctx->pos < 2) { affs_readdir()
61 if (!dir_emit_dots(file, ctx)) affs_readdir()
66 chain_pos = (ctx->pos - 2) & 0xffff; affs_readdir()
67 hash_pos = (ctx->pos - 2) >> 16; affs_readdir()
72 ctx->pos = ((hash_pos << 16) | chain_pos) + 2; affs_readdir()
107 ctx->pos = (hash_pos << 16) + 2; affs_readdir()
121 namelen, name, ino, hash_pos, ctx->pos); affs_readdir()
123 if (!dir_emit(ctx, name, namelen, ino, DT_UNKNOWN)) affs_readdir()
125 ctx->pos++; affs_readdir()
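
affs_readdir() packs two cursors into the single ctx->pos: after the two dot entries, the low 16 bits index into the current hash chain and the upper bits select the hash bucket, so the position can be decoded again on re-entry. The encode/decode pair, as a small sketch taken directly from the arithmetic above:

    #include <linux/types.h>

    /* Hedged sketch of the f_pos packing used by affs_readdir() above. */
    static inline loff_t affs_pack_pos(unsigned hash_pos, unsigned chain_pos)
    {
            return (((loff_t)hash_pos << 16) | (chain_pos & 0xffff)) + 2;
    }

    static inline void affs_unpack_pos(loff_t pos,
                                       unsigned *hash_pos, unsigned *chain_pos)
    {
            pos -= 2;                      /* positions 0 and 1 are "." and ".." */
            *chain_pos = pos & 0xffff;     /* entry within the hash chain */
            *hash_pos  = pos >> 16;        /* hash-table bucket */
    }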
/linux-4.1.27/drivers/crypto/ux500/hash/
H A Dhash_core.c148 struct hash_ctx *ctx = data; hash_dma_callback() local
150 complete(&ctx->device->dma.complete); hash_dma_callback()
153 static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg, hash_set_dma_transfer() argument
161 dev_err(ctx->device->dev, "%s: Invalid DMA direction\n", hash_set_dma_transfer()
168 channel = ctx->device->dma.chan_mem2hash; hash_set_dma_transfer()
169 ctx->device->dma.sg = sg; hash_set_dma_transfer()
170 ctx->device->dma.sg_len = dma_map_sg(channel->device->dev, hash_set_dma_transfer()
171 ctx->device->dma.sg, ctx->device->dma.nents, hash_set_dma_transfer()
174 if (!ctx->device->dma.sg_len) { hash_set_dma_transfer()
175 dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n", hash_set_dma_transfer()
180 dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n", hash_set_dma_transfer()
183 ctx->device->dma.sg, ctx->device->dma.sg_len, hash_set_dma_transfer()
186 dev_err(ctx->device->dev, hash_set_dma_transfer()
192 desc->callback_param = ctx; hash_set_dma_transfer()
200 static void hash_dma_done(struct hash_ctx *ctx) hash_dma_done() argument
204 chan = ctx->device->dma.chan_mem2hash; hash_dma_done()
206 dma_unmap_sg(chan->device->dev, ctx->device->dma.sg, hash_dma_done()
207 ctx->device->dma.sg_len, DMA_TO_DEVICE); hash_dma_done()
210 static int hash_dma_write(struct hash_ctx *ctx, hash_dma_write() argument
213 int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE); hash_dma_write()
215 dev_dbg(ctx->device->dev, hash_dma_write()
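
hash_set_dma_transfer(), hash_dma_done() and hash_dma_write() above form the usual slave-DMA round trip: map the scatterlist for the device direction, prepare a slave descriptor on the mem2hash channel, complete a completion from the descriptor callback, then unmap once the engine has consumed the data. A hedged, condensed sketch of that round trip using the generic dmaengine wrappers (the channel and completion are assumed to be set up by the caller):

    #include <linux/dmaengine.h>
    #include <linux/dma-mapping.h>
    #include <linux/completion.h>

    static void demo_dma_cb(void *data)
    {
            complete(data);                 /* wake the submitter */
    }

    /* Hedged sketch of the mem-to-device pattern above; error paths condensed. */
    static int demo_dma_write(struct dma_chan *chan, struct scatterlist *sg,
                              int nents, struct completion *done)
    {
            struct dma_async_tx_descriptor *desc;
            int mapped, ret;

            mapped = dma_map_sg(chan->device->dev, sg, nents, DMA_TO_DEVICE);
            if (!mapped)
                    return -EFAULT;

            desc = dmaengine_prep_slave_sg(chan, sg, mapped,
                                           DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
            if (!desc) {
                    ret = -ENOMEM;
                    goto unmap;
            }
            desc->callback = demo_dma_cb;
            desc->callback_param = done;

            dmaengine_submit(desc);
            dma_async_issue_pending(chan);
            wait_for_completion(done);      /* demo_dma_cb() fires when done */
            ret = 0;
    unmap:
            dma_unmap_sg(chan->device->dev, sg, nents, DMA_TO_DEVICE);
            return ret;
    }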
236 struct hash_ctx *ctx = device_data->current_ctx; get_empty_message_digest() local
240 * Caller responsible for ctx != NULL. get_empty_message_digest()
243 if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) { get_empty_message_digest()
244 if (HASH_ALGO_SHA1 == ctx->config.algorithm) { get_empty_message_digest()
250 ctx->config.algorithm) { get_empty_message_digest()
261 } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) { get_empty_message_digest()
262 if (!ctx->keylen) { get_empty_message_digest()
263 if (HASH_ALGO_SHA1 == ctx->config.algorithm) { get_empty_message_digest()
268 } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) { get_empty_message_digest()
380 static int hash_get_device_data(struct hash_ctx *ctx, hash_get_device_data() argument
404 local_device_data->current_ctx = ctx; hash_get_device_data()
405 ctx->device = local_device_data; hash_get_device_data()
480 * @ctx: The hash context.
486 struct hash_ctx *ctx) init_hash_hw()
490 ret = hash_setconfiguration(device_data, &ctx->config); init_hash_hw()
497 hash_begin(device_data, ctx); init_hash_hw()
499 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) init_hash_hw()
500 hash_hw_write_key(device_data, ctx->key, ctx->keylen); init_hash_hw()
567 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); hash_init() local
570 if (!ctx->key) hash_init()
571 ctx->keylen = 0; hash_init()
669 * @ctx: Hash context
675 static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr) hash_incrementlength() argument
677 ctx->state.length.low_word += incr; hash_incrementlength()
680 if (ctx->state.length.low_word < incr) hash_incrementlength()
681 ctx->state.length.high_word++; hash_incrementlength()
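
hash_incrementlength() keeps the 64-bit message length as two 32-bit words and detects carry with a post-add comparison: after an unsigned wrapping addition, low < incr holds exactly when the add overflowed. The test in isolation:

    #include <stdint.h>

    struct len64 { uint32_t low, high; };

    /* Sketch of the carry test above: after a wrapping add,
     * (low < incr) is true exactly when the add carried out of 32 bits. */
    static void len64_add(struct len64 *l, uint32_t incr)
    {
            l->low += incr;          /* may wrap modulo 2^32 */
            if (l->low < incr)       /* wrapped => propagate the carry */
                    l->high++;
    }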
755 * @ctx: Hash context.
757 void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx) hash_begin() argument
779 struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, hash_process_data()
808 ret = init_hash_hw(device_data, ctx); hash_process_data()
872 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); hash_dma_final() local
878 ret = hash_get_device_data(ctx, &device_data); hash_dma_final()
882 dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); hash_dma_final()
895 ret = hash_setconfiguration(device_data, &ctx->config); hash_dma_final()
916 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC) hash_dma_final()
917 hash_hw_write_key(device_data, ctx->key, ctx->keylen); hash_dma_final()
925 ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL); hash_dma_final()
926 if (!ctx->device->dma.nents) { hash_dma_final()
927 dev_err(device_data->dev, "%s: ctx->device->dma.nents = 0\n", hash_dma_final()
929 ret = ctx->device->dma.nents; hash_dma_final()
933 bytes_written = hash_dma_write(ctx, req->src, req->nbytes); hash_dma_final()
941 wait_for_completion(&ctx->device->dma.complete); hash_dma_final()
942 hash_dma_done(ctx); hash_dma_final()
947 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { hash_dma_final()
948 unsigned int keylen = ctx->keylen; hash_dma_final()
949 u8 *key = ctx->key; hash_dma_final()
952 __func__, ctx->keylen); hash_dma_final()
956 hash_get_digest(device_data, digest, ctx->config.algorithm); hash_dma_final()
957 memcpy(req->result, digest, ctx->digestsize); hash_dma_final()
965 kfree(ctx->key); hash_dma_final()
978 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); hash_hw_final() local
983 ret = hash_get_device_data(ctx, &device_data); hash_hw_final()
987 dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx); hash_hw_final()
997 } else if (req->nbytes == 0 && ctx->keylen == 0) { hash_hw_final()
1007 if (!ret && likely(zero_hash_size == ctx->digestsize) && hash_hw_final()
1009 memcpy(req->result, &zero_hash[0], ctx->digestsize); hash_hw_final()
1019 zero_hash_size == ctx->digestsize ? hash_hw_final()
1024 } else if (req->nbytes == 0 && ctx->keylen > 0) { hash_hw_final()
1031 ret = init_hash_hw(device_data, ctx); hash_hw_final()
1048 if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) { hash_hw_final()
1049 unsigned int keylen = ctx->keylen; hash_hw_final()
1050 u8 *key = ctx->key; hash_hw_final()
1053 __func__, ctx->keylen); hash_hw_final()
1057 hash_get_digest(device_data, digest, ctx->config.algorithm); hash_hw_final()
1058 memcpy(req->result, digest, ctx->digestsize); hash_hw_final()
1066 kfree(ctx->key); hash_hw_final()
1085 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); hash_hw_update() local
1097 /* Check if ctx->state.length + msg_length hash_hw_update()
1105 ret = hash_get_device_data(ctx, &device_data); hash_hw_update()
1112 ret = hash_process_data(device_data, ctx, req_ctx, msg_length, hash_hw_update()
1350 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); hash_setkey() local
1355 ctx->key = kmemdup(key, keylen, GFP_KERNEL); hash_setkey()
1356 if (!ctx->key) { hash_setkey()
1357 pr_err("%s: Failed to allocate ctx->key for %d\n", hash_setkey()
1361 ctx->keylen = keylen; hash_setkey()
1369 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_sha1_init() local
1371 ctx->config.data_format = HASH_DATA_8_BITS; ahash_sha1_init()
1372 ctx->config.algorithm = HASH_ALGO_SHA1; ahash_sha1_init()
1373 ctx->config.oper_mode = HASH_OPER_MODE_HASH; ahash_sha1_init()
1374 ctx->digestsize = SHA1_DIGEST_SIZE; ahash_sha1_init()
1382 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); ahash_sha256_init() local
1384 ctx->config.data_format = HASH_DATA_8_BITS; ahash_sha256_init()
1385 ctx->config.algorithm = HASH_ALGO_SHA256; ahash_sha256_init()
1386 ctx->config.oper_mode = HASH_OPER_MODE_HASH; ahash_sha256_init()
1387 ctx->digestsize = SHA256_DIGEST_SIZE; ahash_sha256_init()
1425 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); hmac_sha1_init() local
1427 ctx->config.data_format = HASH_DATA_8_BITS; hmac_sha1_init()
1428 ctx->config.algorithm = HASH_ALGO_SHA1; hmac_sha1_init()
1429 ctx->config.oper_mode = HASH_OPER_MODE_HMAC; hmac_sha1_init()
1430 ctx->digestsize = SHA1_DIGEST_SIZE; hmac_sha1_init()
1438 struct hash_ctx *ctx = crypto_ahash_ctx(tfm); hmac_sha256_init() local
1440 ctx->config.data_format = HASH_DATA_8_BITS; hmac_sha256_init()
1441 ctx->config.algorithm = HASH_ALGO_SHA256; hmac_sha256_init()
1442 ctx->config.oper_mode = HASH_OPER_MODE_HMAC; hmac_sha256_init()
1443 ctx->digestsize = SHA256_DIGEST_SIZE; hmac_sha256_init()
1497 struct hash_ctx *ctx = crypto_tfm_ctx(tfm); hash_cra_init() local
1508 ctx->config.data_format = HASH_DATA_8_BITS; hash_cra_init()
1509 ctx->config.algorithm = hash_alg->conf.algorithm; hash_cra_init()
1510 ctx->config.oper_mode = hash_alg->conf.oper_mode; hash_cra_init()
1512 ctx->digestsize = hash_alg->hash.halg.digestsize; hash_cra_init()
485 init_hash_hw(struct hash_device_data *device_data, struct hash_ctx *ctx) init_hash_hw() argument
778 hash_process_data(struct hash_device_data *device_data, struct hash_ctx *ctx, struct hash_req_ctx *req_ctx, int msg_length, u8 *data_buffer, u8 *buffer, u8 *index) hash_process_data() argument
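
Throughout hash_core.c the per-transform state comes from crypto_ahash_ctx(tfm): the crypto API allocates the driver-private area alongside the tfm, each algorithm's ->init stamps its configuration into it, and ->setkey duplicates the caller's key with kmemdup() (freed again at the end of the final operation, as the kfree(ctx->key) hits show). A minimal sketch of that layout, with an abbreviated stand-in struct:

    #include <crypto/hash.h>
    #include <crypto/sha.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Abbreviated, hypothetical stand-in for the driver's hash_ctx. */
    struct demo_hash_ctx {
            u8  *key;
            u32 keylen;
            u32 digestsize;
    };

    static int demo_sha1_init(struct ahash_request *req)
    {
            struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
            struct demo_hash_ctx *ctx = crypto_ahash_ctx(tfm);

            ctx->digestsize = SHA1_DIGEST_SIZE;  /* per-algorithm configuration */
            return 0;
    }

    static int demo_setkey(struct crypto_ahash *tfm,
                           const u8 *key, unsigned int keylen)
    {
            struct demo_hash_ctx *ctx = crypto_ahash_ctx(tfm);

            ctx->key = kmemdup(key, keylen, GFP_KERNEL);  /* freed after use */
            if (!ctx->key)
                    return -ENOMEM;
            ctx->keylen = keylen;
            return 0;
    }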
/linux-4.1.27/drivers/media/platform/s5p-tv/
H A Dsii9234_drv.c105 static int sii9234_reset(struct sii9234_context *ctx) sii9234_reset() argument
107 struct i2c_client *client = ctx->client; sii9234_reset()
111 gpio_direction_output(ctx->gpio_n_reset, 1); sii9234_reset()
113 gpio_direction_output(ctx->gpio_n_reset, 0); sii9234_reset()
115 gpio_direction_output(ctx->gpio_n_reset, 1); sii9234_reset()
216 static int sii9234_set_internal(struct sii9234_context *ctx) sii9234_set_internal() argument
218 struct i2c_client *client = ctx->client; sii9234_set_internal()
233 struct sii9234_context *ctx = sd_to_context(sd); sii9234_runtime_suspend() local
234 struct i2c_client *client = ctx->client; sii9234_runtime_suspend()
239 regulator_disable(ctx->power); sii9234_runtime_suspend()
247 struct sii9234_context *ctx = sd_to_context(sd); sii9234_runtime_resume() local
248 struct i2c_client *client = ctx->client; sii9234_runtime_resume()
252 ret = regulator_enable(ctx->power); sii9234_runtime_resume()
256 ret = sii9234_reset(ctx); sii9234_runtime_resume()
264 ret = sii9234_set_internal(ctx); sii9234_runtime_resume()
272 regulator_disable(ctx->power); sii9234_runtime_resume()
284 struct sii9234_context *ctx = sd_to_context(sd); sii9234_s_power() local
288 ret = pm_runtime_get_sync(&ctx->client->dev); sii9234_s_power()
290 ret = pm_runtime_put(&ctx->client->dev); sii9234_s_power()
297 struct sii9234_context *ctx = sd_to_context(sd); sii9234_s_stream() local
300 sii9234_writeb_mask(ctx->client, 0x1a, enable ? 0 : ~0 , 1 << 4); sii9234_s_stream()
322 struct sii9234_context *ctx; sii9234_probe() local
325 ctx = devm_kzalloc(&client->dev, sizeof(*ctx), GFP_KERNEL); sii9234_probe()
326 if (!ctx) { sii9234_probe()
331 ctx->client = client; sii9234_probe()
333 ctx->power = devm_regulator_get(dev, "hdmi-en"); sii9234_probe()
334 if (IS_ERR(ctx->power)) { sii9234_probe()
336 return PTR_ERR(ctx->power); sii9234_probe()
339 ctx->gpio_n_reset = pdata->gpio_n_reset; sii9234_probe()
340 ret = devm_gpio_request(dev, ctx->gpio_n_reset, "MHL_RST"); sii9234_probe()
346 v4l2_i2c_subdev_init(&ctx->sd, client, &sii9234_ops); sii9234_probe()
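
sii9234_probe() is the standard managed-resource probe: devm_kzalloc() the context (so nothing needs explicit unwinding on error), fetch the regulator and reset GPIO through their devm variants, then bind the embedded subdev with v4l2_i2c_subdev_init(). The ordering, condensed into a hedged sketch (platform-data lookup and logging omitted, so gpio_n_reset is left as-is):

    #include <linux/i2c.h>
    #include <linux/regulator/consumer.h>
    #include <linux/gpio.h>
    #include <media/v4l2-common.h>

    static int demo_probe(struct i2c_client *client,
                          const struct i2c_device_id *id)
    {
            struct device *dev = &client->dev;
            struct sii9234_context *ctx;
            int ret;

            ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
            if (!ctx)
                    return -ENOMEM;          /* devm: nothing to unwind */
            ctx->client = client;

            ctx->power = devm_regulator_get(dev, "hdmi-en");
            if (IS_ERR(ctx->power))
                    return PTR_ERR(ctx->power);

            ret = devm_gpio_request(dev, ctx->gpio_n_reset, "MHL_RST");
            if (ret)
                    return ret;

            /* Binds ctx->sd to the client and installs the ops table. */
            v4l2_i2c_subdev_init(&ctx->sd, client, &sii9234_ops);
            return 0;
    }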
